text
stringlengths 11
4.05M
|
|---|
package config
import (
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/wish/ctl/pkg/client"
"os"
)
// deleteCmd returns the cobra command that removes the config file
// currently in use by viper. The client c is accepted for signature
// consistency with the sibling command constructors but is not used.
func deleteCmd(c *client.Client) *cobra.Command {
	return &cobra.Command{
		Use: "delete",
		// BUG FIX: the description previously read "Update extensions",
		// which described a different command.
		Short: "Delete the config file",
		RunE: func(cmd *cobra.Command, args []string) error {
			return os.Remove(viper.ConfigFileUsed())
		},
	}
}
|
package ebakusdb
import (
"encoding/hex"
"fmt"
"math/big"
"reflect"
"strconv"
"github.com/ebakus/go-ebakus/common"
)
// hasHexPrefix validates str begins with '0x' or '0X'.
// hasHexPrefix reports whether str starts with a "0x" or "0X" marker.
func hasHexPrefix(str string) bool {
	if len(str) < 2 {
		return false
	}
	return str[0] == '0' && (str[1] == 'x' || str[1] == 'X')
}
// byteArrayToReflectValue parses the raw bytes in value into a Go value
// chosen by the kind of t, wrapped in a reflect.Value.
//
// Numeric kinds are parsed with strconv (base auto-detected for ints via
// base 0, so "0x.." and "0.." literals work). Slices/arrays accept a
// 0x-prefixed hex string (decoded to []byte, or to common.Address when t
// is that type) or raw bytes for common.Address. *big.Int accepts hex or
// base-10. Note that the returned value's dynamic type is the built-in
// type for the kind (e.g. int8), not t itself, when t is a named type.
func byteArrayToReflectValue(value []byte, t reflect.Type) (reflect.Value, error) {
	var (
		kind        = t.Kind()
		stringValue = string(value)
	)
	switch kind {
	case reflect.Bool:
		val, err := strconv.ParseBool(stringValue)
		if err != nil {
			return reflect.Value{}, err
		}
		return reflect.ValueOf(val), nil
	case reflect.Int:
		// bitSize 0 == platform int width; base 0 auto-detects prefix.
		val, err := strconv.ParseInt(stringValue, 0, 0)
		if err != nil {
			return reflect.Value{}, err
		}
		return reflect.ValueOf(int(val)), nil
	case reflect.Int8:
		val, err := strconv.ParseInt(stringValue, 0, 8)
		if err != nil {
			return reflect.Value{}, err
		}
		return reflect.ValueOf(int8(val)), nil
	case reflect.Int16:
		val, err := strconv.ParseInt(stringValue, 0, 16)
		if err != nil {
			return reflect.Value{}, err
		}
		return reflect.ValueOf(int16(val)), nil
	case reflect.Int32:
		val, err := strconv.ParseInt(stringValue, 0, 32)
		if err != nil {
			return reflect.Value{}, err
		}
		return reflect.ValueOf(int32(val)), nil
	case reflect.Int64:
		val, err := strconv.ParseInt(stringValue, 0, 64)
		if err != nil {
			return reflect.Value{}, err
		}
		return reflect.ValueOf(int64(val)), nil
	case reflect.Uint:
		val, err := strconv.ParseUint(stringValue, 0, 0)
		if err != nil {
			return reflect.Value{}, err
		}
		return reflect.ValueOf(uint(val)), nil
	case reflect.Uint8:
		val, err := strconv.ParseUint(stringValue, 0, 8)
		if err != nil {
			return reflect.Value{}, err
		}
		return reflect.ValueOf(uint8(val)), nil
	case reflect.Uint16:
		val, err := strconv.ParseUint(stringValue, 0, 16)
		if err != nil {
			return reflect.Value{}, err
		}
		return reflect.ValueOf(uint16(val)), nil
	case reflect.Uint32:
		val, err := strconv.ParseUint(stringValue, 0, 32)
		if err != nil {
			return reflect.Value{}, err
		}
		return reflect.ValueOf(uint32(val)), nil
	case reflect.Uint64:
		val, err := strconv.ParseUint(stringValue, 0, 64)
		if err != nil {
			return reflect.Value{}, err
		}
		return reflect.ValueOf(uint64(val)), nil
	case reflect.Float32:
		val, err := strconv.ParseFloat(stringValue, 32)
		if err != nil {
			return reflect.Value{}, err
		}
		return reflect.ValueOf(float32(val)), nil
	case reflect.Float64:
		val, err := strconv.ParseFloat(stringValue, 64)
		if err != nil {
			return reflect.Value{}, err
		}
		return reflect.ValueOf(float64(val)), nil
	case reflect.String:
		return reflect.ValueOf(stringValue), nil
	case reflect.Slice, reflect.Array:
		if hasHexPrefix(stringValue) {
			decoded, err := hex.DecodeString(stringValue[2:])
			if err != nil {
				return reflect.Value{}, err
			}
			if t == reflect.TypeOf(common.Address{}) {
				return reflect.ValueOf(common.BytesToAddress(decoded)), nil
			}
			return reflect.ValueOf(decoded), nil
		} else if t == reflect.TypeOf(common.Address{}) {
			// Raw (non-hex) bytes are interpreted directly as an address.
			return reflect.ValueOf(common.BytesToAddress(value)), nil
		}
	case reflect.Ptr:
		if t == reflect.TypeOf(&big.Int{}) {
			var val *big.Int
			if hasHexPrefix(stringValue) {
				decoded, err := hex.DecodeString(stringValue[2:])
				if err != nil {
					return reflect.Value{}, err
				}
				val = big.NewInt(0).SetBytes(decoded)
			} else {
				var ok bool
				val, ok = big.NewInt(0).SetString(stringValue, 10)
				if !ok {
					return reflect.Value{}, fmt.Errorf("unpack: failed to unpack big.Int")
				}
			}
			return reflect.ValueOf(val), nil
		}
	}
	// NOTE(review): any unhandled kind (and a non-hex, non-Address slice)
	// falls through to returning the raw string — callers expecting a
	// value of type t must cope with that; confirm this is intended.
	return reflect.ValueOf(stringValue), nil
}
|
package main
import (
"fmt"
"github.com/sanguohot/medichain/chain"
"github.com/sanguohot/medichain/etc"
"log"
"time"
"math/big"
)
// main smoke-tests the medichain contract bindings: it resolves the
// controller contract address from CNS, registers a super user, waits
// briefly for the transaction, then reads the super-user count and the
// first super-user address back.
func main() {
	name := etc.ContractController
	// NOTE(review): chain.GetAddressFromCns returns (err, address) —
	// error-first, unlike the usual Go convention; the same applies to
	// UsersDataAddSuper below.
	err, address := chain.GetAddressFromCns(name)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("ContractController address ===>", address.Hex())
	if err, hash := chain.UsersDataAddSuper(*address); err != nil {
		log.Fatal(err)
	} else {
		fmt.Printf("tx sent: %s\n", hash.Hex()) // tx sent: 0x8d490e535678e9a24360e955d75b27ad307bdfb97a1dca51d0f3035dcee3e870
	}
	// Give the transaction a moment to be processed before querying state.
	time.Sleep(time.Second * 1)
	size, err := chain.UsersDataGetSuperSize()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("super size ===>", size)
	if size.Uint64() > 0 {
		super, err := chain.UsersDataGetSuperByIndex(big.NewInt(0))
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("super 0 ===>", super.Hex())
	}
	// The commented-out code below sketches wallet creation and public-key
	// retrieval; kept for reference.
	//password := "123456"
	//userUuid := uuid.New()
	//_, _, address, err = util.NewWallet(password)
	//if err != nil {
	//	log.Fatal(err)
	//}
	//publicKey, err := util.GetPublicKeyBytes32_2FromStore(*address, password)
	//if err != nil {
	//	log.Fatal(err)
	//}
}
|
package ymongo
import (
"context"
"fmt"
"log"
"testing"
"time"
"go.mongodb.org/mongo-driver/bson"
)
// Test_mongo_insert inserts a single document and then reads the whole
// collection back, logging every document found.
func Test_mongo_insert(t *testing.T) {
	var ctx = context.Background()
	var doc = bson.M{"a": 100, "b": 30}
	client, err := NewMongoClient()
	if err != nil {
		fmt.Println("------aa-----------")
		log.Println(err)
		return
	}
	// BUG FIX: Disconnect was deferred before the error check, so a
	// failed connection left client nil and the defer panicked.
	defer client.Disconnect(ctx)
	log.Println("cnt ok")
	//d
	dbName, tbName := "test", "t"
	tb := client.Database(dbName).Collection(tbName)
	result, err := tb.InsertOne(ctx, doc)
	if err != nil {
		log.Println("insert One err:", err)
	}
	fmt.Println("-----------------")
	log.Println(result)
	//
	c, err := tb.Find(ctx, bson.M{})
	if err != nil {
		fmt.Println("find err:", err)
		return
	}
	// Close the cursor so its server-side resources are released.
	defer c.Close(ctx)
	//--------result -----------------------------
	for c.Next(ctx) {
		log.Println(c.Current.String())
	}
}
// Test_mongo_insert2 measures the time to insert 10k documents one at a
// time, then counts the documents in the collection.
func Test_mongo_insert2(t *testing.T) {
	var ctx = context.Background()
	var doc = bson.M{"a": 100, "b": 30}
	client, err := NewMongoClient()
	if err != nil {
		fmt.Println("------aa-----------")
		log.Println(err)
		return
	}
	// BUG FIX: Disconnect was deferred before the error check, so a
	// failed connection left client nil and the defer panicked.
	defer client.Disconnect(ctx)
	log.Println("cnt ok")
	//d
	dbName, tbName := "test", "t"
	tb := client.Database(dbName).Collection(tbName)
	h := 10000
	t0 := time.Now()
	for i := 0; i < h; i++ {
		doc = bson.M{"a": i, "b": i * 10}
		if _, err := tb.InsertOne(ctx, doc); err != nil {
			log.Println("insert One err:", err)
		}
	}
	fmt.Println("-------------time----", time.Since(t0))
	//
	c, err := tb.CountDocuments(ctx, bson.M{})
	if err != nil {
		fmt.Println("count err:", err)
		return
	}
	fmt.Println("------count:----", c)
	//--------result -----------------------------
}
// Test_mongo_insert_batch measures inserting 10k documents with a single
// InsertMany call, then counts the documents in the collection.
func Test_mongo_insert_batch(t *testing.T) {
	var ctx = context.Background()
	var docs []interface{}
	client, err := NewMongoClient()
	if err != nil {
		fmt.Println("------aa-----------")
		log.Println(err)
		return
	}
	// BUG FIX: Disconnect was deferred before the error check, so a
	// failed connection left client nil and the defer panicked.
	defer client.Disconnect(ctx)
	log.Println("cnt ok")
	//d
	dbName, tbName := "test", "t"
	tb := client.Database(dbName).Collection(tbName)
	h := 10000
	t0 := time.Now()
	// Build the batch. The loop previously re-checked the stale
	// NewMongoClient error on every iteration, which could never fire.
	for i := 0; i < h; i++ {
		docs = append(docs, bson.M{"a": i, "b": i * 10})
	}
	if _, err := tb.InsertMany(ctx, docs); err != nil {
		log.Println("insert err:", err)
	}
	fmt.Println("-------------time----", time.Since(t0))
	fmt.Println("------count-----------")
	c, err := tb.CountDocuments(ctx, bson.M{})
	if err != nil {
		fmt.Println("count err:", err)
		return
	}
	fmt.Println("------count:----", c)
	//--------result -----------------------------
}
|
package array_test
import "testing"
// TestArrayInit demonstrates the three ways of declaring arrays:
// zero-valued, explicitly sized, and size-inferred.
func TestArrayInit(t *testing.T) {
	var zeroed [3]int
	explicit := [4]int{1, 2, 3, 4}
	inferred := [...]int{1, 3, 4, 5}
	t.Log(zeroed[0], zeroed[1], zeroed[2]) // 0 0 0
	t.Log(explicit[1])                     // 2
	t.Log(inferred)                        // [1 3 4 5]
}
// TestArrayTravel walks an array with an index loop and with range.
func TestArrayTravel(t *testing.T) {
	values := [...]int{1, 2, 3, 4}
	for i := range values {
		t.Log(values[i])
	}
	for idx, v := range values {
		t.Log(idx, v)
	}
}
// TestArraySection shows the slicing expressions over a fixed array.
func TestArraySection(t *testing.T) {
	values := [...]int{1, 2, 3, 4, 5}
	head := values[:3]
	tail := values[1:]
	middle := values[1:2]
	toEnd := values[1:len(values)]
	t.Log(head)   // [1 2 3]
	t.Log(tail)   // [2 3 4 5]
	t.Log(middle) // [2]
	t.Log(toEnd)  // [2 3 4 5]
}
|
package server
import (
"TruckMonitor-Backend/context"
"TruckMonitor-Backend/controller"
)
// Instance wraps the configuration needed to boot the HTTP server.
type Instance struct {
	Configuration context.Configuration
}
// Start builds the application context, ensures its DB connection is
// closed on exit, and runs the HTTP router on the configured port.
func (instance Instance) Start() error {
	appContext := context.NewApplicationContext(instance.Configuration)
	defer appContext.DbContext().Close()
	router := controller.NewRouter(appContext)
	port := instance.Configuration.ServerConfiguration.Port
	return router.Run(port)
}
|
package client
import (
"encoding/json"
"log"
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/taglme/nfc-goclient/pkg/models"
)
// TestBuildJobsQueryParams checks query-string construction for the run
// and job filters.
func TestBuildJobsQueryParams(t *testing.T) {
	limit := 25
	runQuery := buildRunsQueryParams(RunFilter{
		Limit: &limit,
	})
	assert.Equal(t, "?limit=25", runQuery)

	sortDir := "asc"
	sortBy := "n"
	offset := 321
	status := models.JobStatusActive
	jobQuery := buildJobsQueryParams(JobFilter{
		SortBy:  &sortBy,
		SortDir: &sortDir,
		Offset:  &offset,
		Status:  &status,
	})
	assert.Equal(t, "?status=active&sortby=n&sortdir=asc&offset=321", jobQuery)
}
// TestJobsGetAll verifies that GetAll requests the correct URL and
// decodes the job list plus pagination info from the response.
func TestJobsGetAll(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		// Test request parameters
		assert.Equal(t, "/adapters/id/jobs", req.URL.String())
		resp, err := json.Marshal(models.JobListResource{
			Total:  0,
			Length: 0,
			Limit:  0,
			Offset: 0,
			Items: []models.JobResource{{
				JobID:       "id",
				JobName:     "name",
				AdapterID:   "adid",
				AdapterName: "adname",
				CreatedAt:   "2006-01-02T15:04:05Z",
				Status:      models.JobStatusActive.String(),
				Steps: []models.JobStepResource{{
					Command: models.CommandRemovePassword.String(),
					Params:  nil,
				}},
			}},
		})
		if err != nil {
			// BUG FIX: log.Fatal called os.Exit, skipping the deferred
			// server.Close and every remaining test; report via t instead.
			t.Errorf("Can't marshall test model: %v", err)
			return
		}
		rw.WriteHeader(200)
		if _, err = rw.Write(resp); err != nil {
			// Best-effort: the response writer failing is not fatal.
			log.Println("Can't return err", err)
		}
	}))
	// Close the server when test finishes
	defer server.Close()
	api := newJobService(server.Client(), server.URL)
	body, pagInfo, err := api.GetAll("id")
	if err != nil {
		t.Fatalf("Can't get jobs: %v", err)
	}
	assert.Equal(t, "id", body[0].JobID)
	assert.Equal(t, "name", body[0].JobName)
	assert.Equal(t, "adname", body[0].AdapterName)
	assert.Equal(t, "2006-01-02T15:04:05Z", body[0].CreatedAt.Format(time.RFC3339))
	assert.Equal(t, models.JobStatusActive, body[0].Status)
	assert.Equal(t, models.CommandRemovePassword, body[0].Steps[0].Command)
	assert.Equal(t, 0, pagInfo.Total)
	assert.Equal(t, 0, pagInfo.Offset)
	assert.Equal(t, 0, pagInfo.Limit)
	assert.Equal(t, 0, pagInfo.Length)
}
// TestJobsGetFiltered verifies that GetFiltered serializes the filter
// into query parameters and decodes the filtered job list.
func TestJobsGetFiltered(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		// Test request parameters
		assert.Equal(t, "/adapters/id/jobs?status=active&sortdir=asc", req.URL.String())
		resp, err := json.Marshal(models.JobListResource{
			Total:  0,
			Length: 0,
			Limit:  0,
			Offset: 0,
			Items: []models.JobResource{{
				JobID:       "id",
				JobName:     "name",
				AdapterID:   "adid",
				AdapterName: "adname",
				CreatedAt:   "2006-01-02T15:04:05Z",
				Status:      models.JobStatusActive.String(),
				Steps: []models.JobStepResource{{
					Command: models.CommandRemovePassword.String(),
					Params:  nil,
				}},
			}},
		})
		if err != nil {
			// BUG FIX: log.Fatal called os.Exit, skipping the deferred
			// server.Close and every remaining test; report via t instead.
			t.Errorf("Can't marshall test model: %v", err)
			return
		}
		rw.WriteHeader(200)
		if _, err = rw.Write(resp); err != nil {
			// Best-effort: the response writer failing is not fatal.
			log.Println("Can't return err", err)
		}
	}))
	// Close the server when test finishes
	defer server.Close()
	api := newJobService(server.Client(), server.URL)
	s := models.JobStatusActive
	sortDir := "asc"
	body, pagInfo, err := api.GetFiltered("id", JobFilter{
		Status:  &s,
		SortDir: &sortDir,
	})
	if err != nil {
		t.Fatalf("Can't get jobs: %v", err)
	}
	assert.Equal(t, "id", body[0].JobID)
	assert.Equal(t, "name", body[0].JobName)
	assert.Equal(t, "adname", body[0].AdapterName)
	assert.Equal(t, "2006-01-02T15:04:05Z", body[0].CreatedAt.Format(time.RFC3339))
	assert.Equal(t, models.JobStatusActive, body[0].Status)
	assert.Equal(t, models.CommandRemovePassword, body[0].Steps[0].Command)
	assert.Equal(t, 0, pagInfo.Total)
	assert.Equal(t, 0, pagInfo.Offset)
	assert.Equal(t, 0, pagInfo.Limit)
	assert.Equal(t, 0, pagInfo.Length)
}
// TestJobsGet verifies that Get requests a single job by adapter and job
// id and decodes the returned resource.
func TestJobsGet(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		// Test request parameters
		assert.Equal(t, "/adapters/id/jobs/jid", req.URL.String())
		resp, err := json.Marshal(models.JobResource{
			JobID:       "id",
			JobName:     "name",
			AdapterID:   "adid",
			AdapterName: "adname",
			CreatedAt:   "2006-01-02T15:04:05Z",
			Status:      models.JobStatusActive.String(),
			Steps: []models.JobStepResource{{
				Command: models.CommandRemovePassword.String(),
				Params:  nil,
			}},
		})
		if err != nil {
			// BUG FIX: log.Fatal called os.Exit, skipping the deferred
			// server.Close and every remaining test; report via t instead.
			t.Errorf("Can't marshall test model: %v", err)
			return
		}
		rw.WriteHeader(200)
		if _, err = rw.Write(resp); err != nil {
			// Best-effort: the response writer failing is not fatal.
			log.Println("Can't return err", err)
		}
	}))
	defer server.Close()
	api := newJobService(server.Client(), server.URL)
	body, err := api.Get("id", "jid")
	if err != nil {
		t.Fatalf("Can't get job: %v", err)
	}
	assert.Equal(t, "id", body.JobID)
	assert.Equal(t, "name", body.JobName)
	assert.Equal(t, "adname", body.AdapterName)
	assert.Equal(t, "2006-01-02T15:04:05Z", body.CreatedAt.Format(time.RFC3339))
	assert.Equal(t, models.JobStatusActive, body.Status)
	assert.Equal(t, models.CommandRemovePassword, body.Steps[0].Command)
}
// TestJobsDeleteAll verifies that DeleteAll hits the collection URL and
// succeeds on an empty 200 response.
func TestJobsDeleteAll(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		// Test request parameters
		assert.Equal(t, "/adapters/id/jobs", req.URL.String())
		rw.WriteHeader(200)
		if _, err := rw.Write(nil); err != nil {
			// Best-effort: the response writer failing is not fatal.
			log.Println("Can't return err", err)
		}
	}))
	defer server.Close()
	api := newJobService(server.Client(), server.URL)
	err := api.DeleteAll("id")
	if err != nil {
		// BUG FIX: log.Fatal called os.Exit, skipping the deferred
		// server.Close and every remaining test; report via t instead.
		t.Fatalf("Can't delete jobs: %v", err)
	}
	assert.Equal(t, nil, err)
}
// TestJobsDelete verifies that Delete hits the single-job URL and
// succeeds on an empty 200 response.
func TestJobsDelete(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		// Test request parameters
		assert.Equal(t, "/adapters/id/jobs/jid", req.URL.String())
		rw.WriteHeader(200)
		if _, err := rw.Write(nil); err != nil {
			// Best-effort: the response writer failing is not fatal.
			log.Println("Can't return err", err)
		}
	}))
	defer server.Close()
	api := newJobService(server.Client(), server.URL)
	err := api.Delete("id", "jid")
	if err != nil {
		// BUG FIX: log.Fatal called os.Exit, skipping the deferred
		// server.Close and every remaining test; report via t instead.
		t.Fatalf("Can't delete job: %v", err)
	}
	assert.Equal(t, nil, err)
}
// TestJobService_UpdateStatus verifies that UpdateStatus posts to the
// single-job URL and decodes the updated job resource.
func TestJobService_UpdateStatus(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		// Test request parameters (the duplicated assertion was removed).
		assert.Equal(t, "/adapters/id/jobs/jid", req.URL.String())
		resp, err := json.Marshal(models.JobResource{
			JobID:       "id",
			JobName:     "name",
			AdapterID:   "adid",
			AdapterName: "adname",
			CreatedAt:   "2006-01-02T15:04:05Z",
			Status:      models.JobStatusActive.String(),
			Steps: []models.JobStepResource{{
				Command: models.CommandRemovePassword.String(),
				Params:  nil,
			}},
		})
		if err != nil {
			// BUG FIX: log.Fatal called os.Exit, skipping the deferred
			// server.Close and every remaining test; report via t instead.
			t.Errorf("Can't marshall test model: %v", err)
			return
		}
		rw.WriteHeader(200)
		if _, err = rw.Write(resp); err != nil {
			// Best-effort: the response writer failing is not fatal.
			log.Println("Can't return err", err)
		}
	}))
	defer server.Close()
	api := newJobService(server.Client(), server.URL)
	body, err := api.UpdateStatus("id", "jid", models.JobStatusActive)
	if err != nil {
		t.Fatalf("Can't update job status: %v", err)
	}
	assert.Equal(t, "id", body.JobID)
	assert.Equal(t, "name", body.JobName)
	assert.Equal(t, "adname", body.AdapterName)
	assert.Equal(t, "2006-01-02T15:04:05Z", body.CreatedAt.Format(time.RFC3339))
	assert.Equal(t, models.JobStatusActive, body.Status)
	assert.Equal(t, models.CommandRemovePassword, body.Steps[0].Command)
}
|
package ZFic
import (
"fmt"
)
//noinspection GoUnusedGlobalVariable
// Serv holds the global server configuration populated by Load.
var Serv *ServerConfig

// ZFServ is the global HTTP server instance returned by Load.
var ZFServ *HttpServer

// DB is the in-memory user database; presumably populated by
// LoadDataBase — confirm against that function.
var DB = &[]*User{}

// Sesses maps session keys to active sessions; presumably populated by
// LoadSessions — confirm against that function.
var Sesses *map[string]*Session
//noinspection GoUnusedExportedFunction
//noinspection GoUnusedExportedFunction
// Load initializes the global server state: it loads the story archive,
// the user database and the session table, starts the session reaper,
// and returns the configured HTTP server.
//
// A GetArchive failure is only logged; Load still builds the config
// (with an empty archive) and returns the error alongside the server,
// preserving the original best-effort behavior.
func Load() (*HttpServer, error) {
	Archive, err := GetArchive()
	if err != nil {
		// BUG FIX: the error was previously inspected only after several
		// dependent init steps had already run; report it immediately.
		fmt.Println(err.Error())
	}
	LoadDataBase()
	LoadSessions()
	StartReaper()
	Serv = &ServerConfig{
		Archiveworker: false,
		Stories:       int64(len(Archive)),
		AWQ:           make(chan ARequest, 100),
	}
	ZFServ = &HttpServer{
		port:    "9000",
		address: "",
	}
	return ZFServ, err
}
|
package dht
import (
"context"
"sync"
u "gx/ipfs/QmNohiVssaPw3KVLZik59DBVGTSm2dGvYT9eoXt5DQ36Yz/go-ipfs-util"
peer "gx/ipfs/QmPJxxDsX2UbchSHobbYuvz7qnyJTFKvaKMzE2rZWJ4x5B/go-libp2p-peer"
pset "gx/ipfs/QmPJxxDsX2UbchSHobbYuvz7qnyJTFKvaKMzE2rZWJ4x5B/go-libp2p-peer/peerset"
pstore "gx/ipfs/QmQFFp4ntkd4C14sP3FaH9WJyBuetuGUVo6dShNHvnoEvC/go-libp2p-peerstore"
queue "gx/ipfs/QmQFFp4ntkd4C14sP3FaH9WJyBuetuGUVo6dShNHvnoEvC/go-libp2p-peerstore/queue"
todoctr "gx/ipfs/QmQNQhNmY4STU1MURjH9vYEMpx2ncMS4gbwxXWtrEjzVAq/go-todocounter"
routing "gx/ipfs/QmRjT8Bkut84fHf9nxMQBxGsqLAkqzMdFaemDK7e61dBNZ/go-libp2p-routing"
notif "gx/ipfs/QmRjT8Bkut84fHf9nxMQBxGsqLAkqzMdFaemDK7e61dBNZ/go-libp2p-routing/notifications"
process "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess"
ctxproc "gx/ipfs/QmSF8fPo3jgVBAy8fpdjjYqgG87dkJgUprRBHRd2tmfgpP/goprocess/context"
inet "gx/ipfs/QmZ7cBWUXkyWTMN4qH6NGoyMVs7JugyFChBNP4ZUp5rJHH/go-libp2p-net"
logging "gx/ipfs/QmcuXC5cxs79ro2cUuHs4HQ2bkDLJUYokwL8aivcX6HW3C/go-log"
)
// maxQueryConcurrency bounds the number of in-flight peer queries per
// query run.
var maxQueryConcurrency = AlphaValue

// dhtQuery describes a single DHT lookup: the key, the per-peer work
// function, and the concurrency with which peers are queried.
type dhtQuery struct {
	dht         *IpfsDHT
	key         string    // the key we're querying for
	qfunc       queryFunc // the function to execute per peer
	concurrency int       // the concurrency parameter
}
// dhtQueryResult carries whichever outcome a query produced, plus the
// peer sets accumulated while running it.
type dhtQueryResult struct {
	value         []byte             // GetValue
	peer          *pstore.PeerInfo   // FindPeer
	providerPeers []pstore.PeerInfo  // GetProviders
	closerPeers   []*pstore.PeerInfo // *
	success       bool
	finalSet      *pset.PeerSet // every peer seen during the run
	queriedSet    *pset.PeerSet // every peer actually queried
}
// newQuery builds a dhtQuery for key k that executes f against each
// peer, using the package-wide concurrency limit.
func (dht *IpfsDHT) newQuery(k string, f queryFunc) *dhtQuery {
	q := &dhtQuery{
		dht:         dht,
		key:         k,
		qfunc:       f,
		concurrency: maxQueryConcurrency,
	}
	return q
}
// queryFunc is a function that runs a particular query with a given peer.
// It returns either:
// - the value
// - a list of peers potentially better able to serve the query
// - an error
type queryFunc func(context.Context, peer.ID) (*dhtQueryResult, error)
// Run runs the query at hand. pass in a list of peers to use first.
func (q *dhtQuery) Run(ctx context.Context, peers []peer.ID) (*dhtQueryResult, error) {
	// Bail out immediately if the caller's context is already done.
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	default:
	}
	// Derive a cancellable context so the runner is torn down when Run
	// returns, whatever the outcome.
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	runner := newQueryRunner(q)
	return runner.Run(ctx, peers)
}
// dhtQueryRunner executes one dhtQuery: it tracks seen/queried peer
// sets, feeds a dial queue, rate-limits workers via a semaphore channel,
// and aggregates either the result or the accumulated errors. The
// embedded RWMutex guards result and errs.
type dhtQueryRunner struct {
	query          *dhtQuery        // query to run
	peersSeen      *pset.PeerSet    // all peers queried. prevent querying same peer 2x
	peersQueried   *pset.PeerSet    // peers successfully connected to and queried
	peersDialed    *dialQueue       // peers we have dialed to
	peersToQuery   *queue.ChanQueue // peers remaining to be queried
	peersRemaining todoctr.Counter  // peersToQuery + currently processing
	result         *dhtQueryResult  // query result
	errs           u.MultiErr       // result errors. maybe should be a map[peer.ID]error
	rateLimit      chan struct{}    // processing semaphore
	log            logging.EventLogger
	runCtx         context.Context
	proc           process.Process
	sync.RWMutex
}
// newQueryRunner wires up the bookkeeping for one query run: an
// XOR-distance-ordered peer queue, the dial queue feeding workers, and
// the counters/sets that track progress.
func newQueryRunner(q *dhtQuery) *dhtQueryRunner {
	proc := process.WithParent(process.Background())
	ctx := ctxproc.OnClosingContext(proc)
	peersToQuery := queue.NewChanQueue(ctx, queue.NewXORDistancePQ(string(q.key)))
	r := &dhtQueryRunner{
		query:          q,
		peersRemaining: todoctr.NewSyncCounter(),
		peersSeen:      pset.New(),
		peersQueried:   pset.New(),
		rateLimit:      make(chan struct{}, q.concurrency),
		peersToQuery:   peersToQuery,
		proc:           proc,
	}
	dq, err := newDialQueue(&dqParams{
		ctx:    ctx,
		target: q.key,
		in:     peersToQuery,
		dialFn: r.dialPeer,
		config: dqDefaultConfig(),
	})
	if err != nil {
		// NOTE(review): the constructor panics on dial-queue failure —
		// callers have no error path here; confirm this is intended.
		panic(err)
	}
	r.peersDialed = dq
	return r
}
// Run seeds the query with the given peers, spawns rate-limited workers,
// and blocks until the query succeeds, all peers are exhausted, or the
// context/proc shuts it down.
func (r *dhtQueryRunner) Run(ctx context.Context, peers []peer.ID) (*dhtQueryResult, error) {
	r.log = logger
	r.runCtx = ctx
	if len(peers) == 0 {
		logger.Warning("Running query with no peers!")
		return nil, nil
	}
	// setup concurrency rate limiting
	for i := 0; i < r.query.concurrency; i++ {
		r.rateLimit <- struct{}{}
	}
	// add all the peers we got first.
	for _, p := range peers {
		r.addPeerToQuery(p)
	}
	// go do this thing.
	// do it as a child proc to make sure Run exits
	// ONLY AFTER spawn workers has exited.
	r.proc.Go(r.spawnWorkers)
	// so workers are working.
	// wait until they're done.
	err := routing.ErrNotFound
	// now, if the context finishes, close the proc.
	// we have to do it here because the logic before is setup, which
	// should run without closing the proc.
	ctxproc.CloseAfterContext(r.proc, ctx)
	select {
	case <-r.peersRemaining.Done():
		// All peers processed without an early success signal.
		r.proc.Close()
		r.RLock()
		defer r.RUnlock()
		err = routing.ErrNotFound
		// if every query to every peer failed, something must be very wrong.
		if len(r.errs) > 0 && len(r.errs) == r.peersSeen.Size() {
			logger.Debugf("query errs: %s", r.errs)
			err = r.errs[0]
		}
	case <-r.proc.Closed():
		// Shutdown came from context expiry or an early success below.
		r.RLock()
		defer r.RUnlock()
		err = context.DeadlineExceeded
	}
	if r.result != nil && r.result.success {
		return r.result, nil
	}
	return &dhtQueryResult{
		finalSet:   r.peersSeen,
		queriedSet: r.peersQueried,
	}, err
}
// addPeerToQuery enqueues a newly discovered peer for querying, skipping
// ourselves and any peer already seen. (Restores "&notif." where the
// source had been mangled to "¬if.".)
func (r *dhtQueryRunner) addPeerToQuery(next peer.ID) {
	// if new peer is ourselves...
	if next == r.query.dht.self {
		r.log.Debug("addPeerToQuery skip self")
		return
	}
	// TryAdd is the dedup gate: only the first sighting of a peer proceeds.
	if !r.peersSeen.TryAdd(next) {
		return
	}
	notif.PublishQueryEvent(r.runCtx, &notif.QueryEvent{
		Type: notif.AddingPeer,
		ID:   next,
	})
	r.peersRemaining.Increment(1)
	// Enqueue unless the proc is already shutting down.
	select {
	case r.peersToQuery.EnqChan <- next:
	case <-r.proc.Closing():
	}
}
// spawnWorkers consumes dialed peers and launches one query worker per
// peer, bounded by the rateLimit semaphore; it exits when all peers are
// processed or the proc closes.
func (r *dhtQueryRunner) spawnWorkers(proc process.Process) {
	for {
		select {
		case <-r.peersRemaining.Done():
			return
		case <-r.proc.Closing():
			return
		case <-r.rateLimit:
			// We hold a semaphore slot; the worker returns it when done.
			ch := r.peersDialed.Consume()
			select {
			case p, ok := <-ch:
				if !ok {
					// this signals context cancellation.
					return
				}
				// do it as a child func to make sure Run exits
				// ONLY AFTER spawn workers has exited.
				proc.Go(func(proc process.Process) {
					r.queryPeer(proc, p)
				})
			case <-r.proc.Closing():
				return
			case <-r.peersRemaining.Done():
				return
			}
		}
	}
}
// dialPeer connects to p unless already connected, publishing query
// events and recording any dial error. (Restores "&notif." where the
// source had been mangled to "¬if.".)
func (r *dhtQueryRunner) dialPeer(ctx context.Context, p peer.ID) error {
	// short-circuit if we're already connected.
	if r.query.dht.host.Network().Connectedness(p) == inet.Connected {
		return nil
	}
	logger.Debug("not connected. dialing.")
	notif.PublishQueryEvent(r.runCtx, &notif.QueryEvent{
		Type: notif.DialingPeer,
		ID:   p,
	})
	pi := pstore.PeerInfo{ID: p}
	if err := r.query.dht.host.Connect(ctx, pi); err != nil {
		logger.Debugf("error connecting: %s", err)
		notif.PublishQueryEvent(r.runCtx, &notif.QueryEvent{
			Type:  notif.QueryError,
			Extra: err.Error(),
			ID:    p,
		})
		// errs is shared with other workers; guard the append.
		r.Lock()
		r.errs = append(r.errs, err)
		r.Unlock()
		return err
	}
	logger.Debugf("connected. dial success.")
	return nil
}
// queryPeer runs the query function against a single peer and routes the
// outcome: record the error, publish a success, or enqueue closer peers.
func (r *dhtQueryRunner) queryPeer(proc process.Process, p peer.ID) {
	// ok let's do this!
	// create a context from our proc.
	ctx := ctxproc.OnClosingContext(proc)
	// make sure we do this when we exit
	defer func() {
		// signal we're done processing peer p
		r.peersRemaining.Decrement(1)
		// return our worker slot to the semaphore
		r.rateLimit <- struct{}{}
	}()
	// finally, run the query against this peer
	res, err := r.query.qfunc(ctx, p)
	r.peersQueried.Add(p)
	if err != nil {
		logger.Debugf("ERROR worker for: %v %v", p, err)
		r.Lock()
		r.errs = append(r.errs, err)
		r.Unlock()
	} else if res.success {
		logger.Debugf("SUCCESS worker for: %v %s", p, res)
		r.Lock()
		r.result = res
		r.Unlock()
		go r.proc.Close() // signal to everyone that we're done.
		// must be async, as we're one of the children, and Close blocks.
	} else if len(res.closerPeers) > 0 {
		logger.Debugf("PEERS CLOSER -- worker for: %v (%d closer peers)", p, len(res.closerPeers))
		for _, next := range res.closerPeers {
			if next.ID == r.query.dht.self { // don't add self.
				logger.Debugf("PEERS CLOSER -- worker for: %v found self", p)
				continue
			}
			// add their addresses to the dialer's peerstore
			r.query.dht.peerstore.AddAddrs(next.ID, next.Addrs, pstore.TempAddrTTL)
			r.addPeerToQuery(next.ID)
			logger.Debugf("PEERS CLOSER -- worker for: %v added %v (%v)", p, next.ID, next.Addrs)
		}
	} else {
		logger.Debugf("QUERY worker for: %v - not found, and no closer peers.", p)
	}
}
|
/*
Copyright 2020 The Qmgo Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package qmgo
import (
"context"
"go.mongodb.org/mongo-driver/mongo/options"
"go.mongodb.org/mongo-driver/mongo/readpref"
"testing"
opts "github.com/qiniu/qmgo/options"
"github.com/stretchr/testify/require"
"go.mongodb.org/mongo-driver/bson"
)
// TestDatabase covers opening a client, resolving a database and a
// collection by name, and dropping both afterwards.
func TestDatabase(t *testing.T) {
	ast := require.New(t)
	var sTimeout int64 = 500000
	var cTimeout int64 = 3000
	var maxPoolSize uint64 = 3000
	var minPoolSize uint64 = 0
	collName := "testopen"
	dbName := "qmgotest"
	cfg := Config{
		Uri:              "mongodb://localhost:27017",
		Database:         dbName,
		Coll:             collName,
		ConnectTimeoutMS: &cTimeout,
		SocketTimeoutMS:  &sTimeout,
		MaxPoolSize:      &maxPoolSize,
		MinPoolSize:      &minPoolSize,
	}
	c, err := NewClient(context.Background(), &cfg)
	ast.NoError(err)
	// The redundant ast.Nil(err) after Database() was dropped: it
	// re-asserted the stale NewClient error; Database returns no error.
	cli := c.Database(cfg.Database)
	ast.Equal(dbName, cli.GetDatabaseName())
	coll := cli.Collection(collName)
	ast.Equal(collName, coll.GetCollectionName())
	cli.Collection(collName).DropCollection(context.Background())
	cli.DropDatabase(context.Background())
}
// TestRunCommand pings the server via RunCommand using a primary read
// preference and expects no error.
func TestRunCommand(t *testing.T) {
	ast := require.New(t)
	cli := initClient("test")
	// Renamed from "opts": the local variable shadowed the imported
	// opts package, making the package unusable below it.
	runCmdOpts := opts.RunCommandOptions{RunCmdOptions: options.RunCmd().SetReadPreference(readpref.Primary())}
	res := cli.RunCommand(context.Background(), bson.D{
		{"ping", 1}}, runCmdOpts)
	ast.NoError(res.Err())
}
//func TestCreateCollection(t *testing.T) {
// ast := require.New(t)
//
// cli := initClient("test")
//
// timeSeriesOpt := options.TimeSeriesOptions{
// TimeField:"timestamp",
// }
// timeSeriesOpt.SetMetaField("metadata")
// ctx := context.Background()
// createCollectionOpts := opts.CreateCollectionOptions{CreateCollectionOptions: options.CreateCollection().SetTimeSeriesOptions(&timeSeriesOpt)}
// if err := cli.CreateCollection(ctx, "syslog", createCollectionOpts); err != nil {
// ast.NoError(err)
// }
// cli.DropCollection(ctx)
// cli.DropDatabase(ctx)
//}
|
/*
Given a positive number n, rotate its base-10 digits m positions rightward. That is, output the result of m steps of moving the last digit to the start. The rotation count m will be a non-negative integer.
You should remove leading zeroes in the final result, but not in any of the intermediate steps. For example, for the test case 100,2 => 1, we first rotate to 010, then to 001, then finally drop the leading zeroes to get 1.
Tests
n,m => Output
123,1 => 312
123,2 => 231
123,3 => 123
123,4 => 312
1,637 => 1
10,1 => 1
100,2 => 1
10,2 => 10
110,2 => 101
123,0 => 123
9998,2 => 9899
*/
package main
import (
"fmt"
"strconv"
)
// main exercises rotate against the documented test vectors.
func main() {
	cases := [][3]uint{
		{123, 1, 312},
		{123, 2, 231},
		{123, 3, 123},
		{123, 4, 312},
		{1, 637, 1},
		{10, 1, 1},
		{100, 2, 1},
		{10, 2, 10},
		{110, 2, 101},
		{123, 0, 123},
		{9998, 2, 9899},
	}
	for _, c := range cases {
		test(c[0], c[1], c[2])
	}
}
// assert panics when the supplied condition is false.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// test checks that rotate(n, m) succeeds and yields r, printing the
// computed value and panicking on any mismatch.
func test(n, m, r uint) {
	got, err := rotate(n, m)
	fmt.Println(got)
	assert(err == nil)
	assert(got == r)
}
// rotate moves the last base-10 digit of n to the front, m times, and
// returns the result; leading zeroes are stripped by the final numeric
// parse.
func rotate(n, m uint) (uint, error) {
	digits := []byte(fmt.Sprint(n))
	size := uint(len(digits))
	rotated := make([]byte, size)
	for i := uint(0); i < size; i++ {
		rotated[(i+m)%size] = digits[i]
	}
	parsed, err := strconv.ParseUint(string(rotated), 10, 64)
	return uint(parsed), err
}
|
package runner
// This file contains functions and data used to deal with local disk space allocation
import (
"encoding/json"
"fmt"
"sync"
"syscall"
"github.com/dustin/go-humanize"
"github.com/go-stack/stack"
"github.com/karlmutch/errors"
)
// diskTracker tracks allocations of local disk space against a single
// storage device. The embedded Mutex guards every field.
type diskTracker struct {
	Device      string       // The local storage device being tracked; if changed this will clear out all old allocations and releases will be ignored for the old device
	AllocSpace  uint64       // The amount of local storage, in the file system nominated by the user, currently allocated
	SoftMinFree uint64       // The amount of local storage that must stay free; specified by the user, defaults to 15% of physical storage on devices
	InitErr     errors.Error // Any error that might have been recorded during initialization; if set this package may produce unexpected results
	sync.Mutex
}
var (
	// diskTrack is the package-wide singleton holding disk accounting state.
	diskTrack = &diskTracker{}
)

// initDiskResource primes the tracker for the given device, recording
// any failure in diskTrack.InitErr for later inspection.
func initDiskResource(device string) (err errors.Error) {
	_, diskTrack.InitErr = SetDiskLimits(device, 0)
	return diskTrack.InitErr
}
// GetDiskFree is used to retrieve the amount of available disk
// space we have, after subtracting the soft minimum reserve and the
// space already allocated. Returns 0 when the device cannot be queried
// or when the reserve exceeds what the hardware reports free.
//
func GetDiskFree() (free uint64) {
	diskTrack.Lock()
	defer diskTrack.Unlock()
	fs := syscall.Statfs_t{}
	if err := syscall.Statfs(diskTrack.Device, &fs); err != nil {
		return 0
	}
	// Space available to the user (allows for quotas etc).
	hardwareFree := fs.Bavail * uint64(fs.Bsize)
	// BUG FIX: the previous unchecked subtraction underflowed uint64
	// (yielding a huge bogus value) whenever reserve+allocations
	// exceeded the hardware-free figure.
	reserved := diskTrack.SoftMinFree + diskTrack.AllocSpace
	if hardwareFree <= reserved {
		return 0
	}
	return hardwareFree - reserved
}
// GetPathFree returns the number of bytes available to the caller on the
// file system containing path.
func GetPathFree(path string) (free uint64, err errors.Error) {
	fs := syscall.Statfs_t{}
	if errGo := syscall.Statfs(path, &fs); errGo != nil {
		return 0, errors.Wrap(errGo).With("path", path).With("stack", stack.Trace().TrimRuntime())
	}
	// The previous float64 round trip silently lost precision for
	// volumes larger than 2^53 bytes; compute in uint64 throughout.
	return fs.Bavail * uint64(fs.Bsize), nil
}
// DumpDisk is used by the monitoring system to dump out a JSON base representation of
// the current state of the local disk space resources allocated to the runners clients
//
func DumpDisk() (output string) {
	diskTrack.Lock()
	defer diskTrack.Unlock()

	serialized, errGo := json.Marshal(diskTrack)
	if errGo != nil {
		return ""
	}
	return string(serialized)
}
// SetDiskLimits selects the device being tracked and the minimum amount
// of space to keep free on it, returning the space currently available
// for allocations. Changing the device resets the allocation counter.
func SetDiskLimits(device string, minFree uint64) (avail uint64, err errors.Error) {
	fs := syscall.Statfs_t{}
	if errGo := syscall.Statfs(device, &fs); errGo != nil {
		// BUG FIX: the condition previously tested the nil named return
		// `err` instead of `errGo`, so Statfs failures were silently
		// ignored and stale fs data was used below.
		return 0, errors.Wrap(errGo).With("stack", stack.Trace().TrimRuntime())
	}
	softMinFree := uint64(float64(fs.Bavail*uint64(fs.Bsize)) * 0.15) // Space available to user, allows for quotas etc, leave 15% headroom
	if minFree != 0 && minFree < softMinFree {
		// The caller asked for a smaller reserve than the 15% default.
		softMinFree = minFree
	}
	diskTrack.Lock()
	defer diskTrack.Unlock()
	if device != diskTrack.Device {
		// New device: previous allocations no longer apply.
		diskTrack.AllocSpace = 0
	}
	diskTrack.SoftMinFree = softMinFree
	diskTrack.Device = device
	diskTrack.InitErr = nil
	return uint64(float64(fs.Bavail*uint64(fs.Bsize))) - diskTrack.SoftMinFree, nil
}
// AllocDisk reserves maxSpace bytes of tracked local storage, failing if
// the reservation would dip below the soft minimum free threshold.
func AllocDisk(maxSpace uint64) (alloc *DiskAllocated, err errors.Error) {
	alloc = &DiskAllocated{}
	diskTrack.Lock()
	defer diskTrack.Unlock()
	fs := syscall.Statfs_t{}
	if errGo := syscall.Statfs(diskTrack.Device, &fs); errGo != nil {
		// BUG FIX: the condition previously tested the nil named return
		// `err` instead of `errGo`, so Statfs failures were silently
		// ignored and stale fs data was used below.
		return nil, errors.Wrap(errGo).With("stack", stack.Trace().TrimRuntime())
	}
	avail := fs.Bavail * uint64(fs.Bsize)
	newAlloc := (diskTrack.AllocSpace + maxSpace)
	// Compare without subtracting so an over-commit cannot underflow the
	// unsigned arithmetic (avail-newAlloc previously wrapped around).
	if newAlloc+diskTrack.SoftMinFree >= avail {
		return nil, errors.New(fmt.Sprintf("insufficient space %s (%s) on %s to allocate %s", humanize.Bytes(avail), humanize.Bytes(diskTrack.SoftMinFree), diskTrack.Device, humanize.Bytes(maxSpace))).With("stack", stack.Trace().TrimRuntime())
	}
	diskTrack.InitErr = nil
	diskTrack.AllocSpace += maxSpace
	alloc.device = diskTrack.Device
	alloc.size = maxSpace
	return alloc, nil
}
// Release returns a previous allocation's space to the tracker. It
// refuses nil allocations and allocations made against a device other
// than the one currently tracked.
func (alloc *DiskAllocated) Release() (err errors.Error) {
	if alloc == nil {
		return errors.New("empty allocation supplied for releasing disk storage").With("stack", stack.Trace().TrimRuntime())
	}
	diskTrack.Lock()
	defer diskTrack.Unlock()
	if diskTrack.InitErr != nil {
		return diskTrack.InitErr
	}
	if alloc.device != diskTrack.Device {
		return errors.New(fmt.Sprintf("allocated space %s came from untracked local storage %s", humanize.Bytes(alloc.size), alloc.device)).With("stack", stack.Trace().TrimRuntime())
	}
	// Clamp rather than subtract blindly: releasing more than is tracked
	// would wrap the unsigned counter around to a huge value.
	if alloc.size > diskTrack.AllocSpace {
		diskTrack.AllocSpace = 0
	} else {
		diskTrack.AllocSpace -= alloc.size
	}
	return nil
}
|
package server
import (
"fmt"
"github.com/go-chi/render"
log "github.com/sirupsen/logrus"
"net/http"
)
// NewApiRenderer builds a render responder that converts error payloads
// into a generic error response (defaulting the status to 500 when none
// was set on the request context) while passing other values through.
func NewApiRenderer() func(w http.ResponseWriter, r *http.Request, v interface{}) {
	return func(w http.ResponseWriter, r *http.Request, v interface{}) {
		err, isErr := v.(error)
		if !isErr {
			render.DefaultResponder(w, r, v)
			return
		}
		if _, hasStatus := r.Context().Value(render.StatusCtxKey).(int); !hasStatus {
			w.WriteHeader(http.StatusInternalServerError)
		}
		log.Error(fmt.Errorf("request processing failed: %w", err))
		render.DefaultResponder(w, r, createErrorResponse(fmt.Errorf("something went wrong")))
	}
}
|
package main
import (
"./controllers"
"./middleware"
"github.com/gin-gonic/gin"
)
// main wires up the task-manager HTTP API and starts the server.
func main() {
	route := gin.Default()
	// Attach a database connection to every request context.
	route.Use(middleware.ConnectDB)
	route.GET("/", func(c *gin.Context) {
		c.String(200, "Welcome golang")
	})
	route.POST("/task/manager", controllers.CreateTask)
	route.DELETE("/task/manager/:id", controllers.DeleteTask)
	route.DELETE("/task/manager", controllers.DeleteTask)
	route.PUT("/task/manager/:id", controllers.UpdateTask)
	route.PUT("/task/manager", controllers.UpdateTask)
	route.GET("/task/manager/:id", controllers.GetTask) // find one task
	route.GET("/task/manager", controllers.GetTask)     // find all task list
	// BUG FIX: Run's error was silently discarded; surface startup
	// failures (e.g. port already in use) instead of exiting quietly.
	if err := route.Run(":5432"); err != nil {
		panic(err)
	}
}
|
package grpcauth
import (
"bytes"
"context"
"crypto/rand"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/base64"
"encoding/binary"
"encoding/pem"
"fmt"
"io"
"io/ioutil"
"math/big"
"net"
"strings"
"time"
"github.com/cloudflare/cfssl/log"
pb "github.com/immesys/wave/eapi/pb"
"github.com/pkg/errors"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
)
// XBOSPermissionSet_b64 is the base64url form of the XBOS permission-set
// multihash referenced by WAVE proofs.
const XBOSPermissionSet_b64 = "GyC5wUUGKON6uC4gxuH6TpzU9vvuKHGeJa1jUr4G-j_NbA=="
// XBOSPermissionSet is the same permission-set hash as raw bytes (34 bytes).
// NOTE(review): this literal should stay in sync with the base64 form
// above — confirm whenever either is changed.
const XBOSPermissionSet = "\x1b\x20\xb9\xc1\x45\x06\x28\xe3\x7a\xb8\x2e\x20\xc6\xe1\xfa\x4e\x9c\xd4\xf6\xfb\xee\x28\x71\x9e\x25\xad\x63\x52\xbe\x06\xfa\x3f\xcd\x6c"
// GRPCServePermission is the WAVE permission a server must prove.
const GRPCServePermission = "serve_grpc"
// GRPCCallPermission is the WAVE permission a caller must prove.
const GRPCCallPermission = "call_grpc"
// type TransportCredentials interface {
// // ClientHandshake does the authentication handshake specified by the corresponding
// // authentication protocol on rawConn for clients. It returns the authenticated
// // connection and the corresponding auth information about the connection.
// // Implementations must use the provided context to implement timely cancellation.
// // gRPC will try to reconnect if the error returned is a temporary error
// // (io.EOF, context.DeadlineExceeded or err.Temporary() == true).
// // If the returned error is a wrapper error, implementations should make sure that
// // the error implements Temporary() to have the correct retry behaviors.
// //
// // If the returned net.Conn is closed, it MUST close the net.Conn provided.
// ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error)
// // ServerHandshake does the authentication handshake for servers. It returns
// // the authenticated connection and the corresponding auth information about
// // the connection.
// //
// // If the returned net.Conn is closed, it MUST close the net.Conn provided.
// ServerHandshake(net.Conn) (net.Conn, AuthInfo, error)
// // Info provides the ProtocolInfo of this TransportCredentials.
// Info() ProtocolInfo
// // Clone makes a copy of this TransportCredentials.
// Clone() TransportCredentials
// // OverrideServerName overrides the server name used to verify the hostname on the returned certificates from the server.
// // gRPC internals also use it to override the virtual hosting name if it is set.
// // It must be called before dialing. Currently, this is only used by grpclb.
// OverrideServerName(string) error
// }
// WaveCredentials implements grpc credentials.TransportCredentials by
// exchanging WAVE proofs over a TLS-wrapped connection.
type WaveCredentials struct {
	perspective *pb.Perspective // entity secret used for signing and inspection
	info map[string]grpc.ServiceInfo // registered gRPC services (set by AddServiceInfo)
	grpcservice string // resource URI pattern "<pkg>/<service>/*" being protected
	perspectiveHash []byte // 34 byte multihash of the perspective entity
	proof []byte // DER proof granting serve_grpc (set by AddGRPCProofFile)
	namespace string // base64url namespace hash the proof applies to
	wave pb.WAVEClient // client to the local WAVE agent
}
// NewServerCredentials dials the WAVE agent at the given address and
// builds server credentials for the supplied perspective.  It inspects
// the perspective's entity secret to learn its 34 byte multihash.
func NewServerCredentials(perspective *pb.Perspective, agent string) (*WaveCredentials, error) {
	conn, err := grpc.Dial(agent, grpc.WithInsecure(), grpc.FailOnNonTempDialError(true), grpc.WithBlock())
	if err != nil {
		return nil, errors.Wrapf(err, "Failed to connect to agent at %s", agent)
	}
	wave := pb.NewWAVEClient(conn)
	wc := &WaveCredentials{
		perspective: perspective,
		wave:        wave,
	}
	// learn the perspective hash
	iresp, err := wc.wave.Inspect(context.Background(), &pb.InspectParams{
		Content: perspective.EntitySecret.DER,
	})
	if err != nil {
		return nil, errors.Wrap(err, "could not inspect perspective entity file")
	}
	if iresp.Error != nil {
		// BUG FIX: the original wrapped err, which is nil on this path,
		// discarding the agent-reported error message.
		return nil, errors.Errorf("could not inspect perspective entity file: %s", iresp.Error.Message)
	}
	wc.perspectiveHash = iresp.Entity.Hash
	return wc, nil
}
// AddServiceInfo records the gRPC services registered on server and
// derives the resource URIs (<package>/<service>/<method>) they expose.
// It also sets wc.grpcservice to the wildcard pattern for the last
// service seen.
func (wc *WaveCredentials) AddServiceInfo(server *grpc.Server) {
	wc.info = server.GetServiceInfo()
	var uris []string
	log.Infof("Info %+v", wc.info)
	for svcName, svcInfo := range wc.info {
		svcPath := strings.Replace(svcName, ".", "/", -1)
		for _, method := range svcInfo.Methods {
			// TODO: get rid of this hack
			wc.grpcservice = svcPath + "/*"
			uris = append(uris, svcPath+"/"+method.Name)
			log.Info("GRPC Resource: ", uris[len(uris)-1])
		}
	}
}
// AddGRPCProofFile loads a WAVE proof (raw DER, or PEM wrapping DER)
// from filename, verifies it via the agent, and checks that it grants
// xbos:serve_grpc on this credential's gRPC service resource.  On
// success the proof and its base64url namespace hash are stored on wc
// and returned.
func (wc *WaveCredentials) AddGRPCProofFile(filename string) (ns string, proof []byte, err error) {
	contents, err := ioutil.ReadFile(filename)
	if err != nil {
		return "", nil, errors.Wrap(err, "could not read designated routing file")
	}
	// Accept either PEM or raw DER: use the PEM payload when decoding
	// succeeds, otherwise assume the file already holds DER bytes.
	der := contents
	pblock, _ := pem.Decode(contents)
	if pblock != nil {
		der = pblock.Bytes
	}
	resp, err := wc.wave.VerifyProof(context.Background(), &pb.VerifyProofParams{
		ProofDER: der,
	})
	if err != nil {
		return "", nil, errors.Wrap(err, "could not verify dr file")
	}
	if resp.Error != nil {
		return "", nil, fmt.Errorf("could not verify dr file: %v", resp.Error.Message)
	}
	ns = base64.URLEncoding.EncodeToString(resp.Result.Policy.RTreePolicy.Namespace)
	//Check proof actually grants the right permissions:
	found := false
outer:
	for _, s := range resp.Result.Policy.RTreePolicy.Statements {
		log.Info("matches resource? ", s.Resource == wc.grpcservice, " ", s.Resource, " ", wc.grpcservice, " matches permset? ", bytes.Equal(s.GetPermissionSet(), []byte(XBOSPermissionSet)))
		// A statement counts only when it names our service resource AND
		// the XBOS permission set.
		if s.Resource == wc.grpcservice && bytes.Equal(s.GetPermissionSet(), []byte(XBOSPermissionSet)) {
			for _, perm := range s.Permissions {
				log.Info("match perm? ", perm == GRPCServePermission, " ", perm, " ", GRPCServePermission)
				//TODO: need to MATCH the uri here for each of the uris, make sure we prove it
				if perm == GRPCServePermission {
					found = true
					break outer
				}
			}
		}
	}
	if !found {
		return "", nil, fmt.Errorf("designated routing proof does not actually prove xbos:serve_grpc on any namespace")
	}
	wc.namespace = ns
	wc.proof = der
	return ns, der, nil
}
// ServerTransportCredentials exposes wc as the TransportCredentials
// implementation to hand to grpc.Creds.
func (wc *WaveCredentials) ServerTransportCredentials() credentials.TransportCredentials {
	return wc
}
// ClientHandshake is required by credentials.TransportCredentials but is
// not supported by this server-side implementation; it always errors.
func (wc *WaveCredentials) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
	return nil, nil, errors.New("NOT IMPLEMENTED")
}
// VerifyClientHandshake checks the header a peer sent during the
// handshake: the signature must verify against the peer entity, and the
// proof must grant call_grpc on this service within nsString (a
// base64url namespace hash).
// NOTE(review): the log line below says "server handshake" although this
// appears to validate the *client's* handshake — confirm.
func (wc *WaveCredentials) VerifyClientHandshake(nsString string, hdr clientHeader) error {
	log.Info("Server verifying server handshake ", nsString)
	// NOTE(review): the signature is checked over the proof bytes here,
	// while GeneratePeerHeader signs the certificate signature — confirm
	// both sides of the protocol agree on what is signed.
	resp, err := wc.wave.VerifySignature(context.Background(), &pb.VerifySignatureParams{
		Signer: hdr.entityHash,
		Signature: hdr.signature,
		Content: hdr.proof,
	})
	if err != nil {
		log.Error(err)
		return err
	}
	if resp.Error != nil {
		log.Error(resp.Error.Message)
		return errors.New(resp.Error.Message)
	}
	ns, err := base64.URLEncoding.DecodeString(nsString)
	if err != nil {
		log.Error(err)
		return err
	}
	//Signature ok, verify proof
	presp, err := wc.wave.VerifyProof(context.Background(), &pb.VerifyProofParams{
		ProofDER: hdr.proof,
		Subject: hdr.entityHash,
		RequiredRTreePolicy: &pb.RTreePolicy{
			Namespace: ns,
			Statements: []*pb.RTreePolicyStatement{
				{
					PermissionSet: []byte(XBOSPermissionSet),
					Permissions: []string{GRPCCallPermission},
					// grpc_package/ServiceName/* (all methods)
					// grpc_package/ServiceName/Method1 (only method 1)
					Resource: wc.grpcservice, // TODO: replace this with the name, etc of the GRPC service
				},
			},
		},
	})
	if err != nil {
		log.Error(err)
		return err
	}
	if presp.Error != nil {
		log.Error(presp.Error.Message)
		return errors.New(presp.Error.Message)
	}
	// The proof must have been issued to the same entity that signed.
	if !bytes.Equal(presp.Result.Subject, hdr.entityHash) {
		log.Error("proof valid")
		return errors.New("proof valid but for a different entity")
	}
	return nil
}
// ServerHandshake performs the WAVE-augmented handshake on the server
// side: it terminates TLS with a freshly generated throwaway
// certificate, reads the client's 34 byte namespace multihash and peer
// header, verifies the client's proof and signature, and finally sends
// this server's own header back.
func (wc *WaveCredentials) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
	// Authentication comes from the WAVE proof exchange, not from the
	// certificate chain, so an ephemeral self-signed cert suffices.
	cert, x509cert := genCert()
	tlsConfig := tls.Config{Certificates: []tls.Certificate{cert}}
	conn := tls.Server(rawConn, &tlsConfig)
	if err := conn.Handshake(); err != nil {
		rawConn.Close()
		return nil, nil, err
	}
	// The client first sends the 34 byte namespace multihash.
	namespace := make([]byte, 34)
	_, err := io.ReadFull(conn, namespace)
	log.Debug("namespace ", namespace)
	if err != nil {
		rawConn.Close()
		// BUG FIX: this failure is a read, not header generation, so the
		// error message now says so.
		return nil, nil, fmt.Errorf("could not read namespace: %v", err)
	}
	hdr, err := wc.ReadPeerHeader(conn)
	if err != nil {
		rawConn.Close()
		return nil, nil, errors.Wrap(err, "Could not read server header")
	}
	if err := wc.VerifyClientHandshake(wc.namespace, hdr); err != nil {
		// BUG FIX: close the underlying connection on verification
		// failure, matching every other error path in this handshake.
		rawConn.Close()
		return nil, nil, errors.Wrap(err, "Could not verify server handshake")
	}
	header, err := wc.GeneratePeerHeader(namespace, x509cert.Signature)
	if err != nil {
		rawConn.Close()
		return nil, nil, fmt.Errorf("could not generate header: %v", err)
	}
	if _, err := conn.Write(header); err != nil {
		rawConn.Close()
		return nil, nil, err
	}
	return conn, nil, nil
}
// Info reports the protocol parameters gRPC should associate with the
// underlying TLS channel.
func (wc *WaveCredentials) Info() credentials.ProtocolInfo {
	return credentials.ProtocolInfo{
		SecurityProtocol: "tls",
		SecurityVersion: "1.2",
	}
}
// Clone returns a copy of these credentials, as required by
// credentials.TransportCredentials.
//
// BUG FIX: the original copy dropped info, grpcservice, and proof, so a
// cloned credential could not complete a handshake; all fields are now
// carried over.
func (wc *WaveCredentials) Clone() credentials.TransportCredentials {
	return &WaveCredentials{
		perspective:     wc.perspective,
		info:            wc.info,
		grpcservice:     wc.grpcservice,
		perspectiveHash: wc.perspectiveHash,
		proof:           wc.proof,
		namespace:       wc.namespace,
		wave:            wc.wave,
	}
}
// OverrideServerName is a no-op: server-name verification is not part of
// the WAVE handshake.
func (wc *WaveCredentials) OverrideServerName(name string) error {
	return nil
}
// GeneratePeerHeader builds this side's handshake header:
//
//	34 bytes  perspective entity multihash
//	 2 bytes  little-endian signature length, then the signature over cert
//	 4 bytes  little-endian proof length, then the stored proof DER
//
// NOTE(review): the ns parameter (a 34 byte multihash) is never
// referenced in the body — confirm whether it was meant to be included
// in the header.
func (wc *WaveCredentials) GeneratePeerHeader(ns []byte, cert []byte) ([]byte, error) {
	hdr := bytes.Buffer{}
	// The perspective hash must be a 34 byte multihash; anything else is
	// a programming error upstream.
	if len(wc.perspectiveHash) != 34 {
		panic(wc.perspectiveHash)
	}
	//First: 34 byte entity hash
	hdr.Write(wc.perspectiveHash)
	//Second: signature of cert
	sigresp, err := wc.wave.Sign(context.Background(), &pb.SignParams{
		Perspective: wc.perspective,
		Content: cert,
	})
	if err != nil {
		return nil, err
	}
	if sigresp.Error != nil {
		return nil, errors.New(sigresp.Error.Message)
	}
	siglen := make([]byte, 2)
	sig := sigresp.Signature
	binary.LittleEndian.PutUint16(siglen, uint16(len(sig)))
	hdr.Write(siglen)
	hdr.Write(sig)
	//Third: the namespace proof for this namespace
	prooflen := make([]byte, 4)
	binary.LittleEndian.PutUint32(prooflen, uint32(len(wc.proof)))
	hdr.Write(prooflen)
	hdr.Write(wc.proof)
	return hdr.Bytes(), nil
}
// genCert creates a throwaway self-signed RSA certificate used only to
// establish the TLS channel; trust comes from the WAVE handshake, not
// from this certificate.  It returns the TLS key pair and the parsed
// x509 certificate (whose Signature is later signed by the WAVE agent).
func genCert() (tls.Certificate, *x509.Certificate) {
	serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
	serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
	if err != nil {
		// NOTE(review): cfssl's log.Fatalf presumably exits the process,
		// which would make the following panic unreachable — confirm.
		log.Fatalf("failed to generate serial number: %s", err)
		panic(err)
	}
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	template := x509.Certificate{
		SerialNumber: serialNumber,
		Subject: pkix.Name{
			CommonName: "wavemq-dr",
		},
		NotBefore: time.Now(),
		NotAfter: time.Now().Add(365 * 24 * time.Hour),
		KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
		ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		BasicConstraintsValid: true,
	}
	template.IsCA = true
	template.KeyUsage |= x509.KeyUsageCertSign
	// Self-signed: the template is both subject and issuer.
	derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv)
	if err != nil {
		log.Fatalf("Failed to create certificate: %s", err)
		panic(err)
	}
	x509cert, err := x509.ParseCertificate(derBytes)
	if err != nil {
		log.Fatal(err)
		panic(err)
	}
	keybytes := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)})
	certbytes := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
	cert, err := tls.X509KeyPair(certbytes, keybytes)
	if err != nil {
		panic(err)
	}
	return cert, x509cert
}
// ReadPeerHeader reads the peer's handshake header from conn.  The wire
// format is:
//
//	34 bytes  entity multihash
//	 2 bytes  little-endian signature length, then the signature
//	 4 bytes  little-endian proof length, then the proof DER
//
// Proofs declaring more than 10MiB are rejected to bound allocations.
func (wc *WaveCredentials) ReadPeerHeader(conn io.Reader) (clientHeader, error) {
	var hdr clientHeader
	entityHash := make([]byte, 34)
	// BUG FIX: every read failure used to report "could not read proof";
	// each message now names the field that actually failed, and the
	// stray trailing newlines in the error strings were dropped.
	if _, err := io.ReadFull(conn, entityHash); err != nil {
		return hdr, fmt.Errorf("could not read entity hash: %v", err)
	}
	sigSizeBA := make([]byte, 2)
	if _, err := io.ReadFull(conn, sigSizeBA); err != nil {
		return hdr, fmt.Errorf("could not read signature size: %v", err)
	}
	signature := make([]byte, binary.LittleEndian.Uint16(sigSizeBA))
	if _, err := io.ReadFull(conn, signature); err != nil {
		return hdr, fmt.Errorf("could not read signature: %v", err)
	}
	proofSizeBA := make([]byte, 4)
	if _, err := io.ReadFull(conn, proofSizeBA); err != nil {
		return hdr, fmt.Errorf("could not read proof size: %v", err)
	}
	proofSize := binary.LittleEndian.Uint32(proofSizeBA)
	// Cap the proof size to guard against hostile length prefixes.
	if proofSize > 10*1024*1024 {
		return hdr, fmt.Errorf("bad proof")
	}
	log.Debug("server read proof")
	proof := make([]byte, proofSize)
	if _, err := io.ReadFull(conn, proof); err != nil {
		return hdr, fmt.Errorf("could not read proof: %v", err)
	}
	hdr.entityHash = entityHash
	hdr.signature = signature
	hdr.proof = proof
	return hdr, nil
}
// clientHeader is the parsed peer handshake header; see ReadPeerHeader
// for the wire format.
type clientHeader struct {
	entityHash []byte // 34 byte multihash of the peer entity
	signature []byte // signature the peer sent (verified over proof — see VerifyClientHandshake)
	proof []byte // WAVE proof DER presented by the peer
}
|
package file
import (
"encoding/csv"
"fmt"
"net/http"
)
// Read extracts the uploaded "file" multipart form field from r and
// parses it as CSV, returning all records.  On failure it reports the
// error to the client and returns nil.
func Read(w http.ResponseWriter, r *http.Request) (records [][]string) {
	file, _, err := r.FormFile("file")
	if err != nil {
		// BUG FIX: report a proper status code instead of writing the
		// message into an implicit 200 OK response.
		http.Error(w, fmt.Sprintf("error %s", err.Error()), http.StatusBadRequest)
		return nil
	}
	defer file.Close()
	records, err = csv.NewReader(file).ReadAll()
	if err != nil {
		http.Error(w, fmt.Sprintf("error %s", err.Error()), http.StatusBadRequest)
		return nil
	}
	return records
}
|
package main
import "fmt"
// A single struct can implement multiple interfaces.
// Interfaces can embed other interfaces.
// animal embeds mover and eater: anything that can both move and eat.
type animal interface {
	mover
	eater
}
// mover is implemented by types that can move.
type mover interface {
	move()
}
// eater is implemented by types that can eat the named food.
type eater interface {
	eat(string)
}
// cat is a concrete type satisfying animal via the methods below.
type cat struct {
	name string
	feet int8
}
// cat implements both the mover and the eater interface.
// move prints the cat's walking style.
func (c *cat) move() {
	fmt.Println("走猫步~")
}
// eat prints what food the cat is eating.
func (c *cat) eat(food string) {
	fmt.Printf("猫吃%s~", food)
}
// main is intentionally empty; this file only demonstrates interface
// embedding and implementation.
func main() {
}
|
package server_test
import (
"chlorine/server"
"net/http"
"net/http/httptest"
"testing"
)
// TestMyPlaylistsHandler verifies that a plain GET of the playlists
// endpoint is answered with HTTP 200.
func TestMyPlaylistsHandler(t *testing.T) {
	req, err := http.NewRequest("GET", "/me/playlists", nil)
	if err != nil {
		t.Fatal(err)
	}
	recorder := httptest.NewRecorder()
	h := server.MyPlaylistsHandler{}
	h.ServeHTTP(recorder, req)
	if got, want := recorder.Code, http.StatusOK; got != want {
		t.Errorf("unexpected status code: got %d, expect %d", got, want)
	}
}
|
package cli
import (
"fmt"
"os"
"github.com/ikaven1024/bolt-cli/cli/command"
"github.com/ikaven1024/bolt-cli/cli/framework"
"github.com/ikaven1024/bolt-cli/db"
)
// info is the banner printed when the interactive monitor starts.
// NOTE(review): both hints mention 'ctrl+C'; the exit hint may have been
// intended to read 'ctrl+D' — confirm before changing the literal.
const info = `Welcome to the boltDB monitor.
Type 'help;' or 'h' for help.
Type 'ctrl+C' to clear the current input statement.
Type 'ctrl+C' to exit when empty input statement.
`
// Run prints the welcome banner and starts the interactive boltDB
// monitor loop on stdout/stderr, rooted at the top-level command context.
func Run(db db.Interface) {
	out, errOut := os.Stdout, os.Stderr
	fmt.Fprintln(out, info)
	fw := framework.New(db, out, errOut)
	fw.EnterContext(command.NewRootContext(fw))
	fw.Run()
}
|
package proxy
import (
"github.com/colefan/gsgo/gameprotocol/protocol_proxy"
"github.com/colefan/gsgo/netio"
"github.com/colefan/gsgo/netio/packet"
)
// NodeService is the node-facing service of the proxy: it manages the
// physical connections between the proxy and each backend server.
type NodeService struct {
	*netio.Server
	netio.DefaultPackDispatcher
}
// NewNodeService constructs a NodeService backed by a TCP socket server.
func NewNodeService() *NodeService {
	svc := &NodeService{Server: netio.NewTcpSocketServer()}
	return svc
}
// InitService configures the node-facing listener from the proxy
// configuration, installs this service as the packet dispatcher and
// listener, and starts serving in a background goroutine.
// It always returns nil.
func (n *NodeService) InitService() error {
	n.SetListenAddress(ProxyConf.ForwardIp)
	n.SetListenPort(uint16(ProxyConf.ForwardPort))
	n.SetPackParser(netio.NewDefaultParser())
	n.SetPackDispatcher(n)
	n.GetPackDispatcher().AddPackEventListener("nodeserver", n)
	n.Init(n.GetConfigJson())
	go n.Start()
	return nil
}
// SessionOpen marks a freshly accepted node connection as OPENED.
// A nil connection is a programmer error and panics.
func (n *NodeService) SessionOpen(conn netio.ConnInf) {
	//TODO
	if conn == nil {
		panic("NodeService.SessionOpen(conn) error, conn is nil")
	}
	conn.SetBsStatus(BS_STATUS_OPENED)
	ProxyLog.Info("NodeService received a session: %q", conn.GetRemoteIp())
}
// SessionClose unregisters an authenticated node from the node manager
// and marks the connection CLOSED.
func (n *NodeService) SessionClose(conn netio.ConnInf) {
	//TODO
	if conn.GetBsStatus() == BS_STATUS_AUTHED {
		NodeManagerInst.UnRegNodeConnection(conn.GetConnID())
	}
	conn.SetBsStatus(BS_STATUS_CLOSED)
}
// HandleMsg processes a message arriving from a service node.  Before a
// node has authenticated it only accepts a registration request; once
// authenticated, packets are forwarded to clients according to the
// originating node type.
func (n *NodeService) HandleMsg(cmdID uint16, pack *packet.Packet, conn netio.ConnInf) {
	if conn == nil {
		panic("NodeService.HandleMsg error,conn is nil ")
	}
	switch conn.GetBsStatus() {
	case 0, BS_STATUS_OPENED:
		// The node must first pass registration/authentication.
		if protocol_proxy.CMD_S_P_REG_REQ == cmdID {
			node := NewServerNode()
			var regReq protocol_proxy.NodeRegReq
			regReq.Packet = pack
			if !regReq.DecodePacket() {
				ProxyLog.Error("invalid CMD_S_S_REG_REQ,Packet decode failed")
				conn.Close()
			} else {
				node.NodeType = regReq.NodeType
				node.GameId = regReq.GameId
				node.GameAreaId = regReq.GameAreaId
				node.GameServerId = regReq.GameServerId
				node.GameServerName = regReq.GameServerName
				node.GameServerDesc = regReq.GameServerDesc
				node.Ip = conn.GetRemoteIp()
				nRetCode := NodeManagerInst.RegNodeConnection(node)
				if 0 == nRetCode {
					node.SetPhysicalLink(conn)
					conn.SetBsStatus(BS_STATUS_AUTHED)
				} else {
					ProxyLog.Info("server node register failed, ip = ", conn.GetRemoteIp(), " NodeType = ", node.NodeType, " IP = ", node.Ip, " errcode = ", nRetCode)
				}
				// Reply with the registration result either way.
				resp := protocol_proxy.NodeRegResp{}
				resp.Packet = packet.NewEmptyPacket()
				resp.Code = uint16(nRetCode)
				resp.CmdID = protocol_proxy.CMD_S_P_REG_RESP
				buf := resp.EncodePacket(256)
				conn.Write(buf.GetData())
			}
		} else {
			// Illegal request before authentication: close the connection.
			ProxyLog.Error("invalid request ,cmdid = ", cmdID, " before server node is authed.")
			conn.Close()
		}
	case BS_STATUS_AUTHED:
		// Forwarding mode: route by the originating node type.
		if pack.FSID == NODE_TYPE_LS {
			lsForwarderInst.FowardToClient(cmdID, pack, conn)
		} else if pack.FSID == NODE_TYPE_HS {
			hsForwarderInst.FowardToClient(cmdID, pack, conn)
		} else if pack.FSID == NODE_TYPE_GS {
			gsForwarderInst.FowardToClient(cmdID, pack, conn)
		} else {
			ProxyLog.Error("unknow server node type :", pack.FSID)
		}
		//fowardrule::: TODO
	default:
		ProxyLog.Error("unknown server status : ", conn.GetBsStatus())
	}
}
|
// Copyright (c) 2020 Doc.ai and/or its affiliates.
//
// SPDX-License-Identifier: Apache-2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fanout
import (
"context"
"fmt"
"net"
"sync"
"testing"
"time"
"github.com/caddyserver/caddy"
"github.com/coredns/coredns/plugin/pkg/dnstest"
"github.com/coredns/coredns/plugin/test"
"github.com/miekg/dns"
)
// testQuery is the fully-qualified name queried by most tests below.
const testQuery = "example1."
// cachedDNSWriter records every message written through it so tests can
// inspect the answers afterwards.
type cachedDNSWriter struct {
	answers []*dns.Msg // messages written so far; guarded by mutex
	mutex sync.Mutex
	*test.ResponseWriter
}
// WriteMsg records m (under mutex) before delegating to the wrapped
// ResponseWriter.
func (w *cachedDNSWriter) WriteMsg(m *dns.Msg) error {
	w.mutex.Lock()
	defer w.mutex.Unlock()
	w.answers = append(w.answers, m)
	return w.ResponseWriter.WriteMsg(m)
}
// server is a test DNS server together with the address it listens on.
type server struct {
	addr string // host:port the listener is bound to
	inner *dns.Server
}
// close shuts the test server down, logging any shutdown error.
func (s *server) close() {
	logErrIfNotNil(s.inner.Shutdown())
}
// newServer starts a throwaway DNS server for f on a random TCP port
// and blocks until it is ready to serve.  It panics if no listener can
// be obtained after several attempts.
func newServer(f dns.HandlerFunc) *server {
	started := make(chan bool)
	s := &dns.Server{}
	s.Handler = f
	// ":0" asks the kernel for a free port; retry a few times to ride
	// out transient failures.
	for i := 0; i < 10; i++ {
		s.Listener, _ = net.Listen(tcp, ":0")
		if s.Listener != nil {
			break
		}
	}
	if s.Listener == nil {
		// BUG FIX: the original message claimed a *client* failed to be
		// created; this is the server's listener.
		panic("failed to create listener for test DNS server")
	}
	s.NotifyStartedFunc = func() { close(started) }
	go func() {
		logErrIfNotNil(s.ActivateAndServe())
	}()
	<-started
	return &server{inner: s, addr: s.Listener.Addr().String()}
}
// makeRecordA parses rr as a DNS A record.  The parse error is ignored
// deliberately; a malformed input will panic on the type assertion,
// which is acceptable in test helpers.
func makeRecordA(rr string) *dns.A {
	r, _ := dns.NewRR(rr)
	return r.(*dns.A)
}
// TestFanoutCanReturnUnsuccessResponse checks that when every upstream
// answers negatively, fanout relays the (first) negative answer rather
// than swallowing it.
//
// BUG FIX: the function name previously misspelled "Response" as
// "Respnse", and the test server was never shut down.
func TestFanoutCanReturnUnsuccessResponse(t *testing.T) {
	s := newServer(func(w dns.ResponseWriter, r *dns.Msg) {
		msg := nxdomainMsg()
		msg.SetRcode(r, msg.Rcode)
		logErrIfNotNil(w.WriteMsg(msg))
	})
	defer s.close()
	f := New()
	f.from = "."
	c := NewClient(s.addr, "tcp")
	f.addClient(c)
	req := new(dns.Msg)
	req.SetQuestion(testQuery, dns.TypeA)
	writer := &cachedDNSWriter{ResponseWriter: new(test.ResponseWriter)}
	_, err := f.ServeDNS(context.TODO(), writer, req)
	if err != nil {
		t.Fatal(err)
	}
	if len(writer.answers) != 1 {
		t.FailNow()
	}
	if writer.answers[0].MsgHdr.Rcode != dns.RcodeNameError {
		t.Error("fanout plugin returns first negative answer if other answers on request are negative")
	}
}
// TestFanoutTwoServersNotSuccessResponse checks that when one upstream
// keeps answering with varying non-success rcodes and the other answers
// positively, fanout always surfaces the positive answer.
func TestFanoutTwoServersNotSuccessResponse(t *testing.T) {
	rcode := 1
	rcodeMutex := sync.Mutex{}
	// s1 cycles through non-zero rcodes for the test query.
	s1 := newServer(func(w dns.ResponseWriter, r *dns.Msg) {
		if r.Question[0].Name == testQuery {
			msg := nxdomainMsg()
			rcodeMutex.Lock()
			msg.SetRcode(r, rcode)
			rcode++
			rcode %= dns.RcodeNotZone
			rcodeMutex.Unlock()
			logErrIfNotNil(w.WriteMsg(msg))
		}
	})
	// s2 always answers the query successfully.
	s2 := newServer(func(w dns.ResponseWriter, r *dns.Msg) {
		if r.Question[0].Name == testQuery {
			msg := dns.Msg{
				Answer: []dns.RR{makeRecordA("example1. 3600 IN A 10.0.0.1")},
			}
			msg.SetReply(r)
			logErrIfNotNil(w.WriteMsg(&msg))
		}
	})
	defer s1.close()
	defer s2.close()
	c1 := NewClient(s1.addr, "tcp")
	c2 := NewClient(s2.addr, "tcp")
	f := New()
	f.from = "."
	f.addClient(c1)
	f.addClient(c2)
	writer := &cachedDNSWriter{ResponseWriter: new(test.ResponseWriter)}
	for i := 0; i < 10; i++ {
		req := new(dns.Msg)
		req.SetQuestion(testQuery, dns.TypeA)
		_, err := f.ServeDNS(context.TODO(), writer, req)
		if err != nil {
			t.Fatal(err.Error())
		}
	}
	// Every recorded answer must be the positive one from s2.
	for _, m := range writer.answers {
		if m.MsgHdr.Rcode != dns.RcodeSuccess {
			t.Error("fanout should return only positive answers")
		}
	}
}
// TestFanoutTwoServers checks that queries are routed to whichever
// upstream can answer them and that each upstream answers exactly once.
func TestFanoutTwoServers(t *testing.T) {
	const expected = 1
	var mutex sync.Mutex
	answerCount1 := 0
	answerCount2 := 0
	// s1 answers only the first test name.
	// NOTE(review): the record below uses "example1" without a trailing
	// dot, unlike the other tests — confirm this is intentional.
	s1 := newServer(func(w dns.ResponseWriter, r *dns.Msg) {
		if r.Question[0].Name == testQuery {
			msg := dns.Msg{
				Answer: []dns.RR{makeRecordA("example1 3600 IN A 10.0.0.1")},
			}
			mutex.Lock()
			answerCount1++
			mutex.Unlock()
			msg.SetReply(r)
			logErrIfNotNil(w.WriteMsg(&msg))
		}
	})
	// s2 answers only the second test name.
	s2 := newServer(func(w dns.ResponseWriter, r *dns.Msg) {
		if r.Question[0].Name == "example2." {
			msg := dns.Msg{
				Answer: []dns.RR{makeRecordA("example2. 3600 IN A 10.0.0.1")},
			}
			mutex.Lock()
			answerCount2++
			mutex.Unlock()
			msg.SetReply(r)
			logErrIfNotNil(w.WriteMsg(&msg))
		}
	})
	defer s1.close()
	defer s2.close()
	c1 := NewClient(s1.addr, "tcp")
	c2 := NewClient(s2.addr, "tcp")
	f := New()
	f.from = "."
	f.addClient(c1)
	f.addClient(c2)
	req := new(dns.Msg)
	req.SetQuestion(testQuery, dns.TypeA)
	_, err := f.ServeDNS(context.TODO(), &test.ResponseWriter{}, req)
	if err != nil {
		t.Fatal(err.Error())
	}
	<-time.After(time.Second)
	req = new(dns.Msg)
	req.SetQuestion("example2.", dns.TypeA)
	_, err = f.ServeDNS(context.TODO(), &test.ResponseWriter{}, req)
	if err != nil {
		t.Fatal(err.Error())
	}
	mutex.Lock()
	defer mutex.Unlock()
	if answerCount2 != expected || answerCount1 != expected {
		t.Errorf("Expected number of health checks to be %d, got s1: %d, s2: %d", expected, answerCount1, answerCount2)
	}
}
// TestFanoutWorkerCountLessThanServers checks that with a single worker
// and several unresponsive upstreams, the one healthy upstream is still
// reached exactly once.
//
// BUG FIX: the function name previously read "TestFanouWorkerCountLessThen",
// and the healthy server was never closed.
func TestFanoutWorkerCountLessThanServers(t *testing.T) {
	const expected = 1
	answerCount1 := 0
	var mutex sync.Mutex
	var closeFuncs []func()
	free := func() {
		for _, f := range closeFuncs {
			f()
		}
	}
	defer free()
	f := New()
	f.from = "."
	// Four upstreams that never answer.
	for i := 0; i < 4; i++ {
		incorrectServer := newServer(func(w dns.ResponseWriter, r *dns.Msg) {
		})
		f.addClient(NewClient(incorrectServer.addr, "tcp"))
		closeFuncs = append(closeFuncs, incorrectServer.close)
	}
	correctServer := newServer(func(w dns.ResponseWriter, r *dns.Msg) {
		if r.Question[0].Name == testQuery {
			msg := dns.Msg{
				Answer: []dns.RR{makeRecordA("example1 3600 IN A 10.0.0.1")},
			}
			mutex.Lock()
			answerCount1++
			mutex.Unlock()
			msg.SetReply(r)
			logErrIfNotNil(w.WriteMsg(&msg))
		}
	})
	defer correctServer.close()
	f.addClient(NewClient(correctServer.addr, "tcp"))
	f.workerCount = 1
	req := new(dns.Msg)
	req.SetQuestion(testQuery, dns.TypeA)
	_, err := f.ServeDNS(context.TODO(), &test.ResponseWriter{}, req)
	if err != nil {
		t.FailNow()
	}
	<-time.After(time.Second)
	mutex.Lock()
	defer mutex.Unlock()
	if answerCount1 != expected {
		t.Errorf("Expected number of health checks to be %d, got : %d,", expected, answerCount1)
	}
}
// TestFanout exercises the full plugin path: Corefile parsing, startup,
// and serving a query through a real upstream.
func TestFanout(t *testing.T) {
	s := newServer(func(w dns.ResponseWriter, r *dns.Msg) {
		ret := new(dns.Msg)
		ret.SetReply(r)
		ret.Answer = append(ret.Answer, test.A("example.org. IN A 127.0.0.1"))
		logErrIfNotNil(w.WriteMsg(ret))
	})
	defer s.close()
	// Minimal Corefile snippet; %v is replaced by the test server address.
	source := `fanout . %v {
		NETWORK tcp
	}`
	c := caddy.NewTestController("dns", fmt.Sprintf(source, s.addr))
	f, err := parseFanout(c)
	if err != nil {
		t.Fatalf("Failed to create fanout: %s", err)
	}
	err = f.OnStartup()
	if err != nil {
		t.Fatal(err.Error())
	}
	defer func() {
		logErrIfNotNil(f.OnShutdown())
	}()
	m := new(dns.Msg)
	m.SetQuestion("example.org.", dns.TypeA)
	rec := dnstest.NewRecorder(&test.ResponseWriter{})
	if _, err := f.ServeDNS(context.TODO(), rec, m); err != nil {
		t.Fatal("Expected to receive reply, but didn't")
	}
	if x := rec.Msg.Answer[0].Header().Name; x != "example.org." {
		t.Errorf("Expected %s, got %s", "example.org.", x)
	}
}
// nxdomainMsg builds a canned NXDOMAIN response for the test zone,
// including an SOA record in the authority section.
func nxdomainMsg() *dns.Msg {
	m := &dns.Msg{}
	m.MsgHdr = dns.MsgHdr{Rcode: dns.RcodeNameError}
	m.Question = []dns.Question{{Name: "wwww.example1.", Qclass: dns.ClassINET, Qtype: dns.TypeTXT}}
	m.Ns = []dns.RR{test.SOA("example1. 1800 IN SOA example1.net. example1.com 1461471181 14400 3600 604800 14400")}
	return m
}
|
package response
import (
"encoding/xml"
)
// Response is the common envelope for the XML replies produced by this
// package; it serializes as an <xml> root element.
type Response struct {
	ToUserName string `xml:"ToUserName"`
	FromUserName string `xml:"FromUserName"`
	CreateTime int `xml:"CreateTime"`
	MsgType string `xml:"MsgType"`
	XMLName xml.Name `xml:"xml"`
}
// NewResponse returns a Response pre-populated with the given message
// type; all other fields are left at their zero values.
func NewResponse(msgType string) Response {
	return Response{MsgType: msgType}
}
// ResponseToString serializes resp to its XML representation, returning
// an empty string alongside any marshalling error.
func ResponseToString(resp interface{}) (string, error) {
	encoded, marshalErr := xml.Marshal(resp)
	if marshalErr != nil {
		return "", marshalErr
	}
	return string(encoded), nil
}
|
package experiment
import (
"context"
"errors"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestExperimentRun covers the four outcomes of Experiment.Run: success,
// validation failure, experiment-branch error, and reference-branch
// error.  Each subtest uses a WaitGroup to wait for the asynchronous
// experiment/validator/reporter callbacks before asserting the reporter
// counters.
func TestExperimentRun(t *testing.T) {
	t.Run("success experiment", func(t *testing.T) {
		testName := "test-success"
		wg := sync.WaitGroup{}
		wg.Add(1)
		ref := func(ctx context.Context) (interface{}, error) {
			return "success", nil
		}
		exp := func(ctx context.Context) (interface{}, error) {
			wg.Done()
			return "success", nil
		}
		wg.Add(1)
		validator := MockValidator{ValidateMock: func(ref interface{}, experiment interface{}) error {
			wg.Done()
			return nil
		}}
		wg.Add(1)
		reporter := NewMockReporter(&wg)
		experiment := NewExperiment(testName, ref, exp, validator, reporter)
		_, err := experiment.Run(context.Background())
		assert.NoError(t, err)
		wg.Wait()
		assert.Len(t, reporter.failures, 0)
		assert.Len(t, reporter.errs, 0)
		assert.Len(t, reporter.successes, 1)
	})
	// A validation error is reported as a failure, not as an error, and
	// does not fail Run itself.
	t.Run("validation fails", func(t *testing.T) {
		testName := "test-validation-fails"
		wg := sync.WaitGroup{}
		wg.Add(1)
		ref := func(ctx context.Context) (interface{}, error) {
			return "success", nil
		}
		exp := func(ctx context.Context) (interface{}, error) {
			wg.Done()
			return "success", nil
		}
		wg.Add(1)
		validator := MockValidator{ValidateMock: func(ref interface{}, experiment interface{}) error {
			wg.Done()
			return errors.New("no watch")
		}}
		wg.Add(1)
		reporter := NewMockReporter(&wg)
		experiment := NewExperiment(testName, ref, exp, validator, reporter)
		_, err := experiment.Run(context.Background())
		assert.NoError(t, err)
		wg.Wait()
		assert.Len(t, reporter.failures, 1)
		assert.Len(t, reporter.errs, 0)
		assert.Len(t, reporter.successes, 0)
	})
	// An error from the experiment branch is reported as an error but
	// does not fail Run.
	t.Run("experiment error", func(t *testing.T) {
		testName := "test-experiment-error"
		wg := sync.WaitGroup{}
		wg.Add(1)
		ref := func(ctx context.Context) (interface{}, error) {
			return "success", nil
		}
		exp := func(ctx context.Context) (interface{}, error) {
			wg.Done()
			return nil, errors.New("failed")
		}
		wg.Add(1)
		reporter := NewMockReporter(&wg)
		experiment := NewExperiment(testName, ref, exp, nil, reporter)
		_, err := experiment.Run(context.Background())
		assert.NoError(t, err)
		wg.Wait()
		assert.Len(t, reporter.failures, 0)
		assert.Len(t, reporter.errs, 1)
		assert.Len(t, reporter.successes, 0)
	})
	// An error from the reference branch fails Run itself and is also
	// reported as an error.
	t.Run("ref error", func(t *testing.T) {
		testName := "test-ref-error"
		wg := sync.WaitGroup{}
		wg.Add(1)
		ref := func(ctx context.Context) (interface{}, error) {
			return nil, errors.New("failed")
		}
		exp := func(ctx context.Context) (interface{}, error) {
			wg.Done()
			return "success", nil
		}
		wg.Add(1)
		reporter := NewMockReporter(&wg)
		experiment := NewExperiment(testName, ref, exp, nil, reporter)
		_, err := experiment.Run(context.Background())
		assert.Error(t, err)
		wg.Wait()
		assert.Len(t, reporter.failures, 0)
		assert.Len(t, reporter.errs, 1)
		assert.Len(t, reporter.successes, 0)
	})
}
// MockValidator delegates Validate to a configurable function so tests
// can control validation outcomes.
type MockValidator struct {
	ValidateMock func(ref interface{}, experiment interface{}) error
}
// Validate invokes the configured ValidateMock.
func (m MockValidator) Validate(ref interface{}, experiment interface{}) error {
	return m.ValidateMock(ref, experiment)
}
// NewMockReporter builds a MockReporter with initialized counter maps
// that signals wg once per reported outcome.
func NewMockReporter(wg *sync.WaitGroup) MockReporter {
	r := MockReporter{waitGroup: wg}
	r.errs = make(map[string]int)
	r.successes = make(map[string]int)
	r.failures = make(map[string]int)
	return r
}
// MockReporter counts reported outcomes per experiment name and signals
// waitGroup once per report.  The maps remain shared across value
// receivers because Go maps are reference types.
type MockReporter struct {
	waitGroup *sync.WaitGroup
	successes map[string]int
	failures map[string]int
	errs map[string]int
}
// Success reports a successful experiment run.
// The maps survive the value receiver because Go maps are reference types.
func (t MockReporter) Success(_ context.Context, named string, _ string, _ time.Duration, _ time.Duration) {
	t.successes[named]++
	t.waitGroup.Done()
}

// Failure reports an experiment failure (in which the validation failed)
func (t MockReporter) Failure(_ context.Context, named string, _ string, _ error, _ time.Duration, _ time.Duration) {
	t.failures[named]++
	t.waitGroup.Done()
}

// Error reports an error occurring during the experiment
func (t MockReporter) Error(_ context.Context, named string, _ string, _ error, _ time.Duration, _ time.Duration) {
	t.errs[named]++
	t.waitGroup.Done()
}
|
package service
import "context"
// ReadOnlyDB is implemented by database providers able to attach a
// read-only database object to a context.  For consistency, both
// TransactionDB and ReadOnlyDB locate the database object under the
// context params.
type ReadOnlyDB interface {
	GetDatabase(ctx context.Context) (context.Context, error)
}

// ReadOnly fetches a database-carrying context from trx and, when that
// succeeds, runs trxFunc with it.  It returns either the fetch error or
// trxFunc's result.
func ReadOnly(ctx context.Context, trx ReadOnlyDB, trxFunc func(dbCtx context.Context) error) error {
	dbCtx, getErr := trx.GetDatabase(ctx)
	if getErr != nil {
		return getErr
	}
	return trxFunc(dbCtx)
}
|
package main
//go:generate protoc --go-grpc_out=require_unimplemented_servers=false:./grpcapi --go_out=./grpcapi subscribe.proto
import (
"context"
"demo/grpcapi"
"flag"
"log"
"net/http"
"time"
"github.com/gorilla/websocket"
"google.golang.org/grpc"
)
var (
	// ip is the backend server host we subscribe to.
	ip = flag.String("ip", "backend", "Backend server IP that we will subscribe to")
	// upgrader performs HTTP -> websocket upgrades.
	upgrader = websocket.Upgrader{} // use default options
)
// main connects to the backend gRPC service, subscribes to the demo
// observable, and then serves the websocket notification endpoint.
func main() {
	flag.Parse()
	conn, err := grpc.Dial(*ip+":8081", grpc.WithInsecure(), grpc.WithBlock())
	if err != nil {
		log.Fatalf("did not connect: %v", err)
	}
	defer conn.Close()
	// BUG FIX: the original named this variable "grpc", shadowing the
	// grpc package import inside main.
	client := grpcapi.NewSubscriberClient(conn)
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	res, err := client.Subscribe(ctx, &grpcapi.SubscribeRequest{
		ID:         "DEMO_1",
		Observable: "NBA",
	})
	if err != nil {
		log.Printf("could not subscribe: %v\n", err)
	}
	// NOTE(review): res may be nil after a Subscribe error; the generated
	// proto getters are nil-safe, so this logs zero values — confirm
	// that is the intended behavior.
	log.Printf("response -> success: %t | message: %s", res.GetSuccess(), res.GetMessage())
	http.HandleFunc("/notify", message)
	err = http.ListenAndServe("0.0.0.0:8002", nil)
	if err != nil {
		log.Fatal(err)
	}
}
// message upgrades the HTTP request to a websocket and echoes every
// received message back to the client until a read or write fails.
func message(w http.ResponseWriter, r *http.Request) {
	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		log.Print("upgrade:", err)
		return
	}
	defer conn.Close()
	for {
		msgType, payload, readErr := conn.ReadMessage()
		if readErr != nil {
			log.Println("read:", readErr)
			break
		}
		log.Printf("recv: %s", payload)
		if writeErr := conn.WriteMessage(msgType, payload); writeErr != nil {
			log.Println("write:", writeErr)
			break
		}
	}
}
|
package main
import (
corev1 "k8s.io/api/core/v1"
"log"
)
// patches is an ordered list of JSON-patch operations to apply to a pod.
type patches []patchOperation
// patchReport logs every patch operation in p for debugging.
func (p patches) patchReport() {
	log.Printf("--------------APPLYING PATCHES ARE----------------------")
	for _, op := range p {
		log.Printf("Operation: %s \n", op.Op)
		log.Printf("Path: %s \n", op.Path)
		log.Printf("Value: %s \n", op.Value)
	}
	log.Printf("--------------------------------------------------------")
}
// appendAdd appends one JSON-Patch "add" operation to p. While *first
// is true the target list does not exist yet, so the operation targets
// the list path itself (value must then be a one-element slice); *first
// is cleared and later calls append with the JSON-Patch "/-" suffix.
func (p *patches) appendAdd(path string, first *bool, value interface{}) {
	if *first {
		*first = false
	} else {
		path += "/-"
	}
	*p = append(*p, patchOperation{
		Op:    "add",
		Path:  path,
		Value: value,
	})
}

// addVolumes appends "add" operations for each volume, creating
// /spec/volumes when the pod has none yet.
func (p *patches) addVolumes(pod *corev1.Pod, volumes []corev1.Volume) {
	first := len(pod.Spec.Volumes) == 0
	for _, v := range volumes {
		var value interface{} = v
		if first {
			// Creating the list: the value must be a slice literal.
			value = []corev1.Volume{v}
		}
		p.appendAdd("/spec/volumes", &first, value)
	}
}

// addContainers appends "add" operations for each container, creating
// /spec/containers when the pod has none yet.
func (p *patches) addContainers(pod *corev1.Pod, containers []corev1.Container) {
	first := len(pod.Spec.Containers) == 0
	for _, c := range containers {
		var value interface{} = c
		if first {
			value = []corev1.Container{c}
		}
		p.appendAdd("/spec/containers", &first, value)
	}
}

// addInitContainers appends "add" operations for each init container,
// creating /spec/initContainers when the pod has none yet.
func (p *patches) addInitContainers(pod *corev1.Pod, containers []corev1.Container) {
	first := len(pod.Spec.InitContainers) == 0
	for _, c := range containers {
		var value interface{} = c
		if first {
			value = []corev1.Container{c}
		}
		p.appendAdd("/spec/initContainers", &first, value)
	}
}

// addVolumeMounts appends "add" operations for each volume mount on the
// pod's FIRST container, creating its volumeMounts list when empty.
// NOTE(review): this indexes pod.Spec.Containers[0] and will panic if
// the pod has no containers — confirm callers guarantee at least one.
func (p *patches) addVolumeMounts(pod *corev1.Pod, vms []corev1.VolumeMount) {
	first := len(pod.Spec.Containers[0].VolumeMounts) == 0
	for _, vm := range vms {
		var value interface{} = vm
		if first {
			value = []corev1.VolumeMount{vm}
		}
		p.appendAdd("/spec/containers/0/volumeMounts", &first, value)
	}
}
|
package postgresql
import(
"testing"
"context"
"github.com/Mindslave/skade/backend/internal/entities"
"github.com/stretchr/testify/require"
)
// TestStoreFile verifies that a well-formed file record is stored
// without error.
func TestStoreFile(t *testing.T) {
	params := entities.DbStoreFileParams{
		Filename:      "testfile",
		Filesize:      100,
		FileExtension: "exe",
	}
	require.NoError(t, testrepo.StoreFile(context.Background(), params))
}
|
package rest
import (
"encoding/json"
"fmt"
"log"
"net/http"
"strconv"
"github.com/brigadecore/brigade/v2/apiserver/internal/api"
"github.com/brigadecore/brigade/v2/apiserver/internal/lib/restmachinery"
"github.com/brigadecore/brigade/v2/apiserver/internal/meta"
"github.com/gorilla/mux"
"github.com/pkg/errors"
"github.com/xeipuuv/gojsonschema"
)
// WorkersEndpoints implements restmachinery.Endpoints to provide Worker-related
// URL --> action mappings to a restmachinery.Server.
type WorkersEndpoints struct {
	// AuthFilter decorates each handler with authentication/authorization.
	AuthFilter restmachinery.Filter
	// WorkerStatusSchemaLoader validates inbound worker-status bodies.
	WorkerStatusSchemaLoader gojsonschema.JSONLoader
	// Service carries out the actual worker operations.
	Service api.WorkersService
}
// Register is invoked by restmachinery.Server to register Worker-related URL
// --> action mappings to a restmachinery.Server. All mutating routes use PUT;
// status supports both a one-shot GET and (via ?watch=true) streaming.
func (w *WorkersEndpoints) Register(router *mux.Router) {
	// Start worker
	router.HandleFunc(
		"/v2/events/{eventID}/worker/start",
		w.AuthFilter.Decorate(w.start),
	).Methods(http.MethodPut)
	// Get/stream worker status
	router.HandleFunc(
		"/v2/events/{eventID}/worker/status",
		w.AuthFilter.Decorate(w.getOrStreamStatus),
	).Methods(http.MethodGet)
	// Update worker status
	router.HandleFunc(
		"/v2/events/{eventID}/worker/status",
		w.AuthFilter.Decorate(w.updateStatus),
	).Methods(http.MethodPut)
	// Clean up a worker
	router.HandleFunc(
		"/v2/events/{eventID}/worker/cleanup",
		w.AuthFilter.Decorate(w.cleanup),
	).Methods(http.MethodPut)
	// Timeout a worker
	router.HandleFunc(
		"/v2/events/{eventID}/worker/timeout",
		w.AuthFilter.Decorate(w.timeout),
	).Methods(http.MethodPut)
}
// start handles PUT /v2/events/{eventID}/worker/start by delegating to
// the workers service.
func (w *WorkersEndpoints) start(wr http.ResponseWriter, r *http.Request) {
	eventID := mux.Vars(r)["eventID"]
	restmachinery.ServeRequest(
		restmachinery.InboundRequest{
			W: wr,
			R: r,
			EndpointLogic: func() (interface{}, error) {
				return nil, w.Service.Start(r.Context(), eventID)
			},
			SuccessCode: http.StatusOK,
		},
	)
}
// getOrStreamStatus handles GET /v2/events/{eventID}/worker/status.
// Without ?watch=true it returns the current status once. With it, the
// handler streams status updates (raw chunks, or SSE frames when
// ?sse=true) until the status reaches a terminal phase or the status
// channel closes.
func (w *WorkersEndpoints) getOrStreamStatus(
	wr http.ResponseWriter,
	r *http.Request,
) {
	eventID := mux.Vars(r)["eventID"]
	// nolint: errcheck
	watch, _ := strconv.ParseBool(r.URL.Query().Get("watch"))
	// Clients can request use of the SSE protocol instead of HTTP/2 streaming.
	// Not every potential client language has equally good support for both of
	// those, so allowing clients to pick is useful.
	sse, _ := strconv.ParseBool(r.URL.Query().Get("sse")) // nolint: errcheck
	if !watch {
		// One-shot: return the current status and finish.
		restmachinery.ServeRequest(
			restmachinery.InboundRequest{
				W: wr,
				R: r,
				EndpointLogic: func() (interface{}, error) {
					return w.Service.GetStatus(r.Context(), eventID)
				},
				SuccessCode: http.StatusOK,
			},
		)
		return
	}
	statusCh, err := w.Service.WatchStatus(r.Context(), eventID)
	if err != nil {
		// A missing event maps to 404; anything else is a 500.
		if _, ok := errors.Cause(err).(*meta.ErrNotFound); ok {
			restmachinery.WriteAPIResponse(wr, http.StatusNotFound, errors.Cause(err))
			return
		}
		log.Printf(
			"error retrieving worker status stream for event %q: %s",
			eventID,
			err,
		)
		restmachinery.WriteAPIResponse(
			wr,
			http.StatusInternalServerError,
			&meta.ErrInternalServer{},
		)
		return
	}
	wr.Header().Set("Content-Type", "text/event-stream")
	// This can't not be a http.Flusher
	flusher := wr.(http.Flusher) // nolint: forcetypeassert
	flusher.Flush()
	for status := range statusCh {
		statusBytes, err := json.Marshal(status)
		if err != nil {
			log.Println(errors.Wrapf(err, "error marshaling worker status"))
			return
		}
		if sse {
			fmt.Fprintf(wr, "event: message\ndata: %s\n\n", string(statusBytes))
		} else {
			fmt.Fprint(wr, string(statusBytes))
		}
		// Flush after each update so clients see it immediately.
		flusher.Flush()
		if status.Phase.IsTerminal() {
			// If we're using SSE, we'll explicitly send an event that denotes the end
			// of the stream.
			if sse {
				fmt.Fprintf(wr, "event: done\ndata: done\n\n")
				flusher.Flush()
			}
			return
		}
	}
}
// updateStatus handles PUT /v2/events/{eventID}/worker/status by
// decoding the request body (validated against the worker-status
// schema) and forwarding it to the service.
func (w *WorkersEndpoints) updateStatus(
	wr http.ResponseWriter,
	r *http.Request,
) {
	var status api.WorkerStatus
	restmachinery.ServeRequest(
		restmachinery.InboundRequest{
			W:                   wr,
			R:                   r,
			ReqBodySchemaLoader: w.WorkerStatusSchemaLoader,
			ReqBodyObj:          &status,
			EndpointLogic: func() (interface{}, error) {
				err := w.Service.UpdateStatus(r.Context(), mux.Vars(r)["eventID"], status)
				return nil, err
			},
			SuccessCode: http.StatusOK,
		},
	)
}
// cleanup handles PUT /v2/events/{eventID}/worker/cleanup.
func (w *WorkersEndpoints) cleanup(
	wr http.ResponseWriter,
	r *http.Request,
) {
	restmachinery.ServeRequest(
		restmachinery.InboundRequest{
			W: wr,
			R: r,
			EndpointLogic: func() (interface{}, error) {
				err := w.Service.Cleanup(r.Context(), mux.Vars(r)["eventID"])
				return nil, err
			},
			SuccessCode: http.StatusOK,
		},
	)
}
// timeout handles PUT /v2/events/{eventID}/worker/timeout.
func (w *WorkersEndpoints) timeout(
	wr http.ResponseWriter,
	r *http.Request,
) {
	restmachinery.ServeRequest(
		restmachinery.InboundRequest{
			W: wr,
			R: r,
			EndpointLogic: func() (interface{}, error) {
				err := w.Service.Timeout(r.Context(), mux.Vars(r)["eventID"])
				return nil, err
			},
			SuccessCode: http.StatusOK,
		},
	)
}
|
package dao
import (
"bytes"
"fmt"
"ssq-spider/logger"
"ssq-spider/model"
"strings"
)
const (
	// sysConfigTable is the MySQL table holding typed key/value system config.
	sysConfigTable = "sys_config_t"
	// ssqNumberTable is the MySQL table holding SSQ draw numbers.
	ssqNumberTable = "ssq_number_t"
)
// GetOneSysConfig returns the value of the sys-config row matching
// typeId and name, or an error when the client cannot be created or the
// lookup fails.
func GetOneSysConfig(typeId string, name string) (string, error) {
	db, err := NewMysqlDBClient()
	if err != nil {
		logger.Logger.Error("NewMysqlDBClient error: ", err)
		return "", err
	}
	var cfg model.SysConfig
	err = db.Table(sysConfigTable).
		Where("type_id=? and name=?", typeId, name).
		First(&cfg).Error
	if err != nil {
		logger.Logger.Error("query one sys config error:", err)
		return "", err
	}
	return cfg.Value, nil
}
// UpdateOneSysConfig sets the value column of the sys-config row
// matching typeId and name.
func UpdateOneSysConfig(typeId string, name string, value string) error {
	db, err := NewMysqlDBClient()
	if err != nil {
		logger.Logger.Error("NewMysqlDBClient error: ", err)
		return err
	}
	return db.Table(sysConfigTable).
		Where("type_id=? and name=?", typeId, name).
		UpdateColumn("value", value).
		Error
}
// BulkSaveSSQOpenNumbers inserts all given draw-number records in one
// multi-row INSERT. Values are bound with `?` placeholders instead of
// being formatted into the SQL string, which avoids SQL injection and
// quoting bugs in the stored fields.
func BulkSaveSSQOpenNumbers(ssqOpenNums *[]model.SSQOpenNumber) error {
	if len(*ssqOpenNums) == 0 {
		return nil
	}
	db, err := NewMysqlDBClient()
	if err != nil {
		logger.Logger.Error("NewMysqlDBClient error: ", err)
		return err
	}
	fields := []string{"open_no", "red_num", "blue_num", "ball_sort"}
	var buffer bytes.Buffer
	buffer.WriteString(fmt.Sprintf("insert into %s (%s) values ", ssqNumberTable, strings.Join(fields, ",")))
	args := make([]interface{}, 0, len(*ssqOpenNums)*len(fields))
	for i, ssqNum := range *ssqOpenNums {
		if i > 0 {
			buffer.WriteString(",")
		}
		buffer.WriteString("(?,?,?,?)")
		args = append(args,
			ssqNum.OpenNo,
			ssqNum.RedNum,
			ssqNum.BlueNum,
			ssqNum.BallSort)
	}
	buffer.WriteString(";")
	return db.Exec(buffer.String(), args...).Error
}
|
package coordinator
import (
"errors"
"fmt"
. "github.com/fmstephe/matching_engine/msg"
"runtime"
"testing"
)
// chanWriter adapts a channel of RMessages to a writer: every Write is
// unmarshalled into an RMessage and forwarded on out, letting tests
// observe what a responder writes.
type chanWriter struct {
	out chan *RMessage
}
// newChanWriter returns a chanWriter forwarding to out.
func newChanWriter(out chan *RMessage) *chanWriter {
	return &chanWriter{out: out}
}
// Write unmarshals b into an RMessage, forwards it on the out channel,
// and reports the whole buffer as written.
func (c chanWriter) Write(b []byte) (int, error) {
	msg := &RMessage{}
	msg.Unmarshal(b)
	c.out <- msg
	return len(b), nil
}
// Close is a no-op; the underlying channel stays open.
func (c chanWriter) Close() error {
	return nil
}
// startMockedResponder wires a reliableResponder to buffered channels
// and a chanWriter so tests can observe everything it writes on out.
func startMockedResponder() (fromApp chan *Message, fromListener chan *RMessage, out chan *RMessage) {
	fromApp = make(chan *Message, 100)
	fromListener = make(chan *RMessage, 100)
	out = make(chan *RMessage, 100)
	responder := newReliableResponder(newChanWriter(out), fromApp, fromListener, "Mocked Responder", uint32(1), false)
	go responder.Run()
	return fromApp, fromListener, out
}
// TestAppMsgWrittenOut verifies that every application message pushed
// into the responder is written out wrapped in an APP/IN RMessage with
// a sequentially increasing msgId.
func TestAppMsgWrittenOut(t *testing.T) {
	fromApp, _, out := startMockedResponder()
	msgs := []*Message{
		{Kind: BUY, TraderId: 1, TradeId: 1, StockId: 1, Price: 1, Amount: 1},
		{Kind: SELL, TraderId: 2, TradeId: 2, StockId: 2, Price: 2, Amount: 2},
		{Kind: PARTIAL, TraderId: 3, TradeId: 3, StockId: 3, Price: 3, Amount: 3},
		{Kind: FULL, TraderId: 4, TradeId: 4, StockId: 4, Price: 4, Amount: 4},
	}
	// The expected wrapper is mutated per iteration; msgId advances via
	// ++ so we never need to name its concrete integer type.
	expected := &RMessage{route: APP, direction: IN, originId: 1}
	for _, m := range msgs {
		expected.msgId++
		expected.message = *m
		fromApp <- m
		validateRMsg(t, <-out, expected, 1)
	}
}
// TestOutAckWrittenOut verifies that outbound acks from the listener
// are re-stamped by the responder with its own originId (1), direction
// IN and a fresh sequential msgId before being written out.
func TestOutAckWrittenOut(t *testing.T) {
	_, fromListener, out := startMockedResponder()
	acks := []*RMessage{
		{route: ACK, direction: OUT, originId: 2, msgId: 10, message: Message{Kind: BUY, TraderId: 1, TradeId: 1, StockId: 1, Price: 1, Amount: 1}},
		{route: ACK, direction: OUT, originId: 2, msgId: 11, message: Message{Kind: BUY, TraderId: 2, TradeId: 2, StockId: 2, Price: 2, Amount: 2}},
		{route: ACK, direction: OUT, originId: 3, msgId: 12, message: Message{Kind: BUY, TraderId: 3, TradeId: 3, StockId: 3, Price: 3, Amount: 3}},
		{route: ACK, direction: OUT, originId: 4, msgId: 13, message: Message{Kind: BUY, TraderId: 4, TradeId: 4, StockId: 4, Price: 4, Amount: 4}},
	}
	// seq tracks the responder's expected outgoing msgId counter without
	// naming its concrete integer type.
	seq := &RMessage{}
	for _, a := range acks {
		seq.msgId++
		expected := &RMessage{}
		*expected = *a
		expected.direction = IN
		expected.originId = 1
		expected.msgId = seq.msgId
		fromListener <- a
		validateRMsg(t, <-out, expected, 1)
	}
}
// Two response messages with the same traderId/tradeId should both be resent (until acked)
// When this test was written the CANCELLED message would overwrite the PARTIAL, and only the CANCELLED would be resent
func TestServerAckNotOverwrittenByCancel(t *testing.T) {
	out := make(chan *RMessage, 100)
	w := chanWriter{out}
	r := &reliableResponder{writer: w, unacked: newSet()}
	// PARTIAL and CANCELLED for the same trade (TraderId 10, TradeId 43)
	// but distinct msgIds — both must stay in the unacked set.
	p := &RMessage{route: APP, direction: IN, originId: 1, msgId: 1, message: Message{Kind: PARTIAL, TraderId: 10, TradeId: 43, StockId: 1, Price: 1, Amount: 1}}
	c := &RMessage{route: APP, direction: IN, originId: 1, msgId: 2, message: Message{Kind: CANCELLED, TraderId: 10, TradeId: 43, StockId: 1, Price: 1, Amount: 1}}
	// Add PARTIAL to unacked list
	r.addToUnacked(p)
	r.resend()
	allResent(t, out, p)
	// Add CANCEL to unacked list
	r.addToUnacked(c)
	r.resend()
	allResent(t, out, p, c)
}
// TestUnackedInDetail drives the responder's unacked set directly:
// every message added via addToUnacked must be included in each
// resend() until its matching ack arrives via handleInAck, and acks for
// unknown msgIds must be ignored without disturbing the set.
func TestUnackedInDetail(t *testing.T) {
	out := make(chan *RMessage, 100)
	w := chanWriter{out}
	r := &reliableResponder{writer: w, unacked: newSet()}
	// Pre-canned message/ack pairs
	m1 := &RMessage{route: APP, direction: IN, originId: 1, msgId: 1, message: Message{Kind: FULL, TraderId: 10, TradeId: 43, StockId: 1, Price: 1, Amount: 1}}
	a1 := &RMessage{route: ACK, direction: IN, originId: 1, msgId: 1, message: Message{Kind: FULL, TraderId: 10, TradeId: 43, StockId: 1, Price: 1, Amount: 1}}
	m2 := &RMessage{route: APP, direction: IN, originId: 1, msgId: 2, message: Message{Kind: FULL, TraderId: 123, TradeId: 2000, StockId: 1, Price: 1, Amount: 1}}
	a2 := &RMessage{route: ACK, direction: IN, originId: 1, msgId: 2, message: Message{Kind: FULL, TraderId: 123, TradeId: 2000, StockId: 1, Price: 1, Amount: 1}}
	m3 := &RMessage{route: APP, direction: IN, originId: 1, msgId: 3, message: Message{Kind: FULL, TraderId: 777, TradeId: 5432, StockId: 1, Price: 1, Amount: 1}}
	a3 := &RMessage{route: ACK, direction: IN, originId: 1, msgId: 3, message: Message{Kind: FULL, TraderId: 777, TradeId: 5432, StockId: 1, Price: 1, Amount: 1}}
	m4 := &RMessage{route: APP, direction: IN, originId: 1, msgId: 4, message: Message{Kind: FULL, TraderId: 371, TradeId: 999, StockId: 1, Price: 1, Amount: 1}}
	a4 := &RMessage{route: ACK, direction: IN, originId: 1, msgId: 4, message: Message{Kind: FULL, TraderId: 371, TradeId: 999, StockId: 1, Price: 1, Amount: 1}}
	m5 := &RMessage{route: APP, direction: IN, originId: 1, msgId: 5, message: Message{Kind: FULL, TraderId: 87, TradeId: 50, StockId: 1, Price: 1, Amount: 1}}
	a5 := &RMessage{route: ACK, direction: IN, originId: 1, msgId: 5, message: Message{Kind: FULL, TraderId: 87, TradeId: 50, StockId: 1, Price: 1, Amount: 1}}
	m6 := &RMessage{route: APP, direction: IN, originId: 1, msgId: 6, message: Message{Kind: FULL, TraderId: 40, TradeId: 499, StockId: 1, Price: 1, Amount: 1}}
	a6 := &RMessage{route: ACK, direction: IN, originId: 1, msgId: 6, message: Message{Kind: FULL, TraderId: 40, TradeId: 499, StockId: 1, Price: 1, Amount: 1}}
	m7 := &RMessage{route: APP, direction: IN, originId: 1, msgId: 7, message: Message{Kind: FULL, TraderId: 99, TradeId: 700000, StockId: 1, Price: 1, Amount: 1}}
	a7 := &RMessage{route: ACK, direction: IN, originId: 1, msgId: 7, message: Message{Kind: FULL, TraderId: 99, TradeId: 700000, StockId: 1, Price: 1, Amount: 1}}
	aUnkown := &RMessage{route: ACK, direction: IN, originId: 1, msgId: 8, message: Message{Kind: FULL, TraderId: 1, TradeId: 1, StockId: 1, Price: 1, Amount: 1}}
	// Add m1-5 to unacked list
	r.addToUnacked(m1)
	r.resend()
	allResent(t, out, m1)
	r.resend()
	allResent(t, out, m1)
	r.addToUnacked(m2)
	r.resend()
	allResent(t, out, m1, m2)
	r.addToUnacked(m3)
	r.resend()
	allResent(t, out, m1, m2, m3)
	r.addToUnacked(m4)
	r.resend()
	allResent(t, out, m1, m2, m3, m4)
	r.addToUnacked(m5)
	r.resend()
	allResent(t, out, m1, m2, m3, m4, m5)
	// ack m3
	r.handleInAck(a3)
	r.resend()
	allResent(t, out, m1, m2, m4, m5)
	// ack m1
	r.handleInAck(a1)
	r.resend()
	allResent(t, out, m2, m4, m5)
	// ack unknown — must leave the unacked set untouched
	r.handleInAck(aUnkown)
	r.resend()
	allResent(t, out, m2, m4, m5)
	// Add m6
	r.addToUnacked(m6)
	r.resend()
	allResent(t, out, m2, m4, m5, m6)
	// ack m4 and m6, and add m7
	r.handleInAck(a4)
	r.handleInAck(a6)
	r.addToUnacked(m7)
	r.resend()
	allResent(t, out, m2, m5, m7)
	// ack m2, m5 and m7
	r.handleInAck(a2)
	r.handleInAck(a5)
	r.handleInAck(a7)
	r.resend()
	if len(out) != 0 {
		t.Errorf("Expecting no messages re-sent, found %d", len(out))
	}
}
// allResent drains out and asserts the drained messages equal the
// expected set (in any order), reporting the caller's location on
// failure.
func allResent(t *testing.T, out chan *RMessage, expect ...*RMessage) {
	var received []*RMessage
	for len(out) > 0 {
		received = append(received, <-out)
	}
	if len(received) != len(expect) {
		_, fname, lnum, _ := runtime.Caller(1)
		t.Errorf("Expecting %d messages, received %d\n%s:%d", len(expect), len(received), fname, lnum)
	}
	allOfIn(t, expect, received)
	allOfIn(t, received, expect)
}
// allOfIn asserts that every message in first appears (by value) in
// second, reporting the test call site two frames up on failure.
func allOfIn(t *testing.T, first, second []*RMessage) {
outer:
	for _, want := range first {
		for _, got := range second {
			if *want == *got {
				continue outer
			}
		}
		_, fname, lnum, _ := runtime.Caller(2)
		t.Errorf("Expecting %v, not found\n%s:%d", want, fname, lnum)
	}
}
// chanReader simulates a faulty network reader: readErrors and
// readSizes script, per Read call, whether the read fails and how many
// bytes it claims to read (the last entry of each repeats forever).
type chanReader struct {
	in chan *RMessage
	readErrors []bool
	readSizes []int
}
// newChanReader returns a chanReader scripted by readErrors/readSizes.
func newChanReader(in chan *RMessage, readErrors []bool, readSizes []int) *chanReader {
	return &chanReader{in: in, readErrors: readErrors, readSizes: readSizes}
}
// Read pops the next message from the in channel and marshals it into a
// prefix of b whose length comes from the scripted read sizes. A
// scripted read error is reported only after the message has been
// consumed, mimicking a read that fails mid-flight.
func (r *chanReader) Read(b []byte) (int, error) {
	bb := b[:r.readSize()]
	m := <-r.in
	m.Marshal(bb)
	if r.readError() {
		return 0, errors.New("fake error")
	}
	return len(bb), nil
}
// readSize pops the next scripted read size (the final entry repeats
// forever) and panics on values outside [0, rmsgByteSize].
func (r *chanReader) readSize() int {
	size := r.readSizes[0]
	if len(r.readSizes) > 1 {
		r.readSizes = r.readSizes[1:]
	}
	if size > rmsgByteSize || size < 0 {
		panic(fmt.Sprintf("Illegal readSizes value (%d). Must be between 0 and %d", size, rmsgByteSize))
	}
	return size
}
// readError pops the next scripted error flag (the final entry repeats
// forever).
func (r *chanReader) readError() bool {
	shouldFail := r.readErrors[0]
	if len(r.readErrors) > 1 {
		r.readErrors = r.readErrors[1:]
	}
	return shouldFail
}
// Close is a no-op; the underlying channel stays open.
func (r *chanReader) Close() error {
	return nil
}
// startMockedListener starts a reliableListener with a healthy reader:
// no scripted errors and always full-size reads.
func startMockedListener() (in chan *RMessage, outApp chan *Message, outResponder chan *RMessage) {
	return startMockedListenerFaulty([]bool{false}, []int{rmsgByteSize})
}
// startMockedListenerFaulty starts a reliableListener whose reader
// fails/truncates reads according to shouldErr and readN (the final
// entry of each repeats forever).
func startMockedListenerFaulty(shouldErr []bool, readN []int) (in chan *RMessage, outApp chan *Message, outResponder chan *RMessage) {
	in = make(chan *RMessage, 100)
	outApp = make(chan *Message, 100)
	outResponder = make(chan *RMessage, 100)
	listener := newReliableListener(newChanReader(in, shouldErr, readN), outApp, outResponder, "Mocked Listener", uint32(1), false)
	go listener.Run()
	return in, outApp, outResponder
}
// TestSmallReadError verifies that a truncated read (rmsgByteSize-1
// bytes) is discarded: only the subsequent full-size message is acked
// and forwarded to the app.
func TestSmallReadError(t *testing.T) {
	in, outApp, outResponder := startMockedListenerFaulty([]bool{false}, []int{rmsgByteSize - 1, rmsgByteSize})
	rmSmall := &RMessage{route: APP, direction: IN, originId: 1, msgId: 1, message: Message{Kind: SELL, Price: 7, Amount: 1, TraderId: 1, TradeId: 1, StockId: 1}}
	rm := &RMessage{route: APP, direction: IN, originId: 1, msgId: 2, message: Message{Kind: SELL, Price: 7, Amount: 1, TraderId: 1, TradeId: 2, StockId: 1}}
	in <- rmSmall
	in <- rm
	// Expected server ack only for rm
	a := &RMessage{}
	a.WriteAckFor(rm)
	// Expect app to receive rm.message
	m := &Message{}
	*m = rm.message
	validateRMsg(t, <-outResponder, a, 1)
	validateMsg(t, <-outApp, m, 1)
}
// TestReadError verifies that a message whose read reports an error is
// discarded: only the subsequent clean message is acked and forwarded
// to the app.
func TestReadError(t *testing.T) {
	in, outApp, outResponder := startMockedListenerFaulty([]bool{true, false}, []int{rmsgByteSize})
	rmBroken := &RMessage{route: APP, direction: IN, originId: 1, msgId: 1, message: Message{Kind: SELL, Price: 7, Amount: 1, TraderId: 1, TradeId: 1, StockId: 1}}
	rm := &RMessage{route: APP, direction: IN, originId: 1, msgId: 2, message: Message{Kind: SELL, Price: 7, Amount: 1, TraderId: 1, TradeId: 2, StockId: 1}}
	in <- rmBroken
	in <- rm
	// Expected server ack only for rm
	a := &RMessage{}
	a.WriteAckFor(rm)
	// Expect app to receive rm.message
	m := &Message{}
	*m = rm.message
	validateRMsg(t, <-outResponder, a, 1)
	validateMsg(t, <-outApp, m, 1)
}
// TestDuplicate verifies dedup behaviour: a message received twice is
// acked both times but forwarded to the app only once, and a later
// distinct message flows through normally.
func TestDuplicate(t *testing.T) {
	in, outApp, outResponder := startMockedListener()
	m := &RMessage{route: APP, direction: IN, originId: 1, msgId: 1, message: Message{Kind: SELL, Price: 7, Amount: 1, TraderId: 1, TradeId: 1, StockId: 1}}
	in <- m
	in <- m
	// Expected server ack
	a := &RMessage{}
	a.WriteAckFor(m)
	// Expected app msgs
	am := &Message{}
	*am = m.message
	// Expect an ack for both messages but the message is only forwarded on once
	validateRMsg(t, <-outResponder, a, 1)
	validateMsg(t, <-outApp, am, 1)
	validateRMsg(t, <-outResponder, a, 1)
	m2 := &RMessage{route: APP, direction: IN, originId: 1, msgId: 2, message: Message{Kind: SELL, Price: 7, Amount: 1, TraderId: 2, TradeId: 1, StockId: 1}}
	in <- m2
	// Expected server ack 2
	a2 := &RMessage{}
	a2.WriteAckFor(m2)
	// Expected app msg
	am2 := &Message{}
	*am2 = m2.message
	// An ack for m2 and m2 (but nothing relating to m)
	validateRMsg(t, <-outResponder, a2, 1)
	validateMsg(t, <-outApp, am2, 1)
}
// Test ACK sent in, twice, expect ACK both times
// (acks are never deduped: each copy is passed through to the responder).
func TestDuplicateAck(t *testing.T) {
	in, _, outResponder := startMockedListener()
	m := &RMessage{route: ACK, direction: IN, originId: 1, msgId: 1, message: Message{Kind: SELL, Price: 7, Amount: 1, TraderId: 1, TradeId: 1, StockId: 1}}
	in <- m
	in <- m
	in <- m
	// Expect the ack to be passed through both times
	validateRMsg(t, <-outResponder, m, 1)
	validateRMsg(t, <-outResponder, m, 1)
	validateRMsg(t, <-outResponder, m, 1)
}
// TestOrdersAckedSentAndDeduped checks, for every message kind, that
// sending the same message three times yields an ack for each copy but
// forwards the message to the app only once.
func TestOrdersAckedSentAndDeduped(t *testing.T) {
	// One representative message per kind; all other fields identical.
	msgs := []*RMessage{
		{route: APP, direction: IN, originId: 1, msgId: 1, message: Message{Kind: BUY, Price: 7, Amount: 1, TraderId: 1, TradeId: 1, StockId: 1}},
		{route: APP, direction: IN, originId: 1, msgId: 1, message: Message{Kind: SELL, Price: 7, Amount: 1, TraderId: 1, TradeId: 1, StockId: 1}},
		{route: APP, direction: IN, originId: 1, msgId: 1, message: Message{Kind: PARTIAL, Price: 7, Amount: 1, TraderId: 1, TradeId: 1, StockId: 1}},
		{route: APP, direction: IN, originId: 1, msgId: 1, message: Message{Kind: FULL, Price: 7, Amount: 1, TraderId: 1, TradeId: 1, StockId: 1}},
		{route: APP, direction: IN, originId: 1, msgId: 1, message: Message{Kind: CANCELLED, Price: 7, Amount: 1, TraderId: 1, TradeId: 1, StockId: 1}},
		{route: APP, direction: IN, originId: 1, msgId: 1, message: Message{Kind: NOT_CANCELLED, Price: 7, Amount: 1, TraderId: 1, TradeId: 1, StockId: 1}},
		{route: APP, direction: IN, originId: 1, msgId: 1, message: Message{Kind: CANCEL, Price: 7, Amount: 1, TraderId: 1, TradeId: 1, StockId: 1}},
	}
	for _, m := range msgs {
		sendThriceAckMsgAckAck(t, m)
	}
}
// sendThriceAckMsgAckAck sends m three times into a fresh mocked
// listener and asserts the pattern: ack + forwarded message for the
// first copy, then ack-only for each duplicate. Failures are reported
// at the caller's call site (stackOffset 2).
func sendThriceAckMsgAckAck(t *testing.T, m *RMessage) {
	in, outApp, outResponder := startMockedListener()
	in <- m
	in <- m
	in <- m
	// Ack
	a := &RMessage{}
	a.WriteAckFor(m)
	// App
	am := &Message{}
	*am = m.message
	validateRMsg(t, <-outResponder, a, 2)
	validateMsg(t, <-outApp, am, 2)
	validateRMsg(t, <-outResponder, a, 2)
	validateRMsg(t, <-outResponder, a, 2)
}
// TestBadNetwork runs the shared bad-network scenario against the
// Reliable coordinator flavour with a 0.5 fault parameter (presumably a
// drop/corruption rate — defined by testBadNetwork elsewhere).
func TestBadNetwork(t *testing.T) {
	testBadNetwork(t, 0.5, Reliable)
}
// validateMsg fails the test when m differs from e, reporting the
// frame stackOffset levels up as the failure location.
func validateMsg(t *testing.T, m, e *Message, stackOffset int) {
	if *m == *e {
		return
	}
	_, fname, lnum, _ := runtime.Caller(stackOffset)
	t.Errorf("\nExpecting: %v\nFound: %v \n%s:%d", e, m, fname, lnum)
}
// validateRMsg fails the test when m differs from e, reporting the
// frame stackOffset levels up as the failure location.
func validateRMsg(t *testing.T, m, e *RMessage, stackOffset int) {
	if *m == *e {
		return
	}
	_, fname, lnum, _ := runtime.Caller(stackOffset)
	t.Errorf("\nExpecting: %v\nFound: %v \n%s:%d", e, m, fname, lnum)
}
|
package tdb
import (
"database/sql"
"fmt"
"log"
"time"
)
// dailyreport provides insert access to the "dailyreport" table.
type dailyreport base
// insertRecodeSQL is the shared prepared insert statement, initialized
// by NewDailyReportDB. NOTE(review): package-level state means a second
// NewDailyReportDB call leaks/replaces the earlier statement.
var insertRecodeSQL *sql.Stmt
// NOTE(review): a shared package-level err is race-prone; prefer local
// error variables.
var err error
// NewDailyReportDB prepares the shared insert statement for the
// dailyreport table and returns a handle bound to it. The prepare error
// is kept local instead of clobbering the package-level err variable
// (shared mutable state the original wrote to).
func NewDailyReportDB() *dailyreport {
	table := "dailyreport"
	stmt, prepErr := conn.Prepare(fmt.Sprintf("Insert into %s(no, filter, timestamp) Values(?,?,?)", table))
	if prepErr != nil {
		log.Fatal(prepErr)
	}
	insertRecodeSQL = stmt
	return &dailyreport{
		table: table,
	}
}
// InsertRecode inserts one record (no, filter number, timestamp) via
// the shared prepared statement. NewDailyReportDB must have been called
// first so insertRecodeSQL is non-nil.
func (d dailyreport) InsertRecode(no string, filterno uint64, date time.Time) (sql.Result, error) {
	return insertRecodeSQL.Exec(no, filterno, date)
}
// Close releases the shared prepared statement.
func (d dailyreport) Close() error {
	return insertRecodeSQL.Close()
}
|
/*
LRUCache is a simple LRU cache. It is based on the LRU implementation in groupcache:
https://github.com/golang/groupcache/tree/master/lru
*/
package sqlmonitor
import "container/list"
import (
"sync"
)
// LRUCache is an LRU cache guarded internally by a RWMutex (unlike the
// upstream groupcache implementation it was derived from, which is not
// safe for concurrent access).
type LRUCache struct {
	// MaxEntries is the maximum number of cache entries before
	// an item is evicted. Zero means no limit.
	MaxEntries int
	// OnEvicted optionally specifies a callback function to be
	// executed when an entry is purged from the cache.
	OnEvicted func(key Key, value interface{})
	// OnValueUpdate optionally specifies a callback that computes the
	// stored value whenever an entry is added or updated; it receives
	// the previous value (nil on first insert) and the new one.
	OnValueUpdate func(key Key, valueOld interface{}, valueNew interface{}) interface{}
	lock sync.RWMutex
	ll *list.List
	cache map[interface{}]*list.Element
}
// A Key may be any value that is comparable. See http://golang.org/ref/spec#Comparison_operators
type Key interface{}
// entry is the payload stored in each list element: the key is kept so
// eviction can delete the map slot.
type entry struct {
	key Key
	value interface{}
}
// NewLRUCache creates a new LRUCache.
// If maxEntries is zero, the cache has no limit and it's assumed
// that eviction is done by the caller.
func NewLRUCache(maxEntries int) *LRUCache {
	return &LRUCache{
		MaxEntries: maxEntries,
		ll:         list.New(),
		cache:      make(map[interface{}]*list.Element),
	}
}
// SetMaxEntries updates the capacity, evicting the oldest entries when
// the cache currently holds more than maxEntries items.
func (c *LRUCache) SetMaxEntries(maxEntries int) {
	excess := c.Len() - maxEntries
	c.MaxEntries = maxEntries
	for i := 0; i < excess; i++ {
		c.RemoveOldest()
	}
}
// Add inserts or updates key, moving it to the front and evicting the
// oldest entry when MaxEntries is exceeded. OnValueUpdate, when set,
// decides the stored value for both fresh inserts and updates.
func (c *LRUCache) Add(key Key, value interface{}) {
	c.lock.Lock()
	defer c.lock.Unlock()
	if c.cache == nil {
		c.cache = make(map[interface{}]*list.Element)
		c.ll = list.New()
	}
	if elem, exists := c.cache[key]; exists {
		c.ll.MoveToFront(elem)
		ent := elem.Value.(*entry)
		if c.OnValueUpdate != nil {
			ent.value = c.OnValueUpdate(key, ent.value, value)
		} else {
			ent.value = value
		}
		return
	}
	if c.OnValueUpdate != nil {
		value = c.OnValueUpdate(key, nil, value)
	}
	c.cache[key] = c.ll.PushFront(&entry{key, value})
	if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries {
		if oldest := c.ll.Back(); oldest != nil {
			c.removeElement(oldest)
		}
	}
}
// Get looks up a key's value from the cache. It does not promote the
// entry to the front (recency is only updated by Add).
func (c *LRUCache) Get(key Key) (value interface{}, ok bool) {
	c.lock.RLock()
	defer c.lock.RUnlock()
	// Check for nil under the lock: Clear sets c.cache to nil while
	// holding the write lock, so an unlocked check here was a data race.
	if c.cache == nil {
		return
	}
	if ele, hit := c.cache[key]; hit {
		//c.ll.MoveToFront(ele)
		return ele.Value.(*entry).value, true
	}
	return
}
// Remove removes the provided key from the cache, invoking OnEvicted
// for it if the callback is set.
func (c *LRUCache) Remove(key Key) {
	c.lock.Lock()
	defer c.lock.Unlock()
	// Check for nil under the lock: Clear nils the map while holding the
	// write lock, so an unlocked check here was a data race.
	if c.cache == nil {
		return
	}
	if ele, hit := c.cache[key]; hit {
		c.removeElement(ele)
	}
}
// RemoveOldest removes the oldest item from the cache and returns its
// value, or nil when the cache is empty or uninitialized.
func (c *LRUCache) RemoveOldest() interface{} {
	c.lock.Lock()
	defer c.lock.Unlock()
	// Check for nil under the lock: Clear nils the map while holding the
	// write lock, so an unlocked check here was a data race.
	if c.cache == nil {
		return nil
	}
	ele := c.ll.Back()
	if ele != nil {
		c.removeElement(ele)
		// list.Remove leaves Element.Value intact, so this read is safe.
		return ele.Value.(*entry).value
	}
	return nil
}
// removeElement unlinks e from the list and map and fires OnEvicted.
// The caller must hold c.lock for writing.
func (c *LRUCache) removeElement(e *list.Element) {
	c.ll.Remove(e)
	kv := e.Value.(*entry)
	delete(c.cache, kv.key)
	if c.OnEvicted != nil {
		c.OnEvicted(kv.key, kv.value)
	}
}
// Len returns the number of items in the cache.
func (c *LRUCache) Len() int {
	c.lock.RLock()
	defer c.lock.RUnlock()
	// Check for nil under the lock: Clear nils the map while holding the
	// write lock, so an unlocked check here was a data race.
	if c.cache == nil {
		return 0
	}
	return c.ll.Len()
}
// Clear purges all stored items from the cache, invoking OnEvicted for
// each entry when the callback is set.
func (c *LRUCache) Clear() {
	c.lock.Lock()
	defer c.lock.Unlock()
	if c.OnEvicted != nil {
		for _, elem := range c.cache {
			ent := elem.Value.(*entry)
			c.OnEvicted(ent.key, ent.value)
		}
	}
	c.ll = nil
	c.cache = nil
}
// GetVals returns up to limit values starting at position offset
// (front == most recently used).
// NOTE(review): only getElementAt takes the read lock; the traversal
// below runs unlocked, so a concurrent Add/Remove can race with it —
// confirm whether callers serialize access externally.
func (c *LRUCache) GetVals(limit int, offset int) (vals []interface{}) {
	if c.cache == nil {
		return
	}
	elem := c.getElementAt(offset)
	if nil == elem {
		return
	}
	for i := 0; i < limit && nil != elem; i++ {
		vals = append(vals, elem.Value.(*entry).value)
		elem = elem.Next()
	}
	return vals
}
// getElementAt returns the list element at position index (front == 0),
// walking from whichever end of the list is closer, or nil when index
// is out of range.
func (c *LRUCache) getElementAt(index int) *list.Element {
	c.lock.RLock()
	defer c.lock.RUnlock()
	n := c.ll.Len()
	if index >= n {
		return nil
	}
	if index > n/2 {
		// Closer to the back: step backwards from the tail.
		elem := c.ll.Back()
		for steps := n - index - 1; steps > 0 && elem != nil; steps-- {
			elem = elem.Prev()
		}
		return elem
	}
	// Closer to the front: step forwards from the head.
	elem := c.ll.Front()
	for steps := index; steps > 0 && elem != nil; steps-- {
		elem = elem.Next()
	}
	return elem
}
|
package core
import (
"math"
"time"
)
type (
	// score accumulates the words completed during a game.
	score struct {
		dones []doneWord
	}
	// doneWord pairs a completed word with the time taken to type it.
	doneWord struct {
		word string
		time time.Duration
	}
)
// addDoneWord records one completed word.
func (s *score) addDoneWord(d doneWord) {
	s.dones = append(s.dones, d)
}
// averageTime returns the mean completion time in seconds, rounded to
// two decimal places. It returns 0 when no words have been completed,
// avoiding the NaN the unguarded division by len(s.dones) produced.
func (s *score) averageTime() float64 {
	if len(s.dones) == 0 {
		return 0
	}
	var sumMin float64
	for _, done := range s.dones {
		sumMin += done.time.Seconds()
	}
	return math.Round((sumMin/float64(len(s.dones)))*100) / 100
}
// newDoneWord builds a doneWord for the given word and elapsed time.
// (The previous `*&doneWord{...}` dereference was a no-op.)
func newDoneWord(word string, time time.Duration) doneWord {
	return doneWord{word, time}
}
// consumeDoneQue aggregates results until time-up. Once the end-of-game
// signal arrives, any results still queued are drained and the final
// score is sent on the returned channel.
// NOTE(review): the goroutine (the receiver) defers close(doneQue);
// closing a channel from the receiving side panics if a producer sends
// afterwards — confirm producers are guaranteed to have stopped.
func consumeDoneQue(endGameChan chan struct{}, doneQue chan doneWord) chan score {
	// Unbuffered: the send below blocks until the caller reads the score.
	scoreChan := make(chan score, 0)
	go func(endGameChan chan struct{}, doneQue chan doneWord) {
		defer close(doneQue)
		s := &score{dones: make([]doneWord, 0, 0)}
		var isGameEnd bool = false
		for {
			select {
			case t := <-doneQue:
				s.addDoneWord(t)
			case <-endGameChan:
				isGameEnd = true
			}
			if isGameEnd {
				break
			}
		}
		// Drain results that raced with the end-game signal (the original
		// author notes this timing workaround is not elegant).
		for i := 0; i < len(doneQue); i++ {
			s.addDoneWord(<-doneQue)
		}
		scoreChan <- *s
	}(endGameChan, doneQue)
	return scoreChan
}
|
package main
import (
"context"
"log"
"os"
"github.com/rodrigo-brito/ninjabot/example"
"github.com/rodrigo-brito/ninjabot"
"github.com/rodrigo-brito/ninjabot/pkg/exchange"
"github.com/rodrigo-brito/ninjabot/pkg/model"
"github.com/rodrigo-brito/ninjabot/pkg/notification"
)
// main wires a Binance-backed ninjabot to the example strategy and an
// optional Telegram notifier, then runs the bot until it stops.
func main() {
	ctx := context.Background()
	var (
		apiKey          = os.Getenv("API_KEY")
		secretKey       = os.Getenv("API_SECRET")
		telegramKey     = os.Getenv("TELEGRAM_KEY")
		telegramID      = os.Getenv("TELEGRAM_ID")
		telegramChannel = os.Getenv("TELEGRAM_CHANNEL")
	)
	settings := model.Settings{
		Pairs: []string{"BTCUSDT", "ETHUSDT"},
	}
	// Initialize your exchange
	binance, err := exchange.NewBinance(ctx, exchange.WithBinanceCredentials(apiKey, secretKey))
	if err != nil {
		log.Fatalln(err)
	}
	// (Optional) Telegram notifier
	notifier := notification.NewTelegram(telegramID, telegramKey, telegramChannel)
	bot, err := ninjabot.NewBot(ctx, settings, binance, &example.MyStrategy{})
	if err != nil {
		log.Fatalln(err)
	}
	bot.SubscribeOrder(notifier)
	if err := bot.Run(ctx); err != nil {
		log.Fatalln(err)
	}
}
|
package main
import "fmt"
// calc prints the index label followed by the operands and their sum,
// then returns the sum.
func calc(index string, a, b int) int {
	sum := a + b
	fmt.Println(index, a, b, sum)
	return sum
}
//10 1 2 3
//20 0 2 2
//2 0 2 2
//1 1 3 4
|
package aws
import (
"context"
"os"
"time"
"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/awslabs/k8s-cloudwatch-adapter/pkg/apis/metrics/v1alpha1"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/cloudwatch"
"k8s.io/klog"
)
// NewCloudWatchManager returns a CloudWatchManager bound to the region
// discovered for the local environment.
func NewCloudWatchManager() CloudWatchManager {
	return &cloudwatchManager{
		localRegion: GetLocalRegion(),
	}
}
// cloudwatchManager implements CloudWatchManager; localRegion is the
// fallback region when neither config nor caller supplies one.
type cloudwatchManager struct {
	localRegion string
}
// getClient builds a CloudWatch client, optionally assuming the given
// IAM role and overriding the region (falling back to the manager's
// local region when neither the config nor the caller supplies one).
func (c *cloudwatchManager) getClient(role, region *string) *cloudwatch.CloudWatch {
	sess := session.Must(session.NewSession())
	// Start from the SDK default configuration (environment variables,
	// shared credentials and shared config files) and force regional STS
	// endpoints.
	cfg := aws.NewConfig().WithSTSRegionalEndpoint(endpoints.RegionalSTSEndpoint)
	if role != nil {
		// Assume the requested role via STS.
		cfg = cfg.WithCredentials(stscreds.NewCredentials(sess, *role))
		klog.Infof("using IAM role ARN: %s", *role)
	}
	switch {
	case region != nil:
		cfg = cfg.WithRegion(*region)
	case aws.StringValue(cfg.Region) == "":
		cfg.Region = aws.String(c.localRegion)
	}
	klog.Infof("using AWS Region: %s", aws.StringValue(cfg.Region))
	if os.Getenv("DEBUG") == "true" {
		cfg = cfg.WithLogLevel(aws.LogDebugWithHTTPBody)
	}
	return cloudwatch.New(sess, cfg)
}
// QueryCloudWatch runs the GetMetricData query described by the external
// metric resource over the last five minutes and returns the raw results.
// On failure it returns an empty (non-nil) slice together with the error.
func (c *cloudwatchManager) QueryCloudWatch(request v1alpha1.ExternalMetric) ([]*cloudwatch.MetricDataResult, error) {
    role := request.Spec.RoleARN
    region := request.Spec.Region
    cwQuery := toCloudWatchQuery(&request)
    now := time.Now()
    // Truncate "now" to the whole minute so the window aligns with
    // CloudWatch's per-minute data points.
    endTime := time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), now.Minute(), 0, 0, now.Location())
    // CloudWatch metrics have latency, we will grab in a 5 minute window and extract the latest value
    startTime := endTime.Add(-5 * time.Minute)
    cwQuery.EndTime = &endTime
    cwQuery.StartTime = &startTime
    // Newest-first so the latest value is at the head of each result.
    cwQuery.ScanBy = aws.String("TimestampDescending")
    req, resp := c.getClient(role, region).GetMetricDataRequest(&cwQuery)
    // NOTE(review): uses context.Background(); no caller cancellation/deadline
    // reaches this request — confirm whether a ctx should be threaded through.
    req.SetContext(context.Background())
    if err := req.Send(); err != nil {
        klog.Errorf("err: %v", err)
        return []*cloudwatch.MetricDataResult{}, err
    }
    return resp.MetricDataResults, nil
}
|
// Copyright 2016 Google, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package api
import (
"flag"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/docker/docker/cliconfig"
"github.com/google/subcommands"
"golang.org/x/net/context"
)
// dockerConfigCmd is the "configure-docker" subcommand; it embeds the shared
// cmd metadata and the flag state.
type dockerConfigCmd struct {
    cmd
    overwrite bool // overwrite any previously configured credential store
}

// see https://github.com/docker/docker/blob/master/cliconfig/credentials/native_store.go
// Docker resolves a credsStore value "x" to a binary named "docker-credential-x".
const credHelperPrefix = "docker-credential-"
// NewDockerConfigSubcommand returns a subcommands.Command which configures
// the docker client to use this credential helper. The synopsis embeds the
// running binary's name (os.Args[0]).
func NewDockerConfigSubcommand() subcommands.Command {
    return &dockerConfigCmd{
        cmd{
            name:     "configure-docker",
            synopsis: fmt.Sprintf("configures the Docker client to use %s", os.Args[0]),
        },
        false, // overwrite defaults to off; enabled via the -overwrite flag
    }
}
// SetFlags registers the subcommand's flags on fs.
func (c *dockerConfigCmd) SetFlags(fs *flag.FlagSet) {
    fs.BoolVar(&c.overwrite, "overwrite", false, "overwrite any previously configured credential helper")
}
// Execute wires the current binary up as Docker's credential store: it
// validates the binary's name and PATH presence, loads the Docker CLI
// config, and sets "credsStore" to this helper's suffix unless another
// helper is already configured (override with -overwrite).
func (c *dockerConfigCmd) Execute(context.Context, *flag.FlagSet, ...interface{}) subcommands.ExitStatus {
    binaryName := filepath.Base(os.Args[0])
    if !strings.HasPrefix(binaryName, credHelperPrefix) {
        printErrorln("Binary name must be prefixed with '%s': %s", credHelperPrefix, binaryName)
        return subcommands.ExitFailure
    }
    // the Docker client can only use binaries on the $PATH
    if _, err := exec.LookPath(binaryName); err != nil {
        printErrorln("'%s' must exist on your PATH", binaryName)
        return subcommands.ExitFailure
    }
    // Empty string means the default config location (~/.docker).
    config, err := cliconfig.Load("")
    if err != nil {
        printErrorln("Unable to load docker config: %v", err)
        return subcommands.ExitFailure
    }
    // 'credsStore' takes the suffix of the credential helper binary
    credHelperSuffix := binaryName[len(credHelperPrefix):]
    // Optimization. Don't modify the config if we're already configured.
    if config.CredentialsStore != credHelperSuffix {
        if config.CredentialsStore != "" && !c.overwrite {
            printErrorln("Docker is currently configured to use '%s%s' as its credential store. Retry with --overwrite", credHelperPrefix, config.CredentialsStore)
            return subcommands.ExitFailure
        }
        config.CredentialsStore = credHelperSuffix
        if err = config.Save(); err != nil {
            printErrorln("Unable to save docker config: %v", err)
            return subcommands.ExitFailure
        }
    }
    fmt.Printf("%s configured to use %s as its credential store\n", config.Filename, binaryName)
    return subcommands.ExitSuccess
}
// printErrorln writes a formatted, "ERROR: "-prefixed line to stderr.
func printErrorln(fmtString string, v ...interface{}) {
    msg := fmt.Sprintf(fmtString, v...)
    fmt.Fprintln(os.Stderr, "ERROR: "+msg)
}
|
package main
import (
"fmt"
)
// trap computes how much rain water can be trapped between the bars of the
// elevation map height (LeetCode 42) using the two-pointer scan: the side
// with the smaller wall bounds the water level there, so that pointer can be
// settled and advanced immediately. Runs in O(n) time, O(1) space.
//
// An empty map traps nothing and returns 0.
func trap(height []int) int {
    if len(height) == 0 {
        return 0
    }
    sum := 0
    left, right := 0, len(height)-1
    maxLeft, maxRight := 0, 0
    // Idiomatic `for cond` replaces the original `for ;cond; {` form.
    for left < right {
        if height[left] < height[right] {
            if maxLeft < height[left] {
                maxLeft = height[left] // new left wall; nothing trapped here
            } else {
                sum += maxLeft - height[left] // water up to the left wall
            }
            left++
        } else {
            if maxRight < height[right] {
                maxRight = height[right] // new right wall
            } else {
                sum += maxRight - height[right] // water up to the right wall
            }
            right--
        }
    }
    return sum
}
// main runs the trapping-rain-water sample input and prints the answer (6).
func main() {
    heights := []int{0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1}
    fmt.Println(trap(heights))
}
|
package models
// Gateway is the GORM model for a network gateway device.
type Gateway struct {
    ID          uint   `json:"id" gorm:"primary_key"` // database primary key
    Serial      string `json:"serial"`                // hardware serial number
    Name        string `json:"name"`                  // human-readable label
    IPv4Address string `json:"ipv4Address"`           // NOTE(review): stored as a plain string; format not validated here
}
|
package main
import (
"bufio"
"flag"
"fmt"
"os"
"sort"
"strings"
"time"
)
// action is the kind of event recorded in a guard-log entry (AoC 2018 day 4).
type action int

const (
    beginShift action = iota // a guard starts a shift
    fallAsleep               // the on-duty guard falls asleep
    wakeUp                   // the on-duty guard wakes up
)

// String returns the log phrase for the action, indexed by its value.
func (a *action) String() string {
    return [...]string{"begins shift", "falls asleep", "wakes up"}[*a]
}

// entry is one parsed log line: which guard (only set for beginShift),
// what happened, and when.
type entry struct {
    guard     int
    action    action
    timestamp time.Time
}

// String renders the entry back in the input log's format; the guard id is
// only meaningful (and printed) for beginShift entries.
func (e *entry) String() string {
    if e.action == beginShift {
        return fmt.Sprintf("[%s] Guard #%d %s", e.timestamp, e.guard, e.action.String())
    }
    return fmt.Sprintf("[%s] %s", e.timestamp, e.action.String())
}

// entrySlice implements sort.Interface to order entries chronologically.
type entrySlice []entry

func (e entrySlice) Len() int {
    return len(e)
}

func (e entrySlice) Less(i, j int) bool {
    return e[i].timestamp.Before(e[j].timestamp)
}

func (e entrySlice) Swap(i, j int) {
    e[i], e[j] = e[j], e[i]
}
// main parses the guard log from the input file, sorts the entries by
// timestamp, and runs both puzzle strategies.
func main() {
    filePath := flag.String("p", "input.txt", "Input file path")
    flag.Parse()
    f, err := os.Open(*filePath)
    if err != nil {
        fmt.Fprintf(os.Stderr, "Opening input file: %v\n", err)
        os.Exit(1)
    }
    defer f.Close()
    entries := make(entrySlice, 0)
    scanner := bufio.NewScanner(f)
    for scanner.Scan() {
        text := scanner.Text()
        // [1518-07-08 00:20] wakes up
        var year, month, day, hour, minute int
        if _, err := fmt.Sscanf(text, "[%d-%d-%d %d:%d]", &year, &month, &day, &hour, &minute); err != nil {
            fmt.Fprintf(os.Stderr, "scanning data: %v\n", err)
            os.Exit(1)
        }
        e := entry{timestamp: time.Date(year, time.Month(month), day, hour, minute, 0, 0, time.UTC)}
        // Strip the "[...] " timestamp prefix; what's left is the event text.
        text = text[strings.Index(text, "] ")+2:]
        // n == 1 iff the line is a "Guard #N begins shift" line; the scan
        // error is deliberately ignored for the other two event kinds.
        n, _ := fmt.Sscanf(text, "Guard #%d begins shift", &e.guard)
        switch {
        case n == 1:
            e.action = beginShift
        case text == "falls asleep":
            e.action = fallAsleep
        case text == "wakes up":
            e.action = wakeUp
        }
        entries = append(entries, e)
    }
    // Input lines are unordered; both strategies require chronological order.
    sort.Sort(entries)
    firstStrategy(entries)
    secondStrategy(entries)
}
// firstStrategy finds the guard with the most total minutes asleep, then that
// guard's most-slept minute, and prints guard * minute. entries must be
// sorted chronologically.
func firstStrategy(entries []entry) {
    var sleepyguard int
    asleep := map[int]int{} // guard id -> total minutes asleep
    var guard, from int
    // Pass 1: total up sleep per guard, tracking the sleepiest so far.
    for _, e := range entries {
        switch e.action {
        case beginShift:
            guard = e.guard
        case fallAsleep:
            from = e.timestamp.Minute()
        case wakeUp:
            // Sleep intervals lie within the midnight hour, so minute
            // arithmetic alone measures the interval length.
            t := e.timestamp.Minute() - from
            asleep[guard] += t
            if asleep[guard] > asleep[sleepyguard] {
                sleepyguard = guard
            }
        }
    }
    // Pass 2: for the sleepiest guard only, histogram the asleep minutes.
    minutes := [60]int{}
    guard = -1
    var sleepyminute int
    for _, e := range entries {
        if e.action == beginShift {
            guard = e.guard
            continue
        }
        if guard != sleepyguard {
            continue
        }
        switch e.action {
        case fallAsleep:
            from = e.timestamp.Minute()
        case wakeUp:
            to := e.timestamp.Minute()
            for i := from; i < to; i++ {
                minutes[i]++
                if minutes[i] > minutes[sleepyminute] {
                    sleepyminute = i
                }
            }
        }
    }
    fmt.Printf("Answer: guard %d * minute %d = %d\n",
        sleepyguard, sleepyminute, sleepyguard*sleepyminute)
}
// secondStrategy finds the (guard, minute) pair with the single highest
// asleep count across all guards and prints guard * minute. entries must be
// sorted chronologically.
func secondStrategy(entries []entry) {
    var sleepyguard, sleepyminute int
    minutes := map[int]*[60]int{} // guard id -> per-minute asleep histogram
    var guard, from int
    for _, e := range entries {
        switch e.action {
        case beginShift:
            guard = e.guard
            if minutes[guard] == nil {
                minutes[guard] = &[60]int{}
            }
            // Seed the running maximum with the first guard seen so the
            // comparison below never dereferences a missing histogram.
            if minutes[sleepyguard] == nil {
                sleepyguard = guard
            }
        case fallAsleep:
            from = e.timestamp.Minute()
        case wakeUp:
            to := e.timestamp.Minute()
            for i := from; i < to; i++ {
                minutes[guard][i]++
                if minutes[guard][i] > minutes[sleepyguard][sleepyminute] {
                    sleepyguard = guard
                    sleepyminute = i
                }
            }
        }
    }
    fmt.Printf("Answer: guard %d * minute %d = %d\n",
        sleepyguard, sleepyminute, sleepyguard*sleepyminute)
}
|
package main
import (
"fmt"
"io/ioutil"
"net/http"
"path"
"runtime"
"strings"
)
// main serves static files from the project's public/ directory on :8000
// through MyHandler, and prints the server error if ListenAndServe returns.
func main() {
    // http.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
    // 	w.Write([]byte("Hello World"))
    // })
    mux := http.NewServeMux()
    mh := &MyHandler{}
    mux.Handle("/", mh)
    // mh := &MyHandler{}
    //http.Handle("/", mh)
    fmt.Println(http.ListenAndServe(":8000", mux))
}
// MyHandler serves files from the public/ directory; it embeds http.Handler
// and provides its own ServeHTTP below.
type MyHandler struct {
    http.Handler
}
// urlHelper - returns the absolute path for the main file, go run main.go does not have the same path as an executable
// parentFilePathHelper derives the project root from this source file's
// compiled-in path (runtime.Caller) by stripping a "src/main" segment.
// NOTE(review): this relies on the source living under src/main/ — it breaks
// if the layout changes or the binary is built elsewhere; confirm the layout.
func parentFilePathHelper() string {
    _, filename, _, ok := runtime.Caller(0)
    if !ok {
        panic("No caller information")
    }
    fmt.Printf("Filename : %q, Dir : %q\n", filename, path.Dir(filename))
    return strings.Replace(path.Dir(filename), "src/main", "", 1)
}
// ServerHTTP -
// ServeHTTP serves the file public/<URL path>, replying 404 with a plain
// text body when the read fails, and "Hello World" for a nil request/URL.
//
// SECURITY(review): req.URL.Path is used in the file path unsanitized — a
// request containing ".." can escape the public/ directory (path traversal).
// Consider http.FileServer or filepath.Clean + prefix check before serving.
func (m *MyHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
    if req != nil && req.URL != nil {
        // `path` here shadows the imported "path" package within this scope.
        path := fmt.Sprintf("%s/public%s", parentFilePathHelper(), req.URL.Path)
        data, err := ioutil.ReadFile(string(path))
        if err == nil {
            w.Write(data)
        } else {
            fmt.Println("err : ", err)
            w.WriteHeader(404)
            w.Write([]byte("404 - " + http.StatusText(404)))
        }
    } else {
        w.Write([]byte("Hello World"))
    }
}
|
package facsqs
import (
"github.com/kataras/golog"
facclients "github.com/wagner-aos/go-fast-aws-connections/fac_clients"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/sqs"
"github.com/aws/aws-sdk-go/service/sqs/sqsiface"
)
var (
    // NOTE(review): package-level err appears unused — every function below
    // declares its own err with := — consider removing it.
    err error
    // sqsAPI is the shared SQS client, initialized by Start before any other
    // function in this package is used.
    sqsAPI sqsiface.SQSAPI
)
//Start - initializes SQS client
// Start must be called before any send/receive/delete helper; it installs
// the package-level SQS client for the given region, profile and endpoint.
func Start(region, profile, endpoint string) {
    sqsAPI = facclients.SQS(region, profile, endpoint)
}
//SendMessageInputToQueueURL - it sends message input to any SQS Queue URL
// The input's QueueUrl is used as-is (already a full URL).
func SendMessageInputToQueueURL(messageInput *sqs.SendMessageInput) (*sqs.SendMessageOutput, error) {
    return messageSender(messageInput)
}
//SendMessageInput - it sends message input to any SQS Queue
// Unlike SendMessageInputToQueueURL, the input's QueueUrl is treated as a
// queue NAME and resolved to a full URL before sending.
func SendMessageInput(messageInput *sqs.SendMessageInput) (*sqs.SendMessageOutput, error) {
    queueURL, err := GetQueueURL(*messageInput.QueueUrl)
    if err != nil {
        return nil, err
    }
    messageInput.SetQueueUrl(*queueURL)
    return messageSender(messageInput)
}
//SendMessage - it sends message to any SQS Queue
// The queue name is resolved to its URL first; the message body is sent
// without group/dedup attributes (standard queue semantics).
func SendMessage(queueName string, message string) (*sqs.SendMessageOutput, error) {
    queueURL, err := GetQueueURL(queueName)
    if err != nil {
        return nil, err
    }
    messageInput := &sqs.SendMessageInput{
        MessageBody: aws.String(message),
        //MessageGroupId: aws.String("GroupID"),
        QueueUrl: queueURL,
    }
    return messageSender(messageInput)
}
//SendMessageToQueueURL - it sends message to any SQS Queue URL
// queueURL must already be a full queue URL; no lookup is performed.
func SendMessageToQueueURL(queueURL string, message string) (*sqs.SendMessageOutput, error) {
    messageInput := &sqs.SendMessageInput{
        MessageBody: aws.String(message),
        //MessageGroupId: aws.String("GroupID"),
        QueueUrl: aws.String(queueURL),
    }
    return messageSender(messageInput)
}
// messageSender performs the actual SQS SendMessage call, logging failures
// and the resulting message id on success.
func messageSender(messageInput *sqs.SendMessageInput) (*sqs.SendMessageOutput, error) {
    result, err := sqsAPI.SendMessage(messageInput)
    if err != nil {
        golog.Errorf("[fac_sqs]-Error sending message to queue: %s , %s ", *messageInput.QueueUrl, err)
        return nil, err
    }
    golog.Info("[fac_sqs]-Send Message OK.")
    golog.Infof("[fac_sqs]-MessageID: %s", *result.MessageId)
    return result, nil
}
//ListQueues - list all available sqs queues
// ListQueues returns the URLs of every SQS queue visible to the client,
// logging each one. On error it logs and returns nil.
//
// Bug fix: the original logged the error but then dereferenced the nil
// result (result.QueueUrls), panicking on any ListQueues failure; we now
// return early instead.
func ListQueues() []*string {
    result, err := sqsAPI.ListQueues(nil)
    if err != nil {
        golog.Errorf("[fac_sqs]-Error listing queues: %s", err)
        return nil
    }
    for _, b := range result.QueueUrls {
        golog.Infof("[fac_sqs]-* %s", aws.StringValue(b))
    }
    return result.QueueUrls
}
//ReceiveMessage - it receives message from any SQS Queue
// The queue name is resolved to its URL, then at most one message is polled
// with a 1-second long-poll wait and all message attributes requested.
func ReceiveMessage(queueName string) (*sqs.ReceiveMessageOutput, error) {
    queueURL, err := GetQueueURL(queueName)
    if err != nil {
        return nil, err
    }
    receiveMessageInput := &sqs.ReceiveMessageInput{
        AttributeNames: []*string{
            aws.String(sqs.MessageSystemAttributeNameSentTimestamp),
        },
        MessageAttributeNames: []*string{
            aws.String(sqs.QueueAttributeNameAll),
        },
        QueueUrl:            queueURL,
        MaxNumberOfMessages: aws.Int64(int64(1)),
        WaitTimeSeconds:     aws.Int64(int64(1)),
    }
    output, err := messageReceiver(receiveMessageInput)
    return output, err
}
//ReceiveMessageInput - it receives message input to any SQS Queue
// Thin wrapper: the caller supplies the fully-built ReceiveMessageInput.
func ReceiveMessageInput(messageInput *sqs.ReceiveMessageInput) (*sqs.ReceiveMessageOutput, error) {
    output, err := messageReceiver(messageInput)
    return output, err
}
// messageReceiver performs the actual SQS ReceiveMessage call with
// error/debug logging.
func messageReceiver(receiveMessageInput *sqs.ReceiveMessageInput) (*sqs.ReceiveMessageOutput, error) {
    output, err := sqsAPI.ReceiveMessage(receiveMessageInput)
    if err != nil {
        golog.Errorf("[fac_sqs] - %s", err)
        return nil, err
    }
    golog.Debug("[fac_sqs]-Receive Message OK.")
    golog.Debugf("[fac_sqs]-Message: %v", output.Messages)
    return output, nil
}
//DeleteMessageInput - it deletes messages
// Deletes a single message identified by the input's receipt handle.
func DeleteMessageInput(deleteMessageInput *sqs.DeleteMessageInput) (*sqs.DeleteMessageOutput, error) {
    output, err := messageDelete(deleteMessageInput)
    return output, err
}

//DeleteMessageBatchInput - it deletes batch messages
// Deletes up to 10 messages in a single SQS batch call.
func DeleteMessageBatchInput(deleteMessageBatchInput *sqs.DeleteMessageBatchInput) (*sqs.DeleteMessageBatchOutput, error) {
    output, err := messageBatchDelete(deleteMessageBatchInput)
    return output, err
}
// messageDelete performs the actual SQS DeleteMessage call with
// error/debug logging.
func messageDelete(deleteMessageInput *sqs.DeleteMessageInput) (*sqs.DeleteMessageOutput, error) {
    output, err := sqsAPI.DeleteMessage(deleteMessageInput)
    if err != nil {
        golog.Errorf("[fac_sqs] - %s", err)
        return nil, err
    }
    golog.Debug("[fac_sqs]-Delete Message OK.")
    golog.Debugf("[fac_sqs]- %v", output.String())
    return output, nil
}

// messageBatchDelete performs the actual SQS DeleteMessageBatch call with
// error/debug logging.
func messageBatchDelete(deleteMessageBatchInput *sqs.DeleteMessageBatchInput) (*sqs.DeleteMessageBatchOutput, error) {
    output, err := sqsAPI.DeleteMessageBatch(deleteMessageBatchInput)
    if err != nil {
        golog.Errorf("[fac_sqs] - %s", err)
        return nil, err
    }
    golog.Debug("[fac_sqs]-Delete Batch Message OK.")
    golog.Debugf("[fac_sqs]- %v", output.String())
    return output, nil
}
//GetQueueURL - get queue entire URL in order to send messages to SQS.
// Resolves a queue name to its full URL via the SQS GetQueueUrl API.
func GetQueueURL(queueName string) (*string, error) {
    output, err := sqsAPI.GetQueueUrl(&sqs.GetQueueUrlInput{
        QueueName: &queueName,
    })
    if err != nil {
        golog.Errorf("[fac_sqs]-Error recovering queueURL: %s", err)
        return nil, err
    }
    return output.QueueUrl, nil
}
|
package main
import (
"fmt"
"strconv"
)
// instructionSet groups the memory-write instructions that share one bitmask
// (AoC 2020 day 14 input block).
type instructionSet struct {
    mask         string
    instructions []instruction
}
// instruction is a single masked memory write: store value at address.
type instruction struct {
    address int
    value   int
}

// getMaskedValue applies a 36-character bitmask to the instruction's value:
// '0' and '1' positions force that bit, 'X' positions keep the value's own
// bit. It panics on any other mask character.
//
// Improvements over the original: the redundant `break`s and double
// string() conversions are gone, the result is built in a preallocated
// []byte instead of quadratic string concatenation, and the previously
// ignored ParseInt error is surfaced.
func (i *instruction) getMaskedValue(mask string) int {
    // Render the value as a zero-padded 36-digit binary string so each mask
    // position lines up with one value bit.
    padded := fmt.Sprintf("%036s", strconv.FormatInt(int64(i.value), 2))
    masked := make([]byte, len(mask))
    for pos := 0; pos < len(mask); pos++ {
        switch mask[pos] {
        case '0', '1':
            masked[pos] = mask[pos] // mask forces this bit
        case 'X':
            masked[pos] = padded[pos] // keep the value's bit
        default:
            panic(fmt.Sprintf("Unknown mask value: %s", string(mask[pos])))
        }
    }
    value, err := strconv.ParseInt(string(masked), 2, 64)
    if err != nil {
        // Unreachable for well-formed masks; previously silently ignored.
        panic(err)
    }
    return int(value)
}
|
package main
// esbuildVersion is the version string reported by this build of esbuild.
const esbuildVersion = "0.4.1"
|
/*
* Copyright 2018-2019 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package application_test
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/buildpack/libbuildpack/application"
"github.com/buildpack/libbuildpack/internal"
"github.com/buildpack/libbuildpack/logger"
. "github.com/onsi/gomega"
"github.com/sclevine/spec"
"github.com/sclevine/spec/report"
)
// TestApplication verifies that application.DefaultApplication roots itself
// at the process working directory and that FileExists finds files in the
// root and subdirectories (and reports absence correctly).
func TestApplication(t *testing.T) {
    spec.Run(t, "Application", func(t *testing.T, when spec.G, it spec.S) {
        g := NewGomegaWithT(t)
        var (
            root      string
            app       application.Application
            wdCleanUp func()
        )
        it.Before(func() {
            // Point the working directory at a fresh scratch dir so
            // DefaultApplication picks it up as the app root.
            root = internal.ScratchDir(t, "application")
            wdCleanUp = internal.ReplaceWorkingDirectory(t, root)
            var err error
            app, err = application.DefaultApplication(logger.Logger{})
            g.Expect(err).NotTo(HaveOccurred())
        })
        it.After(func() {
            wdCleanUp()
        })
        it("extracts root from working directory", func() {
            g.Expect(app.Root).To(Equal(root))
        })
        when("file existence checks", func() {
            it("exists in root", func() {
                file := "exists.txt"
                err := ioutil.WriteFile(filepath.Join(app.Root, file), []byte("content"), 0600)
                g.Expect(err).NotTo(HaveOccurred())
                exists, err := app.FileExists(file)
                g.Expect(err).NotTo(HaveOccurred())
                g.Expect(exists).Should(BeTrue())
            })
            it("exists in subdir", func() {
                file := "subdir/subdir2/exists.txt"
                err := os.MkdirAll(filepath.Dir(filepath.Join(app.Root, file)), 0700)
                g.Expect(err).NotTo(HaveOccurred())
                err = ioutil.WriteFile(filepath.Join(app.Root, file), []byte("content"), 0600)
                g.Expect(err).NotTo(HaveOccurred())
                exists, err := app.FileExists(file)
                g.Expect(err).NotTo(HaveOccurred())
                g.Expect(exists).Should(BeTrue())
            })
            it("does not exist", func() {
                exists, err := app.FileExists("doesnotexist.txt")
                g.Expect(err).NotTo(HaveOccurred())
                g.Expect(exists).Should(BeFalse())
            })
        })
    }, spec.Report(report.Terminal{}))
}
|
// +build linux darwin freebsd
package main
import (
"log"
"os"
"os/signal"
"syscall"
kcp "github.com/xtaci/kcp-go/v5"
)
// init starts the background signal handler for the lifetime of the process.
func init() {
    go sigHandler()
}
// sigHandler dumps KCP SNMP counters on every SIGUSR1 and ignores SIGPIPE.
// It loops forever; build tags restrict this file to unix-like platforms.
func sigHandler() {
    ch := make(chan os.Signal, 1)
    signal.Notify(ch, syscall.SIGUSR1)
    signal.Ignore(syscall.SIGPIPE)
    for {
        switch <-ch {
        case syscall.SIGUSR1:
            log.Printf("KCP SNMP:%+v", kcp.DefaultSnmp.Copy())
        }
    }
}
|
package chatbots
import (
"encoding/json"
"net/url"
"strconv"
tgbotapi "github.com/go-telegram-bot-api/telegram-bot-api"
)
// BotCommand is one entry of the Telegram Bot API command list
// (set/getMyCommands): the slash command and its description.
type BotCommand struct {
    Command     string `json:"command"`
    Description string `json:"description"`
}
// setMyCommands registers the bot's command list via the Telegram Bot API
// "setMyCommands" method; the list is sent as a JSON-encoded form value.
func (c ChatBot) setMyCommands(commands []BotCommand) (response tgbotapi.APIResponse, err error) {
    var payload []byte
    if payload, err = json.Marshal(commands); err != nil {
        return
    }
    v := url.Values{}
    v.Add("commands", string(payload))
    return c.botClient.MakeRequest("setMyCommands", v)
}
// getMyCommands fetches the bot's currently registered command list via the
// Telegram Bot API "getMyCommands" method.
func (c ChatBot) getMyCommands() (commands []BotCommand, err error) {
    v := url.Values{}
    resp, err := c.botClient.MakeRequest("getMyCommands", v)
    if err != nil {
        return
    }
    err = json.Unmarshal(resp.Result, &commands)
    return
}
// WebhookConfig contains information about a SetWebhook request.
// It extends the upstream config with the allowed_updates filter, which the
// embedded tgbotapi.WebhookConfig does not expose.
type WebhookConfig struct {
    tgbotapi.WebhookConfig
    AllowedUpdates []string
}
// SetWebhook sets a webhook.
//
// If this is set, GetUpdates will not get any data!
//
// If you do not have a legitimate TLS certificate, you need to include
// your self signed certificate with the config.
//
// Without a certificate the request is a plain form POST; with one, the
// certificate file is uploaded alongside the parameters.
func (c ChatBot) setWebhook(config WebhookConfig) (tgbotapi.APIResponse, error) {
    if config.Certificate == nil {
        v := url.Values{}
        v.Add("url", config.URL.String())
        if config.MaxConnections != 0 {
            v.Add("max_connections", strconv.Itoa(config.MaxConnections))
        }
        if len(config.AllowedUpdates) != 0 {
            // Assign directly: allowed_updates is a repeated form field.
            v["allowed_updates"] = config.AllowedUpdates
        }
        return c.botClient.MakeRequest("setWebhook", v)
    }
    params := make(map[string]string)
    params["url"] = config.URL.String()
    if config.MaxConnections != 0 {
        params["max_connections"] = strconv.Itoa(config.MaxConnections)
    }
    // NOTE(review): AllowedUpdates is not forwarded on the certificate
    // upload path — confirm whether that is intentional.
    resp, err := c.botClient.UploadFile("setWebhook", params, "certificate", config.Certificate)
    if err != nil {
        return tgbotapi.APIResponse{}, err
    }
    return resp, nil
}
// deleteWebhook removes any configured webhook so the bot can fall back to
// polling with GetUpdates.
func (c ChatBot) deleteWebhook() (tgbotapi.APIResponse, error) {
    params := url.Values{}
    return c.botClient.MakeRequest("deleteWebhook", params)
}
|
// Copyright 2020, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package splunkhecexporter
import (
"compress/gzip"
"context"
"errors"
"io/ioutil"
"math"
"net"
"net/http"
"net/url"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/component/componenttest"
"go.opentelemetry.io/collector/consumer/pdata"
"go.opentelemetry.io/collector/translator/conventions"
"go.uber.org/zap"
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk"
)
// createMetricsData builds a pdata.Metrics fixture with one resource (k0/k1
// attributes) and numberOfDataPoints double-gauge points, one per second
// with a growing millisecond offset.
func createMetricsData(numberOfDataPoints int) pdata.Metrics {
    doubleVal := 1234.5678
    metrics := pdata.NewMetrics()
    rm := pdata.NewResourceMetrics()
    rm.Resource().Attributes().InsertString("k0", "v0")
    rm.Resource().Attributes().InsertString("k1", "v1")
    metrics.ResourceMetrics().Append(rm)
    for i := 0; i < numberOfDataPoints; i++ {
        tsUnix := time.Unix(int64(i), int64(i)*time.Millisecond.Nanoseconds())
        ilm := pdata.NewInstrumentationLibraryMetrics()
        metric := pdata.NewMetric()
        metric.SetName("gauge_double_with_dims")
        metric.SetDataType(pdata.MetricDataTypeDoubleGauge)
        doublePt := pdata.NewDoubleDataPoint()
        doublePt.SetTimestamp(pdata.TimestampFromTime(tsUnix))
        doublePt.SetValue(doubleVal)
        doublePt.LabelsMap().Insert("k/n0", "vn0")
        doublePt.LabelsMap().Insert("k/n1", "vn1")
        doublePt.LabelsMap().Insert("k/r0", "vr0")
        doublePt.LabelsMap().Insert("k/r1", "vr1")
        metric.DoubleGauge().DataPoints().Append(doublePt)
        ilm.Metrics().Append(metric)
        rm.InstrumentationLibraryMetrics().Append(ilm)
    }
    return metrics
}
// createTraceData builds a pdata.Traces fixture with numberOfTraces "root"
// spans under a single resource; every even-indexed span additionally gets a
// parent span id and an OK status.
func createTraceData(numberOfTraces int) pdata.Traces {
    traces := pdata.NewTraces()
    traces.ResourceSpans().Resize(1)
    rs := traces.ResourceSpans().At(0)
    rs.Resource().Attributes().InsertString("resource", "R1")
    rs.InstrumentationLibrarySpans().Resize(1)
    ils := rs.InstrumentationLibrarySpans().At(0)
    ils.Spans().Resize(numberOfTraces)
    for i := 0; i < numberOfTraces; i++ {
        span := ils.Spans().At(i)
        span.SetName("root")
        // Each span covers one second, shifted by its index.
        span.SetStartTime(pdata.Timestamp((i + 1) * 1e9))
        span.SetEndTime(pdata.Timestamp((i + 2) * 1e9))
        span.SetTraceID(pdata.NewTraceID([16]byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}))
        span.SetSpanID(pdata.NewSpanID([8]byte{0, 0, 0, 0, 0, 0, 0, 1}))
        span.SetTraceState("foo")
        if i%2 == 0 {
            span.SetParentSpanID(pdata.NewSpanID([8]byte{1, 2, 3, 4, 5, 6, 7, 8}))
            span.Status().SetCode(pdata.StatusCodeOk)
            span.Status().SetMessage("ok")
        }
    }
    return traces
}
// createLogData builds a pdata.Logs fixture with numberOfLogs "mylog"
// records carrying the Splunk routing attributes (source, sourcetype,
// index, host) plus one custom field, timestamped i milliseconds apart.
func createLogData(numberOfLogs int) pdata.Logs {
    logs := pdata.NewLogs()
    logs.ResourceLogs().Resize(1)
    rl := logs.ResourceLogs().At(0)
    rl.InstrumentationLibraryLogs().Resize(1)
    ill := rl.InstrumentationLibraryLogs().At(0)
    for i := 0; i < numberOfLogs; i++ {
        ts := pdata.Timestamp(int64(i) * time.Millisecond.Nanoseconds())
        logRecord := pdata.NewLogRecord()
        logRecord.Body().SetStringVal("mylog")
        logRecord.Attributes().InsertString(conventions.AttributeServiceName, "myapp")
        logRecord.Attributes().InsertString(splunk.SourcetypeLabel, "myapp-type")
        logRecord.Attributes().InsertString(splunk.IndexLabel, "myindex")
        logRecord.Attributes().InsertString(conventions.AttributeHostName, "myhost")
        logRecord.Attributes().InsertString("custom", "custom")
        logRecord.SetTimestamp(ts)
        ill.Logs().Append(logRecord)
    }
    return logs
}
// CapturingData is a test HTTP handler that forwards each request body to
// receivedRequest, optionally asserting gzip encoding, and replies with a
// fixed status code.
type CapturingData struct {
    testing          *testing.T
    receivedRequest  chan string
    statusCode       int
    checkCompression bool
}

// ServeHTTP captures the request body. The body is sent on a goroutine so
// the handler can reply even when no test is reading from the channel.
func (c *CapturingData) ServeHTTP(w http.ResponseWriter, r *http.Request) {
    if c.checkCompression {
        if r.Header.Get("Content-Encoding") != "gzip" {
            c.testing.Fatal("No compression")
        }
    }
    body, err := ioutil.ReadAll(r.Body)
    if err != nil {
        panic(err)
    }
    go func() {
        c.receivedRequest <- string(body)
    }()
    w.WriteHeader(c.statusCode)
}
// runMetricsExport spins up a capturing HTTP server, builds a metrics
// exporter pointed at it, consumes a generated metrics payload, and returns
// the raw request body the server received (or a timeout error).
// NOTE(review): near-duplicate of runTraceExport/runLogExport — candidates
// for a shared helper.
func runMetricsExport(disableCompression bool, numberOfDataPoints int, t *testing.T) (string, error) {
    receivedRequest := make(chan string)
    capture := CapturingData{testing: t, receivedRequest: receivedRequest, statusCode: 200, checkCompression: !disableCompression}
    // Port 0: let the OS pick a free port.
    listener, err := net.Listen("tcp", "127.0.0.1:0")
    if err != nil {
        panic(err)
    }
    s := &http.Server{
        Handler: &capture,
    }
    go func() {
        panic(s.Serve(listener))
    }()
    factory := NewFactory()
    cfg := factory.CreateDefaultConfig().(*Config)
    cfg.Endpoint = "http://" + listener.Addr().String() + "/services/collector"
    cfg.DisableCompression = disableCompression
    cfg.Token = "1234-1234"
    params := component.ExporterCreateParams{Logger: zap.NewNop()}
    exporter, err := factory.CreateMetricsExporter(context.Background(), params, cfg)
    assert.NoError(t, err)
    assert.NoError(t, exporter.Start(context.Background(), componenttest.NewNopHost()))
    defer exporter.Shutdown(context.Background())
    md := createMetricsData(numberOfDataPoints)
    err = exporter.ConsumeMetrics(context.Background(), md)
    assert.NoError(t, err)
    select {
    case request := <-receivedRequest:
        return request, nil
    case <-time.After(1 * time.Second):
        return "", errors.New("timeout")
    }
}
// runTraceExport is the traces counterpart of runMetricsExport: it exports
// generated spans through a real HTTP round trip and returns the captured
// request body (or a timeout error).
func runTraceExport(disableCompression bool, numberOfTraces int, t *testing.T) (string, error) {
    receivedRequest := make(chan string)
    capture := CapturingData{testing: t, receivedRequest: receivedRequest, statusCode: 200, checkCompression: !disableCompression}
    listener, err := net.Listen("tcp", "127.0.0.1:0")
    if err != nil {
        panic(err)
    }
    s := &http.Server{
        Handler: &capture,
    }
    go func() {
        panic(s.Serve(listener))
    }()
    factory := NewFactory()
    cfg := factory.CreateDefaultConfig().(*Config)
    cfg.Endpoint = "http://" + listener.Addr().String() + "/services/collector"
    cfg.DisableCompression = disableCompression
    cfg.Token = "1234-1234"
    params := component.ExporterCreateParams{Logger: zap.NewNop()}
    exporter, err := factory.CreateTracesExporter(context.Background(), params, cfg)
    assert.NoError(t, err)
    assert.NoError(t, exporter.Start(context.Background(), componenttest.NewNopHost()))
    defer exporter.Shutdown(context.Background())
    td := createTraceData(numberOfTraces)
    err = exporter.ConsumeTraces(context.Background(), td)
    assert.NoError(t, err)
    select {
    case request := <-receivedRequest:
        return request, nil
    case <-time.After(1 * time.Second):
        return "", errors.New("timeout")
    }
}
// runLogExport is the logs counterpart of runMetricsExport: it exports
// generated log records through a real HTTP round trip and returns the
// captured request body (or a timeout error).
func runLogExport(disableCompression bool, numberOfLogs int, t *testing.T) (string, error) {
    receivedRequest := make(chan string)
    capture := CapturingData{testing: t, receivedRequest: receivedRequest, statusCode: 200, checkCompression: !disableCompression}
    listener, err := net.Listen("tcp", "127.0.0.1:0")
    if err != nil {
        panic(err)
    }
    s := &http.Server{
        Handler: &capture,
    }
    go func() {
        panic(s.Serve(listener))
    }()
    factory := NewFactory()
    cfg := factory.CreateDefaultConfig().(*Config)
    cfg.Endpoint = "http://" + listener.Addr().String() + "/services/collector"
    cfg.DisableCompression = disableCompression
    cfg.Token = "1234-1234"
    params := component.ExporterCreateParams{Logger: zap.NewNop()}
    exporter, err := factory.CreateLogsExporter(context.Background(), params, cfg)
    assert.NoError(t, err)
    assert.NoError(t, exporter.Start(context.Background(), componenttest.NewNopHost()))
    defer exporter.Shutdown(context.Background())
    ld := createLogData(numberOfLogs)
    err = exporter.ConsumeLogs(context.Background(), ld)
    assert.NoError(t, err)
    select {
    case request := <-receivedRequest:
        return request, nil
    case <-time.After(1 * time.Second):
        return "", errors.New("timeout")
    }
}
// TestReceiveTraces checks the exact uncompressed HEC payload produced for
// three generated spans (even-indexed spans carry parent id / OK status).
func TestReceiveTraces(t *testing.T) {
    actual, err := runTraceExport(true, 3, t)
    assert.NoError(t, err)
    expected := `{"time":1,"host":"unknown","event":{"trace_id":"01010101010101010101010101010101","span_id":"0000000000000001","parent_span_id":"0102030405060708","name":"root","end_time":2000000000,"kind":"SPAN_KIND_UNSPECIFIED","status":{"message":"ok","code":"STATUS_CODE_OK"},"start_time":1000000000},"fields":{"resource":"R1"}}`
    expected += "\n\r\n\r\n"
    expected += `{"time":2,"host":"unknown","event":{"trace_id":"01010101010101010101010101010101","span_id":"0000000000000001","parent_span_id":"","name":"root","end_time":3000000000,"kind":"SPAN_KIND_UNSPECIFIED","status":{"message":"","code":"STATUS_CODE_UNSET"},"start_time":2000000000},"fields":{"resource":"R1"}}`
    expected += "\n\r\n\r\n"
    expected += `{"time":3,"host":"unknown","event":{"trace_id":"01010101010101010101010101010101","span_id":"0000000000000001","parent_span_id":"0102030405060708","name":"root","end_time":4000000000,"kind":"SPAN_KIND_UNSPECIFIED","status":{"message":"ok","code":"STATUS_CODE_OK"},"start_time":3000000000},"fields":{"resource":"R1"}}`
    expected += "\n\r\n\r\n"
    assert.Equal(t, expected, actual)
}

// TestReceiveLogs checks the exact uncompressed HEC payload for three log
// records; the first record (timestamp 0) carries no "time" field.
func TestReceiveLogs(t *testing.T) {
    actual, err := runLogExport(true, 3, t)
    assert.NoError(t, err)
    expected := `{"host":"myhost","source":"myapp","sourcetype":"myapp-type","index":"myindex","event":"mylog","fields":{"custom":"custom"}}`
    expected += "\n\r\n\r\n"
    expected += `{"time":0.001,"host":"myhost","source":"myapp","sourcetype":"myapp-type","index":"myindex","event":"mylog","fields":{"custom":"custom"}}`
    expected += "\n\r\n\r\n"
    expected += `{"time":0.002,"host":"myhost","source":"myapp","sourcetype":"myapp-type","index":"myindex","event":"mylog","fields":{"custom":"custom"}}`
    expected += "\n\r\n\r\n"
    assert.Equal(t, expected, actual)
}

// TestReceiveMetrics checks the exact uncompressed HEC payload for three
// gauge data points, including resource and label fields.
func TestReceiveMetrics(t *testing.T) {
    actual, err := runMetricsExport(true, 3, t)
    assert.NoError(t, err)
    expected := `{"host":"unknown","event":"metric","fields":{"k/n0":"vn0","k/n1":"vn1","k/r0":"vr0","k/r1":"vr1","k0":"v0","k1":"v1","metric_name:gauge_double_with_dims":1234.5678}}`
    expected += "\n\r\n\r\n"
    expected += `{"time":1.001,"host":"unknown","event":"metric","fields":{"k/n0":"vn0","k/n1":"vn1","k/r0":"vr0","k/r1":"vr1","k0":"v0","k1":"v1","metric_name:gauge_double_with_dims":1234.5678}}`
    expected += "\n\r\n\r\n"
    expected += `{"time":2.002,"host":"unknown","event":"metric","fields":{"k/n0":"vn0","k/n1":"vn1","k/r0":"vr0","k/r1":"vr1","k0":"v0","k1":"v1","metric_name:gauge_double_with_dims":1234.5678}}`
    expected += "\n\r\n\r\n"
    assert.Equal(t, expected, actual)
}
// TestReceiveTracesWithCompression only asserts that a gzip-compressed
// request arrives; CapturingData.ServeHTTP verifies the Content-Encoding.
func TestReceiveTracesWithCompression(t *testing.T) {
    request, err := runTraceExport(false, 1000, t)
    assert.NoError(t, err)
    assert.NotEqual(t, "", request)
}

// TestReceiveLogsWithCompression is the logs variant of the gzip check.
func TestReceiveLogsWithCompression(t *testing.T) {
    request, err := runLogExport(false, 1000, t)
    assert.NoError(t, err)
    assert.NotEqual(t, "", request)
}

// TestReceiveMetricsWithCompression is the metrics variant of the gzip check.
func TestReceiveMetricsWithCompression(t *testing.T) {
    request, err := runMetricsExport(false, 1000, t)
    assert.NoError(t, err)
    assert.NotEqual(t, "", request)
}
// TestErrorReceived verifies that an HTTP 500 from the endpoint is surfaced
// by ConsumeTraces; queueing and retries are disabled so the call is
// synchronous and fails exactly once.
func TestErrorReceived(t *testing.T) {
    receivedRequest := make(chan string)
    capture := CapturingData{receivedRequest: receivedRequest, statusCode: 500}
    listener, err := net.Listen("tcp", "127.0.0.1:0")
    if err != nil {
        panic(err)
    }
    s := &http.Server{
        Handler: &capture,
    }
    go func() {
        panic(s.Serve(listener))
    }()
    factory := NewFactory()
    cfg := factory.CreateDefaultConfig().(*Config)
    cfg.Endpoint = "http://" + listener.Addr().String() + "/services/collector"
    // Disable QueueSettings to ensure that we execute the request when calling ConsumeTraces
    // otherwise we will not see the error.
    cfg.QueueSettings.Enabled = false
    // Disable retries to not wait too much time for the return error.
    cfg.RetrySettings.Enabled = false
    cfg.DisableCompression = true
    cfg.Token = "1234-1234"
    params := component.ExporterCreateParams{Logger: zap.NewNop()}
    exporter, err := factory.CreateTracesExporter(context.Background(), params, cfg)
    assert.NoError(t, err)
    assert.NoError(t, exporter.Start(context.Background(), componenttest.NewNopHost()))
    defer exporter.Shutdown(context.Background())
    td := createTraceData(3)
    err = exporter.ConsumeTraces(context.Background(), td)
    select {
    case <-receivedRequest:
    case <-time.After(5 * time.Second):
        t.Fatal("Should have received request")
    }
    assert.EqualError(t, err, "HTTP 500 \"Internal Server Error\"")
}
// TestInvalidTraces: an empty payload (0 spans) produces no request, so the
// helper times out and returns an error.
func TestInvalidTraces(t *testing.T) {
    _, err := runTraceExport(false, 0, t)
    assert.Error(t, err)
}

// TestInvalidLogs: same empty-payload timeout check for logs.
func TestInvalidLogs(t *testing.T) {
    _, err := runLogExport(false, 0, t)
    assert.Error(t, err)
}

// TestInvalidMetrics: same empty-payload timeout check for metrics.
func TestInvalidMetrics(t *testing.T) {
    _, err := runMetricsExport(false, 0, t)
    assert.Error(t, err)
}
// TestInvalidURL verifies that an endpoint with an unsupported scheme
// (ftp) causes ConsumeTraces to fail with the transport error.
func TestInvalidURL(t *testing.T) {
	factory := NewFactory()
	cfg := factory.CreateDefaultConfig().(*Config)
	// Disable queuing to ensure that we execute the request when calling ConsumeTraces
	// otherwise we will not see the error.
	cfg.QueueSettings.Enabled = false
	// Disable retries to not wait too much time for the return error.
	cfg.RetrySettings.Enabled = false
	cfg.Endpoint = "ftp://example.com:134"
	cfg.Token = "1234-1234"
	params := component.ExporterCreateParams{Logger: zap.NewNop()}
	exporter, err := factory.CreateTracesExporter(context.Background(), params, cfg)
	assert.NoError(t, err)
	assert.NoError(t, exporter.Start(context.Background(), componenttest.NewNopHost()))
	defer exporter.Shutdown(context.Background())
	td := createTraceData(2)
	err = exporter.ConsumeTraces(context.Background(), td)
	assert.EqualError(t, err, "Post \"ftp://example.com:134/services/collector\": unsupported protocol scheme \"ftp\"")
}
// badJSON is a payload that cannot be marshaled: +Inf has no JSON encoding.
type badJSON struct {
	Foo float64 `json:"foo"`
}
// TestInvalidJson verifies that encodeBodyEvents reports the JSON
// marshaling error for an unencodable event value.
func TestInvalidJson(t *testing.T) {
	badEvent := badJSON{
		Foo: math.Inf(1),
	}
	syncPool := sync.Pool{New: func() interface{} {
		return gzip.NewWriter(nil)
	}}
	evs := []*splunk.Event{
		{
			Event: badEvent,
		},
		nil,
	}
	reader, _, err := encodeBodyEvents(&syncPool, evs, false)
	assert.Error(t, err, reader)
}
// TestStartAlwaysReturnsNil documents that client.start is a no-op that
// never fails, even on a zero-value client.
func TestStartAlwaysReturnsNil(t *testing.T) {
	c := client{}
	err := c.start(context.Background(), componenttest.NewNopHost())
	assert.NoError(t, err)
}
// TestInvalidJsonClient verifies that sendSplunkEvents turns a JSON
// marshaling failure (+Inf payload) into a permanent (non-retryable) error.
func TestInvalidJsonClient(t *testing.T) {
	badEvent := badJSON{
		Foo: math.Inf(1),
	}
	evs := []*splunk.Event{
		{
			Event: badEvent,
		},
		nil,
	}
	c := client{
		url: nil,
		zippers: sync.Pool{New: func() interface{} {
			return gzip.NewWriter(nil)
		}},
		config: &Config{},
	}
	err := c.sendSplunkEvents(context.Background(), evs)
	assert.EqualError(t, err, "Permanent error: json: unsupported value: +Inf")
}
// TestInvalidURLClient verifies that an unparsable target URL is also
// reported as a permanent (non-retryable) error.
func TestInvalidURLClient(t *testing.T) {
	c := client{
		url: &url.URL{Host: "in va lid"},
		zippers: sync.Pool{New: func() interface{} {
			return gzip.NewWriter(nil)
		}},
		config: &Config{},
	}
	err := c.sendSplunkEvents(context.Background(), []*splunk.Event{})
	assert.EqualError(t, err, "Permanent error: parse \"//in%20va%20lid\": invalid URL escape \"%20\"")
}
|
package nilable
// Int represents an int value that may be absent ("nil").
// The zero value is the nil Int.
type Int struct {
	val int
	has bool
}

// NilInt creates a new Int without a value.
func NilInt() Int {
	return Int{}
}

// NewInt creates a new Int holding the value v.
func NewInt(v int) Int {
	return Int{val: v, has: true}
}

// Has reports whether the Int has a value.
func (n Int) Has() bool {
	return n.has
}

// Get returns the Int's value (the zero int if it has none).
func (n Int) Get() int {
	return n.val
}

// Set sets the Int's value to v.
// Bug fix: the receiver must be a pointer — with the previous value
// receiver the mutation was applied to a copy and silently discarded.
func (n *Int) Set(v int) {
	n.val = v
	n.has = true
}

// Nil clears the Int back to having no value.
// Bug fix: pointer receiver, for the same reason as Set.
func (n *Int) Nil() {
	n.val = 0
	n.has = false
}
|
package _interface
import (
"context"
"github.com/muhammadisa/vanilla-microservice/model"
)
// Repository abstracts todo persistence: writing a single todo and
// reading back all stored todos.
type Repository interface {
	// WriteTodo stores one todo, returning any persistence error.
	WriteTodo(ctx context.Context, todo model.Todo) error
	// ReadTodos returns all stored todos (no error channel — failures
	// are presumably handled inside the implementation; confirm).
	ReadTodos(ctx context.Context) model.Todos
}
|
package main
import (
"database/sql"
"log"
"github.com/MarcelCode/ROWA/src/api"
"github.com/MarcelCode/ROWA/src/db"
"github.com/MarcelCode/ROWA/src/sensor"
"github.com/MarcelCode/ROWA/src/settings"
"github.com/MarcelCode/ROWA/src/util"
"github.com/labstack/echo"
"github.com/labstack/echo/middleware"
)
// main wires up the ROWA service: opens the SQLite store, optionally
// seeds it and starts the sensor/light/pump workers, then serves the
// HTTP API on :3000 via echo.
func main() {
	database, err := sql.Open("sqlite3", "rowa.db")
	if err != nil {
		log.Fatal(err)
	}
	defer database.Close()
	db.InitStore(&db.Database{Db: database})
	// In debug mode, (re)create the database schema/seed data.
	if settings.Debug {
		db.FunctionStore.DbSetup()
	}
	// With real hardware attached, read sensors and run the light/pump
	// schedulers; otherwise feed fake sensor data for development.
	if settings.ArduinoOn {
		go sensor.ReadSensorData()
		util.LightTimesRenew()
		util.PumpTimesRenew()
		go util.Runner()
	} else {
		go sensor.ReadFakeSensorData()
	}
	e := echo.New()
	e.Use(middleware.CORS())
	// Routes
	e.GET("/dashboard/sensor-data", api.GetSensorDataHandler)
	e.GET("/dashboard/harvestable-plants", api.GetHarvestablePlantsHandler)
	e.GET("/dashboard/plantable-plants", api.GetPlantablePlantsHandler)
	e.GET("/harvest/get-plant", api.GetHarvestablePlantHandler)
	e.POST("/harvest/harvestdone", api.HarvestDoneHandler)
	e.GET("/plant/blinkstop", api.StopModuleBlink)
	e.GET("/plant/get-position", api.PlantHandler)
	e.POST("/plant/finish", api.FinishPlantingHandler)
	e.GET("/dashboard/cattree/:module", api.GetCatTreeDataHandler)
	e.POST("/adminSettings/insert-light", api.InsertLightTimes)
	e.GET("/adminSettings/get-light", api.GetLightTimes)
	e.POST("/adminSettings/changelight", api.ChangeLightState)
	e.GET("/adminSettings/get-types", api.GetPlantTypes)
	e.GET("/adminSettings/get-knowntypes", api.GetKnownPlantTypes)
	e.POST("/adminSettings/insertmodule-change", api.InsertModuleChanges)
	e.POST("/adminSettings/insert-pump", api.InsertPumpTime)
	e.GET("/adminSettings/get-pump", api.GetPumpTimes)
	e.GET("/plant/get-all", api.AllPlantHandler)
	e.POST("/plant/plant-all", api.MassPlantingHandler)
	e.GET("/harvest/get-all", api.AllHarvestHandler)
	e.POST("/harvest/harvest-all", api.MassHarvestHandler)
	// Start server
	e.Logger.Fatal(e.Start(":3000"))
}
|
package dispatchers
import (
"errors"
"github.com/jeremija/gol/types"
)
// newDispatcherFunc constructs a Dispatcher from its configuration.
type newDispatcherFunc func(DispatcherConfig) Dispatcher
// dispatchers is the global name -> factory registry filled by RegisterDispatcher.
var dispatchers = map[string]newDispatcherFunc{}
// RegisterDispatcher records createDispatcher in the global registry
// under name, panicking if that name is already taken.
func RegisterDispatcher(name string, createDispatcher newDispatcherFunc) {
	_, exists := dispatchers[name]
	if exists {
		panic("Dispatcher " + name + " already registered")
	}
	dispatchers[name] = createDispatcher
}
// DispatcherConfig carries the settings handed to a dispatcher factory.
type DispatcherConfig struct {
	Database string
	Dispatcher string
	MaxBatchSize int
	Timeout string
	Props map[string]string
}
// Dispatcher consumes log lines; implementations run until Stop and can
// be awaited via Wait.
type Dispatcher interface {
	Dispatch(event types.Line) error
	Start()
	Stop()
	Wait()
}
// MustGetDispatcher looks up the factory named by config.Dispatcher and
// invokes it, panicking when no such dispatcher was registered.
func MustGetDispatcher(config DispatcherConfig) Dispatcher {
	factory, found := dispatchers[config.Dispatcher]
	if !found {
		panic(errors.New("Dispatcher '" + config.Dispatcher + "' not found"))
	}
	return factory(config)
}
|
/*
* @lc app=leetcode.cn id=94 lang=golang
*
* [94] 二叉树的中序遍历
*/
package main
import "fmt"
// TreeNode is a binary-tree node as used by LeetCode problem 94.
type TreeNode struct {
	Val int
	Left *TreeNode
	Right *TreeNode
}
// @lc code=start
func inorderTraversal(root *TreeNode) []int {
if root == nil {
return nil
}
ans := []int{}
ans = append(ans, inorderTraversal(root.Left)...)
ans = append(ans, root.Val)
ans = append(ans, inorderTraversal(root.Right)...)
return ans
}
// @lc code=end
// main is a smoke test: traversing the empty tree prints an empty result.
func main() {
	fmt.Println(inorderTraversal(nil))
}
|
package collatzconjecture
import "errors"
// CollatzConjecture returns the number of Collatz steps (halve when even,
// 3n+1 when odd) needed to reduce n to 1. It returns -1 and an error for
// n < 1, for which the sequence is undefined.
func CollatzConjecture(n int) (int, error) {
	if n < 1 {
		return -1, errors.New("n must be greater than zero")
	}
	steps := 0
	for n != 1 {
		if n%2 == 0 {
			n /= 2
		} else {
			n = 3*n + 1
		}
		steps++
	}
	return steps, nil
}
|
package main
import (
"encoding/json"
"sync"
"os"
"net"
"io/ioutil"
"fmt"
"log"
)
// Config mirrors config.json: addresses of the served endpoints plus
// polling/buffering parameters and the error-matching templates.
type Config struct {
	WebServerAddress string `json:"webServerAddress"`
	TcpCtrlAddress string `json:"tcpCtrlAddress"`
	TcpLogAddress string `json:"tcpLogAddress"`
	TcpNotifyAddress string `json:"tcpNotifyAddress"`
	ErrorsCheckPeriod int `json:"errorsCheckPeriod"`
	ErrorsCheckCount int `json:"errorsCheckCount"`
	EmptyCheckPeriod int `json:"emptyCheckPeriod"`
	NProcLogsSize int `json:"nProcLogsSize"`
	TcpCtrlBufferSize int `json:"tcpCtrlBufferSize"`
	TcpLogBufferSize int `json:"tcpLogBufferSize"`
	ErrorTemplates []string `json:"errorTemplates"`
}
// ProcessInfo tracks a monitored process; closing StopSignal presumably
// stops its watcher goroutine (confirm against the TCP servers).
type ProcessInfo struct {
	StopSignal chan struct{}
}
// Procs is the mutex-guarded process registry, keyed by process name.
type Procs struct {
	sync.Mutex
	mp map[string]ProcessInfo
}
// ErrorData is a single captured error line with its timestamp.
type ErrorData struct {
	Ts string `json:"ts"`
	Txt string `json:"txt"`
}
// Errors is the mutex-guarded error store, keyed by process name.
type Errors struct {
	sync.Mutex
	mp map[string][]ErrorData
}
// requiredMacAddress gates startup: one of the host's network interfaces
// must carry this MAC, otherwise the process exits (see the MAC-check init).
const requiredMacAddress = "0e:73:c2:b5:53:ed"
// Package-wide state populated by the init functions below and shared by
// the TCP/web servers.
var (
	isDebug bool // "debug" CLI arg: verbose startup output
	mustRunWebserver bool // "webserver" CLI arg
	mustNotify bool // "notify" CLI arg
	config Config // parsed ./config.json
	errorsCheckPeriod int64 // ErrorsCheckPeriod converted to microseconds-based units (x 1e6)
	nErrorTemplates int
	tcpNotifyAddr string
	procs Procs
	errors Errors
	logger *log.Logger // file-backed logger set up in the logging init
)
// init parses the command-line keywords: "help" prints usage and exits,
// while "debug"/"webserver"/"notify" toggle the corresponding flags.
func init() {
	args := os.Args[1:]
	if len(args) == 1 && args[0] == "help" {
		fmt.Println("usage: ./weberr [debug] [webserver] [notify]")
		os.Exit(0)
	}
	isDebug, mustRunWebserver, mustNotify = false, false, false
	for _, arg := range args {
		isDebug = isDebug || arg == "debug"
		mustRunWebserver = mustRunWebserver || arg == "webserver"
		mustNotify = mustNotify || arg == "notify"
	}
	if isDebug {
		fmt.Printf("isDebug: %t, mustRunWebserver: %t, mustNotify: %t\n", isDebug, mustRunWebserver, mustNotify)
	}
}
// init enforces the machine lock: the process only runs on the host whose
// interface list contains requiredMacAddress; otherwise it exits with 1.
func init() {
	if isDebug {fmt.Println("checking mac address...")}
	ifas, err := net.Interfaces()
	if err != nil {
		fmt.Println("check mac address error: " + err.Error())
		os.Exit(1)
	}
	isFound := false
	for _, ifa := range ifas {
		a := ifa.HardwareAddr.String()
		fmt.Println(a)
		if a == requiredMacAddress {
			isFound = true
		}
	}
	if !isFound {
		fmt.Println("Invalid mac adress")
		os.Exit(1)
	}
}
// init loads and parses ./config.json into the package-level config and
// derives errorsCheckPeriod/nErrorTemplates from it; any failure is fatal.
func init() {
	if isDebug {fmt.Println("loading config...")}
	configJson, err := ioutil.ReadFile("./config.json")
	if err != nil {
		fmt.Println("Read config error: " + err.Error())
		os.Exit(1)
	}
	err = json.Unmarshal(configJson, &config)
	if err != nil {
		fmt.Println("config json parse error: " + err.Error())
		os.Exit(1)
	}
	if isDebug {fmt.Printf("Config: %+v", config)}
	// Scale the configured period by 1e6 (unit conversion — presumably
	// to nanosecond-ish ticks; confirm against its consumers).
	errorsCheckPeriod = int64(config.ErrorsCheckPeriod) * 1e6
	nErrorTemplates = len(config.ErrorTemplates)
}
// init allocates the shared registries; writing to a nil map would panic.
func init() {
	if isDebug {fmt.Println("initiating proc and err data...")}
	procs.mp = make(map[string]ProcessInfo)
	errors.mp = make(map[string][]ErrorData)
}
// init sets up the package logger backed by an append-only file.
// The handle is intentionally never closed: the logger writes to it for
// the whole lifetime of the process.
func init() {
	f, err := os.OpenFile("text.log", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		fmt.Println("err open log file: ", err)
		// Fall back to stderr so logger.Println still produces output;
		// previously the logger was built around a nil *os.File and
		// every write failed silently.
		logger = log.New(os.Stderr, "prefix_", log.LstdFlags)
	} else {
		logger = log.New(f, "prefix_", log.LstdFlags)
	}
	logger.Println("start logging...")
}
// main launches the TCP control/log servers (and optionally the web
// server), then blocks forever — all work happens in the goroutines.
func main() {
	go TcpCtrlServer(config.TcpCtrlAddress)
	go TcpLogServer(config.TcpLogAddress)
	if mustRunWebserver {
		go WebServer(config.WebServerAddress)
	}
	select{}
}
|
package main
import "fmt"
// main prints the first seven Fibonacci numbers.
func main() {
	fmt.Println(fib(1), fib(2), fib(3), fib(4), fib(5), fib(6), fib(7))
}
// fib returns the n-th Fibonacci number with fib(1) == fib(2) == 1,
// computed iteratively by advancing the pair (prev, cur).
func fib(n int) int {
	prev, cur := 0, 1
	for step := n; step > 1; step-- {
		prev, cur = cur, prev+cur
	}
	return cur
}
|
package graphql_test
import (
"testing"
"github.com/ONSdigital/aws-appsync-generator/pkg/graphql"
"github.com/stretchr/testify/assert"
)
// TestNewFilterFromObject checks that a filter object is derived from an
// object's scalar fields: String/Int become Table*FilterInput entries
// (non-nullable flags cleared), while the non-scalar "custom" field
// (BananaObject) is dropped from the generated filter.
func TestNewFilterFromObject(t *testing.T) {
	in := &graphql.Object{
		Name: "TestObject",
		Fields: []*graphql.Field{
			{
				Name: "name",
				Type: &graphql.FieldType{
					Name: "String",
					IsList: false,
					NonNullable: true,
				},
			},
			{
				Name: "age",
				Type: &graphql.FieldType{
					Name: "Int",
					IsList: false,
					NonNullable: false,
				},
			},
			{
				Name: "custom",
				Type: &graphql.FieldType{
					Name: "BananaObject",
					IsList: false,
					NonNullable: false,
				},
			},
		},
	}
	expected := &graphql.FilterObject{
		Name: "TestObjectFilter",
		Fields: []*graphql.Field{
			{
				Name: "name",
				Type: &graphql.FieldType{
					Name: "TableStringFilterInput",
					IsList: false,
					NonNullable: false,
				},
			},
			{
				Name: "age",
				Type: &graphql.FieldType{
					Name: "TableIntFilterInput",
					IsList: false,
					NonNullable: false,
				},
			},
		},
	}
	generated := graphql.NewFilterFromObject(in)
	assert.Equal(t, expected, generated)
}
|
package main
import (
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"net/http"
"os"
"strings"
"time"
)
// build mirrors the subset of a CircleCI build JSON payload this tool uses.
type build struct {
	Branch string `json:"branch"`
	BuildURL string `json:"build_url"`
	Workflows workflow `json:"workflows"`
	StartTime string `json:"start_time"`
	BuildTimeMillis int `json:"build_time_millis"`
	Status string `json:"status"`
	Lifecycle string `json:"lifecycle"`
	BuildNum int `json:"build_num"`
	UserName string `json:"username"`
	RepoName string `json:"reponame"`
	CommitterName string `json:"committer_name"`
}
// workflow carries the workflow job name nested in a build payload.
type workflow struct {
	JobName string `json:"job_name"`
}
// Items is the Alfred script-filter JSON envelope.
type Items struct {
	Item []Item `json:"items"`
}
// Item is a single Alfred result row; Arg is the URL opened on selection.
type Item struct {
	Title string `json:"title"`
	Subtitle string `json:"subtitle"`
	Arg string `json:"arg"`
	Icon icon `json:"icon"`
}
// icon points Alfred at the status-colored icon file.
type icon struct {
	Path string `json:"path"`
}
// main is an Alfred workflow helper for CircleCI: in watch mode it polls
// one job until completion; otherwise it lists recent (or per-repo)
// builds, filters them, and prints Alfred script-filter JSON.
func main() {
	var token = flag.String("t", "secret", "CircleCI Token")
	var username = flag.String("u", "", "Username")
	var reponame = flag.String("r", "", "Reponame")
	var limit = flag.Int("l", 30, "Limit")
	var filter = flag.String("f", "", "Search Filter")
	var jobURL = flag.String("j", "", "JobURL (to watch)")
	var watchFlag = flag.Bool("w", false, "Watch job")
	var watchTimeout = flag.Duration("wt", 15*time.Minute, "Watch timeout, default 15m")
	flag.Parse()
	var r []build
	switch {
	case *watchFlag:
		// Watch mode prints a plain-text status instead of Alfred JSON.
		b := watch(*watchTimeout, *token, *jobURL)
		fmt.Printf("Job %s #%d [%s]\nStatus: %s - Outcome: %s", b.RepoName, b.BuildNum, b.Workflows.JobName, b.Lifecycle, b.Status)
		return
	case *username != "" && *reponame != "":
		r = search(*token, *username, *reponame, *limit)
	default:
		r = getRecent(*token, *limit)
	}
	items := filterItems(r, *filter)
	j, err := json.Marshal(Items{Item: items})
	if err != nil {
		fmt.Println("cannot marshal alfred response")
		os.Exit(1)
	}
	fmt.Println(string(j))
}
// getRecent fetches up to limit recent builds visible to the token.
func getRecent(token string, limit int) []build {
	var b []build
	url := fmt.Sprintf("https://circleci.com/api/v1.1/recent-builds?circle-token=%s&shallow=true&limit=%d", token, limit)
	query(url, &b)
	return b
}
// search fetches up to limit builds of one GitHub user/repository project.
func search(token, user, repository string, limit int) []build {
	var b []build
	url := fmt.Sprintf("https://circleci.com/api/v1.1/project/github/%s/%s?circle-token=%s&shallow=true&limit=%d", user, repository, token, limit)
	query(url, &b)
	return b
}
// watch polls the CircleCI API every 5s until the job at jobURL reaches
// the "finished" lifecycle or the timeout elapses; on timeout the
// returned build has "[TIMEOUT]" appended to its status.
func watch(timeout time.Duration, token, jobURL string) build {
	var b build
	// Reduce the job URL to the "<user>/<repo>/<num>" suffix the API expects.
	suffix := strings.Replace(jobURL, "https://circleci.com/gh/", "", -1)
	url := fmt.Sprintf("https://circleci.com/api/v1.1/project/github/%s?circle-token=%s&shallow=true", suffix, token)
	timer := time.NewTimer(timeout)
	// Release the timer's resources when the job finishes before timeout
	// (previously the timer was left running after return).
	defer timer.Stop()
	for {
		select {
		case <-timer.C:
			b.Status = b.Status + "[TIMEOUT]"
			return b
		default:
			query(url, &b)
			if b.Lifecycle == "finished" {
				return b
			}
			time.Sleep(5 * time.Second)
		}
	}
}
// query performs a GET against url and JSON-decodes the response into b.
// Any failure is fatal: the error is printed and the process exits with
// status 1 (this is a one-shot CLI tool).
func query(url string, b interface{}) {
	client := &http.Client{
		Timeout: 5 * time.Second,
	}
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		// Previously ignored: a malformed URL left req nil and
		// client.Do would have panicked.
		fmt.Println("error while building the request:", err)
		os.Exit(1)
	}
	req.Header.Add("Accept", "application/json")
	res, err := client.Do(req)
	if err != nil {
		fmt.Println("error while making the call:", err)
		os.Exit(1)
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		fmt.Println("status code was:", res.Status)
		os.Exit(1)
	}
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		fmt.Println("error while reading the body:", err)
		os.Exit(1)
	}
	err = json.Unmarshal(body, b)
	if err != nil {
		fmt.Println("cannot unmarshal circleci response")
		os.Exit(1)
	}
}
// filterItems converts builds into Alfred items, keeping only builds
// whose repo+branch+status+committer concatenation contains filter.
// The icon encodes the status as a color; an unknown status leaves the
// color empty (icon path ".png"), matching the previous behavior.
func filterItems(builds []build, filter string) []Item {
	var items []Item
	for _, v := range builds {
		if !strings.Contains(v.RepoName+v.Branch+v.Status+v.CommitterName, filter) {
			continue // guard clause replaces the previous whole-body if
		}
		title := "#" + fmt.Sprint(v.BuildNum) +
			" / " + v.RepoName +
			" / " + v.Branch
		sec := v.BuildTimeMillis / 1000
		user := v.CommitterName
		if user == "" {
			user = v.UserName
		}
		t, _ := time.Parse(time.RFC3339, v.StartTime)
		subtitle := fmt.Sprintf("[%s] U: %s | Start: %v | Elapsed: %d sec", v.Workflows.JobName, user, t.Format("02/01/2006 3:04PM"), sec)
		// A switch with case lists replaces the long if/else cascade.
		var color string
		switch v.Status {
		case "no_tests", "not_run", "not_running":
			color = "gray"
		case "fixed", "success":
			color = "green"
		case "queued", "scheduled":
			color = "purple"
		case "canceled", "failed", "infrastructure_fail", "timeout":
			color = "red"
		case "retried", "running":
			color = "blue"
		}
		items = append(items, Item{
			Title: title,
			Subtitle: subtitle,
			Arg: v.BuildURL,
			Icon: icon{Path: color + ".png"}})
	}
	return items
}
|
package handler
import (
"context"
client "github.com/lecex/core/client"
pb "github.com/lecex/device-api/proto/device"
)
// Device proxies device-service RPCs; ServiceName selects the backend
// service that client.Call dispatches to.
type Device struct {
	ServiceName string
}
// All lists all devices (original comment said "permission list" —
// likely a copy-paste slip; it forwards to Devices.All).
func (srv *Device) All(ctx context.Context, req *pb.Request, res *pb.Response) (err error) {
	return client.Call(ctx, srv.ServiceName, "Devices.All", req, res)
}
// List returns a (paged) device list via Devices.List.
func (srv *Device) List(ctx context.Context, req *pb.Request, res *pb.Response) (err error) {
	return client.Call(ctx, srv.ServiceName, "Devices.List", req, res)
}
// Get fetches a single device via Devices.Get.
func (srv *Device) Get(ctx context.Context, req *pb.Request, res *pb.Response) (err error) {
	return client.Call(ctx, srv.ServiceName, "Devices.Get", req, res)
}
// Create creates a device via Devices.Create.
func (srv *Device) Create(ctx context.Context, req *pb.Request, res *pb.Response) (err error) {
	return client.Call(ctx, srv.ServiceName, "Devices.Create", req, res)
}
// Update updates a device via Devices.Update.
func (srv *Device) Update(ctx context.Context, req *pb.Request, res *pb.Response) (err error) {
	return client.Call(ctx, srv.ServiceName, "Devices.Update", req, res)
}
// Delete removes a device via Devices.Delete.
func (srv *Device) Delete(ctx context.Context, req *pb.Request, res *pb.Response) (err error) {
	return client.Call(ctx, srv.ServiceName, "Devices.Delete", req, res)
}
|
package ccutility
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"io/ioutil"
"log"
"math"
"os"
// xxx
_ "net/http/pprof"
"strings"
"time"
)
// GetAllFileByExt walks pathname recursively and appends to s the full
// path of every file whose name ends with ext (case-insensitive).
// It returns the (possibly extended) slice plus the first directory-read
// error encountered, if any.
func GetAllFileByExt(pathname string, ext string, s []string) ([]string, error) {
	entries, err := ioutil.ReadDir(pathname)
	if err != nil {
		log.Printf("GetAllFileByExt.ReadDir[%v].err[%v]", pathname, err)
		return s, err
	}
	loweredExt := strings.ToLower(ext)
	for _, entry := range entries {
		full := pathname + "/" + entry.Name()
		if entry.IsDir() {
			if s, err = GetAllFileByExt(full, ext, s); err != nil {
				log.Printf("GetAllFileByExt[%v].err[%v]", full, err)
				return s, err
			}
			continue
		}
		if strings.HasSuffix(strings.ToLower(entry.Name()), loweredExt) {
			s = append(s, full)
		}
	}
	return s, nil
}
// PathExists reports whether path exists on disk.
// It returns (true, nil) when the path exists, (false, nil) when it
// definitely does not, and (false, err) when the check itself failed
// (e.g. a permission error), so callers can tell the three cases apart.
func PathExists(path string) (bool, error) {
	fileInfo, err := os.Stat(path)
	if err == nil {
		log.Printf("fileInfo[%v].size[%v]", path, fileInfo.Size())
		return true, nil
	}
	if os.IsNotExist(err) {
		// Previously this case fell through and returned the raw
		// not-exist error; "missing" is a normal answer, not a failure.
		// Other stat errors also wrongly reported (true, nil).
		return false, nil
	}
	return false, err
}
// Round converts f to the nearest int, rounding exact halves upward
// (toward +Inf): Round(2.5) == 3 but Round(-2.5) == -2.
func Round(f float64) int {
	shifted := f + 0.5
	return int(math.Floor(shifted))
}
// ReplaceSlash rewrites a URI of the form a://b/c into a:b:c by first
// dropping every "//" and then turning each remaining "/" into ":".
func ReplaceSlash(uri string) string {
	withoutDouble := strings.Replace(uri, "//", "", -1)
	return strings.Replace(withoutDouble, "/", ":", -1)
}
// RemoveLastSlash trims a single trailing '/' from uri, if present.
// Unlike the previous implementation it is safe on the empty string,
// which used to panic on the ret[len(ret)-1:] slice expression.
func RemoveLastSlash(uri string) string {
	return strings.TrimSuffix(uri, "/")
}
// WorkerTimer runs f immediately and then again after every pause of
// duration d, on a dedicated background goroutine. The goroutine has no
// stop mechanism and runs for the lifetime of the process.
func WorkerTimer(d time.Duration, f func()) {
	go func(d time.Duration) {
		for {
			f()
			// time.Sleep replaces the previous NewTimer ceremony,
			// which allocated a fresh timer each iteration only to
			// wait for next.Sub(now) == d.
			time.Sleep(d)
		}
	}(d)
}
// ReadBinary reads the whole file at filePath into memory and returns
// its contents.
func ReadBinary(filePath string) ([]byte, error) {
	file, err := os.Open(filePath)
	if err != nil {
		return nil, fmt.Errorf("ReadBinary[%v].Open.err[%v]", filePath, err)
	}
	buf := bytes.Buffer{}
	if _, err = io.Copy(&buf, file); err != nil {
		file.Close() // best-effort close; the copy error is the one to report
		return nil, fmt.Errorf("ReadBinary[%v].Read.err[%v]", filePath, err)
	}
	// Close exactly once. The old code closed via defer AND explicitly,
	// so the deferred close always reported "file already closed".
	if err = file.Close(); err != nil {
		return nil, fmt.Errorf("ReadBinary[%v].Close.err[%v]", filePath, err)
	}
	return buf.Bytes(), nil
}
// WriteBinary writes src to filePath (slash-separated), creating all
// parent directories first, and returns the number of bytes written.
func WriteBinary(filePath string, src []byte) (int64, error) {
	path := strings.Split(filePath, "/")
	// Join the parent elements directly; the old fmt.Sprint/Trim/Replace
	// round-trip corrupted any path element containing a space.
	dirs := strings.Join(path[:len(path)-1], "/") + "/"
	name := path[len(path)-1]
	err := os.MkdirAll(dirs, os.ModePerm)
	if err != nil {
		log.Printf("WriteBinary.MkdirAll[%v].err[%v]", dirs, err)
		return 0, fmt.Errorf("WriteBinary.MkdirAll[%v].err[%v]", dirs, err)
	}
	fs, err := os.Create(dirs + name)
	if err != nil {
		log.Printf("WriteBinary.Create[%v].err[%v]", dirs+name, err)
		return 0, fmt.Errorf("WriteBinary.Create[%v].err[%v]", dirs+name, err)
	}
	dlen, err := io.Copy(fs, bytes.NewReader(src))
	if err != nil {
		fs.Close() // previously leaked the handle on a failed copy
		log.Printf("WriteBinary.Copy[%v].err[%v]", dirs+name, err)
		return 0, fmt.Errorf("WriteBinary.Copy[%v].err[%v]", dirs+name, err)
	}
	if err = fs.Close(); err != nil {
		log.Printf("WriteBinary.Close[%v].err[%v]", dirs+name, err)
		return 0, fmt.Errorf("WriteBinary.Close[%v].err[%v]", dirs+name, err)
	}
	return dlen, nil
}
// Int64ToBytes encodes i as 8 big-endian bytes.
func Int64ToBytes(i int64) []byte {
	out := make([]byte, 8)
	for idx := 0; idx < 8; idx++ {
		out[idx] = byte(uint64(i) >> uint(56-8*idx))
	}
	return out
}
// BytesToInt64 decodes the first 8 bytes of buf as a big-endian int64.
// Like the encoder's counterpart, it panics when len(buf) < 8.
func BytesToInt64(buf []byte) int64 {
	u := binary.BigEndian.Uint64(buf)
	return int64(u)
}
|
package main
import (
"log"
"net/http"
"github.com/gorilla/mux"
)
// In-memory data stores for the demo API (no persistence).
var users []User
var medicalRecords []MedicalRecord
var patientHistory []PatientHistory
// main seeds the in-memory stores with demo data, wires the REST routes
// and serves the API on :1234.
func main() {
	router := mux.NewRouter()
	users = append(users, User{Id: "1", UserName: "ganeshRao", FirstName: "Ganesh", LastName: "Rao", Address: "Mahalakshmi Layout"})
	users = append(users, User{Id: "2", UserName: "kotiCheshte", FirstName: "Rajesh", LastName: "Venkataraman"})
	router.HandleFunc("/users", GetUsers).Methods("GET")
	router.HandleFunc("/users/{id}", GetUser).Methods("GET")
	router.HandleFunc("/users/{id}", CreateUser).Methods("POST")
	router.HandleFunc("/users/{id}", DeleteUser).Methods("DELETE")
	var mr1 MedicalRecord
	mr1 = MedicalRecord{Id: "1", Title: "General Checkup", Symptoms: "Cough and Cold"}
	var mr2 MedicalRecord
	mr2 = MedicalRecord{Id: "2", Title: "Bedhi", Symptoms: "Bedhi"}
	var mr3 MedicalRecord
	mr3 = MedicalRecord{Id: "3", Title: "Knee ligament tear", Symptoms: "Knee ligament tear, thumba novu"}
	medicalRecords = append(medicalRecords, mr1)
	medicalRecords = append(medicalRecords, mr2)
	medicalRecords = append(medicalRecords, mr3)
	router.HandleFunc("/medicalrecords", GetMedicalRecords).Methods("GET")
	router.HandleFunc("/medicalrecords/{id}", GetMedicalRecord).Methods("GET")
	router.HandleFunc("/medicalrecords/{id}", CreateMedicalRecord).Methods("POST")
	router.HandleFunc("/medicalrecords/{id}", DeleteMedicalRecord).Methods("DELETE")
	// Bug below: No two patients should share a medical record. This is for testing the Handlers.
	patientHistory = append(patientHistory, PatientHistory{PatientId: "1", MedicalRecords: medicalRecords})
	patientHistory = append(patientHistory, PatientHistory{PatientId: "2", MedicalRecords: medicalRecords})
	router.HandleFunc("/patients/{patientid}/history", GetPatientHistory).Methods("GET")
	// TODO: Implement GetPatientHistoryByMedicalRecordId
	log.Fatal(http.ListenAndServe(":1234", router))
}
|
// Copyright 2019-2023 The sakuracloud_exporter Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package collector
import (
"context"
"fmt"
"log/slog"
"strings"
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/sacloud/iaas-api-go"
"github.com/sacloud/sakuracloud_exporter/platform"
)
// SIMCollector collects metrics about all sims.
type SIMCollector struct {
	ctx context.Context
	logger *slog.Logger
	errors *prometheus.CounterVec // per-collector API error counter (label "sim")
	client platform.SIMClient
	Up *prometheus.Desc // session up/down gauge
	SIMInfo *prometheus.Desc // constant-1 info metric with descriptive labels
	Uplink *prometheus.Desc // uplink traffic, Kbps
	Downlink *prometheus.Desc // downlink traffic, Kbps
}
// NewSIMCollector returns a new SIMCollector.
func NewSIMCollector(ctx context.Context, logger *slog.Logger, errors *prometheus.CounterVec, client platform.SIMClient) *SIMCollector {
	// Pre-touch the "sim" label so the error series exists at 0.
	errors.WithLabelValues("sim").Add(0)
	simLabels := []string{"id", "name"}
	simInfoLabels := append(simLabels, "imei_lock",
		"registered_date", "activated_date", "deactivated_date",
		"ipaddress", "simgroup_id", "carriers", "tags", "description")
	return &SIMCollector{
		ctx: ctx,
		logger: logger,
		errors: errors,
		client: client,
		Up: prometheus.NewDesc(
			"sakuracloud_sim_session_up",
			"If 1 the session is up and running, 0 otherwise",
			simLabels, nil,
		),
		SIMInfo: prometheus.NewDesc(
			"sakuracloud_sim_info",
			"A metric with a constant '1' value labeled by sim information",
			simInfoLabels, nil,
		),
		Uplink: prometheus.NewDesc(
			"sakuracloud_sim_uplink",
			"Uplink traffic (unit: Kbps)",
			simLabels, nil,
		),
		Downlink: prometheus.NewDesc(
			"sakuracloud_sim_downlink",
			"Downlink traffic (unit: Kbps)",
			simLabels, nil,
		),
	}
}
// Describe sends the super-set of all possible descriptors of metrics
// collected by this Collector.
func (c *SIMCollector) Describe(ch chan<- *prometheus.Desc) {
	descs := []*prometheus.Desc{c.Up, c.SIMInfo, c.Uplink, c.Downlink}
	for _, d := range descs {
		ch <- d
	}
}
// Collect is called by the Prometheus registry when collecting metrics.
// It lists all SIMs once, then emits the session gauge inline and fans
// out per-SIM info/traffic collection to goroutines tracked by wg.
func (c *SIMCollector) Collect(ch chan<- prometheus.Metric) {
	sims, err := c.client.Find(c.ctx)
	if err != nil {
		c.errors.WithLabelValues("sim").Add(1)
		c.logger.Warn(
			"can't list sims",
			slog.Any("err", err),
		)
	}
	var wg sync.WaitGroup
	wg.Add(len(sims))
	for i := range sims {
		func(sim *iaas.SIM) {
			defer wg.Done()
			simLabels := c.simLabels(sim)
			// Evaluate the session state once, case-insensitively.
			// Previously the gauge used ToLower(...) == "up" while the
			// traffic branch compared against the literal "UP", so a
			// mixed-case status reported up=1 but skipped the traffic
			// metrics.
			sessionUp := strings.ToLower(sim.Info.SessionStatus) == "up"
			var up float64
			if sessionUp {
				up = 1.0
			}
			ch <- prometheus.MustNewConstMetric(
				c.Up,
				prometheus.GaugeValue,
				up,
				simLabels...,
			)
			wg.Add(1)
			go func() {
				c.collectSIMInfo(ch, sim)
				wg.Done()
			}()
			if sessionUp {
				now := time.Now()
				wg.Add(1)
				go func() {
					c.collectSIMMetrics(ch, sim, now)
					wg.Done()
				}()
			}
		}(sims[i])
	}
	wg.Wait()
}
// simLabels returns the base label values (id, name) shared by all
// per-SIM metrics, in the order declared in NewSIMCollector.
func (c *SIMCollector) simLabels(sim *iaas.SIM) []string {
	return []string{
		sim.ID.String(),
		sim.Name,
	}
}
// collectSIMInfo emits the constant-1 sakuracloud_sim_info metric whose
// labels describe the SIM (IMEI lock, lifecycle dates in Unix millis,
// IP, group, allowed carriers, tags, description).
func (c *SIMCollector) collectSIMInfo(ch chan<- prometheus.Metric, sim *iaas.SIM) {
	simConfigs, err := c.client.GetNetworkOperatorConfig(c.ctx, sim.ID)
	if err != nil {
		c.errors.WithLabelValues("sim").Add(1)
		c.logger.Warn(
			fmt.Sprintf("can't get sim's network operator config: SIMID=%d", sim.ID),
			slog.Any("err", err),
		)
		return
	}
	// Only carriers whose config allows traffic are listed in the label.
	var carriers []string
	for _, config := range simConfigs {
		if config.Allow {
			carriers = append(carriers, config.Name)
		}
	}
	simInfo := sim.Info
	imeiLock := "0"
	if simInfo.IMEILock {
		imeiLock = "1"
	}
	// Dates are exported as Unix milliseconds; zero times stay 0.
	var registerdDate, activatedDate, deactivatedDate int64
	if !simInfo.RegisteredDate.IsZero() {
		registerdDate = simInfo.RegisteredDate.Unix() * 1000
	}
	if !simInfo.ActivatedDate.IsZero() {
		activatedDate = simInfo.ActivatedDate.Unix() * 1000
	}
	if !simInfo.DeactivatedDate.IsZero() {
		deactivatedDate = simInfo.DeactivatedDate.Unix() * 1000
	}
	labels := append(c.simLabels(sim),
		imeiLock,
		fmt.Sprintf("%d", registerdDate),
		fmt.Sprintf("%d", activatedDate),
		fmt.Sprintf("%d", deactivatedDate),
		simInfo.IP,
		simInfo.SIMGroupID,
		flattenStringSlice(carriers),
		flattenStringSlice(sim.Tags),
		sim.Description,
	)
	ch <- prometheus.MustNewConstMetric(
		c.SIMInfo,
		prometheus.GaugeValue,
		float64(1.0),
		labels...,
	)
}
// collectSIMMetrics fetches the SIM's traffic monitor values around now
// and emits uplink/downlink gauges stamped with the sample's own time.
// Values arrive as bps and are converted to Kbps.
func (c *SIMCollector) collectSIMMetrics(ch chan<- prometheus.Metric, sim *iaas.SIM, now time.Time) {
	values, err := c.client.MonitorTraffic(c.ctx, sim.ID, now)
	if err != nil {
		c.errors.WithLabelValues("sim").Add(1)
		c.logger.Warn(
			fmt.Sprintf("can't get sim's metrics: SIMID=%d", sim.ID),
			slog.Any("err", err),
		)
		return
	}
	// No monitor data available: emit nothing rather than zeros.
	if values == nil {
		return
	}
	uplink := values.UplinkBPS
	if uplink > 0 {
		uplink /= 1000
	}
	m := prometheus.MustNewConstMetric(
		c.Uplink,
		prometheus.GaugeValue,
		uplink,
		c.simLabels(sim)...,
	)
	ch <- prometheus.NewMetricWithTimestamp(values.Time, m)
	downlink := values.DownlinkBPS
	if downlink > 0 {
		downlink /= 1000
	}
	m = prometheus.MustNewConstMetric(
		c.Downlink,
		prometheus.GaugeValue,
		downlink,
		c.simLabels(sim)...,
	)
	ch <- prometheus.NewMetricWithTimestamp(values.Time, m)
}
|
package nebulatest
import (
"encoding/json"
"fmt"
"github.com/vesoft-inc/nebula-go/graph"
)
// JsonDiffer compares a JSON-encoded execution result against the
// expected Response; Order controls whether row order must match.
// The comparison outcome is stored via the embedded DifferError.
type JsonDiffer struct {
	DifferError
	Response *graph.ExecutionResponse
	Order bool
}
// Diff parses result as JSON, converts it to a nebula response and
// records any parse/compare error (nil on a successful match).
func (d *JsonDiffer) Diff(result string) {
	// result = fmt.Sprintf("%q", result)
	var resp executionResponse
	if err := json.Unmarshal([]byte(result), &resp); err != nil {
		d.err = fmt.Errorf("Fail to parse JSON string, error: %s", err.Error())
	} else {
		r := resp.convertToNebulaResponse()
		if err = d.compare(r); err != nil {
			d.err = err
		} else {
			d.err = nil
		}
	}
}
// compare checks result against the expected response field by field:
// error code/message, space name, column names (order-insensitive) and
// rows (ordered or unordered per d.Order). It returns a descriptive
// error on the first mismatch, nil on a full match.
func (d *JsonDiffer) compare(result *graph.ExecutionResponse) error {
	if d.Response.GetErrorCode() != result.GetErrorCode() {
		return fmt.Errorf("ErrorCode: %v vs. %v", d.Response.GetErrorCode(), result.GetErrorCode())
	}
	if result.IsSetErrorMsg() && d.Response.GetErrorMsg() != result.GetErrorMsg() {
		return fmt.Errorf("ErrorMsg: %s vs. %s", d.Response.GetErrorMsg(), result.GetErrorMsg())
	}
	if result.IsSetSpaceName() && d.Response.GetSpaceName() != result.GetSpaceName() {
		return fmt.Errorf("SpaceName: %s vs. %s", d.Response.GetSpaceName(), result.GetSpaceName())
	}
	if result.IsSetColumnNames() {
		if len(d.Response.GetColumnNames()) != len(result.GetColumnNames()) {
			return fmt.Errorf("Length of column names: %d vs. %d", len(d.Response.GetColumnNames()), len(result.GetColumnNames()))
		}
		for _, rc := range d.Response.GetColumnNames() {
			found := false
			for _, ec := range result.GetColumnNames() {
				if string(rc) == string(ec) {
					found = true
					break
				}
			}
			if !found {
				return fmt.Errorf("NotFoundColumnName: %s", string(rc))
			}
		}
	}
	if result.IsSetRows() {
		if len(d.Response.GetRows()) != len(result.GetRows()) {
			// Bug fix: the counts were formatted from the slices
			// themselves with %d; report the lengths instead.
			return fmt.Errorf("Number of rows: %d vs. %d", len(d.Response.GetRows()), len(result.GetRows()))
		}
		if d.Order {
			for i := range d.Response.GetRows() {
				if !d.compareRowValue(d.Response.GetRows()[i], result.GetRows()[i]) {
					return fmt.Errorf("Rows: %s vs. %s", d.Response.GetRows()[i].String(), result.GetRows()[i].String())
				}
			}
		} else {
			for _, i := range d.Response.GetRows() {
				found := false
				for _, j := range result.GetRows() {
					if d.compareRowValue(i, j) {
						found = true
						break
					}
				}
				if !found {
					return fmt.Errorf("NotFoundRow: %s", i)
				}
			}
		}
	}
	return nil
}
// compareRowValue reports whether every column of l has a matching
// column somewhere in r.
// NOTE(review): the match is unordered and not one-to-one, and r may
// contain extra columns without failing — confirm this looseness is
// intentional (column counts are never compared here).
func (d *JsonDiffer) compareRowValue(l *graph.RowValue, r *graph.RowValue) bool {
	for _, lc := range l.GetColumns() {
		found := false
		for _, rc := range r.GetColumns() {
			if d.compareColumnValue(lc, rc) {
				found = true
				break
			}
		}
		if !found {
			return false
		}
	}
	return true
}
// compareColumnValue reports whether the two column values hold equal
// payloads of the same set type; values of differing (or unset) types
// never match.
func (d *JsonDiffer) compareColumnValue(l *graph.ColumnValue, r *graph.ColumnValue) bool {
	switch {
	case l.IsSetBoolVal() && r.IsSetBoolVal():
		return l.GetBoolVal() == r.GetBoolVal()
	case l.IsSetInteger() && r.IsSetInteger():
		return l.GetInteger() == r.GetInteger()
	case l.IsSetId() && r.IsSetId():
		return l.GetId() == r.GetId()
	case l.IsSetStr() && r.IsSetStr():
		return string(l.GetStr()) == string(r.GetStr())
	case l.IsSetDate() && r.IsSetDate():
		return l.GetDate().String() == r.GetDate().String()
	case l.IsSetDatetime() && r.IsSetDatetime():
		return l.GetDatetime().String() == r.GetDatetime().String()
	case l.IsSetTimestamp() && r.IsSetTimestamp():
		return l.GetTimestamp() == r.GetTimestamp()
	case l.IsSetSinglePrecision() && r.IsSetSinglePrecision():
		return l.GetSinglePrecision() == r.GetSinglePrecision()
	case l.IsSetDoublePrecision() && r.IsSetDoublePrecision():
		return l.GetDoublePrecision() == r.GetDoublePrecision()
	default:
		return false
	}
}
|
package download
import (
"fmt"
"os"
"net/http"
"io/ioutil"
"io"
"bytes"
)
// DownloadImg asynchronously fetches url and stores the body as
// H:\wallpager\<id>.jpg. Errors are printed to stdout; the caller gets
// no completion signal (fire-and-forget).
func DownloadImg(id string, url string) {
	go func() {
		out, err := os.Create("H:\\wallpager\\" + id + ".jpg")
		if err != nil {
			fmt.Printf("download err %s \n", err.Error())
			return // previously continued with a nil file handle
		}
		defer out.Close()
		resp, err := http.Get(url)
		if err != nil {
			// Previously unchecked: a failed GET left resp nil and the
			// deferred resp.Body.Close() panicked.
			fmt.Printf("download err %s \n", err.Error())
			return
		}
		defer resp.Body.Close()
		pix, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			fmt.Printf("download err %s \n", err.Error())
			return
		}
		if _, err = io.Copy(out, bytes.NewReader(pix)); err != nil {
			fmt.Printf("download err %s \n", err.Error())
		}
	}()
}
|
package cgo
import (
"crypto/rand"
"crypto/subtle"
"encoding/base64"
"errors"
"fmt"
"strings"
"golang.org/x/crypto/argon2"
)
var (
	// ErrInvalidHash error: the encoded hash does not have the expected
	// $argon2id$v=..$m=..,t=..,p=..$salt$hash shape.
	ErrInvalidHash = errors.New("the encoded hash is not in the correct format")
	// ErrIncompatibleVersion error: the hash was produced by a different
	// argon2 version than the linked library implements.
	ErrIncompatibleVersion = errors.New("incompatible version of argon2")
)
// params bundles the argon2id cost parameters and the salt/key sizes.
type params struct {
	memory uint32
	iterations uint32
	parallelism uint8
	saltLength uint32
	keyLength uint32
}
var p = ¶ms{
memory: 64 * 1024,
iterations: 3,
parallelism: 2,
saltLength: 16,
keyLength: 32,
}
// Hash derives an argon2id key from password using the package params and
// a fresh random salt, returning the standard encoded representation
// "$argon2id$v=..$m=..,t=..,p=..$<b64 salt>$<b64 hash>".
func Hash(password string) (encodedHash string, err error) {
	salt, err := generateRandomBytes(p.saltLength)
	if err != nil {
		return "", err
	}
	hash := argon2.IDKey([]byte(password), salt, p.iterations, p.memory, p.parallelism, p.keyLength)
	// Base64 encode the salt and hashed password.
	b64Salt := base64.RawStdEncoding.EncodeToString(salt)
	b64Hash := base64.RawStdEncoding.EncodeToString(hash)
	// Return a string using the standard encoded hash representation.
	encodedHash = fmt.Sprintf("$argon2id$v=%d$m=%d,t=%d,p=%d$%s$%s", argon2.Version, p.memory, p.iterations, p.parallelism, b64Salt, b64Hash)
	return encodedHash, nil
}
// Verify re-derives the argon2id key for password using the parameters and
// salt embedded in encodedHash and reports whether it matches the stored key.
// The comparison uses subtle.ConstantTimeCompare to resist timing attacks.
func Verify(password, encodedHash string) (match bool, err error) {
    prm, salt, want, err := decodeHash(encodedHash)
    if err != nil {
        return false, err
    }
    // Derive a candidate key with the exact parameters of the stored hash.
    got := argon2.IDKey([]byte(password), salt, prm.iterations, prm.memory, prm.parallelism, prm.keyLength)
    return subtle.ConstantTimeCompare(want, got) == 1, nil
}
// generateRandomBytes returns n cryptographically random bytes drawn from
// crypto/rand, or an error if the system entropy source fails.
func generateRandomBytes(n uint32) ([]byte, error) {
    buf := make([]byte, n)
    if _, err := rand.Read(buf); err != nil {
        return nil, err
    }
    return buf, nil
}
func decodeHash(encodedHash string) (p *params, salt, hash []byte, err error) {
vals := strings.Split(encodedHash, "$")
if len(vals) != 6 {
return nil, nil, nil, ErrInvalidHash
}
var version int
_, err = fmt.Sscanf(vals[2], "v=%d", &version)
if err != nil {
return nil, nil, nil, err
}
if version != argon2.Version {
return nil, nil, nil, ErrIncompatibleVersion
}
p = ¶ms{}
_, err = fmt.Sscanf(vals[3], "m=%d,t=%d,p=%d", &p.memory, &p.iterations, &p.parallelism)
if err != nil {
return nil, nil, nil, err
}
salt, err = base64.RawStdEncoding.DecodeString(vals[4])
if err != nil {
return nil, nil, nil, err
}
p.saltLength = uint32(len(salt))
hash, err = base64.RawStdEncoding.DecodeString(vals[5])
if err != nil {
return nil, nil, nil, err
}
p.keyLength = uint32(len(hash))
return p, salt, hash, nil
}
|
package main
import (
"fmt"
"sort"
)
// main exercises getLeastNumbers on a few sample inputs and prints the results.
func main() {
    samples := []struct {
        arr []int
        k   int
    }{
        {[]int{3, 2, 1}, 2},
        {[]int{0, 1, 2, 1}, 1},
        {[]int{4, 5, 1, 6, 2, 7, 3, 8}, 4},
    }
    for _, s := range samples {
        fmt.Println(getLeastNumbers(s.arr, s.k))
    }
}
// getLeastNumbers returns the k smallest values of arr in ascending order.
//
// Fixed defects: the original panicked when k was negative or larger than
// len(arr), and sorted arr in place, mutating the caller's slice. This
// version sorts a copy and clamps k to [0, len(arr)].
func getLeastNumbers(arr []int, k int) []int {
    if k <= 0 {
        return []int{}
    }
    if k > len(arr) {
        k = len(arr)
    }
    sorted := append([]int(nil), arr...)
    sort.Ints(sorted)
    return sorted[:k]
}
// getLeastNumbers2 moves the k smallest values of arr to its front, in
// ascending order, by a selection-style pass of pairwise swaps, and returns
// that prefix. Note: arr is rearranged in place, and arr must have at least
// k elements.
func getLeastNumbers2(arr []int, k int) []int {
    for front := 0; front < k; front++ {
        // Swap any smaller later element into position front; after the
        // inner loop, arr[front] holds the minimum of arr[front:].
        for rest := front + 1; rest < len(arr); rest++ {
            if arr[rest] < arr[front] {
                arr[rest], arr[front] = arr[front], arr[rest]
            }
        }
    }
    return arr[:k]
}
|
// Copyright 2019 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package java
import (
"fmt"
"path/filepath"
"sort"
"strconv"
"strings"
"android/soong/android"
"android/soong/java/config"
"github.com/google/blueprint/pathtools"
)
// init registers the SDK-related singletons and the make-vars provider with
// the Soong build system at package load time.
func init() {
    android.RegisterPreSingletonType("sdk_versions", sdkPreSingletonFactory)
    android.RegisterSingletonType("sdk", sdkSingletonFactory)
    android.RegisterMakeVarsProvider(pctx, sdkMakeVars)
}
// Once keys used to compute-and-cache values on the build Config exactly once.
var sdkVersionsKey = android.NewOnceKey("sdkVersionsKey")
var sdkFrameworkAidlPathKey = android.NewOnceKey("sdkFrameworkAidlPathKey")
var nonUpdatableFrameworkAidlPathKey = android.NewOnceKey("nonUpdatableFrameworkAidlPathKey")
var apiFingerprintPathKey = android.NewOnceKey("apiFingerprintPathKey")
// sdkContext exposes the SDK-related properties of the module currently
// being processed, for use by decodeSdkDep and friends.
type sdkContext interface {
    // sdkVersion returns sdkSpec that corresponds to the sdk_version property of the current module
    sdkVersion() sdkSpec
    // systemModules returns the system_modules property of the current module, or an empty string if it is not set.
    systemModules() string
    // minSdkVersion returns sdkSpec that corresponds to the min_sdk_version property of the current module,
    // or from sdk_version if it is not set.
    minSdkVersion() sdkSpec
    // targetSdkVersion returns the sdkSpec that corresponds to the target_sdk_version property of the current module,
    // or from sdk_version if it is not set.
    targetSdkVersion() sdkSpec
}
// UseApiFingerprint reports whether the target SDK should be stamped with the
// API fingerprint: only for unbundled builds that build stubs from source and
// explicitly opt in via UNBUNDLED_BUILD_TARGET_SDK_WITH_API_FINGERPRINT.
func UseApiFingerprint(ctx android.BaseModuleContext) bool {
    cfg := ctx.Config()
    return cfg.UnbundledBuild() &&
        !cfg.UnbundledBuildUsePrebuiltSdks() &&
        cfg.IsEnvTrue("UNBUNDLED_BUILD_TARGET_SDK_WITH_API_FINGERPRINT")
}
// sdkKind represents a particular category of an SDK spec like public, system, test, etc.
type sdkKind int

const (
    sdkInvalid sdkKind = iota // unparseable or unknown sdk_version
    sdkNone                   // sdk_version: "none"
    sdkCore
    sdkCorePlatform // sdk_version: "core_platform"
    sdkPublic
    sdkSystem
    sdkTest
    sdkModule
    sdkSystemServer
    sdkPrivate // sdk_version: "" (platform-private APIs)
)
// sdkKindStrings maps each well-known sdkKind to its canonical name; any kind
// absent from the map (including sdkInvalid) stringifies as "invalid".
var sdkKindStrings = map[sdkKind]string{
    sdkPrivate:      "private",
    sdkNone:         "none",
    sdkPublic:       "public",
    sdkSystem:       "system",
    sdkTest:         "test",
    sdkCore:         "core",
    sdkCorePlatform: "core_platform",
    sdkModule:       "module",
    sdkSystemServer: "system_server",
}

// String returns the string representation of this sdkKind.
func (k sdkKind) String() string {
    if name, ok := sdkKindStrings[k]; ok {
        return name
    }
    return "invalid"
}
// sdkVersion represents a specific version number of an SDK spec of a particular kind
type sdkVersion int

const (
    // special version number for a not-yet-frozen SDK
    sdkVersionCurrent sdkVersion = sdkVersion(android.FutureApiLevel)
    // special version number to be used for SDK specs where version number doesn't
    // make sense, e.g. "none", "", etc.
    sdkVersionNone sdkVersion = sdkVersion(0)
)
// isCurrent checks if the sdkVersion refers to the not-yet-published version of an sdkKind
func (v sdkVersion) isCurrent() bool {
    return v == sdkVersionCurrent
}
// isNumbered checks if the sdkVersion refers to a published (a.k.a. numbered)
// version, i.e. anything other than the "current" and "none" sentinels.
func (v sdkVersion) isNumbered() bool {
    return v != sdkVersionCurrent && v != sdkVersionNone
}
// String returns the string representation of this sdkVersion: "current" for
// the unfrozen sentinel, the decimal number for published versions, and
// "(no version)" otherwise.
func (v sdkVersion) String() string {
    switch {
    case v.isCurrent():
        return "current"
    case v.isNumbered():
        return strconv.Itoa(int(v))
    default:
        return "(no version)"
    }
}
// asNumberString directly converts the numeric value of this sdk version as a string.
// When isNumbered() is true, this method is the same as String(). However, for sdkVersionCurrent
// and sdkVersionNone, this returns 10000 and 0 while String() returns "current" and "(no version)",
// respectively.
func (v sdkVersion) asNumberString() string {
    return strconv.Itoa(int(v))
}
// sdkSpec represents the kind and the version of an SDK for a module to build against
type sdkSpec struct {
    kind    sdkKind
    version sdkVersion
    raw     string // the original sdk_version property string, kept for error messages
}
// String returns "<kind>_<version>", e.g. "public_29" or "system_current".
func (s sdkSpec) String() string {
    return fmt.Sprintf("%s_%s", s.kind, s.version)
}
// valid checks if this sdkSpec is well-formed. Note however that true doesn't mean that the
// specified SDK actually exists.
func (s sdkSpec) valid() bool {
    return s.kind != sdkInvalid
}
// specified checks if this sdkSpec is well-formed and is not "".
func (s sdkSpec) specified() bool {
    return s.valid() && s.kind != sdkPrivate
}
// stable reports whether the API surface is managed and versioned, i.e. has a
// .txt file that gets frozen on SDK freeze and whose changes are reviewed by
// the API council.
//
// Fixed defect: the original had an unreachable "return false" after a switch
// in which every branch already terminates (flagged by staticcheck); the
// always-true cases are also merged.
func (s sdkSpec) stable() bool {
    if !s.specified() {
        return false
    }
    switch s.kind {
    case sdkNone:
        // there is nothing to manage and version in this case; de facto stable API.
        return true
    case sdkCore, sdkPublic, sdkSystem, sdkModule, sdkSystemServer:
        return true
    case sdkCorePlatform, sdkTest, sdkPrivate:
        return false
    default:
        panic(fmt.Errorf("unknown sdkKind=%v", s.kind))
    }
}
// prebuiltSdkAvailableForUnbundledBuild tells whether this sdkSpec can have a
// prebuilt SDK that can be used for unbundled builds.
func (s sdkSpec) prebuiltSdkAvailableForUnbundledBuild() bool {
    switch s.kind {
    case sdkPrivate, sdkNone, sdkCorePlatform:
        // "", "none", and "core_platform" have no prebuilt stubs, so they are
        // not available for unbundled builds.
        return false
    default:
        return true
    }
}
// forPdkBuild converts this sdkSpec into another sdkSpec that is for the PDK
// builds, substituting the latest numbered SDK for "current" or "".
func (s sdkSpec) forPdkBuild(ctx android.EarlyModuleContext) sdkSpec {
    // Only private ("") and public specs are rewritten; everything else is
    // returned unchanged.
    if s.kind != sdkPrivate && s.kind != sdkPublic {
        return s
    }
    kind := s.kind
    if kind == sdkPrivate {
        // We don't have prebuilt SDK for private APIs, so use the public SDK
        // instead. This looks odd, but that's how it has been done.
        // TODO(b/148271073): investigate the need for this.
        kind = sdkPublic
    }
    return sdkSpec{kind, sdkVersion(LatestSdkVersionInt(ctx)), s.raw}
}
// usePrebuilt determines whether a prebuilt SDK should be used for this
// sdkSpec in the given context. Numbered public/system/test SDKs always come
// from prebuilts; "current" comes from prebuilts only for unbundled builds
// configured to use them; "", "none", and "core_platform" never do.
//
// Fixed defects: the panic message said "not not available" (doubled word),
// and the "return false" after the panic was unreachable.
func (s sdkSpec) usePrebuilt(ctx android.EarlyModuleContext) bool {
    if s.version.isCurrent() {
        // "current" can be built from source and be from prebuilt SDK
        return ctx.Config().UnbundledBuildUsePrebuiltSdks()
    } else if s.version.isNumbered() {
        // sanity check: prebuilts only exist for these kinds
        if s.kind != sdkPublic && s.kind != sdkSystem && s.kind != sdkTest {
            panic(fmt.Errorf("prebuilt SDK is not available for sdkKind=%q", s.kind))
        }
        // numbered SDKs are always from prebuilt
        return true
    }
    // "", "none", "core_platform" fall here
    return false
}
// effectiveVersion converts an sdkSpec into the concrete sdkVersion that the module
// should use. For modules targeting an unreleased SDK (meaning it does not yet have a number)
// it returns android.FutureApiLevel(10000).
func (s sdkSpec) effectiveVersion(ctx android.EarlyModuleContext) (sdkVersion, error) {
    if !s.valid() {
        return s.version, fmt.Errorf("invalid sdk version %q", s.raw)
    }
    // PDK builds substitute the latest numbered SDK for "current"/"".
    if ctx.Config().IsPdkBuild() {
        s = s.forPdkBuild(ctx)
    }
    if s.version.isNumbered() {
        return s.version, nil
    }
    // Unnumbered ("current", "none", etc.): fall back to the default app target SDK.
    return sdkVersion(ctx.Config().DefaultAppTargetSdkInt()), nil
}
// effectiveVersionString converts an sdkSpec into the concrete version string that the module
// should use. For modules targeting an unreleased SDK (meaning it does not yet have a number)
// it returns the codename (P, Q, R, etc.)
func (s sdkSpec) effectiveVersionString(ctx android.EarlyModuleContext) (string, error) {
    ver, err := s.effectiveVersion(ctx)
    // When the effective version is the default target SDK, report the
    // codename string instead of the number.
    if err == nil && int(ver) == ctx.Config().DefaultAppTargetSdkInt() {
        return ctx.Config().DefaultAppTargetSdk(), nil
    }
    return ver.String(), err
}
// defaultJavaLanguageVersion picks the Java language level implied by this
// sdkSpec's effective version: 7 for API <= 23, 8 for API <= 29, 9 otherwise.
// An invalid sdk_version is reported as a property error.
func (s sdkSpec) defaultJavaLanguageVersion(ctx android.EarlyModuleContext) javaVersion {
    sdk, err := s.effectiveVersion(ctx)
    if err != nil {
        ctx.PropertyErrorf("sdk_version", "%s", err)
    }
    switch {
    case sdk <= 23:
        return JAVA_VERSION_7
    case sdk <= 29:
        return JAVA_VERSION_8
    default:
        return JAVA_VERSION_9
    }
}
// sdkSpecFrom parses an sdk_version property string of the form
// "[kind_]version" (plus the special cases "", "none", and "core_platform")
// into an sdkSpec. Unrecognized input yields a spec with kind sdkInvalid.
func sdkSpecFrom(str string) sdkSpec {
    // Special cases first.
    switch str {
    case "":
        return sdkSpec{sdkPrivate, sdkVersionNone, str}
    case "none":
        return sdkSpec{sdkNone, sdkVersionNone, str}
    case "core_platform":
        return sdkSpec{sdkCorePlatform, sdkVersionNone, str}
    }
    // General syntax: [kind_]version, split on the last underscore.
    sep := strings.LastIndex(str, "_")
    if sep == 0 {
        // A leading underscore ("_29") is malformed.
        return sdkSpec{sdkInvalid, sdkVersionNone, str}
    }
    kindString := ""
    if sep > 0 {
        kindString = str[0:sep]
    }
    // When sep == -1, sep+1 == 0 and the whole string is the version.
    versionString := str[sep+1:]
    var kind sdkKind
    switch kindString {
    case "":
        kind = sdkPublic
    case "core":
        kind = sdkCore
    case "system":
        kind = sdkSystem
    case "test":
        kind = sdkTest
    case "module":
        kind = sdkModule
    case "system_server":
        kind = sdkSystemServer
    default:
        return sdkSpec{sdkInvalid, sdkVersionNone, str}
    }
    version := sdkVersionCurrent
    if versionString != "current" {
        i, err := strconv.Atoi(versionString)
        if err != nil {
            return sdkSpec{sdkInvalid, sdkVersionNone, str}
        }
        version = sdkVersion(i)
    }
    return sdkSpec{kind, version, str}
}
// decodeSdkDep translates a module's sdkContext into the concrete sdkDep that
// describes how its SDK dependency is satisfied: prebuilt jar/aidl files from
// prebuilts/sdk for prebuilt-eligible versions, or stub modules built from
// source for everything else.
func decodeSdkDep(ctx android.EarlyModuleContext, sdkContext sdkContext) sdkDep {
    sdkVersion := sdkContext.sdkVersion()
    if !sdkVersion.valid() {
        ctx.PropertyErrorf("sdk_version", "invalid version %q", sdkVersion.raw)
        return sdkDep{}
    }
    if ctx.Config().IsPdkBuild() {
        sdkVersion = sdkVersion.forPdkBuild(ctx)
    }
    if sdkVersion.usePrebuilt(ctx) {
        dir := filepath.Join("prebuilts", "sdk", sdkVersion.version.String(), sdkVersion.kind.String())
        jar := filepath.Join(dir, "android.jar")
        // There's no aidl for other SDKs yet.
        // TODO(77525052): Add aidl files for other SDKs too.
        public_dir := filepath.Join("prebuilts", "sdk", sdkVersion.version.String(), "public")
        aidl := filepath.Join(public_dir, "framework.aidl")
        jarPath := android.ExistentPathForSource(ctx, jar)
        aidlPath := android.ExistentPathForSource(ctx, aidl)
        lambdaStubsPath := android.PathForSource(ctx, config.SdkLambdaStubsPath)
        // With ALLOW_MISSING_DEPENDENCIES, report the prebuilt as an invalid
        // version instead of failing the build outright.
        if (!jarPath.Valid() || !aidlPath.Valid()) && ctx.Config().AllowMissingDependencies() {
            return sdkDep{
                invalidVersion: true,
                bootclasspath:  []string{fmt.Sprintf("sdk_%s_%s_android", sdkVersion.kind, sdkVersion.version.String())},
            }
        }
        if !jarPath.Valid() {
            ctx.PropertyErrorf("sdk_version", "invalid sdk version %q, %q does not exist", sdkVersion.raw, jar)
            return sdkDep{}
        }
        if !aidlPath.Valid() {
            ctx.PropertyErrorf("sdk_version", "invalid sdk version %q, %q does not exist", sdkVersion.raw, aidl)
            return sdkDep{}
        }
        var systemModules string
        if sdkVersion.defaultJavaLanguageVersion(ctx).usesJavaModules() {
            systemModules = "sdk_public_" + sdkVersion.version.String() + "_system_modules"
        }
        return sdkDep{
            useFiles:      true,
            jars:          android.Paths{jarPath.Path(), lambdaStubsPath},
            aidl:          android.OptionalPathForPath(aidlPath.Path()),
            systemModules: systemModules,
        }
    }
    // toModule builds the common source-stub sdkDep for a given list of stub
    // modules, framework resource module, and framework.aidl path.
    toModule := func(modules []string, res string, aidl android.Path) sdkDep {
        return sdkDep{
            useModule:          true,
            bootclasspath:      append(modules, config.DefaultLambdaStubsLibrary),
            systemModules:      "core-current-stubs-system-modules",
            java9Classpath:     modules,
            frameworkResModule: res,
            aidl:               android.OptionalPathForPath(aidl),
        }
    }
    // Ensures that the specified system SDK version is one of BOARD_SYSTEMSDK_VERSIONS (for vendor apks)
    // or PRODUCT_SYSTEMSDK_VERSIONS (for other apks or when BOARD_SYSTEMSDK_VERSIONS is not set)
    if sdkVersion.kind == sdkSystem && sdkVersion.version.isNumbered() {
        allowed_versions := ctx.DeviceConfig().PlatformSystemSdkVersions()
        if ctx.DeviceSpecific() || ctx.SocSpecific() {
            if len(ctx.DeviceConfig().SystemSdkVersions()) > 0 {
                allowed_versions = ctx.DeviceConfig().SystemSdkVersions()
            }
        }
        if len(allowed_versions) > 0 && !android.InList(sdkVersion.version.String(), allowed_versions) {
            ctx.PropertyErrorf("sdk_version", "incompatible sdk version %q. System SDK version should be one of %q",
                sdkVersion.raw, allowed_versions)
        }
    }
    switch sdkVersion.kind {
    case sdkPrivate:
        return sdkDep{
            useDefaultLibs:     true,
            frameworkResModule: "framework-res",
        }
    case sdkNone:
        systemModules := sdkContext.systemModules()
        if systemModules == "" {
            ctx.PropertyErrorf("sdk_version",
                `system_modules is required to be set to a non-empty value when sdk_version is "none", did you mean sdk_version: "core_platform"?`)
        } else if systemModules == "none" {
            return sdkDep{
                noStandardLibs: true,
            }
        }
        return sdkDep{
            useModule:      true,
            noStandardLibs: true,
            systemModules:  systemModules,
            bootclasspath:  []string{systemModules},
        }
    case sdkCorePlatform:
        return sdkDep{
            useDefaultLibs:     true,
            frameworkResModule: "framework-res",
            noFrameworksLibs:   true,
        }
    case sdkPublic:
        return toModule([]string{"android_stubs_current"}, "framework-res", sdkFrameworkAidlPath(ctx))
    case sdkSystem:
        return toModule([]string{"android_system_stubs_current"}, "framework-res", sdkFrameworkAidlPath(ctx))
    case sdkTest:
        return toModule([]string{"android_test_stubs_current"}, "framework-res", sdkFrameworkAidlPath(ctx))
    case sdkCore:
        return toModule([]string{"core.current.stubs"}, "", nil)
    case sdkModule:
        // TODO(146757305): provide .apk and .aidl that have more APIs for modules
        return toModule([]string{"android_module_lib_stubs_current"}, "framework-res", nonUpdatableFrameworkAidlPath(ctx))
    case sdkSystemServer:
        // TODO(146757305): provide .apk and .aidl that have more APIs for modules
        return toModule([]string{"android_system_server_stubs_current"}, "framework-res", sdkFrameworkAidlPath(ctx))
    default:
        panic(fmt.Errorf("invalid sdk %q", sdkVersion.raw))
    }
}
// sdkPreSingletonFactory constructs the pre-singleton that scans prebuilt SDK versions.
func sdkPreSingletonFactory() android.Singleton {
    return sdkPreSingleton{}
}
// sdkPreSingleton discovers, before module processing, which numbered SDK
// levels have prebuilt android.jar files under prebuilts/sdk.
type sdkPreSingleton struct{}

// GenerateBuildActions globs prebuilts/sdk/*/public/android.jar, collects the
// numeric version directories in sorted order, and caches the list on the
// Config under sdkVersionsKey for LatestSdkVersionInt.
func (sdkPreSingleton) GenerateBuildActions(ctx android.SingletonContext) {
    sdkJars, err := ctx.GlobWithDeps("prebuilts/sdk/*/public/android.jar", nil)
    if err != nil {
        ctx.Errorf("failed to glob prebuilts/sdk/*/public/android.jar: %s", err.Error())
    }
    var sdkVersions []int
    for _, sdkJar := range sdkJars {
        dir := filepath.Base(filepath.Dir(filepath.Dir(sdkJar)))
        v, err := strconv.Atoi(dir)
        if scerr, ok := err.(*strconv.NumError); ok && scerr.Err == strconv.ErrSyntax {
            // Non-numeric directories (e.g. "current") are skipped silently.
            continue
        } else if err != nil {
            // Fixed defect: the format string had three verbs (%q, %s, %v)
            // but only two arguments, producing a %!v(MISSING) in the output.
            ctx.Errorf("invalid sdk jar %q, %s", sdkJar, err.Error())
        }
        sdkVersions = append(sdkVersions, v)
    }
    sort.Ints(sdkVersions)
    ctx.Config().Once(sdkVersionsKey, func() interface{} { return sdkVersions })
}
// LatestSdkVersionInt returns the highest numbered prebuilt SDK version found
// by sdkPreSingleton, or 0 when no prebuilt SDKs exist.
func LatestSdkVersionInt(ctx android.EarlyModuleContext) int {
    versions := ctx.Config().Get(sdkVersionsKey).([]int)
    if len(versions) == 0 {
        return 0
    }
    // The list is sorted ascending, so the last entry is the latest.
    return versions[len(versions)-1]
}
// sdkSingletonFactory constructs the singleton that generates framework.aidl
// and the API fingerprint.
func sdkSingletonFactory() android.Singleton {
    return sdkSingleton{}
}

// sdkSingleton emits the build rules for framework.aidl (full and
// non-updatable variants) and api_fingerprint.txt.
type sdkSingleton struct{}

func (sdkSingleton) GenerateBuildActions(ctx android.SingletonContext) {
    // Nothing to generate when stubs come from prebuilts or for PDK builds.
    if ctx.Config().UnbundledBuildUsePrebuiltSdks() || ctx.Config().IsPdkBuild() {
        return
    }
    createSdkFrameworkAidl(ctx)
    createNonUpdatableFrameworkAidl(ctx)
    createAPIFingerprint(ctx)
}
// Create framework.aidl by extracting anything that implements android.os.Parcelable from the SDK stubs modules.
func createSdkFrameworkAidl(ctx android.SingletonContext) {
    stubsModules := []string{
        "android_stubs_current",
        "android_test_stubs_current",
        "android_system_stubs_current",
    }
    combinedAidl := sdkFrameworkAidlPath(ctx)
    // Write to a temp file first and only commit on change, so downstream
    // rules restat instead of rebuilding unnecessarily.
    tempPath := combinedAidl.ReplaceExtension(ctx, "aidl.tmp")
    rule := createFrameworkAidl(stubsModules, tempPath, ctx)
    commitChangeForRestat(rule, tempPath, combinedAidl)
    rule.Build(pctx, ctx, "framework_aidl", "generate framework.aidl")
}
// Creates a version of framework.aidl for the non-updatable part of the platform.
func createNonUpdatableFrameworkAidl(ctx android.SingletonContext) {
    stubsModules := []string{"android_module_lib_stubs_current"}
    combinedAidl := nonUpdatableFrameworkAidlPath(ctx)
    // Same temp-file + restat pattern as createSdkFrameworkAidl.
    tempPath := combinedAidl.ReplaceExtension(ctx, "aidl.tmp")
    rule := createFrameworkAidl(stubsModules, tempPath, ctx)
    commitChangeForRestat(rule, tempPath, combinedAidl)
    rule.Build(pctx, ctx, "framework_non_updatable_aidl", "generate framework_non_updatable.aidl")
}
// createFrameworkAidl builds a RuleBuilder that runs sdkparcelables over the
// header jars of each listed stubs module and concatenates the per-jar .aidl
// outputs (sorted, deduplicated) into path. The caller is responsible for
// invoking Build on the returned rule.
func createFrameworkAidl(stubsModules []string, path android.OutputPath, ctx android.SingletonContext) *android.RuleBuilder {
    stubsJars := make([]android.Paths, len(stubsModules))
    ctx.VisitAllModules(func(module android.Module) {
        // Collect dex jar paths for the modules listed above.
        if j, ok := module.(Dependency); ok {
            name := ctx.ModuleName(module)
            if i := android.IndexList(name, stubsModules); i != -1 {
                stubsJars[i] = j.HeaderJars()
            }
        }
    })
    // Any stubs module that was not found is either tolerated (with
    // ALLOW_MISSING_DEPENDENCIES) or reported as an error.
    var missingDeps []string
    for i := range stubsJars {
        if stubsJars[i] == nil {
            if ctx.Config().AllowMissingDependencies() {
                missingDeps = append(missingDeps, stubsModules[i])
            } else {
                ctx.Errorf("failed to find dex jar path for module %q", stubsModules[i])
            }
        }
    }
    rule := android.NewRuleBuilder()
    rule.MissingDeps(missingDeps)
    var aidls android.Paths
    for _, jars := range stubsJars {
        for _, jar := range jars {
            // One intermediate .aidl per jar, produced by sdkparcelables.
            aidl := android.PathForOutput(ctx, "aidl", pathtools.ReplaceExtension(jar.Base(), "aidl"))
            rule.Command().
                Text("rm -f").Output(aidl)
            rule.Command().
                BuiltTool(ctx, "sdkparcelables").
                Input(jar).
                Output(aidl)
            aidls = append(aidls, aidl)
        }
    }
    // Merge all intermediates: sort -u removes duplicate declarations.
    rule.Command().
        Text("rm -f").Output(path)
    rule.Command().
        Text("cat").
        Inputs(aidls).
        Text("| sort -u >").
        Output(path)
    return rule
}
// sdkFrameworkAidlPath returns the memoized output path of framework.aidl.
func sdkFrameworkAidlPath(ctx android.PathContext) android.OutputPath {
    return ctx.Config().Once(sdkFrameworkAidlPathKey, func() interface{} {
        return android.PathForOutput(ctx, "framework.aidl")
    }).(android.OutputPath)
}
// nonUpdatableFrameworkAidlPath returns the memoized output path of
// framework_non_updatable.aidl.
func nonUpdatableFrameworkAidlPath(ctx android.PathContext) android.OutputPath {
    return ctx.Config().Once(nonUpdatableFrameworkAidlPathKey, func() interface{} {
        return android.PathForOutput(ctx, "framework_non_updatable.aidl")
    }).(android.OutputPath)
}
// Create api_fingerprint.txt: "REL" for release branches, "PDK" for PDK
// builds, an md5 of the current API text files for platform builds, and the
// preview SDK version string for unbundled builds.
func createAPIFingerprint(ctx android.SingletonContext) {
    out := ApiFingerprintPath(ctx)
    rule := android.NewRuleBuilder()
    rule.Command().
        Text("rm -f").Output(out)
    cmd := rule.Command()
    if ctx.Config().PlatformSdkCodename() == "REL" {
        cmd.Text("echo REL >").Output(out)
    } else if ctx.Config().IsPdkBuild() {
        // TODO: get this from the PDK artifacts?
        cmd.Text("echo PDK >").Output(out)
    } else if !ctx.Config().UnbundledBuildUsePrebuiltSdks() {
        // Platform build: fingerprint is the md5 of all *current.txt API files.
        in, err := ctx.GlobWithDeps("frameworks/base/api/*current.txt", nil)
        if err != nil {
            ctx.Errorf("error globbing API files: %s", err)
        }
        cmd.Text("cat").
            Inputs(android.PathsForSource(ctx, in)).
            Text("| md5sum | cut -d' ' -f1 >").
            Output(out)
    } else {
        // Unbundled build
        // TODO: use a prebuilt api_fingerprint.txt from prebuilts/sdk/current.txt once we have one
        cmd.Text("echo").
            Flag(ctx.Config().PlatformPreviewSdkVersion()).
            Text(">").
            Output(out)
    }
    rule.Build(pctx, ctx, "api_fingerprint", "generate api_fingerprint.txt")
}
// ApiFingerprintPath returns the memoized output path of api_fingerprint.txt.
func ApiFingerprintPath(ctx android.PathContext) android.OutputPath {
    return ctx.Config().Once(apiFingerprintPathKey, func() interface{} {
        return android.PathForOutput(ctx, "api_fingerprint.txt")
    }).(android.OutputPath)
}
// sdkMakeVars exports the framework.aidl and API fingerprint paths to the
// make side of the build; skipped when stubs come from prebuilts or for PDK.
func sdkMakeVars(ctx android.MakeVarsContext) {
    if ctx.Config().UnbundledBuildUsePrebuiltSdks() || ctx.Config().IsPdkBuild() {
        return
    }
    ctx.Strict("FRAMEWORK_AIDL", sdkFrameworkAidlPath(ctx).String())
    ctx.Strict("API_FINGERPRINT", ApiFingerprintPath(ctx).String())
}
|
package client
import (
"encoding/json"
"log"
"net/http"
"testing"
"github.com/stretchr/testify/assert"
"github.com/taglme/nfc-goclient/pkg/models"
)
// TestHandleHttpResponseCode verifies that handleHttpResponseCode accepts a
// 200 response and converts an error-status body into a formatted error.
func TestHandleHttpResponseCode(t *testing.T) {
    err := handleHttpResponseCode(http.StatusOK, []byte("message"))
    assert.Nil(t, err)
    resp, err := json.Marshal(models.ErrorResponse{
        Message: "msg",
        Info:    "err",
    })
    if err != nil {
        log.Fatal("Can't marshal test model", err)
    }
    err = handleHttpResponseCode(http.StatusBadRequest, resp)
    assert.EqualError(t, err, "Server responded with an error: msg (err)")
}
// TestNew checks that New wires up every sub-client of the API client.
func TestNew(t *testing.T) {
    c := New("http://my.url")
    assert.NotNil(t, c.Jobs)
    assert.NotNil(t, c.Runs)
    assert.NotNil(t, c.Snippets)
    assert.NotNil(t, c.Tags)
    assert.NotNil(t, c.About)
    assert.NotNil(t, c.Events)
    assert.NotNil(t, c.Adapters)
    assert.NotNil(t, c.Ws)
}
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package compute
import (
"context"
"fmt"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
dclService "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute/beta"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/unstructured"
)
// InstanceGroupManager is a stateless adapter between the DCL beta compute
// InstanceGroupManager resource and its unstructured representation.
type InstanceGroupManager struct{}
func InstanceGroupManagerToUnstructured(r *dclService.InstanceGroupManager) *unstructured.Resource {
u := &unstructured.Resource{
STV: unstructured.ServiceTypeVersion{
Service: "compute",
Version: "beta",
Type: "InstanceGroupManager",
},
Object: make(map[string]interface{}),
}
var rAutoHealingPolicies []interface{}
for _, rAutoHealingPoliciesVal := range r.AutoHealingPolicies {
rAutoHealingPoliciesObject := make(map[string]interface{})
if rAutoHealingPoliciesVal.HealthCheck != nil {
rAutoHealingPoliciesObject["healthCheck"] = *rAutoHealingPoliciesVal.HealthCheck
}
if rAutoHealingPoliciesVal.InitialDelaySec != nil {
rAutoHealingPoliciesObject["initialDelaySec"] = *rAutoHealingPoliciesVal.InitialDelaySec
}
rAutoHealingPolicies = append(rAutoHealingPolicies, rAutoHealingPoliciesObject)
}
u.Object["autoHealingPolicies"] = rAutoHealingPolicies
if r.BaseInstanceName != nil {
u.Object["baseInstanceName"] = *r.BaseInstanceName
}
if r.CreationTimestamp != nil {
u.Object["creationTimestamp"] = *r.CreationTimestamp
}
if r.CurrentActions != nil && r.CurrentActions != dclService.EmptyInstanceGroupManagerCurrentActions {
rCurrentActions := make(map[string]interface{})
if r.CurrentActions.Abandoning != nil {
rCurrentActions["abandoning"] = *r.CurrentActions.Abandoning
}
if r.CurrentActions.Creating != nil {
rCurrentActions["creating"] = *r.CurrentActions.Creating
}
if r.CurrentActions.CreatingWithoutRetries != nil {
rCurrentActions["creatingWithoutRetries"] = *r.CurrentActions.CreatingWithoutRetries
}
if r.CurrentActions.Deleting != nil {
rCurrentActions["deleting"] = *r.CurrentActions.Deleting
}
if r.CurrentActions.None != nil {
rCurrentActions["none"] = *r.CurrentActions.None
}
if r.CurrentActions.Recreating != nil {
rCurrentActions["recreating"] = *r.CurrentActions.Recreating
}
if r.CurrentActions.Refreshing != nil {
rCurrentActions["refreshing"] = *r.CurrentActions.Refreshing
}
if r.CurrentActions.Restarting != nil {
rCurrentActions["restarting"] = *r.CurrentActions.Restarting
}
if r.CurrentActions.Verifying != nil {
rCurrentActions["verifying"] = *r.CurrentActions.Verifying
}
u.Object["currentActions"] = rCurrentActions
}
if r.Description != nil {
u.Object["description"] = *r.Description
}
if r.DistributionPolicy != nil && r.DistributionPolicy != dclService.EmptyInstanceGroupManagerDistributionPolicy {
rDistributionPolicy := make(map[string]interface{})
if r.DistributionPolicy.TargetShape != nil {
rDistributionPolicy["targetShape"] = string(*r.DistributionPolicy.TargetShape)
}
var rDistributionPolicyZones []interface{}
for _, rDistributionPolicyZonesVal := range r.DistributionPolicy.Zones {
rDistributionPolicyZonesObject := make(map[string]interface{})
if rDistributionPolicyZonesVal.Zone != nil {
rDistributionPolicyZonesObject["zone"] = *rDistributionPolicyZonesVal.Zone
}
rDistributionPolicyZones = append(rDistributionPolicyZones, rDistributionPolicyZonesObject)
}
rDistributionPolicy["zones"] = rDistributionPolicyZones
u.Object["distributionPolicy"] = rDistributionPolicy
}
if r.FailoverAction != nil {
u.Object["failoverAction"] = string(*r.FailoverAction)
}
if r.Fingerprint != nil {
u.Object["fingerprint"] = *r.Fingerprint
}
if r.Id != nil {
u.Object["id"] = *r.Id
}
if r.InstanceGroup != nil {
u.Object["instanceGroup"] = *r.InstanceGroup
}
if r.InstanceTemplate != nil {
u.Object["instanceTemplate"] = *r.InstanceTemplate
}
if r.Location != nil {
u.Object["location"] = *r.Location
}
if r.Name != nil {
u.Object["name"] = *r.Name
}
var rNamedPorts []interface{}
for _, rNamedPortsVal := range r.NamedPorts {
rNamedPortsObject := make(map[string]interface{})
if rNamedPortsVal.Name != nil {
rNamedPortsObject["name"] = *rNamedPortsVal.Name
}
if rNamedPortsVal.Port != nil {
rNamedPortsObject["port"] = *rNamedPortsVal.Port
}
rNamedPorts = append(rNamedPorts, rNamedPortsObject)
}
u.Object["namedPorts"] = rNamedPorts
if r.Project != nil {
u.Object["project"] = *r.Project
}
if r.Region != nil {
u.Object["region"] = *r.Region
}
if r.SelfLink != nil {
u.Object["selfLink"] = *r.SelfLink
}
if r.ServiceAccount != nil {
u.Object["serviceAccount"] = *r.ServiceAccount
}
if r.StatefulPolicy != nil && r.StatefulPolicy != dclService.EmptyInstanceGroupManagerStatefulPolicy {
rStatefulPolicy := make(map[string]interface{})
if r.StatefulPolicy.PreservedState != nil && r.StatefulPolicy.PreservedState != dclService.EmptyInstanceGroupManagerStatefulPolicyPreservedState {
rStatefulPolicyPreservedState := make(map[string]interface{})
if r.StatefulPolicy.PreservedState.Disks != nil {
rStatefulPolicyPreservedStateDisks := make(map[string]interface{})
for k, v := range r.StatefulPolicy.PreservedState.Disks {
rStatefulPolicyPreservedStateDisksMap := make(map[string]interface{})
if v.AutoDelete != nil {
rStatefulPolicyPreservedStateDisksMap["autoDelete"] = string(*v.AutoDelete)
}
rStatefulPolicyPreservedStateDisks[k] = rStatefulPolicyPreservedStateDisksMap
}
rStatefulPolicyPreservedState["disks"] = rStatefulPolicyPreservedStateDisks
}
if r.StatefulPolicy.PreservedState.ExternalIps != nil {
rStatefulPolicyPreservedStateExternalIps := make(map[string]interface{})
for k, v := range r.StatefulPolicy.PreservedState.ExternalIps {
rStatefulPolicyPreservedStateExternalIpsMap := make(map[string]interface{})
if v.AutoDelete != nil {
rStatefulPolicyPreservedStateExternalIpsMap["autoDelete"] = string(*v.AutoDelete)
}
rStatefulPolicyPreservedStateExternalIps[k] = rStatefulPolicyPreservedStateExternalIpsMap
}
rStatefulPolicyPreservedState["externalIps"] = rStatefulPolicyPreservedStateExternalIps
}
if r.StatefulPolicy.PreservedState.InternalIps != nil {
rStatefulPolicyPreservedStateInternalIps := make(map[string]interface{})
for k, v := range r.StatefulPolicy.PreservedState.InternalIps {
rStatefulPolicyPreservedStateInternalIpsMap := make(map[string]interface{})
if v.AutoDelete != nil {
rStatefulPolicyPreservedStateInternalIpsMap["autoDelete"] = string(*v.AutoDelete)
}
rStatefulPolicyPreservedStateInternalIps[k] = rStatefulPolicyPreservedStateInternalIpsMap
}
rStatefulPolicyPreservedState["internalIps"] = rStatefulPolicyPreservedStateInternalIps
}
rStatefulPolicy["preservedState"] = rStatefulPolicyPreservedState
}
u.Object["statefulPolicy"] = rStatefulPolicy
}
if r.Status != nil && r.Status != dclService.EmptyInstanceGroupManagerStatus {
rStatus := make(map[string]interface{})
if r.Status.Autoscaler != nil {
rStatus["autoscaler"] = *r.Status.Autoscaler
}
if r.Status.IsStable != nil {
rStatus["isStable"] = *r.Status.IsStable
}
if r.Status.Stateful != nil && r.Status.Stateful != dclService.EmptyInstanceGroupManagerStatusStateful {
rStatusStateful := make(map[string]interface{})
if r.Status.Stateful.HasStatefulConfig != nil {
rStatusStateful["hasStatefulConfig"] = *r.Status.Stateful.HasStatefulConfig
}
if r.Status.Stateful.IsStateful != nil {
rStatusStateful["isStateful"] = *r.Status.Stateful.IsStateful
}
if r.Status.Stateful.PerInstanceConfigs != nil && r.Status.Stateful.PerInstanceConfigs != dclService.EmptyInstanceGroupManagerStatusStatefulPerInstanceConfigs {
rStatusStatefulPerInstanceConfigs := make(map[string]interface{})
if r.Status.Stateful.PerInstanceConfigs.AllEffective != nil {
rStatusStatefulPerInstanceConfigs["allEffective"] = *r.Status.Stateful.PerInstanceConfigs.AllEffective
}
rStatusStateful["perInstanceConfigs"] = rStatusStatefulPerInstanceConfigs
}
rStatus["stateful"] = rStatusStateful
}
if r.Status.VersionTarget != nil && r.Status.VersionTarget != dclService.EmptyInstanceGroupManagerStatusVersionTarget {
rStatusVersionTarget := make(map[string]interface{})
if r.Status.VersionTarget.IsReached != nil {
rStatusVersionTarget["isReached"] = *r.Status.VersionTarget.IsReached
}
rStatus["versionTarget"] = rStatusVersionTarget
}
u.Object["status"] = rStatus
}
var rTargetPools []interface{}
for _, rTargetPoolsVal := range r.TargetPools {
rTargetPools = append(rTargetPools, rTargetPoolsVal)
}
u.Object["targetPools"] = rTargetPools
if r.TargetSize != nil {
u.Object["targetSize"] = *r.TargetSize
}
if r.UpdatePolicy != nil && r.UpdatePolicy != dclService.EmptyInstanceGroupManagerUpdatePolicy {
rUpdatePolicy := make(map[string]interface{})
if r.UpdatePolicy.InstanceRedistributionType != nil {
rUpdatePolicy["instanceRedistributionType"] = string(*r.UpdatePolicy.InstanceRedistributionType)
}
if r.UpdatePolicy.MaxSurge != nil && r.UpdatePolicy.MaxSurge != dclService.EmptyInstanceGroupManagerUpdatePolicyMaxSurge {
rUpdatePolicyMaxSurge := make(map[string]interface{})
if r.UpdatePolicy.MaxSurge.Calculated != nil {
rUpdatePolicyMaxSurge["calculated"] = *r.UpdatePolicy.MaxSurge.Calculated
}
if r.UpdatePolicy.MaxSurge.Fixed != nil {
rUpdatePolicyMaxSurge["fixed"] = *r.UpdatePolicy.MaxSurge.Fixed
}
if r.UpdatePolicy.MaxSurge.Percent != nil {
rUpdatePolicyMaxSurge["percent"] = *r.UpdatePolicy.MaxSurge.Percent
}
rUpdatePolicy["maxSurge"] = rUpdatePolicyMaxSurge
}
if r.UpdatePolicy.MaxUnavailable != nil && r.UpdatePolicy.MaxUnavailable != dclService.EmptyInstanceGroupManagerUpdatePolicyMaxUnavailable {
rUpdatePolicyMaxUnavailable := make(map[string]interface{})
if r.UpdatePolicy.MaxUnavailable.Calculated != nil {
rUpdatePolicyMaxUnavailable["calculated"] = *r.UpdatePolicy.MaxUnavailable.Calculated
}
if r.UpdatePolicy.MaxUnavailable.Fixed != nil {
rUpdatePolicyMaxUnavailable["fixed"] = *r.UpdatePolicy.MaxUnavailable.Fixed
}
if r.UpdatePolicy.MaxUnavailable.Percent != nil {
rUpdatePolicyMaxUnavailable["percent"] = *r.UpdatePolicy.MaxUnavailable.Percent
}
rUpdatePolicy["maxUnavailable"] = rUpdatePolicyMaxUnavailable
}
if r.UpdatePolicy.MinReadySec != nil {
rUpdatePolicy["minReadySec"] = *r.UpdatePolicy.MinReadySec
}
if r.UpdatePolicy.MinimalAction != nil {
rUpdatePolicy["minimalAction"] = string(*r.UpdatePolicy.MinimalAction)
}
if r.UpdatePolicy.MostDisruptiveAllowedAction != nil {
rUpdatePolicy["mostDisruptiveAllowedAction"] = string(*r.UpdatePolicy.MostDisruptiveAllowedAction)
}
if r.UpdatePolicy.ReplacementMethod != nil {
rUpdatePolicy["replacementMethod"] = string(*r.UpdatePolicy.ReplacementMethod)
}
if r.UpdatePolicy.Type != nil {
rUpdatePolicy["type"] = string(*r.UpdatePolicy.Type)
}
u.Object["updatePolicy"] = rUpdatePolicy
}
var rVersions []interface{}
for _, rVersionsVal := range r.Versions {
rVersionsObject := make(map[string]interface{})
if rVersionsVal.InstanceTemplate != nil {
rVersionsObject["instanceTemplate"] = *rVersionsVal.InstanceTemplate
}
if rVersionsVal.Name != nil {
rVersionsObject["name"] = *rVersionsVal.Name
}
if rVersionsVal.TargetSize != nil && rVersionsVal.TargetSize != dclService.EmptyInstanceGroupManagerVersionsTargetSize {
rVersionsValTargetSize := make(map[string]interface{})
if rVersionsVal.TargetSize.Calculated != nil {
rVersionsValTargetSize["calculated"] = *rVersionsVal.TargetSize.Calculated
}
if rVersionsVal.TargetSize.Fixed != nil {
rVersionsValTargetSize["fixed"] = *rVersionsVal.TargetSize.Fixed
}
if rVersionsVal.TargetSize.Percent != nil {
rVersionsValTargetSize["percent"] = *rVersionsVal.TargetSize.Percent
}
rVersionsObject["targetSize"] = rVersionsValTargetSize
}
rVersions = append(rVersions, rVersionsObject)
}
u.Object["versions"] = rVersions
if r.Zone != nil {
u.Object["zone"] = *r.Zone
}
return u
}
// UnstructuredToInstanceGroupManager converts an unstructured resource back
// into a typed *dclService.InstanceGroupManager.
//
// The conversion is strictly field-by-field: for every key present in
// u.Object the value is type-asserted to the expected Go representation
// (string, int64, bool, []interface{} or map[string]interface{}); any
// mismatch aborts the whole conversion with an error naming the offending
// field. Keys that are absent leave the corresponding struct field nil, so a
// partially populated unstructured object yields a partially populated
// resource. This function is generated code — its shape intentionally mirrors
// InstanceGroupManagerToUnstructured.
func UnstructuredToInstanceGroupManager(u *unstructured.Resource) (*dclService.InstanceGroupManager, error) {
	r := &dclService.InstanceGroupManager{}
	// Repeated message field: list of auto-healing policies.
	if _, ok := u.Object["autoHealingPolicies"]; ok {
		if s, ok := u.Object["autoHealingPolicies"].([]interface{}); ok {
			for _, o := range s {
				if objval, ok := o.(map[string]interface{}); ok {
					var rAutoHealingPolicies dclService.InstanceGroupManagerAutoHealingPolicies
					if _, ok := objval["healthCheck"]; ok {
						if s, ok := objval["healthCheck"].(string); ok {
							rAutoHealingPolicies.HealthCheck = dcl.String(s)
						} else {
							return nil, fmt.Errorf("rAutoHealingPolicies.HealthCheck: expected string")
						}
					}
					if _, ok := objval["initialDelaySec"]; ok {
						if i, ok := objval["initialDelaySec"].(int64); ok {
							rAutoHealingPolicies.InitialDelaySec = dcl.Int64(i)
						} else {
							return nil, fmt.Errorf("rAutoHealingPolicies.InitialDelaySec: expected int64")
						}
					}
					r.AutoHealingPolicies = append(r.AutoHealingPolicies, rAutoHealingPolicies)
				}
			}
		} else {
			return nil, fmt.Errorf("r.AutoHealingPolicies: expected []interface{}")
		}
	}
	if _, ok := u.Object["baseInstanceName"]; ok {
		if s, ok := u.Object["baseInstanceName"].(string); ok {
			r.BaseInstanceName = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.BaseInstanceName: expected string")
		}
	}
	if _, ok := u.Object["creationTimestamp"]; ok {
		if s, ok := u.Object["creationTimestamp"].(string); ok {
			r.CreationTimestamp = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.CreationTimestamp: expected string")
		}
	}
	// Nested message: per-action instance counts (output-only status block).
	if _, ok := u.Object["currentActions"]; ok {
		if rCurrentActions, ok := u.Object["currentActions"].(map[string]interface{}); ok {
			r.CurrentActions = &dclService.InstanceGroupManagerCurrentActions{}
			if _, ok := rCurrentActions["abandoning"]; ok {
				if i, ok := rCurrentActions["abandoning"].(int64); ok {
					r.CurrentActions.Abandoning = dcl.Int64(i)
				} else {
					return nil, fmt.Errorf("r.CurrentActions.Abandoning: expected int64")
				}
			}
			if _, ok := rCurrentActions["creating"]; ok {
				if i, ok := rCurrentActions["creating"].(int64); ok {
					r.CurrentActions.Creating = dcl.Int64(i)
				} else {
					return nil, fmt.Errorf("r.CurrentActions.Creating: expected int64")
				}
			}
			if _, ok := rCurrentActions["creatingWithoutRetries"]; ok {
				if i, ok := rCurrentActions["creatingWithoutRetries"].(int64); ok {
					r.CurrentActions.CreatingWithoutRetries = dcl.Int64(i)
				} else {
					return nil, fmt.Errorf("r.CurrentActions.CreatingWithoutRetries: expected int64")
				}
			}
			if _, ok := rCurrentActions["deleting"]; ok {
				if i, ok := rCurrentActions["deleting"].(int64); ok {
					r.CurrentActions.Deleting = dcl.Int64(i)
				} else {
					return nil, fmt.Errorf("r.CurrentActions.Deleting: expected int64")
				}
			}
			if _, ok := rCurrentActions["none"]; ok {
				if i, ok := rCurrentActions["none"].(int64); ok {
					r.CurrentActions.None = dcl.Int64(i)
				} else {
					return nil, fmt.Errorf("r.CurrentActions.None: expected int64")
				}
			}
			if _, ok := rCurrentActions["recreating"]; ok {
				if i, ok := rCurrentActions["recreating"].(int64); ok {
					r.CurrentActions.Recreating = dcl.Int64(i)
				} else {
					return nil, fmt.Errorf("r.CurrentActions.Recreating: expected int64")
				}
			}
			if _, ok := rCurrentActions["refreshing"]; ok {
				if i, ok := rCurrentActions["refreshing"].(int64); ok {
					r.CurrentActions.Refreshing = dcl.Int64(i)
				} else {
					return nil, fmt.Errorf("r.CurrentActions.Refreshing: expected int64")
				}
			}
			if _, ok := rCurrentActions["restarting"]; ok {
				if i, ok := rCurrentActions["restarting"].(int64); ok {
					r.CurrentActions.Restarting = dcl.Int64(i)
				} else {
					return nil, fmt.Errorf("r.CurrentActions.Restarting: expected int64")
				}
			}
			if _, ok := rCurrentActions["verifying"]; ok {
				if i, ok := rCurrentActions["verifying"].(int64); ok {
					r.CurrentActions.Verifying = dcl.Int64(i)
				} else {
					return nil, fmt.Errorf("r.CurrentActions.Verifying: expected int64")
				}
			}
		} else {
			return nil, fmt.Errorf("r.CurrentActions: expected map[string]interface{}")
		}
	}
	if _, ok := u.Object["description"]; ok {
		if s, ok := u.Object["description"].(string); ok {
			r.Description = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.Description: expected string")
		}
	}
	// Nested message: distribution policy (enum target shape + zone list).
	if _, ok := u.Object["distributionPolicy"]; ok {
		if rDistributionPolicy, ok := u.Object["distributionPolicy"].(map[string]interface{}); ok {
			r.DistributionPolicy = &dclService.InstanceGroupManagerDistributionPolicy{}
			if _, ok := rDistributionPolicy["targetShape"]; ok {
				if s, ok := rDistributionPolicy["targetShape"].(string); ok {
					r.DistributionPolicy.TargetShape = dclService.InstanceGroupManagerDistributionPolicyTargetShapeEnumRef(s)
				} else {
					return nil, fmt.Errorf("r.DistributionPolicy.TargetShape: expected string")
				}
			}
			if _, ok := rDistributionPolicy["zones"]; ok {
				if s, ok := rDistributionPolicy["zones"].([]interface{}); ok {
					for _, o := range s {
						if objval, ok := o.(map[string]interface{}); ok {
							var rDistributionPolicyZones dclService.InstanceGroupManagerDistributionPolicyZones
							if _, ok := objval["zone"]; ok {
								if s, ok := objval["zone"].(string); ok {
									rDistributionPolicyZones.Zone = dcl.String(s)
								} else {
									return nil, fmt.Errorf("rDistributionPolicyZones.Zone: expected string")
								}
							}
							r.DistributionPolicy.Zones = append(r.DistributionPolicy.Zones, rDistributionPolicyZones)
						}
					}
				} else {
					return nil, fmt.Errorf("r.DistributionPolicy.Zones: expected []interface{}")
				}
			}
		} else {
			return nil, fmt.Errorf("r.DistributionPolicy: expected map[string]interface{}")
		}
	}
	if _, ok := u.Object["failoverAction"]; ok {
		if s, ok := u.Object["failoverAction"].(string); ok {
			r.FailoverAction = dclService.InstanceGroupManagerFailoverActionEnumRef(s)
		} else {
			return nil, fmt.Errorf("r.FailoverAction: expected string")
		}
	}
	if _, ok := u.Object["fingerprint"]; ok {
		if s, ok := u.Object["fingerprint"].(string); ok {
			r.Fingerprint = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.Fingerprint: expected string")
		}
	}
	if _, ok := u.Object["id"]; ok {
		if i, ok := u.Object["id"].(int64); ok {
			r.Id = dcl.Int64(i)
		} else {
			return nil, fmt.Errorf("r.Id: expected int64")
		}
	}
	if _, ok := u.Object["instanceGroup"]; ok {
		if s, ok := u.Object["instanceGroup"].(string); ok {
			r.InstanceGroup = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.InstanceGroup: expected string")
		}
	}
	if _, ok := u.Object["instanceTemplate"]; ok {
		if s, ok := u.Object["instanceTemplate"].(string); ok {
			r.InstanceTemplate = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.InstanceTemplate: expected string")
		}
	}
	if _, ok := u.Object["location"]; ok {
		if s, ok := u.Object["location"].(string); ok {
			r.Location = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.Location: expected string")
		}
	}
	if _, ok := u.Object["name"]; ok {
		if s, ok := u.Object["name"].(string); ok {
			r.Name = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.Name: expected string")
		}
	}
	// Repeated message field: named ports (name + port number pairs).
	if _, ok := u.Object["namedPorts"]; ok {
		if s, ok := u.Object["namedPorts"].([]interface{}); ok {
			for _, o := range s {
				if objval, ok := o.(map[string]interface{}); ok {
					var rNamedPorts dclService.InstanceGroupManagerNamedPorts
					if _, ok := objval["name"]; ok {
						if s, ok := objval["name"].(string); ok {
							rNamedPorts.Name = dcl.String(s)
						} else {
							return nil, fmt.Errorf("rNamedPorts.Name: expected string")
						}
					}
					if _, ok := objval["port"]; ok {
						if i, ok := objval["port"].(int64); ok {
							rNamedPorts.Port = dcl.Int64(i)
						} else {
							return nil, fmt.Errorf("rNamedPorts.Port: expected int64")
						}
					}
					r.NamedPorts = append(r.NamedPorts, rNamedPorts)
				}
			}
		} else {
			return nil, fmt.Errorf("r.NamedPorts: expected []interface{}")
		}
	}
	if _, ok := u.Object["project"]; ok {
		if s, ok := u.Object["project"].(string); ok {
			r.Project = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.Project: expected string")
		}
	}
	if _, ok := u.Object["region"]; ok {
		if s, ok := u.Object["region"].(string); ok {
			r.Region = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.Region: expected string")
		}
	}
	if _, ok := u.Object["selfLink"]; ok {
		if s, ok := u.Object["selfLink"].(string); ok {
			r.SelfLink = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.SelfLink: expected string")
		}
	}
	if _, ok := u.Object["serviceAccount"]; ok {
		if s, ok := u.Object["serviceAccount"].(string); ok {
			r.ServiceAccount = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.ServiceAccount: expected string")
		}
	}
	// Nested message: stateful policy with three keyed maps (disks,
	// externalIps, internalIps), each value carrying an autoDelete enum.
	if _, ok := u.Object["statefulPolicy"]; ok {
		if rStatefulPolicy, ok := u.Object["statefulPolicy"].(map[string]interface{}); ok {
			r.StatefulPolicy = &dclService.InstanceGroupManagerStatefulPolicy{}
			if _, ok := rStatefulPolicy["preservedState"]; ok {
				if rStatefulPolicyPreservedState, ok := rStatefulPolicy["preservedState"].(map[string]interface{}); ok {
					r.StatefulPolicy.PreservedState = &dclService.InstanceGroupManagerStatefulPolicyPreservedState{}
					if _, ok := rStatefulPolicyPreservedState["disks"]; ok {
						if rStatefulPolicyPreservedStateDisks, ok := rStatefulPolicyPreservedState["disks"].(map[string]interface{}); ok {
							m := make(map[string]dclService.InstanceGroupManagerStatefulPolicyPreservedStateDisks)
							for k, v := range rStatefulPolicyPreservedStateDisks {
								if objval, ok := v.(map[string]interface{}); ok {
									var rStatefulPolicyPreservedStateDisksObj dclService.InstanceGroupManagerStatefulPolicyPreservedStateDisks
									if _, ok := objval["autoDelete"]; ok {
										if s, ok := objval["autoDelete"].(string); ok {
											rStatefulPolicyPreservedStateDisksObj.AutoDelete = dclService.InstanceGroupManagerStatefulPolicyPreservedStateDisksAutoDeleteEnumRef(s)
										} else {
											return nil, fmt.Errorf("rStatefulPolicyPreservedStateDisksObj.AutoDelete: expected string")
										}
									}
									m[k] = rStatefulPolicyPreservedStateDisksObj
								} else {
									return nil, fmt.Errorf("r.StatefulPolicy.PreservedState.Disks: expected map[string]interface{}")
								}
							}
							r.StatefulPolicy.PreservedState.Disks = m
						} else {
							return nil, fmt.Errorf("r.StatefulPolicy.PreservedState.Disks: expected map[string]interface{}")
						}
					}
					if _, ok := rStatefulPolicyPreservedState["externalIps"]; ok {
						if rStatefulPolicyPreservedStateExternalIps, ok := rStatefulPolicyPreservedState["externalIps"].(map[string]interface{}); ok {
							m := make(map[string]dclService.InstanceGroupManagerStatefulPolicyPreservedStateExternalIps)
							for k, v := range rStatefulPolicyPreservedStateExternalIps {
								if objval, ok := v.(map[string]interface{}); ok {
									var rStatefulPolicyPreservedStateExternalIpsObj dclService.InstanceGroupManagerStatefulPolicyPreservedStateExternalIps
									if _, ok := objval["autoDelete"]; ok {
										if s, ok := objval["autoDelete"].(string); ok {
											rStatefulPolicyPreservedStateExternalIpsObj.AutoDelete = dclService.InstanceGroupManagerStatefulPolicyPreservedStateExternalIpsAutoDeleteEnumRef(s)
										} else {
											return nil, fmt.Errorf("rStatefulPolicyPreservedStateExternalIpsObj.AutoDelete: expected string")
										}
									}
									m[k] = rStatefulPolicyPreservedStateExternalIpsObj
								} else {
									return nil, fmt.Errorf("r.StatefulPolicy.PreservedState.ExternalIps: expected map[string]interface{}")
								}
							}
							r.StatefulPolicy.PreservedState.ExternalIps = m
						} else {
							return nil, fmt.Errorf("r.StatefulPolicy.PreservedState.ExternalIps: expected map[string]interface{}")
						}
					}
					if _, ok := rStatefulPolicyPreservedState["internalIps"]; ok {
						if rStatefulPolicyPreservedStateInternalIps, ok := rStatefulPolicyPreservedState["internalIps"].(map[string]interface{}); ok {
							m := make(map[string]dclService.InstanceGroupManagerStatefulPolicyPreservedStateInternalIps)
							for k, v := range rStatefulPolicyPreservedStateInternalIps {
								if objval, ok := v.(map[string]interface{}); ok {
									var rStatefulPolicyPreservedStateInternalIpsObj dclService.InstanceGroupManagerStatefulPolicyPreservedStateInternalIps
									if _, ok := objval["autoDelete"]; ok {
										if s, ok := objval["autoDelete"].(string); ok {
											rStatefulPolicyPreservedStateInternalIpsObj.AutoDelete = dclService.InstanceGroupManagerStatefulPolicyPreservedStateInternalIpsAutoDeleteEnumRef(s)
										} else {
											return nil, fmt.Errorf("rStatefulPolicyPreservedStateInternalIpsObj.AutoDelete: expected string")
										}
									}
									m[k] = rStatefulPolicyPreservedStateInternalIpsObj
								} else {
									return nil, fmt.Errorf("r.StatefulPolicy.PreservedState.InternalIps: expected map[string]interface{}")
								}
							}
							r.StatefulPolicy.PreservedState.InternalIps = m
						} else {
							return nil, fmt.Errorf("r.StatefulPolicy.PreservedState.InternalIps: expected map[string]interface{}")
						}
					}
				} else {
					return nil, fmt.Errorf("r.StatefulPolicy.PreservedState: expected map[string]interface{}")
				}
			}
		} else {
			return nil, fmt.Errorf("r.StatefulPolicy: expected map[string]interface{}")
		}
	}
	// Nested message: output-only status (stability, stateful info, version target).
	if _, ok := u.Object["status"]; ok {
		if rStatus, ok := u.Object["status"].(map[string]interface{}); ok {
			r.Status = &dclService.InstanceGroupManagerStatus{}
			if _, ok := rStatus["autoscaler"]; ok {
				if s, ok := rStatus["autoscaler"].(string); ok {
					r.Status.Autoscaler = dcl.String(s)
				} else {
					return nil, fmt.Errorf("r.Status.Autoscaler: expected string")
				}
			}
			if _, ok := rStatus["isStable"]; ok {
				if b, ok := rStatus["isStable"].(bool); ok {
					r.Status.IsStable = dcl.Bool(b)
				} else {
					return nil, fmt.Errorf("r.Status.IsStable: expected bool")
				}
			}
			if _, ok := rStatus["stateful"]; ok {
				if rStatusStateful, ok := rStatus["stateful"].(map[string]interface{}); ok {
					r.Status.Stateful = &dclService.InstanceGroupManagerStatusStateful{}
					if _, ok := rStatusStateful["hasStatefulConfig"]; ok {
						if b, ok := rStatusStateful["hasStatefulConfig"].(bool); ok {
							r.Status.Stateful.HasStatefulConfig = dcl.Bool(b)
						} else {
							return nil, fmt.Errorf("r.Status.Stateful.HasStatefulConfig: expected bool")
						}
					}
					if _, ok := rStatusStateful["isStateful"]; ok {
						if b, ok := rStatusStateful["isStateful"].(bool); ok {
							r.Status.Stateful.IsStateful = dcl.Bool(b)
						} else {
							return nil, fmt.Errorf("r.Status.Stateful.IsStateful: expected bool")
						}
					}
					if _, ok := rStatusStateful["perInstanceConfigs"]; ok {
						if rStatusStatefulPerInstanceConfigs, ok := rStatusStateful["perInstanceConfigs"].(map[string]interface{}); ok {
							r.Status.Stateful.PerInstanceConfigs = &dclService.InstanceGroupManagerStatusStatefulPerInstanceConfigs{}
							if _, ok := rStatusStatefulPerInstanceConfigs["allEffective"]; ok {
								if b, ok := rStatusStatefulPerInstanceConfigs["allEffective"].(bool); ok {
									r.Status.Stateful.PerInstanceConfigs.AllEffective = dcl.Bool(b)
								} else {
									return nil, fmt.Errorf("r.Status.Stateful.PerInstanceConfigs.AllEffective: expected bool")
								}
							}
						} else {
							return nil, fmt.Errorf("r.Status.Stateful.PerInstanceConfigs: expected map[string]interface{}")
						}
					}
				} else {
					return nil, fmt.Errorf("r.Status.Stateful: expected map[string]interface{}")
				}
			}
			if _, ok := rStatus["versionTarget"]; ok {
				if rStatusVersionTarget, ok := rStatus["versionTarget"].(map[string]interface{}); ok {
					r.Status.VersionTarget = &dclService.InstanceGroupManagerStatusVersionTarget{}
					if _, ok := rStatusVersionTarget["isReached"]; ok {
						if b, ok := rStatusVersionTarget["isReached"].(bool); ok {
							r.Status.VersionTarget.IsReached = dcl.Bool(b)
						} else {
							return nil, fmt.Errorf("r.Status.VersionTarget.IsReached: expected bool")
						}
					}
				} else {
					return nil, fmt.Errorf("r.Status.VersionTarget: expected map[string]interface{}")
				}
			}
		} else {
			return nil, fmt.Errorf("r.Status: expected map[string]interface{}")
		}
	}
	// String list; note non-string elements are silently skipped (generated behavior).
	if _, ok := u.Object["targetPools"]; ok {
		if s, ok := u.Object["targetPools"].([]interface{}); ok {
			for _, ss := range s {
				if strval, ok := ss.(string); ok {
					r.TargetPools = append(r.TargetPools, strval)
				}
			}
		} else {
			return nil, fmt.Errorf("r.TargetPools: expected []interface{}")
		}
	}
	if _, ok := u.Object["targetSize"]; ok {
		if i, ok := u.Object["targetSize"].(int64); ok {
			r.TargetSize = dcl.Int64(i)
		} else {
			return nil, fmt.Errorf("r.TargetSize: expected int64")
		}
	}
	// Nested message: rolling-update policy (enums + maxSurge/maxUnavailable counts).
	if _, ok := u.Object["updatePolicy"]; ok {
		if rUpdatePolicy, ok := u.Object["updatePolicy"].(map[string]interface{}); ok {
			r.UpdatePolicy = &dclService.InstanceGroupManagerUpdatePolicy{}
			if _, ok := rUpdatePolicy["instanceRedistributionType"]; ok {
				if s, ok := rUpdatePolicy["instanceRedistributionType"].(string); ok {
					r.UpdatePolicy.InstanceRedistributionType = dclService.InstanceGroupManagerUpdatePolicyInstanceRedistributionTypeEnumRef(s)
				} else {
					return nil, fmt.Errorf("r.UpdatePolicy.InstanceRedistributionType: expected string")
				}
			}
			if _, ok := rUpdatePolicy["maxSurge"]; ok {
				if rUpdatePolicyMaxSurge, ok := rUpdatePolicy["maxSurge"].(map[string]interface{}); ok {
					r.UpdatePolicy.MaxSurge = &dclService.InstanceGroupManagerUpdatePolicyMaxSurge{}
					if _, ok := rUpdatePolicyMaxSurge["calculated"]; ok {
						if i, ok := rUpdatePolicyMaxSurge["calculated"].(int64); ok {
							r.UpdatePolicy.MaxSurge.Calculated = dcl.Int64(i)
						} else {
							return nil, fmt.Errorf("r.UpdatePolicy.MaxSurge.Calculated: expected int64")
						}
					}
					if _, ok := rUpdatePolicyMaxSurge["fixed"]; ok {
						if i, ok := rUpdatePolicyMaxSurge["fixed"].(int64); ok {
							r.UpdatePolicy.MaxSurge.Fixed = dcl.Int64(i)
						} else {
							return nil, fmt.Errorf("r.UpdatePolicy.MaxSurge.Fixed: expected int64")
						}
					}
					if _, ok := rUpdatePolicyMaxSurge["percent"]; ok {
						if i, ok := rUpdatePolicyMaxSurge["percent"].(int64); ok {
							r.UpdatePolicy.MaxSurge.Percent = dcl.Int64(i)
						} else {
							return nil, fmt.Errorf("r.UpdatePolicy.MaxSurge.Percent: expected int64")
						}
					}
				} else {
					return nil, fmt.Errorf("r.UpdatePolicy.MaxSurge: expected map[string]interface{}")
				}
			}
			if _, ok := rUpdatePolicy["maxUnavailable"]; ok {
				if rUpdatePolicyMaxUnavailable, ok := rUpdatePolicy["maxUnavailable"].(map[string]interface{}); ok {
					r.UpdatePolicy.MaxUnavailable = &dclService.InstanceGroupManagerUpdatePolicyMaxUnavailable{}
					if _, ok := rUpdatePolicyMaxUnavailable["calculated"]; ok {
						if i, ok := rUpdatePolicyMaxUnavailable["calculated"].(int64); ok {
							r.UpdatePolicy.MaxUnavailable.Calculated = dcl.Int64(i)
						} else {
							return nil, fmt.Errorf("r.UpdatePolicy.MaxUnavailable.Calculated: expected int64")
						}
					}
					if _, ok := rUpdatePolicyMaxUnavailable["fixed"]; ok {
						if i, ok := rUpdatePolicyMaxUnavailable["fixed"].(int64); ok {
							r.UpdatePolicy.MaxUnavailable.Fixed = dcl.Int64(i)
						} else {
							return nil, fmt.Errorf("r.UpdatePolicy.MaxUnavailable.Fixed: expected int64")
						}
					}
					if _, ok := rUpdatePolicyMaxUnavailable["percent"]; ok {
						if i, ok := rUpdatePolicyMaxUnavailable["percent"].(int64); ok {
							r.UpdatePolicy.MaxUnavailable.Percent = dcl.Int64(i)
						} else {
							return nil, fmt.Errorf("r.UpdatePolicy.MaxUnavailable.Percent: expected int64")
						}
					}
				} else {
					return nil, fmt.Errorf("r.UpdatePolicy.MaxUnavailable: expected map[string]interface{}")
				}
			}
			if _, ok := rUpdatePolicy["minReadySec"]; ok {
				if i, ok := rUpdatePolicy["minReadySec"].(int64); ok {
					r.UpdatePolicy.MinReadySec = dcl.Int64(i)
				} else {
					return nil, fmt.Errorf("r.UpdatePolicy.MinReadySec: expected int64")
				}
			}
			if _, ok := rUpdatePolicy["minimalAction"]; ok {
				if s, ok := rUpdatePolicy["minimalAction"].(string); ok {
					r.UpdatePolicy.MinimalAction = dclService.InstanceGroupManagerUpdatePolicyMinimalActionEnumRef(s)
				} else {
					return nil, fmt.Errorf("r.UpdatePolicy.MinimalAction: expected string")
				}
			}
			if _, ok := rUpdatePolicy["mostDisruptiveAllowedAction"]; ok {
				if s, ok := rUpdatePolicy["mostDisruptiveAllowedAction"].(string); ok {
					r.UpdatePolicy.MostDisruptiveAllowedAction = dclService.InstanceGroupManagerUpdatePolicyMostDisruptiveAllowedActionEnumRef(s)
				} else {
					return nil, fmt.Errorf("r.UpdatePolicy.MostDisruptiveAllowedAction: expected string")
				}
			}
			if _, ok := rUpdatePolicy["replacementMethod"]; ok {
				if s, ok := rUpdatePolicy["replacementMethod"].(string); ok {
					r.UpdatePolicy.ReplacementMethod = dclService.InstanceGroupManagerUpdatePolicyReplacementMethodEnumRef(s)
				} else {
					return nil, fmt.Errorf("r.UpdatePolicy.ReplacementMethod: expected string")
				}
			}
			if _, ok := rUpdatePolicy["type"]; ok {
				if s, ok := rUpdatePolicy["type"].(string); ok {
					r.UpdatePolicy.Type = dclService.InstanceGroupManagerUpdatePolicyTypeEnumRef(s)
				} else {
					return nil, fmt.Errorf("r.UpdatePolicy.Type: expected string")
				}
			}
		} else {
			return nil, fmt.Errorf("r.UpdatePolicy: expected map[string]interface{}")
		}
	}
	// Repeated message field: instance template versions with optional target sizes.
	if _, ok := u.Object["versions"]; ok {
		if s, ok := u.Object["versions"].([]interface{}); ok {
			for _, o := range s {
				if objval, ok := o.(map[string]interface{}); ok {
					var rVersions dclService.InstanceGroupManagerVersions
					if _, ok := objval["instanceTemplate"]; ok {
						if s, ok := objval["instanceTemplate"].(string); ok {
							rVersions.InstanceTemplate = dcl.String(s)
						} else {
							return nil, fmt.Errorf("rVersions.InstanceTemplate: expected string")
						}
					}
					if _, ok := objval["name"]; ok {
						if s, ok := objval["name"].(string); ok {
							rVersions.Name = dcl.String(s)
						} else {
							return nil, fmt.Errorf("rVersions.Name: expected string")
						}
					}
					if _, ok := objval["targetSize"]; ok {
						if rVersionsTargetSize, ok := objval["targetSize"].(map[string]interface{}); ok {
							rVersions.TargetSize = &dclService.InstanceGroupManagerVersionsTargetSize{}
							if _, ok := rVersionsTargetSize["calculated"]; ok {
								if i, ok := rVersionsTargetSize["calculated"].(int64); ok {
									rVersions.TargetSize.Calculated = dcl.Int64(i)
								} else {
									return nil, fmt.Errorf("rVersions.TargetSize.Calculated: expected int64")
								}
							}
							if _, ok := rVersionsTargetSize["fixed"]; ok {
								if i, ok := rVersionsTargetSize["fixed"].(int64); ok {
									rVersions.TargetSize.Fixed = dcl.Int64(i)
								} else {
									return nil, fmt.Errorf("rVersions.TargetSize.Fixed: expected int64")
								}
							}
							if _, ok := rVersionsTargetSize["percent"]; ok {
								if i, ok := rVersionsTargetSize["percent"].(int64); ok {
									rVersions.TargetSize.Percent = dcl.Int64(i)
								} else {
									return nil, fmt.Errorf("rVersions.TargetSize.Percent: expected int64")
								}
							}
						} else {
							return nil, fmt.Errorf("rVersions.TargetSize: expected map[string]interface{}")
						}
					}
					r.Versions = append(r.Versions, rVersions)
				}
			}
		} else {
			return nil, fmt.Errorf("r.Versions: expected []interface{}")
		}
	}
	if _, ok := u.Object["zone"]; ok {
		if s, ok := u.Object["zone"].(string); ok {
			r.Zone = dcl.String(s)
		} else {
			return nil, fmt.Errorf("r.Zone: expected string")
		}
	}
	return r, nil
}
// GetInstanceGroupManager fetches the live state of the resource described by
// u and returns it in unstructured form.
func GetInstanceGroupManager(ctx context.Context, config *dcl.Config, u *unstructured.Resource) (*unstructured.Resource, error) {
	client := dclService.NewClient(config)
	typed, err := UnstructuredToInstanceGroupManager(u)
	if err != nil {
		return nil, err
	}
	fetched, err := client.GetInstanceGroupManager(ctx, typed)
	if err != nil {
		return nil, err
	}
	return InstanceGroupManagerToUnstructured(fetched), nil
}
// ListInstanceGroupManager lists all resources in the given project/location,
// walking every page of results and converting each item to unstructured form.
func ListInstanceGroupManager(ctx context.Context, config *dcl.Config, project string, location string) ([]*unstructured.Resource, error) {
	client := dclService.NewClient(config)
	page, err := client.ListInstanceGroupManager(ctx, project, location)
	if err != nil {
		return nil, err
	}
	var out []*unstructured.Resource
	for {
		for _, item := range page.Items {
			out = append(out, InstanceGroupManagerToUnstructured(item))
		}
		if !page.HasNext() {
			break
		}
		if err := page.Next(ctx, client); err != nil {
			return nil, err
		}
	}
	return out, nil
}
// ApplyInstanceGroupManager creates or updates the resource described by u.
// If a state hint is present in opts it is converted to the typed form and
// forwarded so the apply can diff against the previously-known state.
func ApplyInstanceGroupManager(ctx context.Context, config *dcl.Config, u *unstructured.Resource, opts ...dcl.ApplyOption) (*unstructured.Resource, error) {
	client := dclService.NewClient(config)
	desired, err := UnstructuredToInstanceGroupManager(u)
	if err != nil {
		return nil, err
	}
	if hint := unstructured.FetchStateHint(opts); hint != nil {
		typedHint, err := UnstructuredToInstanceGroupManager(hint)
		if err != nil {
			return nil, err
		}
		opts = append(opts, dcl.WithStateHint(typedHint))
	}
	applied, err := client.ApplyInstanceGroupManager(ctx, desired, opts...)
	if err != nil {
		return nil, err
	}
	return InstanceGroupManagerToUnstructured(applied), nil
}
// InstanceGroupManagerHasDiff reports whether applying u would change the
// live resource. It performs a dry-run apply with creation, modification and
// destruction all blocked; the apply returning ApplyInfeasibleError therefore
// signals that a change would be required.
func InstanceGroupManagerHasDiff(ctx context.Context, config *dcl.Config, u *unstructured.Resource, opts ...dcl.ApplyOption) (bool, error) {
	client := dclService.NewClient(config)
	desired, err := UnstructuredToInstanceGroupManager(u)
	if err != nil {
		return false, err
	}
	if hint := unstructured.FetchStateHint(opts); hint != nil {
		typedHint, err := UnstructuredToInstanceGroupManager(hint)
		if err != nil {
			return false, err
		}
		opts = append(opts, dcl.WithStateHint(typedHint))
	}
	opts = append(opts, dcl.WithLifecycleParam(dcl.BlockDestruction), dcl.WithLifecycleParam(dcl.BlockCreation), dcl.WithLifecycleParam(dcl.BlockModification))
	if _, err := client.ApplyInstanceGroupManager(ctx, desired, opts...); err != nil {
		if _, ok := err.(dcl.ApplyInfeasibleError); ok {
			return true, nil
		}
		return false, err
	}
	return false, nil
}
// DeleteInstanceGroupManager deletes the resource described by u.
func DeleteInstanceGroupManager(ctx context.Context, config *dcl.Config, u *unstructured.Resource) error {
	client := dclService.NewClient(config)
	typed, err := UnstructuredToInstanceGroupManager(u)
	if err != nil {
		return err
	}
	return client.DeleteInstanceGroupManager(ctx, typed)
}
// InstanceGroupManagerID returns the canonical identity string of the
// resource described by u, or an error if u cannot be converted.
func InstanceGroupManagerID(u *unstructured.Resource) (string, error) {
	typed, err := UnstructuredToInstanceGroupManager(u)
	if err != nil {
		return "", err
	}
	return typed.ID()
}
// STV returns the service/type/version triple that identifies this resource
// in the unstructured registry: compute / InstanceGroupManager at beta.
func (r *InstanceGroupManager) STV() unstructured.ServiceTypeVersion {
	return unstructured.ServiceTypeVersion{
		"compute",
		"InstanceGroupManager",
		"beta",
	}
}
// SetPolicyMember is not supported for InstanceGroupManager.
func (r *InstanceGroupManager) SetPolicyMember(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, member *unstructured.Resource) (*unstructured.Resource, error) {
	return nil, unstructured.ErrNoSuchMethod
}
// GetPolicyMember is not supported for InstanceGroupManager.
func (r *InstanceGroupManager) GetPolicyMember(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, role, member string) (*unstructured.Resource, error) {
	return nil, unstructured.ErrNoSuchMethod
}
// DeletePolicyMember is not supported for InstanceGroupManager.
func (r *InstanceGroupManager) DeletePolicyMember(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, member *unstructured.Resource) error {
	return unstructured.ErrNoSuchMethod
}
// SetPolicy is not supported for InstanceGroupManager.
func (r *InstanceGroupManager) SetPolicy(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, policy *unstructured.Resource) (*unstructured.Resource, error) {
	return nil, unstructured.ErrNoSuchMethod
}
// SetPolicyWithEtag is not supported for InstanceGroupManager.
func (r *InstanceGroupManager) SetPolicyWithEtag(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, policy *unstructured.Resource) (*unstructured.Resource, error) {
	return nil, unstructured.ErrNoSuchMethod
}
// GetPolicy is not supported for InstanceGroupManager.
func (r *InstanceGroupManager) GetPolicy(ctx context.Context, config *dcl.Config, resource *unstructured.Resource) (*unstructured.Resource, error) {
	return nil, unstructured.ErrNoSuchMethod
}
// Get implements the unstructured resource interface; it delegates to
// GetInstanceGroupManager.
func (r *InstanceGroupManager) Get(ctx context.Context, config *dcl.Config, resource *unstructured.Resource) (*unstructured.Resource, error) {
	return GetInstanceGroupManager(ctx, config, resource)
}
// Apply implements the unstructured resource interface; it delegates to
// ApplyInstanceGroupManager.
func (r *InstanceGroupManager) Apply(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, opts ...dcl.ApplyOption) (*unstructured.Resource, error) {
	return ApplyInstanceGroupManager(ctx, config, resource, opts...)
}
// HasDiff implements the unstructured resource interface; it delegates to
// InstanceGroupManagerHasDiff.
func (r *InstanceGroupManager) HasDiff(ctx context.Context, config *dcl.Config, resource *unstructured.Resource, opts ...dcl.ApplyOption) (bool, error) {
	return InstanceGroupManagerHasDiff(ctx, config, resource, opts...)
}
// Delete implements the unstructured resource interface; it delegates to
// DeleteInstanceGroupManager.
func (r *InstanceGroupManager) Delete(ctx context.Context, config *dcl.Config, resource *unstructured.Resource) error {
	return DeleteInstanceGroupManager(ctx, config, resource)
}
// ID implements the unstructured resource interface; it delegates to
// InstanceGroupManagerID.
func (r *InstanceGroupManager) ID(resource *unstructured.Resource) (string, error) {
	return InstanceGroupManagerID(resource)
}
// init registers InstanceGroupManager with the unstructured resource registry
// so it can be looked up by its service/type/version triple.
func init() {
	unstructured.Register(&InstanceGroupManager{})
}
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package configcontroller provides functions to manage Config Controller
// resources.
package alpha
// EncodeInstanceCreateRequest enables the Config Controller option (adds the
// `bundlesConfig` field) for all the create requests for a ConfigController
// Instance. The input map is mutated in place and also returned for chaining.
func EncodeInstanceCreateRequest(m map[string]interface{}) map[string]interface{} {
	bundles := map[string]interface{}{
		"configControllerConfig": map[string]interface{}{
			"enabled": true,
		},
	}
	m["bundlesConfig"] = bundles
	return m
}
|
/*****************************************************************
* Copyright©,2020-2022, email: 279197148@qq.com
* Version: 1.0.0
* @Author: yangtxiang
* @Date: 2020-07-21 11:54
* Description:
*****************************************************************/
package xthrift
import (
. "github.com/apache/thrift/lib/go/thrift"
)
// TBinaryProtocolEx extends thrift.TBinaryProtocol so that struct names and
// field names are transmitted along with the data (the stock binary protocol
// sends only field types and ids).
type TBinaryProtocolEx struct {
	*TBinaryProtocol
	// protocolType holds the protocol id byte read from the wire in
	// ReadMessageBegin; UnknownProtocolType until a message is read.
	protocolType TProtocolType
}
// Compile-time check that TBinaryProtocolEx satisfies DynamicProtocol.
var _ DynamicProtocol = (*TBinaryProtocolEx)(nil)
// NewBinaryProtocolEx wraps trans in a TBinaryProtocolEx whose protocol type
// stays UnknownProtocolType until the first message header is read.
func NewBinaryProtocolEx(trans TTransport) TProtocol {
	p := &TBinaryProtocolEx{
		TBinaryProtocol: NewTBinaryProtocolTransport(trans),
		protocolType:    UnknownProtocolType,
	}
	return p
}
// GetProtocolType returns the protocol id recorded by ReadMessageBegin
// (UnknownProtocolType before any message has been read).
func (p *TBinaryProtocolEx) GetProtocolType() TProtocolType {
	return p.protocolType
}
// WriteMessageBegin prefixes every message with a protocol-type byte
// (BinaryProtocolType) so the peer can detect which concrete protocol
// follows, then delegates to the embedded TBinaryProtocol.
func (p *TBinaryProtocolEx) WriteMessageBegin(name string, msgType TMessageType, seqId int32) error {
	if err := p.WriteByte(int8(BinaryProtocolType)); err != nil {
		return err
	}
	return p.TBinaryProtocol.WriteMessageBegin(name, msgType, seqId)
}
// ReadMessageBegin consumes the leading protocol-type byte written by
// WriteMessageBegin, records it, and then reads the standard binary
// protocol message header.
func (p *TBinaryProtocolEx) ReadMessageBegin() (name string, msgType TMessageType, seqId int32, err error) {
	b, readErr := p.ReadByte()
	if readErr != nil {
		return "", 0, 0, readErr
	}
	p.protocolType = TProtocolType(b)
	return p.TBinaryProtocol.ReadMessageBegin()
}
// WriteStructBegin writes the struct name itself (the stock binary protocol
// writes nothing here), pairing with ReadStructBegin below.
func (p *TBinaryProtocolEx) WriteStructBegin(name string) error {
	return p.WriteString(name)
}

// ReadStructBegin reads back the struct name written by WriteStructBegin.
func (p *TBinaryProtocolEx) ReadStructBegin() (name string, err error) {
	s, err := p.ReadString()
	if err != nil {
		return "", err
	}
	return s, nil
}
// WriteFieldBegin writes a field header as: type byte, field name string,
// then the i16 field id (the stock binary protocol omits the name).
func (p *TBinaryProtocolEx) WriteFieldBegin(name string, fdType TType, fdId int16) error {
	if err := p.WriteByte(int8(fdType)); err != nil {
		return err
	}
	if err := p.WriteString(name); err != nil {
		return err
	}
	if err := p.WriteI16(fdId); err != nil {
		return err
	}
	return nil
}
// ReadFieldBegin reads the field header written by WriteFieldBegin: a type
// byte, then (unless the type is STOP) the field name string and the i16 id.
func (p *TBinaryProtocolEx) ReadFieldBegin() (name string, fdType TType, fdId int16, err error) {
	n, e := p.ReadByte()
	if e != nil {
		// Fix: wrap the actual read error. The original wrapped the
		// still-nil named return `err`, which silently discarded the
		// real failure.
		err = NewTProtocolException(e)
		return
	}
	fdType = TType(n)
	if fdType != STOP {
		if name, err = p.ReadString(); err != nil {
			return
		}
		if fdId, err = p.ReadI16(); err != nil {
			return
		}
	}
	return
}
|
package leetcode
import (
"fmt"
"testing"
)
// TestHammingWeight prints the popcount of 11; it is a smoke test only and
// makes no assertion.
func TestHammingWeight(t *testing.T) {
	fmt.Println(HammingWeight(11))
}

// TestIsPowerOfTwo logs isPowerOfTwo(6); smoke test only, no assertion.
func TestIsPowerOfTwo(t *testing.T) {
	t.Log(isPowerOfTwo(6))
}
|
package clls
import (
"context"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"github.com/clls-dev/clls/pkg/examples"
"github.com/peterbourgon/ff/v3/ffcli"
"github.com/pkg/errors"
lsp "go.lsp.dev/protocol"
"go.lsp.dev/uri"
"go.uber.org/zap"
)
var (
	// CommandName is the name of this sub-command as shown in usage text.
	CommandName = "clls"
)

// readFileToString loads the file backing the given LSP document URI and
// returns its contents as a string.
func readFileToString(u lsp.DocumentURI) (string, error) {
	b, err := ioutil.ReadFile(u.Filename())
	if err != nil {
		return "", err
	}
	return string(b), nil
}
// Command builds the ffcli sub-command that parses CLVM high-level code and
// prints the resulting module as indented JSON. The code is taken either
// from the -pp flag (inline source) or from a file path argument. The
// returned *string is reserved for rendered output; the paths that would
// write to it are currently commented out.
func Command(rootName string) (*ffcli.Command, *string) {
	flagSet := flag.NewFlagSet(fmt.Sprintf("%s %s", rootName, CommandName), flag.ExitOnError)
	ppFlag := flagSet.String("pp", "", `--pp "(mod () ("Hello world!"))"`)
	s := ""
	r := &s
	return &ffcli.Command{
		Name:       CommandName,
		ShortUsage: fmt.Sprintf("%s %s <arg>", rootName, CommandName),
		ShortHelp:  "parse clvm high level code",
		FlagSet:    flagSet,
		Exec: func(_ context.Context, args []string) error {
			l, err := zap.NewDevelopment()
			if err != nil {
				// Fall back to a no-op logger rather than aborting.
				fmt.Println("failed to init zap logger", err)
				l = zap.NewNop()
			}
			var mod *Module
			if *ppFlag != "" {
				// Inline source: seed the virtual file system with the flag
				// value plus every bundled example file.
				sources := map[lsp.DocumentURI]string{uri.New("file://main.clvm"): *ppFlag}
				exs, err := examples.F.ReadDir(".")
				if err == nil {
					for _, e := range exs {
						b, err := examples.F.ReadFile(e.Name())
						if err == nil {
							fmt.Println("source", e.Name())
							sources[uri.From("file", "", e.Name(), "", "")] = string(b)
						}
					}
				}
				nmod, err := LoadCLVMFromStrings(l, "file://main.clvm", sources)
				if err != nil {
					return errors.Wrap(err, "parse clvm")
				}
				//fmt.Println(mod)
				mod = nmod
			} else {
				filePath := flagSet.Arg(0)
				if filePath == "" {
					return errors.New("missing file path argument")
				}
				nmod, err := LoadCLVM(l, uri.New("file://"+filePath), readFileToString)
				if err != nil {
					return errors.Wrap(err, "parse modules")
				}
				mod = nmod
			}
			//fmt.Println(mod)
			/*if *lispFlag {
				s = mod.lispString()
			} else {
				s = mod.String()
			}*/
			// Dump the parsed module as pretty-printed JSON.
			mbytes, err := json.MarshalIndent(mod, "", "  ")
			if err == nil {
				fmt.Println(string(mbytes))
			}
			return nil
		},
	}, r
}
// fileURIPrefix is the URI scheme prefix for local files.
const fileURIPrefix = "file://"

// TODO: replace p with documentURI
// LoadCLVM reads the document via readFile, tokenizes and parses it, and
// returns the first module found in the file.
func LoadCLVM(l *zap.Logger, documentURI lsp.DocumentURI, readFile func(lsp.DocumentURI) (string, error)) (*Module, error) {
	f, err := readFile(documentURI)
	if err != nil {
		return nil, errors.Wrap(err, "read file")
	}
	tokens := []*Token(nil)
	tch, errptr := tokenize(f, documentURI)
	// Tee the token stream: forward each token to the parser while also
	// recording it for parseModules below. The deferred close signals the
	// parser after the final token has been recorded.
	duptch := make(chan *Token)
	go func() {
		defer close(duptch)
		for token := range tch {
			duptch <- token
			tokens = append(tokens, token)
		}
	}()
	ast, err := parseAST(duptch)
	if err != nil {
		// NOTE(review): if parseAST returns an error without draining
		// duptch, the forwarding goroutine blocks forever — confirm
		// parseAST always consumes the channel to completion.
		return nil, errors.Wrap(err, "parse syntax tree")
	}
	// Tokenizer errors are surfaced through the shared error pointer only
	// after the stream has been fully consumed.
	if *errptr != nil {
		return nil, errors.Wrap(*errptr, "tokenize")
	}
	mods, err := parseModules(l, ast, documentURI, readFile, tokens)
	if err != nil {
		return nil, errors.Wrap(err, "parse modules")
	}
	if len(mods) == 0 {
		return nil, errors.New("no modules in file")
	}
	return mods[0], nil
}
// LoadCLVMFromStrings is LoadCLVM with file contents served from the given
// in-memory map instead of the file system.
func LoadCLVMFromStrings(l *zap.Logger, documentURI lsp.DocumentURI, files map[lsp.DocumentURI]string) (*Module, error) {
	lookup := func(p lsp.DocumentURI) (string, error) {
		if content, ok := files[p]; ok {
			return content, nil
		}
		return "", fmt.Errorf("unknown file '%s'", p)
	}
	return LoadCLVM(l, documentURI, lookup)
}
// constant is a named compile-time constant declared in a module.
type constant struct {
	Token *Token      // token where the declaration starts
	Name  interface{} // constant name (token or AST node)
	Value *CodeBody   // parsed value expression
}

// Function is a function declared in a CLVM module (defun/defmacro etc.).
type Function struct {
	Raw          *ASTNode    // the raw declaration node
	RawBody      interface{} // unparsed body
	KeywordToken *Token      // the defining keyword token
	Name         *Token      // function name token
	Params       interface{} // raw parameter list (token or AST node)
	ParamsBody   *CodeBody
	Body         *CodeBody
	Inline       bool `json:",omitempty"`
	Builtin      bool `json:",omitempty"`
	Macro        bool `json:",omitempty"`
}
// paramsNames collects the set of distinct parameter names appearing in n.
func paramsNames(n interface{}) map[string]struct{} {
	tokens := paramsTokens(n)
	names := make(map[string]struct{}, len(tokens))
	for _, tok := range tokens {
		names[tok.Value] = struct{}{}
	}
	return names
}
// paramsTokens recursively walks a parameter list (a single token or an AST
// node) and returns the parameter tokens keyed by their text, skipping the
// "." cons-pair separator.
// NOTE(review): this map is keyed by Token.Text while paramsNames keys by
// Token.Value — confirm the two fields agree for basic tokens.
func paramsTokens(n interface{}) map[string]*Token {
	v := map[string]*Token{}
	switch n := n.(type) {
	case *Token:
		if n.Kind == basicToken && n.Text != "." {
			v[n.Text] = n
		}
	case *ASTNode:
		for _, c := range n.Children {
			for k, sv := range paramsTokens(c) {
				v[k] = sv
			}
		}
	}
	return v
}
// vars returns the set of the function's parameter names.
func (f *Function) vars() map[string]struct{} {
	return paramsNames(f.Params)
}

// varTokens returns the function's parameter tokens keyed by name.
func (f *Function) varTokens() map[string]*Token {
	return paramsTokens(f.Params)
}

// include records an (include ...) directive and the module it resolved to.
type include struct {
	Token     *Token      // the include keyword token
	Value     interface{} // the include target expression
	Module    *Module     // resolved module, if loading succeeded
	LoadError error       // set when the target could not be loaded
}

// vars returns the set of the module's argument names.
func (m *Module) vars() map[string]struct{} {
	return paramsNames(m.Args)
}

// varTokens returns the module's argument tokens keyed by name.
func (m *Module) varTokens() map[string]*Token {
	return paramsTokens(m.Args)
}
// ConditionCodes holds the bundled condition_codes.clvm example source, or
// the empty string if the embedded file cannot be read.
var ConditionCodes = func() string {
	b, err := examples.F.ReadFile("condition_codes.clvm")
	if err == nil {
		return string(b)
	}
	return ""
}()
|
package isaac
import (
"time"
)
const (
	// actionsBasePath is the REST base path for action log resources.
	actionsBasePath = "/api/v1/logs/scenarios"
)

// ActionsService describes CRUD operations on action log entries.
type ActionsService interface {
	Add(NewAction) (Action, error)
	Get(ID) (Action, error)
	List() ([]Action, error)
	Remove(Action) error
	Update(Action) (Action, error)
}

// ActionsServiceOp implements ActionsService on top of the REST client.
type ActionsServiceOp struct {
	client *Client
}

// NewAction is the request payload for creating an action log entry.
type NewAction struct {
	Origin      string `json:"origin"`
	Action      string `json:"action"`
	DisplayName string `json:"displayName"`
	Description string `json:"description"`
	Log         bool   `json:"log"`
}

// Action is a stored action log entry returned by the API.
// NOTE(review): the embedded type is NewActionLog, but this file only
// defines NewAction — confirm NewActionLog exists elsewhere in the package
// or whether NewAction was intended.
type Action struct {
	NewActionLog `json:",inline"`
	ID           ID        `json:"_id"`
	UpdatedAt    time.Time `json:"time"`
}
// ref returns the item's identifier as a string, used to build URL paths.
func (item Action) ref() string {
	return item.ID.String()
}

// Add creates a new action log entry and returns the stored Action.
func (s *ActionsServiceOp) Add(action NewAction) (Action, error) {
	var respStruct Action
	err := s.client.genericPost(actionsBasePath, action, &respStruct)
	return respStruct, err
}

// Get fetches the action log entry with the given id.
func (s *ActionsServiceOp) Get(id ID) (Action, error) {
	var respStruct Action
	err := s.client.genericGetID(actionsBasePath, id, &respStruct)
	return respStruct, err
}

// List returns all action log entries.
func (s *ActionsServiceOp) List() ([]Action, error) {
	var respStruct []Action
	err := s.client.genericGet(actionsBasePath, &respStruct)
	return respStruct, err
}

// Remove deletes the given action log entry.
func (s *ActionsServiceOp) Remove(action Action) error {
	err := s.client.genericDeleteID(actionsBasePath, action, nil)
	return err
}

// Update replaces the stored entry and returns the updated Action.
func (s *ActionsServiceOp) Update(action Action) (Action, error) {
	var respStruct Action
	err := s.client.genericPutID(actionsBasePath, action, &respStruct)
	return respStruct, err
}
|
package core
import "fmt"
// AOIManager manages an area-of-interest region divided into grid cells.
type AOIManager struct {
	// MinX is the left boundary of the region.
	MinX int
	// MaxX is the right boundary of the region.
	MaxX int
	// CntsX is the number of cells along the X axis.
	CntsX int
	// MinY is the top boundary of the region.
	MinY int
	// MaxY is the bottom boundary of the region.
	MaxY int
	// CntsY is the number of cells along the Y axis.
	CntsY int
	// grids holds the cells of this region: key is the grid ID, value the
	// Grid object.
	grids map[int]*Grid
}
// NewAOIManager creates an AOI manager covering the rectangle
// [minX,maxX] x [minY,maxY], split into cntsX * cntsY grid cells, each cell
// numbered id = y*cntsX + x.
func NewAOIManager(minX, maxX, cntsX, minY, maxY, cntsY int) *AOIManager {
	mgr := &AOIManager{
		MinX:  minX,
		MaxX:  maxX,
		CntsX: cntsX,
		MinY:  minY,
		MaxY:  maxY,
		CntsY: cntsY,
		grids: make(map[int]*Grid),
	}
	// Create and number every cell.
	for y := 0; y < cntsY; y++ {
		for x := 0; x < cntsX; x++ {
			id := y*cntsX + x
			// NOTE(review): cell bounds are measured from the origin and are
			// not offset by MinX/MinY — confirm the region is expected to
			// start at (0,0).
			mgr.grids[id] = &Grid{
				GID:  id,
				MinX: x * mgr.gridWitdh(),
				MaxX: (x + 1) * mgr.gridWitdh(),
				MinY: y * mgr.gridHeigh(),
				MaxY: (y + 1) * mgr.gridHeigh(),
			}
		}
	}
	return mgr
}
// gridWitdh returns the width of one cell along the X axis (integer
// division). NOTE(review): the name is misspelled ("Witdh") but is kept
// unchanged because other files in this package may call it.
func (m *AOIManager) gridWitdh() int {
	return (m.MaxX - m.MinX) / m.CntsX
}

// gridHeigh returns the height of one cell along the Y axis (integer
// division). Same misspelling caveat as gridWitdh.
func (m *AOIManager) gridHeigh() int {
	return (m.MaxY - m.MinY) / m.CntsY
}
// String implements fmt.Stringer, rendering the manager's bounds followed by
// every grid cell. Cells appear in Go map-iteration order, which is random
// between calls.
func (m *AOIManager) String() string {
	out := fmt.Sprintf("AOIManager\n MinX: %d, MaxX: %d, cntsX:%d, MinY: %d, MaxY:%d, cntsY:%d\n Grid in AOIManager:\n",
		m.MinX, m.MaxX, m.CntsX, m.MinY, m.MaxY, m.CntsY)
	for _, cell := range m.grids {
		out += fmt.Sprintln(cell)
	}
	return out
}
|
package main
import (
"context"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"os/user"
"path/filepath"
"runtime"
"sync"
"github.com/google/go-github/github"
"golang.org/x/oauth2"
)
const (
	// DotFile is the file in the user's HOME directory that stores the
	// fallback GitHub token as JSON (read by readToken).
	DotFile = ".github-command-line"
)

// Config holds the command-line configuration for the application.
type Config struct {
	Token        string // GitHub API token
	organization string // GitHub organization whose repositories are listed
	clone        bool   // when true, clone each repository locally
	directory    string // destination directory for clones
}

// config is the process-wide configuration populated in init.
var config = new(Config)
// init defines and parses the command-line flags, then falls back to the
// token stored on disk if -token was not supplied.
// NOTE(review): calling flag.Parse from init runs before main and makes the
// package untestable with custom args — confirm this is intentional.
func init() {
	flag.StringVar(&config.Token, "token", "", "The token (https://github.com/settings/tokens/new)")
	flag.StringVar(&config.organization, "organization", "", "The organization")
	flag.BoolVar(&config.clone, "clone", false, "True we clone in current directory")
	flag.StringVar(&config.directory, "directory", "", "Where do we clone")
	flag.Parse()
	//TODO validate
	readToken()
}
// readToken falls back to the token stored in $HOME/.github-command-line
// when none was supplied via the -token flag. Read and decode failures are
// deliberately ignored (best effort): the token simply stays empty.
func readToken() {
	if config.Token != "" {
		return
	}
	usr, err := user.Current()
	if err != nil {
		log.Fatal(err)
	}
	raw, _ := ioutil.ReadFile(filepath.Join(usr.HomeDir, DotFile))
	var stored Config
	json.Unmarshal(raw, &stored)
	config.Token = stored.Token
}
// main authenticates against the GitHub API with the configured token and
// pages through the organization's repositories, printing each one and
// optionally cloning it.
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	ts := oauth2.StaticTokenSource(
		&oauth2.Token{AccessToken: config.Token},
	)
	tc := oauth2.NewClient(oauth2.NoContext, ts)
	client := github.NewClient(tc)
	fmt.Println("Name\t\t\t\t\tclone url")
	var wg sync.WaitGroup
	var err error
	var page int
	// listRepos returns the next page number; 0 means no more pages.
	for page, err = 1, nil; page != 0; {
		page, err = listRepos(&wg, client, page)
		if err != nil {
			fmt.Printf("%s", err)
		}
	}
	// Wait for any clone goroutines started by Project.clone.
	wg.Wait()
}
// listRepos fetches one page of the organization's repositories, prints each
// name and SSH URL, optionally clones them, and returns the next page number
// (0 when the last page has been reached).
func listRepos(wg *sync.WaitGroup, client *github.Client, page int) (int, error) {
	var opt *github.RepositoryListByOrgOptions
	// NOTE(review): the caller always starts at page 1, so the nil branch
	// below is currently unreachable — confirm whether page<=0 is expected.
	if page > 0 {
		opt = &github.RepositoryListByOrgOptions{Type: "all", ListOptions: github.ListOptions{Page: page}}
	} else {
		opt = nil
	}
	repos, response, err := client.Repositories.ListByOrg(context.Background(), config.organization, opt)
	if err != nil {
		return 0, err
	}
	//clone all repositories
	for _, repo := range repos {
		p := Project{name: *repo.Name, cloneURL: *repo.SSHURL}
		fmt.Printf("%q\t\t\t\t\t%q\n", p.name, p.cloneURL)
		if config.clone {
			p.clone(wg)
		}
	}
	return response.NextPage, nil
}
// Project pairs a repository name with the URL used to clone it.
type Project struct {
	name     string // repository name, used as the clone directory name
	cloneURL string // SSH clone URL
}
// clone runs "git clone" for the project in the background unless the
// destination directory already exists. The WaitGroup is incremented before
// the goroutine starts and released when the clone finishes.
func (p Project) clone(wg *sync.WaitGroup) {
	dest := filepath.Join(config.directory, p.name)
	if _, err := os.Stat(dest); !os.IsNotExist(err) {
		fmt.Printf("%q\t\t\t\t\t%q exists\n", p.name, p.cloneURL)
		//TODO stash and pull
		return
	}
	wg.Add(1)
	cmd := exec.Command("git", "clone", p.cloneURL, dest)
	go func(wg *sync.WaitGroup) {
		defer wg.Done()
		if startErr := cmd.Start(); startErr != nil {
			fmt.Printf("error: %v\n\n", startErr)
		}
		if waitErr := cmd.Wait(); waitErr != nil {
			fmt.Printf("%s", waitErr)
		}
		fmt.Printf("%q\t\t\t\t\t%q done\n", p.name, p.cloneURL)
	}(wg)
}
|
package imp0rt
import (
"encoding/json"
"io"
"os"
"github.com/Zenika/marcel/api/db"
)
// imp0rt decodes JSON from inputFile (or stdin when inputFile is empty) into
// value, then persists it via save. The database is held open for the whole
// operation; stdin is intentionally never closed.
func imp0rt(inputFile string, value interface{}, save func() error) error {
	if err := db.Open(); err != nil {
		return err
	}
	defer db.Close()

	var in io.ReadCloser = os.Stdin
	if inputFile != "" {
		f, err := os.Open(inputFile)
		if err != nil {
			return err
		}
		defer f.Close()
		in = f
	}
	if err := json.NewDecoder(in).Decode(value); err != nil {
		return err
	}
	return save()
}
|
package pivot
import (
"io/ioutil"
"path"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"github.com/openshift/origin/pkg/oc/clusterup/componentinstall"
"github.com/openshift/origin/pkg/oc/clusterup/docker/dockerhelper"
)
// Component is a named cluster-up component that can install itself using a
// Docker client.
type Component interface {
	Name() string
	Install(dockerClient dockerhelper.Interface) error
}

// KubeAPIServerContent installs the secrets and configmaps that the
// kube-apiserver needs, sourced from the install context's base directory.
type KubeAPIServerContent struct {
	InstallContext componentinstall.Context
}
// Name returns the component's display name.
// Receiver renamed k -> c for consistency with Install on the same type.
func (c *KubeAPIServerContent) Name() string {
	return "KubeAPIServerContent"
}
// this is a list of all the secrets and configmaps we need to create
const (
	namespace = "openshift-kube-apiserver"
	// saTokenSigningCerts contains certificates corresponding to the valid keys that are and were used to sign SA tokens
	saTokenSigningCerts = "sa-token-signing-certs"
	// aggregatorClientCABundle is the ca-bundle to use to verify that the aggregator is proxying to your apiserver
	aggregatorClientCABundle = "aggregator-client-ca"
	// aggregatorClientCertKeyPair is the client cert/key pair used by the aggregator when proxying
	aggregatorClientCertKeyPair = "aggregator-client"
	// kubeletClientCertKeyPair is the client cert/key used by the kube-apiserver when communicating with the kubelet
	kubeletClientCertKeyPair = "kubelet-client"
	// kubeletServingCABundle is the ca-bundle to use to verify connections to the kubelet
	kubeletServingCABundle = "kubelet-serving-ca"
	// apiserverServingCertKeyPair is the serving cert/key used by the kube-apiserver to secure its https server
	apiserverServingCertKeyPair = "serving-cert"
	// apiserverClientCABundle is the ca-bundle used to identify users from incoming connections to the kube-apiserver
	apiserverClientCABundle = "client-ca"
	// etcdClientCertKeyPair is the client cert/key pair used by the kube-apiserver when communicating with etcd
	etcdClientCertKeyPair = "etcd-client"
	// etcdServingCABundle is the ca-bundle to use to verify connections to etcd
	etcdServingCABundle = "etcd-serving-ca"
)
// Install creates the openshift-kube-apiserver namespace (run-level 0) and
// publishes all CA bundles and cert/key pairs from the kube-apiserver config
// directory as configmaps and secrets. Each ensure* call is idempotent:
// already-existing objects are left untouched.
func (c *KubeAPIServerContent) Install(dockerClient dockerhelper.Interface) error {
	kubAPIServerConfigDir := path.Join(c.InstallContext.BaseDir(), "kube-apiserver")
	kubeClient, err := kubernetes.NewForConfig(c.InstallContext.ClusterAdminClientConfig())
	if err != nil {
		return err
	}
	_, err = kubeClient.CoreV1().Namespaces().Create(&corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace, Labels: map[string]string{"openshift.io/run-level": "0"}}})
	if err != nil && !apierrors.IsAlreadyExists(err) {
		return err
	}
	// CA bundles become configmaps with a single "ca-bundle.crt" key.
	if err := ensureCABundle(kubeClient, kubAPIServerConfigDir, saTokenSigningCerts, "serviceaccounts.public.key"); err != nil {
		return err
	}
	if err := ensureCABundle(kubeClient, kubAPIServerConfigDir, aggregatorClientCABundle, "frontproxy-ca.crt"); err != nil {
		return err
	}
	if err := ensureCABundle(kubeClient, kubAPIServerConfigDir, kubeletServingCABundle, "ca.crt"); err != nil {
		return err
	}
	if err := ensureCABundle(kubeClient, kubAPIServerConfigDir, apiserverClientCABundle, "ca.crt"); err != nil {
		return err
	}
	if err := ensureCABundle(kubeClient, kubAPIServerConfigDir, etcdServingCABundle, "ca.crt"); err != nil {
		return err
	}
	// Cert/key pairs become secrets with "tls.crt"/"tls.key" keys, loaded
	// from <base>.crt / <base>.key files.
	if err := ensureCertKeyPair(kubeClient, kubAPIServerConfigDir, aggregatorClientCertKeyPair, "openshift-aggregator"); err != nil {
		return err
	}
	if err := ensureCertKeyPair(kubeClient, kubAPIServerConfigDir, kubeletClientCertKeyPair, "master.kubelet-client"); err != nil {
		return err
	}
	if err := ensureCertKeyPair(kubeClient, kubAPIServerConfigDir, apiserverServingCertKeyPair, "master.server"); err != nil {
		return err
	}
	if err := ensureCertKeyPair(kubeClient, kubAPIServerConfigDir, etcdClientCertKeyPair, "master.etcd-client"); err != nil {
		return err
	}
	return nil
}
// ensureConfigMap creates obj unless a configmap with the same name already
// exists. Returns nil when the object exists or was created; any unexpected
// lookup error is returned as-is.
func ensureConfigMap(kubeClient kubernetes.Interface, obj *corev1.ConfigMap) error {
	if _, getErr := kubeClient.CoreV1().ConfigMaps(obj.Namespace).Get(obj.Name, metav1.GetOptions{}); getErr == nil || !apierrors.IsNotFound(getErr) {
		// Found (nil) or a real lookup failure — either way, don't create.
		return getErr
	}
	_, createErr := kubeClient.CoreV1().ConfigMaps(obj.Namespace).Create(obj)
	return createErr
}
// ensureSecret creates obj unless a secret with the same name already
// exists. Returns nil when the object exists or was created; any unexpected
// lookup error is returned as-is.
func ensureSecret(kubeClient kubernetes.Interface, obj *corev1.Secret) error {
	if _, getErr := kubeClient.CoreV1().Secrets(obj.Namespace).Get(obj.Name, metav1.GetOptions{}); getErr == nil || !apierrors.IsNotFound(getErr) {
		// Found (nil) or a real lookup failure — either way, don't create.
		return getErr
	}
	_, createErr := kubeClient.CoreV1().Secrets(obj.Namespace).Create(obj)
	return createErr
}
// ensureCABundle publishes the file <kubeAPIServerDir>/<filename> as the
// "ca-bundle.crt" key of a configmap called name in the apiserver namespace.
func ensureCABundle(kubeClient kubernetes.Interface, kubeAPIServerDir, name, filename string) error {
	pem, err := ioutil.ReadFile(path.Join(kubeAPIServerDir, filename))
	if err != nil {
		return err
	}
	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name},
		Data:       map[string]string{"ca-bundle.crt": string(pem)},
	}
	return ensureConfigMap(kubeClient, cm)
}
// ensureCertKeyPair publishes <baseFilename>.crt and <baseFilename>.key from
// kubeAPIServerDir as the "tls.crt"/"tls.key" keys of a secret called name
// in the apiserver namespace.
func ensureCertKeyPair(kubeClient kubernetes.Interface, kubeAPIServerDir, name, baseFilename string) error {
	cert, certErr := ioutil.ReadFile(path.Join(kubeAPIServerDir, baseFilename+".crt"))
	if certErr != nil {
		return certErr
	}
	key, keyErr := ioutil.ReadFile(path.Join(kubeAPIServerDir, baseFilename+".key"))
	if keyErr != nil {
		return keyErr
	}
	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name},
		Data: map[string][]byte{
			"tls.crt": cert,
			"tls.key": key,
		},
	}
	return ensureSecret(kubeClient, secret)
}
|
package user
import (
"bytes"
"fmt"
"log"
"net/http"
"net/http/httptest"
"syscall"
"github.com/cswank/quimby/internal/auth"
"github.com/cswank/quimby/internal/repository"
"golang.org/x/crypto/ssh/terminal"
)
// Create interactively creates a user: it prompts twice for a password,
// derives the credentials (hash + TFA secret + QR code), stores the user,
// then serves the TFA QR code on a throwaway local HTTP server until the
// operator presses enter. Any failure aborts the process via log.Fatal.
func Create(r *repository.User, username string) {
	fmt.Print("Enter password: ")
	pw, err := terminal.ReadPassword(int(syscall.Stdin))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print("Confirm password: ")
	pw2, err := terminal.ReadPassword(int(syscall.Stdin))
	if err != nil {
		log.Fatal(err)
	}
	if !bytes.Equal(pw, pw2) {
		log.Fatal(fmt.Errorf("passwords don't match"))
	}
	hashed, tfa, qr, err := auth.Credentials(username, string(pw))
	if err != nil {
		log.Fatal(err)
	}
	u, err := r.Create(username, hashed, tfa)
	if err != nil {
		log.Fatal(err)
	}
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Fix: headers must be set BEFORE WriteHeader; the original set
		// Content-Type after WriteHeader, so the header was silently dropped.
		w.Header().Set("Content-Type", "application/octet-stream")
		w.WriteHeader(http.StatusOK)
		w.Write(qr)
	}))
	fmt.Printf("\ncreated user: %d %s\n, open %s to scan code\n", u.ID, u.Name, ts.URL)
	fmt.Println("hit enter when done")
	var i int
	fmt.Scanln(&i)
	ts.Close()
}
// Delete removes the named user, exiting the process on failure.
func Delete(r *repository.User, username string) {
	if err := r.Delete(username); err != nil {
		log.Fatal(err)
	}
}

// List prints every user's ID and name to stdout, exiting on lookup failure.
func List(r *repository.User) {
	users, err := r.GetAll()
	if err != nil {
		log.Fatal(err)
	}
	for _, u := range users {
		fmt.Printf("%d: %s\n", u.ID, u.Name)
	}
}
|
package sendtx
import (
"context"
"crypto/ecdsa"
"fmt"
"log"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethclient"
)
// SendTransaction signs a plain value transfer with the given hex-encoded
// private key and submits it to the chain behind the given RPC endpoint.
//
//	eth       - RPC endpoint URL of the target chain
//	value     - amount in wei to transfer
//	toAddrHex - hex-encoded recipient address
//	data      - optional call data payload
//	sk        - hex-encoded secp256k1 private key used to sign the tx
//
// Fixes over the original: errors are logged and RETURNED instead of calling
// log.Fatal (a library must not kill the caller's process); the failed
// public-key type assertion no longer returns a nil error; and the printed
// hash is taken from the signed transaction, not the unsigned one.
func SendTransaction(eth string, value *big.Int, toAddrHex string, data []byte, sk string) error {
	// Connect to the chain.
	client, err := ethclient.Dial(eth)
	if err != nil {
		log.Println(err)
		return fmt.Errorf("dial %s: %v", eth, err)
	}
	// Decode the hex private key.
	privateKeyECDSA, err := crypto.HexToECDSA(sk)
	if err != nil {
		log.Println(err)
		return fmt.Errorf("parse private key: %v", err)
	}
	// Derive the sender address from the public key.
	publicKeyECDSA, ok := privateKeyECDSA.Public().(*ecdsa.PublicKey)
	if !ok {
		// Fix: the original logged fatally and had a dead `return err`
		// that would have returned nil.
		return fmt.Errorf("cannot assert type: publicKey is not of type *ecdsa.PublicKey")
	}
	fromAddress := crypto.PubkeyToAddress(*publicKeyECDSA)
	nonce, err := client.PendingNonceAt(context.Background(), fromAddress)
	if err != nil {
		log.Println(err)
		return fmt.Errorf("pending nonce: %v", err)
	}
	fmt.Println("from:", fromAddress)
	fmt.Println("nonce:", nonce)
	// Build the transaction.
	gasLimit := uint64(21000) // enough for a plain value transfer
	gasPrice, err := client.SuggestGasPrice(context.Background())
	if err != nil {
		log.Println(err)
		return fmt.Errorf("suggest gas price: %v", err)
	}
	toAddress := common.HexToAddress(toAddrHex)
	tx := types.NewTransaction(nonce, toAddress, value, gasLimit, gasPrice, data)
	// Sign with the chain ID for EIP-155 replay protection.
	chainID, err := client.NetworkID(context.Background())
	if err != nil {
		log.Println(err)
		return fmt.Errorf("network id: %v", err)
	}
	fmt.Println("chain ID:", chainID)
	signedTx, err := types.SignTx(tx, types.NewEIP155Signer(chainID), privateKeyECDSA)
	if err != nil {
		log.Println(err)
		return fmt.Errorf("sign tx: %v", err)
	}
	// Submit.
	if err = client.SendTransaction(context.Background(), signedTx); err != nil {
		log.Println(err)
		return fmt.Errorf("send tx: %v", err)
	}
	fmt.Printf("tx hash: %s\n", signedTx.Hash().Hex())
	return nil
}
|
package model
import (
"context"
"github.com/mongodb/mongo-go-driver/bson"
"github.com/mongodb/mongo-go-driver/bson/primitive"
"github.com/mongodb/mongo-go-driver/mongo/options"
log "github.com/sirupsen/logrus"
"testing"
"time"
)
// TestInitClient connects to MongoDB with the default URI and inserts one
// document; it makes no assertion and relies on the insert not panicking.
// NOTE(review): the 10s sleep presumably waits for the connection to be
// established — confirm whether InitClient connects asynchronously.
func TestInitClient(t *testing.T) {
	client, _ := InitClient(context.Background(), "")
	time.Sleep(10 * time.Second)
	client.Database("db1").Collection("numbers").InsertOne(context.Background(), bson.M{
		"value": "hello world",
		"name":  "test",
	})
}
// TestRelate runs a user -> role_user -> role aggregation (a translation of
// the mongo shell pipeline quoted below) and logs the cursor and error; it
// makes no assertion.
func TestRelate(t *testing.T) {
	// b backs the AllowDiskUse option pointer below.
	b := true
	// db.user.aggregate(
	// [
	// {
	// "$project" : {
	// "_id" : NumberInt(0),
	// "user" : "$$ROOT"
	// }
	// },
	// {
	// "$lookup" : {
	// "localField" : "user._id",
	// "from" : "role_user",
	// "foreignField" : "userid",
	// "as" : "role_user"
	// }
	// },
	// {
	// "$unwind" : {
	// "path" : "$role_user",
	// "preserveNullAndEmptyArrays" : true
	// }
	// },
	// {
	// "$lookup" : {
	// "localField" : "role_user.roleid",
	// "from" : "role",
	// "foreignField" : "_id",
	// "as" : "role"
	// }
	// },
	// {
	// "$unwind" : {
	// "path" : "$role",
	// "preserveNullAndEmptyArrays" : true
	// }
	// },
	// {
	// "$match" : {
	// "user._id" : ObjectId("5c33711e06b5362b5f8dccbf")
	// }
	// }
	//],
	// {
	// "allowDiskUse" : true
	// }
	// );
	cursor, e := C("user").Aggregate(context.Background(),
		[]primitive.E{
			primitive.E{
				Key: "$project",
				Value: bson.M{
					"_id":  0,
					"user": "$$ROOT",
				},
			},
			primitive.E{
				Key: "$lookup",
				Value: bson.M{
					"localField":   "user._id",
					"from":         "role_user",
					"foreignField": "userid",
					"as":           "role_user",
				},
			},
			primitive.E{
				Key: "$unwind",
				Value: bson.M{
					"path":                       "$role_user",
					"preserveNullAndEmptyArrays": true,
				},
			},
			primitive.E{
				Key: "$lookup",
				Value: bson.M{
					"localField":   "role_user.roleid",
					"from":         "role",
					"foreignField": "_id",
					"as":           "role",
				},
			},
			primitive.E{
				Key: "$unwind",
				Value: bson.M{"path": "$role",
					"preserveNullAndEmptyArrays": true,
				},
			},
			primitive.E{
				Key: "$match",
				Value: bson.M{
					"user._id": ID("5c384909078d4d5bd20177be"),
				},
			},
		},
		&options.AggregateOptions{
			AllowDiskUse: &b,
		})
	log.Println(e)
	log.Println(cursor)
}
|
package binance
import (
"context"
"net/http"
"github.com/adshao/go-binance/v2/common"
)
// ListBookTickersService list best price/qty on the order book for a symbol
// or, when no symbol is set, for all symbols.
type ListBookTickersService struct {
	c      *Client
	symbol *string // optional; nil means all symbols
}

// Symbol set symbol
func (s *ListBookTickersService) Symbol(symbol string) *ListBookTickersService {
	s.symbol = &symbol
	return s
}
// Do sends the bookTicker request and decodes the response into a list,
// wrapping a single-object payload in a JSON array first.
func (s *ListBookTickersService) Do(ctx context.Context, opts ...RequestOption) (res []*BookTicker, err error) {
	r := &request{
		method:   http.MethodGet,
		endpoint: "/api/v3/ticker/bookTicker",
	}
	if s.symbol != nil {
		r.setParam("symbol", *s.symbol)
	}
	data, err := s.c.callAPI(ctx, r, opts...)
	if err != nil {
		return []*BookTicker{}, err
	}
	// Fix: normalize the payload only after the error check; the original
	// called common.ToJSONList on data before checking err, unlike every
	// sibling Do method in this file.
	data = common.ToJSONList(data)
	res = make([]*BookTicker, 0)
	if err = json.Unmarshal(data, &res); err != nil {
		return []*BookTicker{}, err
	}
	return res, nil
}
// BookTicker define book ticker info
type BookTicker struct {
	Symbol      string `json:"symbol"`
	BidPrice    string `json:"bidPrice"`
	BidQuantity string `json:"bidQty"`
	AskPrice    string `json:"askPrice"`
	AskQuantity string `json:"askQty"`
}

// ListPricesService list latest price for a symbol or symbols
type ListPricesService struct {
	c       *Client
	symbol  *string  // takes precedence over symbols when set
	symbols []string // optional list filter; nil means all symbols
}

// Symbol set symbol
func (s *ListPricesService) Symbol(symbol string) *ListPricesService {
	s.symbol = &symbol
	return s
}
// Do sends the ticker/price request and decodes the response into a list of
// symbol/price pairs, wrapping a single-object payload in a JSON array first.
func (s *ListPricesService) Do(ctx context.Context, opts ...RequestOption) (res []*SymbolPrice, err error) {
	r := &request{
		method:   http.MethodGet,
		endpoint: "/api/v3/ticker/price",
	}
	switch {
	case s.symbol != nil:
		r.setParam("symbol", *s.symbol)
	case s.symbols != nil:
		// Serialize the list to a JSON array string, as the API expects.
		encoded, _ := json.Marshal(s.symbols)
		r.setParam("symbols", string(encoded))
	}
	data, err := s.c.callAPI(ctx, r, opts...)
	if err != nil {
		return []*SymbolPrice{}, err
	}
	data = common.ToJSONList(data)
	res = make([]*SymbolPrice, 0)
	if err = json.Unmarshal(data, &res); err != nil {
		return []*SymbolPrice{}, err
	}
	return res, nil
}
// SymbolPrice define symbol and price pair
type SymbolPrice struct {
	Symbol string `json:"symbol"`
	Price  string `json:"price"`
}

// ListPriceChangeStatsService show stats of price change in last 24 hours for all symbols
type ListPriceChangeStatsService struct {
	c       *Client
	symbol  *string  // takes precedence over symbols when set
	symbols []string // optional list filter; nil means all symbols
}

// Symbol set symbol
func (s *ListPriceChangeStatsService) Symbol(symbol string) *ListPriceChangeStatsService {
	s.symbol = &symbol
	return s
}

// Symbols set symbols
func (s *ListPriceChangeStatsService) Symbols(symbols []string) *ListPriceChangeStatsService {
	s.symbols = symbols
	return s
}

// Symbols set symbols
func (s *ListPricesService) Symbols(symbols []string) *ListPricesService {
	s.symbols = symbols
	return s
}
// Do sends the 24hr ticker request and decodes the response into a list of
// price-change stats.
func (s *ListPriceChangeStatsService) Do(ctx context.Context, opts ...RequestOption) (res []*PriceChangeStats, err error) {
	r := &request{
		method:   http.MethodGet,
		endpoint: "/api/v3/ticker/24hr",
	}
	if s.symbol != nil {
		r.setParam("symbol", *s.symbol)
	} else if s.symbols != nil {
		// Fix: serialize the list to a JSON array string, matching
		// ListPricesService.Do and ListSymbolTickerService.Do; passing the
		// raw []string produced a Go-formatted value the API rejects.
		encoded, _ := json.Marshal(s.symbols)
		r.setParam("symbols", string(encoded))
	}
	data, err := s.c.callAPI(ctx, r, opts...)
	if err != nil {
		return res, err
	}
	data = common.ToJSONList(data)
	res = make([]*PriceChangeStats, 0)
	if err = json.Unmarshal(data, &res); err != nil {
		return nil, err
	}
	return res, nil
}
// PriceChangeStats define price change stats
// NOTE(review): FristID is a typo of FirstID, but the field is exported and
// may be referenced by consumers, so it is kept as-is (json tag is correct).
type PriceChangeStats struct {
	Symbol             string `json:"symbol"`
	PriceChange        string `json:"priceChange"`
	PriceChangePercent string `json:"priceChangePercent"`
	WeightedAvgPrice   string `json:"weightedAvgPrice"`
	PrevClosePrice     string `json:"prevClosePrice"`
	LastPrice          string `json:"lastPrice"`
	LastQty            string `json:"lastQty"`
	BidPrice           string `json:"bidPrice"`
	BidQty             string `json:"bidQty"`
	AskPrice           string `json:"askPrice"`
	AskQty             string `json:"askQty"`
	OpenPrice          string `json:"openPrice"`
	HighPrice          string `json:"highPrice"`
	LowPrice           string `json:"lowPrice"`
	Volume             string `json:"volume"`
	QuoteVolume        string `json:"quoteVolume"`
	OpenTime           int64  `json:"openTime"`
	CloseTime          int64  `json:"closeTime"`
	FristID            int64  `json:"firstId"`
	LastID             int64  `json:"lastId"`
	Count              int64  `json:"count"`
}

// AveragePriceService show current average price for a symbol
type AveragePriceService struct {
	c      *Client
	symbol string // required
}

// Symbol set symbol
func (s *AveragePriceService) Symbol(symbol string) *AveragePriceService {
	s.symbol = symbol
	return s
}
// Do sends the avgPrice request and decodes the single-object response.
func (s *AveragePriceService) Do(ctx context.Context, opts ...RequestOption) (res *AvgPrice, err error) {
	r := &request{
		method:   http.MethodGet,
		endpoint: "/api/v3/avgPrice",
	}
	r.setParam("symbol", s.symbol)
	data, err := s.c.callAPI(ctx, r, opts...)
	if err != nil {
		return nil, err
	}
	price := new(AvgPrice)
	if err = json.Unmarshal(data, price); err != nil {
		return nil, err
	}
	return price, nil
}
// AvgPrice define average price
type AvgPrice struct {
	Mins  int64  `json:"mins"`
	Price string `json:"price"`
}

// ListSymbolTickerService queries rolling-window price change statistics.
type ListSymbolTickerService struct {
	c          *Client
	symbol     *string  // takes precedence over symbols when set
	symbols    []string // optional list filter
	windowSize *string  // optional; see WindowSize
}

// SymbolTicker is one rolling-window price change statistics entry.
type SymbolTicker struct {
	Symbol             string `json:"symbol"`
	PriceChange        string `json:"priceChange"`
	PriceChangePercent string `json:"priceChangePercent"`
	WeightedAvgPrice   string `json:"weightedAvgPrice"`
	OpenPrice          string `json:"openPrice"`
	HighPrice          string `json:"highPrice"`
	LowPrice           string `json:"lowPrice"`
	LastPrice          string `json:"lastPrice"`
	Volume             string `json:"volume"`
	QuoteVolume        string `json:"quoteVolume"`
	OpenTime           int64  `json:"openTime"`
	CloseTime          int64  `json:"closeTime"`
	FirstId            int64  `json:"firstId"`
	LastId             int64  `json:"lastId"`
	Count              int64  `json:"count"`
}

// Symbol sets the single symbol to query.
func (s *ListSymbolTickerService) Symbol(symbol string) *ListSymbolTickerService {
	s.symbol = &symbol
	return s
}

// Symbols sets the list of symbols to query.
func (s *ListSymbolTickerService) Symbols(symbols []string) *ListSymbolTickerService {
	s.symbols = symbols
	return s
}

// WindowSize sets the rolling window size.
//
// Defaults to 1d if no parameter provided
//
// Supported windowSize values:
//
// - 1m,2m....59m for minutes
//
// - 1h, 2h....23h - for hours
//
// - 1d...7d - for days
//
// Units cannot be combined (e.g. 1d2h is not allowed).
//
// Reference: https://binance-docs.github.io/apidocs/spot/en/#rolling-window-price-change-statistics
func (s *ListSymbolTickerService) WindowSize(windowSize string) *ListSymbolTickerService {
	s.windowSize = &windowSize
	return s
}
// Do sends the rolling-window ticker request and decodes the response into a
// list, wrapping a single-object payload in a JSON array first.
func (s *ListSymbolTickerService) Do(ctx context.Context, opts ...RequestOption) (res []*SymbolTicker, err error) {
	r := &request{
		method:   http.MethodGet,
		endpoint: "/api/v3/ticker",
	}
	if s.symbol != nil {
		r.setParam("symbol", *s.symbol)
	} else if s.symbols != nil {
		encoded, _ := json.Marshal(s.symbols)
		r.setParam("symbols", string(encoded))
	}
	if s.windowSize != nil {
		r.setParam("windowSize", *s.windowSize)
	}
	data, err := s.c.callAPI(ctx, r, opts...)
	if err != nil {
		return []*SymbolTicker{}, err
	}
	// Fix: normalize the payload only after the error check; the original
	// called common.ToJSONList on data before checking err, unlike the
	// sibling Do methods in this file.
	data = common.ToJSONList(data)
	res = make([]*SymbolTicker, 0)
	if err = json.Unmarshal(data, &res); err != nil {
		return []*SymbolTicker{}, err
	}
	return res, nil
}
|
package main
import (
"net/http/httptest"
"testing"
)
// TestCodeRun verifies RunCode.Run succeeds against a stubbed API that
// returns an empty JSON object.
func TestCodeRun(t *testing.T) {
	var ts *httptest.Server
	// args is presumably a package-level variable that Run consumes — the
	// stub server and CLI args both come from mockAPI.
	ts, args = mockAPI(`{}`)
	defer ts.Close()
	prepareScript("def test():\n\treturn 'test'")
	rc := &RunCode{}
	err := rc.Run()
	if err != nil {
		t.Error(err)
	}
}
|
package awsvaultcredsprovider
import (
"context"
"crypto/sha1"
"encoding/json"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/sts"
"github.com/jcmturner/vaultclient"
gootp "gopkg.in/jcmturner/gootp.v1"
"time"
)
const (
	// PROVIDER_NAME identifies this provider in credentials.Value.
	PROVIDER_NAME = "VaultCredsProvider"
	// DefaultTempCredentialsDuration is the default STS session duration,
	// in seconds (see getSessionCredentials).
	DefaultTempCredentialsDuration = 900
)

// AWSCredential holds an AWS key pair plus optional MFA and session state.
// The sensitive parts are unexported; use the Get* accessors below.
type AWSCredential struct {
	AccessKeyId     string
	secretAccessKey string // see GetSecretAccessKey
	sessionToken    string // see GetSessionToken
	MFASerialNumber string
	mFASecret       string // TOTP seed; see GetMFASecret
	Expiration      time.Time
	// TTL is the cache lifetime in seconds; a negative value disables
	// caching entirely (see IsExpired).
	TTL int64
}

// GetSecretAccessKey returns the unexported secret access key.
func (c *AWSCredential) GetSecretAccessKey() string {
	return c.secretAccessKey
}

// GetSessionToken returns the unexported session token.
func (c *AWSCredential) GetSessionToken() string {
	return c.sessionToken
}

// GetMFASecret returns the unexported TOTP seed.
func (c *AWSCredential) GetMFASecret() string {
	return c.mFASecret
}

// VaultCredsProvider is an AWS credentials provider backed by Vault.
type VaultCredsProvider struct {
	VaultClient *vaultclient.Client
	Name        string
	Arn         string
	Credential  AWSCredential
	reloadAfter time.Time // next refresh deadline; see SetExpiration/IsExpired
}
// NewVaultCredsProvider builds a provider for the given role ARN using a
// freshly constructed Vault client.
func NewVaultCredsProvider(arn string, conf vaultclient.Config, creds vaultclient.Credentials) (*VaultCredsProvider, error) {
	cl, err := vaultclient.NewClient(&conf, &creds)
	if err != nil {
		return nil, err
	}
	return &VaultCredsProvider{
		VaultClient: &cl,
		Arn: arn,
	}, nil
}
// SetAccessKey sets the access key id; chainable.
func (p *VaultCredsProvider) SetAccessKey(s string) *VaultCredsProvider {
	p.Credential.AccessKeyId = s
	return p
}

// SetSecretAccessKey sets the secret access key; chainable.
func (p *VaultCredsProvider) SetSecretAccessKey(s string) *VaultCredsProvider {
	p.Credential.secretAccessKey = s
	return p
}

// SetSessionToken sets the session token; chainable.
func (p *VaultCredsProvider) SetSessionToken(s string) *VaultCredsProvider {
	p.Credential.sessionToken = s
	return p
}

// SetExpiration sets the credential expiry and pulls the refresh deadline
// forward so the provider never serves credentials past their expiry.
func (p *VaultCredsProvider) SetExpiration(t time.Time) *VaultCredsProvider {
	p.Credential.Expiration = t
	if p.reloadAfter.After(t) || p.reloadAfter.IsZero() {
		p.reloadAfter = t
	}
	return p
}

// SetTTL sets the cache lifetime in seconds; chainable.
func (p *VaultCredsProvider) SetTTL(ttl int64) *VaultCredsProvider {
	p.Credential.TTL = ttl
	return p
}

// WithMFA configures the MFA device serial and TOTP seed; chainable.
func (p *VaultCredsProvider) WithMFA(serial, secret string) *VaultCredsProvider {
	p.Credential.MFASerialNumber = serial
	p.Credential.mFASecret = secret
	return p
}
// Retrieve implements credentials.Provider. It reloads the credential from
// Vault and, when MFA material is present, exchanges it for temporary STS
// session credentials before returning.
func (p *VaultCredsProvider) Retrieve() (credentials.Value, error) {
	err := p.Read()
	if err != nil {
		return credentials.Value{}, err
	}
	if p.Credential.mFASecret != "" && p.Credential.MFASerialNumber != "" {
		// We have an MFA so we will get a session to be able to support calls where MFA is required.
		err := p.getSessionCredentials()
		if err != nil {
			return credentials.Value{}, err
		}
	}
	return credentials.Value{
		AccessKeyID:     p.Credential.AccessKeyId,
		SecretAccessKey: p.Credential.secretAccessKey,
		SessionToken:    p.Credential.sessionToken,
		ProviderName:    PROVIDER_NAME,
	}, nil
}
// getSessionCredentials calls STS GetSessionToken with a freshly generated
// TOTP code and replaces p.Credential with the temporary session credentials
// returned. On success it also floors TTL at 30s, since a TOTP code cannot be
// reused within its 30-second window.
func (p *VaultCredsProvider) getSessionCredentials() error {
	creds := credentials.NewStaticCredentials(p.Credential.AccessKeyId, p.Credential.secretAccessKey, p.Credential.sessionToken)
	config := aws.NewConfig().WithCredentials(creds)
	sess := session.Must(session.NewSession(config))
	svc := sts.New(sess)
	ctx := context.Background()
	// 6-digit TOTP from the stored secret, SHA-1 per RFC 6238 defaults.
	OTP, _, err := gootp.TOTPNow(p.Credential.mFASecret, sha1.New, 6)
	if err != nil {
		return err
	}
	params := &sts.GetSessionTokenInput{}
	// Session duration: at least DefaultTempCredentialsDuration, stretched to
	// TTL when the caller asked for a longer cache window.
	var d int64 = DefaultTempCredentialsDuration
	if p.Credential.TTL > d {
		d = p.Credential.TTL
	}
	params.SetDurationSeconds(d).
		SetSerialNumber(p.Credential.MFASerialNumber).
		SetTokenCode(OTP)
	result, err := svc.GetSessionTokenWithContext(ctx, params)
	if err != nil {
		return err
	}
	p.Credential.AccessKeyId = *result.Credentials.AccessKeyId
	p.Credential.secretAccessKey = *result.Credentials.SecretAccessKey
	p.Credential.sessionToken = *result.Credentials.SessionToken
	p.Credential.Expiration = *result.Credentials.Expiration
	if p.Credential.TTL < 30 {
		// Cannot reuse OTP within 30 seconds. Min of 30s cache
		p.Credential.TTL = 30
	}
	return nil
}
// IsExpired implements credentials.Provider. A negative TTL disables caching
// entirely (always expired); otherwise the credential is expired once the
// current UTC time has passed reloadAfter.
func (p *VaultCredsProvider) IsExpired() bool {
	if p.Credential.TTL < 0 {
		return true
	}
	return time.Now().UTC().After(p.reloadAfter)
}
// Store writes the full credential (including secret fields, which the map
// exposes deliberately) to Vault under p.Arn. Counterpart of Read.
func (p *VaultCredsProvider) Store() error {
	m := map[string]interface{}{
		"Name":            p.Name,
		"AccessKeyID":     p.Credential.AccessKeyId,
		"SecretAccessKey": p.Credential.secretAccessKey,
		"SessionToken":    p.Credential.sessionToken,
		"MFASerialNumber": p.Credential.MFASerialNumber,
		"MFASecret":       p.Credential.mFASecret,
		"Expiration":      p.Credential.Expiration,
		"TTL":             p.Credential.TTL,
	}
	return p.VaultClient.Write(p.Arn, m)
}
// Read loads the credential stored under p.Arn from Vault into p. Absent keys
// leave the corresponding fields untouched. It also derives reloadAfter as
// the earlier of the stored Expiration and now+TTL.
func (p *VaultCredsProvider) Read() error {
	m, err := p.VaultClient.Read(p.Arn)
	if err != nil {
		return err
	}
	// NOTE(review): the type assertions below assume Vault returns strings and
	// json.Number values; an unexpected type would panic — confirm upstream.
	if v, ok := m["Name"]; ok {
		p.Name = v.(string)
	}
	if v, ok := m["AccessKeyID"]; ok {
		p.Credential.AccessKeyId = v.(string)
	}
	if v, ok := m["SecretAccessKey"]; ok {
		p.Credential.secretAccessKey = v.(string)
	}
	if v, ok := m["SessionToken"]; ok {
		p.Credential.sessionToken = v.(string)
	}
	if v, ok := m["MFASerialNumber"]; ok {
		p.Credential.MFASerialNumber = v.(string)
	}
	if v, ok := m["MFASecret"]; ok {
		p.Credential.mFASecret = v.(string)
	}
	if v, ok := m["Expiration"]; ok {
		// Unparseable expiry falls back to "now", i.e. immediately stale.
		if p.Credential.Expiration, err = time.Parse(time.RFC3339, v.(string)); err != nil {
			p.Credential.Expiration = time.Now().UTC()
		}
		p.reloadAfter = p.Credential.Expiration
	}
	if v, ok := m["TTL"]; ok {
		if p.Credential.TTL, err = v.(json.Number).Int64(); err != nil {
			// Default to never caching
			p.Credential.TTL = -1
		}
		// Pull reloadAfter earlier if the TTL window ends before Expiration.
		t := time.Now().UTC().Add(time.Duration(p.Credential.TTL) * time.Second)
		if p.Credential.Expiration.After(t) || p.Credential.Expiration.IsZero() {
			p.reloadAfter = t
		}
	}
	return nil
}
// Delete removes the stored credential from Vault at p.Arn.
func (p *VaultCredsProvider) Delete() error {
	return p.VaultClient.Delete(p.Arn)
}
|
package connrt
import (
"math/rand"
"time"
"github.com/golang/mock/gomock"
"github.com/gookit/event"
"github.com/kbence/conndetect/internal/connlib"
"github.com/kbence/conndetect/internal/ext_mock"
"github.com/kbence/conndetect/internal/utils_mock"
. "gopkg.in/check.v1"
)
// Register the suite with gocheck.
var _ = Suite(&PortscanDetectorTestSuite{})
// PortscanDetectorTestSuite groups the port-scan-detector tests.
type PortscanDetectorTestSuite struct{}
// Shared settings — presumably "3 distinct ports within a 60s window";
// confirm against NewPortscanSettings.
var testPortscanSettings = NewPortscanSettings(3, 60*time.Second)
// newConnectionToPort builds a connection from a fixed source host (1.2.3.4,
// random high port) to a fixed destination host (5.6.7.8) on the given port,
// so that repeated calls look like one scanner probing many ports.
func (s *PortscanDetectorTestSuite) newConnectionToPort(port int) connlib.DirectionalConnection {
	return connlib.DirectionalConnection{
		Source:      connlib.Endpoint{IP: connlib.IPv4Address{1, 2, 3, 4}, Port: uint16(30000 + rand.Int()%20000)},
		Destination: connlib.Endpoint{IP: connlib.IPv4Address{5, 6, 7, 8}, Port: uint16(port)},
	}
}
// newRandomConnection builds a connection with fully random endpoints, so
// that repeated calls never correlate into a scan pattern.
func (s *PortscanDetectorTestSuite) newRandomConnection() connlib.DirectionalConnection {
	sourceIP := connlib.IPv4Address{byte(rand.Int() % 255), byte(rand.Int() % 255),
		byte(rand.Int() % 255), byte(rand.Int() % 255)}
	destIP := connlib.IPv4Address{byte(rand.Int() % 255), byte(rand.Int() % 255),
		byte(rand.Int() % 255), byte(rand.Int() % 255)}
	return connlib.DirectionalConnection{
		Source:      connlib.Endpoint{IP: sourceIP, Port: uint16(30000 + rand.Int()%20000)},
		Destination: connlib.Endpoint{IP: destIP, Port: uint16(1 + rand.Int()%20000)},
	}
}
// getTime parses timeStr with the package TIME_FORMAT layout.
// BUG FIX: the original discarded the time.Parse error and silently returned
// the zero time on a bad layout, which would make the suite's time-based
// assertions pass or fail for the wrong reason. A malformed fixture string is
// a programming error in the tests, so panic loudly instead.
func (s *PortscanDetectorTestSuite) getTime(timeStr string) time.Time {
	fakeTime, err := time.Parse(TIME_FORMAT, timeStr)
	if err != nil {
		panic(err)
	}
	return fakeTime
}
// TestPortscanDetectorDetectsScan feeds three connections from the same
// source to three different ports within the configured window and expects
// exactly one "Port scan detected" report.
func (s *PortscanDetectorTestSuite) TestPortscanDetectorDetectsScan(c *C) {
	ctrl := gomock.NewController(c)
	defer ctrl.Finish()
	eventManagerMock := ext_mock.NewMockManagerFace(ctrl)
	printerMock := utils_mock.NewMockPrinter(ctrl)
	timeMock := utils_mock.NewTimeTravelingMock(s.getTime("2021-05-19 09:59:34"))
	// The detector subscribes itself to "newConnection" on construction.
	eventManagerMock.EXPECT().On("newConnection", gomock.Any())
	detector := NewPortscanDetector(eventManagerMock, testPortscanSettings)
	detector.printer = printerMock
	detector.time = timeMock
	// Expect the report listing all three probed ports.
	printerMock.
		EXPECT().
		Printf("%s: Port scan detected: %s -> %s on ports %s\n",
			"2021-05-19 09:59:34",
			"1.2.3.4",
			"5.6.7.8",
			"80,443,1234")
	detector.Handle(event.NewBasic("newConnection", event.M{"connection": s.newConnectionToPort(80)}))
	timeMock.ForwardBy(2 * time.Second)
	detector.Handle(event.NewBasic("newConnection", event.M{"connection": s.newConnectionToPort(443)}))
	// 59s total elapsed — still inside the 60s window.
	timeMock.ForwardBy(57 * time.Second)
	err := detector.Handle(event.NewBasic("newConnection", event.M{"connection": s.newConnectionToPort(1234)}))
	c.Check(err, IsNil)
}
// TestPortscanDetectorReportsOnlyOnce triggers a scan report and then sends a
// fourth probe from the same source; the single (non-Times) Printf
// expectation verifies no second report is emitted.
func (s *PortscanDetectorTestSuite) TestPortscanDetectorReportsOnlyOnce(c *C) {
	ctrl := gomock.NewController(c)
	defer ctrl.Finish()
	eventManagerMock := ext_mock.NewMockManagerFace(ctrl)
	printerMock := utils_mock.NewMockPrinter(ctrl)
	timeMock := utils_mock.NewTimeTravelingMock(s.getTime("2021-05-19 09:59:34"))
	eventManagerMock.EXPECT().On("newConnection", gomock.Any())
	detector := NewPortscanDetector(eventManagerMock, testPortscanSettings)
	detector.printer = printerMock
	detector.time = timeMock
	// Exactly one report expected, for the first three ports only.
	printerMock.
		EXPECT().
		Printf("%s: Port scan detected: %s -> %s on ports %s\n",
			"2021-05-19 09:59:34",
			"1.2.3.4",
			"5.6.7.8",
			"80,443,1234")
	detector.Handle(event.NewBasic("newConnection", event.M{"connection": s.newConnectionToPort(80)}))
	timeMock.ForwardBy(2 * time.Second)
	detector.Handle(event.NewBasic("newConnection", event.M{"connection": s.newConnectionToPort(443)}))
	timeMock.ForwardBy(57 * time.Second)
	detector.Handle(event.NewBasic("newConnection", event.M{"connection": s.newConnectionToPort(1234)}))
	// Fourth probe after the report — must not re-trigger Printf.
	err := detector.Handle(event.NewBasic("newConnection", event.M{"connection": s.newConnectionToPort(5432)}))
	c.Check(err, IsNil)
}
// TestPortscanDetectorDoesNothingOnConnectionsFromDifferentIPs sends three
// connections with unrelated random endpoints; the absence of any Printf
// expectation on printerMock makes a report a test failure.
func (s *PortscanDetectorTestSuite) TestPortscanDetectorDoesNothingOnConnectionsFromDifferentIPs(c *C) {
	ctrl := gomock.NewController(c)
	defer ctrl.Finish()
	eventManagerMock := ext_mock.NewMockManagerFace(ctrl)
	printerMock := utils_mock.NewMockPrinter(ctrl)
	timeMock := utils_mock.NewTimeTravelingMock(s.getTime("2021-05-19 09:59:34"))
	eventManagerMock.EXPECT().On("newConnection", gomock.Any())
	detector := NewPortscanDetector(eventManagerMock, testPortscanSettings)
	detector.printer = printerMock
	detector.time = timeMock
	detector.Handle(event.NewBasic("newConnection", event.M{"connection": s.newRandomConnection()}))
	timeMock.ForwardBy(2 * time.Second)
	detector.Handle(event.NewBasic("newConnection", event.M{"connection": s.newRandomConnection()}))
	timeMock.ForwardBy(57 * time.Second)
	err := detector.Handle(event.NewBasic("newConnection", event.M{"connection": s.newRandomConnection()}))
	c.Check(err, IsNil)
}
// TestPortscanDetectorDoesNothingOnRandomConnections: no Printf expectation,
// so any report fails the test.
// NOTE(review): this body is byte-identical to
// TestPortscanDetectorDoesNothingOnConnectionsFromDifferentIPs above —
// likely a copy/paste; confirm the intended distinct scenario.
func (s *PortscanDetectorTestSuite) TestPortscanDetectorDoesNothingOnRandomConnections(c *C) {
	ctrl := gomock.NewController(c)
	defer ctrl.Finish()
	eventManagerMock := ext_mock.NewMockManagerFace(ctrl)
	printerMock := utils_mock.NewMockPrinter(ctrl)
	timeMock := utils_mock.NewTimeTravelingMock(s.getTime("2021-05-19 09:59:34"))
	eventManagerMock.EXPECT().On("newConnection", gomock.Any())
	detector := NewPortscanDetector(eventManagerMock, testPortscanSettings)
	detector.printer = printerMock
	detector.time = timeMock
	detector.Handle(event.NewBasic("newConnection", event.M{"connection": s.newRandomConnection()}))
	timeMock.ForwardBy(2 * time.Second)
	detector.Handle(event.NewBasic("newConnection", event.M{"connection": s.newRandomConnection()}))
	timeMock.ForwardBy(57 * time.Second)
	err := detector.Handle(event.NewBasic("newConnection", event.M{"connection": s.newRandomConnection()}))
	c.Check(err, IsNil)
}
|
package data
import (
"fxkt.tech/bj21/internal/conf"
"fxkt.tech/bj21/internal/data/logic"
"github.com/go-kratos/kratos/v2/log"
"github.com/google/wire"
)
var (
	// ProviderSet wires the data-layer constructors for google/wire.
	ProviderSet = wire.NewSet(NewData, Newbj21Repo)
)
// Data aggregates the data-layer dependencies; currently only the game world.
type Data struct {
	world *logic.World
}
// NewData constructs the Data aggregate and returns a cleanup function that
// only logs shutdown for now (no real resources are held yet).
// NOTE(review): the conf parameter c is currently unused — kept for the wire
// signature; confirm it is intended for future datasource config.
func NewData(c *conf.Data, logger log.Logger) (*Data, func(), error) {
	cleanup := func() {
		logger.Log(log.LevelWarn, "status", "closing the data resources")
	}
	world := logic.NewWorld()
	return &Data{
		world: world,
	}, cleanup, nil
}
|
package main
import "fmt"
// main demonstrates string escape sequences and fixed-size array arithmetic.
func main() {
	// "\\\"" is an escaped backslash followed by an escaped quote: prints \" .
	str1 := "\\\""
	fmt.Println(str1)
	var numbers2 [5]int
	numbers2[0] = 2
	numbers2[3] = numbers2[0] - 3 // -1
	numbers2[1] = numbers2[2] + 5 // 5 (numbers2[2] is still its zero value)
	numbers2[4] = len(numbers2)   // 5
	sum := 0
	for _, n := range numbers2 {
		sum += n
	}
	// "==" compares two values for equality; trivially true here, since sum
	// is the same total spelled out term by term.
	fmt.Printf("%v\n", sum == numbers2[0]+numbers2[1]+numbers2[2]+numbers2[3]+numbers2[4])
}
|
package main
import (
"crypto/rand"
"fmt"
smpp "github.com/mergenchik/smpp34"
gsmutil "github.com/mergenchik/smpp34/gsmutil"
"math"
)
// main demonstrates sending a long SMS over SMPP: messages whose UCS-2
// encoding exceeds 140 bytes are split into 134-byte parts, each prefixed
// with a 6-byte UDH concatenation header, then submitted individually; the
// loop at the end reads SUBMIT_SM_RESP PDUs until the connection drops.
func main() {
	// connect and bind
	tx, err := smpp.NewTransmitter(
		"localhost",
		9000,
		5,
		smpp.Params{
			"system_type": "CMT",
			"system_id":   "hugo",
			"password":    "ggoohu",
		},
	)
	if err != nil {
		fmt.Println("Connection Err:", err)
		return
	}
	msg := "Very Long Message, Очень длинное сообщение, 1234567890123456789012345678901234567890123456789012345678901234567890END"
	sm_bytes := gsmutil.EncodeUcs2(msg)
	sm_len := len(sm_bytes)
	fmt.Println("Message Bytes count:", sm_len)
	if sm_len > 140 {
		// 134 bytes of payload per part leaves room for the 6-byte UDH.
		total_parts := byte(int(math.Ceil(float64(sm_len) / 134.0)))
		// UDHI flag tells the SMSC the payload starts with a UDH header.
		send_params := smpp.Params{smpp.DATA_CODING: smpp.ENCODING_ISO10646, smpp.ESM_CLASS: smpp.ESM_CLASS_GSMFEAT_UDHI}
		partNum := 1
		// Random 1-byte concatenation reference shared by all parts.
		uid := make([]byte, 1)
		_, err := rand.Read(uid)
		if err != nil {
			// fmt.Println("QuerySM error:", err)
			fmt.Println("Rand.Read error:", err)
			return
		}
		for i := 0; i < sm_len; i += 134 {
			start := i
			end := i + 134
			if end > sm_len {
				end = sm_len
			}
			// UDH: IEI 0x00 (concat, 8-bit ref), length 3, ref, total, index.
			part := []byte{0x05, 0x00, 0x03, uid[0], total_parts, byte(partNum)}
			part = append(part, sm_bytes[start:end]...)
			fmt.Println("Part:", part)
			// Send SubmitSm
			seq, err := tx.SubmitSmEncoded("test", "test2", part, &send_params)
			// Pdu gen errors
			if err != nil {
				fmt.Println("SubmitSm err:", err)
			}
			// Should save this to match with message_id
			fmt.Println("seq:", seq)
			partNum++
		}
	} else {
		// Short message: a single plain SubmitSm suffices.
		send_params := smpp.Params{}
		// Send SubmitSm
		seq, err := tx.SubmitSm("test", "test2", msg, &send_params)
		// Pdu gen errors
		if err != nil {
			fmt.Println("SubmitSm err:", err)
		}
		// Should save this to match with message_id
		fmt.Println("seq:", seq)
	}
	for {
		pdu, err := tx.Read() // This is blocking
		if err != nil {
			fmt.Println("Read Err:", err)
			break
		}
		// EnquireLinks are auto handles
		switch pdu.GetHeader().Id {
		case smpp.SUBMIT_SM_RESP:
			// message_id should match this with seq message
			fmt.Println("MSG ID:", pdu.GetField("message_id").Value())
			fmt.Printf("PDU Header: %v", pdu.GetHeader())
			fmt.Println()
		default:
			// ignore all other PDUs or do what you link with them
			fmt.Println("PDU ID:", pdu.GetHeader().Id)
		}
	}
	fmt.Println("ending...")
}
|
package server
import (
"context"
"net/http"
"github.com/chitoku-k/ejaculation-counter/supplier/infrastructure/config"
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
// engine is the concrete HTTP server; it shuts down when ctx is cancelled.
type engine struct {
	ctx         context.Context
	Environment config.Environment
}
// Engine runs the HTTP server until its context is cancelled.
type Engine interface {
	Start() error
}
// NewEngine builds an Engine whose lifetime is bound to ctx.
func NewEngine(ctx context.Context, environment config.Environment) Engine {
	return &engine{
		ctx:         ctx,
		Environment: environment,
	}
}
// Start serves /healthz and /metrics on the configured port, blocking until
// e.ctx is cancelled (graceful shutdown) or the listener fails. A clean
// shutdown is reported as nil, not http.ErrServerClosed.
func (e *engine) Start() error {
	router := gin.New()
	router.Use(gin.Recovery())
	// Keep probe/scrape endpoints out of the access log.
	router.Use(gin.LoggerWithConfig(gin.LoggerConfig{
		SkipPaths: []string{"/healthz", "/metrics"},
	}))
	router.Any("/healthz", func(c *gin.Context) {
		c.String(http.StatusOK, "OK")
	})
	router.GET("/metrics", gin.WrapH(promhttp.Handler()))
	server := http.Server{
		Addr:    ":" + e.Environment.Port,
		Handler: router,
	}
	go func() {
		<-e.ctx.Done()
		// NOTE(review): Shutdown's error is discarded and the background
		// context means no drain deadline — confirm this is acceptable.
		server.Shutdown(context.Background())
	}()
	err := server.ListenAndServe()
	if err == http.ErrServerClosed {
		return nil
	}
	return err
}
|
package env_test
import (
"testing"
"github.com/nasermirzaei89/env"
"github.com/stretchr/testify/assert"
)
// TestGetInt8Slice covers env.GetInt8Slice: the default is returned when the
// variable is absent or unparseable, a valid CSV is parsed, and an empty
// (but set) variable yields an empty slice rather than the default.
func TestGetInt8Slice(t *testing.T) {
	t.Run("GetAbsentInt8SliceWithDefault", func(t *testing.T) {
		def := []int8{21, 22}
		res := env.GetInt8Slice("V1", def)
		assert.Equal(t, def, res)
	})
	t.Run("GetValidInt8SliceWithDefault", func(t *testing.T) {
		def := []int8{21, 22}
		expected := []int8{31, 32, 33}
		t.Setenv("V1", "31,32,33")
		res := env.GetInt8Slice("V1", def)
		assert.Equal(t, expected, res)
	})
	t.Run("GetInvalidInt8SliceWithDefault", func(t *testing.T) {
		def := []int8{21, 22}
		t.Setenv("V1", "invalid")
		res := env.GetInt8Slice("V1", def)
		assert.Equal(t, def, res)
	})
	// One bad element poisons the whole list — default wins.
	t.Run("GetInvalidInt8SliceWithDefault2", func(t *testing.T) {
		def := []int8{21, 22}
		t.Setenv("V1", "1,2,Three")
		res := env.GetInt8Slice("V1", def)
		assert.Equal(t, def, res)
	})
	t.Run("GetEmptyInt8SliceWithDefault", func(t *testing.T) {
		def := []int8{21, 22}
		expected := make([]int8, 0)
		t.Setenv("V1", "")
		res := env.GetInt8Slice("V1", def)
		assert.Equal(t, expected, res)
	})
}
// TestMustGetInt8Slice covers env.MustGetInt8Slice: it panics when the
// variable is absent or unparseable, parses a valid CSV, and treats an empty
// (but set) variable as an empty slice.
func TestMustGetInt8Slice(t *testing.T) {
	t.Run("MustGetAbsentInt8Slice", func(t *testing.T) {
		assert.Panics(t, func() {
			env.MustGetInt8Slice("V1")
		})
	})
	t.Run("MustGetValidInt8Slice", func(t *testing.T) {
		expected := []int8{31, 32, 33}
		t.Setenv("V1", "31,32,33")
		res := env.MustGetInt8Slice("V1")
		assert.Equal(t, expected, res)
	})
	t.Run("MustGetInvalidInt8Slice", func(t *testing.T) {
		t.Setenv("V1", "1,2,Three")
		assert.Panics(t, func() {
			env.MustGetInt8Slice("V1")
		})
	})
	t.Run("MustGetEmptyInt8Slice", func(t *testing.T) {
		expected := make([]int8, 0)
		t.Setenv("V1", "")
		res := env.MustGetInt8Slice("V1")
		assert.Equal(t, expected, res)
	})
}
|
package main
import (
"github.com/julienschmidt/httprouter"
"github.com/vincentserpoul/playwithsql/status/islatest"
)
// globalMux wires the entityone "islatest" CRUD routes onto a new router,
// injecting the shared DB handle and link from env into each handler.
func globalMux(env *localEnv) *httprouter.Router {
	router := httprouter.New()
	router.POST("/entityone/status/islatest", islatest.EntityoneCreateHandler(env.DB, env.IslatestLink))
	router.GET("/entityone/status/islatest", islatest.EntityoneSelectHandler(env.DB, env.IslatestLink))
	router.GET("/entityone/status/islatest/:entityoneID", islatest.EntityoneSelectByPKHandler(env.DB, env.IslatestLink))
	router.DELETE("/entityone/status/islatest/:entityoneID", islatest.EntityoneDeleteByPKHandler(env.DB, env.IslatestLink))
	return router
}
|
// Copyright © 2019 IBM Corporation and others.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd_test
import (
"io/ioutil"
"log"
"testing"
"github.com/appsody/appsody/cmd/cmdtest"
)
// TestDeploy initializes a nodejs-express project in a temp dir and runs
// `appsody deploy --dryrun` against it, failing the test on any error.
func TestDeploy(t *testing.T) {
	// first add the test repo index
	_, cleanup, err := cmdtest.AddLocalFileRepo("LocalTestRepo", "testdata/index.yaml")
	if err != nil {
		t.Fatal(err)
	}
	defer cleanup()
	// create a temporary dir to create the project and run the test
	projectDir, err := ioutil.TempDir("", "appsody-deploy-test")
	if err != nil {
		t.Fatal(err)
	}
	//defer os.RemoveAll(projectDir)
	log.Println("Created project dir: " + projectDir)
	// appsody init nodejs-express
	_, err = cmdtest.RunAppsodyCmdExec([]string{"init", "nodejs-express"}, projectDir)
	if err != nil {
		t.Fatal(err)
	}
	// appsody deploy, run asynchronously.
	// BUG FIX: the original never received from runChannel, so the test
	// returned before the deploy finished and its error was never checked;
	// the goroutine also wrote to the shared `err` variable (data race).
	runChannel := make(chan error)
	go func() {
		_, deployErr := cmdtest.RunAppsodyCmdExec([]string{"deploy", "-t", "testdeploy/testimage", "--dryrun"}, projectDir)
		runChannel <- deployErr
	}()
	if err := <-runChannel; err != nil {
		t.Fatal(err)
	}
}
|
package main
import "fmt"
// main prints a list of greetings twice: first with their indices, then the
// bare values.
func main() {
	greeting := []string{
		"Good morning!",
		"Bonjour!",
		"dias!",
		"Bongiorno!",
		"Ohayo!",
		"Selamat pagi!",
		"Gutten morgen!",
	}
	// Indexed pass: position followed by the greeting.
	for i := range greeting {
		fmt.Println(i, greeting[i])
	}
	// Value-only pass.
	for _, g := range greeting {
		fmt.Println(g)
	}
}
// 0 Good morning!
// 1 Bonjour!
// 2 dias!
// 3 Bongiorno!
// 4 Ohayo!
// 5 Selamat pagi!
// 6 Gutten morgen!
// Good morning!
// Bonjour!
// dias!
// Bongiorno!
// Ohayo!
// Selamat pagi!
// Gutten morgen!
|
package mirror
import (
"context"
"errors"
"time"
"github.com/google/uuid"
)
// Config holds configuration data that are needed to create a mirror (pulling mirror credentials, urls, keys
// and any other details).
type Config struct {
	NamespaceID uuid.UUID
	RootName    string
	// URL of the upstream git repository to mirror.
	URL string
	// GitRef / GitCommitHash pin what is pulled from the upstream.
	GitRef        string
	GitCommitHash string
	// Key material used to authenticate against the upstream.
	PublicKey            string
	PrivateKey           string
	PrivateKeyPassphrase string
	CreatedAt            time.Time
	UpdatedAt            time.Time
}
// Process different statuses.
const (
	ProcessStatusComplete  = "complete"
	ProcessStatusPending   = "pending"
	ProcessStatusExecuting = "executing"
	ProcessStatusFailed    = "failed"
)
// Process different types.
const (
	// ProcessTypeInit indicates the initial mirroring process.
	ProcessTypeInit = "init"
	// ProcessTypeSync indicates a re-mirroring process.
	ProcessTypeSync = "sync"
	// ProcessTypeDryRun indicates a dry-run process.
	ProcessTypeDryRun = "dryrun"
)
// Process represents an instance of a mirroring process that happened or is currently happening. For every
// mirroring process that gets executed, a Process instance should be created with mirror.Store.
type Process struct {
	ID          uuid.UUID
	NamespaceID uuid.UUID
	RootID      uuid.UUID
	// Status is one of the ProcessStatus* constants.
	Status string
	// Typ is one of the ProcessType* constants.
	Typ       string
	EndedAt   time.Time
	CreatedAt time.Time
	UpdatedAt time.Time
}
// ErrNotFound is returned when a requested config or process does not exist.
// NOTE(review): the message "ErrNotFound" breaks the lowercase error-string
// convention, but changing it could break callers matching on the text.
var ErrNotFound = errors.New("ErrNotFound")
// Store *doesn't* launch any mirroring process. Store is only responsible for fetching and setting mirror.Config
// and mirror.Process from the datastore.
type Store interface {
	// CreateConfig stores a new config in the store.
	CreateConfig(ctx context.Context, config *Config) (*Config, error)
	// UpdateConfig updates a config in the store.
	UpdateConfig(ctx context.Context, config *Config) (*Config, error)
	// GetConfig gets config by namespaceID from the store.
	GetConfig(ctx context.Context, namespaceID uuid.UUID) (*Config, error)
	// CreateProcess stores a new process in the store.
	CreateProcess(ctx context.Context, process *Process) (*Process, error)
	// UpdateProcess update a process in the store.
	UpdateProcess(ctx context.Context, process *Process) (*Process, error)
	// GetProcess gets a process by id from the store.
	GetProcess(ctx context.Context, id uuid.UUID) (*Process, error)
	// GetProcessesByNamespaceID gets all processes that belong to a namespace from the store.
	GetProcessesByNamespaceID(ctx context.Context, namespaceID uuid.UUID) ([]*Process, error)
}
|
package stemcell
// Infrastructure creates stemcells on the underlying IaaS from a manifest.
type Infrastructure interface {
	CreateStemcell(Manifest) (CID, error)
	// DeleteStemcell(CID) error
}
// infrastructure is a placeholder implementation (no methods yet).
type infrastructure struct{}
|
/**
* 功能描述: 对application中的module进行调谐
* @Date: 2019-11-14
* @author: lixiaoming
*/
package controllers
import (
"context"
"fmt"
appv1 "github.com/xm5646/paas-crd-application/api/v1"
v1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"reflect"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)
// reconcileInstance drives every module in app.Spec.Modules to a matching
// Deployment: missing ones are created, drifted ones updated, and Deployments
// no longer declared are removed via cleanUpDeployment. Replica-count drift is
// special-cased: the in-cluster value wins and is written back into app.Spec
// so the controller does not fight HPA scaling.
func (r *ApplicationReconciler) reconcileInstance(app *appv1.Application) error {
	newDeploys := make(map[string]*v1.Deployment)
	for i := range app.Spec.Modules {
		module := &app.Spec.Modules[i]
		deploy, err := makeModule2Deployment(module, app)
		if err != nil {
			log.Error(err, "failed to make module to deployment.", "moduleName", module.Name)
			return err
		}
		if err := controllerutil.SetControllerReference(app, deploy, r.Scheme); err != nil {
			log.Error(err, "failed to set Owner reference for module", "moduleName", module.Name)
			// BUG FIX: the original returned nil here, silently aborting the
			// reconcile while reporting success to the caller.
			return err
		}
		newDeploys[deploy.Name] = deploy
		found := &v1.Deployment{}
		err = r.Get(context.TODO(), types.NamespacedName{Name: deploy.Name, Namespace: deploy.Namespace}, found)
		// deployment is not found
		if err != nil && apierrs.IsNotFound(err) {
			log.Info("the spec module is not found and create new deployment.", "namespace", app.Namespace, "name", deploy.Name)
			if err = r.Create(context.TODO(), deploy); err != nil {
				log.Error(err, "failed to create new deployment")
				return err
			}
		} else if err != nil {
			// query failed
			log.Error(err, "failed to get deployment.", "namespace", app.Namespace, "name", deploy.Name)
			return err
		} else if !reflect.DeepEqual(deploy.Spec, found.Spec) {
			// Spec drift detected: if only the replica count differs, adopt the
			// cluster value and push it back into app.Spec so HPA stays in
			// control; otherwise overwrite the cluster spec with ours.
			if *deploy.Spec.Replicas != *found.Spec.Replicas {
				log.Info("the replicas was changed, will apply the deployment replicas from cluster", "apply", found.Spec.Replicas, "origin", deploy.Spec.Replicas)
				app.Spec.Modules[i].Template.Replicas = found.Spec.Replicas
				err := r.Update(context.Background(), app)
				if err != nil {
					log.Error(err, "failed to update app module replicas.", "app", app.Name, "module", deploy.Name)
					return err
				}
				// BUG FIX: %d applied to the *int32 printed the pointer address
				// rather than the replica count — dereference it.
				r.Recorder.Event(app, "Normal", "SuccessfulUpdated", fmt.Sprintf("Updated module %s replica to %d in %s/%s", found.Name, *found.Spec.Replicas, app.Namespace, app.Spec.DisplayName))
				log.Info("Successfully update application module replicas.")
				return nil
			}
			found.Spec = deploy.Spec
			// Clear the resource version to avoid update conflicts.
			found.ResourceVersion = ""
			err = r.Update(context.TODO(), found)
			if err != nil {
				log.Error(err, "failed to update deployment.", "namespace", app.Namespace, "name", found.Name)
				return err
			}
			r.Recorder.Event(app, "Normal", "SuccessfulUpdated", fmt.Sprintf("Updated module %s in %s/%s", found.Name, app.Namespace, app.Spec.DisplayName))
			log.Info("found deployment has changed and updating by spec module.", "namespace", deploy.Namespace, "name", deploy.Name)
		}
	}
	// Remove deployments whose modules were deleted from the spec.
	return r.cleanUpDeployment(app, newDeploys)
}
// cleanUpDeployment deletes every Deployment labelled as belonging to app
// that is no longer present in newDeployList (its module was removed from
// app.Spec), together with its proxy configmap and Service.
func (r *ApplicationReconciler) cleanUpDeployment(app *appv1.Application, newDeployList map[string]*v1.Deployment) error {
	ctx := context.Background()
	deploymentList := &v1.DeploymentList{}
	// (The original also built an unused `labels` map here; removed.)
	if err := r.List(ctx, deploymentList, client.InNamespace(app.Namespace), client.MatchingLabels{APPNameLabel: app.Name}); err != nil {
		log.Error(err, "failed to list deployment by namespace and label.", "namespace", app.Namespace, "label", APPNameLabel)
		return err
	}
	for i := range deploymentList.Items {
		// Index into the slice rather than taking the address of the range
		// variable (avoids the pre-Go-1.22 aliasing pitfall).
		oldDeploy := &deploymentList.Items[i]
		// Deployments still declared in app.Spec are left alone.
		if _, isExist := newDeployList[oldDeploy.Name]; !isExist {
			log.Info("Find an isolated deployment. deleting it.", "namespace", app.Namespace, "deploymentName", oldDeploy.Name)
			r.Recorder.Event(app, "Normal", "Deleting", fmt.Sprintf("Deleting module %s in %s/%s", oldDeploy.Name, app.Namespace, app.Spec.DisplayName))
			// Delete the ingress configmap, if one exists.
			err := r.cleanUpProxy(types.NamespacedName{Name: oldDeploy.Name, Namespace: app.Namespace})
			if err != nil {
				log.Error(err, "failed to delete ingress configmap.", "namespace", app.Namespace, "deploymentName", oldDeploy.Name)
				return err
			}
			r.Recorder.Event(app, "Normal", "SuccessfulDelete", fmt.Sprintf("Deleted proxy config for moudle %s in %s/%s", oldDeploy.Name, app.Namespace, app.Spec.DisplayName))
			// Delete the matching Service, if one exists.
			svc := &corev1.Service{}
			err = r.Get(context.TODO(), types.NamespacedName{Namespace: oldDeploy.Namespace, Name: oldDeploy.Name}, svc)
			if err == nil {
				err = r.Delete(context.TODO(), svc)
				if err != nil {
					log.Error(err, "failed to delete the not defined svc.", "namespace", app.Namespace, "deploymentName", oldDeploy.Name)
					return err
				}
			}
			r.Recorder.Event(app, "Normal", "SuccessfulDelete", fmt.Sprintf("Deleted svc for moudle %s in %s/%s", oldDeploy.Name, app.Namespace, app.Spec.DisplayName))
			// Finally delete the orphaned deployment itself.
			err = r.Delete(context.TODO(), oldDeploy)
			if err != nil {
				log.Error(err, "failed to delete the not defined deployment.", "namespace", app.Namespace, "deploymentName", oldDeploy.Name)
				return err
			}
			r.Recorder.Event(app, "Normal", "SuccessfulDelete", fmt.Sprintf("Deleted moudle %s in %s/%s", oldDeploy.Name, app.Namespace, app.Spec.DisplayName))
		}
	}
	return nil
}
// makeModule2Deployment converts a module spec into a Deployment object named
// after the module, in the application's namespace, carrying the app's labels
// plus controller-identifying labels.
// BUG FIX: the original wrote into app.Labels and module.Template.Labels
// directly, mutating the shared Application object (Go maps are references
// even after a struct copy). Both maps are now cloned before being extended.
func makeModule2Deployment(module *appv1.Module, app *appv1.Application) (*v1.Deployment, error) {
	labels := make(map[string]string, len(app.Labels)+3)
	for k, v := range app.Labels {
		labels[k] = v
	}
	labels[APPNameLabel] = app.Name
	labels[ModuleNameLabel] = module.Name
	labels[DeploymentType] = "crd"
	deploySpec := module.Template
	// Clone the pod template labels for the same reason as above.
	podLabels := make(map[string]string, len(deploySpec.Template.Labels)+3)
	for k, v := range deploySpec.Template.Labels {
		podLabels[k] = v
	}
	podLabels[APPNameLabel] = app.Spec.DisplayName
	podLabels[ModuleNameLabel] = module.Name
	podLabels[PodType] = "crd"
	deploySpec.Template.Labels = podLabels
	deploy := &v1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:      module.Name,
			Namespace: app.Namespace,
			Labels:    labels,
		},
		Spec: deploySpec,
	}
	return deploy, nil
}
|
package httputil
import (
"bytes"
"io"
"net/http"
"os"
"text/template"
"time"
)
// templater wraps an http.FileSystem so that selected paths are rendered as
// text/template (with data as the template context) when opened; all other
// paths pass through untouched.
type templater struct {
	fs       http.FileSystem
	includes map[string]bool // set of paths to treat as templates
	data     interface{}     // template execution context
}
// Compile-time interface check.
var _ http.FileSystem = (*templater)(nil)
// Open implements http.FileSystem. Non-template paths are served straight
// from the underlying filesystem; template paths are read fully, executed
// with t.data, and returned as an in-memory file that reuses the original
// file's Stat info (so size comes from the rendered bytes, see newBfile).
func (t *templater) Open(path string) (http.File, error) {
	if !t.includes[path] {
		return t.fs.Open(path)
	}
	// FIXME add a cache
	f, err := t.fs.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	var buf = new(bytes.Buffer)
	if _, err = io.Copy(buf, f); err != nil {
		return nil, err
	}
	// buf.String() avoids the redundant []byte -> string round trip that
	// string(buf.Bytes()) performed.
	tmpl, err := template.New(path).Parse(buf.String())
	if err != nil {
		return nil, err
	}
	// Reuse the buffer for the rendered output.
	buf.Reset()
	if err = tmpl.Execute(buf, t.data); err != nil {
		return nil, err
	}
	info, err := f.Stat()
	if err != nil {
		return nil, err
	}
	return newBfile(buf.Bytes(), info), nil
}
// NewTemplater wraps fs so that every path listed in includes is executed as
// a text/template with data as its context when opened; all other paths are
// served unchanged.
func NewTemplater(fs http.FileSystem, includes []string, data interface{}) http.FileSystem {
	set := make(map[string]bool, len(includes))
	for _, p := range includes {
		set[p] = true
	}
	return &templater{fs: fs, includes: set, data: data}
}
// bfile is an in-memory, read-only http.File backed by a bytes.Reader.
type bfile struct {
	bytes.Reader
	info os.FileInfo
}
// Close is a no-op: there is no underlying resource to release.
func (*bfile) Close() error {
	return nil
}
// Readdir always reports an empty directory; bfile never represents a dir.
func (*bfile) Readdir(count int) ([]os.FileInfo, error) {
	return nil, nil
}
// Stat returns the FileInfo supplied at construction (see newBfile).
func (f *bfile) Stat() (os.FileInfo, error) {
	return f.info, nil
}
// Compile-time interface check.
var _ http.File = (*bfile)(nil)
// bfileInfo decorates an os.FileInfo with an overridden size, so a rendered
// template can report its in-memory length while keeping the original file's
// name, mode and timestamps.
type bfileInfo struct {
	info os.FileInfo
	size int64 // overrides info.Size()
}
// Compile-time interface check.
var _ os.FileInfo = bfileInfo{}
func (i bfileInfo) Name() string {
	return i.info.Name()
}
// Size returns the overridden size, not the wrapped file's.
func (i bfileInfo) Size() int64 {
	return i.size
}
func (i bfileInfo) Mode() os.FileMode {
	return i.info.Mode()
}
func (i bfileInfo) ModTime() time.Time {
	return i.info.ModTime()
}
func (i bfileInfo) IsDir() bool {
	return i.info.IsDir()
}
func (i bfileInfo) Sys() interface{} {
	return i.info.Sys()
}
// newBfile builds an in-memory http.File over b whose Stat reports len(b) as
// the size while keeping the rest of info's metadata.
func newBfile(b []byte, info os.FileInfo) http.File {
	f := &bfile{info: bfileInfo{info: info, size: int64(len(b))}}
	f.Reader = *bytes.NewReader(b)
	return f
}
|
package main
import (
"basic-rabbitmq/RabbitMQ"
"fmt"
)
// main publishes a single message to the "goSimple" queue using the simple
// (default-exchange) RabbitMQ mode.
func main() {
	rabbitmq := RabbitMQ.NewRabbitMQSimple("goSimple")
	rabbitmq.PublishSimple("Hello, RabbitMQ!")
	fmt.Println("Send success!")
}
|
package model
import (
"errors"
"fmt"
)
// Move represents a chess move as a relative (file, rank) offset.
type Move struct {
	X, Y int8
}

// String renders the move as "x,y", e.g. "1,-2".
func (move *Move) String() string {
	return fmt.Sprint(move.X, ",", move.Y)
}
var (
	// Unit offsets for diagonal and straight movement.
	diagonalMoves = []Move{{1, 1}, {1, -1}, {-1, 1}, {-1, -1}}
	straightMoves = []Move{{0, 1}, {0, -1}, {1, 0}, {-1, 0}}
	// moveMap lists each piece type's base move directions; sliding range is
	// bounded separately by maxSlideMap.
	moveMap = map[PieceType][]Move{
		Rook: straightMoves,
		Knight: {
			Move{1, 2}, Move{1, -2}, Move{-1, 2}, Move{-1, -2},
			Move{2, 1}, Move{2, -1}, Move{-2, 1}, Move{-2, -1},
		},
		Bishop: diagonalMoves,
		Queen:  append(diagonalMoves, straightMoves...),
		King:   append(diagonalMoves, straightMoves...),
		// Pawn forward direction is flipped for Black elsewhere.
		Pawn: {Move{0, 1}},
	}
	// maxSlideMap caps how many times a base move may be repeated:
	// 7 for sliders, 1 for king/knight, 2 for a pawn's initial double step.
	maxSlideMap = map[PieceType]uint8{
		Rook:   7,
		Knight: 1,
		Bishop: 7,
		Queen:  7,
		King:   1,
		Pawn:   2,
	}
)
// takeMoveShort is a convenience wrapper over takeMove with no previous-move
// context, no king, and no promotion.
func (piece *Piece) takeMoveShort(board *Board, move Move) {
	piece.takeMove(board, move, Move{}, nil, nil, nil)
}
// takeMove validates the move and, if legal, applies it to the board,
// incrementing the piece's move counter. It returns the captured piece (nil
// if none) or an error when the move is invalid.
func (piece *Piece) takeMove(
	board *Board, move Move, previousMove Move, previousMover *Piece,
	king *Piece, promoteTo *PieceType,
) (*Piece, error) {
	if !piece.IsMoveValid(board, move, previousMove, previousMover, king,
		promoteTo) {
		return nil, errors.New("piece attempted invalid move")
	}
	_, capturedPiece, _, _ :=
		piece.takeMoveUnsafe(
			board, move, previousMove, previousMover, promoteTo,
		)
	piece.MovesTaken++
	return capturedPiece, nil
}
// takeMoveUnsafe applies move to the board without any legality checks,
// handling the two special cases: en passant (removes the bypassed pawn) and
// castling (also relocates the rook). It returns the piece's new position,
// the captured piece if any, and the rook's new position/identity on castle.
func (piece *Piece) takeMoveUnsafe(
	board *Board, move Move, previousMove Move, previousMover *Piece,
	promoteTo *PieceType,
) (
	newPosition Position, capturedPiece *Piece,
	newCastledPosition Position, castledRook *Piece,
) {
	// White pawns move up the board (+y), black pawns down (-y).
	yDirection := int8(1)
	if piece.Color == Black {
		yDirection *= -1
	}
	newX, newY := addMoveToPosition(piece, move)
	// The en passant victim sits one rank "behind" the destination square.
	enPassantTargetY := uint8(int8(newY) + int8(-1*yDirection))
	enPassantTarget := &Piece{}
	// Out-of-range coordinates wrap to large uint8 values, so `<= 7` guards
	// both board edges at once.
	if newX <= 7 && enPassantTargetY <= 7 {
		enPassantTarget = board[newX][enPassantTargetY]
	}
	// En passant: a pawn moving diagonally onto an empty square, right after
	// the adjacent enemy pawn's double step.
	isEnPassant := (piece.PieceType == Pawn && newX != piece.File() &&
		enPassantTarget != nil && enPassantTarget == previousMover &&
		enPassantTarget.PieceType == Pawn &&
		(previousMove.Y == 2 || previousMove.Y == -2) &&
		piece.Rank() == enPassantTargetY &&
		piece.Color != enPassantTarget.Color)
	// Any king move of two files is a castle.
	isCastle := piece.PieceType == King && (move.X < -1 || move.X > 1)
	if isEnPassant {
		capturedPiece = board[enPassantTarget.File()][enPassantTarget.Rank()]
		board[enPassantTarget.File()][enPassantTarget.Rank()] = nil
	} else if isCastle {
		castledRook, newCastledPosition = piece.handleCastle(board, move)
	}
	if board[newX][newY] != nil {
		capturedPiece = board[newX][newY]
	}
	board[newX][newY] = piece
	board[piece.File()][piece.Rank()] = nil
	newPosition = Position{newX, newY}
	piece.Position = newPosition
	if promoteTo != nil {
		piece.PieceType = *promoteTo
	}
	return newPosition, capturedPiece, newCastledPosition, castledRook
}
// IsMoveValid reports whether move is among the piece's valid moves and, for
// pawns reaching the last rank, whether the promotion choice is acceptable.
func (piece *Piece) IsMoveValid(
	board *Board, move Move, previousMove Move, previousMover *Piece,
	king *Piece, promoteTo *PieceType,
) bool {
	candidates := piece.ValidMoves(board, previousMove, previousMover, false, king)
	for _, candidate := range candidates {
		if candidate == move {
			// promotionValid depends only on (move, promoteTo), so the first
			// match decides the answer.
			return piece.promotionValid(move, promoteTo)
		}
	}
	return false
}
// validMoves is a context-free shorthand for ValidMoves (no previous move,
// no mover, not threat-only, no king for check filtering).
func (piece *Piece) validMoves(board *Board) []Move {
	return piece.ValidMoves(board, Move{}, nil, false, nil)
}
// ValidMoves returns all valid moves for the piece by sliding each base
// direction up to the piece's slide limit. When allThreatened is true it
// returns threatened squares instead (used for check detection), in which
// case castling is excluded since a castle never threatens a square.
func (piece *Piece) ValidMoves(
	board *Board, previousMove Move, previousMover *Piece,
	allThreatened bool, king *Piece,
) []Move {
	validMoves := []Move{}
	baseMoves := moveMap[piece.PieceType]
	// Pawns capture diagonally, never along their forward slide.
	canSlideCapture := true
	if piece.PieceType == Pawn {
		canSlideCapture = false
	}
	for _, baseMove := range baseMoves {
		validMoves = append(validMoves, piece.validMovesSlide(
			baseMove, previousMove, previousMover, board,
			maxSlideMap[piece.PieceType], canSlideCapture,
			allThreatened, king,
		)...)
	}
	if !allThreatened {
		validMoves = append(validMoves, piece.getCastleMove(
			board, previousMove, previousMover,
		)...)
	}
	return validMoves
}
// promotionValid reports whether the promotion request matches the move:
// only a pawn reaching rank 0 or 7 may (and must) promote, and only to a
// bishop, knight, rook, or queen.
func (piece *Piece) promotionValid(move Move, promoteTo *PieceType) bool {
	// Non-pawns must not carry a promotion request.
	if piece.PieceType != Pawn {
		return promoteTo == nil
	}
	_, destRank := addMoveToPosition(piece, move)
	onLastRank := destRank == 7 || destRank == 0
	if promoteTo == nil {
		// A pawn may omit promotion only when not reaching a last rank.
		return !onLastRank
	}
	switch *promoteTo {
	case Bishop, Knight, Rook, Queen:
		return onLastRank
	default:
		return false
	}
}
// getCastleMove returns the castling moves (king shifted two files left
// and/or right) currently available to this piece, if any.
func (piece *Piece) getCastleMove(
	board *Board, previousMove Move, previousMover *Piece,
) []Move {
	moves := []Move{}
	queenside, kingside := piece.canCastle(board, previousMove, previousMover)
	if queenside {
		moves = append(moves, Move{int8(-2), 0})
	}
	if kingside {
		moves = append(moves, Move{int8(2), 0})
	}
	return moves
}
// addMoveToPosition applies a move offset to the piece's current square
// and returns the resulting file and rank. Off-board results wrap via
// uint8 conversion and must be range-checked by the caller
// (see isMoveInBounds).
func addMoveToPosition(piece *Piece, move Move) (uint8, uint8) {
	file := uint8(int8(piece.Position.File) + move.X)
	rank := uint8(int8(piece.Position.Rank) + move.Y)
	return file, rank
}
// isMoveInBounds reports whether the move lands on the 8x8 board.
// Negative coordinates wrap to large uint8 values, so one upper-bound
// comparison per axis covers both directions.
func (piece *Piece) isMoveInBounds(move Move) bool {
	file, rank := addMoveToPosition(piece, move)
	return file < 8 && rank < 8
}
// validCaptureMovesPawn returns this pawn's diagonal capture moves:
// ordinary captures, en passant captures, and — when allThreatened is
// set — the attacked diagonals regardless of occupancy.
func (piece *Piece) validCaptureMovesPawn(
	board *Board, previousMove Move, previousMover *Piece, allThreatened bool,
	king *Piece,
) []Move {
	// Pawns advance toward higher ranks for White, lower for Black.
	yDirection := int8(1)
	if piece.Color == Black {
		yDirection *= -1
	}
	captureMoves := []Move{}
	// Examine the two forward diagonals (x = -1 and x = +1).
	for xDirection := int8(-1); xDirection <= 1; xDirection += 2 {
		captureMove := Move{xDirection, yDirection}
		// Wrapped in a closure so the expensive check simulation only
		// runs for in-bounds moves (short-circuit below).
		wouldBeInCheck := func() bool {
			return !allThreatened && piece.wouldBeInCheck(
				board, captureMove, previousMove, previousMover, king,
			)
		}
		if !piece.isMoveInBounds(captureMove) || wouldBeInCheck() {
			continue
		}
		newX, newY := addMoveToPosition(piece, captureMove)
		pieceAtDest := board[newX][newY]
		// The en passant victim sits one rank behind the capture square,
		// i.e. beside this pawn on its own rank.
		enPassantTarget := board[newX][newY+uint8(-1*yDirection)]
		canEnPassant :=
			piece.canEnPassant(previousMove, previousMover, enPassantTarget)
		canCapture := pieceAtDest != nil && pieceAtDest.Color != piece.Color
		// When gathering threatened squares, the diagonal counts even if
		// it is currently empty or occupied by a friendly piece.
		if canCapture || canEnPassant || allThreatened {
			captureMoves = append(captureMoves, captureMove)
		}
	}
	return captureMoves
}
// canEnPassant reports whether this pawn may capture enPassantTarget in
// passing: the target must be the enemy pawn that moved last turn and
// advanced exactly two ranks.
func (piece *Piece) canEnPassant(
	previousMove Move, previousMover *Piece, enPassantTarget *Piece,
) bool {
	if enPassantTarget == nil || enPassantTarget != previousMover {
		return false
	}
	if enPassantTarget.Color == piece.Color || enPassantTarget.PieceType != Pawn {
		return false
	}
	return previousMove.Y == 2 || previousMove.Y == -2
}
// validMovesSlide expands one base move direction into concrete sliding
// moves (up to maxSlide steps), stopping at blockers and board edges.
// For pawns it also flips the direction for Black, limits a moved pawn
// to a single step, and mixes in the diagonal capture moves.
func (piece *Piece) validMovesSlide(
	move Move, previousMove Move, previousMover *Piece, board *Board,
	maxSlide uint8, canSlideCapture bool, allThreatened bool, king *Piece,
) []Move {
	validSlides := []Move{}
	yDirectionModifier := int8(1)
	if piece.PieceType == Pawn {
		if piece.Color == Black {
			yDirectionModifier = int8(-1)
		}
		// A pawn that has already moved loses its two-square advance.
		if piece.MovesTaken > 0 {
			maxSlide = 1
		}
		validSlides = append(
			validSlides,
			piece.validCaptureMovesPawn(
				board, previousMove, previousMover, allThreatened, king,
			)...,
		)
	}
	for i := int8(1); i <= int8(maxSlide); i++ {
		slideMove := Move{move.X * i, move.Y * i * yDirectionModifier}
		// Wrapped in a closure so the check simulation runs only after
		// the cheaper bounds test has passed.
		wouldBeInCheck := func() bool {
			return !allThreatened && piece.wouldBeInCheck(
				board, slideMove, previousMove, previousMover, king,
			)
		}
		if !piece.isMoveInBounds(slideMove) {
			break
		}
		newX, newY := addMoveToPosition(piece, slideMove)
		pieceAtDest := board[newX][newY]
		destIsValidNoCapture := pieceAtDest == nil
		if wouldBeInCheck() {
			// Continue only if there is no piece in the way
			if destIsValidNoCapture {
				continue
			} else {
				break
			}
		}
		// Empty square: the slide may continue. A pawn's forward step is
		// not a threat, so it is skipped when only threats are wanted.
		if destIsValidNoCapture && (piece.PieceType != Pawn || !allThreatened) {
			validSlides = append(validSlides, slideMove)
		} else {
			// Occupied square (or pawn-forward-while-threatening): capture
			// if allowed, then stop sliding. NOTE(review): when pieceAtDest
			// is nil here (pawn + allThreatened), this relies on
			// canSlideCapture being false for pawns to short-circuit and
			// avoid a nil dereference — confirm callers keep that invariant.
			destIsValidCapture :=
				canSlideCapture && pieceAtDest.Color != piece.Color
			if destIsValidCapture {
				validSlides = append(validSlides, slideMove)
			}
			break
		}
	}
	return validSlides
}
// AllMoves get all moves for a player or all threatening moves for a player
func AllMoves(
	board *Board, color Color, previousMove Move, previousMover *Piece,
	allThreatened bool, king *Piece,
) map[Position]bool {
	destinations := map[Position]bool{}
	// Collect destination squares from every piece of the given color.
	for _, file := range board {
		for _, occupant := range file {
			if occupant == nil || occupant.Color != color {
				continue
			}
			for _, position := range occupant.Moves(
				board, previousMove, previousMover, allThreatened, king,
			) {
				destinations[position] = true
			}
		}
	}
	return destinations
}
// Moves get all the valid moves or all the threatening moves for a piece
func (piece *Piece) Moves(
	board *Board, previousMove Move, previousMover *Piece, allThreatened bool,
	king *Piece,
) []Position {
	moves := piece.ValidMoves(
		board, previousMove, previousMover, allThreatened, king,
	)
	positions := make([]Position, 0, len(moves))
	// Translate each relative move into the absolute destination square.
	for _, move := range moves {
		file, rank := addMoveToPosition(piece, move)
		positions = append(positions, Position{file, rank})
	}
	return positions
}
// handleCastle relocates the rook that participates in a castle; the
// king's own relocation is handled by the caller. move.X < 0 selects the
// queenside rook (file 0 -> 3), otherwise kingside (file 7 -> 5).
func (piece *Piece) handleCastle(
	board *Board, move Move,
) (castledRook *Piece, newCastledPosition Position) {
	rank := piece.Rank()
	var fromFile, toFile uint8 = 7, 5 // kingside by default
	if move.X < 0 {
		fromFile, toFile = 0, 3 // queenside
	}
	castledRook = board[fromFile][rank]
	board[toFile][rank] = castledRook
	board[fromFile][rank] = nil
	newCastledPosition = Position{toFile, rank}
	castledRook.Position = newCastledPosition
	return castledRook, newCastledPosition
}
// canCastle reports whether the king may castle queenside (left) and
// kingside (right): rights intact, no blocking pieces, and no enemy
// threat on the squares the king occupies or crosses.
func (piece *Piece) canCastle(
	board *Board, previousMove Move, previousMover *Piece,
) (castleLeft, castleRight bool) {
	rightsLeft, rightsRight := piece.hasCastleRights(board)
	if !rightsLeft && !rightsRight {
		return false, false
	}
	clearLeft, clearRight := piece.noPiecesBlockingCastle(board)
	if !clearLeft && !clearRight {
		return false, false
	}
	// Squares attacked by the enemy; castling may not pass through check.
	threatened := AllMoves(
		board, getOppositeColor(piece.Color), previousMove, previousMover,
		true, nil,
	)
	safeLeft, safeRight := piece.wouldNotCastleThroughCheck(threatened)
	return rightsLeft && clearLeft && safeLeft,
		rightsRight && clearRight && safeRight
}
// hasCastleRights reports, per side, whether castling rights remain: the
// piece must be an unmoved king with an unmoved rook in that corner.
func (piece *Piece) hasCastleRights(board *Board) (castleLeft, castleRight bool) {
	if piece.PieceType != King || piece.MovesTaken > 0 {
		return false, false
	}
	rank := piece.Rank()
	rookIntact := func(rook *Piece) bool {
		return rook != nil && rook.PieceType == Rook && rook.MovesTaken == 0
	}
	return rookIntact(board[0][rank]), rookIntact(board[7][rank])
}
// noPiecesBlockingCastle reports whether the squares between the king
// and each rook are empty: three squares to the left, two to the right.
func (piece *Piece) noPiecesBlockingCastle(board *Board) (left, right bool) {
	left, right = true, true
	for offset := int8(1); offset < 4; offset++ {
		if x, y := addMoveToPosition(piece, Move{-offset, 0}); board[x][y] != nil {
			left = false
		}
		if offset == 3 {
			// Only two squares separate the king from the kingside rook.
			continue
		}
		if x, y := addMoveToPosition(piece, Move{offset, 0}); board[x][y] != nil {
			right = false
		}
	}
	return left, right
}
// wouldNotCastleThroughCheck reports, per side, whether the king's own
// square and the two squares it would cross are free of enemy threats.
func (piece *Piece) wouldNotCastleThroughCheck(
	threatenedPositions map[Position]bool,
) (left, right bool) {
	left, right = true, true
	for offset := int8(0); offset < 3; offset++ {
		leftX, leftY := addMoveToPosition(piece, Move{-offset, 0})
		rightX, rightY := addMoveToPosition(piece, Move{offset, 0})
		left = left && !threatenedPositions[Position{leftX, leftY}]
		right = right && !threatenedPositions[Position{rightX, rightY}]
	}
	return left, right
}
// wouldBeInCheck reports whether taking move would leave king threatened.
// The move is applied to board via takeMoveUnsafe, tested, and then fully
// reverted (mover, any captured piece, and any castled rook).
func (piece *Piece) wouldBeInCheck(
	board *Board, move Move, previousMove Move, previousMover *Piece,
	king *Piece,
) bool {
	if king == nil {
		return false
	}
	originalPosition := piece.Position
	newPosition, capturedPiece, newCastledPosition, castledRook :=
		piece.takeMoveUnsafe(board, move, previousMove, previousMover, nil)
	wouldBeInCheck := king.isThreatened(board, move, piece)
	// Revert the move
	board[newPosition.File][newPosition.Rank] = nil
	if capturedPiece != nil {
		// Works for en passant too: the victim's own Position is its square.
		board[capturedPiece.File()][capturedPiece.Rank()] = capturedPiece
	}
	board[originalPosition.File][originalPosition.Rank] = piece
	piece.Position = originalPosition
	if castledRook != nil {
		// Clear the square the rook castled to, then restore the rook to
		// its corner square both in Position and on the board itself.
		// (Previously the board restore was missing, leaving the rook off
		// the board after a simulated castle.)
		board[newCastledPosition.File][newCastledPosition.Rank] = nil
		if castledRook.Position.File == 5 {
			castledRook.Position.File = 7
		} else {
			castledRook.Position.File = 0
		}
		board[castledRook.Position.File][castledRook.Position.Rank] = castledRook
	}
	return wouldBeInCheck
}
// isThreatened reports whether any enemy piece attacks this piece's
// current square.
func (piece *Piece) isThreatened(board *Board, previousMove Move,
	previousMover *Piece,
) bool {
	enemyColor := Black
	if piece.Color == Black {
		enemyColor = White
	}
	threatened := AllMoves(
		board, enemyColor, previousMove, previousMover, true, nil,
	)
	return threatened[piece.Position]
}
|
package dbi
import (
// _ "github.com/go-goracle/goracle"
_ "github.com/go-sql-driver/mysql"
// _ "github.com/lib/pq"
// _ "github.com/mattn/go-sqlite3"
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.