text
stringlengths 11
4.05M
|
|---|
package mymath
// Sqrt computes the square root of x using Newton's method.
//
// Starting from z = 1, it repeatedly applies z -= (z*z - x) / (2*z) until
// the value stops changing, capped at 1000 iterations.
//
// BUG FIX: the original divided by (2*x) instead of the derivative (2*z)
// and started from z = 0, which converges extremely slowly (effectively
// not at all within 1000 iterations) for large x. Negative inputs are not
// supported and yield meaningless results, as in the original.
func Sqrt(x float64) float64 {
	z := 1.0
	for i := 0; i < 1000; i++ {
		prev := z
		z -= (z*z - x) / (2 * z)
		if z == prev {
			// Converged to machine precision; no further progress possible.
			break
		}
	}
	return z
}
|
package main
import (
"github.com/astaxie/beego/orm"
_ "github.com/go-sql-driver/mysql"
//"fmt"
//"encoding/json"
_ "encoding/json"
"fmt"
//"encoding/json"
//"os/user"
"encoding/json"
)
// init registers the MySQL database and the ORM models, then syncs the
// schema. Runs once at program start.
func init() {
	orm.RegisterDataBase("default", "mysql", "root:root@tcp(127.0.0.1:3306)/go_g?charset=utf8")
	orm.RegisterModel(new(User), new(Post), new(Profile), new(Tag))
	// BUG FIX: RunSyncdb returns an error that was silently dropped; surface
	// it so schema-sync failures are at least visible.
	if err := orm.RunSyncdb("default", false, true); err != nil {
		fmt.Println("RunSyncdb:", err)
	}
}
// User is the ORM model for an application user.
//
// BUG FIX: a Go struct tag is a single space-separated list of key:"value"
// pairs; the original comma-joined form (`orm:"..."`,`json:"..."`) made the
// json key unreadable via reflect.StructTag, so json.Marshal ignored it.
type User struct {
	Id      int      `json:"id"`
	Name    string   `orm:"size(100)" json:"name"`
	Profile *Profile `orm:"rel(one)"`      // one-to-one relation
	Post    []*Post  `orm:"reverse(many)"` // reverse side of the one-to-many relation
}
// Profile holds per-user profile data; the one-to-one counterpart of User.
type Profile struct {
	Id   int
	Age  int16
	User *User `orm:"reverse(one)"` // reverse side of the one-to-one relation (optional)
}
// Post is the ORM model for a post written by a User and labelled with Tags.
//
// BUG FIX: struct tags must be space-separated key:"value" pairs; the
// original comma-joined tags hid the json keys from reflect.StructTag.
type Post struct {
	Id    int    `orm:"auto" json:"id"`
	Title string `orm:"size(100)" json:"title"`
	User  *User  `orm:"rel(fk)" json:"user"` // one-to-many: each post belongs to one user
	Tags  []*Tag `orm:"rel(m2m)"`            // many-to-many relation with Tag
}
// Tag labels posts; the many-to-many counterpart of Post.Tags.
type Tag struct {
	Id    int
	Name  string
	Posts []*Post `orm:"reverse(many)"` // reverse side of the many-to-many relation
}
// main demonstrates querying users whose profile matches a condition and
// printing them as JSON. (Earlier commented-out insert/read experiments
// were removed as dead code.)
func main() {
	orm.Debug = true
	o := orm.NewOrm()

	// Recover from any panic (e.g. a nil Profile in the Read loop) so the
	// demo reports the problem instead of crashing.
	defer func() {
		if err := recover(); err != nil {
			fmt.Println(err)
		}
	}()

	user := new(User)
	var users []*User

	// BUG FIX: beego's Condition methods use value receivers and return a
	// NEW *Condition, so the chained result must be reassigned — calling
	// them on cond without using the result applied an empty condition.
	cond := orm.NewCondition()
	cond = cond.And("profile__isnull", false).AndNot("profile__age__in", 30) //.Or("profile__id__gt", 0)

	// Limit(5,10) would fetch 5 rows starting at offset 10.
	if _, err := o.QueryTable(user).SetCond(cond).All(&users); err != nil {
		fmt.Println(err)
		return
	}
	// Load the related Profile of each user; beego does no eager loading.
	for _, u := range users {
		if err := o.Read(u.Profile); err != nil {
			fmt.Println(err)
		}
	}
	// Renamed from `json` to avoid shadowing the encoding/json package.
	data, err := json.Marshal(users)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(string(data))
}
|
package main
import (
"fmt"
)
// https://leetcode-cn.com/problems/4sum-ii/
// 454. 四数相加 II | 4Sum II
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Solution
//
// 复杂度分析:
// * 时间: O(N^2)
// * 空间: O(N^2)
// fourSumCount counts the tuples (i, j, k, l) with A[i]+B[j]+C[k]+D[l] == 0.
//
// Strategy: tabulate the frequency of every pairwise sum a+b, then for each
// pair (c, d) add the number of AB-pairs that cancel it, i.e. freq[-c-d].
// Time O(N^2), space O(N^2).
func fourSumCount(A []int, B []int, C []int, D []int) int {
	if len(A) == 0 {
		return 0
	}
	sumFreq := make(map[int]int, len(A)*len(B))
	for _, x := range A {
		for _, y := range B {
			sumFreq[x+y]++
		}
	}
	count := 0
	for _, x := range C {
		for _, y := range D {
			count += sumFreq[-x-y]
		}
	}
	return count
}
//------------------------------------------------------------------------------
// main
// main iterates over the hard-coded test cases and prints each one.
// (The solver call is still a placeholder, as in the original.)
func main() {
	cases := [][]int{
		{},
	}
	for idx, input := range cases {
		fmt.Println("## case", idx)
		// solve
		fmt.Println(input)
	}
}
|
package kubemq_queue
import (
"context"
"net"
"strconv"
queuesStream "github.com/kubemq-io/kubemq-go/queues_stream"
"github.com/sirupsen/logrus"
"github.com/batchcorp/plumber-schemas/build/go/protos/args"
"github.com/batchcorp/plumber-schemas/build/go/protos/opts"
"github.com/batchcorp/plumber/types"
)
const (
	// DefaultReadTimeout is the default read timeout, in milliseconds.
	DefaultReadTimeout = 10000 // Milliseconds
	// BackendName identifies this backend (used for logging and lookup).
	BackendName = "kubemq"
)

// KubeMQ wraps a kubemq queues-stream client together with the connection
// options and arguments it was created from.
type KubeMQ struct {
	connOpts *opts.ConnectionOptions
	connArgs *args.KubeMQQueueConn
	client   *queuesStream.QueuesStreamClient
	log      *logrus.Entry
}
// New creates a KubeMQ backend from the given connection options.
//
// It parses host:port from the configured address and builds a
// queues-stream client, adding TLS credentials when a client cert is set.
func New(opts *opts.ConnectionOptions) (*KubeMQ, error) {
	args := opts.GetKubemqQueue()
	host, portStr, err := net.SplitHostPort(args.Address)
	if err != nil {
		return nil, err
	}
	port, err := strconv.Atoi(portStr)
	if err != nil {
		return nil, err
	}
	var client *queuesStream.QueuesStreamClient
	if args.TlsClientCert == "" {
		client, err = queuesStream.NewQueuesStreamClient(context.Background(),
			queuesStream.WithAddress(host, port),
			queuesStream.WithClientId(args.ClientId),
			queuesStream.WithAuthToken(args.AuthToken))
	} else {
		client, err = queuesStream.NewQueuesStreamClient(context.Background(),
			queuesStream.WithAddress(host, port),
			queuesStream.WithClientId(args.ClientId),
			queuesStream.WithCredentials(args.TlsClientCert, ""),
			queuesStream.WithAuthToken(args.AuthToken))
	}
	// BUG FIX: the original `if err != nil {}` had an empty body, silently
	// discarding the construction error and returning a nil client.
	if err != nil {
		return nil, err
	}
	return &KubeMQ{
		connOpts: opts,
		connArgs: opts.GetKubemqQueue(),
		client:   client,
		log:      logrus.WithField("backend", BackendName),
	}, nil
}
// Name returns the backend identifier ("kubemq").
func (k *KubeMQ) Name() string {
	return BackendName
}

// Close shuts down the underlying queues-stream client.
func (k *KubeMQ) Close(_ context.Context) error {
	return k.client.Close()
}

// Test is not implemented for this backend.
func (k *KubeMQ) Test(_ context.Context) error {
	return types.NotImplementedErr
}
|
package lang
import "fmt"
// Month just a literature form
type Month int

// outputMonth prints the month's default (integer) representation,
// followed by a newline.
func outputMonth(m Month) {
	fmt.Printf("%v\n", m)
}
|
package perfect
import (
"errors"
)
// Classification categorises a natural number by its aliquot sum.
type Classification int

// ErrOnlyPositive is returned for inputs that are not positive.
var ErrOnlyPositive = errors.New("Only positive number are allowed")

const (
	ClassificationPerfect Classification = iota
	ClassificationAbundant
	ClassificationDeficient
)

// Classify reports whether n is perfect (divisor sum equals n), deficient
// (less than n) or abundant (greater than n). Non-positive n yields
// ErrOnlyPositive.
func Classify(n int64) (Classification, error) {
	total, err := divisorSum(n)
	if err != nil {
		return ClassificationAbundant, err
	}
	// divisorSum includes n itself, so compare against 2*n.
	switch {
	case total == 2*n:
		return ClassificationPerfect, nil
	case total < 2*n:
		return ClassificationDeficient, nil
	default:
		return ClassificationAbundant, nil
	}
}

// divisorSum returns the sum of all divisors of n, including n itself,
// by pairing each divisor d < sqrt(n) with its cofactor n/d.
func divisorSum(n int64) (int64, error) {
	if n <= 0 {
		return -1, ErrOnlyPositive
	}
	var total int64
	d := int64(1)
	for d*d < n {
		if n%d == 0 {
			total += d + n/d
		}
		d++
	}
	// A perfect square contributes its root exactly once.
	if d*d == n {
		total += d
	}
	return total, nil
}
|
package main
import (
"context"
"fmt"
"log"
"sync"
"concurrency"
"golang.org/x/sync/errgroup"
)
// main generates the request payloads and processes them concurrently.
func main() {
	requests := concurrency.GenerateRequests(concurrency.Count)
	DoAsync(context.TODO(), requests)
}
// DoAsync fans requests out to a bounded worker pool and streams results to
// a single collector goroutine.
//
// Shutdown ordering matters: g.Wait() guarantees all senders have finished
// before respChan is closed, and resultsWG.Wait() guarantees the collector
// has drained the channel before DoAsync returns.
func DoAsync(ctx context.Context, requests [][]byte) {
	totalWorkers := concurrency.TotalWorkers
	// chan buffer should be tuned to the value when channels are not exhausted
	// and workers are not waiting for the input:
	respChan := make(chan string, totalWorkers)
	var resultsWG sync.WaitGroup
	resultsWG.Add(1)
	go getResults(respChan, &resultsWG)
	g, ctx := errgroup.WithContext(ctx) // use `errgroup.Group` literal if you don't need to cancel context on the first error
	g.SetLimit(totalWorkers)            // at most totalWorkers goroutines run at once
	for i, req := range requests {
		i, req := i, req // https://github.com/golang/go/wiki/CommonMistakes/#using-goroutines-on-loop-iterator-variables
		if ctx.Err() != nil {
			break // a worker already failed; stop submitting new work
		}
		log.Printf("sending request #%d", i)
		g.Go(func() (err error) {
			// Turn a worker panic into an ordinary error so one bad request
			// does not kill the whole process.
			defer func() {
				if r := recover(); r != nil {
					err = fmt.Errorf("recovered panic: %s", r)
				}
			}()
			// if i == 42 {
			// return errors.New("error-42")
			// }
			Work(ctx, i, req, respChan)
			return nil
		})
	}
	if err := g.Wait(); err != nil { // blocking
		fmt.Println("worker error:", err)
	}
	close(respChan)  // all senders are done; lets getResults' receive loop end
	resultsWG.Wait() // blocking
}
// Work hashes one request and delivers the result on respChan, giving up if
// the context is cancelled before the send can complete.
func Work(ctx context.Context, id int, req []byte, respChan chan<- string) {
	// if id == 42 {
	// panic("42")
	// }
	s := concurrency.Md5sum(req)
	select {
	case respChan <- s:
		log.Printf("worker #%d: send: %s\n", id, s)
	case <-ctx.Done():
		log.Printf("worker #%d: cancelling\n", id)
		return
	}
}
// getResults drains respChan, logging results in batches of
// concurrency.Count, and signals wg when the channel is closed and fully
// drained.
//
// Idiom: the manual `s, ok := <-respChan` / `if !ok break` loop was replaced
// with the equivalent `for range` over the channel, and wg.Done is deferred
// so completion is signalled even if a log call panics.
func getResults(respChan <-chan string, wg *sync.WaitGroup) {
	defer wg.Done() // all results are saved
	batchSize := concurrency.Count
	res := make([]string, 0, batchSize)
	for s := range respChan {
		log.Println("getResults: got from workers:", s)
		res = append(res, s)
		if len(res) == batchSize {
			log.Println("results:", res) // or write to DB...
			res = make([]string, 0, batchSize)
		}
	}
	// Flush the final, possibly partial batch.
	if len(res) != 0 {
		log.Println("final results:", res)
	}
}
|
package arguments
// CommandOpts holds the boolean command-line flags of the command.
type CommandOpts struct {
	recursivelyOpt       bool
	nameBeginWithADotOpt bool
	longFormatOpt        bool
	sortOpt              bool
	reverseArrayOpt      bool
}

// Options is the globally shared option set, populated by GetFlags.
var Options *CommandOpts

// RecursivelyOpt reports whether recursive listing was requested (-R).
func (o *CommandOpts) RecursivelyOpt() bool { return o.recursivelyOpt }

// NameBeginWithADotOpt reports whether dot-files are included (-a).
func (o *CommandOpts) NameBeginWithADotOpt() bool { return o.nameBeginWithADotOpt }

// LongFormatOpt reports whether the long listing format was requested (-l).
func (o *CommandOpts) LongFormatOpt() bool { return o.longFormatOpt }

// SortOpt reports whether sorting was requested (-S).
func (o *CommandOpts) SortOpt() bool { return o.sortOpt }

// ReverseArrayOpt reports whether reverse ordering was requested (-r).
func (o *CommandOpts) ReverseArrayOpt() bool { return o.reverseArrayOpt }

// GetFlags builds the global option set from the parsed flag pointers and
// returns it.
func GetFlags(R *bool, a *bool, l *bool, S *bool, r *bool) *CommandOpts {
	Options = &CommandOpts{
		recursivelyOpt:       *R,
		nameBeginWithADotOpt: *a,
		longFormatOpt:        *l,
		sortOpt:              *S,
		reverseArrayOpt:      *r,
	}
	return Options
}
|
package handlers
import (
"net/http"
"github.com/pilagod/gorm-cursor-paginator/v2/paginator"
"github.com/root-gg/plik/server/common"
"github.com/root-gg/plik/server/context"
)
// GetUsers return users
func GetUsers(ctx *context.Context, resp http.ResponseWriter, req *http.Request) {
// Double check authorization
if !ctx.IsAdmin() {
ctx.Forbidden("you need administrator privileges")
return
}
pagingQuery := ctx.GetPagingQuery()
// Get uploads
users, cursor, err := ctx.GetMetadataBackend().GetUsers("", false, pagingQuery)
if err != nil {
ctx.InternalServerError("unable to get users : %s", err)
return
}
pagingResponse := common.NewPagingResponse(users, cursor)
common.WriteJSONResponse(resp, pagingResponse)
}
// GetUploads return uploads
func GetUploads(ctx *context.Context, resp http.ResponseWriter, req *http.Request) {
// Double check authorization
if !ctx.IsAdmin() {
ctx.Forbidden("you need administrator privileges")
return
}
pagingQuery := ctx.GetPagingQuery()
user := req.URL.Query().Get("user")
token := req.URL.Query().Get("token")
sort := req.URL.Query().Get("sort")
var uploads []*common.Upload
var cursor *paginator.Cursor
var err error
if sort == "size" {
// Get uploads
uploads, cursor, err = ctx.GetMetadataBackend().GetUploadsSortedBySize(user, token, true, pagingQuery)
if err != nil {
ctx.InternalServerError("unable to get uploads : %s", err)
return
}
} else {
// Get uploads
uploads, cursor, err = ctx.GetMetadataBackend().GetUploads(user, token, true, pagingQuery)
if err != nil {
ctx.InternalServerError("unable to get uploads : %s", err)
return
}
}
pagingResponse := common.NewPagingResponse(uploads, cursor)
common.WriteJSONResponse(resp, pagingResponse)
}
// GetServerStatistics return the server statistics
func GetServerStatistics(ctx *context.Context, resp http.ResponseWriter, req *http.Request) {
// Double check authorization
if !ctx.IsAdmin() {
ctx.Forbidden("you need administrator privileges")
return
}
// Get server statistics
stats, err := ctx.GetMetadataBackend().GetServerStatistics()
if err != nil {
ctx.InternalServerError("unable to get server statistics : %s", err)
return
}
common.WriteJSONResponse(resp, stats)
}
|
package jarviscore
import (
"io/ioutil"
"testing"
)
// TestCtrlScriptFile loads ./test/test.sh, builds control info for it and
// runs it through CtrlScriptFile.runScript, logging the script output.
func TestCtrlScriptFile(t *testing.T) {
	ctrl := &CtrlScriptFile{}
	dat, err := ioutil.ReadFile("./test/test.sh")
	if err != nil {
		t.Fatalf("TestCtrlScriptFile load script file %v", err)
	}
	ci, err := BuildCtrlInfoForScriptFile("test.sh", dat, "", "")
	if err != nil {
		t.Fatalf("TestCtrlScriptFile BuildCtrlInfoForScriptFile %v", err)
	}
	ret, err := ctrl.runScript(nil, nil, ci)
	if err != nil {
		t.Fatalf("TestCtrlScriptFile Run %v", err)
	}
	t.Logf("TestCtrlScriptFile result is %v", string(ret))
}
|
package graph
import (
"github.com/zond/godip/common"
"reflect"
"testing"
)
// assertPath asserts that g.Path(src, dst, nil, false) returns exactly the
// expected sequence of provinces (the path excluding src itself).
func assertPath(t *testing.T, g *Graph, src, dst common.Province, found []common.Province) {
	if f := g.Path(src, dst, nil, false); !reflect.DeepEqual(f, found) {
		t.Errorf("%v should have a path between %v and %v like %v but found %v", g, src, dst, found, f)
	}
}
// TestPath builds a small undirected province graph and checks that Path
// finds the expected shortest routes.
func TestPath(t *testing.T) {
	g := New().
		Prov("a").Conn("f").Conn("h").
		Prov("b").Conn("g").Conn("c").
		Prov("c").Conn("b").Conn("h").Conn("d").Conn("i").
		Prov("d").Conn("c").Conn("h").Conn("e").
		Prov("e").Conn("d").Conn("g").Conn("f").
		Prov("f").Conn("a").Conn("e").
		Prov("g").Conn("b").Conn("h").Conn("e").
		Prov("h").Conn("a").Conn("c").Conn("d").Conn("g").
		Prov("i").Conn("c").
		Done()
	assertPath(t, g, "a", "e", []common.Province{"f", "e"})
	assertPath(t, g, "a", "d", []common.Province{"h", "d"})
	assertPath(t, g, "a", "i", []common.Province{"h", "c", "i"})
}
|
/*
* EVE Swagger Interface
*
* An OpenAPI for EVE Online
*
* OpenAPI spec version: 0.4.1.dev1
*
* Generated by: https://github.com/swagger-api/swagger-codegen.git
*/
package swagger
// mail schema
//
// PostCharactersCharacterIdMailMail models the request body for sending a
// character's mail (generated from the EVE Swagger Interface spec).
type PostCharactersCharacterIdMailMail struct {
	// approved_cost integer
	ApprovedCost int64 `json:"approved_cost,omitempty"`
	// body string
	Body string `json:"body,omitempty"`
	// recipients array
	Recipients []PostCharactersCharacterIdMailRecipient `json:"recipients,omitempty"`
	// subject string
	Subject string `json:"subject,omitempty"`
}
|
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mydump_test
import (
"bytes"
"compress/gzip"
"context"
"fmt"
"os"
"path/filepath"
"testing"
"github.com/pingcap/tidb/br/pkg/lightning/common"
"github.com/pingcap/tidb/br/pkg/lightning/config"
"github.com/pingcap/tidb/br/pkg/lightning/log"
md "github.com/pingcap/tidb/br/pkg/lightning/mydump"
"github.com/pingcap/tidb/br/pkg/storage"
filter "github.com/pingcap/tidb/util/table-filter"
router "github.com/pingcap/tidb/util/table-router"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// testMydumpLoaderSuite bundles the lightning config and on-disk source
// directory shared by the loader tests below.
type testMydumpLoaderSuite struct {
	cfg       *config.Config
	sourceDir string
}
// newConfigWithSourceDir returns a minimal config whose mydumper source
// points at sourceDir (as a file:// URL) with default file-routing rules.
func newConfigWithSourceDir(sourceDir string) *config.Config {
	path, _ := filepath.Abs(sourceDir)
	return &config.Config{
		Mydumper: config.MydumperRuntime{
			SourceDir:        "file://" + filepath.ToSlash(path),
			Filter:           []string{"*.*"},
			DefaultFileRules: true,
		},
	}
}
// newTestMydumpLoaderSuite creates a suite backed by a fresh temp dir.
//
// CLEANUP: the original declared `var err error` and then asserted
// require.Nil(t, err) without ever assigning it (a leftover from a previous
// ioutil.TempDir call); t.TempDir aborts the test itself on failure, so the
// dead check was removed.
func newTestMydumpLoaderSuite(t *testing.T) *testMydumpLoaderSuite {
	var s testMydumpLoaderSuite
	s.sourceDir = t.TempDir()
	s.cfg = newConfigWithSourceDir(s.sourceDir)
	return &s
}
// touch creates an empty file at sourceDir/filename... .
func (s *testMydumpLoaderSuite) touch(t *testing.T, filename ...string) {
	// BUG FIX: the original used make([]string, len(filename)+1) and then
	// appended, leaving len(filename)+1 empty leading elements — harmless
	// only because filepath.Join skips empty parts. Allocate capacity only.
	components := make([]string, 0, len(filename)+1)
	components = append(components, s.sourceDir)
	components = append(components, filename...)
	path := filepath.Join(components...)
	err := os.WriteFile(path, nil, 0o644)
	require.Nil(t, err)
}
// mkdir creates a sub-directory of the suite's source directory.
func (s *testMydumpLoaderSuite) mkdir(t *testing.T, dirname string) {
	path := filepath.Join(s.sourceDir, dirname)
	err := os.Mkdir(path, 0o755)
	require.Nil(t, err)
}
// TestLoader loads the checked-in ./examples dump and verifies the
// discovered database and table metadata. A non-existent source dir is
// expected to succeed here because validation happens later in DataCheck.
func TestLoader(t *testing.T) {
	ctx := context.Background()
	cfg := newConfigWithSourceDir("./not-exists")
	_, err := md.NewMyDumpLoader(ctx, cfg)
	// will check schema in tidb and data file later in DataCheck.
	require.NoError(t, err)
	cfg = newConfigWithSourceDir("./examples")
	mdl, err := md.NewMyDumpLoader(ctx, cfg)
	require.NoError(t, err)
	dbMetas := mdl.GetDatabases()
	require.Len(t, dbMetas, 1)
	dbMeta := dbMetas[0]
	require.Equal(t, "mocker_test", dbMeta.Name)
	require.Len(t, dbMeta.Tables, 4)
	expected := []struct {
		name      string
		dataFiles int
	}{
		{name: "i", dataFiles: 1},
		{name: "report_case_high_risk", dataFiles: 1},
		{name: "tbl_multi_index", dataFiles: 1},
		{name: "tbl_autoid", dataFiles: 1},
	}
	for i, table := range expected {
		assert.Equal(t, table.name, dbMeta.Tables[i].Name)
		assert.Equal(t, table.dataFiles, len(dbMeta.Tables[i].DataFiles))
	}
}
// TestEmptyDB checks that loading an empty source directory succeeds.
func TestEmptyDB(t *testing.T) {
	s := newTestMydumpLoaderSuite(t)
	_, err := md.NewMyDumpLoader(context.Background(), s.cfg)
	// will check schema in tidb and data file later in DataCheck.
	require.NoError(t, err)
}

// TestDuplicatedDB checks that two schema-create files for the same db (in
// different directories) are rejected.
func TestDuplicatedDB(t *testing.T) {
	/*
		Path/
			a/
				db-schema-create.sql
			b/
				db-schema-create.sql
	*/
	s := newTestMydumpLoaderSuite(t)
	s.mkdir(t, "a")
	s.touch(t, "a", "db-schema-create.sql")
	s.mkdir(t, "b")
	s.touch(t, "b", "db-schema-create.sql")
	_, err := md.NewMyDumpLoader(context.Background(), s.cfg)
	require.Regexp(t, `invalid database schema file, duplicated item - .*[/\\]db-schema-create\.sql`, err)
}

// TestTableNoHostDB checks a table schema whose database schema file is
// missing; this is tolerated until DataCheck.
func TestTableNoHostDB(t *testing.T) {
	/*
		Path/
			notdb-schema-create.sql
			db.tbl-schema.sql
	*/
	s := newTestMydumpLoaderSuite(t)
	dir := s.sourceDir
	err := os.WriteFile(filepath.Join(dir, "notdb-schema-create.sql"), nil, 0o644)
	require.NoError(t, err)
	err = os.WriteFile(filepath.Join(dir, "db.tbl-schema.sql"), nil, 0o644)
	require.NoError(t, err)
	_, err = md.NewMyDumpLoader(context.Background(), s.cfg)
	require.NoError(t, err)
}
// TestDuplicatedTable checks that two schema files for the same table are
// rejected.
func TestDuplicatedTable(t *testing.T) {
	/*
		Path/
			db-schema-create.sql
			a/
				db.tbl-schema.sql
			b/
				db.tbl-schema.sql
	*/
	s := newTestMydumpLoaderSuite(t)
	s.touch(t, "db-schema-create.sql")
	s.mkdir(t, "a")
	s.touch(t, "a", "db.tbl-schema.sql")
	s.mkdir(t, "b")
	s.touch(t, "b", "db.tbl-schema.sql")
	_, err := md.NewMyDumpLoader(context.Background(), s.cfg)
	require.Regexp(t, `invalid table schema file, duplicated item - .*db\.tbl-schema\.sql`, err)
}

// TestTableInfoNotFound checks that empty schema files produce empty SQL
// without logging an extraction failure.
func TestTableInfoNotFound(t *testing.T) {
	s := newTestMydumpLoaderSuite(t)
	s.cfg.Mydumper.CharacterSet = "auto"
	s.touch(t, "db-schema-create.sql")
	s.touch(t, "db.tbl-schema.sql")
	ctx := context.Background()
	store, err := storage.NewLocalStorage(s.sourceDir)
	require.NoError(t, err)
	loader, err := md.NewMyDumpLoader(ctx, s.cfg)
	require.NoError(t, err)
	for _, dbMeta := range loader.GetDatabases() {
		logger, buffer := log.MakeTestLogger()
		logCtx := log.NewContext(ctx, logger)
		dbSQL := dbMeta.GetSchema(logCtx, store)
		require.Equal(t, "CREATE DATABASE IF NOT EXISTS `db`", dbSQL)
		for _, tblMeta := range dbMeta.Tables {
			sql, err := tblMeta.GetSchema(logCtx, store)
			require.Equal(t, "", sql)
			require.NoError(t, err)
		}
		require.NotContains(t, buffer.Stripped(), "failed to extract table schema")
	}
}

// TestTableUnexpectedError checks the error surfaced when a table schema
// cannot be decoded (no CharacterSet configured here).
func TestTableUnexpectedError(t *testing.T) {
	s := newTestMydumpLoaderSuite(t)
	s.touch(t, "db-schema-create.sql")
	s.touch(t, "db.tbl-schema.sql")
	ctx := context.Background()
	store, err := storage.NewLocalStorage(s.sourceDir)
	require.NoError(t, err)
	loader, err := md.NewMyDumpLoader(ctx, s.cfg)
	require.NoError(t, err)
	for _, dbMeta := range loader.GetDatabases() {
		for _, tblMeta := range dbMeta.Tables {
			sql, err := tblMeta.GetSchema(ctx, store)
			require.Equal(t, "", sql)
			require.Contains(t, err.Error(), "failed to decode db.tbl-schema.sql as : Unsupported encoding ")
		}
	}
}
// TestMissingTableSchema checks the error when a data file has no matching
// table schema file.
func TestMissingTableSchema(t *testing.T) {
	s := newTestMydumpLoaderSuite(t)
	s.cfg.Mydumper.CharacterSet = "auto"
	s.touch(t, "db.tbl.csv")
	ctx := context.Background()
	store, err := storage.NewLocalStorage(s.sourceDir)
	require.NoError(t, err)
	loader, err := md.NewMyDumpLoader(ctx, s.cfg)
	require.NoError(t, err)
	for _, dbMeta := range loader.GetDatabases() {
		for _, tblMeta := range dbMeta.Tables {
			_, err := tblMeta.GetSchema(ctx, store)
			require.ErrorContains(t, err, "schema file is missing for the table")
		}
	}
}

// TestDataNoHostDB: a data file whose database schema file is missing is
// tolerated at load time.
func TestDataNoHostDB(t *testing.T) {
	/*
		Path/
			notdb-schema-create.sql
			db.tbl.sql
	*/
	s := newTestMydumpLoaderSuite(t)
	s.touch(t, "notdb-schema-create.sql")
	s.touch(t, "db.tbl.sql")
	_, err := md.NewMyDumpLoader(context.Background(), s.cfg)
	// will check schema in tidb and data file later in DataCheck.
	require.NoError(t, err)
}

// TestDataNoHostTable: a data file whose table schema file is missing is
// tolerated at load time.
func TestDataNoHostTable(t *testing.T) {
	/*
		Path/
			db-schema-create.sql
			db.tbl.sql
	*/
	s := newTestMydumpLoaderSuite(t)
	s.touch(t, "db-schema-create.sql")
	s.touch(t, "db.tbl.sql")
	_, err := md.NewMyDumpLoader(context.Background(), s.cfg)
	// will check schema in tidb and data file later in DataCheck.
	require.NoError(t, err)
}

// TestViewNoHostDB: a view schema without its database schema is an error.
func TestViewNoHostDB(t *testing.T) {
	/*
		Path/
			notdb-schema-create.sql
			db.tbl-schema-view.sql
	*/
	s := newTestMydumpLoaderSuite(t)
	s.touch(t, "notdb-schema-create.sql")
	s.touch(t, "db.tbl-schema-view.sql")
	_, err := md.NewMyDumpLoader(context.Background(), s.cfg)
	require.Contains(t, err.Error(), `invalid view schema file, miss host table schema for view 'tbl'`)
}

// TestViewNoHostTable: a view schema without its host table schema is an
// error.
func TestViewNoHostTable(t *testing.T) {
	/*
		Path/
			db-schema-create.sql
			db.tbl-schema-view.sql
	*/
	s := newTestMydumpLoaderSuite(t)
	s.touch(t, "db-schema-create.sql")
	s.touch(t, "db.tbl-schema-view.sql")
	_, err := md.NewMyDumpLoader(context.Background(), s.cfg)
	require.Contains(t, err.Error(), `invalid view schema file, miss host table schema for view 'tbl'`)
}
// TestDataWithoutSchema checks the synthesized metadata when only a data
// file exists: the db/table entries are created with empty schema files.
func TestDataWithoutSchema(t *testing.T) {
	s := newTestMydumpLoaderSuite(t)
	dir := s.sourceDir
	p := filepath.Join(dir, "db.tbl.sql")
	err := os.WriteFile(p, nil, 0o644)
	require.NoError(t, err)
	mdl, err := md.NewMyDumpLoader(context.Background(), s.cfg)
	require.NoError(t, err)
	require.Equal(t, []*md.MDDatabaseMeta{{
		Name: "db",
		SchemaFile: md.FileInfo{
			TableName: filter.Table{
				Schema: "db",
				Name:   "",
			},
			FileMeta: md.SourceFileMeta{Type: md.SourceTypeSchemaSchema},
		},
		Tables: []*md.MDTableMeta{{
			DB:           "db",
			Name:         "tbl",
			SchemaFile:   md.FileInfo{TableName: filter.Table{Schema: "db", Name: "tbl"}},
			DataFiles:    []md.FileInfo{{TableName: filter.Table{Schema: "db", Name: "tbl"}, FileMeta: md.SourceFileMeta{Path: "db.tbl.sql", Type: md.SourceTypeSQL}}},
			IsRowOrdered: true,
			IndexRatio:   0.0,
		}},
	}}, mdl.GetDatabases())
}

// TestTablesWithDots checks that table names containing dots are parsed
// correctly, and that trigger/post/partial file names are ignored.
func TestTablesWithDots(t *testing.T) {
	s := newTestMydumpLoaderSuite(t)
	s.touch(t, "db-schema-create.sql")
	s.touch(t, "db.tbl.with.dots-schema.sql")
	s.touch(t, "db.tbl.with.dots.0001.sql")
	s.touch(t, "db.0002-schema.sql")
	s.touch(t, "db.0002.sql")
	// insert some tables with file name structures which we're going to ignore.
	s.touch(t, "db.v-schema-trigger.sql")
	s.touch(t, "db.v-schema-post.sql")
	s.touch(t, "db.sql")
	s.touch(t, "db-schema.sql")
	mdl, err := md.NewMyDumpLoader(context.Background(), s.cfg)
	require.NoError(t, err)
	require.Equal(t, []*md.MDDatabaseMeta{{
		Name:       "db",
		SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "db", Name: ""}, FileMeta: md.SourceFileMeta{Path: "db-schema-create.sql", Type: md.SourceTypeSchemaSchema}},
		Tables: []*md.MDTableMeta{
			{
				DB:           "db",
				Name:         "0002",
				SchemaFile:   md.FileInfo{TableName: filter.Table{Schema: "db", Name: "0002"}, FileMeta: md.SourceFileMeta{Path: "db.0002-schema.sql", Type: md.SourceTypeTableSchema}},
				DataFiles:    []md.FileInfo{{TableName: filter.Table{Schema: "db", Name: "0002"}, FileMeta: md.SourceFileMeta{Path: "db.0002.sql", Type: md.SourceTypeSQL}}},
				IsRowOrdered: true,
				IndexRatio:   0.0,
			},
			{
				DB:           "db",
				Name:         "tbl.with.dots",
				SchemaFile:   md.FileInfo{TableName: filter.Table{Schema: "db", Name: "tbl.with.dots"}, FileMeta: md.SourceFileMeta{Path: "db.tbl.with.dots-schema.sql", Type: md.SourceTypeTableSchema}},
				DataFiles:    []md.FileInfo{{TableName: filter.Table{Schema: "db", Name: "tbl.with.dots"}, FileMeta: md.SourceFileMeta{Path: "db.tbl.with.dots.0001.sql", Type: md.SourceTypeSQL, SortKey: "0001"}}},
				IsRowOrdered: true,
				IndexRatio:   0.0,
			},
		},
	}}, mdl.GetDatabases())
}
// TestRouter exercises table routing rules in five scenarios (each in its
// own block): partial table matches, schema-only routing, routed tables
// without data, regex patterns, and mixed schema/table rules.
func TestRouter(t *testing.T) {
	// route db and table but with some table not hit rules
	{
		s := newTestMydumpLoaderSuite(t)
		s.cfg.Routes = []*router.TableRule{
			{
				SchemaPattern: "a*",
				TablePattern:  "t*",
				TargetSchema:  "b",
				TargetTable:   "u",
			},
		}
		s.touch(t, "a0-schema-create.sql")
		s.touch(t, "a0.t0-schema.sql")
		s.touch(t, "a0.t0.1.sql")
		s.touch(t, "a0.t1-schema.sql")
		s.touch(t, "a0.t1.1.sql")
		s.touch(t, "a1-schema-create.sql")
		s.touch(t, "a1.s1-schema.sql")
		s.touch(t, "a1.s1.1.sql")
		s.touch(t, "a1.t2-schema.sql")
		s.touch(t, "a1.t2.1.sql")
		s.touch(t, "a1.v1-schema.sql")
		s.touch(t, "a1.v1-schema-view.sql")
		mdl, err := md.NewMyDumpLoader(context.Background(), s.cfg)
		require.NoError(t, err)
		dbs := mdl.GetDatabases()
		// hit rules: a0.t0 -> b.u, a0.t1 -> b.0, a1.t2 -> b.u
		// not hit: a1.s1, a1.v1
		expectedDBS := []*md.MDDatabaseMeta{
			{
				Name:       "a0",
				SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "a0", Name: ""}, FileMeta: md.SourceFileMeta{Path: "a0-schema-create.sql", Type: md.SourceTypeSchemaSchema}},
			},
			{
				Name:       "a1",
				SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "a1", Name: ""}, FileMeta: md.SourceFileMeta{Path: "a1-schema-create.sql", Type: md.SourceTypeSchemaSchema}},
				Tables: []*md.MDTableMeta{
					{
						DB:           "a1",
						Name:         "s1",
						SchemaFile:   md.FileInfo{TableName: filter.Table{Schema: "a1", Name: "s1"}, FileMeta: md.SourceFileMeta{Path: "a1.s1-schema.sql", Type: md.SourceTypeTableSchema}},
						DataFiles:    []md.FileInfo{{TableName: filter.Table{Schema: "a1", Name: "s1"}, FileMeta: md.SourceFileMeta{Path: "a1.s1.1.sql", Type: md.SourceTypeSQL, SortKey: "1"}}},
						IndexRatio:   0.0,
						IsRowOrdered: true,
					},
					{
						DB:           "a1",
						Name:         "v1",
						SchemaFile:   md.FileInfo{TableName: filter.Table{Schema: "a1", Name: "v1"}, FileMeta: md.SourceFileMeta{Path: "a1.v1-schema.sql", Type: md.SourceTypeTableSchema}},
						DataFiles:    []md.FileInfo{},
						IndexRatio:   0.0,
						IsRowOrdered: true,
					},
				},
				Views: []*md.MDTableMeta{
					{
						DB:           "a1",
						Name:         "v1",
						SchemaFile:   md.FileInfo{TableName: filter.Table{Schema: "a1", Name: "v1"}, FileMeta: md.SourceFileMeta{Path: "a1.v1-schema-view.sql", Type: md.SourceTypeViewSchema}},
						IndexRatio:   0.0,
						IsRowOrdered: true,
					},
				},
			},
			{
				Name:       "b",
				SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "b", Name: ""}, FileMeta: md.SourceFileMeta{Path: "a0-schema-create.sql", Type: md.SourceTypeSchemaSchema}},
				Tables: []*md.MDTableMeta{
					{
						DB:         "b",
						Name:       "u",
						SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "b", Name: "u"}, FileMeta: md.SourceFileMeta{Path: "a0.t0-schema.sql", Type: md.SourceTypeTableSchema}},
						DataFiles: []md.FileInfo{
							{TableName: filter.Table{Schema: "b", Name: "u"}, FileMeta: md.SourceFileMeta{Path: "a0.t0.1.sql", Type: md.SourceTypeSQL, SortKey: "1"}},
							{TableName: filter.Table{Schema: "b", Name: "u"}, FileMeta: md.SourceFileMeta{Path: "a0.t1.1.sql", Type: md.SourceTypeSQL, SortKey: "1"}},
							{TableName: filter.Table{Schema: "b", Name: "u"}, FileMeta: md.SourceFileMeta{Path: "a1.t2.1.sql", Type: md.SourceTypeSQL, SortKey: "1"}},
						},
						IndexRatio:   0.0,
						IsRowOrdered: true,
					},
				},
			},
		}
		require.Equal(t, expectedDBS, dbs)
	}
	// only route schema but with some db not hit rules
	{
		s := newTestMydumpLoaderSuite(t)
		s.cfg.Routes = []*router.TableRule{
			{
				SchemaPattern: "c*",
				TargetSchema:  "c",
			},
		}
		s.touch(t, "c0-schema-create.sql")
		s.touch(t, "c0.t3-schema.sql")
		s.touch(t, "c0.t3.1.sql")
		s.touch(t, "d0-schema-create.sql")
		mdl, err := md.NewMyDumpLoader(context.Background(), s.cfg)
		require.NoError(t, err)
		dbs := mdl.GetDatabases()
		// hit rules: c0.t3 -> c.t3
		// not hit: d0
		expectedDBS := []*md.MDDatabaseMeta{
			{
				Name:       "d0",
				SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "d0", Name: ""}, FileMeta: md.SourceFileMeta{Path: "d0-schema-create.sql", Type: md.SourceTypeSchemaSchema}},
			},
			{
				Name:       "c",
				SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "c", Name: ""}, FileMeta: md.SourceFileMeta{Path: "c0-schema-create.sql", Type: md.SourceTypeSchemaSchema}},
				Tables: []*md.MDTableMeta{
					{
						DB:           "c",
						Name:         "t3",
						SchemaFile:   md.FileInfo{TableName: filter.Table{Schema: "c", Name: "t3"}, FileMeta: md.SourceFileMeta{Path: "c0.t3-schema.sql", Type: md.SourceTypeTableSchema}},
						DataFiles:    []md.FileInfo{{TableName: filter.Table{Schema: "c", Name: "t3"}, FileMeta: md.SourceFileMeta{Path: "c0.t3.1.sql", Type: md.SourceTypeSQL, SortKey: "1"}}},
						IndexRatio:   0.0,
						IsRowOrdered: true,
					},
				},
			},
		}
		require.Equal(t, expectedDBS, dbs)
	}
	// route schema and table but not have table data
	{
		s := newTestMydumpLoaderSuite(t)
		s.cfg.Routes = []*router.TableRule{
			{
				SchemaPattern: "e*",
				TablePattern:  "f*",
				TargetSchema:  "v",
				TargetTable:   "vv",
			},
		}
		s.touch(t, "e0-schema-create.sql")
		s.touch(t, "e0.f0-schema.sql")
		s.touch(t, "e0.f0-schema-view.sql")
		mdl, err := md.NewMyDumpLoader(context.Background(), s.cfg)
		require.NoError(t, err)
		dbs := mdl.GetDatabases()
		// hit rules: e0.f0 -> v.vv
		expectedDBS := []*md.MDDatabaseMeta{
			{
				Name:       "e0",
				SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "e0", Name: ""}, FileMeta: md.SourceFileMeta{Path: "e0-schema-create.sql", Type: md.SourceTypeSchemaSchema}},
			},
			{
				Name:       "v",
				SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "v", Name: ""}, FileMeta: md.SourceFileMeta{Path: "e0-schema-create.sql", Type: md.SourceTypeSchemaSchema}},
				Tables: []*md.MDTableMeta{
					{
						DB:           "v",
						Name:         "vv",
						SchemaFile:   md.FileInfo{TableName: filter.Table{Schema: "v", Name: "vv"}, FileMeta: md.SourceFileMeta{Path: "e0.f0-schema.sql", Type: md.SourceTypeTableSchema}},
						DataFiles:    []md.FileInfo{},
						IndexRatio:   0.0,
						IsRowOrdered: true,
					},
				},
				Views: []*md.MDTableMeta{
					{
						DB:           "v",
						Name:         "vv",
						SchemaFile:   md.FileInfo{TableName: filter.Table{Schema: "v", Name: "vv"}, FileMeta: md.SourceFileMeta{Path: "e0.f0-schema-view.sql", Type: md.SourceTypeViewSchema}},
						IndexRatio:   0.0,
						IsRowOrdered: true,
					},
				},
			},
		}
		require.Equal(t, expectedDBS, dbs)
	}
	// route by regex
	{
		s := newTestMydumpLoaderSuite(t)
		s.cfg.Routes = []*router.TableRule{
			{
				SchemaPattern: "~.*regexpr[1-9]+",
				TablePattern:  "~.*regexprtable",
				TargetSchema:  "downstream_db",
				TargetTable:   "downstream_table",
			},
			{
				SchemaPattern: "~.bdb.*",
				TargetSchema:  "db",
			},
		}
		s.touch(t, "test_regexpr1-schema-create.sql")
		s.touch(t, "test_regexpr1.test_regexprtable-schema.sql")
		s.touch(t, "test_regexpr1.test_regexprtable.1.sql")
		s.touch(t, "zbdb-schema-create.sql")
		s.touch(t, "zbdb.table-schema.sql")
		s.touch(t, "zbdb.table.1.sql")
		mdl, err := md.NewMyDumpLoader(context.Background(), s.cfg)
		require.NoError(t, err)
		dbs := mdl.GetDatabases()
		// hit rules: test_regexpr1.test_regexprtable -> downstream_db.downstream_table, zbdb.table -> db.table
		expectedDBS := []*md.MDDatabaseMeta{
			{
				Name:       "test_regexpr1",
				SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "test_regexpr1", Name: ""}, FileMeta: md.SourceFileMeta{Path: "test_regexpr1-schema-create.sql", Type: md.SourceTypeSchemaSchema}},
			},
			{
				Name:       "db",
				SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "db", Name: ""}, FileMeta: md.SourceFileMeta{Path: "zbdb-schema-create.sql", Type: md.SourceTypeSchemaSchema}},
				Tables: []*md.MDTableMeta{
					{
						DB:           "db",
						Name:         "table",
						SchemaFile:   md.FileInfo{TableName: filter.Table{Schema: "db", Name: "table"}, FileMeta: md.SourceFileMeta{Path: "zbdb.table-schema.sql", Type: md.SourceTypeTableSchema}},
						DataFiles:    []md.FileInfo{{TableName: filter.Table{Schema: "db", Name: "table"}, FileMeta: md.SourceFileMeta{Path: "zbdb.table.1.sql", Type: md.SourceTypeSQL, SortKey: "1"}}},
						IndexRatio:   0.0,
						IsRowOrdered: true,
					},
				},
			},
			{
				Name:       "downstream_db",
				SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "downstream_db", Name: ""}, FileMeta: md.SourceFileMeta{Path: "test_regexpr1-schema-create.sql", Type: md.SourceTypeSchemaSchema}},
				Tables: []*md.MDTableMeta{
					{
						DB:           "downstream_db",
						Name:         "downstream_table",
						SchemaFile:   md.FileInfo{TableName: filter.Table{Schema: "downstream_db", Name: "downstream_table"}, FileMeta: md.SourceFileMeta{Path: "test_regexpr1.test_regexprtable-schema.sql", Type: md.SourceTypeTableSchema}},
						DataFiles:    []md.FileInfo{{TableName: filter.Table{Schema: "downstream_db", Name: "downstream_table"}, FileMeta: md.SourceFileMeta{Path: "test_regexpr1.test_regexprtable.1.sql", Type: md.SourceTypeSQL, SortKey: "1"}}},
						IndexRatio:   0.0,
						IsRowOrdered: true,
					},
				},
			},
		}
		require.Equal(t, expectedDBS, dbs)
	}
	// only route db and only route some tables
	{
		s := newTestMydumpLoaderSuite(t)
		s.cfg.Routes = []*router.TableRule{
			// only route schema
			{
				SchemaPattern: "web",
				TargetSchema:  "web_test",
			},
			// only route one table
			{
				SchemaPattern: "x",
				TablePattern:  "t1*",
				TargetSchema:  "x2",
				TargetTable:   "t",
			},
		}
		s.touch(t, "web-schema-create.sql")
		s.touch(t, "x-schema-create.sql")
		s.touch(t, "x.t10-schema.sql") // hit rules, new name is x2.t
		s.touch(t, "x.t20-schema.sql") // not hit rules, name is x.t20
		mdl, err := md.NewMyDumpLoader(context.Background(), s.cfg)
		require.NoError(t, err)
		dbs := mdl.GetDatabases()
		// hit rules: web -> web_test, x.t10 -> x2.t
		// not hit: x.t20
		expectedDBS := []*md.MDDatabaseMeta{
			{
				Name:       "x",
				SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "x", Name: ""}, FileMeta: md.SourceFileMeta{Path: "x-schema-create.sql", Type: md.SourceTypeSchemaSchema}},
				Tables: []*md.MDTableMeta{
					{
						DB:           "x",
						Name:         "t20",
						SchemaFile:   md.FileInfo{TableName: filter.Table{Schema: "x", Name: "t20"}, FileMeta: md.SourceFileMeta{Path: "x.t20-schema.sql", Type: md.SourceTypeTableSchema}},
						IndexRatio:   0.0,
						IsRowOrdered: true,
						DataFiles:    []md.FileInfo{},
					},
				},
			},
			{
				Name:       "web_test",
				SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "web_test", Name: ""}, FileMeta: md.SourceFileMeta{Path: "web-schema-create.sql", Type: md.SourceTypeSchemaSchema}},
			},
			{
				Name:       "x2",
				SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "x2", Name: ""}, FileMeta: md.SourceFileMeta{Path: "x-schema-create.sql", Type: md.SourceTypeSchemaSchema}},
				Tables: []*md.MDTableMeta{
					{
						DB:           "x2",
						Name:         "t",
						SchemaFile:   md.FileInfo{TableName: filter.Table{Schema: "x2", Name: "t"}, FileMeta: md.SourceFileMeta{Path: "x.t10-schema.sql", Type: md.SourceTypeTableSchema}},
						IndexRatio:   0.0,
						IsRowOrdered: true,
						DataFiles:    []md.FileInfo{},
					},
				},
			},
		}
		require.Equal(t, expectedDBS, dbs)
	}
}
// TestRoutesPanic is a regression test: when a routed schema ("test1" ->
// "test") has only data files — no *-schema-create.sql or *-schema.sql
// fixtures — the loader must return no error (and, per the test name,
// must not panic).
func TestRoutesPanic(t *testing.T) {
	s := newTestMydumpLoaderSuite(t)
	s.cfg.Routes = []*router.TableRule{
		{
			SchemaPattern: "test1",
			TargetSchema:  "test",
		},
	}
	// Only data files for the routed schema; no schema files at all.
	s.touch(t, "test1.dump_test.001.sql")
	s.touch(t, "test1.dump_test.002.sql")
	s.touch(t, "test1.dump_test.003.sql")
	_, err := md.NewMyDumpLoader(context.Background(), s.cfg)
	require.NoError(t, err)
}
// TestBadRouterRule verifies that an invalid route pattern — "a*b", with a
// wildcard in the middle — is rejected by the loader with a descriptive
// "pattern ... not valid" error.
func TestBadRouterRule(t *testing.T) {
	s := newTestMydumpLoaderSuite(t)
	s.cfg.Routes = []*router.TableRule{{
		SchemaPattern: "a*b",
		TargetSchema:  "ab",
	}}
	s.touch(t, "a1b-schema-create.sql")
	_, err := md.NewMyDumpLoader(context.Background(), s.cfg)
	require.Regexp(t, `.*pattern a\*b not valid`, err.Error())
}
// TestFileRouting exercises user-defined file routing rules
// (cfg.Mydumper.FileRouters) with the built-in default rules disabled.
// Each file path is matched against regexp patterns that extract the
// schema, table and source type from a "<db-dir>/<file>" layout.
func TestFileRouting(t *testing.T) {
	s := newTestMydumpLoaderSuite(t)
	s.cfg.Mydumper.DefaultFileRules = false
	s.cfg.Mydumper.FileRouters = []*config.FileRouteRule{
		{
			// "<db>/schema.sql" -> database schema file.
			Pattern: `(?i)^(?:[^./]*/)*([a-z0-9_]+)/schema\.sql$`,
			Schema:  "$1",
			Type:    "schema-schema",
		},
		{
			// "<db>/<table>-table.sql" -> table schema file.
			Pattern: `(?i)^(?:[^./]*/)*([a-z0-9]+)/([a-z0-9_]+)-table\.sql$`,
			Schema:  "$1",
			Table:   "$2",
			Type:    "table-schema",
		},
		{
			// "<db>/<view>-view.sql" -> view schema file.
			Pattern: `(?i)^(?:[^./]*/)*([a-z0-9]+)/([a-z0-9_]+)-view\.sql$`,
			Schema:  "$1",
			Table:   "$2",
			Type:    "view-schema",
		},
		{
			// "<db>/<table><digits>[.<part>].(sql|csv)" -> data file; trailing
			// digits of the file name are not part of the table name, so
			// test0/test1/test2 all map to table "test".
			Pattern: `(?i)^(?:[^./]*/)*([a-z][a-z0-9_]*)/([a-z]+)[0-9]*(?:\.([0-9]+))?\.(sql|csv)$`,
			Schema:  "$1",
			Table:   "$2",
			Type:    "$4",
		},
		{
			// Top-level "<table>[.<part>].(sql|csv)" files are forced into db "d2".
			Pattern: `^(?:[^./]*/)*([a-z]+)(?:\.([0-9]+))?\.(sql|csv)$`,
			Schema:  "d2",
			Table:   "$1",
			Type:    "$3",
		},
	}
	s.mkdir(t, "d1")
	s.mkdir(t, "d2")
	s.touch(t, "d1/schema.sql")
	s.touch(t, "d1/test-table.sql")
	s.touch(t, "d1/test0.sql")
	s.touch(t, "d1/test1.sql")
	s.touch(t, "d1/test2.001.sql")
	s.touch(t, "d1/v1-table.sql")
	s.touch(t, "d1/v1-view.sql")
	// Matches none of the rules above (defaults are disabled), and indeed
	// does not appear anywhere in the expected metas below.
	s.touch(t, "d1/t1-schema-create.sql")
	s.touch(t, "d2/schema.sql")
	s.touch(t, "d2/abc-table.sql")
	s.touch(t, "abc.1.sql")
	mdl, err := md.NewMyDumpLoader(context.Background(), s.cfg)
	require.NoError(t, err)
	require.Equal(t, []*md.MDDatabaseMeta{
		{
			Name:       "d1",
			SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "d1", Name: ""}, FileMeta: md.SourceFileMeta{Path: filepath.FromSlash("d1/schema.sql"), Type: md.SourceTypeSchemaSchema}},
			Tables: []*md.MDTableMeta{
				{
					DB:   "d1",
					Name: "test",
					SchemaFile: md.FileInfo{
						TableName: filter.Table{Schema: "d1", Name: "test"},
						FileMeta:  md.SourceFileMeta{Path: filepath.FromSlash("d1/test-table.sql"), Type: md.SourceTypeTableSchema},
					},
					DataFiles: []md.FileInfo{
						{
							TableName: filter.Table{Schema: "d1", Name: "test"},
							FileMeta:  md.SourceFileMeta{Path: filepath.FromSlash("d1/test0.sql"), Type: md.SourceTypeSQL},
						},
						{
							TableName: filter.Table{Schema: "d1", Name: "test"},
							FileMeta:  md.SourceFileMeta{Path: filepath.FromSlash("d1/test1.sql"), Type: md.SourceTypeSQL},
						},
						{
							TableName: filter.Table{Schema: "d1", Name: "test"},
							FileMeta:  md.SourceFileMeta{Path: filepath.FromSlash("d1/test2.001.sql"), Type: md.SourceTypeSQL},
						},
					},
					IndexRatio:   0.0,
					IsRowOrdered: true,
				},
				{
					DB:   "d1",
					Name: "v1",
					SchemaFile: md.FileInfo{
						TableName: filter.Table{Schema: "d1", Name: "v1"},
						FileMeta:  md.SourceFileMeta{Path: filepath.FromSlash("d1/v1-table.sql"), Type: md.SourceTypeTableSchema},
					},
					DataFiles:    []md.FileInfo{},
					IndexRatio:   0.0,
					IsRowOrdered: true,
				},
			},
			// The view has both a backing-table schema (above) and a view schema.
			Views: []*md.MDTableMeta{
				{
					DB:   "d1",
					Name: "v1",
					SchemaFile: md.FileInfo{
						TableName: filter.Table{Schema: "d1", Name: "v1"},
						FileMeta:  md.SourceFileMeta{Path: filepath.FromSlash("d1/v1-view.sql"), Type: md.SourceTypeViewSchema},
					},
					IndexRatio:   0.0,
					IsRowOrdered: true,
				},
			},
		},
		{
			Name:       "d2",
			SchemaFile: md.FileInfo{TableName: filter.Table{Schema: "d2", Name: ""}, FileMeta: md.SourceFileMeta{Path: filepath.FromSlash("d2/schema.sql"), Type: md.SourceTypeSchemaSchema}},
			Tables: []*md.MDTableMeta{
				{
					DB:   "d2",
					Name: "abc",
					SchemaFile: md.FileInfo{
						TableName: filter.Table{Schema: "d2", Name: "abc"},
						FileMeta:  md.SourceFileMeta{Path: filepath.FromSlash("d2/abc-table.sql"), Type: md.SourceTypeTableSchema},
					},
					// "abc.1.sql" at the top level is routed into d2 by the last rule.
					DataFiles:    []md.FileInfo{{TableName: filter.Table{Schema: "d2", Name: "abc"}, FileMeta: md.SourceFileMeta{Path: "abc.1.sql", Type: md.SourceTypeSQL}}},
					IndexRatio:   0.0,
					IsRowOrdered: true,
				},
			},
		},
	}, mdl.GetDatabases())
}
// TestInputWithSpecialChars checks how percent-escapes and other special
// characters in dump file names are decoded into schema/table names:
// %XX sequences are percent-decoded once (%22 -> `"`, %2522 -> `%22`),
// while invalid escapes (%gg) and other characters (+) are kept verbatim.
func TestInputWithSpecialChars(t *testing.T) {
	/*
		Path/
			test-schema-create.sql
			test.t%22-schema.sql
			test.t%22.sql
			test.t%2522-schema.sql
			test.t%2522.csv
			test.t%gg-schema.sql
			test.t%gg.csv
			test.t+gg-schema.sql
			test.t+gg.csv
			db%22-schema-create.sql
			db%22.t%2522-schema.sql
			db%22.t%2522.0.csv
	*/
	s := newTestMydumpLoaderSuite(t)
	s.touch(t, "test-schema-create.sql")
	s.touch(t, "test.t%22-schema.sql")
	s.touch(t, "test.t%22.sql")
	s.touch(t, "test.t%2522-schema.sql")
	s.touch(t, "test.t%2522.csv")
	s.touch(t, "test.t%gg-schema.sql")
	s.touch(t, "test.t%gg.csv")
	s.touch(t, "test.t+gg-schema.sql")
	s.touch(t, "test.t+gg.csv")
	s.touch(t, "db%22-schema-create.sql")
	s.touch(t, "db%22.t%2522-schema.sql")
	s.touch(t, "db%22.t%2522.0.csv")
	mdl, err := md.NewMyDumpLoader(context.Background(), s.cfg)
	require.NoError(t, err)
	require.Equal(t, []*md.MDDatabaseMeta{
		{
			// "db%22" decodes to `db"`.
			Name:       `db"`,
			SchemaFile: md.FileInfo{TableName: filter.Table{Schema: `db"`, Name: ""}, FileMeta: md.SourceFileMeta{Path: filepath.FromSlash("db%22-schema-create.sql"), Type: md.SourceTypeSchemaSchema}},
			Tables: []*md.MDTableMeta{
				{
					DB: `db"`,
					// "t%2522" decodes one level to "t%22".
					Name: "t%22",
					SchemaFile: md.FileInfo{
						TableName: filter.Table{Schema: `db"`, Name: "t%22"},
						FileMeta:  md.SourceFileMeta{Path: filepath.FromSlash("db%22.t%2522-schema.sql"), Type: md.SourceTypeTableSchema},
					},
					DataFiles: []md.FileInfo{
						{
							TableName: filter.Table{Schema: `db"`, Name: "t%22"},
							FileMeta:  md.SourceFileMeta{Path: filepath.FromSlash("db%22.t%2522.0.csv"), Type: md.SourceTypeCSV, SortKey: "0"},
						},
					},
					IndexRatio:   0,
					IsRowOrdered: true,
				},
			},
		},
		{
			Name:       "test",
			SchemaFile: md.FileInfo{TableName: filter.Table{Schema: `test`, Name: ""}, FileMeta: md.SourceFileMeta{Path: filepath.FromSlash("test-schema-create.sql"), Type: md.SourceTypeSchemaSchema}},
			Tables: []*md.MDTableMeta{
				{
					DB: "test",
					// "t%22" decodes to `t"`.
					Name: `t"`,
					SchemaFile: md.FileInfo{
						TableName: filter.Table{Schema: "test", Name: `t"`},
						FileMeta:  md.SourceFileMeta{Path: filepath.FromSlash("test.t%22-schema.sql"), Type: md.SourceTypeTableSchema},
					},
					DataFiles:    []md.FileInfo{{TableName: filter.Table{Schema: "test", Name: `t"`}, FileMeta: md.SourceFileMeta{Path: "test.t%22.sql", Type: md.SourceTypeSQL}}},
					IndexRatio:   0,
					IsRowOrdered: true,
				},
				{
					DB:   "test",
					Name: "t%22",
					SchemaFile: md.FileInfo{
						TableName: filter.Table{Schema: "test", Name: "t%22"},
						FileMeta:  md.SourceFileMeta{Path: filepath.FromSlash("test.t%2522-schema.sql"), Type: md.SourceTypeTableSchema},
					},
					DataFiles:    []md.FileInfo{{TableName: filter.Table{Schema: "test", Name: "t%22"}, FileMeta: md.SourceFileMeta{Path: "test.t%2522.csv", Type: md.SourceTypeCSV}}},
					IndexRatio:   0,
					IsRowOrdered: true,
				},
				{
					DB: "test",
					// "%gg" is not a valid percent-escape and stays as-is.
					Name: "t%gg",
					SchemaFile: md.FileInfo{
						TableName: filter.Table{Schema: "test", Name: "t%gg"},
						FileMeta:  md.SourceFileMeta{Path: filepath.FromSlash("test.t%gg-schema.sql"), Type: md.SourceTypeTableSchema},
					},
					DataFiles:    []md.FileInfo{{TableName: filter.Table{Schema: "test", Name: "t%gg"}, FileMeta: md.SourceFileMeta{Path: "test.t%gg.csv", Type: md.SourceTypeCSV}}},
					IndexRatio:   0,
					IsRowOrdered: true,
				},
				{
					DB: "test",
					// "+" is not an escape marker and stays as-is.
					Name: "t+gg",
					SchemaFile: md.FileInfo{
						TableName: filter.Table{Schema: "test", Name: "t+gg"},
						FileMeta:  md.SourceFileMeta{Path: filepath.FromSlash("test.t+gg-schema.sql"), Type: md.SourceTypeTableSchema},
					},
					DataFiles:    []md.FileInfo{{TableName: filter.Table{Schema: "test", Name: "t+gg"}, FileMeta: md.SourceFileMeta{Path: "test.t+gg.csv", Type: md.SourceTypeCSV}}},
					IndexRatio:   0,
					IsRowOrdered: true,
				},
			},
		},
	}, mdl.GetDatabases())
}
// TestMaxScanFilesOption checks md.WithMaxScanFiles: with no limit all 200
// data files are loaded; with a limit (500) above the total file count the
// result is unchanged; with a limit (100) below it, loading stops with
// common.ErrTooManySourceFiles but still returns the partial result.
func TestMaxScanFilesOption(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	memStore := storage.NewMemStorage()
	// One table schema and one database schema ...
	require.NoError(t, memStore.WriteFile(ctx, "/test-src/db1.tbl1-schema.sql",
		[]byte("CREATE TABLE db1.tbl1 ( id INTEGER, val VARCHAR(255) );"),
	))
	require.NoError(t, memStore.WriteFile(ctx, "/test-src/db1-schema-create.sql",
		[]byte("CREATE DATABASE db1;"),
	))
	const dataFilesCount = 200
	maxScanFilesCount := 500
	// ... plus 200 single-row data files for db1.tbl1.
	for i := 0; i < dataFilesCount; i++ {
		require.NoError(t, memStore.WriteFile(ctx, fmt.Sprintf("/test-src/db1.tbl1.%d.sql", i),
			[]byte(fmt.Sprintf("INSERT INTO db1.tbl1 (id, val) VALUES (%d, 'aaa%d');", i, i)),
		))
	}
	// No limit: every data file is picked up.
	cfg := newConfigWithSourceDir("/test-src")
	mdl, err := md.NewMyDumpLoaderWithStore(ctx, cfg, memStore)
	require.NoError(t, err)
	require.NotNil(t, mdl)
	dbMetas := mdl.GetDatabases()
	require.Equal(t, 1, len(dbMetas))
	dbMeta := dbMetas[0]
	require.Equal(t, 1, len(dbMeta.Tables))
	tbl := dbMeta.Tables[0]
	require.Equal(t, dataFilesCount, len(tbl.DataFiles))
	// Limit above the total file count: behaves like no limit.
	mdl, err = md.NewMyDumpLoaderWithStore(ctx, cfg, memStore,
		md.WithMaxScanFiles(maxScanFilesCount),
	)
	require.NoError(t, err)
	require.NotNil(t, mdl)
	dbMetas = mdl.GetDatabases()
	require.Equal(t, 1, len(dbMetas))
	dbMeta = dbMetas[0]
	require.Equal(t, 1, len(dbMeta.Tables))
	tbl = dbMeta.Tables[0]
	require.Equal(t, dataFilesCount, len(tbl.DataFiles))
	// Limit below the total: error, but a truncated result is still returned.
	maxScanFilesCount = 100
	mdl, err = md.NewMyDumpLoaderWithStore(ctx, cfg, memStore,
		md.WithMaxScanFiles(maxScanFilesCount),
	)
	require.EqualError(t, err, common.ErrTooManySourceFiles.Error())
	require.NotNil(t, mdl)
	dbMetas = mdl.GetDatabases()
	require.Equal(t, 1, len(dbMetas))
	dbMeta = dbMetas[0]
	require.Equal(t, 1, len(dbMeta.Tables))
	tbl = dbMeta.Tables[0]
	// maxScanFilesCount-2 data files survive — presumably the two schema
	// files consume part of the scan budget (TODO confirm in the loader).
	require.Equal(t, maxScanFilesCount-2, len(tbl.DataFiles))
}
// TestExternalDataRoutes checks routing rules with source/schema/table
// extractors: all test_*.t* tables are merged into test.t, and every merged
// data file carries extend columns (c_table, c_schema, c_source) whose
// values are captured by the extractor regexps from the original table
// name, schema name and the configured source ID.
func TestExternalDataRoutes(t *testing.T) {
	s := newTestMydumpLoaderSuite(t)
	s.touch(t, "test_1-schema-create.sql")
	s.touch(t, "test_1.t1-schema.sql")
	s.touch(t, "test_1.t1.sql")
	s.touch(t, "test_2-schema-create.sql")
	s.touch(t, "test_2.t2-schema.sql")
	s.touch(t, "test_2.t2.sql")
	s.touch(t, "test_3-schema-create.sql")
	s.touch(t, "test_3.t1-schema.sql")
	s.touch(t, "test_3.t1.sql")
	s.touch(t, "test_3.t3-schema.sql")
	s.touch(t, "test_3.t3.sql")
	s.cfg.Mydumper.SourceID = "mysql-01"
	s.cfg.Routes = []*router.TableRule{
		{
			TableExtractor: &router.TableExtractor{
				TargetColumn: "c_table",
				TableRegexp:  "t(.*)",
			},
			SchemaExtractor: &router.SchemaExtractor{
				TargetColumn: "c_schema",
				SchemaRegexp: "test_(.*)",
			},
			SourceExtractor: &router.SourceExtractor{
				TargetColumn: "c_source",
				SourceRegexp: "mysql-(.*)",
			},
			SchemaPattern: "test_*",
			TablePattern:  "t*",
			TargetSchema:  "test",
			TargetTable:   "t",
		},
	}
	mdl, err := md.NewMyDumpLoader(context.Background(), s.cfg)
	require.NoError(t, err)
	// Exactly one database named "test" must come out of the routing.
	var database *md.MDDatabaseMeta
	for _, db := range mdl.GetDatabases() {
		if db.Name == "test" {
			require.Nil(t, database)
			database = db
		}
	}
	require.NotNil(t, database)
	require.Len(t, database.Tables, 1)
	require.Len(t, database.Tables[0].DataFiles, 4)
	// One row of extend values per merged data file, in scan order:
	// test_1.t1, test_2.t2, test_3.t1, test_3.t3.
	expectExtendCols := []string{"c_table", "c_schema", "c_source"}
	expectedExtendVals := [][]string{
		{"1", "1", "01"},
		{"2", "2", "01"},
		{"1", "3", "01"},
		{"3", "3", "01"},
	}
	for i, fileInfo := range database.Tables[0].DataFiles {
		require.Equal(t, expectExtendCols, fileInfo.FileMeta.ExtendData.Columns)
		require.Equal(t, expectedExtendVals[i], fileInfo.FileMeta.ExtendData.Values)
	}
}
// TestSampleFileCompressRatio writes 1000 gzip-compressed copies of
// "aaaa\n" (5000 raw bytes) to local storage and checks that
// SampleFileCompressRatio reports roughly rawBytes/compressedBytes.
func TestSampleFileCompressRatio(t *testing.T) {
	s := newTestMydumpLoaderSuite(t)
	store, err := storage.NewLocalStorage(s.sourceDir)
	require.NoError(t, err)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	byteArray := make([]byte, 0, 4096)
	bf := bytes.NewBuffer(byteArray)
	compressWriter := gzip.NewWriter(bf)
	csvData := []byte("aaaa\n")
	for i := 0; i < 1000; i++ {
		_, err = compressWriter.Write(csvData)
		require.NoError(t, err)
	}
	// Flush (not Close) is used: buffered data is pushed out but no gzip
	// footer is written — presumably the sampler only needs a readable
	// prefix of the stream (TODO confirm against SampleFileCompressRatio).
	err = compressWriter.Flush()
	require.NoError(t, err)
	fileName := "test_1.t1.csv.gz"
	err = store.WriteFile(ctx, fileName, bf.Bytes())
	require.NoError(t, err)
	ratio, err := md.SampleFileCompressRatio(ctx, md.SourceFileMeta{
		Path:        fileName,
		Compression: md.CompressionGZ,
	}, store)
	require.NoError(t, err)
	// 1000 writes x len("aaaa\n") = 5000 uncompressed bytes.
	require.InDelta(t, ratio, 5000.0/float64(bf.Len()), 1e-5)
}
|
package lib
import (
"github.com/mayflower/docker-ls/lib/connector"
)
// createConnector selects the registry connector implementation that
// matches the authentication scheme chosen in cfg: HTTP basic auth when
// cfg.basicAuth is set, token-based auth otherwise.
func createConnector(cfg *Config) connector.Connector {
	if !cfg.basicAuth {
		return connector.NewTokenAuthConnector(cfg)
	}
	return connector.NewBasicAuthConnector(cfg)
}
|
// Copyright 2019 TiKV Project Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mockid
import "sync/atomic"
// IDAllocator is a trivial in-memory ID generator used only in tests.
type IDAllocator struct {
	base uint64
}

// NewIDAllocator returns an allocator whose first Alloc yields 1.
func NewIDAllocator() *IDAllocator {
	return &IDAllocator{}
}

// Alloc atomically bumps the counter and returns the new value; it never
// fails, the error is only there to match the production interface.
func (a *IDAllocator) Alloc() (uint64, error) {
	return atomic.AddUint64(&a.base, 1), nil
}

// Rebase is a no-op that satisfies the IDAllocator interface.
func (*IDAllocator) Rebase() error {
	return nil
}
|
package main
import "fmt"
// d is a package-level constant, visible to every function in the file.
const d string = "Some Variable"

// main demonstrates function-local constants alongside a package-level
// one by printing all three.
func main() {
	const (
		a = "ANAND"
		s = 10
	)
	fmt.Println(a)
	fmt.Println(s)
	fmt.Println(d)
}
|
package controller
import (
"net/http"
"github.com/corentindeboisset/golang-api/app/service"
"github.com/corentindeboisset/golang-api/app/repository"
)
// UserController handles routes related to users. It carries the shared
// service and repository containers its handlers need.
type UserController struct {
	ServiceContainer    *service.Container    // shared service container
	RepositoryContainer *repository.Container // shared repository container
}
// InitializeUserController builds a UserController wired with the shared
// service and repository containers, failing if either cannot be obtained.
func InitializeUserController() (*UserController, error) {
	services, err := service.GetContainer()
	if err != nil {
		return nil, err
	}
	repositories, err := repository.GetContainer()
	if err != nil {
		return nil, err
	}
	ctrl := &UserController{
		ServiceContainer:    services,
		RepositoryContainer: repositories,
	}
	return ctrl, nil
}
// UserPage serves the user page route.
// NOTE(review): the response body is the literal "home" — confirm this is
// the intended payload for the user route. The Write error is ignored.
func (ctrl UserController) UserPage(w http.ResponseWriter, r *http.Request) {
	w.Write([]byte("home"))
}
|
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmdutil
import (
"github.com/mattn/go-isatty"
"github.com/spencercjh/sshctx/internal/env"
"os"
"os/exec"
)
// isTerminal reports whether fd is attached to a terminal, using the
// go-isatty probe on the underlying file descriptor.
func isTerminal(fd *os.File) bool {
	return isatty.IsTerminal(fd.Fd())
}
// fzfInstalled reports whether an fzf binary can be resolved via PATH.
func fzfInstalled() bool {
	// LookPath returns an empty path exactly when it returns an error.
	_, err := exec.LookPath("fzf")
	return err == nil
}
// UseFzf reports whether interactive choosing should go through fzf:
// the ignore env var must be unset, stdout must be a TTY, and fzf must
// be installed.
func UseFzf(stdout *os.File) bool {
	if os.Getenv(env.FZFIgnore) != "" {
		return false
	}
	return isTerminal(stdout) && fzfInstalled()
}
// UsePromptui reports whether choosing should fall back to promptui:
// stdout must be a TTY, and either fzf is unavailable or the ignore env
// var is set.
func UsePromptui(stdout *os.File) bool {
	ignored := os.Getenv(env.FZFIgnore) != ""
	return isTerminal(stdout) && (ignored || !fzfInstalled())
}
|
package initialisation
/*
構造体や配列の初期化に関するメモ
*/
// Struct1 is a sample type holding one scalar field and one slice field.
type Struct1 struct {
	x string
	y []string
}

// GetStruct1 returns a Struct1 populated with fixed sample values:
// x = "A" and y = ["B", "C"].
func GetStruct1() *Struct1 {
	return &Struct1{
		x: "A",
		y: []string{"B", "C"},
	}
}
|
package backtracking
import (
"strconv"
"strings"
)
// getPermutation2 returns the k-th permutation (1-based) of the sequence
// 1..n using the factorial number system: each digit of k-1 in factorial
// base selects which of the remaining numbers comes next.
func getPermutation2(n int, k int) string {
	// fact[i] holds i!.
	fact := make([]int, n+1)
	fact[0] = 1
	for i := 1; i <= n; i++ {
		fact[i] = fact[i-1] * i
	}
	remaining := make([]int, 0, n)
	for v := 1; v <= n; v++ {
		remaining = append(remaining, v)
	}
	k-- // switch to 0-based rank
	var sb strings.Builder
	for i := 1; i <= n; i++ {
		idx := k / fact[n-i]
		sb.WriteString(strconv.Itoa(remaining[idx]))
		remaining = append(remaining[:idx], remaining[idx+1:]...)
		k -= idx * fact[n-i]
	}
	return sb.String()
}
// =====================================
// getPermutation returns the k-th permutation (1-based) of 1..n. It
// delegates to the recursive helper help60, which resolves one digit of
// the answer per call.
func getPermutation(n int, k int) string {
	result := ""
	used := make([]int, n)
	help60(n, factorial(n-1), k, &used, &result)
	return result
}
// help60 appends one digit of the k-th permutation to res and recurses.
//
//	n       — how many numbers are still unplaced
//	f       — (n-1)! for this level: the number of permutations sharing
//	          the same leading digit
//	k       — 1-based rank within the permutations of the remaining numbers
//	visited — visited[i] != 0 marks that number i+1 has been used
//	res     — accumulates the answer as decimal digit characters
func help60(n int, f int, k int, visited *[]int, res *string) {
	// Work out which group (i.e. which unused number) rank k falls into.
	offset := k % f
	groupIndex := k / f
	if offset > 0 {
		groupIndex += 1
	}
	// Advance to the groupIndex-th unused number; afterwards i-1 is its index.
	var i int
	for ; i < len(*visited) && groupIndex > 0; i++ {
		if (*visited)[i] == 0 {
			groupIndex--
		}
	}
	// Mark the chosen number as used.
	(*visited)[i-1] = 1
	if n-1 > 0 {
		if offset == 0 {
			// An exact multiple of f is the LAST permutation inside the
			// chosen group, not the zeroth of the next one.
			offset = f
		}
		*res += strconv.Itoa(i)
		help60(n-1, f/(n-1), offset, visited, res)
	} else {
		*res += strconv.Itoa(i)
	}
}
// factorial returns n!; for n <= 1 it returns 1.
func factorial(n int) int {
	product := 1
	for i := 2; i <= n; i++ {
		product *= i
	}
	return product
}
|
package main
import (
"context"
"log"
"github.com/circonus-labs/gosnowth"
)
// ExampleGetNodeState demonstrates fetching the snowth node state from
// every active node in the cluster.
func ExampleGetNodeState() {
	// Build a client against the configured snowth servers.
	config := gosnowth.NewConfig(SnowthServers...)
	snowthClient, err := gosnowth.NewClient(context.Background(), config)
	if err != nil {
		log.Fatalf("failed to create snowth client: %v", err)
	}
	// Fetch and print the state of each active node.
	for _, activeNode := range snowthClient.ListActiveNodes() {
		nodeState, err := snowthClient.GetNodeState(activeNode)
		if err != nil {
			log.Fatalf("failed to get state: %v", err)
		}
		log.Println(nodeState)
	}
}
// ExampleGetNodeGossip demonstrates fetching gossip details from every
// active node in the cluster.
func ExampleGetNodeGossip() {
	// Build a client against the configured snowth servers.
	config := gosnowth.NewConfig(SnowthServers...)
	snowthClient, err := gosnowth.NewClient(context.Background(), config)
	if err != nil {
		log.Fatalf("failed to create snowth client: %v", err)
	}
	// Fetch and print the gossip data of each active node.
	for _, activeNode := range snowthClient.ListActiveNodes() {
		gossipInfo, err := snowthClient.GetGossipInfo(activeNode)
		if err != nil {
			log.Fatalf("failed to get gossip: %v", err)
		}
		log.Println(gossipInfo)
	}
}
// ExampleGetTopology demonstrates fetching topology details from every
// active node in the cluster.
func ExampleGetTopology() {
	// Build a client against the configured snowth servers.
	config := gosnowth.NewConfig(SnowthServers...)
	snowthClient, err := gosnowth.NewClient(context.Background(), config)
	if err != nil {
		log.Fatalf("failed to create snowth client: %v", err)
	}
	// Fetch and print the topology of each active node.
	for _, activeNode := range snowthClient.ListActiveNodes() {
		topologyInfo, err := snowthClient.GetTopologyInfo(activeNode)
		if err != nil {
			log.Fatalf("failed to get topology: %v", err)
		}
		log.Println(topologyInfo)
	}
}
|
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
)
//apiUrl = "https://coinmarketcap-nexuist.rhcloud.com/api/"
//ethApi = apiUrl + "eth"
//btcApi = apiUrl + "btc"
// main fetches current Ethereum market data from the public
// coinmarketcap proxy API and prints the USD price.
func main() {
	r, err := http.Get("https://coinmarketcap-nexuist.rhcloud.com/api/eth")
	if err != nil {
		// Previously this error was discarded; on a failed request r is
		// nil and the Body access below panicked.
		fmt.Println("request failed:", err)
		return
	}
	defer r.Body.Close()
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		fmt.Println("reading response failed:", err)
		return
	}
	// Generate your Go Structures from JSON data by
	// using this link: http://json2struct.mervine.net/
	// NOTE(review): Volume.Usd is int while every sibling field is
	// float64 — confirm against the API payload.
	type ApiReturn struct {
		Change    string `json:"change"`
		MarketCap struct {
			Aud float64 `json:"aud"`
			Btc float64 `json:"btc"`
			Cad float64 `json:"cad"`
			Cny float64 `json:"cny"`
			Eur float64 `json:"eur"`
			Gbp float64 `json:"gbp"`
			Hkd float64 `json:"hkd"`
			Jpy float64 `json:"jpy"`
			Rub float64 `json:"rub"`
			Usd float64 `json:"usd"`
		} `json:"market_cap"`
		Name     string `json:"name"`
		Position string `json:"position"`
		Price    struct {
			Aud float64 `json:"aud"`
			Btc float64 `json:"btc"`
			Cad float64 `json:"cad"`
			Cny float64 `json:"cny"`
			Eur float64 `json:"eur"`
			Gbp float64 `json:"gbp"`
			Hkd float64 `json:"hkd"`
			Jpy float64 `json:"jpy"`
			Rub float64 `json:"rub"`
			Usd float64 `json:"usd"`
		} `json:"price"`
		Supply    string `json:"supply"`
		Symbol    string `json:"symbol"`
		Timestamp string `json:"timestamp"`
		Volume    struct {
			Aud float64 `json:"aud"`
			Btc float64 `json:"btc"`
			Cad float64 `json:"cad"`
			Cny float64 `json:"cny"`
			Eur float64 `json:"eur"`
			Gbp float64 `json:"gbp"`
			Hkd float64 `json:"hkd"`
			Jpy float64 `json:"jpy"`
			Rub float64 `json:"rub"`
			Usd int     `json:"usd"`
		} `json:"volume"`
	}
	// The earlier decode into a bare interface{} was unused and has been
	// removed; decode straight into the typed struct and check the error.
	var n ApiReturn
	if err := json.Unmarshal(body, &n); err != nil {
		fmt.Println("decoding response failed:", err)
		return
	}
	fmt.Printf("1 ETH = %f USD\n", n.Price.Usd)
}
|
package dushengchen
/*
question:
https://leetcode.com/problems/longest-palindromic-substring/
Submission:
https://leetcode.com/submissions/detail/289234911/
*/
// longestPalindrome returns the longest palindromic substring of s.
// It scans each run of identical characters as a palindrome center and
// expands symmetrically around it; the index jumps past each run so
// every center is considered once.
func longestPalindrome(s string) string {
	best := ""
	for i := 0; i < len(s); i++ {
		left, right := i, i
		// Absorb the whole run of equal characters as the center.
		for right+1 < len(s) && s[left] == s[right+1] {
			right++
		}
		i = right // skip the rest of the run on the next iteration
		// Expand outwards while the characters mirror each other.
		span := 1
		for left-span >= 0 && right+span < len(s) && s[left-span] == s[right+span] {
			span++
		}
		if length := right - left + 1 + 2*(span-1); length > len(best) {
			best = s[left-span+1 : right+span]
		}
	}
	return best
}
|
package middlewares
import (
"net/http"
"github.com/JeanCntrs/airbnb-catalog-server/db"
)
// CheckDB wraps next with a database health check: the request proceeds
// only while the connection is alive; otherwise a 500 is returned and the
// chain stops.
func CheckDB(next http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if db.CheckConnection() != 0 {
			next.ServeHTTP(w, r)
			return
		}
		http.Error(w, "Lost connection to database", 500)
	}
}
|
/*
Go Language Raspberry Pi Interface
(c) Copyright David Thorpe 2016-2018
All Rights Reserved
Documentation http://djthorpe.github.io/gopi/
For Licensing and Usage information, please see LICENSE.md
*/
package sensordb
import (
"context"
"fmt"
// Frameworks
"github.com/djthorpe/gopi"
"github.com/djthorpe/gopi-rpc/sys/grpc"
"github.com/djthorpe/gopi/util/event"
"github.com/djthorpe/sensors"
// Protocol buffers
pb "github.com/djthorpe/sensors/rpc/protobuf/sensordb"
empty "github.com/golang/protobuf/ptypes/empty"
)
////////////////////////////////////////////////////////////////////////////////
// TYPES
// Service is the configuration used to open the sensordb gRPC service:
// the RPC server to register with and the backing sensor database.
type Service struct {
	Server   gopi.RPCServer
	Database sensors.Database
}
// service is the running sensordb gRPC service instance.
type service struct {
	log      gopi.Logger      // logger supplied at Open time
	database sensors.Database // backing database, released on Close

	// Emit events to any streaming listeners.
	event.Publisher
}
////////////////////////////////////////////////////////////////////////////////
// OPEN AND CLOSE
// Open validates the configuration, builds the service instance and
// registers it with the gRPC server. It returns gopi.ErrBadParameter if
// the server or the database is missing.
func (config Service) Open(log gopi.Logger) (gopi.Driver, error) {
	log.Debug("<grpc.service.sensordb.Open>{ server=%v database=%v }", config.Server, config.Database)
	if config.Server == nil || config.Database == nil {
		return nil, gopi.ErrBadParameter
	}
	svc := &service{
		log:      log,
		database: config.Database,
	}
	// Register the implementation with the underlying GRPC server.
	pb.RegisterSensorDBServer(config.Server.(grpc.GRPCServer).GRPCServer(), svc)
	return svc, nil
}
// Close shuts the service down: it closes the event publisher and drops
// the database reference. Always returns nil.
func (this *service) Close() error {
	this.log.Debug("<grpc.service.sensordb.Close>{}")
	// Close publisher so streaming listeners are released.
	this.Publisher.Close()
	// Release resources.
	this.database = nil
	// Success
	return nil
}
////////////////////////////////////////////////////////////////////////////////
// STRINGIFY
// String returns a human-readable description of the service for logging.
func (this *service) String() string {
	return fmt.Sprintf("<grpc.service.sensordb>{ database=%v }", this.database)
}
////////////////////////////////////////////////////////////////////////////////
// CANCEL STREAMING REQUESTS
// CancelRequests aborts any in-flight streaming requests by emitting a
// null event through the publisher. Always returns nil.
func (this *service) CancelRequests() error {
	this.log.Debug2("<grpc.service.sensordb.CancelRequests>{}")
	// A NullEvent signals streaming handlers to stop.
	this.Publisher.Emit(event.NullEvent)
	return nil
}
////////////////////////////////////////////////////////////////////////////////
// RPC METHODS
// Ping is a liveness probe: it ignores its input and returns an empty
// response with no error.
func (this *service) Ping(ctx context.Context, _ *empty.Empty) (*empty.Empty, error) {
	this.log.Debug2("<grpc.service.sensordb.Ping>{ }")
	return &empty.Empty{}, nil
}
|
package awsvaultcredsprovider
import (
"github.com/jcmturner/restclient"
"github.com/jcmturner/vaultclient"
"github.com/jcmturner/vaultmock"
"github.com/stretchr/testify/assert"
"testing"
"time"
)
// Fixture values shared by the provider tests. The key, token and ARN are
// the documented AWS example placeholders, not real credentials.
const (
	Test_SecretAccessKey = "9drTJvcXLB89EXAMPLELB8923FB892xMFI"
	Test_SessionToken    = "AQoXdzELDDY//////////wEaoAK1wvxJY12r2IrDFT2IvAzTCn3zHoZ7YNtpiQLF0MqZye/qwjzP2iEXAMPLEbw/m3hsj8VBTkPORGvr9jM5sgP+w9IZWZnU+LWhmg+a5fDi2oTGUYcdg9uexQ4mtCHIHfi4citgqZTgco40Yqr4lIlo4V2b2Dyauk0eYFNebHtYlFVgAUj+7Indz3LU0aTWk1WKIjHmmMCIoTkyYp/k7kUG7moeEYKSitwQIi6Gjn+nyzM+PtoA3685ixzv0R7i5rjQi0YE0lf1oeie3bDiNHncmzosRM6SFiPzSvp6h/32xQuZsjcypmwsPSDtTPYcs0+YN/8BRi2/IcrxSpnWEXAMPLEXSDFTAQAM6Dl9zR0tXoybnlrZIwMLlMi1Kcgo5OytwU="
	Test_Expiration      = "2016-03-15T00:05:07Z"
	Test_AccessKeyId     = "ASIAJEXAMPLEXEG2JICEA"
	Test_SecretsPath     = "/secret/awskeys/"
	Test_Arn             = "arn:aws:iam::123456789012:user/test"
)
// TestVaultCredsProvider_StoreAndReadBack stores an AWS credential in a
// mock Vault and verifies that a fresh provider reads back identical
// values.
func TestVaultCredsProvider_StoreAndReadBack(t *testing.T) {
	s, addr, certPool, _, test_app_id, test_user_id := vaultmock.RunMockVault(t)
	defer s.Close()
	c := restclient.NewConfig().WithEndPoint(addr).WithCACertPool(certPool)
	vconf := vaultclient.Config{
		SecretsPath:      Test_SecretsPath,
		ReSTClientConfig: *c,
	}
	vcreds := vaultclient.Credentials{
		UserID: test_user_id,
		AppID:  test_app_id,
	}
	p, err := NewVaultCredsProvider(Test_Arn, vconf, vcreds)
	if err != nil {
		t.Fatalf("Error creating VaultCredsProvider: %v", err)
	}
	xt, err := time.Parse(time.RFC3339, Test_Expiration)
	if err != nil {
		// Was t.Logf: a bad fixture timestamp silently invalidated the
		// Expiration assertion below, so fail fast instead.
		t.Fatalf("Error parsing test expiry time: %v", err)
	}
	cred := AWSCredential{
		secretAccessKey: Test_SecretAccessKey,
		sessionToken:    Test_SessionToken,
		AccessKeyId:     Test_AccessKeyId,
		Expiration:      xt,
	}
	p.Credential = cred
	// Store
	err = p.Store()
	if err != nil {
		t.Fatalf("Failed to store AWS credential: %v", err)
	}
	// Read back with a brand-new provider instance.
	pr, err := NewVaultCredsProvider(Test_Arn, vconf, vcreds)
	if err != nil {
		t.Fatalf("Error creating VaultCredsProvider for read: %v", err)
	}
	err = pr.Read()
	if err != nil {
		// Fixed copy-paste: this failure path is about reading, not storing.
		t.Fatalf("Failed to read AWS credential: %v", err)
	}
	assert.Equal(t, cred.AccessKeyId, pr.Credential.AccessKeyId, "AccessKeyId not as expected")
	assert.Equal(t, cred.Expiration, pr.Credential.Expiration, "Expiration not as expected")
	assert.Equal(t, cred.sessionToken, pr.Credential.sessionToken, "SessionToken not as expected")
	assert.Equal(t, cred.secretAccessKey, pr.Credential.secretAccessKey, "SecretAccessKey not as expected")
}
// TestVaultCredsProvider_IsExpired covers the expiry logic: a future
// expiration is valid, a past one is expired, and a negative TTL forces
// expiry regardless of the expiration time.
func TestVaultCredsProvider_IsExpired(t *testing.T) {
	provider := VaultCredsProvider{}
	provider.Credential = AWSCredential{
		secretAccessKey: Test_SecretAccessKey,
		sessionToken:    Test_SessionToken,
		AccessKeyId:     Test_AccessKeyId,
	}
	// Expires an hour from now: still valid.
	provider.SetExpiration(time.Now().UTC().Add(time.Hour))
	assert.False(t, provider.IsExpired(), "IsExpired stating credential is expired when it isn't")
	// Expired an hour ago: no longer valid.
	provider.SetExpiration(time.Now().UTC().Add(-time.Hour))
	assert.True(t, provider.IsExpired(), "IsExpired does not recognise credential is expired")
	// Negative TTL overrides the expiration time entirely.
	provider.Credential.TTL = -1
	assert.True(t, provider.IsExpired(), "IsExpired should always be true when TTL < 0")
}
// TestVaultCredsProvider_Retrieve stores a credential in a mock Vault and
// checks that Retrieve returns it with the expected provider name and
// access key.
func TestVaultCredsProvider_Retrieve(t *testing.T) {
	s, addr, certPool, _, test_app_id, test_user_id := vaultmock.RunMockVault(t)
	defer s.Close()
	c := restclient.NewConfig().WithEndPoint(addr).WithCACertPool(certPool)
	vconf := vaultclient.Config{
		SecretsPath:      Test_SecretsPath,
		ReSTClientConfig: *c,
	}
	vcreds := vaultclient.Credentials{
		UserID: test_user_id,
		AppID:  test_app_id,
	}
	p, err := NewVaultCredsProvider(Test_Arn, vconf, vcreds)
	if err != nil {
		t.Fatalf("Error creating VaultCredsProvider: %v", err)
	}
	xt, err := time.Parse(time.RFC3339, Test_Expiration)
	if err != nil {
		// NOTE(review): only logged, not fatal — a parse failure leaves xt
		// at its zero value; confirm whether this should be t.Fatalf.
		t.Logf("Error parsing test expiry time: %v", err)
	}
	cred := AWSCredential{
		secretAccessKey: Test_SecretAccessKey,
		sessionToken:    Test_SessionToken,
		AccessKeyId:     Test_AccessKeyId,
		Expiration:      xt,
	}
	p.Credential = cred
	// Store the credential so Retrieve has something to fetch.
	err = p.Store()
	if err != nil {
		t.Fatalf("Failed to store AWS credential: %v", err)
	}
	v, err := p.Retrieve()
	if err != nil {
		t.Fatalf("Error retrieving credentials: %v", err)
	}
	assert.Equal(t, PROVIDER_NAME, v.ProviderName, "Provider name not as expected")
	assert.Equal(t, cred.AccessKeyId, v.AccessKeyID, "AccessKeyId not as expected")
}
|
// Copyright 2017 Mirantis
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package extensions
import (
"encoding/json"
"time"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName and Version identify the API group/version served by this
// extension package.
const (
	GroupName string = "network-checker.ext"
	Version   string = "v1"
)
// SchemeGroupVersion is the group/version the types below are registered
// under; SchemeBuilder collects this package's scheme registration funcs.
var (
	SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: Version}
	SchemeBuilder      = runtime.NewSchemeBuilder(addKnownTypes)
)
// addKnownTypes registers the Agent types (plus the meta list/delete
// options) with the runtime scheme under network-checker.ext/v1.
func addKnownTypes(scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(
		SchemeGroupVersion,
		&Agent{},
		&AgentList{},
		&meta_v1.ListOptions{},
		&meta_v1.DeleteOptions{},
	)
	return nil
}
// AgentSpec is the payload reported by a network-checker agent: its
// reporting cadence, identity, timestamps and the DNS/IP data it observed.
type AgentSpec struct {
	ReportInterval int                 `json:"report_interval"`
	PodName        string              `json:"podname"`
	HostDate       time.Time           `json:"hostdate"`
	LastUpdated    time.Time           `json:"last_updated"`
	LookupHost     map[string][]string `json:"nslookup"`
	IPs            map[string][]string `json:"ips"`
}
// Agent is the API object wrapping a single agent's metadata and spec.
type Agent struct {
	meta_v1.TypeMeta `json:",inline"`
	Metadata         meta_v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	Spec             AgentSpec          `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
}
// AgentList is the API object holding a collection of Agents.
type AgentList struct {
	meta_v1.TypeMeta `json:",inline"`
	Metadata         meta_v1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	Items            []Agent          `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// GetObjectKind returns the TypeMeta describing the Agent's kind.
func (e *Agent) GetObjectKind() schema.ObjectKind {
	return &e.TypeMeta
}

// GetObjectMeta exposes the Agent's object metadata.
func (e *Agent) GetObjectMeta() meta_v1.Object {
	return &e.Metadata
}

// GetObjectKind returns the TypeMeta describing the AgentList's kind.
func (el *AgentList) GetObjectKind() schema.ObjectKind {
	return &el.TypeMeta
}

// GetListMeta exposes the AgentList's list metadata.
func (el *AgentList) GetListMeta() meta_v1.List {
	return &el.Metadata
}

// AgentCopy and AgentListCopy are alias types with no methods; decoding
// into them lets UnmarshalJSON avoid recursing into itself.
type AgentCopy Agent
type AgentListCopy AgentList
// UnmarshalJSON decodes data into the Agent via the AgentCopy alias,
// which strips this method and so avoids infinite recursion. e is only
// written on success.
func (e *Agent) UnmarshalJSON(data []byte) error {
	var decoded AgentCopy
	if err := json.Unmarshal(data, &decoded); err != nil {
		return err
	}
	*e = Agent(decoded)
	return nil
}
// UnmarshalJSON decodes data into the AgentList via the AgentListCopy
// alias, which strips this method and so avoids infinite recursion. el is
// only written on success.
func (el *AgentList) UnmarshalJSON(data []byte) error {
	var decoded AgentListCopy
	if err := json.Unmarshal(data, &decoded); err != nil {
		return err
	}
	*el = AgentList(decoded)
	return nil
}
|
// +build aws_lambda
package cmd
import (
"github.com/aws/aws-lambda-go/lambda"
"github.com/awslabs/aws-lambda-go-api-proxy/httpadapter"
"github.com/movieManagement/gen/restapi/operations"
ini "github.com/movieManagement/init"
log "github.com/movieManagement/logging"
)
// Start is the lambda main entry point: it runs the service init hook,
// wraps the API handler in an API-Gateway proxy adapter and hands the
// proxy to the AWS Lambda runtime.
func Start(api *operations.MovieServiceAPI, _ int) error {
	log.Infof("Running Init()...")
	ini.Init()
	proxyAdapter := httpadapter.New(api.Serve(nil))
	log.Debugf("Starting Lambda")
	lambda.Start(proxyAdapter.Proxy)
	return nil
}
|
package core
import (
"reflect"
"sort"
"testing"
)
// TestSplit checks that Split partitions the participants into the
// expected number of groups.
func TestSplit(t *testing.T) {
	type args struct {
		participants []string
	}
	tests := []struct {
		name               string
		args               args
		wantAmountOfGroups int
	}{
		{"6 participants", args{participants: []string{"Alex", "Igor", "Olga", "Max", "Vladimir", "Vadim"}}, 2},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// The original dropped Split's error and compared two ints
			// with reflect.DeepEqual, printing the slice instead of its
			// length in the failure message.
			got, err := Split(tt.args.participants)
			if err != nil {
				t.Fatalf("Split() returned unexpected error: %v", err)
			}
			if len(got) != tt.wantAmountOfGroups {
				t.Errorf("len of Split() = %v, wantAmountOfGroups %v", len(got), tt.wantAmountOfGroups)
			}
		})
	}
}
// TestSplitOddAmountOfParticipants checks that Split rejects an odd
// number of participants with an error.
func TestSplitOddAmountOfParticipants(t *testing.T) {
	// given
	participants := []string{"Alex", "Igor", "Olga", "Max", "Vladimir"}
	// when
	// Named err rather than error: the original shadowed the
	// predeclared error type.
	_, err := Split(participants)
	// then
	if err == nil {
		t.Errorf("Wanted error but got nil for 5 participants %v", participants)
	}
}
// TestRandomOrder checks that repeated Splits of the same input keep
// the same membership (after per-group sorting) but shuffle the order,
// i.e. the split is randomized rather than deterministic.
func TestRandomOrder(t *testing.T) {
	// given
	participants := []string{"Alex", "Igor", "Olga", "Max", "Vladimir", "Vadim"}
	// when
	firstSplit, _ := Split(participants)
	secondSplit, _ := Split(participants)
	thirdSplit, _ := Split(participants)
	// then
	if len(firstSplit) != len(secondSplit) && len(firstSplit) != len(thirdSplit) {
		t.Errorf("Size of split for first, second and third split should be equal")
	}
	if !reflect.DeepEqual(sortAndMerge(firstSplit), sortAndMerge(secondSplit)) || !reflect.DeepEqual(sortAndMerge(secondSplit), sortAndMerge(thirdSplit)) {
		t.Errorf("Consequence runs produce different results (size)")
	}
	// Walk element-by-element; the labeled break exits both loops as
	// soon as any position differs between the three runs.
	sameOrder := true
out:
	for i, firstSplitPart := range firstSplit {
		for j, elementInFirstSplitPart := range firstSplitPart {
			if elementInFirstSplitPart != secondSplit[i][j] ||
				elementInFirstSplitPart != thirdSplit[i][j] ||
				secondSplit[i][j] != thirdSplit[i][j] {
				sameOrder = false
				break out
			}
		}
	}
	if sameOrder {
		t.Errorf("Consequence runs produce same order results")
	}
}
// sortAndMerge flattens a split into a single slice by sorting each
// group individually (on a copy, so the input is never mutated) and
// concatenating the groups in their original order. Used to compare
// splits irrespective of ordering inside each group.
func sortAndMerge(splitToSort [][]string) []string {
	totalSize := 0
	for i := range splitToSort {
		totalSize += len(splitToSort[i])
	}
	// Pre-size with totalSize — the original computed it but never
	// used it, leaving the append loop to reallocate.
	result := make([]string, 0, totalSize)
	for _, element := range splitToSort {
		tmp := make([]string, len(element))
		copy(tmp, element)
		sort.Strings(tmp)
		result = append(result, tmp...)
	}
	return result
}
|
package industrydb
import (
_ "github.com/go-sql-driver/mysql"
"stockdb"
"entity/xlsentity"
"util"
)
type MinorIndustryDatabase struct {
stockdb.DBBase
}
// InsertIndustry inserts one row into csrcminorindustry and returns 0.
// Errors are funneled through util.CheckError.
func (s *MinorIndustryDatabase) InsertIndustry(industry xlsentity.Industry) int {
	db := s.Open()
	// Defer both closes up front: the original only registered
	// stmt.Close after Exec and called db.Close non-deferred, leaking
	// the connection if any CheckError aborted mid-way.
	defer db.Close()
	stmt, err := db.Prepare("insert csrcminorindustry set code=?, name=?, name_en=?, bigcode=?")
	util.CheckError(err)
	defer stmt.Close()
	res, err := stmt.Exec(industry.Code, industry.Name, industry.Name_en, industry.Parent)
	util.CheckError(err)
	_, reserr := res.LastInsertId()
	util.CheckError(reserr)
	return 0
}
// DeleteIndustry removes the row with the given code from
// csrcminorindustry and returns 0.
func (s *MinorIndustryDatabase) DeleteIndustry(code int) int {
	db := s.Open()
	// Deferred so the connection is released even if CheckError aborts.
	defer db.Close()
	stmt, err := db.Prepare("delete from csrcminorindustry where code=?")
	util.CheckError(err)
	defer stmt.Close()
	res, err := stmt.Exec(code)
	util.CheckError(err)
	_, reserr := res.RowsAffected()
	util.CheckError(reserr)
	return 0
}
// UpdateIndustry rewrites the name/name_en/bigcode columns of the row
// keyed by industry.Code and returns 0.
func (s *MinorIndustryDatabase) UpdateIndustry(industry xlsentity.Industry) int {
	db := s.Open()
	// Deferred so the connection is released even if CheckError aborts.
	defer db.Close()
	stmt, err := db.Prepare("update csrcminorindustry set name=?, name_en=?, bigcode=? where code=?")
	util.CheckError(err)
	defer stmt.Close()
	res, err := stmt.Exec(industry.Name, industry.Name_en, industry.Parent, industry.Code)
	util.CheckError(err)
	_, reserr := res.RowsAffected()
	util.CheckError(reserr)
	return 0
}
// QueryIndustry loads the industry row with the given code. Scan errors
// (including sql.ErrNoRows) go through util.CheckError.
func (s *MinorIndustryDatabase) QueryIndustry(code int) xlsentity.Industry {
	db := s.Open()
	// Deferring db.Close fixes the original ordering, which closed the
	// connection before the deferred stmt.Close ran.
	defer db.Close()
	stmt, err := db.Prepare("select code, name, name_en, bigcode from csrcminorindustry where code = ?")
	util.CheckError(err)
	defer stmt.Close()
	var name, name_en, bigcode string
	var minorcode string
	err = stmt.QueryRow(code).Scan(&minorcode, &name, &name_en, &bigcode)
	util.CheckError(err)
	return xlsentity.Industry{
		Code: minorcode,
		Parent: bigcode,
		Name: name,
		Name_en: name_en,
	}
}
// TranInsertIndustry bulk-inserts the given industries, keyed by code,
// inside a single transaction and returns 0.
func (s *MinorIndustryDatabase) TranInsertIndustry(industries map[string] xlsentity.Industry) int {
	db := s.Open()
	defer db.Close()
	tx, err := db.Begin()
	util.CheckError(err)
	// Prepare once and reuse: the original re-prepared the statement on
	// every iteration and deferred every Close until function return.
	stmt, err := tx.Prepare("insert csrcminorindustry set code=?, name=?, name_en=?, bigcode=?")
	util.CheckError(err)
	defer stmt.Close()
	for key, industry := range industries {
		_, reserr := stmt.Exec(key, industry.Name, industry.Name_en, industry.Parent)
		util.CheckError(reserr)
	}
	err = tx.Commit()
	util.CheckError(err)
	return 0
}
// NewMinorIndustryDB builds a MinorIndustryDatabase bound to dbname.
func NewMinorIndustryDB(dbname string) *MinorIndustryDatabase {
	database := &MinorIndustryDatabase{}
	database.Init(dbname)
	return database
}
|
// Package cloudsysfs provides a function for detecting the current host's cloud provider, based on the contents of the /sys filesystem.
package cloudsysfs
import "github.com/erichs/cloudsysfs/providers"
// cloudProviders lists every provider probe. Each probe receives a
// channel and is expected to send exactly one value on it: the provider
// name when detected, or "" otherwise — TODO confirm against providers pkg.
var cloudProviders = [...]func(chan<- string){
providers.AWS,
providers.Azure,
providers.DigitalOcean,
providers.GCE,
providers.OpenStack,
}
// Detect tries to detect the current cloud provider a host is using.
// It returns a lowercase string identifying the provider if found, or empty string if none were detected.
func Detect() string {
	results := make(chan string)
	// Probe every provider concurrently; each sends exactly one value.
	for _, probe := range cloudProviders {
		go probe(results)
	}
	detected := ""
	// Collect one answer per probe so no goroutine is left blocked;
	// any non-empty response becomes the result.
	for range cloudProviders {
		if name := <-results; name != "" {
			detected = name
		}
	}
	return detected
}
|
package types
import (
sdk "github.com/cosmos/cosmos-sdk/types"
)
// BankKeeper defines the expected bank keeper: the subset of the bank
// module's API this module depends on (consumer-side interface).
type BankKeeper interface {
// AddCoins credits amt to addr — presumably returning the updated balance; confirm against the SDK version in use.
AddCoins(ctx sdk.Context, addr sdk.AccAddress, amt sdk.Coins) (sdk.Coins, error)
// SendCoins transfers amt from one account to another.
SendCoins(ctx sdk.Context, from sdk.AccAddress, to sdk.AccAddress, amt sdk.Coins) error
}
|
package prof
// SmallLarge holds two renditions of the same image: a small thumbnail
// URL and a large full-size URL — presumably URLs; TODO confirm.
type SmallLarge struct {
Small string `json:"small"`
Large string `json:"large"`
}
// profile is the JSON shape of a user profile: identity, bio, follower
// counts and artwork for the profile page.
type profile struct {
Username string `json:"username"`
FullName string `json:"full_name"`
Bio string `json:"bio"`
FollowedBy int64 `json:"followed_by"`
Follows int64 `json:"follows"`
Location string `json:"location"`
Avatars SmallLarge `json:"avatar"`
Wallpaper string `json:"wallpaper"`
}
// post is the JSON shape of a single post/artwork entry.
type post struct {
ArtID string `json:"art_id"`
Title string `json:"title"`
Desc string `json:"desc"`
LikesCount int64 `json:"likes_count"`
CommentsCount int64 `json:"comments_count"`
Tags []string `json:"tags"`
// Date is a numeric timestamp — presumably Unix seconds; TODO confirm.
Date int64 `json:"date"`
DisplaySource SmallLarge `json:"display_source"`
}
|
// Copyright 2021 Frederik Zipp. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This example was ported from:
// https://codepen.io/hakimel/pen/KanIi
// Original copyright:
//
// Copyright (c) 2021 by Hakim El Hattab (https://codepen.io/hakimel/pen/KanIi)
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// An interactive animation of rotating particles that follow the mouse or
// touch pointer.
package main
import (
"flag"
"fmt"
"log"
"math"
"math/rand"
"time"
"github.com/fzipp/canvas"
)
const (
// radius is the base orbit radius of a particle, in canvas pixels.
radius = 110
// radiusScaleMin/Max bound the interactive orbit scaling driven by
// mouse state (down eases toward max, up eases toward min).
radiusScaleMin = 1
radiusScaleMax = 1.5
// The number of particles that are used to generate the trail
quantity = 25
)
// main parses the -http flag and serves the canvas animation over
// HTTP, blocking until ListenAndServe fails.
func main() {
http := flag.String("http", ":8080", "HTTP service address (e.g., '127.0.0.1:8080' or just ':8080')")
flag.Parse()
fmt.Println("Listening on " + httpLink(*http))
err := canvas.ListenAndServe(*http, run,
canvas.Size(750, 1334),
//canvas.Size(900, 600),
canvas.ScaleFullPage(true, true),
// Subscribe only to the pointer events the demo reacts to.
canvas.EnableEvents(
canvas.MouseMoveEvent{},
canvas.MouseDownEvent{},
canvas.MouseUpEvent{},
canvas.TouchStartEvent{},
canvas.TouchMoveEvent{},
),
canvas.Reconnect(time.Second),
)
if err != nil {
log.Fatal(err)
}
}
// run is the per-connection loop: it drains pending input events and,
// when idle, renders one frame and sleeps ~16ms (about 60 fps). It
// returns when the client disconnects (CloseEvent).
func run(ctx *canvas.Context) {
d := &demo{
// Start with the pointer in the canvas center.
mouseX: float64(ctx.CanvasWidth() / 2),
mouseY: float64(ctx.CanvasHeight() / 2),
radiusScale: 1.0,
}
d.createParticles()
for {
select {
case event := <-ctx.Events():
if _, ok := event.(canvas.CloseEvent); ok {
return
}
d.handle(event)
default:
d.draw(ctx)
ctx.Flush()
time.Sleep((1000 / 60) * time.Millisecond)
}
}
}
// point is a 2D canvas coordinate.
type point struct {
x, y float64
}
// particle is one trail element orbiting the (lagged) pointer position.
type particle struct {
// position is where the particle is drawn; shift is its orbit center,
// which chases the pointer with per-particle lag.
position point
shift point
size float64
angle float64
speed float64
targetSize float64
fillColor string
// orbit is the particle's own orbit radius before global scaling.
orbit float64
}
// demo holds the whole animation state: the particles plus the current
// pointer position and button state.
type demo struct {
particles []particle
radiusScale float64
mouseX float64
mouseY float64
mouseIsDown bool
}
// createParticles (re)builds the particle slice, seeding every particle
// at the current pointer position with randomized speed, color and
// orbit radius.
func (d *demo) createParticles() {
	d.particles = make([]particle, 0, quantity)
	for i := 0; i < quantity; i++ {
		p := particle{
			position: point{x: d.mouseX, y: d.mouseY},
			shift: point{x: d.mouseX, y: d.mouseY},
			size: 1,
			angle: 0,
			speed: 0.01 + rand.Float64()*0.04,
			targetSize: 1,
			// %06x zero-pads the hex value; the original %6x would
			// space-pad, yielding an invalid CSS color for any value
			// below 0x100000. The random range used here is light
			// pastels (0xaaaaaa..0xeaaaaa).
			fillColor: "#" + fmt.Sprintf("%06x", int(rand.Float64()*0x404040+0xaaaaaa)),
			orbit: radius*.5 + (radius * .5 * rand.Float64()),
		}
		d.particles = append(d.particles, p)
	}
}
// handle updates the tracked pointer state from one canvas input event.
// Touch positions are only used while exactly one finger is down.
func (d *demo) handle(ev canvas.Event) {
switch e := ev.(type) {
case canvas.MouseMoveEvent:
d.mouseX = float64(e.X)
d.mouseY = float64(e.Y)
case canvas.MouseDownEvent:
d.mouseIsDown = true
case canvas.MouseUpEvent:
d.mouseIsDown = false
case canvas.TouchStartEvent:
if len(e.Touches) == 1 {
d.mouseX = float64(e.Touches[0].X)
d.mouseY = float64(e.Touches[0].Y)
}
case canvas.TouchMoveEvent:
if len(e.Touches) == 1 {
d.mouseX = float64(e.Touches[0].X)
d.mouseY = float64(e.Touches[0].Y)
}
}
}
// draw renders one animation frame: it eases the global orbit scale
// toward its target (max while the button is down, min otherwise),
// fades the previous frame, then advances and paints every particle as
// a line from its last position plus a dot at its new one.
func (d *demo) draw(ctx *canvas.Context) {
if d.mouseIsDown {
// Scale upward to the max scale
d.radiusScale += (radiusScaleMax - d.radiusScale) * (0.02)
} else {
// Scale downward to the min scale
d.radiusScale -= (d.radiusScale - radiusScaleMin) * (0.02)
}
d.radiusScale = math.Min(d.radiusScale, radiusScaleMax)
// Fade out the lines slowly by drawing a rectangle over the entire canvas
ctx.SetFillStyleString("rgba(0,0,0,0.05)")
ctx.FillRect(0, 0, float64(ctx.CanvasWidth()), float64(ctx.CanvasHeight()))
for i := range d.particles {
// Index into the slice so mutations stick (range value would copy).
p := &d.particles[i]
lp := point{x: p.position.x, y: p.position.y}
// Offset the angle to keep the spin going
p.angle += p.speed
// Follow mouse with some lag
p.shift.x += (d.mouseX - p.shift.x) * (p.speed)
p.shift.y += (d.mouseY - p.shift.y) * (p.speed)
// Apply position
p.position.x = p.shift.x + math.Cos(float64(i)+p.angle)*(p.orbit*d.radiusScale)
p.position.y = p.shift.y + math.Sin(float64(i)+p.angle)*(p.orbit*d.radiusScale)
// Limit to screen bounds
p.position.x = math.Max(math.Min(p.position.x, float64(ctx.CanvasWidth())), 0)
p.position.y = math.Max(math.Min(p.position.y, float64(ctx.CanvasHeight())), 0)
p.size += (p.targetSize - p.size) * 0.05
// If we're at the target size, set a new one. Think of it like a regular day at work.
if math.Round(p.size) == math.Round(p.targetSize) {
p.targetSize = 1 + rand.Float64()*7
}
ctx.BeginPath()
ctx.SetFillStyleString(p.fillColor)
ctx.SetStrokeStyleString(p.fillColor)
ctx.SetLineWidth(p.size)
ctx.MoveTo(lp.x, lp.y)
ctx.LineTo(p.position.x, p.position.y)
ctx.Stroke()
ctx.Arc(p.position.x, p.position.y, p.size/2, 0, math.Pi*2, true)
ctx.Fill()
}
}
// httpLink turns a listen address into a clickable URL, substituting
// "localhost" for an empty host (e.g. ":8080" -> "http://localhost:8080").
func httpLink(addr string) string {
	link := "http://" + addr
	if addr[0] == ':' {
		link = "http://localhost" + addr
	}
	return link
}
|
package sensu
import (
"encoding/json"
"errors"
"fmt"
"github.com/bitly/go-simplejson"
"io/ioutil"
"log"
"path/filepath"
)
// ClientConfig describes this sensu client: its identity, address and
// the check subscriptions it participates in.
type ClientConfig struct {
Name string `json:"name"`
Address string `json:"address"`
Version string `json:"version"`
Subscriptions []string `json:"subscriptions"`
}
// RabbitmqConfigSSL holds the TLS material paths for the RabbitMQ
// connection.
type RabbitmqConfigSSL struct {
PrivateKeyFile string `json:"private_key_file"`
CertChainFile string `json:"cert_chain_file"`
}
// RabbitmqConfig holds the RabbitMQ transport settings.
type RabbitmqConfig struct {
Host string `json:"host"`
Port int `json:"port"`
Vhost string `json:"vhost"`
User string `json:"user"`
Password string `json:"password"`
Ssl RabbitmqConfigSSL `json:"ssl"`
}
// Config is the merged sensu configuration. rawData retains the merged
// JSON for ad-hoc lookups via Data().
type Config struct {
Checks map[string]Check `json:"checks"`
Client ClientConfig `json:"client"`
Rabbitmq RabbitmqConfig `json:"rabbitmq"`
rawData *simplejson.Json
}
// LoadConfigs reads the main config file, merges every JSON file found
// in configDirs on top of it, and decodes the result into a validated
// Config. Unreadable files/directories are logged and skipped; an
// error is returned only when no configuration at all could be loaded
// or validation fails.
func LoadConfigs(configFile string, configDirs []string) (*Config, error) {
	js, ferr := parseFile(configFile)
	if ferr != nil {
		log.Printf("Unable to open config file: %s", ferr)
	}
	for _, dir := range configDirs {
		files, derr := ioutil.ReadDir(dir)
		if derr != nil {
			log.Printf("Unable to open config directory: %s", derr)
			continue
		}
		for _, f := range files {
			jsd, err := parseFile(filepath.Join(dir, f.Name()))
			if err != nil {
				log.Printf("Could not load %s: %s", f.Name(), err)
				continue
			}
			// The original called js.Extend here unconditionally and
			// crashed with a nil dereference whenever the main config
			// file had failed to parse; seed js from the first good
			// directory file instead.
			if js == nil {
				js = jsd
				continue
			}
			if err := js.Extend(jsd); err != nil {
				log.Printf("Error merging configs: %s", err)
			}
		}
	}
	//Reencoding merged JSON to parse to concrete type
	if nil == js {
		return nil, errors.New("There was no configuration.")
	}
	mergedJson, err := json.Marshal(js.data)
	if err != nil {
		return nil, errors.New("Unable to reencode merged json")
	}
	config := new(Config)
	// Surface decode problems instead of silently returning an empty
	// config (the original discarded this error).
	if err := json.Unmarshal(mergedJson, &config); err != nil {
		log.Printf("Unable to decode merged config: %s", err)
	}
	config.rawData, _ = simplejson.NewJson(mergedJson)
	validationErrors := validateConfig(config)
	if len(validationErrors) > 0 {
		for _, e := range validationErrors {
			fmt.Print(e)
		}
		return nil, errors.New("Error validating the configs")
	}
	return config, nil
}
// parseFile reads filename and unmarshals its JSON content into a
// fresh Json wrapper.
func parseFile(filename string) (*Json, error) {
	raw, err := ioutil.ReadFile(filename)
	if err != nil {
		return nil, fmt.Errorf("File error: %v", err)
	}
	j := new(Json)
	if err := json.Unmarshal(raw, &j.data); err != nil {
		return nil, fmt.Errorf("json error: %v", err)
	}
	return j, nil
}
// validateConfig checks cfg for required settings, returning one error
// per violation (empty slice when valid).
func validateConfig(cfg *Config) []error {
	errs := []error{}
	switch {
	case cfg.Client.Address == "":
		errs = append(errs, errors.New("Missing client address"))
	}
	return errs
}
// Data returns the raw merged configuration JSON for ad-hoc lookups
// beyond the typed fields.
func (c *Config) Data() *simplejson.Json {
return c.rawData
}
|
package main
import "github.com/davecgh/go-spew/spew"
// ListNode is a singly linked list node holding one decimal digit.
type ListNode struct {
Val int
Next *ListNode
}
// addToFront prepends a new node holding val and returns the new head.
func addToFront(val int, head *ListNode) *ListNode {
	return &ListNode{Val: val, Next: head}
}
// lenList counts the nodes reachable from l (0 for a nil list).
func lenList(l *ListNode) int {
	count := 0
	for node := l; node != nil; node = node.Next {
		count++
	}
	return count
}
// addTwoNumbers adds two non-negative integers whose digits are stored
// most-significant-first and returns the sum in the same order.
// Pass 1 walks both lists aligned by length, summing each digit column
// without carry into a reversed (least-significant-first) list; pass 2
// propagates the carry while reversing back to forward order.
func addTwoNumbers(l1 *ListNode, l2 *ListNode) *ListNode {
n1 := lenList(l1)
n2 := lenList(l2)
cur1, cur2 := l1, l2
var res *ListNode
for ;n1>0 && n2>0; {
var sum int
// Consume a digit from l1 only once the length surplus is gone;
// when the remaining lengths are equal both branches fire.
if n1>=n2 {
sum += cur1.Val
cur1 = cur1.Next
n1--
}
if n2 > n1 {
sum += cur2.Val
cur2 = cur2.Next
n2--
}
// prepend: builds the column sums in reversed order
res = addToFront(sum, res)
}
// reverse again while folding the carry into each digit
var ret *ListNode
var carry int
for cur := res; cur != nil; cur=cur.Next {
carry += cur.Val
ret = addToFront(carry%10, ret)
carry /= 10
}
if carry > 0{
ret = addToFront(carry%10, ret)
}
return ret
}
// main builds 7->2->4->3 (7243) and 5->6->4 (564), adds them and dumps
// the resulting list (expected 7->8->0->7, i.e. 7807).
func main() {
N1_1 := &ListNode{Val:7}
N1_2 := &ListNode{Val:2}
N1_3 := &ListNode{Val:4}
N1_4 := &ListNode{Val:3}
N1_1.Next = N1_2
N1_2.Next = N1_3
N1_3.Next = N1_4
N2_1 := &ListNode{Val:5}
N2_2 := &ListNode{Val:6}
N2_3 := &ListNode{Val:4}
N2_1.Next = N2_2
N2_2.Next = N2_3
spew.Dump(addTwoNumbers(N1_1, N2_1))
}
|
package assets
import (
"embed"
)
// Static holds the embedded contents of the static/ directory,
// compiled into the binary at build time.
//go:embed static
var Static embed.FS
|
package catp
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document01400101 is the catp.014.001.01 document envelope around the
// ATM deposit completion advice message.
type Document01400101 struct {
XMLName xml.Name `xml:"urn:iso:std:iso:20022:tech:xsd:catp.014.001.01 Document"`
Message *ATMDepositCompletionAdviceV01 `xml:"ATMDpstCmpltnAdvc"`
}
// AddMessage allocates the message body and returns it for population.
func (d *Document01400101) AddMessage() *ATMDepositCompletionAdviceV01 {
d.Message = new(ATMDepositCompletionAdviceV01)
return d.Message
}
// ATMDepositCompletionAdviceV01: the ATMDepositCompletionAdvice message is sent by an ATM to an acquirer or its agent to inform of the result of a deposit transaction at an ATM.
// If the ATM is configured to only send negative completion, a generic completion message should be used instead of ATMCompletionAdvice.
// This message can be used each time a bundle is put in the ATM safe and/or at the end of a multi bundle deposit.
type ATMDepositCompletionAdviceV01 struct {
// Information related to the protocol management on a segment of the path from the ATM to the acquirer.
Header *iso20022.Header32 `xml:"Hdr"`
// Encrypted body of the message.
ProtectedATMDepositCompletionAdvice *iso20022.ContentInformationType10 `xml:"PrtctdATMDpstCmpltnAdvc,omitempty"`
// Information related to the completion of a deposit transaction on the ATM.
ATMDepositCompletionAdvice *iso20022.ATMDepositCompletionAdvice1 `xml:"ATMDpstCmpltnAdvc,omitempty"`
// Trailer of the message containing a MAC.
SecurityTrailer *iso20022.ContentInformationType15 `xml:"SctyTrlr,omitempty"`
}
// AddHeader allocates the protocol header and returns it for population.
func (a *ATMDepositCompletionAdviceV01) AddHeader() *iso20022.Header32 {
a.Header = new(iso20022.Header32)
return a.Header
}
// AddProtectedATMDepositCompletionAdvice allocates the encrypted body
// and returns it for population.
func (a *ATMDepositCompletionAdviceV01) AddProtectedATMDepositCompletionAdvice() *iso20022.ContentInformationType10 {
a.ProtectedATMDepositCompletionAdvice = new(iso20022.ContentInformationType10)
return a.ProtectedATMDepositCompletionAdvice
}
// AddATMDepositCompletionAdvice allocates the clear-text advice body
// and returns it for population.
func (a *ATMDepositCompletionAdviceV01) AddATMDepositCompletionAdvice() *iso20022.ATMDepositCompletionAdvice1 {
a.ATMDepositCompletionAdvice = new(iso20022.ATMDepositCompletionAdvice1)
return a.ATMDepositCompletionAdvice
}
// AddSecurityTrailer allocates the MAC trailer and returns it for
// population.
func (a *ATMDepositCompletionAdviceV01) AddSecurityTrailer() *iso20022.ContentInformationType15 {
a.SecurityTrailer = new(iso20022.ContentInformationType15)
return a.SecurityTrailer
}
|
package 待分类
import "fmt"
// notifier is implemented by anything that can deliver a notification.
type notifier interface {
notify()
}
// human is a person identified by name and contact e-mail.
type human struct {
name string
email string
}
// notify satisfies notifier. Because the receiver is a pointer, only
// *human (not a bare human value) implements the interface.
func (h *human) notify() {
	message := "Sending human email to " + h.name + "<" + h.email + ">"
	fmt.Println(message)
}
// main demonstrates Go method sets: a value whose type implements an
// interface only via a pointer receiver does not itself satisfy the
// interface, but its address does.
func main() {
h := human{"Tom", "110@qq.com"}
//sendNotification(h)
/*
# command-line-arguments
src\demo12.go:19:18: cannot use h (type human) as type notifier in argument to sendNotification:
human does not implement notifier (notify method has pointer receiver)
h is a value, so its method set contains only value-receiver methods.
notify() has a pointer receiver, hence h's method set lacks notify()
and h does not implement notifier.
*/
sendNotification(&h) // &h is a pointer: its method set includes both value- and pointer-receiver methods, so &h implements notifier
/* Interface implementation rules
3. Method sets
1. Per the Go spec: the method set of T consists of its value-receiver
methods; the method set of *T consists of both value- and
pointer-receiver methods.
Values Methods Receivers
T (t T)
*T (t T) and (t *T)
2. Seen from the receiver side:
implementing with a pointer receiver (*x): only *T satisfies the interface
implementing with a value receiver (x): both T and *T satisfy it
Methods Receivers Values
(t T) T and *T
(t *T) *T
3. Why these rules exist
1. The compiler can auto-reference/dereference (convert between
value and pointer) in many cases,
1. but it cannot always take the address of a value ~ see demo13
2. so a value's method set only includes value-receiver methods
*/
}
// sendNotification fires notify on any notifier implementation.
func sendNotification(h notifier) {
h.notify()
}
|
package main
import (
"fmt"
"math/rand"
"ms/sun_old/base"
"ms/sun/shared/helper"
"ms/sun/shared/x"
"time"
)
// i4 counts generated rows for progress logging.
// NOTE(review): incremented from six goroutines without synchronization
// — this is a data race; the count is best-effort only.
var i4 = 0
// main connects to MySQL and runs six concurrent fanout writers for an
// hour before exiting.
func main() {
base.DefultConnectToMysql()
for i := 0; i < 6; i++ {
go f4()
}
// Crude lifetime bound: let the writers run for one hour.
time.Sleep(time.Hour)
}
// f4 endlessly builds batches of 100 random HomeFanout rows and bulk
// inserts them via MassReplace_HomeFanout. Intended to run on several
// goroutines as a write-load generator.
func f4() {
	for {
		// Capacity matches the 100 appends below; the original sized it
		// at 50, forcing a mid-loop reallocation every batch.
		arr := make([]x.HomeFanout, 0, 100)
		for i := 0; i < 100; i++ {
			p := x.HomeFanout{
				OrderId: helper.NextRowsSeqId(),
				ForUserId: rand.Intn(10000) + 1,
				PostId: helper.NextRowsSeqId(),
			}
			// NOTE(review): i4 is shared across goroutines without
			// synchronization — racy progress counter, kept as-is.
			i4++
			if i4%1000 == 0 {
				fmt.Println(i4)
			}
			arr = append(arr, p)
		}
		x.MassReplace_HomeFanout(arr, base.DB)
	}
}
|
package module
import (
"backend/src/global"
"gorm.io/gorm"
"time"
)
// User is the "user" table model.
type User struct {
BaseTable
// user name (login)
UserName string `json:"userName" form:"userName"`
// password
Password string `json:"password" form:"password"`
// password salt
Salt string `json:"salt" form:"salt"`
// nickname
NickName string `json:"nickName" form:"nickName"`
// e-mail address
UserMail string `json:"userMail" form:"userMail"`
// privileges
Privileges string `json:"privileges" form:"privileges"`
// login token
LoginToken string `json:"loginToken" form:"loginToken"`
// time the login token was generated
LoginTokenTime time.Time `json:"loginTokenTime" form:"loginTokenTime"`
// state (0: initial, 1: disabled, 2: active)
State uint `json:"state" form:"state"`
// parent user
ParentName string `json:"parentName" form:"parentName"`
}
// TableName maps this model to the "user" table for gorm.
func (user User) TableName() string {
return "user"
}
// Save persists this user via gorm's Save (insert-or-update). The
// original comment mislabeled this as "table name".
func (user User) Save(db *gorm.DB) {
db.Save(user)
}
// GetUser loads the first user whose UserName matches.
// NOTE(review): when no row matches, this still returns a pointer to a
// zero-value User — callers must check a field (e.g. non-empty
// UserName) to detect "not found".
func GetUser(userName string) *User {
var dbUser User
global.DataBase.Where(&User{UserName: userName}).First(&dbUser)
return &dbUser
}
|
package main
import (
	"fmt"
	"log"
	"net/http"

	"github.com/mgarmuno/mediaWebServer/server/api/file"
	"github.com/mgarmuno/mediaWebServer/server/api/omdb"
	"github.com/mgarmuno/mediaWebServer/server/data"
)
// main wires the static file server and the API handlers, then serves
// HTTP on :8080 for the lifetime of the process.
func main() {
	initialChecks()
	fs := http.FileServer(http.Dir("client"))
	http.Handle("/", fs)
	http.Handle("/api/omdb/", &omdb.OmdbAPI{})
	http.Handle("/api/file/", &file.FileUploadAPI{})
	fmt.Println("Serving...")
	// ListenAndServe always returns a non-nil error; the original
	// silently dropped it, so startup failures went unreported.
	log.Fatal(http.ListenAndServe(":8080", nil))
}
// initialChecks verifies the backing database before serving requests.
func initialChecks() {
data.CheckDatabase()
}
|
package agent
import (
"errors"
"fmt"
"sync"
"time"
"golang.org/x/net/context"
"github.com/Sirupsen/logrus"
"github.com/bryanl/dolb/kvs"
)
var (
// checkTTL is the time to live for cluster member keys
// (kvs registrations expire unless refreshed within this window).
checkTTL = 10 * time.Second
// ErrClusterNotJoined is returned when this agent has not joined a cluster.
ErrClusterNotJoined = errors.New("agent has not joined the cluster")
// ErrClusterJoined is returned when this agent has already joined a cluster.
ErrClusterJoined = errors.New("agent has already joined the cluster")
)
// RegisterError is a cluster registration error: it pairs the agent
// name with the underlying kvs failure.
type RegisterError struct {
name string
err error
}
// Error formats the registration failure, quoting the agent name and
// appending the underlying cause.
func (re *RegisterError) Error() string {
return fmt.Sprintf("unable to register agent %q: %v", re.name, re.err)
}
// ClusterStatus is the status of the cluster as seen by this member:
// current leader, whether this member is that leader, and the size.
type ClusterStatus struct {
FloatingIP string `json:"floating_ip"`
Leader string `json:"leader"`
IsLeader bool `json:"is_leader"`
NodeCount int `json:"node_count"`
}
// ClusterMember is an agent cluster membership. It registers the agent
// in the kvs and keeps the registration alive while tracking leader
// changes. Leader (and ideally NodeCount) are guarded by mu.
type ClusterMember struct {
cmKVS *kvs.Cluster
context context.Context
name string
root string
Leader string
NodeCount int
started bool
// modifiedIndex is the kvs index of this member's key, needed by Refresh.
modifiedIndex uint64
// schedule/poll/refresh are injectable for tests; production wiring
// uses the package-level functions of the same names.
schedule func(*ClusterMember, string, scheduleFn, time.Duration)
poll func(el *ClusterMember) error
refresh func(el *ClusterMember) error
logger *logrus.Entry
mu sync.Mutex
}
// NewClusterMember builds a ClusterMember named name, wiring the
// default poll/refresh/schedule behaviors and a per-member logger.
func NewClusterMember(name string, config *Config) *ClusterMember {
return &ClusterMember{
cmKVS: kvs.NewCluster(config.KVS, checkTTL),
context: config.Context,
logger: logrus.WithFields(logrus.Fields{
"member-name": name,
}),
name: name,
refresh: refresh,
schedule: schedule,
poll: poll,
}
}
// Change creates a channel that outputs the current cluster status
// whenever the leader changes. The watcher goroutine polls every 250ms
// and exits (stopping its ticker) when the member's context is
// canceled.
func (cm *ClusterMember) Change() chan ClusterStatus {
	t := time.NewTicker(time.Millisecond * 250)
	out := make(chan ClusterStatus, 1)
	leader := cm.Leader
	go func() {
		defer t.Stop()
		for {
			select {
			case <-t.C:
				// Snapshot shared fields under the lock (the original
				// read NodeCount unguarded).
				cm.mu.Lock()
				currentLeader := cm.Leader
				nodeCount := cm.NodeCount
				cm.mu.Unlock()
				if leader != currentLeader {
					cs := ClusterStatus{
						IsLeader: cm.isLeader(),
						Leader: currentLeader,
						NodeCount: nodeCount,
					}
					leader = currentLeader
					out <- cs
				}
			case <-cm.context.Done():
				// The original used `break`, which only exits the
				// select and left this goroutine spinning on the
				// canceled context forever; return instead.
				return
			}
		}
	}()
	return out
}
// isLeader reports whether this member currently believes it is the
// cluster leader; the leader name is read under the member lock.
func (cm *ClusterMember) isLeader() bool {
	cm.mu.Lock()
	current := cm.Leader
	cm.mu.Unlock()
	return current == cm.name
}
// key returns this member's kvs key: the root prefix plus the member
// name.
func (cm *ClusterMember) key() string {
return cm.root + cm.name
}
// Start starts a cluster membership process: it registers the agent in
// the kvs, records the key's modified index for later refreshes, and
// launches the poll and refresh schedules. Returns ErrClusterJoined if
// already started.
// NOTE(review): cm.started is written here and read by the schedule
// goroutines without the mutex — confirm whether this race matters.
func (cm *ClusterMember) Start() error {
if cm.started {
return ErrClusterJoined
}
cm.started = true
mi, err := cm.cmKVS.RegisterAgent(cm.name)
if err != nil {
return &RegisterError{err: err, name: cm.name}
}
cm.modifiedIndex = mi
// Refresh at half the TTL so the key cannot expire between renewals.
go cm.schedule(cm, "poll", poll, time.Second)
go cm.schedule(cm, "refresh", refresh, cm.cmKVS.CheckTTL/2)
return nil
}
// Stop stops a cluster membership process by clearing the started
// flag; the schedule loops observe it and shut themselves down. Returns
// ErrClusterNotJoined if not started.
func (cm *ClusterMember) Stop() error {
if !cm.started {
return ErrClusterNotJoined
}
cm.started = false
return nil
}
// scheduleFn is a periodic action run against a ClusterMember.
type scheduleFn func(*ClusterMember) error
// schedule runs fn every timeout until the member is stopped or fn
// returns an error.
func schedule(cm *ClusterMember, name string, fn scheduleFn, timeout time.Duration) {
logger := cm.logger.WithField("cluster-action", name)
t := time.NewTicker(timeout)
quit := make(chan struct{})
for {
// Polled stop flag: once Stop() clears cm.started, the loop tears
// down its ticker and exits on the next iteration.
if !cm.started {
t.Stop()
close(quit)
break
}
select {
case <-t.C:
err := fn(cm)
if err != nil {
logger.WithError(err).Error("could not run scheduled item")
t.Stop()
close(quit)
}
case <-quit:
// quit was closed by the error branch above; receiving on the
// closed channel drives this final shutdown path.
logger.Info("shutting down")
return
}
}
}
// poll reads the current cluster leader from the kvs and updates the
// member's cached Leader and NodeCount, emitting one log line when
// either value changed.
func poll(cm *ClusterMember) error {
	leader, err := cm.cmKVS.Leader()
	if err != nil {
		return err
	}
	logMsg := cm.logger
	shouldLog := false
	cm.mu.Lock()
	if l := leader.Name; cm.Leader != l {
		logMsg = logMsg.WithField("leader", l)
		cm.Leader = l
		shouldLog = true
	}
	// NodeCount is updated under the same lock as Leader; the original
	// mutated it after Unlock, racing with concurrent readers.
	if nc := leader.NodeCount; cm.NodeCount != nc {
		logMsg = logMsg.WithField("node-count", nc)
		cm.NodeCount = nc
		shouldLog = true
	}
	cm.mu.Unlock()
	if shouldLog {
		logMsg.Info("cluster updated")
	}
	return nil
}
// refresh renews this member's kvs registration and records the key's
// new modified index for the next renewal.
func refresh(cm *ClusterMember) error {
	index, err := cm.cmKVS.Refresh(cm.name, cm.modifiedIndex)
	if err != nil {
		return err
	}
	cm.modifiedIndex = index
	return nil
}
|
package config
// Config is the configuration struct. The `default` tags suggest it is
// populated by a tag-driven config loader — TODO confirm which library.
type Config struct {
LogLevel string `default:"debug"`
CoSign CoSignConfig
Address string `default:"0.0.0.0:8080"`
}
// CoSignConfig contains the credentials for info exposed through the
// webapi; both fields are mandatory per their `required` tags.
type CoSignConfig struct {
Name string `required:"true"`
Password string `required:"true"`
}
|
package controllers
import (
"github.com/astaxie/beego"
"reporter/models"
"didapinche.com/db_util"
"gotool/generator"
"github.com/astaxie/beego/config"
"reporter/vo"
"com.didapinche.go.commons/log"
"fmt"
)
// MainController serves the report query page via beego.
type MainController struct {
beego.Controller
}
// Post handles the report form: it builds a DB connection from
// conf/app.conf, queries income records for the submitted phone number
// and date, and renders index.tpl with the results.
func (c *MainController) Post() {
	iniconf, err := config.NewConfig("ini", "conf/app.conf")
	generator.CheckErr(err)
	connProp := db_util.ConnProp{iniconf.String("mysql.host"), iniconf.String("mysql.port"),
		"didapinche", "", iniconf.String("mysql.username"), iniconf.String("mysql.password")}
	form := vo.Form{c.Input().Get("PhoneNo"), c.Input().Get("LastDate")}
	// The original format string had one %s for two arguments, which
	// rendered the date as %!(EXTRA ...) noise in the log.
	log.Debugf("user income:%s %s", form.PhoneNo, form.LastDate)
	list, total := models.Query(connProp, form)
	c.Data["list"] = list
	c.Data["total"] = fmt.Sprintf("%.2f", total)
	c.Data["phone"] = form.PhoneNo
	c.Data["Website"] = "beego.me"
	c.Data["Email"] = "astaxie@gmail.com"
	c.TplName = "index.tpl"
}
|
package main
import (
"fmt"
"sort"
"strings"
"github.com/bwmarrin/discordgo"
)
// getRole reports whether user u holds the guild role named roleName in
// the guild message m was sent in. Discord API failures are printed to
// stdout and treated as "does not have the role".
func getRole(s *discordgo.Session, u *discordgo.User, m *discordgo.Message, roleName string) bool {
// see what the user permission level is
roles, err := s.GuildRoles(m.GuildID)
if err != nil {
fmt.Printf("Error getting roles: %s", err)
return false
}
mem, err := s.GuildMember(m.GuildID, u.ID)
if err != nil {
fmt.Printf("Error getting members: %s", err)
return false
}
// Match the role by name, then check membership by role ID.
for _, role := range roles {
if role.Name == roleName {
// check and see if user has role
for _, v := range mem.Roles {
if role.ID == v {
return true
}
}
}
}
return false
}
// getAdmin reports whether the message author holds the configured
// moderator role.
func getAdmin(s *discordgo.Session, m *discordgo.MessageCreate) bool {
return getRole(s, m.Author, m.Message, loadedConfigData.ModRoleName)
}
// getOps reports whether the message author appears in the configured
// list of ops user IDs.
func getOps(s *discordgo.Session, m *discordgo.MessageCreate) bool {
	// bleh O(n) lookup. use a map
	for _, opsID := range loadedConfigData.OpsUsers {
		if m.Author.ID == opsID {
			return true
		}
	}
	return false
}
// parseUpdateRole parses a role add/remove command. An admin who
// mentions a user edits that user's role ("<prefix>cmd @user role");
// otherwise the author edits their own role ("<prefix>cmd role").
// removing selects removal vs addition.
func parseUpdateRole(s *discordgo.Session, m *discordgo.MessageCreate, removing bool) {
// if an admin is modifying another persons' role
if getAdmin(s, m) && len(m.Mentions) > 0 {
// Expect exactly: command, mention, role name.
split := strings.SplitN(strings.TrimPrefix(m.Content, loadedConfigData.Prefix), " ", 3)
if len(split) != 3 {
s.ChannelMessageSend(m.ChannelID, "Make sure to specify a role after the mention.")
return
}
role := split[2]
updateRole(m, m.Mentions[0], s, role, removing)
return
}
split := strings.SplitN(strings.TrimPrefix(m.Content, loadedConfigData.Prefix), " ", 2)
if len(split) < 2 {
s.ChannelMessageSend(m.ChannelID, "Make sure to specify a role.")
return
}
role := split[1]
updateRole(m, m.Author, s, role, removing)
}
// updateRole adds or removes the named guild role on user u, after
// confirming the command author is allowed to touch that role. Role
// name matching is case-insensitive; outcomes are reported back to the
// originating channel.
func updateRole(m *discordgo.MessageCreate, u *discordgo.User, s *discordgo.Session, roleName string, removing bool) {
// first see if the user has permissions to change the role
availableRoles, err := getAvailableRoles(s, m, m.Author)
if err != nil {
fmt.Printf("Error getting avialable roles: %s", err)
return
}
roleName = strings.ToLower(roleName)
found := false
for _, role := range availableRoles {
if roleName == strings.ToLower(role.Name) {
found = true
}
}
if !found {
s.ChannelMessageSend(m.ChannelID, fmt.Sprintf("You don't have permission to change role ``%s``.", roleName))
return
}
g, err := s.Guild(m.GuildID)
if err != nil {
fmt.Printf("Error getting guild: %s", err)
return
}
rs, err := s.GuildRoles(m.GuildID)
if err != nil {
fmt.Printf("Error getting roles: %s", err)
return
}
// Resolve the role name to its ID, then apply the change.
for _, role := range rs {
if strings.ToLower(role.Name) == roleName {
if removing {
err := s.GuildMemberRoleRemove(g.ID, u.ID, role.ID)
if err != nil {
// NOTE(review): the underlying err is not included in this output.
fmt.Printf("Error removing role %s to %s", roleName, m.Author.Username)
return
}
s.ChannelMessageSend(m.ChannelID, fmt.Sprintf("Role ``%s`` successfully removed.", roleName))
return
}
err := s.GuildMemberRoleAdd(g.ID, u.ID, role.ID)
if err != nil {
// NOTE(review): the underlying err is not included in this output.
fmt.Printf("Error adding role %s to %s", roleName, m.Author.Username)
return
}
s.ChannelMessageSend(m.ChannelID, fmt.Sprintf("Role ``%s`` successfully added.", roleName))
return
}
}
s.ChannelMessageSend(m.ChannelID, fmt.Sprintf("Role ``%s`` not found. Please try again or contact an admin.", roleName))
}
// getGuildRoles fetches the guild's roles ordered by descending
// position (highest-ranked role first).
func getGuildRoles(s *discordgo.Session, m *discordgo.MessageCreate) ([]*discordgo.Role, error) {
	roles, err := s.GuildRoles(m.GuildID)
	if err != nil {
		return nil, err
	}
	higherFirst := func(i, j int) bool { return roles[i].Position > roles[j].Position }
	sort.SliceStable(roles, higherFirst)
	return roles, nil
}
// getAvailableRoles returns the guild roles the command author may
// self-assign: the configured user roles, plus the admin roles when the
// author is a moderator. Order follows getGuildRoles (highest first).
// NOTE(review): the u parameter is unused — permissions are derived
// from m.Author via getAdmin; confirm that is intended.
func getAvailableRoles(s *discordgo.Session, m *discordgo.MessageCreate, u *discordgo.User) ([]*discordgo.Role, error) {
admin := getAdmin(s, m)
roles := []*discordgo.Role{}
guildRoles, err := getGuildRoles(s, m)
if err != nil {
return nil, err
}
for _, role := range guildRoles {
if itemInSlice(role.Name, loadedConfigData.EnabledRoles.UserRoles) {
roles = append(roles, role)
}
if admin && itemInSlice(role.Name, loadedConfigData.EnabledRoles.AdminRoles) {
roles = append(roles, role)
}
}
return roles, nil
}
// setMuted adds the configured muted role to u when muting is true and
// removes it otherwise. API failures are printed and returned.
func setMuted(s *discordgo.Session, m *discordgo.MessageCreate, u *discordgo.User, muting bool) error {
	// if muted is true than mute the user otherwise unmute/remove the muted role
	var err error
	if muting {
		err = s.GuildMemberRoleAdd(m.GuildID, u.ID, loadedConfigData.MutedRole)
	} else {
		err = s.GuildMemberRoleRemove(m.GuildID, u.ID, loadedConfigData.MutedRole)
	}
	if err != nil {
		fmt.Printf("Unable to change muted status of user: %s", err)
	}
	return err
}
|
// Copyright (c) 2020 Blockwatch Data Inc.
// Author: alex@blockwatch.cc
package models
import (
"math"
"sync"
"tezos_index/chain"
)
// incomePool recycles Income rows to cut allocation churn when large
// batches of per-cycle income records are built and released.
var incomePool = &sync.Pool{
New: func() interface{} { return new(Income) },
}
// Income is a per-cycle income sheet for baker accounts.
// Snapshot fields are captured at the cycle's snapshot block; the
// percentage fields are fixed-point with two implied decimals
// (10000 == 100.00%). All monetary amounts are in mutez (int64).
type Income struct {
	RowId                  uint64    `gorm:"primary_key;index;column:row_id" json:"row_id"`
	Cycle                  int64     `gorm:"column:cycle;index:cyacc" json:"cycle"` // this income cycle (=snapshot+7)
	AccountId              AccountID `gorm:"column:account_id;index:cyacc" json:"account_id"`
	Rolls                  int64     `gorm:"column:rolls" json:"rolls"`         // at snapshot block
	Balance                int64     `gorm:"column:balance" json:"balance"`     // at snapshot block
	Delegated              int64     `gorm:"column:delegated" json:"delegated"` // at snapshot block
	NDelegations           int64     `gorm:"column:n_delegations" json:"n_delegations"` // at snapshot block
	NBakingRights          int64     `gorm:"column:n_baking_rights" json:"n_baking_rights"`
	NEndorsingRights       int64     `gorm:"column:n_endorsing_rights" json:"n_endorsing_rights"`
	Luck                   int64     `gorm:"column:luck" json:"luck"`                   // in coins based on fair share by rolls
	LuckPct                int64     `gorm:"column:luck_percent" json:"luck_percent"`   // 0.0 .. +N.00 based on fair share by rolls
	ContributionPct        int64     `gorm:"column:contribution_percent" json:"contribution_percent"` // 0.0 .. +N.00 based on rights utilized
	PerformancePct         int64     `gorm:"column:performance_percent" json:"performance_percent"`   // -N.00 .. +N.00 based on expected income
	NBlocksBaked           int64     `gorm:"column:n_blocks_baked" json:"n_blocks_baked"`
	NBlocksLost            int64     `gorm:"column:n_blocks_lost" json:"n_blocks_lost"`
	NBlocksStolen          int64     `gorm:"column:n_blocks_stolen" json:"n_blocks_stolen"`
	NSlotsEndorsed         int64     `gorm:"column:n_slots_endorsed" json:"n_slots_endorsed"`
	NSlotsMissed           int64     `gorm:"column:n_slots_missed" json:"n_slots_missed"`
	NSeedsRevealed         int64     `gorm:"column:n_seeds_revealed" json:"n_seeds_revealed"`
	ExpectedIncome         int64     `gorm:"column:expected_income" json:"expected_income"`
	ExpectedBonds          int64     `gorm:"column:expected_bonds" json:"expected_bonds"`
	TotalIncome            int64     `gorm:"column:total_income" json:"total_income"`
	TotalBonds             int64     `gorm:"column:total_bonds" json:"total_bonds"`
	BakingIncome           int64     `gorm:"column:baking_income" json:"baking_income"`
	EndorsingIncome        int64     `gorm:"column:endorsing_income" json:"endorsing_income"`
	DoubleBakingIncome     int64     `gorm:"column:double_baking_income" json:"double_baking_income"`
	DoubleEndorsingIncome  int64     `gorm:"column:double_endorsing_income" json:"double_endorsing_income"`
	SeedIncome             int64     `gorm:"column:seed_income" json:"seed_income"`
	FeesIncome             int64     `gorm:"column:fees_income" json:"fees_income"`
	MissedBakingIncome     int64     `gorm:"column:missed_baking_income" json:"missed_baking_income"`       // from lost blocks
	MissedEndorsingIncome  int64     `gorm:"column:missed_endorsing_income" json:"missed_endorsing_income"` // from missed endorsements
	StolenBakingIncome     int64     `gorm:"column:stolen_baking_income" json:"stolen_baking_income"`       // from others
	TotalLost              int64     `gorm:"column:total_lost" json:"total_lost"` // from all denounciations and missed seed nonce revelations
	LostAccusationFees     int64     `gorm:"column:lost_accusation_fees" json:"lost_accusation_fees"`         // from denounciations
	LostAccusationRewards  int64     `gorm:"column:lost_accusation_rewards" json:"lost_accusation_rewards"`   // from denounciations
	LostAccusationDeposits int64     `gorm:"column:lost_accusation_deposits" json:"lost_accusation_deposits"` // from denounciations
	LostRevelationFees     int64     `gorm:"column:lost_revelation_fees" json:"lost_revelation_fees"`         // from missed seed nonce revelations
	LostRevelationRewards  int64     `gorm:"column:lost_revelation_rewards" json:"lost_revelation_rewards"`   // from missed seed nonce revelations
}
// NewIncome returns an Income from the pool; callers must release it
// with Free when done to keep the pool effective.
func NewIncome() *Income {
	return allocIncome()
}
// allocIncome fetches a (possibly recycled) Income from incomePool.
func allocIncome() *Income {
	return incomePool.Get().(*Income)
}
// Free zeroes the struct and returns it to the pool. The value must not
// be used after calling Free.
func (s *Income) Free() {
	s.Reset()
	incomePool.Put(s)
}
// ID returns the row id (value receiver so both Income and *Income
// satisfy any id-getter interface).
func (s Income) ID() uint64 {
	return uint64(s.RowId)
}
// SetID stores the database row id assigned to this record.
func (s *Income) SetID(id uint64) {
	s.RowId = id
}
// Reset zeroes every field so the struct can be safely reused from the
// pool. Assigning the zero value covers all fields — including any added
// to Income later — which the previous manual field-by-field reset could
// silently miss.
func (s *Income) Reset() {
	*s = Income{}
}
// UpdateLuck computes Luck and LuckPct for this income sheet. Luck is the
// difference between the baker's expected income (from assigned rights)
// and the "fair" income its roll share would earn on average; LuckPct is
// the same expressed as fixed-point percent where 10000 == 100.00%.
// NOTE(review): totalRolls == 0 yields an Inf/NaN rollsShare — confirm
// callers guard against an empty roll snapshot.
func (s *Income) UpdateLuck(totalRolls int64, p *chain.Params) {
	// fraction of all rolls
	rollsShare := float64(s.Rolls) / float64(totalRolls)
	// fair number of blocks, rounded to nearest (math.Round, not truncated)
	fairBakingShare := int64(math.Round(rollsShare * float64(p.BlocksPerCycle)))
	// fair number of endorsement slots, rounded to nearest
	fairEndorsingShare := int64(math.Round(rollsShare * float64(p.BlocksPerCycle) * float64(p.EndorsersPerBlock)))
	// fair income as a multiple of blocks and endorsements
	fairIncome := fairBakingShare * p.BlockReward
	fairIncome += fairEndorsingShare * p.EndorsementReward
	// diff between expected and fair (positive when higher, negative when lower)
	s.Luck = s.ExpectedIncome - fairIncome
	// absolute luck as expected vs fair income where 100% is the ideal case
	// =100%: fair == expected (luck == 0)
	// <100%: fair > expected (luck < 0)
	// >100%: fair < expected (luck > 0)
	if fairIncome > 0 {
		s.LuckPct = 10000 + s.Luck*10000/fairIncome
	}
}
|
/**
* day 05 2020
* https://adventofcode.com/2020/day/5
*
* compile: go build main.go
* run: ./main < input
* compile & run: go run main.go < input
**/
package main
import (
"bufio"
"fmt"
"os"
"sort"
)
// upper7Bits extracts the row portion of a seat code: bits 3..9,
// i.e. the seven 'F'/'B' characters of the boarding pass.
func upper7Bits(b int) int {
	return (b & 0x3F8) >> 3
}
// lower3bits extracts the column portion of a seat code: the low three
// bits, i.e. the 'L'/'R' characters of the boarding pass.
func lower3bits(b int) int {
	return b & 0b111
}
// toBin interprets a boarding-pass string as a binary number, MSB first:
// 'B' and 'R' are 1-bits, every other character ('F'/'L') is a 0-bit.
func toBin(s string) int {
	result := 0
	for _, ch := range s {
		bit := 0
		if ch == 'B' || ch == 'R' {
			bit = 1
		}
		result = result*2 + bit
	}
	return result
}
// findMyTicket sorts the seat IDs and returns the single missing ID:
// if tkts[i] is 713 and the next ticket is 715, then 714 is our seat.
// Returns 0 when no gap exists.
//
// Fix: the loop previously ran while i < len(tkts) and then read
// tkts[i+1], which panicked with an index-out-of-range whenever the list
// had no gap (or the gap was at the very end). The bound is now len-1.
func findMyTicket(tkts []int) int {
	sort.Ints(tkts)
	// start 5 tickets in to skip ragged IDs at the front of the plane
	for i := 5; i < len(tkts)-1; i++ {
		if tkts[i+1] != tkts[i]+1 {
			return tkts[i] + 1
		}
	}
	return 0
}
// main reads boarding passes from stdin, prints the highest seat ID
// (part 1) and the single unoccupied seat (part 2).
func main() {
	highest := 0
	var seats []int // every decoded seat ID, reused for part 2
	in := bufio.NewScanner(os.Stdin)
	for in.Scan() {
		code := toBin(in.Text())
		// seat ID is row*8 + col, which reassembles the same bits
		id := upper7Bits(code)*8 + lower3bits(code)
		// part 1 - track the largest seat number
		if id > highest {
			highest = id
		}
		seats = append(seats, id)
	}
	// part 2
	fmt.Println("part 1:", highest)
	fmt.Println("part 2:", findMyTicket(seats))
}
|
package main
import (
"github.com/gin-gonic/gin"
"github.com/gin-gonic/contrib/static"
"fmt"
// "database/sql"
_ "github.com/lib/pq"
)
// Postgres connection settings used by the (currently commented-out)
// database code in main.
const (
	host = "localhost"
	port = 5432
	user = "user"
	password= "mysecretpassword"
	dbname = "test"
)
// main starts a gin HTTP server on :5000 serving the ./dist SPA at the
// web root plus a /ping health endpoint. The commented sections are a
// work-in-progress Postgres integration left for reference.
func main() {
	/*
		psqlInfo := fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s sslmode=disable",host,port,user,password,dbname)
		db, err := sql.Open("postgres",psqlInfo)
		if err != nil {
			panic(err)
		}
		defer db.Close()
		err = db.Ping()
		if err != nil{
			panic(err)
		}
		fmt.Println("Successfully connected!")
	*/
	//sqlStatement := `INSERT INTO news (title,author,content) values ($1,$2,$3) returning id`
	//sqlStatement1 := `insert into "news" (title,author,content) values ("2","3","4");`
	//id := 0
	//s2 := `select * from news`
	//fmt.Println(db.Query(s2))
	/*
		err = db.QueryRow(sqlStatement1).Scan(&id)
		if err != nil {
			panic(err)
		}
		fmt.Println("New record ID is:", id)
	*/
	/*
		id := 0
		err = db.QueryRow(sqlStatement, "first title","yale","this is an article content").Scan(&id)
		if err != nil {
			panic(err)
		}
		fmt.Println("New record ID is:", id)
	*/
	r := gin.Default()
	// serve the pre-built frontend bundle at the web root
	r.Use(static.Serve("/", static.LocalFile("./dist",true)))
	/*
		r.GET("/web",func(c *gin.Context){
			fmt.Printf("in /web918")
			c.HTML(http.StatusOK,"index.html",gin.H{
				"title":"Main website",
			})
		})
	*/
	// simple liveness probe
	r.GET("/ping", func(c *gin.Context) {
		fmt.Printf("http connect")
		c.JSON(200, gin.H{
			"message": "pong",
		})
	})
	r.Run(":5000")
}
|
package mqtt
import (
"context"
"io/ioutil"
"time"
mqtt "github.com/eclipse/paho.mqtt.golang"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/batchcorp/collector-schemas/build/go/protos/events"
"github.com/batchcorp/plumber-schemas/build/go/protos/args"
"github.com/batchcorp/plumber-schemas/build/go/protos/opts"
"github.com/batchcorp/plumber-schemas/build/go/protos/records"
"github.com/batchcorp/plumber/tools/mqttfakes"
"github.com/batchcorp/plumber/tunnel/tunnelfakes"
"github.com/batchcorp/plumber/validate"
)
// Ginkgo suite for the MQTT backend: covers tunnel-option validation and
// the Tunnel replay loop against faked MQTT client/token and tunnel types.
var _ = Describe("MQTT Backend", func() {
	var tunnelOpts *opts.TunnelOptions
	// fresh, fully-populated options before every spec
	BeforeEach(func() {
		tunnelOpts = &opts.TunnelOptions{
			Mqtt: &opts.TunnelGroupMQTTOptions{
				Args: &args.MQTTWriteArgs{
					Topic:               "test",
					WriteTimeoutSeconds: 1,
				},
			},
		}
	})
	// each spec blanks one layer of the options to hit one validation branch
	Context("validateTunnelOptions", func() {
		It("validates nil tunnel options", func() {
			err := validateTunnelOptions(nil)
			Expect(err).To(HaveOccurred())
			Expect(err).To(Equal(validate.ErrEmptyTunnelOpts))
		})
		It("validates nil backend group", func() {
			tunnelOpts.Mqtt = nil
			err := validateTunnelOptions(tunnelOpts)
			Expect(err).To(HaveOccurred())
			Expect(err).To(Equal(validate.ErrEmptyBackendGroup))
		})
		It("validates empty backend args", func() {
			tunnelOpts.Mqtt.Args = nil
			err := validateTunnelOptions(tunnelOpts)
			Expect(err).To(HaveOccurred())
			Expect(err).To(Equal(validate.ErrEmptyBackendArgs))
		})
		It("validates empty topic", func() {
			tunnelOpts.Mqtt.Args.Topic = ""
			err := validateTunnelOptions(tunnelOpts)
			Expect(err).To(HaveOccurred())
			Expect(err).To(Equal(ErrEmptyTopic))
		})
		It("passes validation", func() {
			err := validateTunnelOptions(tunnelOpts)
			Expect(err).ToNot(HaveOccurred())
		})
	})
	Context("Tunnel", func() {
		var fakeTunnel *tunnelfakes.FakeITunnel
		// the fake tunnel yields exactly one outbound event to replay
		BeforeEach(func() {
			fakeTunnel = &tunnelfakes.FakeITunnel{}
			fakeTunnel.ReadStub = func() chan *events.Outbound {
				ch := make(chan *events.Outbound, 1)
				ch <- &events.Outbound{Blob: []byte(`testing`)}
				return ch
			}
		})
		It("validates tunnel options", func() {
			errorCh := make(chan *records.ErrorRecord)
			err := (&MQTT{}).Tunnel(context.Background(), nil, nil, errorCh)
			Expect(err).To(HaveOccurred())
			Expect(err.Error()).To(ContainSubstring(validate.ErrEmptyTunnelOpts.Error()))
		})
		It("returns an error on publish timeout", func() {
			// token whose WaitTimeout reports false => publish timed out
			fakeMQTT := &mqttfakes.FakeClient{}
			fakeMQTT.PublishStub = func(string, byte, bool, interface{}) mqtt.Token {
				return &mqttfakes.FakeToken{
					WaitTimeoutStub: func(_ time.Duration) bool {
						return false
					},
				}
			}
			m := &MQTT{
				client:   fakeMQTT,
				connArgs: &args.MQTTConn{},
				log:      logrus.NewEntry(&logrus.Logger{Out: ioutil.Discard}),
			}
			errorCh := make(chan *records.ErrorRecord)
			err := m.Tunnel(context.Background(), tunnelOpts, fakeTunnel, errorCh)
			Expect(err).To(HaveOccurred())
			Expect(err.Error()).To(ContainSubstring("timed out"))
		})
		It("returns an error when publish fails", func() {
			// token that completes in time but reports a publish error
			fakeMQTT := &mqttfakes.FakeClient{}
			fakeMQTT.PublishStub = func(string, byte, bool, interface{}) mqtt.Token {
				return &mqttfakes.FakeToken{
					ErrorStub: func() error {
						return errors.New("test error")
					},
					WaitTimeoutStub: func(_ time.Duration) bool {
						return true
					},
				}
			}
			m := &MQTT{
				client:   fakeMQTT,
				connArgs: &args.MQTTConn{},
				log:      logrus.NewEntry(&logrus.Logger{Out: ioutil.Discard}),
			}
			errorCh := make(chan *records.ErrorRecord)
			err := m.Tunnel(context.Background(), tunnelOpts, fakeTunnel, errorCh)
			Expect(err).To(HaveOccurred())
			Expect(err.Error()).To(ContainSubstring("unable to replay message"))
		})
		It("replays a message", func() {
			// happy path: publish succeeds; cancel the context after 1s so
			// Tunnel's loop exits cleanly and call counts can be asserted
			fakeMQTT := &mqttfakes.FakeClient{}
			fakeMQTT.PublishStub = func(string, byte, bool, interface{}) mqtt.Token {
				return &mqttfakes.FakeToken{
					ErrorStub:       func() error { return nil },
					WaitTimeoutStub: func(_ time.Duration) bool { return true },
				}
			}
			m := &MQTT{
				client:   fakeMQTT,
				connArgs: &args.MQTTConn{},
				log:      logrus.NewEntry(&logrus.Logger{Out: ioutil.Discard}),
			}
			ctx, cancel := context.WithCancel(context.Background())
			go func() {
				time.Sleep(time.Second)
				cancel()
			}()
			errorCh := make(chan *records.ErrorRecord)
			err := m.Tunnel(ctx, tunnelOpts, fakeTunnel, errorCh)
			Expect(err).ToNot(HaveOccurred())
			Expect(fakeTunnel.StartCallCount()).To(Equal(1))
			Expect(fakeTunnel.ReadCallCount()).To(Equal(1))
			Expect(fakeMQTT.PublishCallCount()).To(Equal(1))
		})
	})
})
|
package tls
import (
"bytes"
"encoding/pem"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/openshift/installer/pkg/asset"
)
// CertBundle bundles multiple certificates into a single PEM blob asset.
type CertBundle struct {
	BundleRaw []byte        // concatenated PEM-encoded certificates
	FileList  []*asset.File // generated asset files (the .crt bundle)
}
// Cert returns the certificate bundle as raw PEM bytes.
func (b *CertBundle) Cert() []byte {
	return b.BundleRaw
}
// Generate generates the cert bundle from certs: each certificate is
// decoded from PEM (validating it), then re-encoded into one normalized
// PEM bundle stored in BundleRaw and exposed as <filename>.crt.
// Returns an error when no certificates are given or any PEM is invalid.
func (b *CertBundle) Generate(filename string, certs ...CertInterface) error {
	if len(certs) < 1 {
		// fixed typo in the error message ("atleast" -> "at least")
		return errors.New("at least one certificate required for a bundle")
	}
	buf := bytes.Buffer{}
	for _, c := range certs {
		// decode to validate and normalize the PEM payload
		cert, err := PemToCertificate(c.Cert())
		if err != nil {
			logrus.Debugf("Failed to decode bundle certificate: %s", err)
			return errors.Wrap(err, "decoding certificate from PEM")
		}
		if err := pem.Encode(&buf, &pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}); err != nil {
			logrus.Debugf("Failed to encode bundle certificates: %s", err)
			return errors.Wrap(err, "encoding certificate to PEM")
		}
	}
	b.BundleRaw = buf.Bytes()
	b.FileList = []*asset.File{
		{
			Filename: assetFilePath(filename + ".crt"),
			Data:     b.BundleRaw,
		},
	}
	return nil
}
// Files returns the files generated by the asset (the .crt bundle).
func (b *CertBundle) Files() []*asset.File {
	return b.FileList
}
// Load is a no-op because TLS assets are not written to disk.
// It always reports that nothing was loaded.
func (b *CertBundle) Load(asset.FileFetcher) (bool, error) {
	return false, nil
}
|
package domain
import "errors"
// Sentinel errors returned by the video/playlist domain layer; callers
// compare them with errors.Is.
var (
	ErrVideoNotFound = errors.New("video not found")
	ErrFailedDeleteLike = errors.New("failed delete like")
	ErrFailedAddLike = errors.New("failed add like")
	ErrInternal = errors.New("internal error")
	ErrAlreadyLike = errors.New("already like")
	ErrAlreadyDisLike = errors.New("already dislike")
	ErrPlaylistNotFound = errors.New("not found")
	ErrPlaylistDuplicate = errors.New("playlist duplicate video in playlist")
	ErrUnknownModificationAction = errors.New("unknown playlist modification action")
)
|
package xpost
import (
"errors"
)
// gExchanger is the process-wide default exchanger handed out by GetExchanger.
var gExchanger *Exchanger
// init eagerly builds the default exchanger; the nil check mirrors
// GetExchanger's lazy path (and is always true at init time).
func init() {
	if gExchanger == nil {
		gExchanger = &Exchanger{
			xp: nil,
			wires: make(map[string]*wire)}
	}
}
// GetExchanger returns the global default exchanger, lazily constructing
// it if the init-time instance is somehow missing.
func GetExchanger() *Exchanger {
	if gExchanger != nil {
		return gExchanger
	}
	gExchanger = &Exchanger{
		xp: nil,
		wires: make(map[string]*wire)}
	return gExchanger
}
// wire represents a communication channel for a courier
// multiple instances of the same courier share one wire
type wire struct {
	name string // unique wire name, used as the routing key in deliver
	cap  int // buffered capacity of pipe
	pipe chan *Message // queued messages awaiting pickup via wait
}
// Exchanger is a collector of wires
type Exchanger struct {
	xp    *Xpost // owning xpost instance; supplies the dispatch pool
	wires map[string]*wire // registered wires keyed by name
}
// newWire builds a wire named n whose queue buffers up to c messages.
// An empty name or negative capacity is fatal and yields nil.
func newWire(n string, c int) *wire {
	if n == "" || c < 0 {
		logFatalf("Invalid wire attributes: name=%s, cap=%d", n, c)
		return nil
	}
	w := wire{
		name: n,
		cap:  c,
		pipe: make(chan *Message, c),
	}
	return &w
}
// wireExist reports whether a wire named n has been registered.
func (e Exchanger) wireExist(n string) bool {
	_, ok := e.wires[n]
	return ok
}
// setXpost attaches the owning Xpost whose worker pool deliver uses.
func (e *Exchanger) setXpost(xp *Xpost) {
	e.xp = xp
}
// registerWire creates a wire named n with capacity c and records it in
// the exchanger. Failure to create the wire is fatal.
func (e *Exchanger) registerWire(n string, c int) bool {
	wr := newWire(n, c)
	if wr == nil {
		logFatal("Could not create new wire") // will call os.Exit(1)
		return false
	}
	e.wires[n] = wr
	return true
}
// Info logs the current stats of the exchanger: every wire's name,
// capacity, and number of queued messages.
func (e Exchanger) Info() {
	logInfof("Wires info:")
	for _, wire := range e.wires {
		logInfof(">>>>>> name: %s", wire.name)
		logInfof(">>>>>> capacity: %d", wire.cap)
		logInfof(">>>>>> msg-queued: %d", len(wire.pipe))
	}
}
// wireDeliverJob is a worker-pool job that pushes one message onto one wire.
type wireDeliverJob struct {
	msg  *Message
	wire *wire
}
// Run blocks until the wire's buffered queue accepts the message.
func (wdj *wireDeliverJob) Run() {
	wdj.wire.pipe <- wdj.msg
}
// deliver fans m out to every wire named in m.dest. The first destination
// receives m itself; each additional destination gets a Clone. Jobs are
// dispatched on the xpost worker pool and deliver blocks until every send
// has completed. If any destination wire is unknown an error is returned
// and nothing is sent.
func (e *Exchanger) deliver(m *Message) error {
	if m == nil {
		return nil
	}
	// resolve all destinations up front so delivery is all-or-nothing
	wires := make([]*wire, 0)
	for _, dest := range m.dest {
		w, ok := e.wires[dest]
		if !ok {
			logErrorf("Deliver to not exist wire: %s", dest)
			return errors.New("Wire not found")
		}
		wires = append(wires, w)
	}
	donechs := make([]<-chan struct{}, 0)
	for idx, w := range wires {
		// only clone for the 2nd destination onward; the original goes first
		newMsg := m
		if idx > 0 {
			newMsg = m.Clone()
		}
		wdj := &wireDeliverJob{msg: newMsg, wire: w}
		donech := e.xp.pool.Dispatch(wdj)
		donechs = append(donechs, donech)
	}
	// wait for every dispatched job to finish before returning
	for _, donech := range donechs {
		<-donech
	}
	return nil
}
// wait blocks until a message arrives on the wire named n.
// It returns nil immediately when no such wire exists.
func (e *Exchanger) wait(n string) *Message {
	if w, ok := e.wires[n]; ok {
		return <-w.pipe
	}
	return nil
}
// isClean reports whether every wire's queue has been fully drained,
// logging the first wire that still holds messages.
func (e *Exchanger) isClean() bool {
	for _, w := range e.wires {
		queued := len(w.pipe)
		if queued == 0 {
			continue
		}
		logErrorf("Wire %s is not clean: %d", w.name, queued)
		return false
	}
	return true
}
|
package ipproxy
import (
"sync"
)
// closeable is a helper type for asynchronous processes that follow an orderly
// close sequence.
type closeable struct {
	finalizer         func() error // optional cleanup run once, after the owner is done
	closeCh           chan struct{} // closed to request shutdown
	readyToFinalizeCh chan struct{} // closed by the owner when it is safe to finalize
	closedCh          chan struct{} // closed when shutdown has fully completed
	closeNowOnce      sync.Once
	closeOnce         sync.Once
}
// Close runs the orderly shutdown sequence exactly once: signal closure,
// wait for the owner to report it is ready, run the finalizer, then mark
// fully closed. The finalizer's error is logged and returned.
// NOTE(review): subsequent calls return nil because the Once body is
// skipped — confirm callers don't rely on seeing the first error again.
func (cl *closeable) Close() (err error) {
	cl.closeOnce.Do(func() {
		close(cl.closeCh)
		// block until the processing loop acknowledges the close request
		<-cl.readyToFinalizeCh
		if cl.finalizer != nil {
			err = cl.finalizer()
		}
		close(cl.closedCh)
		if err != nil {
			log.Error(err)
		}
	})
	return
}
// closeNow skips waiting for the owner: it marks the closeable ready to
// finalize immediately and then runs the normal Close sequence.
func (cl *closeable) closeNow() (err error) {
	cl.closeNowOnce.Do(func() {
		close(cl.readyToFinalizeCh)
		err = cl.Close()
	})
	return err
}
|
package config
import (
"path/filepath"
"sync"
"github.com/BurntSushi/toml"
)
// mallConf mirrors the structure of config/mall.toml. The three database
// sections (alpha/beta/release) share the same shape for the different
// deployment environments.
// NOTE(review): "Datebases" is a spelling kept for compatibility with the
// existing `datebases` TOML key — renaming would break config files.
type mallConf struct {
	Title string `toml:"title"`
	Server struct {
		Port string `toml:"port"`
	} `toml:"server"`
	Token struct {
		Secret string `toml:"secret"`
	} `toml:"token"`
	Datebases struct {
		Alpha struct {
			Host string `toml:"host"`
			ConnectionMax int `toml:"connection_max"`
			User string `toml:"user"`
			Password string `toml:"password"`
			DbName string `toml:"db_name"`
			Ssl string `toml:"ssl"`
			Schema string `toml:"schema"`
		} `toml:"alpha"`
		Beta struct {
			Host string `toml:"host"`
			ConnectionMax int `toml:"connection_max"`
			User string `toml:"user"`
			Password string `toml:"password"`
			DbName string `toml:"db_name"`
			Ssl string `toml:"ssl"`
			Schema string `toml:"schema"`
		} `toml:"beta"`
		Release struct {
			Host string `toml:"host"`
			ConnectionMax int `toml:"connection_max"`
			User string `toml:"user"`
			Password string `toml:"password"`
			DbName string `toml:"db_name"`
			Ssl string `toml:"ssl"`
			Schema string `toml:"schema"`
		} `toml:"release"`
	} `toml:"datebases"`
}
// conf holds the parsed configuration; once guards the one-time parse and
// cfgLock protects conf against concurrent reads/reloads.
var (
	conf *mallConf
	once sync.Once
	cfgLock sync.RWMutex
)
// New returns the process-wide configuration, parsing the TOML file on
// first use. Safe for concurrent callers.
func New() *mallConf {
	once.Do(ParseConfig)
	cfgLock.RLock()
	current := conf
	cfgLock.RUnlock()
	return current
}
// ParseConfig loads ../config/mall.toml into the package-level conf under
// the write lock, panicking on any path or decode failure.
func ParseConfig() {
	path, err := filepath.Abs("../config/mall.toml")
	if err != nil {
		panic(err)
	}
	parsed := new(mallConf)
	cfgLock.Lock()
	defer cfgLock.Unlock()
	if _, err := toml.DecodeFile(path, parsed); err != nil {
		panic(err)
	}
	conf = parsed
}
|
package leetcode
/*You need to construct a string consists of parenthesis and integers from a binary tree with the preorder traversing way.
The null node needs to be represented by empty parenthesis pair "()". And you need to omit all the empty parenthesis pairs that don't affect the one-to-one mapping relationship between the string and the original binary tree.
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/construct-string-from-binary-tree
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。*/
import "strconv"
/**
 * Definition for a binary tree node.
 * type TreeNode struct {
 *     Val int
 *     Left *TreeNode
 *     Right *TreeNode
 * }
 */
// tree2str serializes the tree preorder, wrapping children in parentheses
// and omitting only the empty pairs that don't affect the one-to-one
// mapping: a missing right child is dropped entirely, while a missing
// left child keeps its "()" placeholder.
func tree2str(t *TreeNode) string {
	if t == nil {
		return ""
	}
	val := strconv.Itoa(t.Val)
	switch {
	case t.Left == nil && t.Right == nil:
		return val
	case t.Right == nil:
		return val + "(" + tree2str(t.Left) + ")"
	case t.Left == nil:
		return val + "()" + "(" + tree2str(t.Right) + ")"
	default:
		return val + "(" + tree2str(t.Left) + ")" + "(" + tree2str(t.Right) + ")"
	}
}
|
package main
//#include <unistd.h>
import "C"
import (
linuxproc "github.com/c9s/goprocinfo/linux"
"github.com/prometheus/client_golang/prometheus"
"strconv"
"time"
)
// Core pairs a CPU core index with its cumulative idle time since boot.
type Core struct {
	ID int
	IdleTime uint64
}
// StatReader abstracts linuxproc.ReadStat so /proc parsing can be stubbed in tests.
type StatReader func(string) (*linuxproc.Stat, error)
// sysCpuGaugeVec exports per-core cumulative idle time, labeled by core id.
var (
	sysCpuGaugeVec = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "core_idle_time",
			Help: "Total CPU core idle time, since boot",
		},
		[]string{"core_id"},
	)
)
// getCpuCoresIdleTime reads /proc/stat via the supplied reader and returns
// the cumulative idle time of every CPU core since boot.
//
// Fix: the error is now handled BEFORE stat is used. Previously the code
// ranged over stat.CPUStats first, so a failed read (nil stat) caused a
// nil-pointer panic before DealWithErr ever saw the error.
func getCpuCoresIdleTime(ReadStat StatReader) (cores []Core) {
	stat, err := ReadStat("/proc/stat")
	DealWithErr(err)
	if stat == nil {
		// nothing to report if the read failed and DealWithErr returned
		return
	}
	for index, s := range stat.CPUStats {
		cores = append(cores, Core{index, s.Idle})
	}
	return
}
// updateCpuIdleTime polls per-core idle times every 10ms forever and
// publishes them to the core_idle_time gauge; run it on its own goroutine.
func updateCpuIdleTime() {
	for {
		cores := getCpuCoresIdleTime(linuxproc.ReadStat)
		for _, c := range cores {
			label := strconv.Itoa(c.ID)
			sysCpuGaugeVec.WithLabelValues(label).Set(float64(c.IdleTime))
		}
		time.Sleep(10 * time.Millisecond)
	}
}
// StartCpuMonitoring registers the idle-time gauge with Prometheus and
// starts the background polling loop.
func StartCpuMonitoring() {
	prometheus.MustRegister(sysCpuGaugeVec)
	go updateCpuIdleTime()
}
|
package main
import "testing"
// TestSum verifies that sum adds two integers.
func TestSum(test *testing.T) {
	const expected = 10
	got := sum(5, 5)
	if got != expected {
		test.Errorf("Função sum deveria retornar 10, mas retornou %v", got)
	}
}
|
/*
* @lc app=leetcode.cn id=416 lang=golang
*
* [416] 分割等和子集
*/
// @lc code=start
package main
import "fmt"
import "sort"
// canPartition reports whether nums (assumed non-negative, per the
// problem constraints) can be split into two subsets with equal sums.
//
// Fix: the previous greedy approach (sort, then check whether some PREFIX
// of the sorted slice sums to half the total) is incorrect — e.g.
// {2,2,1,1} splits into {2,1}/{2,1} but no sorted prefix sums to 3.
// Replaced with the standard 0/1 subset-sum boolean DP.
func canPartition(nums []int) bool {
	sum := 0
	for _, v := range nums {
		sum += v
	}
	// an odd total can never be split evenly
	if sum % 2 != 0 {
		return false
	}
	target := sum / 2
	// kept from the original: callers (see main) observe the sorted slice
	sort.Ints(nums)
	// reachable[s] is true when some subset of the scanned values sums to s
	reachable := make([]bool, target+1)
	reachable[0] = true
	for _, v := range nums {
		// iterate downward so each value is used at most once
		for s := target; s >= v; s-- {
			if reachable[s-v] {
				reachable[s] = true
			}
		}
	}
	return reachable[target]
}
// @lc code=end
// main exercises canPartition with a few sample inputs.
func main() {
	samples := [][]int{
		{1, 5, 11, 5},
		{1, 2, 3, 5},
		{2, 2, 1, 1},
	}
	for _, nums := range samples {
		fmt.Printf("%v, %t\n", nums, canPartition(nums))
	}
}
|
// Copyright 2021 Akamai Technologies, Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package plugin
import (
dns "github.com/akamai/AkamaiOPEN-edgegrid-golang/configdns-v2"
"github.com/akamai/edgedns-registrar-coordinator/registrar"
log "github.com/apex/log"
"context"
"github.com/stretchr/testify/assert"
"sync"
"testing"
)
// pluginTestMutex serializes the tests in this file: they share
// package-level plugin stub state and must not run concurrently.
// (An empty, dead `const ()` declaration was removed.)
var (
	pluginTestMutex = &sync.Mutex{}
)
// initRegistrarStub builds a PluginConfig pointing at the bundled
// test-plugin library and its YAML config, logging via the given entry.
func initRegistrarStub(entry *log.Entry) registrar.PluginConfig {
	config := registrar.PluginConfig{
		PluginLibPath: "./test-plugin/test-plugin-lib",
		//PluginName string
		PluginConfigPath: "./test-plugin/test-plugin.yaml",
		LogEntry: entry,
		//Registrar *plugin.Plugin
	}
	return config
}
// testSetup flags the registrar as under test and clears any result or
// error left over from a previous case.
func testSetup(testRegistrar *PluginRegistrar) {
	testRegistrar.pluginTest = true
	testRegistrar.pluginResult.PluginError = nil
	testRegistrar.pluginResult.PluginResult = nil
}
//
// Test Functions
//
// TestRegistrarGetDomainsDirect calls the internal pluginGetDomains
// directly (no testSetup) and checks the stubbed list round-trips.
func TestRegistrarGetDomainsDirect(t *testing.T) {
	pluginTestMutex.Lock()
	defer pluginTestMutex.Unlock()
	ctx := context.TODO()
	logLevel, _ := log.ParseLevel("info")
	log.SetLevel(logLevel)
	appLog := log.WithFields(log.Fields{
		"registrar": "Test Plugin",
		"subcommand": "GetDomains",
	})
	ctx = context.WithValue(ctx, "appLog", appLog)
	config := initRegistrarStub(appLog)
	testRegistrar, err := NewPluginRegistrar(ctx, config)
	assert.Nil(t, err)
	// The test plugin echoes FuncOutput back through the plugin result.
	testRegistrar.pluginArgs.PluginArg = map[string]interface{}{"FuncOutput": []string{"test1.com", "test2.com"}}
	appLog.Debugf("Invoking %s library GetDomains", testRegistrar.pluginConfig.PluginName)
	testRegistrar.pluginGetDomains()
	assert.Nil(t, testRegistrar.pluginResult.PluginError)
	dl, _ := testRegistrar.pluginResult.PluginResult.([]string)
	assert.Equal(t, dl, []string{"test1.com", "test2.com"})
}
// TestRegistrarGetDomains exercises the public GetDomains wrapper and
// checks the stubbed domain list round-trips via the plugin result.
func TestRegistrarGetDomains(t *testing.T) {
	pluginTestMutex.Lock()
	defer pluginTestMutex.Unlock()
	ctx := context.TODO()
	logLevel, _ := log.ParseLevel("info")
	log.SetLevel(logLevel)
	appLog := log.WithFields(log.Fields{
		"registrar": "Test Plugin",
		"subcommand": "GetDomains",
	})
	ctx = context.WithValue(ctx, "appLog", appLog)
	config := initRegistrarStub(appLog)
	testRegistrar, err := NewPluginRegistrar(ctx, config)
	assert.Nil(t, err)
	// The test plugin echoes FuncOutput back through the plugin result.
	testRegistrar.pluginArgs.PluginArg = map[string]interface{}{"FuncOutput": []string{"test1.com", "test2.com"}}
	testSetup(testRegistrar)
	appLog.Debugf("Invoking %s library GetDomains", testRegistrar.pluginConfig.PluginName)
	testRegistrar.GetDomains(ctx)
	assert.Nil(t, testRegistrar.pluginResult.PluginError)
	dl, _ := testRegistrar.pluginResult.PluginResult.([]string)
	assert.Equal(t, dl, []string{"test1.com", "test2.com"})
}
// TestRegistrarGetDomainsFail instructs the test plugin to fail
// GetDomains and checks the error surfaces in the plugin result.
func TestRegistrarGetDomainsFail(t *testing.T) {
	pluginTestMutex.Lock()
	defer pluginTestMutex.Unlock()
	ctx := context.TODO()
	logLevel, _ := log.ParseLevel("info")
	log.SetLevel(logLevel)
	appLog := log.WithFields(log.Fields{
		"registrar": "Test Plugin",
		"subcommand": "GetDomainsFail",
	})
	ctx = context.WithValue(ctx, "appLog", appLog)
	config := initRegistrarStub(appLog)
	testRegistrar, err := NewPluginRegistrar(ctx, config)
	assert.Nil(t, err)
	// FuncErrors makes the test plugin return an error for this call.
	testRegistrar.pluginArgs.PluginArg = map[string]interface{}{"FuncErrors": "GetDomainsFail"}
	testSetup(testRegistrar)
	appLog.Debugf("Invoking %s library GetDomains", testRegistrar.pluginConfig.PluginName)
	testRegistrar.GetDomains(ctx)
	assert.NotNil(t, testRegistrar.pluginResult.PluginError)
	assert.Contains(t, testRegistrar.pluginResult.PluginError.Error(), "Fail")
}
// TestRegistrarGetDomain checks a stubbed *registrar.Domain is returned
// intact through GetDomain.
func TestRegistrarGetDomain(t *testing.T) {
	pluginTestMutex.Lock()
	defer pluginTestMutex.Unlock()
	ctx := context.TODO()
	logLevel, _ := log.ParseLevel("info")
	log.SetLevel(logLevel)
	appLog := log.WithFields(log.Fields{
		"registrar": "Test Plugin",
		"subcommand": "GetDomain",
	})
	ctx = context.WithValue(ctx, "appLog", appLog)
	config := initRegistrarStub(appLog)
	testRegistrar, err := NewPluginRegistrar(ctx, config)
	assert.Nil(t, err)
	// The test plugin echoes FuncOutput back through the plugin result.
	testRegistrar.pluginArgs.PluginArg = map[string]interface{}{"FuncOutput": &registrar.Domain{Name: "test1.com", Type: "PRIMARY", SignAndServeAlgorithm: "abcdefg"}}
	testSetup(testRegistrar)
	appLog.Debugf("Invoking %s library GetDomain", testRegistrar.pluginConfig.PluginName)
	testRegistrar.GetDomain(ctx, "test1.com")
	assert.Nil(t, testRegistrar.pluginResult.PluginError)
	dl, _ := testRegistrar.pluginResult.PluginResult.(*registrar.Domain)
	assert.Equal(t, dl.Name, "test1.com")
}
// TestRegistrarGetDomainFail instructs the test plugin to fail GetDomain
// and checks the error surfaces in the plugin result.
func TestRegistrarGetDomainFail(t *testing.T) {
	pluginTestMutex.Lock()
	defer pluginTestMutex.Unlock()
	ctx := context.TODO()
	logLevel, _ := log.ParseLevel("info")
	log.SetLevel(logLevel)
	appLog := log.WithFields(log.Fields{
		"registrar": "Test Plugin",
		"subcommand": "GetDomainFail",
	})
	ctx = context.WithValue(ctx, "appLog", appLog)
	config := initRegistrarStub(appLog)
	testRegistrar, err := NewPluginRegistrar(ctx, config)
	assert.Nil(t, err)
	// FuncErrors makes the test plugin return an error for this call.
	testRegistrar.pluginArgs.PluginArg = map[string]interface{}{"FuncErrors": "GetDomainFail"}
	testSetup(testRegistrar)
	appLog.Debugf("Invoking %s library GetDomain", testRegistrar.pluginConfig.PluginName)
	testRegistrar.GetDomain(ctx, "test1.com")
	assert.NotNil(t, testRegistrar.pluginResult.PluginError)
	assert.Contains(t, testRegistrar.pluginResult.PluginError.Error(), "Fail")
}
// TestRegistrarGetTsigKey checks a stubbed *dns.TSIGKey is returned
// intact through GetTsigKey.
func TestRegistrarGetTsigKey(t *testing.T) {
	pluginTestMutex.Lock()
	defer pluginTestMutex.Unlock()
	ctx := context.TODO()
	logLevel, _ := log.ParseLevel("info")
	log.SetLevel(logLevel)
	appLog := log.WithFields(log.Fields{
		"registrar": "Test Plugin",
		"subcommand": "GetTsigKey",
	})
	ctx = context.WithValue(ctx, "appLog", appLog)
	config := initRegistrarStub(appLog)
	testRegistrar, err := NewPluginRegistrar(ctx, config)
	assert.Nil(t, err)
	// The test plugin echoes FuncOutput back through the plugin result.
	testRegistrar.pluginArgs.PluginArg = map[string]interface{}{"FuncOutput": &dns.TSIGKey{Name: "tsig", Algorithm: "abcd", Secret: "boo"}}
	testSetup(testRegistrar)
	appLog.Debugf("Invoking %s library GetTsigKey", testRegistrar.pluginConfig.PluginName)
	testRegistrar.GetTsigKey(ctx, "test1.com")
	assert.Nil(t, testRegistrar.pluginResult.PluginError)
	key, _ := testRegistrar.pluginResult.PluginResult.(*dns.TSIGKey)
	assert.Equal(t, key.Name, "tsig")
}
// TestRegistrarGetTsigKeyFail instructs the test plugin to fail
// GetTsigKey and checks the error surfaces in the plugin result.
func TestRegistrarGetTsigKeyFail(t *testing.T) {
	pluginTestMutex.Lock()
	defer pluginTestMutex.Unlock()
	ctx := context.TODO()
	logLevel, _ := log.ParseLevel("info")
	log.SetLevel(logLevel)
	appLog := log.WithFields(log.Fields{
		"registrar": "Test Plugin",
		"subcommand": "GetTsigKeyFail",
	})
	ctx = context.WithValue(ctx, "appLog", appLog)
	config := initRegistrarStub(appLog)
	testRegistrar, err := NewPluginRegistrar(ctx, config)
	assert.Nil(t, err)
	// FuncErrors makes the test plugin return an error for this call.
	testRegistrar.pluginArgs.PluginArg = map[string]interface{}{"FuncErrors": "GetTsigKeyFail"}
	testSetup(testRegistrar)
	appLog.Debugf("Invoking %s library GetTsigKey", testRegistrar.pluginConfig.PluginName)
	testRegistrar.GetTsigKey(ctx, "test1.com")
	assert.NotNil(t, testRegistrar.pluginResult.PluginError)
	assert.Contains(t, testRegistrar.pluginResult.PluginError.Error(), "Fail")
}
// TestRegistrarGetServeAlgorithm checks a stubbed algorithm string is
// returned intact through GetServeAlgorithm.
func TestRegistrarGetServeAlgorithm(t *testing.T) {
	pluginTestMutex.Lock()
	defer pluginTestMutex.Unlock()
	ctx := context.TODO()
	logLevel, _ := log.ParseLevel("info")
	log.SetLevel(logLevel)
	appLog := log.WithFields(log.Fields{
		"registrar": "Test Plugin",
		"subcommand": "GetServeAlgorithm",
	})
	ctx = context.WithValue(ctx, "appLog", appLog)
	config := initRegistrarStub(appLog)
	testRegistrar, err := NewPluginRegistrar(ctx, config)
	assert.Nil(t, err)
	// The test plugin echoes FuncOutput back through the plugin result.
	testRegistrar.pluginArgs.PluginArg = map[string]interface{}{"FuncOutput": "ServeAlgorithm"}
	testSetup(testRegistrar)
	appLog.Debugf("Invoking %s library GetServeAlgorithm", testRegistrar.pluginConfig.PluginName)
	testRegistrar.GetServeAlgorithm(ctx, "test1.com")
	assert.Nil(t, testRegistrar.pluginResult.PluginError)
	algo, _ := testRegistrar.pluginResult.PluginResult.(string)
	assert.Equal(t, algo, "ServeAlgorithm")
}
// TestRegistrarGetServeAlgorithmFail instructs the test plugin to fail
// GetServeAlgorithm and checks the error surfaces in the plugin result.
func TestRegistrarGetServeAlgorithmFail(t *testing.T) {
	pluginTestMutex.Lock()
	defer pluginTestMutex.Unlock()
	ctx := context.TODO()
	logLevel, _ := log.ParseLevel("info")
	log.SetLevel(logLevel)
	appLog := log.WithFields(log.Fields{
		"registrar": "Test Plugin",
		"subcommand": "GetServeAlgorithmFail",
	})
	ctx = context.WithValue(ctx, "appLog", appLog)
	config := initRegistrarStub(appLog)
	testRegistrar, err := NewPluginRegistrar(ctx, config)
	assert.Nil(t, err)
	// FuncErrors makes the test plugin return an error for this call.
	testRegistrar.pluginArgs.PluginArg = map[string]interface{}{"FuncErrors": "GetServeAlgorithmFail"}
	testSetup(testRegistrar)
	appLog.Debugf("Invoking %s library GetServeAlgorithm", testRegistrar.pluginConfig.PluginName)
	testRegistrar.GetServeAlgorithm(ctx, "test1.com")
	assert.NotNil(t, testRegistrar.pluginResult.PluginError)
	assert.Contains(t, testRegistrar.pluginResult.PluginError.Error(), "Fail")
}
// TestRegistrarGetMasterIPs checks a stubbed IP list is returned intact
// through GetMasterIPs.
func TestRegistrarGetMasterIPs(t *testing.T) {
	pluginTestMutex.Lock()
	defer pluginTestMutex.Unlock()
	ctx := context.TODO()
	logLevel, _ := log.ParseLevel("info")
	log.SetLevel(logLevel)
	appLog := log.WithFields(log.Fields{
		"registrar": "Test Plugin",
		"subcommand": "GetMasterIPs",
	})
	ctx = context.WithValue(ctx, "appLog", appLog)
	config := initRegistrarStub(appLog)
	testRegistrar, err := NewPluginRegistrar(ctx, config)
	assert.Nil(t, err)
	// The test plugin echoes FuncOutput back through the plugin result.
	testRegistrar.pluginArgs.PluginArg = map[string]interface{}{"FuncOutput": []string{"1.2.3.4", "4.5.6.7.8"}}
	testSetup(testRegistrar)
	appLog.Debugf("Invoking %s library GetMasterIPs", testRegistrar.pluginConfig.PluginName)
	testRegistrar.GetMasterIPs(ctx)
	assert.Nil(t, testRegistrar.pluginResult.PluginError)
	ml, _ := testRegistrar.pluginResult.PluginResult.([]string)
	assert.Equal(t, ml, []string{"1.2.3.4", "4.5.6.7.8"})
}
// TestRegistrarGetMasterIPsFail verifies that a failure raised inside the
// plugin's GetMasterIPs is surfaced through pluginResult.PluginError.
func TestRegistrarGetMasterIPsFail(t *testing.T) {
	pluginTestMutex.Lock()
	defer pluginTestMutex.Unlock()
	ctx := context.TODO()
	logLevel, _ := log.ParseLevel("info")
	log.SetLevel(logLevel)
	appLog := log.WithFields(log.Fields{
		"registrar":  "Test Plugin",
		"subcommand": "GetMasterIPsFail",
	})
	ctx = context.WithValue(ctx, "appLog", appLog)
	config := initRegistrarStub(appLog)
	testRegistrar, err := NewPluginRegistrar(ctx, config)
	assert.Nil(t, err)
	// Test plugin will take the arg and place it in Result; the "FuncErrors"
	// key instructs the stub to fail this specific call.
	testRegistrar.pluginArgs.PluginArg = map[string]interface{}{"FuncErrors": "GetMasterIPsFail"}
	testSetup(testRegistrar)
	appLog.Debugf("Invoking %s library GetMasterIPs", testRegistrar.pluginConfig.PluginName)
	testRegistrar.GetMasterIPs(ctx)
	assert.NotNil(t, testRegistrar.pluginResult.PluginError)
	assert.Contains(t, testRegistrar.pluginResult.PluginError.Error(), "Fail")
}
|
package reporter
import (
"fmt"
"os"
)
// Reporter is responsible for reporting responses in the event of an
// error. It is an interface so it is easy to inject a mock for unit
// testing.
type Reporter interface {
	// ReportIfError reports the formatted message when the error is non-nil.
	ReportIfError(error, string, ...interface{})
}
// FmtReporter is a production implementation of the Reporter interface
// which writes errors to stdout and then terminates the process.
type FmtReporter struct {
}
// ReportIfError prints the formatted message and the error to stdout when
// err is non-nil, then terminates the process with exit code 1.
// It is a no-op when err is nil.
func (f *FmtReporter) ReportIfError(err error, format string, a ...interface{}) {
	if err != nil {
		// Append the newline to the caller's format directly; the previous
		// fmt.Printf(fmt.Sprintf("%s\n", format), a...) round trip built the
		// same format string with an extra allocation.
		fmt.Printf(format+"\n", a...)
		fmt.Printf("Error: %v\n", err)
		// @TODO(mattjmcnaughton) Make exit code more accurate.
		os.Exit(1)
	}
}
|
package main
import (
"testing"
"time"
)
// TestBanTimes exercises the package-level bans registry: an entry whose
// expiry time is in the future counts as banned, an expired entry does not,
// and clean() removes every expired entry from both maps.
func TestBanTimes(t *testing.T) {
	timeinfuture := time.Date(time.Now().Year()+1, time.September, 10, 23, 0, 0, 0, time.UTC)
	timeinpast := time.Date(time.Now().Year()-1, time.September, 10, 23, 0, 0, 0, time.UTC)
	uid := Userid(1)
	ip := "10.1.2.3"
	bans.users[uid] = timeinfuture
	if !bans.isUseridBanned(uid) {
		t.Error("user should be banned because the expiretime is in the future")
	}
	bans.users[uid] = timeinpast
	if bans.isUseridBanned(uid) {
		t.Error("user should NOT be banned because the expiretime is in the past")
	}
	bans.ips[ip] = timeinfuture
	if !bans.isIPBanned(ip) {
		t.Error("ip should be banned because the expiretime is in the future")
	}
	bans.ips[ip] = timeinpast
	if bans.isIPBanned(ip) {
		t.Error("ip should NOT be banned because the expiretime is in the past")
	}
	// Both maps now hold only expired entries, so clean() must empty them.
	bans.clean()
	if len(bans.users) > 0 {
		t.Error("bans.clean did not clean the users")
	}
	if len(bans.ips) > 0 {
		t.Error("bans.clean did not clean the ips")
	}
}
|
package domain
import "github.com/google/uuid"
// uuidPlayerIdGenerator is a PlayerIdGenerator backed by the google/uuid
// package.
type uuidPlayerIdGenerator struct {
}

// NewUUIDPlayerIdGenerator returns a PlayerIdGenerator that produces
// UUID-based player ids.
func NewUUIDPlayerIdGenerator() PlayerIdGenerator {
	return uuidPlayerIdGenerator{}
}

// Generate returns a freshly generated UUID as the new PlayerId.
func (g uuidPlayerIdGenerator) Generate() PlayerId {
	return uuid.New()
}
|
/*
Copyright (C) 2018 Black Duck Software, Inc.
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
*/
package skyfire
import (
"encoding/json"
"fmt"
"net/http"
"os"
"time"
"github.com/blackducksoftware/perceptor-skyfire/pkg/hub"
"github.com/blackducksoftware/perceptor-skyfire/pkg/kube"
"github.com/blackducksoftware/perceptor-skyfire/pkg/perceptor"
"github.com/blackducksoftware/perceptor-skyfire/pkg/report"
"github.com/juju/errors"
log "github.com/sirupsen/logrus"
)
// Skyfire owns the scraper plus the most recent dump received from each data
// source (perceptor, hubs, kube) and the report most recently derived from
// those dumps.
type Skyfire struct {
	Scraper           *Scraper
	LastPerceptorDump *perceptor.Dump
	LastHubDumps      map[string]*hub.Dump // keyed by hub host
	LastKubeDump      *kube.Dump
	LastReport        *report.Report
	stop              <-chan struct{} // closed to shut down HandleScrapes
}
// NewSkyfire wires up a Skyfire instance: it builds the kube, perceptor and
// hub data sources from config, starts the scrape-handling goroutine, and
// registers the /latestreport handler on the default HTTP mux.
func NewSkyfire(config *Config, stop <-chan struct{}) (*Skyfire, error) {
	kubeDumper, err := kube.NewKubeClient(config.KubeClientConfig())
	if err != nil {
		return nil, errors.Trace(err)
	}
	perceptorDumper := perceptor.NewClient(config.PerceptorHost, config.PerceptorPort)
	// The hub password is only accepted via the environment, never from config.
	hubPassword, ok := os.LookupEnv(config.HubUserPasswordEnvVar)
	if !ok {
		return nil, fmt.Errorf("unable to get Hub password: environment variable %s not set", config.HubUserPasswordEnvVar)
	}
	// Factory used by the scraper to build one hub client per hub host.
	createHubClient := func(host string) (hub.ClientInterface, error) {
		return hub.NewHubDumper(host, config.HubUser, hubPassword)
	}
	kubeInterval := time.Duration(config.KubeDumpIntervalSeconds) * time.Second
	hubInterval := time.Duration(config.HubDumpPauseSeconds) * time.Second
	perceptorInterval := time.Duration(config.PerceptorDumpIntervalSeconds) * time.Second
	scraper := NewScraper(kubeDumper, kubeInterval, createHubClient, hubInterval, perceptorDumper, perceptorInterval, stop)
	skyfire := &Skyfire{
		Scraper:           scraper,
		LastPerceptorDump: nil,
		LastHubDumps:      map[string]*hub.Dump{},
		LastKubeDump:      nil,
		LastReport:        nil,
		stop:              stop}
	// HandleScrapes runs until stop is closed.
	go skyfire.HandleScrapes()
	http.HandleFunc("/latestreport", skyfire.LatestReportHandler())
	return skyfire, nil
}
// SetHubs forwards the list of hub hosts to the scraper.
func (sf *Skyfire) SetHubs(hosts []string) {
	sf.Scraper.SetHubs(hosts)
}
// LatestReportHandler returns an HTTP handler that serves the most recently
// built report (which may be nil before the first BuildReport) as indented
// JSON.
func (sf *Skyfire) LatestReportHandler() func(http.ResponseWriter, *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		log.Infof("received latest report request")
		bytes, err := json.MarshalIndent(sf.LastReport, "", " ")
		if err != nil {
			recordError("unable to marshal report")
			// Use the named constant rather than the magic number 500.
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		recordEvent("latest report handler")
		fmt.Fprint(w, string(bytes))
	}
}
// HandleScrapes consumes dumps from the scraper's channels, caches the most
// recent dump of each kind, and rebuilds the report after every update.
// It runs until the stop channel is closed.
func (sf *Skyfire) HandleScrapes() {
	for {
		select {
		case <-sf.stop:
			return
		case dump := <-sf.Scraper.HubDumps:
			// Hub dumps are cached per hub host.
			sf.LastHubDumps[dump.host] = dump.dump
		case k := <-sf.Scraper.KubeDumps:
			sf.LastKubeDump = k
		case p := <-sf.Scraper.PerceptorDumps:
			sf.LastPerceptorDump = p
		}
		// Rebuild after any of the three sources produced new data.
		sf.BuildReport()
	}
}
// BuildReport regenerates LastReport from the cached dumps and publishes the
// report's problem counts as metrics. It is a no-op (recorded as an error)
// until both a perceptor dump and a kube dump have arrived; hub dumps are
// not required.
func (sf *Skyfire) BuildReport() {
	if sf.LastPerceptorDump == nil {
		recordError("unable to generate report: perceptor dump is nil")
		log.Warnf("unable to generate report: perceptor dump is nil")
		return
	}
	if sf.LastKubeDump == nil {
		recordError("unable to generate report: kube dump is nil")
		log.Warnf("unable to generate report: kube dump is nil")
		return
	}
	dump := report.NewDump(sf.LastKubeDump, sf.LastPerceptorDump, sf.LastHubDumps)
	sf.LastReport = report.NewReport(dump)
	IssueReportMetrics(sf.LastReport)
	recordEvent("built report")
	log.Infof("successfully built report")
}
// IssueReportMetrics emits the problem-count metrics for every section of
// the report.
func IssueReportMetrics(report *report.Report) {
	IssueHubReportMetrics(report.Hubs)
	IssueKubeReportMetrics(report.Kube)
	IssuePerceptorHubMetrics(report.PerceptorHub)
	IssueKubePerceptorReportMetrics(report.KubePerceptor)
}
// IssueHubReportMetrics records the per-hub problem counts, prefixing each
// metric name with the hub host.
func IssueHubReportMetrics(reports map[string]*report.HubReport) {
	// The loop variable is named hubReport so it does not shadow the imported
	// report package (the original used `report`, which compiled but was
	// confusing and blocked any use of the package inside the loop).
	for host, hubReport := range reports {
		recordReportProblem(fmt.Sprintf("%s-hub_projects_multiple_versions", host), len(hubReport.ProjectsMultipleVersions))
		recordReportProblem(fmt.Sprintf("%s-hub_versions_multiple_code_locations", host), len(hubReport.VersionsMultipleCodeLocations))
		recordReportProblem(fmt.Sprintf("%s-hub_code_locations_multiple_scan_summaries", host), len(hubReport.CodeLocationsMultipleScanSummaries))
	}
}
// IssueKubeReportMetrics records the kube-only problem counts.
// Note: the parameter shadows the imported report package inside the body.
func IssueKubeReportMetrics(report *report.KubeReport) {
	recordReportProblem("kube_unparseable_images", len(report.UnparseableImages))
	recordReportProblem("kube_partially_annotated_pods", len(report.PartiallyAnnotatedPods))
	recordReportProblem("kube_partially_labeled_pods", len(report.PartiallyLabeledPods))
	recordReportProblem("kube_unanalyzeable_pods", len(report.UnanalyzeablePods))
}
// IssueKubePerceptorReportMetrics records the kube-vs-perceptor consistency
// problem counts (items present on only one side, conflicting metadata).
// Note: the parameter shadows the imported report package inside the body.
func IssueKubePerceptorReportMetrics(report *report.KubePerceptorReport) {
	recordReportProblem("kube-perceptor_images_just_in_kube", len(report.JustKubeImages))
	recordReportProblem("kube-perceptor_pods_just_in_kube", len(report.JustKubePods))
	recordReportProblem("kube-perceptor_images_just_in_perceptor", len(report.JustPerceptorImages))
	recordReportProblem("kube-perceptor_pods_just_in_perceptor", len(report.JustPerceptorPods))
	recordReportProblem("kube-perceptor_incorrect_pod_annotations", len(report.ConflictingAnnotationsPods))
	recordReportProblem("kube-perceptor_incorrect_pod_labels", len(report.ConflictingLabelsPods))
	recordReportProblem("kube-perceptor_finished_pods_just_kube", len(report.FinishedJustKubePods))
	recordReportProblem("kube-perceptor_finished_pods_just_perceptor", len(report.FinishedJustPerceptorPods))
}
// IssuePerceptorHubMetrics records the perceptor-vs-hub consistency problem
// counts (images present on only one side).
// Note: the parameter shadows the imported report package inside the body.
func IssuePerceptorHubMetrics(report *report.PerceptorHubReport) {
	recordReportProblem("perceptor-hub_images_just_in_hub", len(report.JustHubImages))
	recordReportProblem("perceptor-hub_images_just_in_perceptor", len(report.JustPerceptorImages))
}
|
package twine
import (
"bytes"
"testing"
)
// tests holds the published TWINE test vectors: the first entry uses an
// 80-bit (10-byte) key, the second a 128-bit (16-byte) key. Each pairs an
// 8-byte plaintext block with its expected ciphertext.
var tests = []struct {
	key    []byte
	plain  []byte
	cipher []byte
}{
	// http://jpn.nec.com/rd/crl/code/research/image/twine_SAC_full_v4.pdf
	{
		[]byte{0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99},
		[]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef},
		[]byte{0x7c, 0x1f, 0x0f, 0x80, 0xb1, 0xdf, 0x9c, 0x28},
	},
	{
		[]byte{0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF},
		[]byte{0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef},
		[]byte{0x97, 0x9F, 0xF9, 0xB3, 0x79, 0xB5, 0xA9, 0xB8},
	},
}
// TestTWINE checks the cipher against the published TWINE-80 and TWINE-128
// vectors: each plaintext must encrypt to the expected ciphertext and the
// ciphertext must decrypt back to the plaintext.
func TestTWINE(t *testing.T) {
	for _, tst := range tests {
		c, err := New(tst.key)
		// Previously the error from New was discarded (c, _ := ...), which
		// would turn a rejected key into a nil-pointer panic below instead of
		// a clear test failure.
		if err != nil {
			t.Fatalf("New failed for key % 02x: %v", tst.key, err)
		}
		var ct [8]byte
		c.Encrypt(ct[:], tst.plain[:])
		if !bytes.Equal(ct[:], tst.cipher) {
			t.Errorf("encrypt failed:\ngot : % 02x\nwant: % 02x", ct[:], tst.cipher)
		}
		var p [8]byte
		c.Decrypt(p[:], ct[:])
		if !bytes.Equal(p[:], tst.plain) {
			t.Errorf("decrypt failed:\ngot : % 02x\nwant: % 02x", p[:], tst.plain)
		}
	}
}
|
// Copyright © 2010-12 Qtrac Ltd.
//
// This program or package and any associated files are licensed under the
// Apache License, Version 2.0 (the "License"); you may not use these files
// except in compliance with the License. You can get a copy of the License
// at: http://www.apache.org/licenses/LICENSE-2.0.
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package stack
import "errors"
// Stack is a LIFO collection of arbitrary values built on top of a slice.
// Like other custom collections it implements Len() and Cap().
type Stack []interface{}

// Pop removes and returns the value on top of the stack.
// It returns an error when the stack is empty.
func (stack *Stack) Pop() (interface{}, error) {
	s := *stack
	n := len(s)
	if n == 0 {
		return nil, errors.New("can't Pop() an empty stack")
	}
	top := s[n-1]
	// Reslice in place: the backing array is reused, only the length shrinks.
	*stack = s[:n-1]
	return top, nil
}

// Push places x on top of the stack. A pointer receiver is required because
// append may grow (and therefore replace) the backing array.
func (stack *Stack) Push(x interface{}) {
	*stack = append(*stack, x)
}

// Top returns the value on top of the stack without removing it.
// It returns an error when the stack is empty.
func (stack Stack) Top() (interface{}, error) {
	if len(stack) == 0 {
		return nil, errors.New("can't Top() an empty stack")
	}
	return stack[len(stack)-1], nil
}

// Cap returns the capacity of the stack's backing storage.
func (stack Stack) Cap() int {
	return cap(stack)
}

// Len returns the number of values currently on the stack.
func (stack Stack) Len() int {
	return len(stack)
}

// IsEmpty reports whether the stack holds no values.
func (stack Stack) IsEmpty() bool {
	return len(stack) == 0
}
|
func isPalindrome(x int) bool {
if x < 0 {
return false
}
if x == 0 || x == x % 10{
return true
}
if x/10 == 0 {
return false
}
x_ary := []int{}
rightDigit := 0
for x > 0 {
rightDigit = x % 10
x = x / 10
x_ary = append(x_ary, rightDigit)
}
for i := 0; i <= len(x_ary) / 2; i++ {
if x_ary[i] != x_ary[len(x_ary) - 1 - i] {
return false
}
}
return true
}
|
package handler
import (
"testing"
"github.com/gin-gonic/gin"
)
// TestNewHandler checks that NewHandler accepts handler funcs of the expected
// gin signature, both with named and unnamed result parameters, without
// panicking.
func TestNewHandler(t *testing.T) {
	h := func(c *gin.Context) (resp Response, err *ErrResponse) {
		return nil, nil
	}
	NewHandler(h)
	// Same signature with unnamed results and a non-nil error response.
	h = func(c *gin.Context) (Response, *ErrResponse) {
		return nil, &ErrResponse{}
	}
	NewHandler(h)
}
|
package utils
import (
"fmt"
"unsafe"
)
// Float64Bits prints the raw IEEE-754 bit pattern of f in the requested base:
// d == 16 prints the 64-bit pattern as 16 zero-padded hex digits, d == 2
// prints it as 64 zero-padded binary digits. Any other base prints an error
// message.
func Float64Bits(f float64, d int) {
	// Reinterpret the float's bytes as a uint64 without numeric conversion.
	b := *(*uint64)(unsafe.Pointer(&f))
	switch d {
	case 16:
		fmt.Printf("浮点数%.1f的16进制表示是%016x\n", f, b)
	case 2:
		// Was %02b, which only guaranteed a minimum width of 2; pad to the
		// full 64 bits to mirror the zero-padded hex branch above.
		fmt.Printf("浮点数%.1f的2进制表示是%064b\n", f, b)
	default:
		fmt.Println("error decimal: ", d)
	}
}
|
package location
import (
"context"
"fmt"
"log"
"os"
"strings"
"time"
"github/thisissoon/Go-Cloud-Functions-Examples/functions/events/location/updateLocation/postcodes"
"github/thisissoon/Go-Cloud-Functions-Examples/functions/events/location/updateLocation/storage"
"cloud.google.com/go/firestore"
firebase "firebase.google.com/go"
"google.golang.org/genproto/googleapis/type/latlng"
)
// FirestoreEvent is the payload of a Firestore event: the document state
// before (OldValue) and after (Value) the write, plus the set of field paths
// that changed.
type FirestoreEvent struct {
	OldValue   FirestoreValue `json:"oldValue"`
	Value      FirestoreValue `json:"value"`
	UpdateMask struct {
		FieldPaths []string `json:"fieldPaths"`
	} `json:"updateMask"`
}
// FirestoreValue holds Firestore fields along with the document's resource
// name and timestamps.
type FirestoreValue struct {
	CreateTime time.Time `json:"createTime"`
	// Fields is the data for this value. The type depends on the format of your
	// database. Log an interface{} value and inspect the result to see a JSON
	// representation of your database fields. A nil pointer means the event
	// carried no document data.
	Fields *Fields `json:"fields"`
	Name   string  `json:"name"`
	UpdateTime time.Time `json:"updateTime"`
}
// Fields represents values from Firestore. The type definition depends on the
// format of your database: each field is wrapped in a struct naming its
// Firestore value kind (timestampValue, stringValue, geopointValue, ...).
type Fields struct {
	CreatedAt struct {
		TimestampValue time.Time `json:"timestampValue"`
	} `json:"createdAt"`
	Dob struct {
		TimestampValue time.Time `json:"timestampValue"`
	} `json:"dob"`
	Firstname struct {
		StringValue string `json:"stringValue"`
	} `json:"firstname"`
	Lastname struct {
		StringValue string `json:"stringValue"`
	} `json:"lastname"`
	Postcode struct {
		StringValue string `json:"stringValue"`
	} `json:"postcode"`
	Location struct {
		GeopointValue latlng.LatLng `json:"geopointValue"`
	} `json:"location"`
}
// projectID comes from GCLOUD_PROJECT, which is automatically set by the
// Cloud Functions runtime.
var projectID = os.Getenv("GCLOUD_PROJECT")

// client is a Firestore client, reused between function invocations; it is
// initialized once in init().
var client *firestore.Client
// init creates the shared Firestore client once per cold start; any failure
// here is fatal because the function cannot operate without the client.
func init() {
	// Use the application default credentials.
	conf := &firebase.Config{ProjectID: projectID}
	// Use context.Background() because the app/client should persist across
	// invocations.
	ctx := context.Background()
	app, err := firebase.NewApp(ctx, conf)
	if err != nil {
		log.Fatalf("firebase.NewApp: %v", err)
	}
	client, err = app.Firestore(ctx)
	if err != nil {
		log.Fatalf("app.Firestore: %v", err)
	}
}
// runUpdateLocation handles a Firestore document-write event: when the
// document's postcode changed, it looks up the postcode's coordinates via l
// and writes them back to the document's "location" field via s.
// The lookup (l) and storage (s) dependencies are injected so the function
// can be unit tested.
func runUpdateLocation(
	ctx context.Context,
	e FirestoreEvent,
	l postcodes.Location,
	s storage.Storage,
) error {
	// Check if there's any data to process.
	// This works because Fields is a pointer.
	if e.Value.Fields == nil {
		return nil
	}
	// If old values don't exist (document creation), initialize to empty values.
	if e.OldValue.Fields == nil {
		e.OldValue.Fields = &Fields{}
	}
	// e.Value.Name is ".../documents/<collection>/<doc path>"; extract the
	// collection and the (possibly nested) document path.
	fullPath := strings.Split(e.Value.Name, "/documents/")[1]
	pathParts := strings.Split(fullPath, "/")
	collection := pathParts[0]
	doc := strings.Join(pathParts[1:], "/")
	// Only perform the lookup when the postcode actually changed.
	oldPostcode := e.OldValue.Fields.Postcode.StringValue
	newPostcode := e.Value.Fields.Postcode.StringValue
	if oldPostcode != newPostcode {
		// Get location data for the new postcode.
		newLocation, err := l.GetLatLong(newPostcode)
		if err != nil {
			return err
		}
		// Added the trailing newline the original log line was missing.
		fmt.Printf("for postcode: %v got lat long: %+v\n", newPostcode, newLocation)
		// Update the database with only the new location; Set would overwrite
		// the whole document.
		update := firestore.Update{
			Path:  "location",
			Value: newLocation,
		}
		err = s.UpdateDoc(ctx, collection, doc, update)
		if err != nil {
			// Wrap with %w so callers can unwrap the underlying storage error.
			return fmt.Errorf("updating document: %w", err)
		}
	}
	return nil
}
// UpdateLocation is the Cloud Functions entry point. It binds the production
// postcode lookup and the Firestore-backed store to runUpdateLocation.
func UpdateLocation(ctx context.Context, e FirestoreEvent) error {
	p := postcodes.Postcodes{}
	s := storage.Store{
		Client: client,
	}
	return runUpdateLocation(ctx, e, p, s)
}
|
/*
MIT License
Copyright (c) 2020 Operator Foundation
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package modes
import (
"errors"
"fmt"
"io"
"net"
"net/url"
"os"
locketgo "github.com/OperatorFoundation/locket-go"
commonLog "github.com/OperatorFoundation/shapeshifter-dispatcher/common/log"
"github.com/OperatorFoundation/shapeshifter-dispatcher/common/pt_extras"
"github.com/kataras/golog"
)
// ClientSetupTCP starts a TCP listener for each named transport and hands
// accepted connections to clientHandler. It reports whether at least one
// listener launched successfully.
// NOTE(review): every transport listens on the same socksAddr, so when
// len(names) > 1 only the first Listen can succeed — confirm this is intended.
func ClientSetupTCP(socksAddr string, ptClientProxy *url.URL, names []string, options string, clientHandler ClientHandlerTCP, enableLocket bool, stateDir string) (launched bool) {
	// Launch each of the client listeners.
	for _, name := range names {
		ln, err := net.Listen("tcp", socksAddr)
		if err != nil {
			fmt.Fprintf(os.Stderr, "failed to listen %s %s", name, err.Error())
			golog.Errorf("failed to listen %s %s", name, err.Error())
			continue
		}
		go clientAcceptLoop(name, options, ln, ptClientProxy, clientHandler, enableLocket, stateDir)
		golog.Infof("%s - registered listener: %s", name, ln.Addr())
		launched = true
	}
	return
}
// clientAcceptLoop accepts connections on ln forever, optionally wrapping
// each in a Locket connection, and dispatches each one to clientHandler in
// its own goroutine. It returns only on a permanent listener error.
func clientAcceptLoop(name string, options string, ln net.Listener, proxyURI *url.URL, clientHandler ClientHandlerTCP, enableLocket bool, stateDir string) {
	for {
		conn, err := ln.Accept()
		if err != nil {
			// Give up only on permanent errors; transient accept failures just
			// drop that connection. (net.Error.Temporary is deprecated since
			// Go 1.18 — candidate for a follow-up.)
			if e, ok := err.(net.Error); ok && !e.Temporary() {
				fmt.Fprintf(os.Stderr, "Fatal listener error: %s", err.Error())
				golog.Errorf("Fatal listener error: %s", err.Error())
				return
			}
			golog.Warnf("Failed to accept connection: %s", err.Error())
			continue
		}
		if enableLocket {
			locketConn, err := locketgo.NewLocketConn(conn, stateDir, "DispatcherClient")
			if err != nil {
				// NOTE(review): this return stops accepting for ALL future
				// clients after a single Locket failure — confirm that
				// `continue` wasn't intended here.
				golog.Error("client failed to enable Locket")
				conn.Close()
				return
			}
			conn = locketConn
		}
		go clientHandler(name, options, conn, proxyURI, enableLocket, stateDir)
	}
}
// ServerSetupTCP launches one listener goroutine per configured bindaddr.
// Each goroutine repeatedly (re)creates its transport listener, serves it via
// ServerAcceptLoop, and closes it when the accept loop returns. It reports
// whether the launch loop completed; a listener-arguments parse error aborts
// the whole setup.
func ServerSetupTCP(ptServerInfo pt_extras.ServerInfo, stateDir string, options string, serverHandler ServerHandler, enableLocket bool) (launched bool) {
	// Launch each of the server listeners.
	for _, bindaddr := range ptServerInfo.Bindaddrs {
		name := bindaddr.MethodName
		// Deal with arguments.
		listen, parseError := pt_extras.ArgsToListener(name, stateDir, options, enableLocket, stateDir)
		if parseError != nil {
			return false
		}
		go func() {
			// Re-listen whenever the accept loop ends; break only if the
			// listener itself cannot be created.
			for {
				transportLn, LnError := listen()
				if LnError != nil {
					print(LnError)
					break
				}
				print(name)
				print(" listening on ")
				println(bindaddr.Addr.String())
				golog.Infof("%s - registered listener: %s", name, commonLog.ElideAddr(bindaddr.Addr.String()))
				ServerAcceptLoop(name, transportLn, &ptServerInfo, serverHandler, enableLocket, stateDir)
				transportLnErr := transportLn.Close()
				if transportLnErr != nil {
					fmt.Fprintf(os.Stderr, "Listener close error: %s", transportLnErr.Error())
					golog.Errorf("Listener close error: %s", transportLnErr.Error())
				}
			}
		}()
		launched = true
	}
	return
}
// CopyLoop shuttles bytes between the client (SOCKS/ORPort) connection and
// the server (pt) connection in both directions, returning once both
// directions have finished. Both connections are closed before returning,
// and the last copy error observed (if any) is returned.
func CopyLoop(client net.Conn, server net.Conn) error {
	if server == nil {
		println("--> Copy loop has a nil server connection.")
		fmt.Fprintln(os.Stderr, "--> Copy loop has a nil server connection (b).")
		return errors.New("copy loop has a nil connection (b)")
	}
	if client == nil {
		println("--> Copy loop has a nil client connection.")
		fmt.Fprintln(os.Stderr, "--> Copy loop has a nil connection (a).")
		return errors.New("copy loop has a nil connection (a)")
	}
	// Note: b is always the pt connection. a is the SOCKS/ORPort connection.
	okToCloseClientChannel := make(chan bool)
	okToCloseServerChannel := make(chan bool)
	// Buffered (one slot per copier): each copier sends its done-signal before
	// its error, and this loop exits once both done-signals arrive. With an
	// unbuffered channel a copier could block forever on the error send,
	// leaking the goroutine.
	copyErrorChannel := make(chan error, 2)
	go CopyClientToServer(client, server, okToCloseClientChannel, copyErrorChannel)
	go CopyServerToClient(client, server, okToCloseServerChannel, copyErrorChannel)
	serverRunning := true
	clientRunning := true
	var copyError error
	for clientRunning || serverRunning {
		select {
		case <-okToCloseClientChannel:
			clientRunning = false
		case <-okToCloseServerChannel:
			serverRunning = false
		case copyError = <-copyErrorChannel:
			// Was golog.Errorf("Error while copying", copyError): the error
			// argument had no corresponding format verb (go vet flags this).
			golog.Errorf("Error while copying: %v", copyError)
		}
	}
	client.Close()
	server.Close()
	return copyError
}
// CopyClientToServer copies bytes from the client connection into the server
// connection until EOF or error, signals completion on okToCloseClient, and
// then forwards any copy error to errorChannel.
func CopyClientToServer(client net.Conn, server net.Conn, okToCloseClient chan bool, errorChannel chan error) {
	_, copyError := io.Copy(server, client)
	okToCloseClient <- true
	if copyError != nil {
		println("!! CopyClientToServer received an error from io.Copy: ")
		// Was println(copyError): the builtin prints an interface value as two
		// raw hex words, not the message. Print the error text instead.
		println(copyError.Error())
		errorChannel <- copyError
	}
}
// CopyServerToClient copies bytes from the server connection into the client
// connection until EOF or error, signals completion on okToCloseServer, and
// then forwards any copy error to errorChannel.
func CopyServerToClient(client net.Conn, server net.Conn, okToCloseServer chan bool, errorChannel chan error) {
	_, copyError := io.Copy(client, server)
	okToCloseServer <- true
	if copyError != nil {
		// The error text is elided to avoid leaking addresses into logs.
		fmt.Printf("\n!! CopyServerToClient received an error: %s", commonLog.ElideError(copyError))
		errorChannel <- copyError
	}
}
|
package client
import (
"context"
"fmt"
"io"
"github.com/qnib/metahub/pkg/registry"
"github.com/docker/distribution"
"github.com/docker/distribution/reference"
registryClient "github.com/docker/distribution/registry/client"
"github.com/opencontainers/go-digest"
manifestListSchema "github.com/docker/distribution/manifest/manifestlist"
manifestSchema "github.com/docker/distribution/manifest/schema2"
)
// https://docs.docker.com/registry/spec/manifest-v2-2/#image-manifest-field-descriptions
func init() {
	// Blank references keep the manifest schema imports from being removed by
	// tooling; the packages are presumably imported for their init-time
	// manifest-format registration — confirm against the distribution docs.
	_ = manifestListSchema.SchemaVersion
	_ = manifestSchema.SchemaVersion
}
// service is an HTTP-backed implementation of registry.Service.
type service struct {
	serverBase string // base URL of the backend registry
}
// NewService returns a new HTTP client registry service backed by the public
// Docker Hub registry endpoint.
func NewService() registry.Service {
	return &service{
		serverBase: "https://registry-1.docker.io",
	}
}
// newRepositoryClient builds an authenticated distribution repository client
// for the given repository name against the service's backend registry.
func (s *service) newRepositoryClient(repositoryString string) (distribution.Repository, error) {
	//TODO: add "library" segment?
	repositoryName, err := reference.WithName(repositoryString)
	if err != nil {
		return nil, fmt.Errorf("error parsing repository name: %v", err)
	}
	// get backend blob service
	// Fix: use the service's configured base URL; the bare identifier
	// `serverBase` used previously is not defined in this scope.
	transportAuth := backendAuthTransport(s.serverBase, repositoryString)
	repositoryClient, err := registryClient.NewRepository(repositoryName, s.serverBase, transportAuth)
	if err != nil {
		return nil, fmt.Errorf("error creating repository object: %v", err)
	}
	return repositoryClient, nil
}
// GetBlob opens a download stream for the blob with digest d in the given
// repository, returning the stream along with the blob's size and media type.
// The caller is responsible for closing the returned ReadCloser.
func (s *service) GetBlob(ctx context.Context, repositoryString string, d digest.Digest) (io.ReadCloser, registry.Blob, error) {
	var blob registry.Blob
	// get backend repository blobs service
	repositoryClient, err := s.newRepositoryClient(repositoryString)
	if err != nil {
		// Fixed message: this failure is creating the repository client, not
		// loading blob stats (the previous text was copy-pasted from below).
		return nil, blob, fmt.Errorf("error creating repository client: %v", err)
	}
	blobService := repositoryClient.Blobs(ctx)
	// get blob stats (size and media type) before opening the stream
	blobStats, err := blobService.Stat(ctx, d)
	if err != nil {
		return nil, blob, fmt.Errorf("error loading blob stats from backend: %v", err)
	}
	blob.Size = blobStats.Size
	blob.MediaType = blobStats.MediaType
	// open blob content stream
	blobContentReader, err := blobService.Open(ctx, d)
	if err != nil {
		return nil, blob, fmt.Errorf("error getting blob stream from backend: %v", err)
	}
	return blobContentReader, blob, nil
}
// GetManifest fetches a manifest from the backend registry. referenceString
// may be either a digest or a tag: if it parses as a digest it is used
// directly, otherwise it is treated as a tag.
func (s *service) GetManifest(ctx context.Context, repositoryString string, referenceString string) (registry.Manifest, error) {
	var m registry.Manifest
	// Decide whether the reference is a digest or a tag.
	var tag distribution.ManifestServiceOption
	var d digest.Digest
	{
		dgst, err := digest.Parse(referenceString)
		if err != nil {
			// Not a valid digest, so treat it as a tag.
			tag = distribution.WithTag(referenceString)
		} else {
			d = dgst
		}
	}
	// get backend manifest service
	repositoryClient, err := s.newRepositoryClient(repositoryString)
	if err != nil {
		// NOTE(review): message copy-pasted from the blob path — this is a
		// repository-client failure, not a blob-stats failure.
		return m, fmt.Errorf("error loading blob stats from backend: %v", err)
	}
	manifestService, err := repositoryClient.Manifests(ctx)
	if err != nil {
		// NOTE(review): message copy-pasted — this is a manifest-service
		// failure, not repository creation.
		return m, fmt.Errorf("error creating repository object: %v", err)
	}
	// call backend manifest
	var manifest distribution.Manifest
	if tag == nil {
		manifest, err = manifestService.Get(ctx, d)
	} else {
		// Tag lookup: d is the zero digest here and the WithTag option
		// selects the manifest.
		manifest, err = manifestService.Get(ctx, d, tag)
	}
	if err != nil {
		return m, fmt.Errorf("error getting backend manifest: %v", err)
	}
	mediaType, payload, err := manifest.Payload()
	if err != nil {
		return m, fmt.Errorf("error getting manifest payload: %v", err)
	}
	m = registry.Manifest{
		Data:        payload,
		ContentType: mediaType,
	}
	return m, nil
}
|
package requests
// BaseRequest is a common base embedded by most requests.
type BaseRequest struct {
	Action string `json:"action" mapstructure:"action"`
	Wallet string `json:"wallet" mapstructure:"wallet"`
	// BpowKey is optional; a nil pointer is omitted from the JSON encoding.
	BpowKey *string `json:"bpow_key,omitempty" mapstructure:"bpow_key,omitempty"`
}
|
package schema
// AssetType defines an asset type.
type AssetType string

// RelationKeyType defines the kind of relation between assets.
type RelationKeyType string

// RelationType describes a directed relation: an asset of FromType relates
// to an asset of ToType via the Type relation key.
type RelationType struct {
	FromType AssetType       `json:"from_type"`
	Type     RelationKeyType `json:"relation_type"`
	ToType   AssetType       `json:"to_type"`
}
|
package rsa
/*
Reference: https://medium.com/better-programming/build-an-rsa-asymmetric-cryptography-generator-in-go-d202b18bcfd0
Do take sometime to read the blog post from the above link, the below package code combines PEM and writeFile functions into one function.
*/
import (
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"io/ioutil"
)
// generateRSAKeyPair creates a fresh RSA key pair of the given bit size and
// returns the private key together with a pointer to its embedded public key.
func generateRSAKeyPair(size int) (privKey *rsa.PrivateKey, pubKey *rsa.PublicKey) {
	// The error is ignored, matching the original contract; rsa.GenerateKey
	// only fails when the random source fails.
	key, _ := rsa.GenerateKey(rand.Reader, size)
	return key, &key.PublicKey
}
// ExportRSAKeysToFile generates a fresh RSA key pair of keySize bits and
// writes the private key (PKCS#1) and the public key (PKIX) as PEM files at
// privFileName and pubFileName respectively. Both files are created with
// mode 0600. The first failure aborts and is returned.
func ExportRSAKeysToFile(pubFileName, privFileName string, keySize int) error {
	priv, pub := generateRSAKeyPair(keySize)
	// Encode and persist the private key first.
	privPEM := pem.EncodeToMemory(&pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(priv),
	})
	if err := ioutil.WriteFile(privFileName, privPEM, 0600); err != nil {
		return err
	}
	// Marshal the public key; unlike the PKCS#1 private encoding this can fail.
	der, err := x509.MarshalPKIXPublicKey(pub)
	if err != nil {
		return err
	}
	pubPEM := pem.EncodeToMemory(&pem.Block{
		Type:  "RSA PUBLIC KEY",
		Bytes: der,
	})
	return ioutil.WriteFile(pubFileName, pubPEM, 0600)
}
|
package testutil
import (
"crypto/md5"
"database/sql"
"fmt"
"io/ioutil"
"log"
"os"
"strings"
"cinemo.com/shoping-cart/framework/db"
"cinemo.com/shoping-cart/pkg/projectpath"
"cinemo.com/shoping-cart/pkg/trace"
"github.com/golang-migrate/migrate/v4"
// include migrate file driver
"github.com/golang-migrate/migrate/v4/database/postgres"
// include migrate file driver
_ "github.com/golang-migrate/migrate/v4/source/file"
// import pq driver
_ "github.com/lib/pq"
)
// PrepareDatabase creates a unique, empty schema (named from the md5 of the
// calling test's function name), runs all migrations into it, and returns a
// connection pool whose search_path points at that schema, plus the schema
// name. Setup failures are fatal to the test process via log.Fatalf.
func PrepareDatabase(traceInfo trace.Info) (*sql.DB, string, error) {
	// Deterministic per-test schema name so reruns reuse (and first drop)
	// the same schema.
	schema := "schema_" + fmt.Sprintf("%x", md5.Sum([]byte(traceInfo.FunctionName)))
	migrateDbConnPool := db.InitDatabase(os.Getenv("DATABASE_URL"))
	defer func() {
		migrateDbConnPool.Close()
	}()
	// Best-effort drop of a leftover schema from a previous run.
	migrateDbConnPool.Exec("DROP SCHEMA IF EXISTS " + schema + " CASCADE")
	_, err := migrateDbConnPool.Exec("CREATE SCHEMA " + schema)
	if err != nil {
		log.Fatalf("error: %s", err.Error())
	}
	dbConnPool := db.InitDatabase(os.Getenv("DATABASE_URL") + "&search_path=" + schema)
	driver, err := postgres.WithInstance(dbConnPool, &postgres.Config{})
	if err != nil {
		log.Fatalf("=====error: %s", err.Error())
		return nil, schema, err
	}
	m, err := migrate.NewWithDatabaseInstance(
		"file://"+projectpath.Root+"/data/migrations",
		"postgres", driver)
	if err != nil {
		log.Fatalf("=====error: %s", err.Error())
		return nil, schema, err
	}
	// Previously the error from m.Up() was silently discarded, so a broken
	// migration only surfaced as confusing failures later in the test.
	// migrate.ErrNoChange just means the schema is already up to date.
	if err := m.Up(); err != nil && err != migrate.ErrNoChange {
		log.Fatalf("=====migration error: %s", err.Error())
		return nil, schema, err
	}
	return dbConnPool, schema, nil
}
// LoadFixture reads the SQL file at fixturePath and executes each
// semicolon-separated statement against dbConnPool. An empty fixturePath is
// a no-op. The first failing read or statement aborts and returns its error.
func LoadFixture(dbConnPool *sql.DB, fixturePath string) error {
	if fixturePath == "" {
		return nil
	}
	input, err := ioutil.ReadFile(fixturePath)
	if err != nil {
		return err
	}
	for _, query := range strings.Split(string(input), ";") {
		// Skip the empty fragment produced by a trailing semicolon (and any
		// blank fragments between consecutive semicolons); previously these
		// were sent to the database as empty statements.
		if strings.TrimSpace(query) == "" {
			continue
		}
		if _, err = dbConnPool.Exec(query); err != nil {
			return err
		}
	}
	return nil
}
|
package config
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestGetRedis ensures GetRedis can be called without panicking; the deferred
// recover converts any panic into a test failure.
func TestGetRedis(t *testing.T) {
	defer func() {
		if r := recover(); r != nil {
			t.Fatal("Panic error:", r)
		}
	}()
	_ = GetRedis()
	// Reaching this line means no panic occurred.
	assert.True(t, true)
}
|
package main
import (
"fmt"
)
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
// ListNode is one node of a singly-linked list; each node holds one decimal
// digit of a number stored least-significant digit first.
type ListNode struct {
	Val  int
	Next *ListNode
}

// addTwoNumbers adds two non-negative integers whose decimal digits are
// stored in reverse order, one digit per node, and returns the sum in the
// same representation. It returns nil when both inputs are nil.
func addTwoNumbers(l1 *ListNode, l2 *ListNode) *ListNode {
	if l1 == nil && l2 == nil {
		return nil
	}
	dummy := &ListNode{} // sentinel; the real result starts at dummy.Next
	tail := dummy
	carry := 0
	for l1 != nil || l2 != nil || carry > 0 {
		sum := carry
		if l1 != nil {
			sum += l1.Val
			l1 = l1.Next
		}
		if l2 != nil {
			sum += l2.Val
			l2 = l2.Next
		}
		tail.Next = &ListNode{Val: sum % 10}
		tail = tail.Next
		carry = sum / 10
	}
	return dummy.Next
}
|
// Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packet
import (
"errors"
"math"
)
// tcpHeaderLength is the length in bytes of a TCP header with no options.
const tcpHeaderLength = 20

// maxPacketLength is the largest length that all headers support.
// IPv4 headers using uint16 for this forces an upper bound of 64KB.
const maxPacketLength = math.MaxUint16

var (
	// errSmallBuffer: a destination buffer is too small for the data.
	errSmallBuffer = errors.New("buffer too small")
	// errLargePacket: a packet is too large (exceeds maxPacketLength).
	errLargePacket = errors.New("packet too large")
)
// Header is a packet header capable of marshaling itself into a byte buffer.
type Header interface {
	// Len returns the length of the header after marshaling.
	Len() int
	// Marshal serializes the header into buf in wire format.
	// It clobbers the header region, which is the first h.Length() bytes of buf.
	// It explicitly initializes every byte of the header region,
	// so pre-zeroing it on reuse is not required. It does not allocate memory.
	// It fails if and only if len(buf) < Length().
	Marshal(buf []byte) error
	// ToResponse transforms the header into one for a response packet.
	// For instance, this swaps the source and destination IPs.
	ToResponse()
}

// Generate builds a new packet: the marshaled header h immediately followed
// by payload. Unlike Header.Marshal, this does allocate memory.
func Generate(h Header, payload []byte) []byte {
	headerLen := h.Len()
	packet := make([]byte, headerLen+len(payload))
	h.Marshal(packet)
	copy(packet[headerLen:], payload)
	return packet
}
|
package collectors
import (
"time"
"github.com/cloudfoundry-community/go-cfclient"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
)
// RoutesCollector is a Prometheus collector that exports labeled Cloud
// Foundry route information plus scrape bookkeeping metrics (scrape counts,
// error counts, last-error flag, and timing).
type RoutesCollector struct {
	namespace   string
	environment string
	deployment  string
	// cfClient is the Cloud Foundry API client used to list routes.
	cfClient *cfclient.Client
	// routeInfoMetric carries one constant-'1' sample per known route,
	// labeled with the route's identifiers.
	routeInfoMetric *prometheus.GaugeVec
	// Scrape bookkeeping metrics.
	routesScrapesTotalMetric              prometheus.Counter
	routesScrapeErrorsTotalMetric         prometheus.Counter
	lastRoutesScrapeErrorMetric           prometheus.Gauge
	lastRoutesScrapeTimestampMetric       prometheus.Gauge
	lastRoutesScrapeDurationSecondsMetric prometheus.Gauge
}
// NewRoutesCollector builds a RoutesCollector whose metrics are namespaced
// under namespace and carry constant environment/deployment labels.
func NewRoutesCollector(
	namespace string,
	environment string,
	deployment string,
	cfClient *cfclient.Client,
) *RoutesCollector {
	c := &RoutesCollector{
		namespace:   namespace,
		environment: environment,
		deployment:  deployment,
		cfClient:    cfClient,
	}

	c.routeInfoMetric = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Namespace:   namespace,
			Subsystem:   "route",
			Name:        "info",
			Help:        "Labeled Cloud Foundry Route information with a constant '1' value.",
			ConstLabels: prometheus.Labels{"environment": environment, "deployment": deployment},
		},
		[]string{"route_id", "route_host", "route_path", "domain_id", "space_id", "service_instance_id"},
	)

	c.routesScrapesTotalMetric = prometheus.NewCounter(
		prometheus.CounterOpts{
			Namespace:   namespace,
			Subsystem:   "routes_scrapes",
			Name:        "total",
			Help:        "Total number of scrapes for Cloud Foundry Routes.",
			ConstLabels: prometheus.Labels{"environment": environment, "deployment": deployment},
		},
	)

	c.routesScrapeErrorsTotalMetric = prometheus.NewCounter(
		prometheus.CounterOpts{
			Namespace:   namespace,
			Subsystem:   "routes_scrape_errors",
			Name:        "total",
			Help:        "Total number of scrape error of Cloud Foundry Routes.",
			ConstLabels: prometheus.Labels{"environment": environment, "deployment": deployment},
		},
	)

	c.lastRoutesScrapeErrorMetric = prometheus.NewGauge(
		prometheus.GaugeOpts{
			Namespace:   namespace,
			Subsystem:   "",
			Name:        "last_routes_scrape_error",
			Help:        "Whether the last scrape of Routes metrics from Cloud Foundry resulted in an error (1 for error, 0 for success).",
			ConstLabels: prometheus.Labels{"environment": environment, "deployment": deployment},
		},
	)

	c.lastRoutesScrapeTimestampMetric = prometheus.NewGauge(
		prometheus.GaugeOpts{
			Namespace:   namespace,
			Subsystem:   "",
			Name:        "last_routes_scrape_timestamp",
			Help:        "Number of seconds since 1970 since last scrape of Routes metrics from Cloud Foundry.",
			ConstLabels: prometheus.Labels{"environment": environment, "deployment": deployment},
		},
	)

	c.lastRoutesScrapeDurationSecondsMetric = prometheus.NewGauge(
		prometheus.GaugeOpts{
			Namespace:   namespace,
			Subsystem:   "",
			Name:        "last_routes_scrape_duration_seconds",
			Help:        "Duration of the last scrape of Routes metrics from Cloud Foundry.",
			ConstLabels: prometheus.Labels{"environment": environment, "deployment": deployment},
		},
	)

	return c
}
// Collect scrapes route data, updates the bookkeeping metrics, and emits
// everything on ch. Emission order matches the original implementation.
func (c RoutesCollector) Collect(ch chan<- prometheus.Metric) {
	started := time.Now()

	var scrapeError float64 // 1 on scrape failure, 0 on success
	if err := c.reportRoutesMetrics(ch); err != nil {
		scrapeError = 1
		c.routesScrapeErrorsTotalMetric.Inc()
	}
	c.routesScrapeErrorsTotalMetric.Collect(ch)

	c.routesScrapesTotalMetric.Inc()
	c.routesScrapesTotalMetric.Collect(ch)

	c.lastRoutesScrapeErrorMetric.Set(scrapeError)
	c.lastRoutesScrapeErrorMetric.Collect(ch)

	c.lastRoutesScrapeTimestampMetric.Set(float64(time.Now().Unix()))
	c.lastRoutesScrapeTimestampMetric.Collect(ch)

	c.lastRoutesScrapeDurationSecondsMetric.Set(time.Since(started).Seconds())
	c.lastRoutesScrapeDurationSecondsMetric.Collect(ch)
}
// Describe forwards the descriptors of every metric owned by the collector.
func (c RoutesCollector) Describe(ch chan<- *prometheus.Desc) {
	for _, metric := range []prometheus.Collector{
		c.routeInfoMetric,
		c.routesScrapesTotalMetric,
		c.routesScrapeErrorsTotalMetric,
		c.lastRoutesScrapeErrorMetric,
		c.lastRoutesScrapeTimestampMetric,
		c.lastRoutesScrapeDurationSecondsMetric,
	} {
		metric.Describe(ch)
	}
}
// reportRoutesMetrics lists all routes from Cloud Foundry and emits one
// labeled info sample per route. It returns the listing error, if any.
func (c RoutesCollector) reportRoutesMetrics(ch chan<- prometheus.Metric) error {
	c.routeInfoMetric.Reset()

	routes, err := c.cfClient.ListRoutes()
	if err != nil {
		log.Errorf("Error while listing routes: %v", err)
		return err
	}

	for _, r := range routes {
		labels := []string{
			r.Guid,
			r.Host,
			r.Path,
			r.DomainGuid,
			r.SpaceGuid,
			r.ServiceInstanceGuid,
		}
		c.routeInfoMetric.WithLabelValues(labels...).Set(1)
	}

	c.routeInfoMetric.Collect(ch)
	return nil
}
|
package game
import (
"errors"
"awesome-dragon.science/go/goGoGameBot/internal/command"
"awesome-dragon.science/go/goGoGameBot/internal/config/tomlconf"
"awesome-dragon.science/go/goGoGameBot/pkg/format"
)
// createCommandCallback returns a command.Callback that executes the given
// format against the incoming command data and writes the rendered bytes to
// the game. Execution and write errors are reported to the manager.
//
// Fix: the parameter was renamed from fmt to f — the original name shadowed
// the standard library fmt package inside the closure.
func (g *Game) createCommandCallback(f format.Format) command.Callback {
	return func(data *command.Data) {
		res, err := f.ExecuteBytes(data)
		if err != nil {
			g.manager.Error(err)
			return
		}
		if _, err := g.Write(res); err != nil {
			g.manager.Error(err)
		}
	}
}
// registerCommand validates conf, compiles its format string, and registers
// it as a subcommand of the game. It errors on an empty name, an empty help
// string, or a format that fails to compile.
//
// Fix: the local variable was renamed from fmt to cmdFormat — the original
// name shadowed the standard library fmt package.
func (g *Game) registerCommand(name string, conf tomlconf.Command) error {
	if name == "" {
		return errors.New("cannot have a game command with an empty name")
	}
	if conf.Help == "" {
		return errors.New("cannot have a game command with an empty help string")
	}

	cmdFormat := format.Format{FormatString: conf.Format}
	if err := cmdFormat.Compile(name, nil, nil); err != nil {
		return err
	}

	return g.manager.Cmd.AddSubCommand(
		g.name,
		name,
		conf.RequiresAdmin,
		g.createCommandCallback(cmdFormat),
		conf.Help,
	)
}
// clearCommands unregisters all of this game's subcommands by removing the
// game's top-level command from the command manager.
func (g *Game) clearCommands() error {
	return g.manager.Cmd.RemoveCommand(g.name)
}
|
package sorting
// InsertionSort sorting algorithm implementation
// Time complexity: O(n^2)
// Space complexity: O(1)
// InsertionSort sorts arr in place in ascending order.
// Time complexity: O(n^2); space complexity: O(1). The sort is stable.
func InsertionSort(arr []int) {
	for idx := 1; idx < len(arr); idx++ {
		value := arr[idx]
		pos := idx
		// Shift larger elements one slot right until value fits.
		for pos > 0 && arr[pos-1] > value {
			arr[pos] = arr[pos-1]
			pos--
		}
		arr[pos] = value
	}
}
|
package dict
import (
"bytes"
"compress/zlib"
"encoding/binary"
"io/ioutil"
"sync"
)
// Character-size and sentinel constants used by the code-page tables.
const (
	MAXIMUM_LEADBYTES = 12 // max bytes of DBCS lead-byte ranges (see commented LeadByte field below)
	SIZE_OF_WCHAR     = 2  // bytes per UTF-16 code unit
	UnicodeNull       = 0  // Unicode NUL code point
)

// Table size constants.
const (
	MB_TBL_SIZE    = 256         /* size of MB tables */
	GLYPH_TBL_SIZE = MB_TBL_SIZE /* size of GLYPH tables */
	DBCS_TBL_SIZE  = 256         /* size of DBCS tables */
	GLYPH_HEADER   = 1           /* size of GLYPH table header */
	DBCS_HEADER    = 1           /* size of DBCS table header */
	LANG_HEADER    = 1           /* size of LANGUAGE file header */
	UP_HEADER      = 1           /* size of UPPERCASE table header */
	LO_HEADER      = 1           /* size of LOWERCASE table header */
)

// codePageTableInfo describes one code page. Its translation tables are
// lazily decompressed from data on first use (see initialize).
type codePageTableInfo struct {
	CodePage             uint16 // code page number
	MaximumCharacterSize uint16 // max length (bytes) of a char
	DefaultChar          uint16 // default character (MB)
	UniDefaultChar       uint16 // default character (Unicode)
	TransDefaultChar     uint16 // translation of default char (Unicode)
	TransUniDefaultChar  uint16 // translation of Unic default char (MB)
	DBCSCodePage         bool   // Non 0 for DBCS code pages
	// LeadByte [MAXIMUM_LEADBYTES]byte // lead byte ranges
	MultiByteTable []uint16 // pointer to MB translation table
	WideCharTable  []uint16 // pointer to WC translation table
	// DBCSRanges uint // pointer to DBCS ranges
	TranslateTable []uint16 // pointer to DBCS offsets
	data           []byte   // zlib-compressed table blob, consumed (and nilled) by initialize
	initialized    bool     // true once tables are extracted; guarded by lock
	// encode/decode convert between bytes and the package's String type
	// (String is declared elsewhere in this package).
	encode func(table *codePageTableInfo, str String) []byte
	decode func(table *codePageTableInfo, bytes []byte) String
}

// lock serializes lazy initialization across all codePageTableInfo values.
var lock = &sync.Mutex{}
// bytesToUInt16Array reinterprets a little-endian byte slice as a slice of
// uint16 values. A trailing odd byte, if present, is ignored.
//
// Fixes: the parameter was renamed from bytes, which shadowed the bytes
// package, and the manual shift/or was replaced with the equivalent
// binary.LittleEndian.Uint16 from the already-imported encoding/binary.
func bytesToUInt16Array(data []byte) []uint16 {
	arr := make([]uint16, len(data)/2)
	for i := range arr {
		arr[i] = binary.LittleEndian.Uint16(data[i*2:])
	}
	return arr
}
func decompressData(compressed []byte) []byte {
reader, err := zlib.NewReader(bytes.NewReader(compressed))
if err != nil {
panic(err)
}
defer reader.Close()
compressed, err = ioutil.ReadAll(reader)
if err != nil {
panic(err)
}
return compressed
}
// extractTable decodes one length-prefixed table from buf: a uvarint byte
// count followed by that many bytes of little-endian uint16 values. It
// returns the decoded table and the total number of bytes consumed (prefix
// plus payload) so the caller can advance to the next table.
//
// Fix: the parameter was renamed from bytes, which shadowed the bytes package.
func extractTable(buf []byte) ([]uint16, int) {
	tableSize, offset := binary.Uvarint(buf)
	table := buf[offset : offset+int(tableSize)]
	return bytesToUInt16Array(table), offset + int(tableSize)
}
// initialize lazily decompresses the embedded table blob and extracts the
// MultiByte, Translate, and WideChar tables (stored back to back, each
// length-prefixed). It is safe for concurrent callers and runs at most once.
//
// Fix: the original read self.initialized before acquiring the lock
// (unsynchronized double-checked locking), which is a data race under the Go
// memory model. The flag is now only read and written while holding lock.
func (info *codePageTableInfo) initialize() {
	lock.Lock()
	defer lock.Unlock()
	if info.initialized {
		return
	}

	uncompressed := decompressData(info.data)
	info.data = nil // the compressed blob is no longer needed

	offset := 0
	size := 0
	info.MultiByteTable, size = extractTable(uncompressed)
	offset += size
	info.TranslateTable, size = extractTable(uncompressed[offset:])
	offset += size
	info.WideCharTable, size = extractTable(uncompressed[offset:])

	info.initialized = true
}
|
package main
import (
"fmt"
"encoding/json"
)
// main demonstrates encoding a struct to JSON and printing the result.
func main() {
	type User struct {
		FirstName string
		LastName  string
		Books     []string
	}

	user := &User{
		FirstName: "John",
		LastName:  "Smith",
		Books:     []string{"The Art of Programming", "Golang for Dummies"},
	}

	// Marshaling a plain struct of strings cannot fail; the error is ignored
	// exactly as in the original.
	encoded, _ := json.Marshal(user)
	fmt.Println(string(encoded))
}
/*{"FirstName":"John","LastName":"Smith","Books":["The Art of Programming",
"Golang for Dummies"]}*/
|
package utils
import (
"errors"
"strconv"
"github.com/gin-gonic/gin"
)
// GetInt64InQuery reads the named query-string parameter from the request and
// parses it as a base-10 int64. It returns an error when the parameter is
// absent or not a valid integer.
func GetInt64InQuery(c *gin.Context, name string) (int64, error) {
	raw, present := c.GetQuery(name)
	if !present {
		return 0, errors.New("miss parameter " + name + " InQuery")
	}
	return strconv.ParseInt(raw, 10, 64)
}
|
package encryptor
import (
"time"
)
// fakeEncryptor simulates per-frame encryption work by sleeping for a fixed
// duration on every Encrypt call.
//
// Fix: the field/parameter was renamed from singleFrameEncryptionTimeInMs.
// Its type is time.Duration (nanosecond-based), so the old "InMs" suffix was
// misleading — a caller passing a bare 5 would sleep 5ns, not 5ms.
type fakeEncryptor struct {
	singleFrameEncryptionTime time.Duration
}

// NewEncryptor returns a fake encryptor whose Encrypt call blocks for roughly
// singleFrameEncryptionTime of wall-clock time.
func NewEncryptor(singleFrameEncryptionTime time.Duration) *fakeEncryptor {
	return &fakeEncryptor{
		singleFrameEncryptionTime: singleFrameEncryptionTime,
	}
}

// Encrypt pretends to encrypt frame frameNo: it sleeps for the configured
// duration and returns the frame number unchanged with a nil error.
func (f *fakeEncryptor) Encrypt(frameNo int) (int, error) {
	time.Sleep(f.singleFrameEncryptionTime)
	return frameNo, nil
}
|
package cotacao
import (
"fmt"
"github.com/fabioxgn/go-bot"
"github.com/fabioxgn/go-bot/web"
)
var (
	// url is the quotes endpoint serving Bovespa, Dollar, and Euro data as JSON.
	url = "http://developers.agenciaideias.com.br/cotacoes/json"
)

// retorno mirrors the JSON payload served by url: each instrument carries a
// quote (cotacao) and a variation (variacao), plus a last-update timestamp.
type retorno struct {
	Bovespa struct {
		Cotacao  string `json:"cotacao"`
		Variacao string `json:"variacao"`
	} `json:"bovespa"`
	Dolar struct {
		Cotacao  string `json:"cotacao"`
		Variacao string `json:"variacao"`
	} `json:"dolar"`
	Euro struct {
		Cotacao  string `json:"cotacao"`
		Variacao string `json:"variacao"`
	} `json:"euro"`
	Atualizacao string `json:"atualizacao"`
}
// cotacao fetches the latest quotes and formats the Dollar and Euro rates for
// the bot. Note the Bovespa figures are fetched but intentionally not shown.
func cotacao(command *bot.Cmd) (msg string, err error) {
	quotes := &retorno{}
	if err = web.GetJSON(url, quotes); err != nil {
		return "", err
	}
	msg = fmt.Sprintf("Dólar: %s (%s), Euro: %s (%s)",
		quotes.Dolar.Cotacao, quotes.Dolar.Variacao,
		quotes.Euro.Cotacao, quotes.Euro.Variacao)
	return msg, nil
}
// init registers the "cotacao" command with the bot framework at package load
// time; importing this package is enough to make the command available.
func init() {
	bot.RegisterCommand(
		"cotacao",
		"Informa a cotação do Dólar e Euro.",
		"",
		cotacao)
}
|
// Package deferred has machinery for providing JavaScript with
// deferred values. These arise because we want to have library
// functions for JavaScript to request things that will only later be
// supplied -- either because it will take some time to get, and it's
// better if JavaScript doesn't have to block (e.g., an HTTP request),
// or because we're waiting for something elsewhere to happen (e.g.,
// watching a resource for changes).
//
// In JavaScript, deferred values will often end up being represented
// by promises; but the protocol allows other representations, and in
// particular, some requests may result in an sequence of values
// rather than just a single value. The protocol also allows for
// cancelling a deferred value.
package deferred
import (
"sync"
)
var (
	// globalDeferreds is the process-wide scheduler backing the package-level
	// Register and Wait helpers.
	globalDeferreds = &deferreds{}
)

// Register schedules an action to be performed later, with the result
// sent to `resolver`, using the global deferred scheduler.
func Register(p performFunc, r resolver) Serial {
	return globalDeferreds.Register(p, r)
}

// Wait blocks until all outstanding deferred values in the global
// scheduler are fulfilled.
func Wait() {
	globalDeferreds.Wait()
}

// Serial is a serial number used to identify deferreds between Go and
// JavaScript.
type Serial uint64

// deferreds tracks in-flight deferred requests: serialMu guards the serial
// counter, and outstanding counts unfinished worker goroutines.
type deferreds struct {
	serialMu    sync.Mutex
	serial      Serial
	outstanding sync.WaitGroup
}

// resolver is the interface for a deferred request to use to send its
// response. (The original comment called this "responder"; the type itself is
// named resolver.)
type resolver interface {
	Error(Serial, error)
	Data(Serial, []byte)
	End(Serial)
}

// performFunc does the actual deferred work, returning its payload or error.
type performFunc func() ([]byte, error)
// Register adds a request to those being tracked, and returns the serial
// number to give back to the runtime. The perform function runs on its own
// goroutine; its payload or error is delivered to r tagged with that serial.
func (d *deferreds) Register(perform performFunc, r resolver) Serial {
	d.serialMu.Lock()
	serial := d.serial
	d.serial++
	d.serialMu.Unlock()

	// Count the worker before it starts so Wait cannot miss it.
	d.outstanding.Add(1)
	go func() {
		defer d.outstanding.Done()
		if b, err := perform(); err != nil {
			r.Error(serial, err)
		} else {
			r.Data(serial, b)
		}
	}()
	return serial
}
// Wait blocks until all outstanding deferred requests are fulfilled.
// It simply waits on the WaitGroup incremented by Register.
func (d *deferreds) Wait() {
	d.outstanding.Wait()
}
|
package backup
import (
"context"
"go.mongodb.org/mongo-driver/bson/primitive"
"golang.org/x/oauth2"
)
// tokenSource wraps ts in a dbTokenSource that persists refreshed tokens to
// the database record of the user identified by id.
func tokenSource(ctx context.Context, id primitive.ObjectID, s *Service,
	ts oauth2.TokenSource) *dbTokenSource {
	source := &dbTokenSource{
		service: s,
		src:     ts,
		id:      id,
		ctx:     ctx,
	}
	return source
}
// dbTokenSource is an oauth2.TokenSource decorator that writes refreshed
// tokens back to the user store whenever they change.
type dbTokenSource struct {
	// ctx is stored in the struct (normally discouraged) because
	// oauth2.TokenSource.Token takes no context parameter.
	ctx context.Context
	id  primitive.ObjectID // user whose token record is kept in sync
	src oauth2.TokenSource // wrapped source that actually refreshes tokens
	// service provides access to the user database.
	service *Service
}
// Token obtains a token from the wrapped source and persists it to the user's
// database record whenever it differs from the stored one. It returns the
// freshly obtained token, or the first error encountered.
func (ts *dbTokenSource) Token() (*oauth2.Token, error) {
	tok, err := ts.src.Token()
	if err != nil {
		return nil, err
	}

	user, err := ts.service.db.Users().FindByID(ts.ctx, ts.id)
	if err != nil {
		return nil, err
	}

	if !tokenNeedsUpdate(user.Token, tok) {
		return tok, nil
	}
	if err := ts.service.db.Users().SaveToken(ts.ctx, user.ID, tok); err != nil {
		return nil, err
	}
	return tok, nil
}
// tokenNeedsUpdate reports whether the freshly obtained token differs from
// the stored one (or either is nil) and therefore must be persisted.
//
// Fix: the parameters were renamed from old/new — "new" shadowed the builtin
// new function within this scope.
func tokenNeedsUpdate(stored *oauth2.Token, fresh *oauth2.Token) bool {
	return stored == nil || fresh == nil ||
		stored.AccessToken != fresh.AccessToken ||
		stored.RefreshToken != fresh.RefreshToken
}
|
package domain
import "time"
// SessionStorageService abstracts the persistence layer used to create,
// fetch, extend, and delete login sessions.
type SessionStorageService interface {
	// Close closes the storage connection
	Close() error
	// CreateSession creates a new session. It errors if a valid session already exists.
	CreateSession(accountID string, sessionKey string, expirationDuration time.Duration) error
	// FetchPossiblyExpiredSession returns a session row by account ID regardless of whether it is expired.
	// This is potentially dangerous, it is only intended to be used during the new login flow, never to check
	// on a valid session for authentication purposes.
	FetchPossiblyExpiredSession(accountID string) (Session, error)
	// DeleteSession removes a session record from the db
	DeleteSession(sessionKey string) error
	// ExtendAndFetchSession fetches session data from the db.
	// On success it returns the session.
	// On failure, it can return ErrValidSessionNotFound, ErrSessionExpired, or an unexpected error
	ExtendAndFetchSession(sessionKey string, expirationDuration time.Duration) (Session, error)
}
|
//--- TODO: exercises 3, 4 and 5 are still pending
|
package im
import (
"github.com/astaxie/beego"
)
// init wires the IM HTTP routes into beego's global router at package load
// time; importing this package is enough to expose the endpoints.
func init() {
	// Mapping user routing
	// deprecated
	// beego.Router("/api/v1/im/user/:uid/register", &Controller{}, "post:RegisterUsers")
	// beego.Router("/api/v1/im/user/:uid", &Controller{}, "get:GetSessionByUID")
	beego.Router("/api/v1/im/user/:uid/sendmessage", &Controller{}, "post:PostMessageToUserByID")
	beego.Router("/api/v1/im/register", &Controller{}, "post:RegisterUsers")
	// Mapping session routing
	beego.Router("/api/v1/im/session", &Controller{}, "delete:DeleteSession")
	beego.Router("/api/v1/im/session/:sid", &Controller{}, "put:CreateSessionById;delete:DeleteSessionByID")
	// Mapping message routing
	beego.Router("/api/v1/im/message", &Controller{}, "put:ReqMsgCtl")
	// query
	// deprecated
	// beego.Router("/api/v1/im/session", &Controller{}, "get:GetAllSession")
	// beego.Router("/api/v1/im/session/:sid", &Controller{}, "get:GetSessionByID")
	// beego.Router("/api/v1/im/session/:sid/user", &Controller{}, "get:GetUsersBySessionID")
	// beego.Router("/api/v1/im/session/:sid/user/:uid", &Controller{}, "put:PutSessionByUID")
	// beego.Router("/api/v1/im/session/:sid/user/:uid", &Controller{}, "delete:DeleteSessionByUID")
	// beego.Router("/api/v1/im/session/:sid/sendmessage", &Controller{}, "post:PostMessageToSessionByID")
}
|
package dcmdata
import (
"log"
"github.com/grayzone/godcm/ofstd"
)
// DcmItem represents a DICOM item: a DcmObject that owns an ordered list of
// contained elements.
type DcmItem struct {
	DcmObject                    // embedded base object (tag, length field, errorFlag, ...)
	elementList         *DcmList // contained elements in encoding order
	lastElementComplete bool     // presumably whether the last parsed element was read fully — confirm against parser
	fStartPosition      int      // presumably the stream offset where this item began — confirm against parser
}
// NewDcmItem constructs a DcmItem with the given tag and value length. The
// element list starts nil, lastElementComplete true, and start position 0.
//
// Fix: the length parameter was renamed from len, which shadowed the builtin.
func NewDcmItem(tag DcmTag, length uint32) *DcmItem {
	return &DcmItem{*NewDcmObject(tag, length), nil, true, 0}
}
/** Virtual object copying. This method can be used for DcmObject
* and derived classes to get a deep copy of an object. Internally
* the assignment operator is called if the given DcmObject parameter
* is of the same type as "this" object instance. If not, an error
* is returned. This function permits copying an object by value
* in a virtual way which therefore is different to just calling the
* assignment operator of DcmElement which could result in slicing
* the object.
* @param rhs - [in] The instance to copy from. Has to be of the same
* class type as "this" object
* @return EC_Normal if copying was successful, error otherwise
*/
// CopyFrom copies rhs into this item's embedded DcmObject, failing with
// EC_IllegalCall when the identities differ (see the comment block above).
func (item *DcmItem) CopyFrom(rhs DcmObject) ofstd.OFCondition {
	// NOTE(review): rhs is a by-value parameter, so &rhs can never equal
	// &item.DcmObject and this self-assignment guard is always true — confirm
	// whether an identity comparison by some other means was intended.
	if &rhs != &item.DcmObject {
		if rhs.Ident() != item.Ident() {
			return EC_IllegalCall
		}
		item.DcmObject = rhs
	}
	return ofstd.EC_Normal
}
/** calculate the value length (without attribute tag, VR and length field)
* of this DICOM element when encoded with the given transfer syntax and
* the given encoding type for sequences.
* If length encodig is set to be explicit and the item content is larger
* than the available 32-bit length field, then undefined length is
* returned. If "dcmWriteOversizedSeqsAndItemsUndefined" is disabled,
* also the internal DcmObject errorFlag is set to
* EC_SeqOrItemContentOverflow.
* @param xfer transfer syntax for length calculation
* @param enctype sequence encoding type for length calculation
* @return value length of DICOM element
*/
// GetLength computes the value length of this item (see the comment block
// above) by summing CalcElementLength over the contained elements, switching
// to DCM_UndefinedLength on a 32-bit overflow with explicit length encoding.
func (item *DcmItem) GetLength(xfer E_TransferSyntax, enctype E_EncodingType) uint32 {
	var itemlen uint32
	if item.elementList.Empty() != true {
		// NOTE(review): Seek(ELP_first) is immediately followed by
		// Seek(ELP_next) in the loop condition; whether element 0 is visited
		// depends on DcmList.Seek semantics not visible here — confirm the
		// first element is not skipped.
		item.elementList.Seek(ELP_first)
		for item.elementList.Seek(ELP_next) != nil {
			do := item.elementList.Get(ELP_atpos)
			sublen := do.CalcElementLength(xfer, enctype)
			/* explicit length: be sure that total size of contained elements fits into item's
			   32 Bit length field. If not, switch encoding automatically to undefined
			   length for this item. Nevertheless, any contained elements will be
			   written with explicit length if possible.
			*/
			if (enctype == EET_ExplicitLength) && (ofstd.Check32BitAddOverflow(sublen, itemlen)) {
				if DcmWriteOversizedSeqsAndItemsUndefined {
					log.Println("DcmItem: Explicit length of item exceeds 32-Bit length field, trying to encode with undefined length")
				} else {
					log.Println("DcmItem: Explicit length of item exceeds 32-Bit length field, aborting write")
					item.errorFlag = EC_SeqOrItemContentOverflow
				}
				return DCM_UndefinedLength
			} else {
				itemlen = itemlen + sublen
			}
		}
	}
	return itemlen
}
/** calculate the length of this DICOM element when encoded with the
* given transfer syntax and the given encoding type for sequences.
* For elements, the length includes the length of the tag, length field,
* VR field and the value itself, for items and sequences it returns
* the length of the complete item or sequence including delimitation tags
* if applicable.
* If length encodig is set to be explicit and the total item size is
* larger than the available 32-bit length field, then undefined length
* is returned. If "dcmWriteOversizedSeqsAndItemsImplicit" is disabled,
* also the internal DcmObject errorFlag is set to EC_SeqOrItemContentOverflow
* in case the item content (excluding tag header etc.) is already too
* large.
* @param xfer transfer syntax for length calculation
* @param enctype sequence encoding type for length calculation
* @return length of DICOM element
*/
// CalcElementLength computes the full encoded length of this item (see the
// comment block above): content length plus the start-header size, plus the
// 8-byte item delimitation when undefined-length encoding is used, returning
// DCM_UndefinedLength whenever any addition would overflow 32 bits.
//
// Fix: the second SizeofTagHeader call was replaced by the headersize value
// already computed at the top (the original recomputed the same lookup).
func (item *DcmItem) CalcElementLength(xfer E_TransferSyntax, enctype E_EncodingType) uint32 {
	xferSyn := NewDcmXfer(xfer)
	/* Length of item's start header */
	headersize := xferSyn.SizeofTagHeader(item.GetVR())
	/* Length of item's content, i.e. contained elements */
	itemlen := item.GetLength(xfer, enctype)
	/* Since the item's total length can exceed the maximum length of 32 bit, it is
	 * always necessary to check for overflows. The approach taken is not elegant
	 * but should work...
	 */
	if (itemlen == DCM_UndefinedLength) || ofstd.Check32BitAddOverflow(itemlen, headersize) {
		return DCM_UndefinedLength
	}
	itemlen = itemlen + headersize
	if enctype == EET_UndefinedLength { // add bytes for closing item tag marker if necessary
		if ofstd.Check32BitAddOverflow(itemlen, 8) {
			return DCM_UndefinedLength
		}
		itemlen = itemlen + 8
	}
	return itemlen
}
// Clear deletes all contained elements (when a list exists) and resets the
// stored length field to zero. It always returns the freshly reset errorFlag,
// i.e. EC_Normal.
func (item *DcmItem) Clear() ofstd.OFCondition {
	item.errorFlag = ofstd.EC_Normal
	if item.elementList != nil {
		item.elementList.DeleteAllElements()
	}
	item.setLengthField(0)
	return item.errorFlag
}
// ComputeGroupLengthAndPadding recalculates, inserts, or strips group-length
// (gggg,0000) elements and dataset trailing padding according to glenc and
// padenc, recursing into sequence elements. Padding lengths must be even, and
// recalculation requires a known transfer syntax.
func (item *DcmItem) ComputeGroupLengthAndPadding(glenc E_GrpLenEncoding, padenc E_PaddingEncoding, xfer E_TransferSyntax, enctype E_EncodingType, padlen uint32, subPadlen uint32, instanceLength uint32) ofstd.OFCondition {
	// Parameter sanity: reject odd padding lengths, and reject any requested
	// recalculation when the transfer syntax is unknown.
	if (padenc == EPD_withPadding && ((padlen%2) != 0 || (subPadlen%2) != 0)) || ((glenc == EGL_recalcGL || glenc == EGL_withGL || padenc == EPD_withPadding) && xfer == EXS_Unknown) {
		return EC_IllegalCall
	}
	// Nothing to do when both group length and padding are left unchanged.
	if glenc == EGL_noChange && padenc == EPD_noChange {
		return ofstd.EC_Normal
	}
	err := ofstd.EC_Normal
	if item.elementList.Empty() {
		return err
	}
	xferSyn := NewDcmXfer(xfer)
	seekmode := ELP_next
	item.elementList.Seek(ELP_first)
	// NOTE(review): lastGrp is initialized to 0 and never updated inside the
	// loop, so the "new group" test below always compares against 0 — confirm
	// whether lastGrp = actGrp was intended after handling each group.
	lastGrp := uint16(0x0000)
	beginning := true
	for err.Good() && (item.elementList.Seek(seekmode) != nil) {
		seekmode = ELP_next
		d := item.elementList.Get(ELP_atpos)
		if d.GetVR() == EVR_SQ {
			// Recurse into sequences so nested items get the same treatment.
			templen := instanceLength + xferSyn.SizeofTagHeader(EVR_SQ)
			t := NewDcmItem(d.tag, 0)
			// NOTE(review): the OFCondition returned by this recursive call is
			// discarded and t is never used afterwards — confirm whether the
			// result was meant to be assigned to err.
			t.ComputeGroupLengthAndPadding(glenc, padenc, xfer, enctype, subPadlen, subPadlen, templen)
		}
		// NOTE(review): err is never reassigned in this loop, so this guard
		// appears unreachable as written.
		if !err.Good() {
			continue
		}
		// Remove existing group-length elements and trailing padding when asked.
		if ((glenc == EGL_withGL || glenc == EGL_withoutGL) && d.GetETag() == 0x0000) || (padenc != EPD_noChange && d.GetTag().DcmTagKey == DCM_DataSetTrailingPadding) {
			item.elementList.Remove()
			seekmode = ELP_atpos // stay at this position after the removal
		} else if glenc == EGL_withGL || glenc == EGL_recalcGL {
			actGrp := d.GetGTag()
			if actGrp != lastGrp || beginning {
				beginning = false
				if d.GetETag() == 0x0000 && d.Ident() != EVR_UL {
					// Group-length element with the wrong VR: replace it with
					// a fresh UL element.
					item.elementList.Remove()
					tagUL := NewDcmTagWithGEV(actGrp, 0x0000, DcmVR{EVR_UL})
					obj := NewDcmObject(*tagUL, 0)
					item.elementList.Insert(obj, ELP_prev)
					// obj.SetParent(*obj)
				} else if glenc == EGL_withGL {
					// Insert a new UL group-length element for this group.
					tagUL := NewDcmTagWithGEV(actGrp, 0x0000, DcmVR{EVR_UL})
					obj := NewDcmObject(*tagUL, 0)
					item.elementList.Insert(obj, ELP_prev)
				}
			}
		}
	}
	return err
}
|
package actions
import (
"os"
)
// ActionFunc is the signature shared by all actions in this package.
type ActionFunc func()

// Quit terminates the process immediately with exit status 0.
// Note that deferred functions do not run when os.Exit is called.
//
// Fix: added the gofmt-mandated space before the opening brace and doc
// comments on the exported names.
func Quit() {
	os.Exit(0)
}

// Ignore is a no-op action, usable wherever a binding must do nothing.
func Ignore() {
}
|
package dummy
import (
"io/ioutil"
"math/rand"
"time"
"github.com/sherifabdlnaby/prism/pkg/component"
"github.com/sherifabdlnaby/prism/pkg/config"
"github.com/sherifabdlnaby/prism/pkg/payload"
"github.com/sherifabdlnaby/prism/pkg/response"
"go.uber.org/zap"
)
//Dummy Dummy ProcessReadWrite that does absolutely nothing to the image
type Dummy struct {
	logger zap.SugaredLogger // set by Init; used only for lifecycle logging
}

// internalImage is this plugin's "decoded" representation: just the raw bytes.
type internalImage struct {
	internal []byte
}

// NewComponent Return a new Base
func NewComponent() component.Base {
	return &Dummy{}
}
//Decode Simulate Decoding the Image: wrap the raw bytes unchanged in the
//plugin's internal representation and acknowledge.
func (d *Dummy) Decode(in payload.Bytes, data payload.Data) (payload.DecodedImage, response.Response) {
	decoded := internalImage{internal: in}
	return decoded, response.ACK
}
//DecodeStream Simulate Decoding the Image: drain the stream into memory and
//wrap the resulting bytes unchanged.
func (d *Dummy) DecodeStream(in payload.Stream, data payload.Data) (payload.DecodedImage, response.Response) {
	raw, err := ioutil.ReadAll(in)
	if err != nil {
		return nil, response.Error(err)
	}
	return internalImage{internal: raw}, response.ACK
}
//Process Simulate Processing the Image by sleeping for a random duration of
//1000-2499 milliseconds.
func (d *Dummy) Process(in payload.DecodedImage, data payload.Data) (payload.DecodedImage, response.Response) {
	// Fix: the original expression `1000 + time.Duration(rand.Intn(1500))*time.Millisecond`
	// added a bare 1000 — i.e. 1000 *nanoseconds* — to the millisecond term,
	// so the intended one-second base delay was effectively lost. Convert the
	// whole millisecond count at once instead.
	time.Sleep(time.Duration(1000+rand.Intn(1500)) * time.Millisecond)
	return in, response.ACK
}
//Encode Simulate Encoding the Image: the processed result is already raw
//bytes, so hand them straight to the next node.
func (d *Dummy) Encode(in payload.DecodedImage, data payload.Data) (payload.Bytes, response.Response) {
	return in.(internalImage).internal, response.ACK
}
//Init Initialize Plugin based on parsed config. The config itself is unused
//by this dummy plugin; only the logger is retained.
func (d *Dummy) Init(config config.Config, logger zap.SugaredLogger) error {
	d.logger = logger
	return nil
}

//Start start the plugin to begin receiving input
func (d *Dummy) Start() error {
	d.logger.Info("Started Dummy processor.")
	return nil
}

//Stop Stop plugin gracefully (no resources to release here)
func (d *Dummy) Stop() error {
	return nil
}
|
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// PodRebalancerSpec defines the desired state of PodRebalancer
type PodRebalancerSpec struct {
	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
	// Important: Run "make" to regenerate code after modifying this file

	// BalanceTargetRef names the workload whose pods should be rebalanced
	// (presumably a Deployment or similar — confirm against the controller).
	BalanceTargetRef BalanceTargetRefSpec `json:"balanceTargetRef,omitempty"`
	// ReplicaThreshold presumably bounds the replica count that triggers
	// rebalancing — confirm against the controller logic.
	ReplicaThreshold *int32 `json:"replicaThreshold,omitempty"`
	// CloudProvider selects the cloud provider integration.
	CloudProvider string `json:"cloudProvider,omitempty"`
	// NodeGroupAutoDiscovery holds node-group auto-discovery specifiers.
	NodeGroupAutoDiscovery []string `json:"nodeGroupAutoDiscovery,omitempty"`
}

// BalanceTargetRefSpec identifies the rebalance target by type metadata and name.
type BalanceTargetRefSpec struct {
	metav1.TypeMeta `json:",inline"`
	Name            string `json:"name,omitempty"`
}

// PodRebalancerStatus defines the observed state of PodRebalancer
type PodRebalancerStatus struct {
	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
	// Important: Run "make" to regenerate code after modifying this file

	// Target lists object references observed by the controller.
	// +optional
	Target []corev1.ObjectReference `json:"target,omitempty"`
}

// +kubebuilder:object:root=true

// PodRebalancer is the Schema for the podrebalancers API
type PodRebalancer struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   PodRebalancerSpec   `json:"spec,omitempty"`
	Status PodRebalancerStatus `json:"status,omitempty"`
}

// +kubebuilder:object:root=true

// PodRebalancerList contains a list of PodRebalancer
type PodRebalancerList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []PodRebalancer `json:"items"`
}

// init registers both schema types with the scheme builder at load time.
func init() {
	SchemeBuilder.Register(&PodRebalancer{}, &PodRebalancerList{})
}
|
package goauth
import (
"io"
"net/http"
"net/http/httptest"
)
type testCase struct {
method string
url string
body io.Reader
handler http.HandlerFunc
request func(r *http.Request)
expect func(r *httptest.ResponseRecorder)
}
func testCases(tcs []testCase) {
for _, tc := range tcs {
w := httptest.NewRecorder()
r, err := http.NewRequest(tc.method, tc.url, tc.body)
if err != nil {
panic(err)
}
tc.request(r)
tc.handler(w, r)
tc.expect(w)
}
}
|
/*
In The Netherlands we have PostNL, the postal company. They use KixCodes, it's a fast way to deliver letters and packages that can be scanned during the process.
Kix Code
https://www.postnl.nl/Images/KIX-code-van-PostNL_tcm10-8633.gif
The code is a combination of: Postal code, House/box/call number and House appendage / suffix
If there is a character between the house number and the suffix, we need to replace that with an X. Eventually, the code will be printed in the KixCode font.
Examples
kixCode(`PostNL, Postbus 30250, 2500 GG ’s Gravenhage`) ➞ `2500GG30250`
kixCode(`Liesanne B Wilkens, Kogge 11-1, 1657 KA Abbekerk`) ➞ `1657KA11X1`
kixCode(`Dijk, Antwoordnummer 80430, 2130 VA Hoofddorp`) ➞ `2130VA80430`
Notes
Your function will get an address line (string) separated by comma's.
The input format will always be the same.
Watch out for the different suffixes!
*/
package main
import (
"fmt"
"strings"
"unicode"
)
// main exercises kixcode against known address lines; assert panics on the
// first mismatch, preserving the original fail-fast behavior and order.
func main() {
	cases := []struct {
		address string
		want    string
	}{
		{"PostNL, Postbus 30250, 2500 GG 's Gravenhage", "2500GG30250"},
		{"De Jong, Havendijk 13 hs, 1231 FZ POSTDAM", "1231FZ13XHS"},
		{"B. Bartelds, Boerheem 46, 9421 MC Bovensmilde", "9421MC46"},
		{"Huisman, Koninginneweg 182 B, 3331 CH Zwijndrecht", "3331CH182XB"},
		{"Liesanne B Wilkens, Kogge 11-1, 1657 KA Abbekerk", "1657KA11X1"},
		{"Dijk, Antwoordnummer 80430, 2130 VA Hoofddorp", "2130VA80430"},
		{"Van Eert, Dirk van Heinsbergstraat 200-A, 5575 BM Luyksgestel", "5575BM200XA"},
		{"B.C. Dieker, Korhoenlaan 130b, 3847 LN Harderwijk", "3847LN130B"},
		{"Mahir F Schipperen, IJsselmeerlaan 31, 8304 DE Emmeloord", "8304DE31"},
		{"Jet de Wit, Wielingenstraat 129/7, 3522 PG Utrecht", "3522PG129X7"},
	}
	for _, c := range cases {
		assert(kixcode(c.address) == c.want)
	}
}
// kixcode builds a PostNL KIX code from a comma-separated address line
// ("name, street+number, zip city"): the four-digit/two-letter postal code
// followed by the house number part, with '-', '/', and ' ' replaced by 'X'
// and everything uppercased. It returns "" for malformed input.
func kixcode(s string) string {
	parts := strings.Split(s, ",")
	if len(parts) != 3 {
		return ""
	}

	// Pull the numeric and alphabetic halves of the postal code out of the
	// city segment, e.g. " 2500 GG 's Gravenhage" -> 2500, "GG".
	var (
		zipNum  int
		zipCode string
	)
	if n, err := fmt.Sscanf(parts[2], "%v %v", &zipNum, &zipCode); err != nil || n != 2 {
		return ""
	}

	// Strip the street name: keep the segment from the first digit onward.
	house := parts[1]
	for idx, r := range house {
		if unicode.IsDigit(r) {
			house = house[idx:]
			break
		}
	}

	// Separators between number and suffix become X; uppercase the rest.
	house = strings.NewReplacer("-", "X", "/", "X", " ", "X").Replace(house)
	house = strings.ToUpper(house)

	return fmt.Sprintf("%v%v%v", zipNum, zipCode, house)
}
// assert panics with "assertion failed" when x is false; otherwise it is a
// no-op.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
|
package memorydll
//#include<stdlib.h>
// extern void * MemoryLoadLibrary(const void *);
// extern void * MemoryGetProcAddress(void *, char *);
// extern void MemoryFreeLibrary(void *);
import "C"
import (
"unsafe"
"errors"
"syscall"
"fmt"
)
// Handle is an opaque reference to a DLL image loaded into memory
// (presumably the module pointer returned by MemoryLoadLibrary — confirm
// against the loader implementation).
type Handle uintptr

// A DLL implements access to a single DLL.
type DLL struct {
	Name   string // name the DLL was loaded under
	Handle Handle // in-memory module handle
}

// DLLError describes reasons for DLL load failures.
type DLLError struct {
	Err     error  // underlying error, if any
	ObjName string // name of the DLL or procedure that failed
	Msg     string // human-readable failure description
}

// Error implements the error interface by returning the stored message.
func (e *DLLError) Error() string { return e.Msg }

// FindProc searches DLL d for procedure named name and returns *Proc
// if found. It returns an error if search fails.
func (d *DLL) FindProc(name string) (proc *Proc, err error) {
	return memoryGetProcAddress(d, name)
}
// MustFindProc is like FindProc but panics if search fails.
func (d *DLL) MustFindProc(name string) *Proc {
	proc, err := d.FindProc(name)
	if err != nil {
		panic(err)
	}
	return proc
}
// Release unloads DLL d from memory.
func (d *DLL) Release() {
	memoryFreeLibrary(d)
}

// A Proc implements access to a procedure inside a DLL.
type Proc struct {
	Dll  *DLL    // DLL the procedure belongs to
	Name string  // procedure name as looked up
	addr uintptr // resolved entry-point address
}

// Addr returns the address of the procedure represented by p.
// The return value can be passed to Syscall to run the procedure.
func (p *Proc) Addr() uintptr {
	return p.addr
}
// Call executes procedure p with arguments a. It will panic, if more than 15 arguments
// are supplied.
//
// The returned error is always non-nil, constructed from the result of GetLastError.
// Callers must inspect the primary return value to decide whether an error occurred
// (according to the semantics of the specific function being called) before consulting
// the error. The error will be guaranteed to contain syscall.Errno.
func (p *Proc) Call(a ...uintptr) (r1, r2 uintptr, lastErr error) {
	// syscall.Syscall/Syscall6/Syscall9/Syscall12/Syscall15 each take a fixed
	// number of argument slots, so dispatch on len(a) and zero-pad the unused
	// trailing slots.
	switch len(a) {
	case 0:
		return syscall.Syscall(p.Addr(), uintptr(len(a)), 0, 0, 0)
	case 1:
		return syscall.Syscall(p.Addr(), uintptr(len(a)), a[0], 0, 0)
	case 2:
		return syscall.Syscall(p.Addr(), uintptr(len(a)), a[0], a[1], 0)
	case 3:
		return syscall.Syscall(p.Addr(), uintptr(len(a)), a[0], a[1], a[2])
	case 4:
		return syscall.Syscall6(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], 0, 0)
	case 5:
		return syscall.Syscall6(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], 0)
	case 6:
		return syscall.Syscall6(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5])
	case 7:
		return syscall.Syscall9(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], 0, 0)
	case 8:
		return syscall.Syscall9(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], 0)
	case 9:
		return syscall.Syscall9(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8])
	case 10:
		return syscall.Syscall12(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], 0, 0)
	case 11:
		return syscall.Syscall12(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], 0)
	case 12:
		return syscall.Syscall12(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11])
	case 13:
		return syscall.Syscall15(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], 0, 0)
	case 14:
		return syscall.Syscall15(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], 0)
	case 15:
		return syscall.Syscall15(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], a[14])
	default:
		panic("Call " + p.Name + " with too many arguments " + fmt.Sprintf("%d",len(a)) + ".")
	}
	// Unreachable: every case above returns or panics. Kept so the code stays
	// byte-identical with the original.
	return
}
// NewDLL loads the DLL image contained in dlldata into memory and returns a
// *DLL registered under dllname. On failure it returns a *DLLError wrapping
// the cause.
// Remember to call Release when the DLL is no longer needed.
func NewDLL(dlldata []byte, dllname string) (*DLL, error) {
	// Guard: indexing dlldata[0] below would panic on an empty slice.
	if len(dlldata) == 0 {
		e := errors.New("dll data error")
		return nil, &DLLError{
			Err:     e,
			ObjName: dllname,
			Msg:     "Failed to load " + dllname + ": " + e.Error(),
		}
	}
	dlldataPointer := unsafe.Pointer(&dlldata[0])
	handle := C.MemoryLoadLibrary(dlldataPointer)
	if handle == nil {
		e := errors.New("dll data error")
		return nil, &DLLError{
			Err:     e,
			ObjName: dllname,
			Msg:     "Failed to load " + dllname + ": " + e.Error(),
		}
	}
	return &DLL{
		Name:   dllname,
		Handle: Handle(handle),
	}, nil
}
// memoryGetProcAddress resolves procname inside the in-memory DLL and wraps
// the resulting entry-point address in a *Proc. It returns a *DLLError when
// the procedure cannot be found.
func memoryGetProcAddress(dll *DLL, procname string) (proc *Proc, err error) {
	cname := C.CString(procname)
	// C.CString allocates C memory that Go's GC does not track; free it.
	defer C.free(unsafe.Pointer(cname))
	addr := C.MemoryGetProcAddress(unsafe.Pointer(dll.Handle), cname)
	if addr != nil {
		return &Proc{
			Dll:  dll,
			Name: procname,
			addr: uintptr(addr),
		}, nil
	}
	e := errors.New("no such function")
	return nil, &DLLError{
		Err:     e,
		ObjName: procname,
		Msg:     "Failed to find " + procname + " procedure in " + dll.Name + ": " + e.Error(),
	}
}
// memoryFreeLibrary unloads the in-memory DLL referenced by dll.Handle.
// Every loaded DLL must eventually be freed (callers go through DLL.Release).
func memoryFreeLibrary(dll *DLL) {
	C.MemoryFreeLibrary(unsafe.Pointer(dll.Handle))
}
|
package key
import (
"github.com/giantswarm/apiextensions/pkg/apis/provider/v1alpha1"
"github.com/giantswarm/aws-operator/service/controller/clusterapi/v29/templates/cloudconfig"
)
// NOTE that code below is deprecated and needs refactoring.
// CloudConfigSmallTemplates returns the list of small cloud-config
// templates; it currently holds only cloudconfig.Small.
func CloudConfigSmallTemplates() []string {
	templates := make([]string, 0, 1)
	templates = append(templates, cloudconfig.Small)
	return templates
}
// StatusAWSConfigNetworkCIDR extracts the cluster network CIDR recorded in
// the status section of the given AWSConfig custom object.
func StatusAWSConfigNetworkCIDR(customObject v1alpha1.AWSConfig) string {
	network := customObject.Status.Cluster.Network
	return network.CIDR
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.