text
stringlengths
11
4.05M
package main

import (
	"flag"
	"fmt"
	"os"
	"path/filepath"

	"github.com/takatoh/mkdocindex/indexmaker"
)

const (
	progVersion = "v0.6.0"
)

// main parses the command line and generates an HTML index for the target
// directory (defaults to the current directory). With -d a distributed
// index is generated, otherwise a single monolithic page.
func main() {
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, `Usage: %s [options] [dir]
Options:
`, os.Args[0])
		flag.PrintDefaults()
	}
	// camelCase flag variables per Go naming convention (were opt_distributed/opt_version).
	optDistributed := flag.Bool("d", false, "Generate distributed HTML.")
	optVersion := flag.Bool("v", false, "Show version.")
	flag.Parse()

	if *optVersion {
		fmt.Println(progVersion)
		os.Exit(0)
	}

	var dir string
	if len(flag.Args()) > 0 {
		dir = flag.Args()[0]
	} else {
		dir = "."
	}

	// Report a path-resolution failure instead of silently indexing a bogus
	// root (the original discarded this error with _).
	root, err := filepath.Abs(dir)
	if err != nil {
		fmt.Fprintf(os.Stderr, "error: %s\n", err)
		os.Exit(1)
	}

	maker := indexmaker.New(root)
	if *optDistributed {
		maker.Make()
	} else {
		maker.MakeMonolithic()
	}
}
package session

import (
	"os"
	"strings"
	"testing"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/btnguyen2k/prom"
)

// _createAwsDynamodbConnect builds an AwsDynamodbConnect from the AWS_*
// environment variables, skipping the test when region or credentials are
// missing. Surrounding double quotes are stripped from each value so CI
// systems that quote environment variables still work.
func _createAwsDynamodbConnect(t *testing.T, testName string) *prom.AwsDynamodbConnect {
	awsRegion := strings.ReplaceAll(os.Getenv("AWS_REGION"), `"`, "")
	awsAccessKeyId := strings.ReplaceAll(os.Getenv("AWS_ACCESS_KEY_ID"), `"`, "")
	awsSecretAccessKey := strings.ReplaceAll(os.Getenv("AWS_SECRET_ACCESS_KEY"), `"`, "")
	if awsRegion == "" || awsAccessKeyId == "" || awsSecretAccessKey == "" {
		t.Skipf("%s skipped", testName)
		return nil
	}
	cfg := &aws.Config{
		Region:      aws.String(awsRegion),
		Credentials: credentials.NewEnvCredentials(),
	}
	// Optional custom endpoint (e.g. DynamoDB Local); SSL is disabled for
	// plain-http endpoints.
	if awsDynamodbEndpoint := strings.ReplaceAll(os.Getenv("AWS_DYNAMODB_ENDPOINT"), `"`, ""); awsDynamodbEndpoint != "" {
		cfg.Endpoint = aws.String(awsDynamodbEndpoint)
		if strings.HasPrefix(awsDynamodbEndpoint, "http://") {
			cfg.DisableSSL = aws.Bool(true)
		}
	}
	adc, err := prom.NewAwsDynamodbConnect(cfg, nil, nil, 10000)
	if err != nil {
		t.Fatalf("%s/%s failed: %s", testName, "NewAwsDynamodbConnect", err)
	}
	return adc
}

// tableNameDynamodb is the DynamoDB table used by these session tests.
const tableNameDynamodb = "exter_test_session"

// setupTestDynamodb drops and re-creates the session table before a test.
var setupTestDynamodb = func(t *testing.T, testName string) {
	testAdc = _createAwsDynamodbConnect(t, testName)
	testAdc.DeleteTable(nil, tableNameDynamodb)
	// Wait until the table is fully gone (status "") before re-creating it.
	err := prom.AwsDynamodbWaitForTableStatus(testAdc, tableNameDynamodb, []string{""}, 1*time.Second, 10*time.Second)
	if err != nil {
		t.Fatalf("%s failed: %s", testName, err)
	}
	err = InitSessionTableAwsDynamodb(testAdc, tableNameDynamodb)
	if err != nil {
		t.Fatalf("%s failed: %s", testName, err)
	}
}

// teardownTestDynamodb closes the shared connection. The nested defers
// ensure testAdc is reset to nil even if Close panics.
var teardownTestDynamodb = func(t *testing.T, testName string) {
	if testAdc != nil {
		defer func() {
			defer func() { testAdc = nil }()
			testAdc.Close()
		}()
	}
}

/*----------------------------------------------------------------------*/

func TestNewSessionDaoAwsDynamodb(t *testing.T) {
	testName := "TestNewSessionDaoAwsDynamodb"
	teardownTest := setupTest(t, testName, setupTestDynamodb, teardownTestDynamodb)
	defer teardownTest(t)
	sessDao := NewSessionDaoAwsDynamodb(testAdc, tableNameDynamodb)
	if sessDao == nil {
		t.Fatalf("%s failed: nil", testName)
	}
}

func TestSessionDaoAwsDynamodb_Save(t *testing.T) {
	testName := "TestSessionDaoAwsDynamodb_Save"
	teardownTest := setupTest(t, testName, setupTestDynamodb, teardownTestDynamodb)
	defer teardownTest(t)
	sessDao := NewSessionDaoAwsDynamodb(testAdc, tableNameDynamodb)
	doTestSessionDao_Save(t, testName, sessDao)
	// Verify exactly one row landed in the table.
	items, err := testAdc.ScanItems(nil, tableNameDynamodb, nil, "")
	if err != nil {
		t.Fatalf("%s failed: %s", testName, err)
	}
	if len(items) != 1 {
		t.Fatalf("%s failed: expected 1 item inserted but received %#v", testName, len(items))
	}
}

func TestSessionDaoAwsDynamodb_Get(t *testing.T) {
	testName := "TestSessionDaoAwsDynamodb_Get"
	teardownTest := setupTest(t, testName, setupTestDynamodb, teardownTestDynamodb)
	defer teardownTest(t)
	sessDao := NewSessionDaoAwsDynamodb(testAdc, tableNameDynamodb)
	doTestSessionDao_Get(t, testName, sessDao)
}

func TestSessionDaoAwsDynamodb_Delete(t *testing.T) {
	testName := "TestSessionDaoAwsDynamodb_Delete"
	teardownTest := setupTest(t, testName, setupTestDynamodb, teardownTestDynamodb)
	defer teardownTest(t)
	sessDao := NewSessionDaoAwsDynamodb(testAdc, tableNameDynamodb)
	doTestSessionDao_Delete(t, testName, sessDao)
	// After the delete the table must be empty again.
	items, err := testAdc.ScanItems(nil, tableNameDynamodb, nil, "")
	if err != nil {
		t.Fatalf("%s failed: %s", testName, err)
	}
	if len(items) != 0 {
		t.Fatalf("%s failed: expected 0 item inserted but received %#v", testName, len(items))
	}
}

func TestSessionDaoAwsDynamodb_Update(t *testing.T) {
	testName := "TestSessionDaoAwsDynamodb_Update"
	teardownTest := setupTest(t, testName, setupTestDynamodb, teardownTestDynamodb)
	defer teardownTest(t)
	sessDao := NewSessionDaoAwsDynamodb(testAdc, tableNameDynamodb)
	doTestSessionDao_Update(t, testName, sessDao)
	// An update must not create a second row.
	items, err := testAdc.ScanItems(nil, tableNameDynamodb, nil, "")
	if err != nil {
		t.Fatalf("%s failed: %s", testName, err)
	}
	if len(items) != 1 {
		t.Fatalf("%s failed: expected 1 item inserted but received %#v", testName, len(items))
	}
}
package mysql

// Mysql represents a MySQL connection handle.
type Mysql struct {
}

// Connect opens a connection to the MySQL server at host:port.
// The receiver is named m per Go convention (was `this`, which Go style
// explicitly discourages). NOTE(review): the body is empty in this file —
// the implementation presumably lives elsewhere or is not written yet.
func (m *Mysql) Connect(host string, port int) {
}
/* Package edf aims to implement an interface to the EDF+ standard as described on http://www.edfplus.info/ for the Go language. The objective is to read a file and provide some kind of access to the stored data stored. It was written by Cristiano Silva Jr. while working at the Laboratory of Neuroscience and Behavior from the University of Brasilia and was released under the GPL license. */ package edf import "math" func elvis(pred bool, a int16, b int16) int16 { if pred { return a } return b } func identifyOverflow(inlet []int16) []int16 { var last int16 limit := len(inlet) outlet := make([]int16, limit) for i := 0; i < limit; i++ { it := inlet[i] diff := float64(it) - float64(last) if math.Abs(diff) > 16000 { // half int16 is a hell of a diff outlet[i] = elvis(diff > 0, 500, -500) } else { outlet[i] = 0 } last = it } return outlet } func rearrange(inlet []int16) []int16 { limit := len(inlet) overflow := identifyOverflow(inlet) midlet := make([]int, limit) step := int(math.Pow(2, 15)) - 1 factor := 0 for i := 0; i < limit; i++ { if overflow[i] < 0 { factor += step } else if overflow[i] > 0 { factor -= step } midlet[i] = int(inlet[i]) + factor } return convert(midlet) } func convert(midlet []int) []int16 { limit := len(midlet) outlet := make([]int16, limit) for i := 0; i < limit; i++ { outlet[i] = int16(midlet[i] / 2.0) } return outlet }
package mill

import (
	"bytes"
	"encoding/json"
	"image"
	"path/filepath"
	"strings"
	"time"

	"github.com/rwcarlsen/goexif/exif"
)

// ImageExifSchema is the JSON payload produced by the ImageExif mill:
// basic image geometry plus whatever EXIF metadata could be extracted.
type ImageExifSchema struct {
	Created   time.Time `json:"created,omitempty"`
	Name      string    `json:"name"`
	Ext       string    `json:"extension"`
	Width     int       `json:"width"`
	Height    int       `json:"height"`
	Format    string    `json:"format"`
	Latitude  float64   `json:"latitude,omitempty"`
	Longitude float64   `json:"longitude,omitempty"`
}

// ImageExif is a mill that extracts EXIF metadata from an image.
type ImageExif struct{}

// ID returns the mill identifier path.
func (m *ImageExif) ID() string {
	return "/image/exif"
}

// Encrypt reports that this mill's output should be encrypted.
func (m *ImageExif) Encrypt() bool {
	return true
}

// Pin reports that this mill's output should not be pinned.
func (m *ImageExif) Pin() bool {
	return false
}

// AcceptMedia accepts only JPEG, PNG and GIF media types.
func (m *ImageExif) AcceptMedia(media string) error {
	return accepts([]string{
		"image/jpeg",
		"image/png",
		"image/gif",
	}, media)
}

// Options hashes the (empty) fixed option set together with add.
func (m *ImageExif) Options(add map[string]interface{}) (string, error) {
	return hashOpts(make(map[string]string), add)
}

// Mill decodes the image header for dimensions/format, best-effort extracts
// the EXIF creation time and GPS coordinates, and returns the record
// marshalled as JSON. EXIF decode errors are deliberately ignored: the
// corresponding fields simply stay at their zero values.
func (m *ImageExif) Mill(input []byte, name string) (*Result, error) {
	conf, formatStr, err := image.DecodeConfig(bytes.NewReader(input))
	if err != nil {
		return nil, err
	}
	format := Format(formatStr)

	var created time.Time
	var lat, lon float64
	exf, err := exif.Decode(bytes.NewReader(input))
	if err == nil {
		createdTmp, err := exf.DateTime()
		if err == nil {
			created = createdTmp
		}
		latTmp, lonTmp, err := exf.LatLong()
		if err == nil {
			lat, lon = latTmp, lonTmp
		}
	}

	res := &ImageExifSchema{
		Created:   created,
		Name:      name,
		Ext:       strings.ToLower(filepath.Ext(name)),
		Format:    string(format),
		Width:     conf.Width,
		Height:    conf.Height,
		Latitude:  lat,
		Longitude: lon,
	}

	data, err := json.Marshal(res)
	if err != nil {
		return nil, err
	}

	return &Result{File: data}, nil
}
package main

import "fmt"

// soma returns the sum of its two integer arguments.
var soma = func(x, y int) int {
	total := x + y
	return total
}

// subtracao returns the difference x - y.
var subtracao = func(x, y int) int {
	diff := x - y
	return diff
}

// main demonstrates both arithmetic function values.
func main() {
	fmt.Println(soma(5, 9))
	fmt.Println(subtracao(5, 9))
}
package main

import (
	"crypto/tls"
	"mumbletest/mumblebot"
	"os"
)

// main reads the server address and credentials from the environment,
// builds a bot with TLS certificate verification disabled, and runs it.
func main() {
	addr := os.Getenv("MUMBLETEST_SERVER_ADDR")
	user := os.Getenv("MUMBLETEST_USERNAME")
	pass := os.Getenv("MUMBLETEST_PASSWORD")

	// NOTE: InsecureSkipVerify accepts any server certificate.
	bot := mumblebot.MumbleBot{
		ServerIP: addr,
		Username: user,
		Password: pass,
		TLSConfig: &tls.Config{
			InsecureSkipVerify: true,
		},
	}

	if err := bot.Start(); err != nil {
		panic(err)
	}
}
// Copyright 2018 Lars Hoogestraat // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package handler_test import ( "bytes" "context" "database/sql" "fmt" "io" "io/ioutil" "log" "mime/multipart" "net/http" "net/http/httptest" "net/url" "os" "path/filepath" "testing" "git.hoogi.eu/snafu/go-blog/crypt" "git.hoogi.eu/snafu/go-blog/database" "git.hoogi.eu/snafu/go-blog/logger" "git.hoogi.eu/snafu/go-blog/mail" "git.hoogi.eu/snafu/go-blog/middleware" "git.hoogi.eu/snafu/go-blog/models" "git.hoogi.eu/snafu/go-blog/settings" "git.hoogi.eu/snafu/session" _ "github.com/mattn/go-sqlite3" ) var ctx *middleware.AppContext var db *sql.DB func setup(t *testing.T) { logger.InitLogger(ioutil.Discard, "Debug") db, err := sql.Open("sqlite3", ":memory:") if err != nil { t.Fatal(err) } err = database.InitTables(db) if err != nil { t.Fatal(err) } err = fillSeeds(db) if err != nil { t.Fatal(err) } cfg, err := settings.LoadConfig("../go-blog.conf") if err != nil { t.Fatal(err) } cfg.File.Location = os.TempDir() userService := &models.UserService{ Datasource: &models.SQLiteUserDatasource{ SQLConn: db, }, Config: cfg.User, } userInviteService := &models.UserInviteService{ Datasource: &models.SQLiteUserInviteDatasource{ SQLConn: db, }, UserService: userService, } articleService := &models.ArticleService{ AppConfig: cfg.Application, Datasource: &models.SQLiteArticleDatasource{ SQLConn: db, }, } siteService := &models.SiteService{ Datasource: &models.SQLiteSiteDatasource{ SQLConn: db, }, } fileService := &models.FileService{ Config: cfg.File, Datasource: &models.SQLiteFileDatasource{ SQLConn: db, }, } categoryService := &models.CategoryService{ Datasource: &models.SQLiteCategoryDatasource{ SQLConn: db, }, } tokenService := &models.TokenService{ Datasource: &models.SQLiteTokenDatasource{ SQLConn: db, }, } mailer := &models.Mailer{ Sender: MockSMTP{}, AppConfig: &cfg.Application, } sessionService := session.Service{ Path: "/admin", Name: 
"test-session", HTTPOnly: true, Secure: true, SessionProvider: session.NewInMemoryProvider(), IdleSessionTTL: 10, } ctx = &middleware.AppContext{ UserService: userService, UserInviteService: userInviteService, ArticleService: articleService, CategoryService: categoryService, SiteService: siteService, FileService: fileService, TokenService: tokenService, SessionService: &sessionService, Mailer: mailer, ConfigService: cfg, } } func teardown() { if db != nil { db.Close() } } func fillSeeds(db *sql.DB) error { salt := crypt.GenerateSalt() saltedPassword := append([]byte("123456789012"), salt[:]...) password, err := crypt.CryptPassword([]byte(saltedPassword)) if err != nil { return err } _, err = db.Exec("INSERT INTO user (id, username, email, display_name, salt, password, active, is_admin, last_modified) VALUES (1, 'alice', 'alice@example.org', 'Alice Schneier', ?, ?, 1, 1, date('now'))", string(salt), password) if err != nil { return err } _, err = db.Exec("INSERT INTO user (id, username, email, display_name, salt, password, active, is_admin, last_modified) VALUES (2, 'bob', 'bob@example.org', 'Bob Stallman', ?, ?, 1, 0, date('now'))", string(salt), string(password)) if err != nil { return err } _, err = db.Exec("INSERT INTO user (id, username, email, display_name, salt, password, active, is_admin, last_modified) VALUES (3, 'mallory', 'mallory@example.org', 'Mallory Pike', ?, ?, 0, 1, date('now'))", string(salt), string(password)) if err != nil { return err } _, err = db.Exec("INSERT INTO user (id, username, email, display_name, salt, password, active, is_admin, last_modified) VALUES (4, 'eve', 'eve@example.org', 'Mallory Pike', ?, ?, 0, 0, date('now'))", string(salt), string(password)) if err != nil { return err } return nil } func dummyAdminUser() *models.User { u, _ := ctx.UserService.GetByID(1) return u } func dummyUser() *models.User { u, _ := ctx.UserService.GetByID(2) return u } func setHeader(r *http.Request, key, value string) { 
r.Header.Set("X-Unit-Testing-Value-"+key, value) } type MockSMTP struct{} func (sm MockSMTP) Send(m mail.Mail) error { return nil } func (sm MockSMTP) SendAsync(m mail.Mail) { } func addValue(m url.Values, key, value string) { m.Add(key, value) } func addCheckboxValue(m url.Values, key string, value bool) { if value { m.Add(key, "on") } m.Add(key, "off") } func postMultipart(path string, mp []multipartRequest) (*http.Request, error) { buf := &bytes.Buffer{} mw := multipart.NewWriter(buf) defer mw.Close() for _, v := range mp { fh, err := os.Open(v.file) if err != nil { return nil, err } defer fh.Close() fw, err := mw.CreateFormFile(v.key, filepath.Base(fh.Name())) if err != nil { return nil, err } _, err = io.Copy(fw, fh) if err != nil { return nil, err } } req, err := http.NewRequest("POST", path, buf) if err != nil { return nil, err } req.Header.Set("Content-Type", mw.FormDataContentType()) return req, nil } func post(path string, values url.Values) (*http.Request, error) { var b bytes.Buffer b.WriteString(values.Encode()) req, err := http.NewRequest("POST", path, &b) if err != nil { return nil, err } req.Header.Set("Content-Type", "application/x-www-form-urlencoded") return req, nil } func get(path string, values url.Values) (*http.Request, error) { var b bytes.Buffer b.WriteString(values.Encode()) req, err := http.NewRequest("GET", path, &b) if err != nil { return nil, err } return req, nil } //reqUser the user which should be added to the context type reqUser int const ( rGuest = iota rAdminUser rUser rInactiveAdminUser rInactiveUser ) //request used to build an http.Request with specified values //url will not really considered as the requests are not send, the *http.Request is just passed directly to the controllers //pathvar is an array of key/value pairs used as dynamic query parameters such as /article/{id} type request struct { url string user reqUser method string values url.Values pathVar []pathVar multipartReq []multipartRequest } type 
multipartRequest struct { key string file string } type pathVar struct { key string value string } func (r request) buildRequest() *http.Request { var req *http.Request var err error if len(r.multipartReq) > 0 { req, err = postMultipart(r.url, r.multipartReq) } else if r.method == http.MethodPost { req, err = post(r.url, r.values) } else { req, err = get(r.url, r.values) } if err != nil { log.Print(err) } if r.pathVar != nil { for _, v := range r.pathVar { setHeader(req, v.key, v.value) } } var user *models.User if r.user == rGuest { return req } else { user, _ = ctx.UserService.GetByID(int(r.user)) recorder := httptest.NewRecorder() session := ctx.SessionService.Create(recorder, req) session.SetValue("userid", user.ID) cookie := recorder.Result().Cookies()[0] req.AddCookie(cookie) } reqCtx := context.WithValue(req.Context(), middleware.UserContextKey, user) req = req.WithContext(reqCtx) return req } type responseWrapper struct { template *middleware.Template response *httptest.ResponseRecorder } func (r responseWrapper) getTemplateError() error { return r.template.Err } func (r responseWrapper) isCodeSuccess() bool { return r.response.Result().StatusCode == http.StatusOK } func (r responseWrapper) getStatus() int { return r.response.Result().StatusCode } func (r responseWrapper) getCookie(name string) (*http.Cookie, error) { for _, c := range r.response.Result().Cookies() { if c.Name == name { return c, nil } } return nil, fmt.Errorf("cookie %s not found", name) }
package main

import (
	"bytes"
	"flag"
	"io"
	"log"
	"os"
	"os/exec"
	"path/filepath"
)

// main either records a benchmark run (one positional argument: the result
// name) or compares two recorded runs with benchcmp (two positional
// arguments: old name, new name).
func main() {
	dir := flag.String("dir", "/tmp/bench", "Benchmark dir")
	flag.Parse()

	err := os.MkdirAll(*dir, 0775)
	if err != nil {
		log.Fatalf("error: %s", err)
	}

	// Use flag.NArg/flag.Arg for positionals. The original indexed os.Args
	// directly, which breaks as soon as -dir is actually passed (os.Args[1]
	// would then be "-dir", not the result name).
	if flag.NArg() == 0 {
		log.Fatalf("<name> required")
	}
	old := filepath.Join(*dir, flag.Arg(0))

	if flag.NArg() > 1 {
		// Compare two recorded runs.
		newFile := filepath.Join(*dir, flag.Arg(1)) // renamed to avoid shadowing builtin new
		cmd := exec.Command("benchcmp", old, newFile)
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		if err := cmd.Run(); err != nil {
			log.Fatalf("error: %s", err) // Fatalf exits; the original's trailing return was unreachable
		}
		return
	}

	// Record a new run: benchmark everything and tee the output to the file.
	cmd := exec.Command("go", "test", "-run=NONE", "-bench=.", "./...")
	b, err := cmd.CombinedOutput()
	if err != nil {
		log.Fatalf("error: %s", err)
	}

	f, err := os.Create(old)
	if err != nil {
		log.Fatalf("error: %s", err)
	}
	defer f.Close() // the original leaked this handle

	if _, err := io.Copy(f, bytes.NewReader(b)); err != nil {
		log.Fatalf("error: %s", err)
	}
	if _, err := io.Copy(os.Stdout, bytes.NewReader(b)); err != nil {
		log.Fatalf("error: %s", err)
	}
}
package file

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// Suffixes of file names the scanner reports.
var scanFileSuffixes = []string{".v", ".sv", ".svkey"}

var fileChannel = make(chan PathedFile, 100)
var scanCompleteChannel = make(chan bool, 1)

// PathedFile couples a file's metadata with its full path.
type PathedFile struct {
	info os.FileInfo
	path string
}

// Scan walks path in the background and returns the channel of matching
// files plus a completion-signal channel.
func Scan(path string) (*chan PathedFile, *chan bool) {
	go doScan(path)
	return &fileChannel, &scanCompleteChannel
}

// doScan walks rootPath, pushing every file whose name carries one of the
// configured suffixes onto fileChannel, then signals completion.
func doScan(rootPath string) {
	visit := func(path string, info os.FileInfo, err error) error {
		fmt.Println(path)
		if err != nil {
			return filepath.SkipDir
		}
		if isFileExtMatched(info.Name()) {
			fileChannel <- PathedFile{info, path}
		}
		return nil
	}
	if err := filepath.Walk(rootPath, visit); err != nil {
		panic(err)
	}
	defer func() {
		scanCompleteChannel <- true
	}()
}

// isFileExtMatched reports whether filename ends with any scan suffix.
func isFileExtMatched(filename string) bool {
	for _, suffix := range scanFileSuffixes {
		if strings.HasSuffix(filename, suffix) {
			return true
		}
	}
	return false
}
package user

import (
	"encoding/json"
	"math"
	"reflect"
	"sync"
	"sync/atomic"
	"testing"

	"github.com/Quaqmre/mirjmessage/logger"
	"github.com/Quaqmre/mirjmessage/mock"
)

// TestNewUser checks that consecutively created users receive sequential
// unique IDs starting at 1.
func TestNewUser(t *testing.T) {
	var mockedlogger logger.Service = mock.NewMockedLogger()
	var u *UserService = newUserService(mockedlogger)
	tests := []struct {
		name           string
		input          string
		expectedResult int32
	}{
		{
			name:           "firstuser",
			input:          "user1",
			expectedResult: 1,
		},
		{
			name:           "seconduser",
			input:          "user2",
			expectedResult: 2,
		},
	}
	for _, test := range tests {
		ex, err := u.NewUser(test.input, "arat")
		if err != nil {
			t.Error("expected nil error but returned:", err)
		}
		if ex.UniqID != test.expectedResult {
			t.Error("expected uniqname ali1 but returned:", ex.UniqID)
		}
	}
}

// TestNewUser_return_error checks that empty credentials are rejected with
// ErrorInvalidContext.
func TestNewUser_return_error(t *testing.T) {
	var mockedlogger logger.Service = mock.NewMockedLogger()
	var u *UserService = newUserService(mockedlogger)
	expected := ErrorInvalidContext
	_, err := u.NewUser("", "")
	if err != expected {
		t.Errorf("expected %s but returned %s", expected.Error(), err.Error())
	}
}

// TestMakeUniqName_with_max_int32 forces the ID counter to MaxInt32 so the
// next NewUser must fail instead of overflowing.
func TestMakeUniqName_with_max_int32(t *testing.T) {
	var mockedlogger logger.Service = mock.NewMockedLogger()
	var u *UserService = newUserService(mockedlogger)
	a := int32(math.MaxInt32)
	u.atomicCounter = &a
	_, err := u.NewUser("ali", "arat")
	if err == nil {
		t.Error("expected error but returned:", err)
	}
}

// TestAtomic_Increase_with_multiple_goroutine creates 1000 users
// concurrently and expects the next user's ID to be 1001.
// NOTE(review): "user"+string(j) converts j to a single rune, not its
// decimal digits — strconv.Itoa(j) was probably intended; left as-is.
func TestAtomic_Increase_with_multiple_goroutine(t *testing.T) {
	var mockedlogger logger.Service = mock.NewMockedLogger()
	var u *UserService = newUserService(mockedlogger)
	func() {
		var wg sync.WaitGroup
		for i := 0; i < 1000; i++ {
			wg.Add(1)
			go func(j int) {
				defer wg.Done()
				u.NewUser("user"+string(j), "pass")
			}(i)
		}
		wg.Wait()
	}()
	lastNewUser, _ := u.NewUser("test", "deneme")
	if lastNewUser.UniqID != 1001 {
		t.Error("expected count 1001 but returned:", lastNewUser.UniqID)
	}
}

// TestAtomic_Increase_generete_uniq_Id sums every ID handed out by
// concurrent NewUser calls; a correct sequence 1..loopcount-1 sums to
// loopcount*(loopcount-1)/2, which proves each ID was issued exactly once.
func TestAtomic_Increase_generete_uniq_Id(t *testing.T) {
	var mockedlogger logger.Service = mock.NewMockedLogger()
	var u *UserService = newUserService(mockedlogger)
	count := int32(0)
	loopcount := 10000
	func() {
		var wg sync.WaitGroup
		for i := 1; i < loopcount; i++ {
			wg.Add(1)
			go func(j int) {
				defer wg.Done()
				user, _ := u.NewUser("user"+string(j), "pass")
				atomic.AddInt32(&count, user.UniqID)
			}(i)
		}
		wg.Wait()
	}()
	expectedCount := loopcount * (loopcount - 1) / 2
	if count != int32(expectedCount) {
		t.Error("expected total count:", expectedCount, "but returned total count:", count)
	}
}

// TestUserService_Get verifies Get returns the exact user NewUser produced.
func TestUserService_Get(t *testing.T) {
	var mockedlogger logger.Service = mock.NewMockedLogger()
	var u *UserService = newUserService(mockedlogger)
	expected, _ := u.NewUser("deneme", "sifre")
	newUser := u.Get(expected.UniqID)
	if !reflect.DeepEqual(expected, newUser) {
		t.Error("genereted user and getting user is not equal")
	}
}

// TestUserService_Marshall round-trips a User through JSON.
func TestUserService_Marshall(t *testing.T) {
	var mockedlogger logger.Service = mock.NewMockedLogger()
	var u *UserService = newUserService(mockedlogger)
	expected := User{Name: "akif", Password: "123", UniqID: 1}
	testMarshal, _ := json.Marshal(expected)
	parsedUser, _ := u.Marshall(testMarshal)
	if expected != *parsedUser {
		t.Errorf("expected User string %v,returned %v", expected, parsedUser)
	}
}

// TestUserService_Marshall_return_error feeds truncated JSON and expects
// an error.
func TestUserService_Marshall_return_error(t *testing.T) {
	var mockedlogger logger.Service = mock.NewMockedLogger()
	var u *UserService = newUserService(mockedlogger)
	testMarshal := `{"User":"akif"`
	_, err := u.Marshall([]byte(testMarshal))
	if err == nil {
		t.Error("expected err but returned err=nil")
	}
}
package main

import (
	"bufio"
	"database/sql"
	"fmt"
	"os"
	"time"

	_ "github.com/go-sql-driver/mysql"
	"github.com/moraes/config"
)

// Query runs a single SQL statement and returns the last scanned column of
// the last row; the script runner only cares that the statement succeeds.
func Query(instruction *string, db *sql.DB) (string, error) {
	query, err := db.Query(*instruction)
	if err != nil {
		return "", err
	}
	defer query.Close() // release the rows; the original leaked them on multi-row results
	var dbRes string
	for query.Next() {
		query.Scan(&dbRes)
	}
	return dbRes, query.Err()
}

// LoadConfig reads config.yml into a string, exiting on open failure.
func LoadConfig() string {
	cfg, err := os.Open("config.yml")
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error on open config file!\n")
		os.Exit(1)
	}
	defer cfg.Close() // the original never closed the config file
	cfg_str := ""
	buff := bufio.NewScanner(cfg)
	for buff.Scan() {
		cfg_str = cfg_str + buff.Text() + "\n"
	}
	return cfg_str
}

// main connects to the configured database and executes each SQL script
// given on the command line, statement by statement (';'-terminated).
func main() {
	if len(os.Args) < 2 {
		fmt.Fprintf(os.Stderr, "Usage: %s [sql_script1, sql_script2, sql_script3...]\n", os.Args[0])
		os.Exit(1)
	}

	cfg, err := config.ParseYaml(LoadConfig())
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error on load config: %s\n", err)
		os.Exit(1)
	}

	user, _ := cfg.String("database.user")
	pass, _ := cfg.String("database.pass")
	host, _ := cfg.String("database.host")
	port, _ := cfg.String("database.port")
	db_name, _ := cfg.String("database.db_name")
	db_type, _ := cfg.String("database.db_type")

	// Check the Open error instead of discarding it (the original used db, _ :=).
	db, err := sql.Open(db_type, fmt.Sprintf("%s:%s@(%s:%s)/%s", user, pass, host, port, db_name))
	if err != nil {
		fmt.Fprintf(os.Stderr, "Invalid database configuration: %s\n", err)
		os.Exit(1)
	}
	defer db.Close()

	err = db.Ping()
	if err != nil {
		fmt.Println("Database not responding, check your config file or either server is up and running")
		os.Exit(1)
	}

	// Opening script(s)
	start := time.Now()
	for _, file := range os.Args[1:] {
		fileFd, err := os.Open(file)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Cant open file %s\n", file)
			os.Exit(1)
		}

		scan := bufio.NewScanner(fileFd)
		query := ""
		for scan.Scan() {
			line := scan.Text()
			query = query + line
			// Execute once the accumulated statement ends with ';'.
			if len(query) > 0 && query[len(query)-1] == ';' {
				_, err := Query(&query, db)
				if err != nil {
					fmt.Fprintf(os.Stderr, "ERROR: %s\n", err)
					os.Exit(1)
				}
				query = ""
			}
		}
		fileFd.Close() // the original never closed the script files
	}
	fmt.Printf("Finished in %s\n", time.Since(start))
}
package main

import (
	"os"
	"path/filepath"
	"reflect"
	"testing"
	"time"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"

	"github.com/stretchr/testify/assert"
	v1 "k8s.io/api/apps/v1"
	batchv1 "k8s.io/api/batch/v1"
	"k8s.io/apimachinery/pkg/util/clock"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// TestOptions_checkGitOperator drives the jx-git-operator deployment health
// check against a fake Kubernetes client.
func TestOptions_checkGitOperator(t *testing.T) {
	// getting the current namespace is found from a local kube config file
	err := os.Setenv("KUBECONFIG", filepath.Join("test_data", "test-config"))
	assert.NoError(t, err)

	type fields struct {
		sr *v1.Deployment
	}
	tests := []struct {
		name    string
		fields  fields
		want    []string
		wantErr bool
	}{
		// Healthy: requested replicas == ready replicas, no messages expected.
		{name: "no_errors", fields: fields{sr: &v1.Deployment{
			ObjectMeta: metav1.ObjectMeta{
				Name:      operatorDeployment,
				Namespace: "cheese",
			},
			Spec:   v1.DeploymentSpec{Replicas: int32Ptr(2)},
			Status: v1.DeploymentStatus{ReadyReplicas: 2},
		}}},
		// A deployment exists but it is not the operator.
		{name: "no_git_operator", fields: fields{sr: &v1.Deployment{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "foo",
				Namespace: "cheese",
			},
		}}, want: []string{"failed to find jx-git-operator in namespace cheese"}},
		// Operator present but none of its pods became ready.
		{name: "no_ready_pods", fields: fields{sr: &v1.Deployment{
			ObjectMeta: metav1.ObjectMeta{
				Name:      operatorDeployment,
				Namespace: "cheese",
			},
			Spec: v1.DeploymentSpec{Replicas: int32Ptr(2)},
		}}, want: []string{"ready pods (0) to not match the expected number (2)"}},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			client := fake.NewSimpleClientset(tt.fields.sr)
			o := Options{
				client: client,
			}
			got := o.checkGitOperator("cheese")
			// NOTE(review): err here is still the Setenv error from above,
			// not an error from checkGitOperator — wantErr is effectively
			// unused in this test; confirm whether that is intended.
			if (err != nil) != tt.wantErr {
				t.Errorf("findErrors() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("findErrors() got = %v, want %v", got, tt.want)
			}
		})
	}
}

// TestOptions_checkBootJob drives the boot-job health check with a fake
// clock so the job-age rules can be tested deterministically.
func TestOptions_checkBootJob(t *testing.T) {
	// getting the current namespace is found from a local kube config file
	err := os.Setenv("KUBECONFIG", filepath.Join("test_data", "test-config"))
	assert.NoError(t, err)

	now := time.Now()
	fakeClock := clock.NewFakeClock(now)
	currentTime := metav1.NewTime(fakeClock.Now())
	time10MinsAgo := metav1.NewTime(now.Add(-10 * time.Minute))
	time20MinsAgo := metav1.NewTime(now.Add(-20 * time.Minute))

	type fields struct {
		objects         []runtime.Object
		minsInTheFuture time.Duration
	}
	tests := []struct {
		name    string
		fields  fields
		want    []string
		wantErr bool
	}{
		// No job carries the jx-boot label at all.
		{name: "no_boot_job", fields: fields{objects: []runtime.Object{&batchv1.Job{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "foo",
				Namespace: "cheese",
			},
		}}}, want: []string{"failed to find any boot jobs in namespace cheese"}},
		// Labeled job exists but has no StartTime yet.
		{name: "boot_job_not_started", fields: fields{objects: []runtime.Object{&batchv1.Job{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "foo",
				Namespace: "cheese",
				Labels:    map[string]string{"app": "jx-boot"},
			},
		}}}, want: []string{"latest boot job foo has not started, it could be stuck"}},
		// Advancing the fake clock 35 minutes makes the running job exceed
		// the default 30m limit.
		{name: "boot_job_running_more_than_default_exceeded_time", fields: fields{objects: []runtime.Object{&batchv1.Job{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "foo",
				Namespace: "cheese",
				Labels:    map[string]string{"app": "jx-boot"},
			},
			Status: batchv1.JobStatus{
				StartTime: &currentTime,
			},
		}}, minsInTheFuture: 35}, want: []string{"latest boot job foo has been running for more than 30m0s, it could be stuck"}},
		// Two failed jobs: the one with the most recent StartTime is reported.
		{name: "sort_get_latest_boot_job", fields: fields{objects: []runtime.Object{&batchv1.Job{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "foo",
				Namespace: "cheese",
				Labels:    map[string]string{"app": "jx-boot"},
			},
			Status: batchv1.JobStatus{
				StartTime: &time10MinsAgo,
				Failed:    1,
			},
		}, &batchv1.Job{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "bar",
				Namespace: "cheese",
				Labels:    map[string]string{"app": "jx-boot"},
			},
			Status: batchv1.JobStatus{
				StartTime: &time20MinsAgo,
				Failed:    1,
			},
		}}, minsInTheFuture: 10}, want: []string{"latest boot job foo has a failed run"}},
		// Same as above with the start times swapped, proving ordering matters.
		{name: "sort_get_latest_boot_job_change_start_time_order", fields: fields{objects: []runtime.Object{&batchv1.Job{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "foo",
				Namespace: "cheese",
				Labels:    map[string]string{"app": "jx-boot"},
			},
			Status: batchv1.JobStatus{
				StartTime: &time20MinsAgo,
				Failed:    1,
			},
		}, &batchv1.Job{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "bar",
				Namespace: "cheese",
				Labels:    map[string]string{"app": "jx-boot"},
			},
			Status: batchv1.JobStatus{
				StartTime: &time10MinsAgo,
				Failed:    1,
			},
		}}, minsInTheFuture: 10}, want: []string{"latest boot job bar has a failed run"}},
		// Job started recently and is within the time limit: no messages.
		{name: "no_errors", fields: fields{objects: []runtime.Object{&batchv1.Job{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "foo",
				Namespace: "cheese",
				Labels:    map[string]string{"app": "jx-boot"},
			},
			Status: batchv1.JobStatus{
				StartTime: &currentTime,
			},
		}}, minsInTheFuture: 10}, want: nil},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if tt.fields.minsInTheFuture != 0 {
				t2 := now.Add(tt.fields.minsInTheFuture * time.Minute)
				fakeClock.SetTime(t2)
			}
			client := fake.NewSimpleClientset(tt.fields.objects...)
			o := Options{
				client: client,
				clock:  fakeClock,
			}
			got, err := o.checkBootJob("cheese")
			if (err != nil) != tt.wantErr {
				t.Errorf("findErrors() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("findErrors() got = %v, want %v", got, tt.want)
			}
		})
	}
}

// int32Ptr returns a pointer to i.
func int32Ptr(i int32) *int32 {
	return &i
}
package app ///go:generate mockgen -destination=testing.generated.go -package=app -source=app.go App ///go:generate mockgen -destination=testing.generated.go -package=app github.com/powerman/bug-gomock/app App ///go:generate mockgen -destination=testing.generated.go -package=app -self_package=github.com/powerman/bug-gomock/app -source=app.go App ///go:generate mockgen -destination=testing.generated.go -package=app -self_package=github.com/powerman/bug-gomock/app github.com/powerman/bug-gomock/app App import ( "context" ) // Ctx is a synonym for convenience. type Ctx = context.Context type Nothing struct{} type nothing struct{} type App interface { // Noop(Ctx) // source: doesn't work, reflect: works // Noop(context.Context) // source: work, reflect: works // Noop(Nothing) // source: doesn't work, reflect: doesn't work // Noop(nothing) // source: work, reflect: doesn't work }
package channels

import (
	"fmt"
	"sync"
	"time"
)

// InitSync2 demonstrates channel-based synchronization: one goroutine
// computes a square, a second reports it, and the enclosing goroutine
// blocks until the reporter says everything is done.
func InitSync2() {
	var wg sync.WaitGroup
	wg.Add(1) // wait for the single worker goroutine below

	go func() {
		value := 10
		result := 0

		goChan := make(chan int)
		mainChan := make(chan string)

		// Producer: simulate a slow computation, then publish the square.
		calculateSquare := func() {
			time.Sleep(time.Second * 3)
			result = value * value
			goChan <- result
		}

		// Consumer: blocks reading goChan, prints, then signals completion.
		reportResult := func() {
			fmt.Println(value, " squared is ", <-goChan)
			mainChan <- "We are now done!"
		}

		go calculateSquare()
		go reportResult()

		<-mainChan // block until the reporter has finished
		wg.Done()
	}()

	wg.Wait()
}
package main

import (
	b64 "encoding/base64"
	"encoding/json"
	"flag"
	"fmt"
	"log"
	"net/http"
	"os"
	"strings"
	"time"

	"github.com/ckin-it/minedive/minedive"
	"github.com/go-redis/redis"
	"github.com/gorilla/handlers"
	"github.com/gorilla/mux"
)

var s minedive.MinediveServer
var port int
var rdb *redis.Client
var rcmd redis.Cmdable
var dataSource string
var queueName string

// signalingDispatch drains this instance's Redis queue forever, forwarding
// each decoded Cell to its target client.
// NOTE(review): entries are json.Unmarshal'ed directly, but the "offer"
// branch of minediveDispatch pushes base64-encoded payloads — one of the
// two sides looks inconsistent; confirm the intended wire format.
func signalingDispatch() {
	for {
		r := rcmd.BRPop(300*time.Millisecond, "Q_"+queueName)
		for _, a := range r.Val() {
			var m minedive.Cell
			err := json.Unmarshal([]byte(a), &m)
			if err != nil {
				log.Println(err)
			} else {
				s.FwdToTarget(&m)
			}
		}
	}
}

// minediveDispatch routes a Cell from a connected client. Cells addressed
// to this instance (m.D3 matches queueName, or empty) are forwarded
// directly; others are queued on Redis for the owning instance.
func minediveDispatch(cli *minedive.MinediveClient, m minedive.Cell) {
	switch m.Type {
	case "sub":
		cli.Name = m.D0
	case "offer":
		if m.D3 == queueName || m.D3 == "" {
			s.FwdToTarget(&m)
		} else {
			b, err := json.Marshal(m)
			if err != nil {
				log.Println("remarshal failed")
			}
			// NOTE(review): base64-encoded here, but "answer" pushes the raw
			// bytes — confirm which encoding signalingDispatch expects.
			rcmd.LPush("Q_"+queueName, b64.StdEncoding.EncodeToString(b))
		}
	case "answer":
		if m.D3 == queueName || m.D3 == "" {
			s.FwdToTarget(&m)
		} else {
			b, err := json.Marshal(m)
			if err != nil {
				log.Println("remarshal failed")
			}
			rcmd.LPush("Q_"+queueName, b)
		}
	default:
		log.Println(m.Type)
	}
}

// runMined connects to the configured data source (only redis is
// implemented), initializes the minedive server with the dispatcher above,
// and serves the /ws websocket endpoint with request logging.
func runMined(port int) {
	dsn := strings.Split(dataSource, "://")
	switch dsn[0] {
	case "redis":
		rdb = redis.NewClient(&redis.Options{
			Addr:     dsn[1],
			Password: "", // no password set
			DB:       0,  // use default DB
		})
		rcmd = rdb
		// Round-trip to redis up front so a bad address fails loudly here.
		a := rcmd.Time().String()
		log.Println("redis time:", a)
		defer rdb.Close()
	default:
		log.Fatalf("%s not implemented\n", dsn[0])
	}
	s.InitMinediveServer()
	s.Dispatch = minediveDispatch
	mux := mux.NewRouter()
	mux.HandleFunc("/ws", s.MinediveAccept)
	loggedRouter := handlers.CustomLoggingHandler(os.Stdout, mux, ProxyFormatter)
	hs := &http.Server{
		ReadTimeout:       10 * time.Second,
		WriteTimeout:      10 * time.Second,
		ReadHeaderTimeout: 20 * time.Second,
		Addr:              fmt.Sprintf(":%d", port),
		Handler:           loggedRouter,
	}
	var err error
	err = hs.ListenAndServe()
	if err != nil {
		panic("ListenAndServe: " + err.Error())
	}
}

// init registers the command-line flags.
func init() {
	flag.IntVar(&port, "port", 6501, "Listen port")
	flag.StringVar(&dataSource, "source", "redis://127.0.0.1:6379", "source selection (full DSN, only redis supported)")
	flag.StringVar(&queueName, "q", "ws://localhost:6501/ws", "queue name")
}

// main parses flags and runs the server.
func main() {
	flag.Parse()
	runMined(port)
}
package bgo import ( "os" stack "github.com/Gurpartap/logrus-stack" tf "github.com/pickjunk/bgo/text_formatter" log "github.com/sirupsen/logrus" ) // Logger struct type Logger struct { *log.Logger } // Log instance var Log = initLogger() func initLogger() *Logger { if os.Getenv("ENV") == "production" { log.SetFormatter(&log.JSONFormatter{}) } else { log.SetFormatter(&tf.TextFormatter{}) log.SetLevel(log.DebugLevel) } log.SetOutput(os.Stdout) callerLevels := []log.Level{} stackLevels := []log.Level{log.PanicLevel, log.FatalLevel, log.ErrorLevel} stackHook := stack.NewHook(callerLevels, stackLevels) log.AddHook(stackHook) return &Logger{ log.StandardLogger(), } }
package main import( "fmt" "hashtable" "strconv" ) func main(){ var ht hashtable.HashTable ht.InitHashtable(4,1) fmt.Println() fmt.Println("------------------------------------------------------------------------------------") for i:=0;i<10;i++{ ht.Put(i,strconv.Itoa(i)+" hello") fmt.Println() fmt.Println("哈希表结点个数:",ht.GetCount()) fmt.Println("哈希表的容量:",ht.GetCapacity()) fmt.Println("哈希表阈值:",ht.GetThreshold()) fmt.Println("哈希表tablesize:",ht.GetTablesize()) fmt.Println() } fmt.Println() fmt.Println("哈希表结点个数为:",ht.GetCount()) fmt.Println() for i:=0;i<10;i++{ fmt.Println(i,"---->",ht.Get(i)) } fmt.Println() fmt.Println("key为20的value:",ht.Get(20)) fmt.Println("key为8的value:",ht.Get(8)) }
package config import ( "github.com/spf13/viper" ) // ViperProvider represents the structure to manage // configurations based on github.com/spf13/viper type ViperProvider struct { v *viper.Viper } // NewViperProvider creates a new configuration provider // based on github.com/spf13/viper func NewViperProvider(v *viper.Viper) (*ViperProvider, error) { p := &ViperProvider{ v: v, } return p, nil } // Get returns key from the configuration and will not cast it func (p *ViperProvider) Get(key string) interface{} { return p.v.Get(key) } // GetStringSlice returns the value associated with the key as a slice of strings. func (p *ViperProvider) GetStringSlice(key string) []string { return p.v.GetStringSlice(key) } // GetContentMap returns the complete content of the provider data source as a map func (p *ViperProvider) GetContentMap() map[string]interface{} { // TODO Implement return map[string]interface{}{} } // GetString returns key from the configuration as a casted String func (p *ViperProvider) GetString(key string) string { return p.v.GetString(key) }
package main import ( "net/http" ) func main() { http.HandleFunc("/", DisplayHandler) http.ListenAndServe(":5000", nil) }
package main type obj = map[string]interface{} type array = []interface{}
/* * Tencent is pleased to support the open source community by making 蓝鲸 available. * Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved. * Licensed under the MIT License (the "License"); you may not use this file except * in compliance with the License. You may obtain a copy of the License at * http://opensource.org/licenses/MIT * Unless required by applicable law or agreed to in writing, software distributed under * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, * either express or implied. See the License for the specific language governing permissions and * limitations under the License. */ package sys import "bscp.io/pkg/iam/client" // GenerateCommonActions generate all the common actions registered to IAM. func GenerateCommonActions() []client.CommonAction { CommonActions := make([]client.CommonAction, 0) CommonActions = append(CommonActions, genBizCommonActions()...) return CommonActions } // genBizCommonActions 推荐权限,业务查看、业务运维 func genBizCommonActions() []client.CommonAction { return []client.CommonAction{ { Name: "业务查看", EnglishName: "business view", Actions: []client.ActionWithID{ { BusinessViewResource, }, { AppView, }, { CredentialView, }, }, }, { Name: "业务运维", EnglishName: "business ops", Actions: []client.ActionWithID{ { BusinessViewResource, }, { AppCreate, }, { AppView, }, { AppEdit, }, { AppDelete, }, { GenerateRelease, }, { ReleasePublish, }, { CredentialCreate, }, { CredentialView, }, { CredentialEdit, }, { CredentialDelete, }, }, }, } }
package controller import ( "encoding/json" "log" "net/http" service "github.com/darkarchana/darkarchana-backend/service/serviceimpl" "github.com/darkarchana/darkarchana-backend/view" ) // Heroes : Heroes API func Heroes() http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { var clientReq view.HeroesRequest var status view.Status if r.Method == http.MethodGet { err := json.NewDecoder(r.Body).Decode(&clientReq) if err != nil { log.Print(err) status.Code = http.StatusBadRequest status.Response = err } switch clientReq.Request { case "findOne": data, err := service.HeroesServiceImpl().FindOne(clientReq) if err != nil { log.Print(err) status.Code = http.StatusBadRequest status.Response = err } else { status.Code = http.StatusOK status.Response = data } case "findAll": data, err := service.HeroesServiceImpl().FindAll(clientReq) if err != nil { log.Print(err) status.Code = http.StatusBadRequest status.Response = err } else { status.Code = http.StatusOK status.Response = data } default: status.Code = http.StatusBadRequest status.Response = "Request is not Valid" } w.Header().Set("Content-Type", "application/json") w.WriteHeader(status.Code) if err != nil { log.Print(err) status.Response = err } else { if status.Code != http.StatusBadRequest { json.NewEncoder(w).Encode(status.Response) } else { json.NewEncoder(w).Encode(status) } } } } }
package controllers import ( "log" "mick/models" "github.com/jinzhu/gorm" ) func ConnDataBase() (*gorm.DB, error) { db, err := gorm.Open("sqlite3", "./photos.db") if err != nil { log.Println(err) } // init db.DB() db.DB().Ping() db.DB().SetMaxIdleConns(10) db.DB().SetMaxOpenConns(100) db.LogMode(true) db.AutoMigrate(&models.Photo{}, &models.Album{}, &models.Admin{}) return db, err }
// Package tokens generates and validates JSON Web Signatures package tokens import ( "crypto/rsa" "encoding/json" "fmt" "io" "io/ioutil" "time" "gopkg.in/square/go-jose.v1" ) // Encoder helps create JSON Web Signatures for any payload. type Encoder struct { signer jose.Signer pub *rsa.PublicKey } // NewRSAEncoder creates a Encoder. // // The private key must be of a RSA private-public key pair. func NewRSAEncoder(priv io.Reader) (*Encoder, error) { raw, err := ioutil.ReadAll(priv) if err != nil { return nil, fmt.Errorf("cannot read private key: %s", err) } interm, err := jose.LoadPrivateKey(raw) if err != nil { return nil, fmt.Errorf("cannot parse private key: %s", err) } privKey, ok := interm.(*rsa.PrivateKey) if !ok { return nil, fmt.Errorf("cannot handle key type %T", interm) } s, err := jose.NewSigner(jose.PS256, privKey) if err != nil { return nil, fmt.Errorf("could not create signer: %s", err) } return &Encoder{ signer: s, pub: &privKey.PublicKey, }, nil } // Encode creates an Encoded JSON Web Signature for the given payload. // // The payload must be a pointer to the interface and must be JSON encodable. func (s *Encoder) Encode(payload interface{}, expiry time.Time) (string, error) { content := &tokenContent{ Expiry: expiry, Payload: payload, } raw, err := json.Marshal(content) if err != nil { return "", fmt.Errorf("could not marshal payload: %s", err) } jws, err := s.signer.Sign(raw) if err != nil { return "", fmt.Errorf("could not sign payload: %s", err) } token, err := jws.CompactSerialize() if err != nil { return "", fmt.Errorf("could not serialize: %s", err) } return token, nil } // Decoder creates a Decoder that can parse the tokens created by this Encoder. func (s *Encoder) Decoder() *Decoder { return &Decoder{s.pub} } // Decoder decodes signed tokens. type Decoder struct { pub *rsa.PublicKey } // NewRSADecoder creates a new Decoder given the public key. // // The public key must be of a RSA private-public key pair. 
func NewRSADecoder(pub io.Reader) (*Decoder, error) { raw, err := ioutil.ReadAll(pub) if err != nil { return nil, fmt.Errorf("cannot read public key: %s", err) } interm, err := jose.LoadPublicKey(raw) if err != nil { return nil, fmt.Errorf("cannot parse public key: %s", err) } key, ok := interm.(*rsa.PublicKey) if !ok { return nil, fmt.Errorf("cannot handle key type %T", interm) } return &Decoder{key}, nil } // DecodeAsOf validates the token as of the given time and extracts the payload from it. // // The payload must be a pointer to the interface and must be JSON decodable. func (v *Decoder) DecodeAsOf(when time.Time, token string, payload interface{}) error { jws, err := jose.ParseSigned(token) if err != nil { return fmt.Errorf("cannot parse token: %s", err) } raw, err := jws.Verify(v.pub) if err != nil { return fmt.Errorf("could not verify token: %s", err) } content := tokenContent{ Payload: payload, } err = json.Unmarshal(raw, &content) if err != nil { return fmt.Errorf("could not decode payload: %s", err) } if when.After(content.Expiry) { return fmt.Errorf("token has expired") } return nil } // Decode validates the token at the current time and extracts the payload from it. // // The payload must be a pointer to the interface and must be JSON decodable. func (v *Decoder) Decode(token string, payload interface{}) error { return v.DecodeAsOf(time.Now(), token, payload) } type tokenContent struct { Expiry time.Time `json:"expiry"` Payload interface{} `json:"payload"` }
package mod import ( "flag" "fmt" "github.com/beego/bee/cmd/commands" "github.com/beego/bee/cmd/commands/version" "github.com/beego/bee/utils" "os" "path" "path/filepath" "strings" ) var m = ` module {{.appname}} go 1.12 ` var mod = &commands.Command{ CustomFlags: true, UsageLine: "mod", Short: "create mod go application", Long: ` bee mod -app application_name" `, PreRun: func(cmd *commands.Command, args []string) { version.ShowShortVersionBanner() }, Run: modApp, } var appname string func init() { fs := flag.NewFlagSet("mod", flag.ContinueOnError) fs.StringVar(&appname, "app", "", "Connection string used by the driver to connect to a database instance.") mod.Flag = *fs commands.AvailableCommands = append(commands.AvailableCommands, mod) } func modApp(cmd *commands.Command, args []string) int { cmd.Flag.Parse(args) fmt.Println(args) app, _ := os.Getwd() app = filepath.Join(app, appname) os.MkdirAll(app, 0755) utils.WriteToFile(path.Join(app, "go.mod"), strings.Replace(m, "{{.appname}}", appname, -1)) return 0 }
package main import ( "cm_liveme_im/libs/bufio" "cm_liveme_im/libs/bytepool" "cm_liveme_im/libs/bytes" aesext "cm_liveme_im/libs/crypto/aes" rsaext "cm_liveme_im/libs/crypto/rsa" "cm_liveme_im/libs/define" "cm_liveme_im/libs/proto" itime "cm_liveme_im/libs/time" "encoding/json" "fmt" pb "github.com/golang/protobuf/proto" "io" "net" "sync/atomic" "time" log "github.com/thinkboy/log4go" ) var ( LargeMsgBodyPool *bytepool.BytePool ) // InitTCP listen all tcp.bind and start accept connections. func InitTCP(addrs []string, coreNum int) (err error) { var ( bind string listener *net.TCPListener addr *net.TCPAddr ) LargeMsgBodyPool = &bytepool.BytePool{} LargeMsgBodyPool.Init(0, Conf.MsgBodyPooledMaxSize) // start the listening for _, bind = range addrs { if addr, err = net.ResolveTCPAddr("tcp4", bind); err != nil { log.Error("net.ResolveTCPAddr(\"tcp4\", \"%s\") error(%v)", bind, err) return } if listener, err = net.ListenTCP("tcp4", addr); err != nil { log.Error("net.ListenTCP(\"tcp4\", \"%s\") error(%v)", bind, err) return } log.Info("start tcp listen: \"%s\"", bind) // split N core to accept for i := 0; i < coreNum; i++ { go acceptTCP(DefaultServer, listener) } } return } // Accept accepts connections on the listener and serves requests // for each incoming connection. Accept blocks; the caller typically // invokes it in a go statement. 
func acceptTCP(server *Server, lis *net.TCPListener) { var ( conn *net.TCPConn err error r int ) for { if conn, err = lis.AcceptTCP(); err != nil { // if listener close then return log.Error("listener.Accept(\"%s\") error(%v)", lis.Addr().String(), err) return } if err = conn.SetKeepAlive(server.Options.TCPKeepalive); err != nil { log.Error("conn.SetKeepAlive() error(%v)", err) return } if err = conn.SetReadBuffer(server.Options.TCPRcvbuf); err != nil { log.Error("conn.SetReadBuffer() error(%v)", err) return } if err = conn.SetWriteBuffer(server.Options.TCPSndbuf); err != nil { log.Error("conn.SetWriteBuffer() error(%v)", err) return } go serveTCP(server, conn, r) if r++; r == maxInt { r = 0 } } } func serveTCP(server *Server, conn *net.TCPConn, r int) { var ( // timer tr = server.round.Timer(r) rp = server.round.Reader(r) wp = server.round.Writer(r) // ip addr lAddr = conn.LocalAddr().String() rAddr = conn.RemoteAddr().String() ) log.Debug("new tcp connection to \"%s\" from \"%s\"", lAddr, rAddr) atomic.AddUint64(&StatSpd.CurConn, 1) server.serveTCP(conn, rp, wp, tr) atomic.AddUint64(&StatSpd.CurConn, ^uint64(0)) } // TODO linger close? 
// serveTCP runs a client connection end-to-end: it performs the auth
// handshake (bounded by a handshake timer), registers the channel in its
// bucket, starts the companion dispatchTCP writer goroutine, and then loops
// reading client protos — heartbeats refresh the timer, everything else is
// handed to the operator. On exit it tears down the channel, returns the
// pooled buffers and notifies the operator of the disconnect.
func (server *Server) serveTCP(conn *net.TCPConn, rp, wp *bytes.Pool, tr *itime.Timer) {
	var (
		err error
		key string
		hb  time.Duration // heartbeat
		p   *proto.Proto
		b   *Bucket
		trd *itime.TimerData
		rb  = rp.Get()
		wb  = wp.Get()
		ch  = NewChannel(server.Options.SvrProto)
		rr  = &ch.Reader
		wr  = &ch.Writer
	)
	// Point the channel's buffered reader/writer at this socket, backed by
	// the pooled byte buffers.
	ch.Reader.ResetBuffer(conn, rb.Bytes())
	ch.Writer.ResetBuffer(conn, wb.Bytes())
	// auth started with a timeout
	trd = tr.Add(server.Options.HandshakeTimeout, func() {
		log.Warn("shutdown tcp connection from \"%s\" due to auth timeout", conn.RemoteAddr().String())
		conn.Close()
	})
	if key, hb, err = server.authTCP(rr, wr, &ch.TmpProto); err == nil {
		ch.TmpProto.Operation = define.OP_AUTH_REPLY
	} else {
		ch.TmpProto.Operation = define.OP_AUTH_DENY
	}
	// The auth reply/deny carries no body; send it even on failure so the
	// client learns the outcome.
	ch.TmpProto.Body = nil
	if errWrt := ch.TmpProto.WriteTCP(wr); errWrt != nil {
		log.Debug("Unable to write auth reply to client connection")
		if err == nil {
			err = errWrt
		}
	}
	if errFl := wr.Flush(); errFl != nil {
		log.Debug("Unable to flush auth reply client connection")
		if err == nil {
			err = errFl
		}
	}
	if err != nil {
		// Failed auth: release everything taken so far and bail out.
		conn.Close()
		rp.Put(rb)
		wp.Put(wb)
		tr.Del(trd)
		log.Trace("connection[%s] auth failed(%v), shutdown tcp connection", key, err)
		return
	}
	// auth finally passed, lazy init ch.CliProto to ease the memory overhead of those failed to auth connections
	ch.InitCliProto(server.Options.CliProto)
	b = server.Bucket(key)
	b.Put(key, ch, tr)
	// set up the heartbeat timeout
	trd.Key = key
	tr.SetEx(trd, hb, func() {
		log.Warn("shutdown tcp connection from \"%s\" due to heartbeat timeout", conn.RemoteAddr().String())
		conn.Close()
	})
	// auth passed, start dispatch goroutine
	go server.dispatchTCP(key, conn, wr, wp, wb, ch)
	for {
		// Try to reserve a ring-buffer slot; fall back to TmpProto so the
		// socket keeps draining even when the ring is full.
		if p, err = ch.CliProto.Set(); err != nil {
			p = &ch.TmpProto
		}
		if err = p.ReadTCP(rr, LargeMsgBodyPool, Conf.MsgBodyValidMaxSize); err != nil {
			break
		}
		if p == &ch.TmpProto {
			// Retry for a ring slot now that a full proto has been read.
			if p, err = ch.CliProto.Set(); err != nil {
				// There is no room to prepare a response for it, just drop it.
				// The client should use Proto.SeqId to discover those missing responses
				// and implement their error handling logic.
				if ch.TmpProto.BodySupportReuse {
					LargeMsgBodyPool.Put(ch.TmpProto.Body)
					ch.TmpProto.Body = nil
				}
				log.Debug("Connection[%s] dropped a message because \"cli.proto\" is full(%v)", key, err)
				continue
			}
			*p = ch.TmpProto
		}
		// Remember a pooled body so it can be recycled after handling.
		var sliceToRecycle []byte = nil
		if p.BodySupportReuse {
			sliceToRecycle = p.Body
		}
		if p.Operation == define.OP_HEARTBEAT {
			atomic.AddUint64(&StatCnt.HBRecv, 1)
			// Heartbeat refreshes the idle timer and is answered in place.
			tr.Set(trd, hb)
			p.Body = nil
			p.Operation = define.OP_HEARTBEAT_REPLY
			log.Debug("connection[%s] heartbeat received", key)
		} else {
			atomic.AddUint64(&StatCnt.MsgRecv, 1)
			err = server.operator.Operate(p, ch)
		}
		if sliceToRecycle != nil {
			LargeMsgBodyPool.Put(sliceToRecycle)
		}
		if err != nil {
			break
		}
		// NOTE: make sure p.Body is pointing at some other place than the one
		// ReadTCP returned with a false p.BodySupportReuse before calling SetAdv.
		ch.CliProto.SetAdv()
		ch.Signal() // might block(could be a problem?)
	}
	if err != nil && err != io.EOF {
		log.Error("connection[%s] tcp error(%v)", key, err)
	}
	// Teardown: unregister, stop the timer, return the read buffer, close
	// socket and channel (the write buffer is returned by dispatchTCP).
	b.Del(ch)
	tr.Del(trd)
	rp.Put(rb)
	conn.Close()
	ch.Close()
	if err = server.operator.Disconnect(key); err != nil {
		log.Error("connection[%s] operator do disconnect error(%v)", key, err)
	}
	log.Trace("connection[%s] server tcp goroutine exit", key)
	return
}

// dispatch accepts connections on the listener and serves requests
// for each incoming connection. dispatch blocks; the caller typically
// invokes it in a go statement.
func (server *Server) dispatchTCP(key string, conn *net.TCPConn, wr *bufio.Writer, wp *bytes.Pool, wb *bytes.Buffer, ch *Channel) { var ( p *proto.Proto err error ) log.Trace("connection[%s] start dispatch tcp goroutine", key) for { p = ch.Ready() switch p { case proto.ProtoFinish: log.Debug("connection[%s] will exit dispatch goroutine", key) goto failed case proto.ProtoKick: // best-effort user notification for a kick-out if err = p.WriteTCP(wr); err == nil { wr.Flush() } log.Warn("connection[%s] get kicked out of dispatch goroutine", key) goto failed case proto.ProtoReady: // ringbuffer message for responding to requesting client for { if p, err = ch.CliProto.Get(); err != nil { // must reset err to nil 'cause this is a normal exit err = nil break } if p.Operation == define.OP_HEARTBEAT_REPLY { atomic.AddUint64(&StatCnt.HBReply, 1) } else { atomic.AddUint64(&StatCnt.MsgSent, 1) } if err = p.WriteTCP(wr); err != nil { goto failed } p.Body = nil // avoid memory leak ch.CliProto.GetAdv() } default: // job RPC pushed messages var increment uint64 = 1 if p.Operation == define.OP_RAW { if batchSize, ok := p.BundleData.(uint16); ok { increment = uint64(batchSize) } } atomic.AddUint64(&StatCnt.MsgSent, increment) if err = p.WriteTCP(wr); err != nil { goto failed } } // only hungry flush response if err = wr.Flush(); err != nil { break } } failed: if err != nil { log.Error("connection[%s] tcp dispatch routine error(%v); unsent Protos count(%d); bufio writer unsent bytes(%d)", key, err, ch.Buffered(), wr.Buffered()) } conn.Close() // will make sure the request reading goroutine breaks and sends a proto.ProtoFinish here for p != proto.ProtoFinish { // drain the buffered Protos p = ch.Ready() } wp.Put(wb) log.Trace("connection[%s] dispatch goroutine exit", key) return } // auth for cm_liveme_im handshake with client, use rsa. 
// TODO: consider move those cpu consuming codes to a limited number of goroutines, otherwise they will hog up // the go runtime 'cause while calculation goroutines can not be schedule and switched. func (server *Server) authTCP(rr *bufio.Reader, wr *bufio.Writer, p *proto.Proto) (key string, heartbeat time.Duration, err error) { if err = p.ReadTCP(rr, LargeMsgBodyPool, Conf.MsgBodyValidMaxSize); err != nil { return } if p.Operation != define.OP_AUTH { log.Warn("auth operation requires a matching opreation code instead of %d", p.Operation) err = ErrOperation return } authSecret := &proto.AuthSecret{} if err = pb.Unmarshal(p.Body, authSecret); err != nil { log.Warn("auth operation failed due to protobuf unmarshal failure") return } var authInfoJsonBytes []byte switch *authSecret.Version { case 0: // without any encryption authInfoJsonBytes = authSecret.AuthInfo case 1: var aesKey []byte if aesKey, err = rsaext.Decrypt(authSecret.AesKey, Conf.AuthRSAPrivateKey); err != nil { log.Warn("auth operation failed due to decrypt AES key failed") return } if authInfoJsonBytes, err = aesext.CbcDecryptWithPKCS7UnPadding(aesKey, authSecret.AuthInfo); err != nil { log.Warn("auth operation failed due to failed to decrypt AuthInfo with the aes key provided") return } default: err = fmt.Errorf("auth operation failed due to unsupported AuthSecret version: %d", authSecret.Version) log.Warn(err) } authInfo := &proto.AuthInfo{} if err = json.Unmarshal(authInfoJsonBytes, authInfo); err != nil { log.Warn("auth operation failed due to unmarshal AuthInfo failed") return } key, heartbeat, err = server.operator.Connect(p, authInfo) atomic.AddUint64(&StatCnt.Auth, 1) return }
package lib import ( "bufio" "errors" "os" "encoding/json" "fmt" "github.com/goml/gobrain" ) // private method func bin(n int) []float64 { f := [8]float64{} // MAX model 8개 for i := uint(0); i < 8; i++ { f[i] = float64((n >> i) & 1) // shift로 이진수 형태로 저장 } return f[:] // 전체 내용을 출력하기위해 [:]사용(기존 형태는 [8]로 지정된 형태이므로 []float64형태의 값으로 return 불가) } // public method(just a function with receiver argument) func LoadModel() (*gobrain.FeedForward, []string, error) { f, err := os.Open("labels.txt") if err != nil { return nil, nil, err } defer f.Close() labels := []string{} scanner := bufio.NewScanner(f) for scanner.Scan() { labels = append(labels, scanner.Text()) } if scanner.Err() != nil { return nil, nil, err } if len(labels) == 0 { return nil, nil, errors.New("No labels found") } // brain = 모델 f, err = os.Open("brain.json") if err != nil { return nil, labels, nil } defer f.Close() ff := &gobrain.FeedForward{} err = json.NewDecoder(f).Decode(ff) if err != nil { return nil, labels, err } return ff, labels, nil } // public method(just a function with receiver argument) func MakeModel(labels []string) (*gobrain.FeedForward, error) { ff := &gobrain.FeedForward{} patterns := [][][]float64{} // 3차원 모델패턴 slice for i, c := range labels { //Label slice나누기(i = 번호, category = 값) bset, err := loadImageSet(c) // 이미지를 2차원 슳라이스로 리턴 - 이미지 숫자만큼 값이 지정됨 if err != nil { return nil, err } // 모든 이미지가 패턴에 {{이미지 값}{라벨 값}}의 형태로 저장됨 for _, b := range bset { patterns = append(patterns, [][]float64{b, bin(i)}) // 3차원 패턴 슬라이드에 2차원 이미지 슬라이드를 추가 , 저장할 모델의 위치값를 binary형태로 지정(3차원 슬라이스로 전부 지정됨) } // fmt.Println(bin(i)) // [0 0 0 0 0 0 0 0], [1 0 0 0 0 0 0 0], [0 1 0 0 0 0 0 0]... 이런식으로 각 이미지의 위치가 지정됨. 결과에선 각 위치에따른 일치 퍼센트가 나옴 } if len(patterns) == 0 || len(patterns[0][0]) == 0 { return nil, errors.New("No images found") } fmt.Println("training now... 
please wait...") ff.Init(len(patterns[0][0]), 40, len(patterns[0][1])) // input(입력) = 이미지 값, hidden(기억) = 기억노드 수 지정, output(출력) = 라벨값 ff.Train(patterns, 1000, 0.6, 0.4, false) // 학습(패턴, 학습횟수, 상수, 상수, 에러값 출력 플래그) return ff, nil } // public method func SaveModel(ff *gobrain.FeedForward) error { f, err := os.Create("brain.json") if err != nil { return err } defer f.Close() return json.NewEncoder(f).Encode(ff) }
package slack import ( "encoding/json" "net/http" "testing" ) func TestListEventAuthorizations(t *testing.T) { http.HandleFunc("/apps.event.authorizations.list", testListEventAuthorizationsHandler) once.Do(startServer) api := New("", OptionAppLevelToken("test-token"), OptionAPIURL("http://"+serverAddr+"/")) authorizations, err := api.ListEventAuthorizations("1-message-T012345678-DR12345678") if err != nil { t.Errorf("Failed, but should have succeeded") } else if len(authorizations) != 1 { t.Errorf("Didn't get 1 authorization") } else if authorizations[0].UserID != "U123456789" { t.Errorf("User ID is wrong") } } func testListEventAuthorizationsHandler(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") response, _ := json.Marshal(listEventAuthorizationsResponse{ SlackResponse: SlackResponse{Ok: true}, Authorizations: []EventAuthorization{ { UserID: "U123456789", TeamID: "T012345678", }, }, }) w.Write(response) } func TestUninstallApp(t *testing.T) { http.HandleFunc("/apps.uninstall", testUninstallAppHandler) once.Do(startServer) api := New("test-token", OptionAPIURL("http://"+serverAddr+"/")) err := api.UninstallApp("", "") if err != nil { t.Errorf("Failed, but should have succeeded") } } func testUninstallAppHandler(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") response, _ := json.Marshal(SlackResponse{Ok: true}) w.Write(response) }
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package logging provides types and functions for handling logging GCP resources.
package alpha

import (
	"bytes"
	"context"
	"fmt"

	"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
)

// do makes a request to delete a log bucket if the name of the bucket is not
// "_Default" or "_Required"
func (op *deleteLogBucketOperation) do(ctx context.Context, r *LogBucket, c *Client) error {
	// Existence check first: deleting an already-absent bucket is a no-op.
	_, err := c.GetLogBucket(ctx, r)
	if err != nil {
		if dcl.IsNotFound(err) {
			c.Config.Logger.Infof("LogBucket not found, returning. Original error: %v", err)
			return nil
		}
		c.Config.Logger.Warningf("GetLogBucket checking for existence. error: %v", err)
		return err
	}
	// The two built-in buckets cannot be deleted; silently succeed.
	if r.Name != nil && (*r.Name == "_Default" || *r.Name == "_Required") {
		return nil
	}
	u, err := r.deleteURL(c.Config.BasePath)
	if err != nil {
		return err
	}

	// Delete should never have a body
	body := &bytes.Buffer{}
	_, err = dcl.SendRequest(ctx, c.Config, "DELETE", u, body, c.Config.RetryProvider)
	if err != nil {
		return fmt.Errorf("failed to delete LogBucket: %w", err)
	}
	return nil
}

// equalsLogMetricMetricDescriptorLabelsValueType compares two enum pointers,
// treating nil, "" and the server default "STRING" as equivalent.
func equalsLogMetricMetricDescriptorLabelsValueType(m, n *LogMetricMetricDescriptorLabelsValueTypeEnum) bool {
	if m == nil && n == nil {
		return true
	}
	v := *LogMetricMetricDescriptorLabelsValueTypeEnumRef("STRING")
	w := *LogMetricMetricDescriptorLabelsValueTypeEnumRef("")
	if m == nil || *m == w {
		// m is nil or blank, should compare equal to blank or "STRING"
		return n == nil || *n == v || *n == w
	}
	if n == nil || *n == w {
		// n is nil or blank, should compare equal to blank or "STRING"
		return *m == v || *m == w
	}
	return *m == *n
}

// canonicalizeLogMetricMetricDescriptorLabelsValueType adapts the typed
// comparison above to dcl's interface{}-based diffing hooks.
func canonicalizeLogMetricMetricDescriptorLabelsValueType(m, n interface{}) bool {
	if m == nil && n == nil {
		return true
	}
	mVal, _ := m.(*LogMetricMetricDescriptorLabelsValueTypeEnum)
	nVal, _ := n.(*LogMetricMetricDescriptorLabelsValueTypeEnum)
	return equalsLogMetricMetricDescriptorLabelsValueType(mVal, nVal)
}

// equalsLogMetricMetricDescriptorValueType compares two enum pointers,
// treating nil, "" and the server default "STRING" as equivalent.
func equalsLogMetricMetricDescriptorValueType(m, n *LogMetricMetricDescriptorValueTypeEnum) bool {
	if m == nil && n == nil {
		return true
	}
	v := *LogMetricMetricDescriptorValueTypeEnumRef("STRING")
	w := *LogMetricMetricDescriptorValueTypeEnumRef("")
	if m == nil || *m == w {
		// m is nil or blank, should compare equal to blank or "STRING"
		return n == nil || *n == v || *n == w
	}
	if n == nil || *n == w {
		// n is nil or blank, should compare equal to blank or "STRING"
		return *m == v || *m == w
	}
	return *m == *n
}

// canonicalizeLogMetricMetricDescriptorValueType adapts the typed comparison
// above to dcl's interface{}-based diffing hooks.
func canonicalizeLogMetricMetricDescriptorValueType(m, n interface{}) bool {
	if m == nil && n == nil {
		return true
	}
	mVal, _ := m.(*LogMetricMetricDescriptorValueTypeEnum)
	nVal, _ := n.(*LogMetricMetricDescriptorValueTypeEnum)
	return equalsLogMetricMetricDescriptorValueType(mVal, nVal)
}
// Copyright 2020-2021 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bufmodulecache

import (
	"context"
	"testing"
	"time"

	"github.com/powerman/buf/internal/buf/bufcore/bufmodule"
	"github.com/powerman/buf/internal/buf/bufcore/bufmodule/bufmoduletesting"
	"github.com/powerman/buf/internal/pkg/filelock"
	"github.com/powerman/buf/internal/pkg/storage"
	"github.com/powerman/buf/internal/pkg/storage/storagemem"
	"github.com/powerman/buf/internal/pkg/storage/storageos"
	"github.com/stretchr/testify/require"
	"go.uber.org/zap"
)

// TestReaderBasic exercises the layered module reader: the first GetModule is
// a cache miss served by the pre-populated delegate, the second is a cache
// hit, and the retrieved module's contents must round-trip unchanged.
func TestReaderBasic(t *testing.T) {
	ctx := context.Background()
	modulePin, err := bufmodule.NewModulePin(
		"buf.build",
		"foob",
		"bar",
		"v1",
		bufmoduletesting.TestCommit,
		bufmoduletesting.TestDigest,
		time.Now(),
	)
	require.NoError(t, err)
	readBucket, err := storagemem.NewReadBucket(bufmoduletesting.TestData)
	require.NoError(t, err)
	module, err := bufmodule.NewModuleForBucket(ctx, readBucket)
	require.NoError(t, err)
	readWriteBucket, fileLocker := newTestBucketAndLocker(t)
	moduleCacher := newModuleCacher(readWriteBucket, fileLocker)
	err = moduleCacher.PutModule(
		context.Background(),
		modulePin,
		module,
	)
	require.NoError(t, err)

	// the delegate uses the cache we just populated
	delegateModuleReader := newModuleReader(zap.NewNop(), readWriteBucket, moduleCacher, WithFileLocker(fileLocker))
	// the main does not, so there will be a cache miss
	mainReadWriteBucket, mainFileLocker := newTestBucketAndLocker(t)
	moduleReader := newModuleReader(zap.NewNop(), mainReadWriteBucket, delegateModuleReader, WithFileLocker(mainFileLocker))
	getModule, err := moduleReader.GetModule(ctx, modulePin)
	require.NoError(t, err)
	getReadBucketBuilder := storagemem.NewReadBucketBuilder()
	err = bufmodule.ModuleToBucket(ctx, getModule, getReadBucketBuilder)
	require.NoError(t, err)
	getReadBucket, err := getReadBucketBuilder.ToReadBucket()
	require.NoError(t, err)
	// Verify that the buf.lock file was created.
	exists, err := storage.Exists(ctx, getReadBucket, bufmodule.LockFilePath)
	require.NoError(t, err)
	require.True(t, exists)

	// Exclude non-proto files for the diff check
	filteredReadBucket := storage.MapReadBucket(getReadBucket, storage.MatchPathExt(".proto"))
	diff, err := storage.DiffBytes(ctx, readBucket, filteredReadBucket)
	require.NoError(t, err)
	require.Empty(t, string(diff))

	// Second read must be served from the main cache.
	_, err = moduleReader.GetModule(ctx, modulePin)
	require.NoError(t, err)
	require.Equal(t, 2, moduleReader.getCount())
	require.Equal(t, 1, moduleReader.getCacheHits())
}

// TestCacherBasic checks the cacher alone: a get before put reports
// not-exist, and a get after put returns a module containing a buf.lock.
func TestCacherBasic(t *testing.T) {
	ctx := context.Background()
	modulePin, err := bufmodule.NewModulePin(
		"buf.build",
		"foob",
		"bar",
		"v1",
		bufmoduletesting.TestCommit,
		bufmoduletesting.TestDigest,
		time.Now(),
	)
	require.NoError(t, err)
	readBucket, err := storagemem.NewReadBucket(bufmoduletesting.TestData)
	require.NoError(t, err)
	module, err := bufmodule.NewModuleForBucket(ctx, readBucket)
	require.NoError(t, err)
	readWriteBucket, fileLocker := newTestBucketAndLocker(t)
	moduleCacher := newModuleCacher(readWriteBucket, fileLocker)
	_, err = moduleCacher.GetModule(ctx, modulePin)
	require.True(t, storage.IsNotExist(err))
	err = moduleCacher.PutModule(
		context.Background(),
		modulePin,
		module,
	)
	require.NoError(t, err)
	getModule, err := moduleCacher.GetModule(ctx, modulePin)
	require.NoError(t, err)
	getReadBucketBuilder := storagemem.NewReadBucketBuilder()
	err = bufmodule.ModuleToBucket(ctx, getModule, getReadBucketBuilder)
	require.NoError(t, err)
	getReadBucket, err := getReadBucketBuilder.ToReadBucket()
	require.NoError(t, err)
	exists, err := storage.Exists(ctx, getReadBucket, bufmodule.LockFilePath)
	require.NoError(t, err)
	require.True(t, exists)
}

// TestModuleReaderCacherWithDocumentation checks that a cached module keeps
// its buf.md documentation file and documentation content.
func TestModuleReaderCacherWithDocumentation(t *testing.T) {
	ctx := context.Background()
	modulePin, err := bufmodule.NewModulePin(
		"buf.build",
		"foob",
		"bar",
		"v1",
		bufmoduletesting.TestCommit,
		bufmoduletesting.TestDigestWithDocumentation,
		time.Now(),
	)
	require.NoError(t, err)
	readBucket, err := storagemem.NewReadBucket(bufmoduletesting.TestDataWithDocumentation)
	require.NoError(t, err)
	module, err := bufmodule.NewModuleForBucket(ctx, readBucket)
	require.NoError(t, err)
	readWriteBucket, fileLocker := newTestBucketAndLocker(t)
	moduleCacher := newModuleCacher(readWriteBucket, fileLocker)
	err = moduleCacher.PutModule(
		context.Background(),
		modulePin,
		module,
	)
	require.NoError(t, err)
	module, err = moduleCacher.GetModule(ctx, modulePin)
	require.NoError(t, err)
	readBucketBuilder := storagemem.NewReadBucketBuilder()
	require.NoError(t, bufmodule.ModuleToBucket(ctx, module, readBucketBuilder))
	readBucket, err = readBucketBuilder.ToReadBucket()
	require.NoError(t, err)
	// Verify that the buf.md file was created.
	exists, err := storage.Exists(ctx, readBucket, bufmodule.DocumentationFilePath)
	require.NoError(t, err)
	require.True(t, exists)
	require.Equal(t, bufmoduletesting.TestModuleDocumentation, module.Documentation())
}

// newTestBucketAndLocker returns a temp-dir backed read/write bucket and a
// file locker, each rooted in its own t.TempDir().
func newTestBucketAndLocker(t *testing.T) (storage.ReadWriteBucket, filelock.Locker) {
	storageosProvider := storageos.NewProvider(storageos.ProviderWithSymlinks())
	readWriteBucket, err := storageosProvider.NewReadWriteBucket(t.TempDir())
	require.NoError(t, err)
	fileLocker, err := filelock.NewLocker(t.TempDir())
	require.NoError(t, err)
	return readWriteBucket, fileLocker
}
package main

import (
	"fmt"
	"math"
)

func main() {
	intSize := 32 << (^uint(0) >> 63) // 32 or 64
	MaxInt := 1<<(intSize-1) - 1
	fmt.Println(intSize)
	fmt.Println(MaxInt)
	fmt.Println(math.Pow(2, 63))
	fmt.Println(math.MaxInt)
	fmt.Println(math.MinInt)
	fmt.Println(frogPosition(3, [][]int{
		{2, 1},
		{3, 2},
	}, 1, 2))
	fmt.Println(frogPosition(7, [][]int{
		{1, 2}, {1, 3}, {1, 7}, {2, 4}, {2, 6}, {3, 5},
	}, 1, 7))
	fmt.Println(frogPosition(7, [][]int{
		{1, 2}, {1, 3}, {1, 7}, {2, 4}, {2, 6}, {3, 5},
	}, 2, 4))
}

// frogPosition solves LeetCode 1377: a frog starts on vertex 1 of an
// undirected tree with n vertices and jumps each second, uniformly at random,
// to an unvisited neighbor. It returns the probability of being on target
// after t seconds (0 if the frog can never be there).
//
// Fixes over the previous version:
//   - the branch count multiplied by len(adj[node]) which, for non-root
//     nodes, wrongly counted the already-visited parent (e.g. t=2, target=4
//     yielded 1/9 instead of 1/6);
//   - reaching target early only counts if the frog is forced to stay
//     (it is a leaf), otherwise the probability is 0;
//   - an unreachable target now returns 0 instead of 1/0 (+Inf).
func frogPosition(n int, edges [][]int, t int, target int) float64 {
	adj := make(map[int][]int)
	for _, e := range edges {
		adj[e[0]] = append(adj[e[0]], e[1])
		adj[e[1]] = append(adj[e[1]], e[0])
	}

	visited := make(map[int]bool)
	var dfs func(node, elapsed, ways int) float64
	dfs = func(node, elapsed, ways int) float64 {
		visited[node] = true
		// Number of moves available from here (parent is already visited).
		unvisited := 0
		for _, nb := range adj[node] {
			if !visited[nb] {
				unvisited++
			}
		}
		if node == target {
			// The frog stays only if time is up or there is nowhere to jump.
			if elapsed == t || unvisited == 0 {
				return 1 / float64(ways)
			}
			return 0
		}
		if elapsed == t || unvisited == 0 {
			return 0
		}
		for _, nb := range adj[node] {
			if !visited[nb] {
				// In a tree exactly one branch can reach target.
				if p := dfs(nb, elapsed+1, ways*unvisited); p != 0 {
					return p
				}
			}
		}
		return 0
	}
	return dfs(1, 0, 1)
}
// Copyright 2019 John Papandriopoulos. All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package zydis // AddressWidth is an enum of processor address widths. type AddressWidth int // AddressWidth enum values. const ( AddressWidth16 AddressWidth = iota AddressWidth32 AddressWidth64 )
package main

import "fmt"

// getSum returns the sum of two ints. Assigning it to a variable below shows
// that a function is a first-class value and can be called through it.
func getSum(n1 int, n2 int) int {
	return n1 + n2
}

// myFunc shows that a function is a data type: it takes a function parameter
// and invokes it with num1 and num2.
func myFunc(func1 func(int, int) int, num1 int, num2 int) int {
	return func1(num1, num2)
}

// myFuncType names the function type "two ints in, one int out".
type myFuncType func(int, int) int

// myFunc2 is like myFunc but accepts the named function type.
func myFunc2(func1 myFuncType, num1 int, num2 int) int {
	return func1(num1, num2)
}

// getSumAndSub demonstrates named return values: sum and sub are assigned in
// the body and returned by the bare return. (Renamed from the typo
// "gerSumAndSub".)
func getSumAndSub(n1 int, n2 int) (sum int, sub int) {
	sum = n1 + n2
	sub = n1 - n2
	return
}

func main() {
	a := getSum
	fmt.Printf("a的类型是%T, getSum的类型是%T \n", a, getSum)
	res := a(10, 20)
	fmt.Println("res=", res)

	res2 := myFunc(getSum, 20, 40)
	fmt.Println("res2=", res2)

	// A locally declared named type based on int.
	type myInt int
	var num1 myInt
	num1 = 40
	fmt.Println("num1=", num1)

	res3 := myFunc2(getSum, 100, 40)
	fmt.Println("res3=", res3)

	sum1, sub1 := getSumAndSub(100, 40)
	fmt.Printf("sum1=%v, sub1=%v \n", sum1, sub1)
}
package test

import (
	"database/sql"
	"testing"
)

// TestTransactiontest opens the test MySQL database and verifies that a
// transaction can be started. It requires a registered "mysql" driver and a
// reachable server. Failures are reported through the testing API instead of
// panicking, the connection is closed, and the previously discarded
// transaction (and its error) from db.Begin() is now handled.
func TestTransactiontest(t *testing.T) {
	db, err := sql.Open("mysql", "root@/test")
	if err != nil {
		t.Fatalf("open database: %v", err)
	}
	defer db.Close()

	tx, err := db.Begin()
	if err != nil {
		t.Fatalf("begin transaction: %v", err)
	}
	// Nothing was executed in the transaction; release it explicitly.
	if err := tx.Rollback(); err != nil {
		t.Errorf("rollback: %v", err)
	}
}
package card type Card struct { Issuer string Balance int64 Currency string Number string } type Service struct { BankName string Cards []*Card } func NewService(bankname string) *Service { return &Service { BankName : bankname, } } func (s *Service) GetNewCard(issuer string, balance int64, currency string, number string) *Card { card := &Card{ Issuer: issuer, Balance: balance, Currency: currency, Number: number, } s.Cards = append(s.Cards, card) return card } func (s *Service) CheckNumber(number string) (*Card, bool) { for _, c := range s.Cards { if c.Number == number { return c, true } } return nil, false } func (s *Service) CheckBalance(card *Card, amount int64) (bool) { if card.Balance >= amount { return true } return false }
package model

import (
	"time"
)

// StressHttpTestCase extends HttpTestCase with load-generation parameters for
// stress testing.
type StressHttpTestCase struct {
	HttpTestCase // embedded base case; supplies Name and the HTTP details

	Duration    time.Duration // how long the stress run should last
	Concurrency int64         // number of concurrent workers
	RPS         int64         // target requests per second
}

// Compile-time check that StressHttpTestCase satisfies TestCase.
var _ TestCase = &StressHttpTestCase{}

// GetName returns the case name inherited from the embedded HttpTestCase.
func (tc StressHttpTestCase) GetName() string {
	return tc.Name
}

// Run executes the stress test case.
// NOTE(review): not implemented yet — panics unconditionally.
func (tc StressHttpTestCase) Run() TestResult {
	panic("implement me")
}
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"html/template"
	"log"
	"net"
	"strings"
)

// main listens on :8080 and serves each TCP connection in its own goroutine.
func main() {
	listener, err := net.Listen("tcp", ":8080")
	if err != nil {
		log.Fatalln(err)
	}
	defer listener.Close()
	for {
		conn, err := listener.Accept()
		if err != nil {
			fmt.Println(err)
			continue // was missing: handling a nil conn would panic
		}
		go handleConnection(conn)
	}
}

// handleConnection reads one HTTP request and writes one response.
func handleConnection(conn net.Conn) {
	defer conn.Close()
	// read request
	url := request(conn)
	// write response
	response(conn, url)
}

// request reads the request headers from conn and returns the path from the
// request line, or "" if the request line is malformed.
func request(conn net.Conn) string {
	scanner := bufio.NewScanner(conn)
	url := ""
	i := 0
	for scanner.Scan() {
		text := scanner.Text()
		fmt.Println(text)
		if i == 0 {
			// Request line, e.g. "GET /path HTTP/1.1".
			parts := strings.Fields(text)
			if len(parts) >= 2 { // was unchecked: an empty line panicked
				url = parts[1]
			}
		}
		if text == "" {
			// Blank line terminates the header section.
			break
		}
		i++
	}
	return url
}

// response renders index.html with the requested URL and writes a minimal
// HTTP/1.1 response. Template failures now produce a 500 instead of a panic.
func response(conn net.Conn, url string) {
	tmpl, err := template.ParseFiles("index.html")
	if err != nil {
		// was: logged, then called Execute on a nil template (panic)
		log.Println(err)
		fmt.Fprint(conn, "HTTP/1.1 500 Internal Server Error\r\n\r\n")
		return
	}
	buf := new(bytes.Buffer)
	if err := tmpl.Execute(buf, url); err != nil {
		log.Println(err)
		fmt.Fprint(conn, "HTTP/1.1 500 Internal Server Error\r\n\r\n")
		return
	}
	fmt.Fprint(conn, "HTTP/1.1 200 OK\r\n")
	fmt.Fprintf(conn, "Content-Length: %d\r\n", buf.Len())
	fmt.Fprint(conn, "Content-Type: text/html\r\n")
	fmt.Fprint(conn, "\r\n")
	fmt.Fprint(conn, buf.String())
}
package restclient

import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"time"
)

// RestClient wraps an *http.Client with JSON convenience helpers.
type RestClient struct {
	HTTPClient *http.Client
}

// Header is a single HTTP header key/value pair.
type Header struct {
	Key   string
	Value string
}

// NewRestClient returns a RestClient whose requests time out after timeout.
// (Parameter renamed from "time", which shadowed the time package.)
func NewRestClient(timeout time.Duration) RestClient {
	return RestClient{
		HTTPClient: &http.Client{
			Timeout: timeout,
		},
	}
}

// DoGet performs a GET on url with the given extra headers and decodes the
// JSON body into response, which must be a pointer. The response status is
// validated before the body is decoded.
func (r *RestClient) DoGet(ctx context.Context, url string, response interface{}, additionalHeaders ...Header) error {
	// was http.NewRequest: the ctx parameter was silently ignored.
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return err
	}

	for _, header := range additionalHeaders {
		req.Header.Add(header.Key, header.Value)
	}

	res, err := r.HTTPClient.Do(req)
	if err != nil {
		// was errors.New("asdca"): a placeholder that hid the real cause.
		return fmt.Errorf("executing GET %s: %w", url, err)
	}
	// Make sure the body is always closed.
	defer res.Body.Close()

	if res.StatusCode < http.StatusOK || res.StatusCode >= http.StatusBadRequest {
		return fmt.Errorf("rest client error: status %d from %s", res.StatusCode, url)
	}

	// was Decode(&response): decoding into a *interface{} replaced the local
	// interface value and never populated the caller's struct.
	if err := json.NewDecoder(res.Body).Decode(response); err != nil {
		return fmt.Errorf("reading body: %w", err)
	}
	return nil
}
package 动态规划 import "fmt" // -------------------- 贪心动态规划(使用前缀、后缀最小值) ---------------------- const INF = 1000000000 func minFallingPathSum(arr [][]int) int { rows, cols := getRowsAndCols(arr) minSum := get2DSlice(rows, cols) for t := 0; t < cols; t++ { minSum[0][t] = arr[0][t] } for i := 1; i < rows; i++ { prefixMin := getPrefixMin(minSum[i-1]) suffixMin := getSuffixMin(minSum[i-1]) minSum[i][0] = suffixMin[1] + arr[i][0] minSum[i][cols-1] = prefixMin[cols-2] + arr[i][cols-1] for t := 1; t < cols-1; t++ { minSum[i][t] = min(suffixMin[t+1], prefixMin[t-1]) + arr[i][t] } } return min(minSum[rows-1]...) } func getPrefixMin(arr []int) []int { prefixMin := make([]int, len(arr)) prefixMin[0] = arr[0] for i := 1; i < len(arr); i++ { prefixMin[i] = min(prefixMin[i-1], arr[i]) } return prefixMin } func getSuffixMin(arr []int) []int { suffixMin := make([]int, len(arr)) suffixMin[len(arr)-1] = arr[len(arr)-1] for i := len(arr) - 2; i >= 0; i-- { suffixMin[i] = min(suffixMin[i+1], arr[i]) } return suffixMin } func getRowsAndCols(matrix [][]int) (int, int) { if len(matrix) == 0 { return 0, 0 } return len(matrix), len(matrix[0]) } func get2DSlice(rows, column int) [][]int { slice := make([][]int, rows) for i := 0; i < len(slice); i++ { slice[i] = make([]int, column) } return slice } func min(arr ...int) int { if len(arr) == 1 { return arr[0] } a := arr[0] b := min(arr[1:]...) if a > b { return b } return a } // -------------------- 暴力动态规划 ---------------------- const INF = 1000000000 func minFallingPathSum(arr [][]int) int { rows, cols := getRowsAndCols(arr) minSum := get2DSlice(rows, cols) for t := 0; t < cols; t++ { minSum[0][t] = arr[0][t] } for i := 1; i < rows; i++ { for t := 0; t < cols; t++ { minSum[i][t] = INF for k := 0; k < cols; k++ { if t == k { continue } minSum[i][t] = min(minSum[i][t], minSum[i-1][k]+arr[i][t]) } } } return min(minSum[rows-1]...) 
} func getRowsAndCols(matrix [][]int) (int, int) { if len(matrix) == 0 { return 0, 0 } return len(matrix), len(matrix[0]) } func get2DSlice(rows, column int) [][]int { slice := make([][]int, rows) for i := 0; i < len(slice); i++ { slice[i] = make([]int, column) } return slice } func min(arr ...int) int { if len(arr) == 1 { return arr[0] } a := arr[0] b := min(arr[1:]...) if a > b { return b } return a } /* 题目链接: https://leetcode-cn.com/problems/minimum-falling-path-sum-ii/submissions/ */
package helpers

import (
	"log"
	"time"
)

// logPrefix returns the arguments prefixed with the current date and a
// separator, ready to be spread into the standard logger.
func logPrefix(v []interface{}) []interface{} {
	return append([]interface{}{time.Now().Format("02-Jan-2006"), "---- "}, v...)
}

// LogStep calls Output to print to the standard logger with the provided message.
// The variadic arguments are now spread individually; previously the slice
// itself was passed as one argument, so messages printed as "[a b c]".
func LogStep(v ...interface{}) {
	log.Println(logPrefix(v)...)
}

// LogError is equivalent to LogStep() followed by a call to os.Exit(1).
func LogError(v ...interface{}) {
	log.Fatalln(logPrefix(v)...)
}
package jarviscore import ( "context" "fmt" "io/ioutil" "math/rand" "os" "sync" "testing" "time" jarvisbase "github.com/zhs007/jarviscore/base" coredbpb "github.com/zhs007/jarviscore/coredb/proto" jarviscorepb "github.com/zhs007/jarviscore/proto" "go.uber.org/zap" ) func sendfile2node(ctx context.Context, fn string, destfn string, srcnode JarvisNode, destaddr string, funcOnResult FuncOnProcMsgResult) error { dat, err := ioutil.ReadFile(fn) if err != nil { return err } fd := &jarviscorepb.FileData{ File: dat, Filename: destfn, } return srcnode.SendFile(ctx, destaddr, fd, funcOnResult) } func randfillFileTF(fn string, len int) error { f, err := os.Create(fn) if err != nil { return err } defer f.Close() l4 := len / 4 l1 := len % 4 for i := 0; i < l4; i++ { d := []byte{ byte(rand.Intn(256)), byte(rand.Intn(256)), byte(rand.Intn(256)), byte(rand.Intn(256)), } n, err := f.Write(d) if err != nil { return nil } if n != 4 { return fmt.Errorf("randfillFile len err") } } for i := 0; i < l1; i++ { d := []byte{ byte(rand.Intn(256)), } n, err := f.Write(d) if err != nil { return nil } if n != 1 { return fmt.Errorf("randfillFile len err") } } return nil } func outputErrTF(t *testing.T, err error, msg string, info string) { if err == nil && info == "" { jarvisbase.Error(msg) t.Fatalf(msg) return } else if err == nil { jarvisbase.Error(msg, zap.String("info", info)) t.Fatalf(msg+" info %v", info) return } jarvisbase.Error(msg, zap.Error(err)) t.Fatalf(msg+" err %v", err) } func outputTF(t *testing.T, msg string) { jarvisbase.Info(msg) t.Logf(msg) } // funconcallTF type funconcallTF func(ctx context.Context, err error, obj *objTF) error type mapnodeinfoTF struct { iconn bool connme bool } type nodeinfoTF struct { mapAddr sync.Map numsIConn int numsConnMe int } func (ni *nodeinfoTF) onIConnectNode(node *coredbpb.NodeInfo) error { d, ok := ni.mapAddr.Load(node.Addr) if ok { mni, ok := d.(*mapnodeinfoTF) if !ok { return fmt.Errorf("nodeinfoRF.onIConnectNode:mapAddr2mapnodeinfo err") } 
if !mni.iconn { mni.iconn = true ni.numsIConn++ } return nil } mni := &mapnodeinfoTF{ iconn: true, } ni.mapAddr.Store(node.Addr, mni) ni.numsIConn++ return nil } func (ni *nodeinfoTF) onNodeConnected(node *coredbpb.NodeInfo) error { d, ok := ni.mapAddr.Load(node.Addr) if ok { mni, ok := d.(*mapnodeinfoTF) if !ok { return fmt.Errorf("nodeinfoRF.onNodeConnected:mapAddr2mapnodeinfo err") } if !mni.connme { mni.connme = true ni.numsConnMe++ } return nil } mni := &mapnodeinfoTF{ connme: true, } ni.mapAddr.Store(node.Addr, mni) ni.numsConnMe++ return nil } type objTF struct { root JarvisNode node1 JarvisNode node2 JarvisNode rootni nodeinfoTF node1ni nodeinfoTF node2ni nodeinfoTF requestnodes bool transferfile1 bool transferfile1ok bool transferfile2 bool transferfile2ok bool err error } func newObjTF() *objTF { return &objTF{ rootni: nodeinfoTF{}, node1ni: nodeinfoTF{}, node2ni: nodeinfoTF{}, requestnodes: false, } } func (obj *objTF) isDone() bool { if obj.rootni.numsConnMe != 2 || obj.rootni.numsIConn != 2 { return false } return obj.transferfile1 && obj.transferfile1ok && obj.transferfile2 && obj.transferfile2ok } func (obj *objTF) oncheck(ctx context.Context, funcCancel context.CancelFunc) error { if obj.rootni.numsConnMe == 2 && obj.node1ni.numsConnMe >= 1 && obj.node1ni.numsIConn >= 1 && obj.node2ni.numsConnMe >= 1 && obj.node2ni.numsIConn >= 1 && !obj.requestnodes { err := obj.node1.GetCoreDB().TrustNode(obj.node2.GetMyInfo().Addr) if err != nil { jarvisbase.Warn("objUN.oncheck:node1.TrustNode", zap.Error(err)) return err } err = obj.node2.GetCoreDB().TrustNode(obj.node1.GetMyInfo().Addr) if err != nil { jarvisbase.Warn("objUN.oncheck:node2.TrustNode", zap.Error(err)) return err } err = obj.node1.RequestNodes(ctx, true, nil) if err != nil { return err } err = obj.node2.RequestNodes(ctx, true, nil) if err != nil { return err } obj.requestnodes = true } if obj.node1ni.numsConnMe == 2 && obj.node2ni.numsConnMe == 2 && obj.node1ni.numsIConn == 2 && 
obj.node2ni.numsIConn == 2 && !obj.transferfile1 { curresultnums := 0 err := sendfile2node(ctx, "./test/tf001.dat", "./test/node1_tf001.dat", obj.node1, obj.node2.GetMyInfo().Addr, func(ctx context.Context, jarvisnode JarvisNode, lstResult []*JarvisMsgInfo) error { if len(lstResult) > 0 { if lstResult[len(lstResult)-1].Msg != nil { jarvisbase.Info("sendfile2node obj.node1", JSONMsg2Zap("result", lstResult[len(lstResult)-1].Msg)) } } // jarvisbase.Info("obj.node1.RequestFile", jarvisbase.JSON("result", lstResult)) if len(lstResult) > curresultnums { for ; curresultnums < len(lstResult); curresultnums++ { if lstResult[curresultnums].Err != nil { obj.err = fmt.Errorf("lstResult[%v].Err %v", curresultnums, lstResult[curresultnums].Err) curresultnums++ funcCancel() return nil } if lstResult[curresultnums].Msg != nil && lstResult[curresultnums].Msg.MsgType == jarviscorepb.MSGTYPE_REPLY_REQUEST_FILE { fd := lstResult[curresultnums].Msg.GetFile() if fd == nil { obj.err = ErrNoFileData funcCancel() return nil } if fd.Md5String == "" { obj.err = ErrFileDataNoMD5String funcCancel() return nil } if fd.Md5String != GetMD5String(fd.File) { obj.err = ErrInvalidFileDataMD5String funcCancel() return nil } } if IsClientProcMsgResultEnd(lstResult) { // if lstResult[curresultnums].IsEnd() { // if lstResult[curresultnums].JarvisResultType == JarvisResultTypeReplyStreamEnd { // if lstResult[curresultnums].Err == nil && lstResult[curresultnums].Msg == nil { obj.transferfile1ok = true if obj.isDone() { funcCancel() } return nil } } } return nil }) if err != nil { obj.err = err return err } obj.transferfile1 = true } if obj.node1ni.numsConnMe == 2 && obj.node2ni.numsConnMe == 2 && obj.node1ni.numsIConn == 2 && obj.node2ni.numsIConn == 2 && !obj.transferfile2 { curresultnums := 0 err := sendfile2node(ctx, "./test/tf001.dat", "./test/node2_tf001.dat", obj.node2, obj.node1.GetMyInfo().Addr, func(ctx context.Context, jarvisnode JarvisNode, lstResult []*JarvisMsgInfo) error { if len(lstResult) 
> 0 { if lstResult[len(lstResult)-1].Msg != nil { jarvisbase.Info("sendfile2node obj.node2", JSONMsg2Zap("result", lstResult[len(lstResult)-1].Msg)) } } // jarvisbase.Info("obj.node1.RequestFile", jarvisbase.JSON("result", lstResult)) if len(lstResult) > curresultnums { for ; curresultnums < len(lstResult); curresultnums++ { if lstResult[curresultnums].Err != nil { obj.err = fmt.Errorf("lstResult[%v].Err %v", curresultnums, lstResult[curresultnums].Err) curresultnums++ funcCancel() return nil } if lstResult[curresultnums].Msg != nil && lstResult[curresultnums].Msg.MsgType == jarviscorepb.MSGTYPE_REPLY_REQUEST_FILE { fd := lstResult[curresultnums].Msg.GetFile() if fd == nil { obj.err = ErrNoFileData funcCancel() return nil } if fd.Md5String == "" { obj.err = ErrFileDataNoMD5String funcCancel() return nil } if fd.Md5String != GetMD5String(fd.File) { obj.err = ErrInvalidFileDataMD5String funcCancel() return nil } } if IsClientProcMsgResultEnd(lstResult) { // if lstResult[curresultnums].IsEnd() { // if lstResult[curresultnums].JarvisResultType == JarvisResultTypeReplyStreamEnd { // if lstResult[curresultnums].Err == nil && lstResult[curresultnums].Msg == nil { obj.transferfile2ok = true if obj.isDone() { funcCancel() } return nil } } } return nil }) if err != nil { obj.err = err return err } obj.transferfile2 = true } return nil } func (obj *objTF) onIConn(ctx context.Context, funcCancel context.CancelFunc) error { return obj.oncheck(ctx, funcCancel) } func (obj *objTF) onConnMe(ctx context.Context, funcCancel context.CancelFunc) error { return obj.oncheck(ctx, funcCancel) } func (obj *objTF) makeString() string { return fmt.Sprintf("root(%v %v) node1(%v %v), node2(%v %v) requestnodes %v transferfile1 %v transferfile1ok %v transferfile2 %v transferfile2ok %v root %v node1 %v node2 %v", obj.rootni.numsIConn, obj.rootni.numsConnMe, obj.node1ni.numsIConn, obj.node1ni.numsConnMe, obj.node2ni.numsIConn, obj.node2ni.numsConnMe, obj.requestnodes, obj.transferfile1, 
obj.transferfile1ok, obj.transferfile2, obj.transferfile2ok, obj.root.BuildStatus(), obj.node1.BuildStatus(), obj.node2.BuildStatus()) } func startTestNodeTF(ctx context.Context, cfgfilename string, ni *nodeinfoTF, obj *objTF, oniconn funconcallTF, onconnme funconcallTF) (JarvisNode, error) { cfg, err := LoadConfig(cfgfilename) if err != nil { return nil, fmt.Errorf("startTestNode load config %v err is %v", cfgfilename, err) } curnode, err := NewNode(cfg) if err != nil { return nil, fmt.Errorf("startTestNode NewNode node %v", err) } curnode.SetNodeTypeInfo("testreqfile", "0.7.22") curnode.RegNodeEventFunc(EventOnIConnectNode, func(ctx context.Context, jarvisnode JarvisNode, node *coredbpb.NodeInfo) error { err := ni.onIConnectNode(node) oniconn(ctx, err, obj) return nil }) curnode.RegNodeEventFunc(EventOnNodeConnected, func(ctx context.Context, jarvisnode JarvisNode, node *coredbpb.NodeInfo) error { err := ni.onNodeConnected(node) onconnme(ctx, err, obj) return nil }) return curnode, nil } func TestTransferFile(t *testing.T) { randfillFileTF("./test/tf001.dat", 2*1024*1024) randfillFileTF("./test/tf002.dat", 10*1024*1024) rootcfg, err := LoadConfig("./test/test5050_transferfileroot.yaml") if err != nil { t.Fatalf("TestTransferFile load config %v err is %v", "./test/test5050_transferfileroot.yaml", err) return } InitJarvisCore(rootcfg, "testnode", "1.2.3") defer ReleaseJarvisCore() obj := newObjTF() ctx, cancel := context.WithTimeout(context.Background(), 40*time.Second) defer cancel() var errobj error oniconn := func(ctx context.Context, err error, obj *objTF) error { if err != nil { errobj = err cancel() return nil } err1 := obj.onIConn(ctx, cancel) if err1 != nil { errobj = err1 cancel() return nil } if obj.isDone() { cancel() return nil } return nil } onconnme := func(ctx context.Context, err error, obj *objTF) error { if err != nil { errobj = err cancel() return nil } err1 := obj.onConnMe(ctx, cancel) if err1 != nil { errobj = err1 cancel() return nil } if 
obj.isDone() { cancel() return nil } return nil } obj.root, err = startTestNodeTF(ctx, "./test/test5050_transferfileroot.yaml", &obj.rootni, obj, oniconn, onconnme) if err != nil { outputErrTF(t, err, "TestTransferFile startTestNodeTF root", "") return } obj.node1, err = startTestNodeTF(ctx, "./test/test5051_transferfile1.yaml", &obj.node1ni, obj, oniconn, onconnme) if err != nil { outputErrTF(t, err, "TestTransferFile startTestNodeTF node1", "") return } obj.node2, err = startTestNodeTF(ctx, "./test/test5052_transferfile2.yaml", &obj.node2ni, obj, oniconn, onconnme) if err != nil { outputErrTF(t, err, "TestTransferFile startTestNodeTF node2", "") return } go obj.root.Start(ctx) time.Sleep(time.Second * 1) go obj.node1.Start(ctx) go obj.node2.Start(ctx) <-ctx.Done() if errobj != nil { outputErrTF(t, errobj, "TestTransferFile", "") return } if obj.err != nil { outputErrTF(t, obj.err, "TestTransferFile", "") return } if !obj.isDone() { outputErrTF(t, nil, "TestTransferFile no done", obj.makeString()) return } outputTF(t, "TestTransferFile OK") }
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package grumpy

import (
	"regexp"
	"strings"
)

// EncodeDefault is the system default encoding.
const EncodeDefault = "utf8"

// Error handling modes that dictate the behavior of *Str.Decode and
// *Unicode.Encode when they encounter bad chars.
const (
	// EncodeStrict causes UnicodeError to be raised on bad chars.
	EncodeStrict = "strict"
	// EncodeReplace replaces bad chars with "\ufffd".
	EncodeReplace = "replace"
	// EncodeIgnore discards bad chars.
	EncodeIgnore = "ignore"
)

var (
	// BaseStringType is the object representing the Python 'basestring'
	// type.
	BaseStringType = newSimpleType("basestring", ObjectType)
	// encodingGarbageRegexp matches the character runs stripped by
	// normalizeEncoding, so e.g. "UTF-8" and "utf8" normalize equally.
	encodingGarbageRegexp = regexp.MustCompile(`[^A-Za-z0-9]+`)
	// escapeMap maps characters to their escaped form used when producing
	// string representations.
	escapeMap = map[rune]string{
		'\\': `\\`,
		'\'': `\'`,
		'\n': `\n`,
		'\r': `\r`,
		'\t': `\t`,
	}
)

// initBaseStringType marks 'basestring' as non-instantiable, matching
// Python's abstract basestring type.
func initBaseStringType(map[string]*Object) {
	BaseStringType.flags &^= typeFlagInstantiable
}

// normalizeEncoding lowercases encoding and strips every non-alphanumeric
// character, yielding a canonical form for encoding-name comparison.
func normalizeEncoding(encoding string) string {
	return strings.ToLower(encodingGarbageRegexp.ReplaceAllString(encoding, ""))
}

// escapeRune returns the Python-style escape sequence for r:
// \xNN for code points < 0x100, \uNNNN for < 0x10000, \UNNNNNNNN otherwise.
func escapeRune(r rune) []byte {
	const hexTable = "0123456789abcdef"
	if r < 0x100 {
		return []byte{'\\', 'x', hexTable[r>>4], hexTable[r&0x0F]}
	}
	if r < 0x10000 {
		return []byte{'\\', 'u', hexTable[r>>12], hexTable[r>>8&0x0F], hexTable[r>>4&0x0F], hexTable[r&0x0F]}
	}
	return []byte{'\\', 'U', hexTable[r>>28], hexTable[r>>24&0x0F], hexTable[r>>20&0x0F], hexTable[r>>16&0x0F], hexTable[r>>12&0x0F], hexTable[r>>8&0x0F], hexTable[r>>4&0x0F], hexTable[r&0x0F]}
}
package clientserverpair_test

import (
	"context"
	"fmt"
	"net"
	"testing"
	"time"

	"github.com/rwool/ex/test/helpers/goroutinechecker"

	"github.com/rwool/ex/log"
	"github.com/rwool/ex/test/helpers/testlogger"

	"github.com/rwool/ex/test/helpers/clientserverpair"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// TestClientServerPair exercises a dialer/listener pair end to end: accept,
// bidirectional reads/writes, and behavior after closing the Listener.
func TestClientServerPair(t *testing.T) {
	defer goroutinechecker.New(t)()

	// Note that this test is written mostly with synchronous code.
	// This will not work with net.Pipe as writes will block waiting for
	// corresponding reads.
	// This works assuming that buffering is used for writes and reads.
	logger, _ := testlogger.NewTestLogger(t, log.Warn)
	d, l := clientserverpair.New(&clientserverpair.PipeCSPairConfig{
		Logger: logger,
	})

	var clientToServerBuf, serverToClientBuf [1024]byte

	// Accept runs in a goroutine because it blocks until DialContext below.
	var sConn net.Conn
	var lErr error
	accepted := make(chan struct{})
	go func() {
		sConn, lErr = l.Accept()
		accepted <- struct{}{}
	}()
	c, err := d.DialContext(context.Background(), "tcp", "127.0.0.1:22")
	require.NoError(t, err)
	select {
	case <-accepted:
		require.NoError(t, lErr, "unexpected error reading accepting from Listener")
	case <-time.After(500 * time.Millisecond):
		t.Fatal("Server side failed to accept connection")
	}

	// Ensure the Conns are usable after closing the Listener.
	require.NoError(t, l.Close(), "failed to close Listener")

	// Client -> server round trip.
	_, err = c.Write([]byte("Some Text"))
	require.NoError(t, err)
	read, err := sConn.Read(clientToServerBuf[:])
	require.NoError(t, err)
	assert.Equal(t, "Some Text", string(clientToServerBuf[:read]))

	// Server -> client round trip.
	_, err = fmt.Fprint(sConn, "Hello")
	require.NoError(t, err)
	read, err = c.Read(serverToClientBuf[:])
	require.NoError(t, err)
	assert.Equal(t, "Hello", string(serverToClientBuf[:read]))

	// NOTE(review): the Listener was already closed above, so this second
	// Close asserts that closing twice does not error — confirm that is the
	// intended contract of the pair implementation.
	err = l.Close()
	assert.NoError(t, err)
}
package utils import ( "encoding/base64" "github.com/riposa/utils/log" "testing" ) const ( key = "henghajiangwillbeonlineatshanghaiin20180912" ) var ( pkcsLogger = log.New() ) func TestEncrypt(t *testing.T) { byteKey, err := base64.StdEncoding.DecodeString(key + "=") if err != nil { pkcsLogger.Exception(err) } secret, err := Encrypt([]byte("aabbccddeeffgg"), byteKey) if err != nil { pkcsLogger.Exception(err) } pkcsLogger.Info(secret) pkcsLogger.Info(Decrypt(secret, byteKey)) }
package main import ( "testing" "github.com/brigadecore/brigade/sdk/v3/restmachinery" "github.com/brigadecore/brigade/v2/scheduler/internal/lib/queue/amqp" "github.com/stretchr/testify/require" ) // Note that unit testing in Go does NOT clear environment variables between // tests, which can sometimes be a pain, but it's fine here-- so each of these // test functions uses a series of test cases that cumulatively build upon one // another. func TestAPIClientConfig(t *testing.T) { testCases := []struct { name string setup func() assertions func( address string, token string, opts restmachinery.APIClientOptions, err error, ) }{ { name: "API_ADDRESS not set", setup: func() {}, assertions: func( _ string, _ string, _ restmachinery.APIClientOptions, err error, ) { require.Error(t, err) require.Contains(t, err.Error(), "value not found for") require.Contains(t, err.Error(), "API_ADDRESS") }, }, { name: "API_TOKEN not set", setup: func() { t.Setenv("API_ADDRESS", "foo") }, assertions: func( _ string, _ string, _ restmachinery.APIClientOptions, err error, ) { require.Error(t, err) require.Contains(t, err.Error(), "value not found for") require.Contains(t, err.Error(), "API_TOKEN") }, }, { name: "SUCCESS not set", setup: func() { t.Setenv("API_TOKEN", "bar") t.Setenv("API_IGNORE_CERT_WARNINGS", "true") }, assertions: func( address string, token string, opts restmachinery.APIClientOptions, err error, ) { require.NoError(t, err) require.Equal(t, "foo", address) require.Equal(t, "bar", token) require.True(t, opts.AllowInsecureConnections) }, }, } for _, testCase := range testCases { t.Run(testCase.name, func(t *testing.T) { testCase.setup() address, token, opts, err := apiClientConfig() testCase.assertions(address, token, opts, err) }) } } func TestReaderFactoryConfig(t *testing.T) { testCases := []struct { name string setup func() assertions func(amqp.ReaderFactoryConfig, error) }{ { name: "AMQP_ADDRESS not set", setup: func() {}, assertions: func(_ amqp.ReaderFactoryConfig, 
err error) { require.Error(t, err) require.Contains(t, err.Error(), "value not found for") require.Contains(t, err.Error(), "AMQP_ADDRESS") }, }, { name: "AMQP_USERNAME not set", setup: func() { t.Setenv("AMQP_ADDRESS", "foo") }, assertions: func(_ amqp.ReaderFactoryConfig, err error) { require.Error(t, err) require.Contains(t, err.Error(), "value not found for") require.Contains(t, err.Error(), "AMQP_USERNAME") }, }, { name: "AMQP_PASSWORD not set", setup: func() { t.Setenv("AMQP_USERNAME", "bar") }, assertions: func(_ amqp.ReaderFactoryConfig, err error) { require.Error(t, err) require.Contains(t, err.Error(), "value not found for") require.Contains(t, err.Error(), "AMQP_PASSWORD") }, }, { name: "success", setup: func() { t.Setenv("AMQP_PASSWORD", "bat") }, assertions: func(config amqp.ReaderFactoryConfig, err error) { require.NoError(t, err) require.Equal( t, amqp.ReaderFactoryConfig{ Address: "foo", Username: "bar", Password: "bat", }, config, ) }, }, } for _, testCase := range testCases { t.Run(testCase.name, func(t *testing.T) { testCase.setup() config, err := readerFactoryConfig() testCase.assertions(config, err) }) } }
package main import "fmt" func verifyClaims(q quilt, claims []claim) { for _, c := range claims { valid := true for y := c.yoffset; y < (c.yoffset + c.height); y++ { for x := c.xoffset; x < (c.xoffset + c.width); x++ { if q.grid[coord{x: x, y: y}] > 1 { valid = false break } } if !valid { break } } if valid { fmt.Println("Valid Claim: ", c) } } } func main2(q quilt, c []claim) { verifyClaims(q, c) }
package redis

import (
	"crypto/tls"
	"time"
)

// config holds the tunable settings for a Backend.
type config struct {
	tls    *tls.Config
	expiry time.Duration
}

// Option customizes a Backend.
type Option func(*config)

// WithTLSConfig sets the tls.Config which Backend uses.
func WithTLSConfig(tlsConfig *tls.Config) Option {
	return func(cfg *config) {
		cfg.tls = tlsConfig
	}
}

// WithExpiry sets the expiry for changes.
func WithExpiry(expiry time.Duration) Option {
	return func(cfg *config) {
		cfg.expiry = expiry
	}
}

// getConfig builds a config from options, defaulting the expiry to 24 hours
// when no WithExpiry option overrides it.
func getConfig(options ...Option) *config {
	cfg := &config{expiry: 24 * time.Hour}
	for _, apply := range options {
		apply(cfg)
	}
	return cfg
}
package ccache

import (
	"time"
)

// Entry is a single cached key/value pair with an optional expiration.
type Entry struct {
	Key        string
	Value      interface{}
	Expiration *time.Time // nil means the entry never expires
}

// Expired reports whether the entry's expiration time has passed.
// An entry with no expiration never expires.
func (entry *Entry) Expired() bool {
	exp := entry.Expiration
	return exp != nil && exp.Before(time.Now())
}
package main // Scan a directory of SGF files for illegal moves. Recursive. import ( "fmt" "os" "path/filepath" sgf ".." ) func main() { if len(os.Args) < 2 { fmt.Printf("Usage: %s <dir>\n", filepath.Base(os.Args[0])) return } filepath.Walk(os.Args[1], handle_file) } func handle_file(path string, _ os.FileInfo, err error) error { // Returning an error halts the whole walk. So don't. if err != nil { fmt.Printf("%v\n", err) return nil } root, err := sgf.LoadMainLine(path) if err != nil { return nil } i := 0 node := root for { child := node.MainChild() if child == nil { break } i++ err := node.Validate() if err != nil { re, _ := root.GetValue("RE") fmt.Printf("%s: Move %d of %d: %v -- %s\n", filepath.Base(path), i, len(node.GetEnd().GetLine()) - 1, err, re) return nil } node = child } return nil }
package common import ( "os" "os/signal" "syscall" "time" log "github.com/cihub/seelog" ) type ExeInterface interface { LogInit() ChooseStrategy() ExeInit() RocketMQStart() } type ExeCommon struct { CfgFilename string RebuildFilename string StrategyInterface Strategies } func RegisterExitSignal() { signalChan := make(chan os.Signal, 1) signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM) closeCnt := 0 go func() { for { select { case <-signalChan: if closeCnt == 0 { close(ExitChan) closeCnt++ } else { log.Info("Error ", "close ExitChan twice") } } } }() } func (e* ExeCommon)Run(ein ExeInterface) { ein.LogInit() ConfigInit(e.CfgFilename) RegisterExitSignal() ein.ChooseStrategy() ein.ExeInit() go RebuildTask(e.RebuildFilename) go e.StrategyInterface.GoRecieveMsg() time.Sleep(time.Duration(500) * time.Millisecond) ein.RocketMQStart() }
package bench

import (
	"database/sql"
	"fmt"
)

// sampleFunc prints a greeting; kept as a trivial example.
func sampleFunc() {
	fmt.Println("Hello World!")
}

// RunExhaustive selects every row from the employee table and reports how
// many rows were read. Scan and iteration errors are now propagated
// (previously rows.Scan's error and rows.Err() were ignored).
func RunExhaustive(db *sql.DB) (err error) {
	// Simple select from the tables
	selectFromEmployee := "select * from employee"
	rows, err := db.Query(selectFromEmployee)
	if err != nil {
		return err
	}
	defer rows.Close()

	numRows := 0
	var fname, lname string
	var empNo int64
	for rows.Next() {
		if err := rows.Scan(&empNo, &fname, &lname); err != nil {
			return err
		}
		numRows++
	}
	// Surface any error that terminated the iteration early.
	if err := rows.Err(); err != nil {
		return err
	}
	fmt.Println("Extracted ", numRows, " from employees table")
	return nil
}
package utils import ( "runtime" "sync" "time" ) var statsMux sync.Mutex var startTime = time.Now().Unix() func GetMemory() uint64 { var mem runtime.MemStats runtime.ReadMemStats(&mem) return mem.Sys } func GetUptime() uint64 { return uint64(time.Now().Unix() - startTime) } func GetNumGoRoutine() uint64 { statsMux.Lock() defer statsMux.Unlock() return uint64(runtime.NumGoroutine()) }
package main

import (
	"fmt"
)

// Student describes a student record.
type Student struct {
	id   int    // student id
	name string // name
	age  byte   // age; initialized with a rune literal below, so it prints as a number
	addr string // address
}

func main() {
	// Method 1: positional initialization — every field must be supplied, in
	// declaration order.
	s1 := Student{101, "neil", 'm', "wuhan"}
	fmt.Println("s1 = ", s1)

	// Method 2: partial field-name initialization — omitted fields take the
	// zero value of their type.
	s2 := Student{id: 101, addr: "wuhan"}
	fmt.Println("s2 = ", s2)

	// Method 3: pointer initialization — omitted fields take the zero value.
	s3 := &Student{id: 101, addr: "wuhan"}
	fmt.Println("*s3 = ", *s3)

	// Output:
	// s1 = {101 neil 109 wuhan}   ('m' is byte 109)
	// s2 = {101 0 wuhan}
	// *s3 = {101 0 wuhan}
}
// Package trivial provides trivial functionality. ^_^ package trivial func StringsContain(s []string, e string) bool { for _, x := range s { if x == e { return true } } return false }
package depot import ( "testing" ) func TestAddStock(t *testing.T) { reset() sg := Stock{Name: "Google"} sa := Stock{Name: "Amazon"} sn := Stock{Name: "Netflix"} ss := Stock{Name: "Siemens"} if len(Get()) > 0 { t.Errorf("List of stocks should be empty at the beginning! Got a length of %v ", len(Get())) } Add(sg) if len(Get()) != 1 { t.Errorf("List of stocks should have one entry! Got a length of %v ", len(Get())) } Add(sa) if len(Get()) != 2 { t.Errorf("List of stocks should have two entries! Got a length of %v ", len(Get())) } Add(sn, ss) if len(Get()) != 4 { t.Errorf("List of stocks should have two entries! Got a length of %v ", len(Get())) } } func TestInitDefaultValues(t *testing.T) { reset() //should only test if there is something as these values will change over the time InitializeWithDefaultStocks() if len(Get()) == 0 { t.Errorf("List of stocks should have entries when initialized with default values! Got a length of %v ", len(Get())) } } func TestSum(t *testing.T) { reset() sg := Stock{Name: "Google", Count: 1, Buy: 100} sa := Stock{Name: "Amazon", Count: 2.5, Buy: 10.5} sn := Stock{Name: "Netflix", Count: 3, Buy: 0.3} ss := Stock{Name: "Siemens", Count: 0.2, Buy: 1000} Add(sg, sa, sn, ss) sum := SumBuy() if sum != 327.15 { t.Errorf("Expected a sum of %v. Got %v.", 327.15, sum) } }
package main import ( firestore "cloud.google.com/go/firestore" "context" "encoding/json" firebase "firebase.google.com/go" "fmt" "github.com/gorilla/mux" "google.golang.org/api/option" "log" "net/http" "os" "time" ) const FulfillmentCol = "fulfillments" const HomeAutoCol = "home-automation" var opt = option.WithCredentialsFile(os.Getenv("GOOGLE_APPLICATION_CREDENTIALS")) type Fulfillment struct { ID string `json:"id"` Title string `json:"title"` Body string `json:"body"` } func main() { router := mux.NewRouter() router.HandleFunc("/fulfillment", fulfillmentHandler).Methods("POST") port := os.Getenv("PORT") if port == "" { port = "8080" } log.Fatal(http.ListenAndServe(fmt.Sprintf(":%s", port), router)) } func getNowInMillisecond() int64 { return time.Now().UnixNano() / 1000000 } func panicOnError(err error) { if err != nil { panic(err) } } func endcodePostAndWrite(w http.ResponseWriter, i interface{}) { w.Header().Set("Content-Type", "application/json") json.NewEncoder(w).Encode(i) } func setupFirestore() (context.Context, *firestore.Client) { ctx := context.Background() app, err := firebase.NewApp(ctx, nil, opt) panicOnError(err) client, err := app.Firestore(ctx) panicOnError(err) return ctx, client } func handleTypeCheck(ok bool, msg string) { if !ok { fmt.Println(msg) } } func handleLighting(octx []interface{}) { if len(octx) == 0 { fmt.Println("No output context detected") return } context, _ := octx[0].(map[string]interface{}) params, _ := context["parameters"].(map[string]interface{}) room, ok := params["MyRooms.original"].(string) handleTypeCheck(ok, "Room is not OK!") state, ok := params["State.original"].(string) handleTypeCheck(ok, "State is not OK!") ctx, client := setupFirestore() defer client.Close() _, err := client.Collection(HomeAutoCol).Doc("lighting").Update( ctx, []firestore.Update{{Path: room, Value: state}}, ) panicOnError(err) } func parseIntent(intent map[string]interface{}, octx []interface{}) { if intent["displayName"] == "Lighting" { 
handleLighting(octx) } } func fulfillmentHandler(w http.ResponseWriter, r *http.Request) { var bodyJson map[string]interface{} err := json.NewDecoder(r.Body).Decode(&bodyJson) if err != nil { fmt.Println("Error:", err) } ctx, client := setupFirestore() defer client.Close() jsonBytes, err := json.Marshal(bodyJson) panicOnError(err) _, _, err = client.Collection(FulfillmentCol).Add( ctx, map[string]interface{}{ "fulfillment": string(jsonBytes), "timestamp": getNowInMillisecond(), }, ) panicOnError(err) qresult, _ := bodyJson["queryResult"].(map[string]interface{}) intent, _ := qresult["intent"].(map[string]interface{}) octx, _ := qresult["outputContexts"].([]interface{}) parseIntent(intent, octx) endcodePostAndWrite(w, &map[string]interface{}{ "greeting": "hello dialogflow", }) }
//Author: Shenung Fouamvung package main //MIPS processor simulation writen in Go Lang //must have Go Lang installed to compile and run //takes in 2 16bit binary values and computes them using logic gates to come to a result //to run, CD to the file directory and use the command in command line terminal "go run main.go" //example inputs: // a: 000000000001010 // b: 000000000001101 //output: // result: 0000000000010111 // overflow: 0 import ( "CSCI-117/project/components" "fmt" ) func main() { var a, b, ac = make([]byte, 16), make([]byte, 16), make([]byte, 16) fmt.Println("Enter 16bit binary [A]: ") fmt.Scanln(&a) fmt.Println("Enter 16bit binary [B]: ") fmt.Scanln(&b) fmt.Println("Enter 16bit AC value: ") fmt.Scanln(&ac) fmt.Println(a) fmt.Println(b) results, overflow := components.ALU16bit(a, b, 10) for i := range results { fmt.Print(results[i]) } fmt.Println() fmt.Println("overflow:", overflow) fmt.Println(components.ALU16bitMult(a, b, ac)) }
package ch01 // Find the largest element in the list func Largest(in []int) int { var max int if len(in) == 0 { return -1 } for _, value := range in { if value > max { max = value } } return max }
package heap func LookupMethodInClass(class *Class, name, descriptor string) *Method { for c:=class;c!=nil;c=c.superClass{ for _,method := range c.methods{ if method.name == name && method.descriptor == descriptor { return method } } } return nil } func lookupMethodInInterfaces(ifaces []*Class, name, descriptor string) *Method { for _,iface := range ifaces{ for _,method := range iface.methods{ if method.name == name && method.descriptor == descriptor { return method } } method := lookupMethodInInterfaces(iface.interfaces,name,descriptor) if method!=nil { return method } } return nil }
// isInterleave reports whether s3 can be formed by interleaving s1 and s2,
// preserving the relative character order within each.
//
// Fix: the original DFS had no memoization and was worst-case exponential
// (e.g. s1 and s2 both long runs of the same character). A memo table over
// (c1, c2) — c3 is always c1+c2 — makes it O(len(s1)*len(s2)) time/space.
func isInterleave(s1 string, s2 string, s3 string) bool {
	if len(s1)+len(s2) != len(s3) {
		return false
	}
	// memo[c1][c2]: 0 = unknown, 1 = true, 2 = false.
	memo := make([][]int8, len(s1)+1)
	for i := range memo {
		memo[i] = make([]int8, len(s2)+1)
	}
	var dfs func(c1, c2 int) bool
	dfs = func(c1, c2 int) bool {
		c3 := c1 + c2
		if c3 == len(s3) {
			return true
		}
		if v := memo[c1][c2]; v != 0 {
			return v == 1
		}
		ok := (c1 < len(s1) && s3[c3] == s1[c1] && dfs(c1+1, c2)) ||
			(c2 < len(s2) && s3[c3] == s2[c2] && dfs(c1, c2+1))
		if ok {
			memo[c1][c2] = 1
		} else {
			memo[c1][c2] = 2
		}
		return ok
	}
	return dfs(0, 0)
}
package mycocontext // key is used for setting and getting from mycocontext.Context in this project. type key int // These are keys for the context that floats around. const ( // keyHyphaName is for storing current hypha name as a string here. keyHyphaName key = iota // keyInputBuffer is for storing *bytes.Buffer with unread bytes of the source document. keyInputBuffer // KeyRecursionLevel stores current level of transclusion recursion. keyRecursionLevel // keyWebSiteURL )
package config

import (
	"os"
	"path"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"

	"github.com/instructure-bridge/muss/testutil"
)

// TestSecretCommands exercises the secret-command machinery end to end:
// spec parsing, env-var population, file-based caching (invalidation by
// passphrase change, corruption, and age), multi-var "parse" mode,
// multiple concurrent secrets, per-command passphrases, and error paths.
//
// The "some-secret" helper script appends "$MUSS_TEST_SECRET_LOG <p|s>"
// to secret-log.txt on each invocation (p = pre-cmd, s = secret), so the
// log-content assertions below count how many times each actually ran.
func TestSecretCommands(t *testing.T) {
	// Absolute path to the helper script used as the secret command.
	var secretCmdPath string
	if dir, err := os.Getwd(); err != nil {
		t.Fatalf("failed to get working dir: %s", err)
	} else {
		secretCmdPath = path.Join(dir, "..", "testdata", "bin", "some-secret")
	}
	testutil.WithTempDir(t, func(tmpdir string) {
		findCacheRoot()
		os.Unsetenv("MUSS_TEST_PASSPHRASE")
		cfg := &ProjectConfig{
			SecretPassphrase: "$MUSS_TEST_PASSPHRASE",
			SecretCommands: map[string]*SecretCommand{
				"some": &SecretCommand{
					Exec: []string{secretCmdPath, "something"},
					EnvCommands: []*EnvCommand{
						&EnvCommand{
							Exec:    []string{secretCmdPath, "pre-cmd"},
							Varname: "MUSS_TEST_PASSPHRASE",
						},
					},
				},
			},
		}
		varname := "MUSS_TEST_SECRET_VAR"
		os.Unsetenv(varname)
		secretSpec := map[string]interface{}{
			"some":    []string{"green"},
			"varname": varname,
		}
		logvarname := "MUSS_TEST_SECRET_LOG"
		os.Setenv(logvarname, "shhh")
		secret, err := parseSecret(cfg, secretSpec)
		if err != nil {
			t.Fatalf("error preparing secret env file: %s", err)
		}
		secretCacheFile := path.Join(secretDir, genFileName([]string{secretCmdPath, "something", "green"}))
		secretLog := "secret-log.txt"
		testutil.NoFileExists(t, secretCacheFile)

		// An already-set env var short-circuits the secret command entirely.
		os.Setenv(varname, "oops")
		testLoadSecret(t, secret)
		assert.Equal(t, os.Getenv(varname), "oops", "existing var not overwritten")
		testutil.NoFileExists(t, secretLog) // secret not called

		expSecret := "secret is [something green]"

		os.Unsetenv(varname)
		testLoadSecret(t, secret)
		assert.Equal(t, expSecret, os.Getenv(varname), "sets env var")
		assert.FileExists(t, secretCacheFile)
		assert.Equal(t, "shhh p\nshhh s\n", testutil.ReadFile(t, secretLog), "pre-cmd and secret each called once")

		os.Setenv(logvarname, "again")
		os.Unsetenv(varname)
		testLoadSecret(t, secret)
		assert.Equal(t, expSecret, os.Getenv(varname), "sets env var")
		assert.Equal(t, "shhh p\nshhh s\n", testutil.ReadFile(t, secretLog), "neither called again (cached)")

		// Changing the passphrase invalidates the cache entry.
		os.Setenv("MUSS_TEST_PASSPHRASE", "invalidate!")
		os.Unsetenv(varname)
		testLoadSecret(t, secret)
		assert.Equal(t, expSecret, os.Getenv(varname), "sets env var")
		assert.Equal(t, "shhh p\nshhh s\nagain s\n", testutil.ReadFile(t, secretLog), "secret called again (invalid cache)")

		os.Unsetenv(varname)
		testLoadSecret(t, secret)
		assert.Equal(t, expSecret, os.Getenv(varname), "sets env var")
		assert.Equal(t, "shhh p\nshhh s\nagain s\n", testutil.ReadFile(t, secretLog), "secret cached")

		// Corrupting the cache file forces a re-run.
		appendToTestFile(t, secretCacheFile, "x")
		os.Unsetenv(varname)
		testLoadSecret(t, secret)
		assert.Equal(t, expSecret, os.Getenv(varname), "sets env var")
		assert.Equal(t, "shhh p\nshhh s\nagain s\nagain s\n", testutil.ReadFile(t, secretLog), "cache corrupted")

		os.Setenv(logvarname, "still")
		testutil.WriteFile(t, secretCacheFile, "x")
		os.Unsetenv(varname)
		testLoadSecret(t, secret)
		assert.Equal(t, expSecret, os.Getenv(varname), "sets env var")
		assert.Equal(t, "shhh p\nshhh s\nagain s\nagain s\nstill s\n", testutil.ReadFile(t, secretLog), "cache corrupted")

		os.Unsetenv(varname)
		testLoadSecret(t, secret)
		assert.Equal(t, expSecret, os.Getenv(varname), "sets env var")
		assert.Equal(t, "shhh p\nshhh s\nagain s\nagain s\nstill s\n", testutil.ReadFile(t, secretLog), "cached again")

		// With an explicit cache duration the entry expires once its mtime
		// is older than the duration (24h + 1s below).
		cfg.SecretCommands["some"].Cache = "24h"
		secret, err = parseSecret(cfg, secretSpec)
		if err != nil {
			t.Fatalf("error preparing secret: %s", err)
		}
		os.Setenv(logvarname, "more")
		os.Unsetenv(varname)
		testLoadSecret(t, secret)
		assert.Equal(t, expSecret, os.Getenv(varname), "sets env var")
		assert.Equal(t, "shhh p\nshhh s\nagain s\nagain s\nstill s\n", testutil.ReadFile(t, secretLog), "cached")

		touch := time.Now().Add(-86401 * time.Second)
		if err := os.Chtimes(secretCacheFile, touch, touch); err != nil {
			t.Fatal(err)
		}
		os.Unsetenv(varname)
		testLoadSecret(t, secret)
		assert.Equal(t, expSecret, os.Getenv(varname), "sets env var")
		assert.Equal(t, "shhh p\nshhh s\nagain s\nagain s\nstill s\nmore s\n", testutil.ReadFile(t, secretLog), "past cache duration")

		os.Unsetenv(varname)
		testLoadSecret(t, secret)
		assert.Equal(t, expSecret, os.Getenv(varname), "sets env var")
		assert.Equal(t, "shhh p\nshhh s\nagain s\nagain s\nstill s\nmore s\n", testutil.ReadFile(t, secretLog), "cached again")

		// Cache "none" disables caching entirely: every load re-runs.
		os.Setenv(logvarname, "none")
		cfg.SecretCommands["some"].Cache = "none"
		secret, err = parseSecret(cfg, secretSpec)
		if err != nil {
			t.Fatalf("error preparing secret: %s", err)
		}
		os.Unsetenv(varname)
		testLoadSecret(t, secret)
		assert.Equal(t, expSecret, os.Getenv(varname), "sets env var")
		assert.Equal(t, "shhh p\nshhh s\nagain s\nagain s\nstill s\nmore s\nnone s\n", testutil.ReadFile(t, secretLog), "no caching")

		os.Unsetenv(varname)
		testLoadSecret(t, secret)
		assert.Equal(t, expSecret, os.Getenv(varname), "sets env var")
		assert.Equal(t, "shhh p\nshhh s\nagain s\nagain s\nstill s\nmore s\nnone s\nnone s\n", testutil.ReadFile(t, secretLog), "still no caching")

		// "parse: true" lets one command emit several NAME=value lines.
		t.Run("multiple vars in one command", func(t *testing.T) {
			os.Setenv("MUSS_TEST_PASSPHRASE", "howdy")
			os.Unsetenv("MUSS_TEST_LINE_1_SETUP")
			os.Unsetenv("MUSS_TEST_LINE_2_SETUP")
			os.Unsetenv("MUSS_TEST_LINE_1_SECRET")
			os.Unsetenv("MUSS_TEST_LINE_2_SECRET")
			cfg := &ProjectConfig{
				SecretPassphrase: "$MUSS_TEST_PASSPHRASE",
				SecretCommands: map[string]*SecretCommand{
					"some": &SecretCommand{
						Exec: []string{secretCmdPath, "--multi"},
						EnvCommands: []*EnvCommand{
							&EnvCommand{
								Exec:  []string{secretCmdPath, "--multi", "SETUP"},
								Parse: true,
							},
						},
					},
				},
			}
			secretSpec := map[string]interface{}{
				"some":  []string{"SECRET"},
				"parse": true,
			}
			secret, err := parseSecret(cfg, secretSpec)
			if err != nil {
				t.Fatalf("error preparing secret env file: %s", err)
			}
			secretLog := "secret-log.txt"
			os.Remove(secretLog)
			testutil.NoFileExists(t, secretLog)
			testLoadSecret(t, secret)
			assert.Equal(t, "foo bar baz", os.Getenv("MUSS_TEST_LINE_1_SETUP"), "set first env var")
			assert.Equal(t, "something", os.Getenv("MUSS_TEST_LINE_2_SETUP"), "set second env var")
			assert.Equal(t, "foo bar baz", os.Getenv("MUSS_TEST_LINE_1_SECRET"), "set first env var")
			assert.Equal(t, "something", os.Getenv("MUSS_TEST_LINE_2_SECRET"), "set second env var")
			assert.Equal(t, "multi SETUP\nmulti SECRET\n", testutil.ReadFile(t, secretLog), "run once")
			testLoadSecret(t, secret)
			// setup only gets called once and the secret gets cached.
			assert.Equal(t, "multi SETUP\nmulti SECRET\n", testutil.ReadFile(t, secretLog), "neither runs again")
		})

		t.Run("multiple secrets", func(t *testing.T) {
			logFile := testutil.TempFile(t, "", "muss-secret-log")
			logFile.Close()
			os.Setenv("MUSS_TEST_LOG", logFile.Name())
			defer func() {
				os.Unsetenv("MUSS_TEST_LOG")
				os.Remove(logFile.Name())
				os.Unsetenv("MUSS_TEST_PW")
				os.Unsetenv("MUSS_TEST_BOX")
				os.Unsetenv("MUSS_TEST_SAFE")
				os.Unsetenv("MUSS_TEST_B1")
				os.Unsetenv("MUSS_TEST_S1")
			}()
			// script logs "<name> 1", sleeps, logs "<name> 2", then echoes its
			// remaining args — so start/finish interleaving can be inspected.
			script := func(name, result string) []string {
				return []string{
					"/bin/sh",
					"-c",
					`n="$1"; shift; echo "$n 1" >> "$MUSS_TEST_LOG"; sleep 1; echo "$n 2" >> "$MUSS_TEST_LOG"; echo "$*"`,
					"--",
					name,
					result,
				}
			}
			os.Setenv("MUSS_TEST_PW", "hi")
			cfg := newTestConfig(t, map[string]interface{}{
				"secret_passphrase": "$MUSS_TEST_PW",
				"secret_commands": map[string]interface{}{
					"box": map[string]interface{}{
						"exec": []string{"echo", "box"},
						"env_commands": []interface{}{
							map[string]interface{}{
								"exec":    script("box", "1"),
								"varname": "MUSS_TEST_BOX",
							},
						},
					},
					"safe": map[string]interface{}{
						"exec": []string{"echo", "safe"},
						"env_commands": []interface{}{
							map[string]interface{}{
								"exec":  script("safe", "MUSS_TEST_SAFE=2"),
								"parse": true,
							},
						},
					},
				},
			})
			secretSpecs := []map[string]interface{}{
				map[string]interface{}{
					"box":     []string{"a"},
					"varname": "MUSS_TEST_B1",
				},
				map[string]interface{}{
					"safe":    []string{"b"},
					"varname": "MUSS_TEST_S1",
				},
			}
			for _, ss := range secretSpecs {
				parsed, err := parseSecret(cfg, ss)
				if err != nil {
					t.Fatal(err)
				}
				cfg.Secrets = append(cfg.Secrets, parsed)
			}
			assert.Nil(t, cfg.LoadEnv(), "no errors")
			assert.Equal(t, os.Getenv("MUSS_TEST_BOX"), "1")
			assert.Equal(t, os.Getenv("MUSS_TEST_SAFE"), "2")
			assert.Equal(t, os.Getenv("MUSS_TEST_B1"), "box a")
			assert.Equal(t, os.Getenv("MUSS_TEST_S1"), "safe b")
			// Test that each starts and finishes before moving on to the other
			// but ignore the order in which they run.
			logged := testutil.ReadFile(t, logFile.Name())
			assert.Contains(t, logged, "box 1\nbox 2\n")
			assert.Contains(t, logged, "safe 1\nsafe 2\n")
		})

		// A per-command passphrase overrides the project-wide one.
		t.Run("passphrase", func(t *testing.T) {
			cfg := &ProjectConfig{
				SecretPassphrase: "$MUSS_TEST_PASSPHRASE",
				SecretCommands: map[string]*SecretCommand{
					"foo": &SecretCommand{
						Exec: []string{"echo", "foo"},
						EnvCommands: []*EnvCommand{
							&EnvCommand{
								Exec:  []string{"echo", "foo"},
								Parse: true,
							},
						},
						Passphrase: "$MUSS_TEST_FOO",
					},
					"bar": &SecretCommand{
						Exec: []string{"echo", "bar"},
						EnvCommands: []*EnvCommand{
							&EnvCommand{
								Exec:  []string{"echo", "foo"},
								Parse: true,
							},
						},
					},
				},
			}
			foo, err := parseSecret(cfg, map[string]interface{}{"foo": []string{"SECRET"}})
			if err != nil {
				t.Fatalf("error preparing secret env file: %s", err)
			}
			bar, err := parseSecret(cfg, map[string]interface{}{"bar": []string{"SECRET"}})
			if err != nil {
				t.Fatalf("error preparing secret env file: %s", err)
			}
			assert.Equal(t, "$MUSS_TEST_FOO", foo.passphrase, "secret-command-specific")
			assert.Equal(t, "$MUSS_TEST_PASSPHRASE", bar.passphrase, "global")
		})
	})

	// Each case below mutates cfg/spec to trigger one specific error.
	t.Run("errors", func(t *testing.T) {
		os.Unsetenv("MUSS_TEST_PASSPHRASE")
		varname := "MUSS_TEST_SECRET_VAR"
		os.Unsetenv(varname)
		cfg := &ProjectConfig{
			SecretCommands: map[string]*SecretCommand{
				"some": &SecretCommand{
					Exec: []string{secretCmdPath, "something"},
				},
			},
		}
		secretSpec := map[string]interface{}{
			"some":    "string",
			"varname": varname,
		}
		assert.Equal(t, "value for secret args must be a list", testSecretError(t, cfg, secretSpec))
		secretSpec["some"] = []string{"list"}
		assert.Equal(t, "a passphrase is required to use secrets", testSecretError(t, cfg, secretSpec))
		cfg.SecretCommands["some"].Passphrase = "static"
		assert.Equal(t, "passphrase should contain a variable so it isn't plain text", testSecretError(t, cfg, secretSpec))
		os.Unsetenv("MUSS_TEST_PASSPHRASE")
		cfg.SecretCommands["some"].Passphrase = "$MUSS_TEST_PASSPHRASE"
		assert.Equal(t, "a passphrase is required to use secrets", testSecretError(t, cfg, secretSpec))
		os.Setenv("MUSS_TEST_PASSPHRASE", "foo")
		secretSpec["exec"] = []string{"echo", "nerts"}
		// Map iteration order makes which pair is reported nondeterministic.
		assert.Regexp(t, `secret cannot have multiple commands: ("some" and "exec"|"exec" and "some")`, testSecretError(t, cfg, secretSpec))
		cfg.SecretCommands["some"].Exec = []string{secretCmdPath, "--no-var"}
		secretSpec = map[string]interface{}{
			"some": []string{},
		}
		assert.Equal(t, `env command must have either "parse: true" or a "varname"`, testSecretError(t, cfg, secretSpec))
		secretSpec["parse"] = true
		assert.Equal(t, `failed to parse name=value line: NO_EQUAL_SIGN`, testSecretError(t, cfg, secretSpec))
		secretSpec["varname"] = "MUSS_TEST_SECRET"
		assert.Equal(t, `use "parse: true" or "varname", not both`, testSecretError(t, cfg, secretSpec))
		delete(secretSpec, "parse")
		cfg.SecretCommands["some"].Cache = "foo"
		assert.Equal(t, `time: invalid duration foo`, testSecretError(t, cfg, secretSpec))
		cfg.SecretCommands["some"].Cache = "none"
		cfg.SecretCommands["some"].Passphrase = ""
		// Test that passphrase can be blank if cache is 'none' (_no_ error).
		if s, err := parseSecret(cfg, secretSpec); err != nil {
			t.Fatal(err)
		} else {
			if err := loadEnvFromCmds(s); err != nil {
				t.Fatal(err)
			}
		}
		assert.Equal(t, "NO_EQUAL_SIGN", os.Getenv("MUSS_TEST_SECRET"))
		os.Unsetenv("MUSS_TEST_SECRET")
		cfg.SecretCommands["some"].Cache = ""
		assert.Equal(t, `a passphrase is required to use secrets`, testSecretError(t, cfg, secretSpec))
	})
}

// testSecretError parses spec (and, if parsing succeeds, loads it) and
// returns the resulting error message; it fails the test if no error occurs.
func testSecretError(t *testing.T, cfg *ProjectConfig, spec map[string]interface{}) string {
	t.Helper()
	s, err := parseSecret(cfg, spec)
	// Some errors don't occur until trying to load it.
	if err == nil {
		err = loadEnvFromCmds(s)
	}
	if err == nil {
		t.Fatal("expected err, got nil")
	}
	return err.Error()
}

// testLoadSecret loads the secret's env commands, failing the test on error.
func testLoadSecret(t *testing.T, secret *secretCmd) {
	t.Helper()
	if err := loadEnvFromCmds(secret); err != nil {
		t.Fatalf("failed to load secret: %s", err)
	}
}

// appendToTestFile appends suffix to file (used to corrupt the cache file).
// NOTE(review): Close is both deferred and called explicitly, so the file is
// closed twice; the deferred second close's error is ignored. Harmless here,
// but worth tidying.
func appendToTestFile(t *testing.T, file, suffix string) {
	t.Helper()
	f, err := os.OpenFile(file, os.O_APPEND|os.O_WRONLY, 0600)
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()
	if _, err := f.Write([]byte(suffix)); err != nil {
		t.Fatal(err)
	}
	if err := f.Close(); err != nil {
		t.Fatal(err)
	}
}
package snap_test

import (
	"context"
	"testing"
	"time"

	"github.com/imrenagi/go-payment"
	"github.com/imrenagi/go-payment/gateway/midtrans/snap"
	"github.com/imrenagi/go-payment/invoice"
	midsnap "github.com/midtrans/midtrans-go/snap"
	"github.com/stretchr/testify/assert"
)

// baseCreditCardInvoice builds a minimal fixture: one IDR line item with
// unit price 5000 (SubTotal 5000) and a billing address, due in 24h.
func baseCreditCardInvoice() *invoice.Invoice {
	date := time.Date(2020, 8, 1, 1, 0, 0, 0, time.UTC)
	dueDate := date.Add(24 * time.Hour)
	i := invoice.New(date, dueDate)
	i.SubTotal = 5000
	i.UpsertBillingAddress("Foo", "foo@bar.com", "0812312412")
	i.SetItems(context.TODO(), []invoice.LineItem{
		{
			InvoiceID:    1,
			Name:         "Terjemahan B",
			Category:     "TRANSLATION",
			MerchantName: "Collegos",
			Currency:     "IDR",
			UnitPrice:    5000,
			Qty:          1,
		}},
	)
	return i
}

// TestCreditCardWithoutInstallment: a plain credit-card payment includes the
// service fee in the gross amount (5000 + 1000 = 6000), produces two request
// items, and enables only the credit-card payment type with 3-D Secure and
// bank "bca".
func TestCreditCardWithoutInstallment(t *testing.T) {
	inv := baseCreditCardInvoice()
	inv.ServiceFee = 1000
	inv.Payment = &invoice.Payment{
		PaymentType: payment.SourceCreditCard,
	}
	req, err := snap.NewCreditCard(inv)
	assert.NoError(t, err)
	assert.Len(t, req.EnabledPayments, 1)
	assert.Equal(t, int64(6000), req.TransactionDetails.GrossAmt)
	assert.Equal(t, 2, len(*req.Items))
	assert.Contains(t, req.EnabledPayments, midsnap.PaymentTypeCreditCard)
	assert.True(t, req.CreditCard.Secure)
	assert.Equal(t, "bca", req.CreditCard.Bank)
}

// TestCreditCardWithInstallment: a 3-term offline BCA installment includes
// the installment fee in the gross amount (5000 + 2000 = 7000), marks the
// installment required, offers term 3 under Offline only, and leaves the
// per-bank term lists (Bni, Mandiri) empty.
func TestCreditCardWithInstallment(t *testing.T) {
	inv := baseCreditCardInvoice()
	inv.InstallmentFee = 2000
	inv.Payment = &invoice.Payment{
		PaymentType: payment.SourceCreditCard,
		CreditCardDetail: &invoice.CreditCardDetail{
			Installment: invoice.Installment{
				Type: payment.InstallmentOffline,
				Term: 3,
			},
			Bank: payment.BankBCA,
		},
	}
	// NOTE(review): the error from NewCreditCard is discarded here, unlike
	// the test above — consider asserting NoError for symmetry.
	req, _ := snap.NewCreditCard(inv)
	assert.Len(t, req.EnabledPayments, 1)
	assert.Equal(t, int64(7000), req.TransactionDetails.GrossAmt)
	assert.Equal(t, 2, len(*req.Items))
	assert.Contains(t, req.EnabledPayments, midsnap.PaymentTypeCreditCard)
	assert.True(t, req.CreditCard.Secure)
	assert.Equal(t, "bca", req.CreditCard.Bank)
	assert.True(t, req.CreditCard.Installment.Required)
	assert.Contains(t, req.CreditCard.Installment.Terms.Offline, int8(3))
	assert.Empty(t, req.CreditCard.Installment.Terms.Bni)
	assert.Empty(t, req.CreditCard.Installment.Terms.Mandiri)
}
package test

import (
	"core"
	"entity"
	"testing"
)

// Test_Open checks PdaProcessor.Open against both an invalid config
// ("{a = a}" is not JSON) and a full valid PDA definition.
func Test_Open(t *testing.T) {
	t.Run("open should fail", func(t *testing.T) {
		pda := core.PdaProcessor{}
		str := "{a = a}"
		isParsingSuccess := pda.Open([]byte(str))
		if isParsingSuccess != false {
			t.Errorf("output for %s is \n %t; want false", str, isParsingSuccess)
		}
	})
	t.Run("open should work as expected", func(t *testing.T) {
		pda := core.PdaProcessor{}
		str := `{"name": "HelloPDA", "states": ["q1", "q2", "q3", "q4"], "input_alphabet": ["0", "1"], "stack_alphabet" : ["0", "1"], "accepting_states": ["q1", "q4"], "start_state": "q1", "transitions": [ ["q1", null, null, "q2", "$"], ["q2", "0", null, "q2", "0"], ["q2", "1", "0", "q3", null], ["q3", "1", "0", "q3", null], ["q3", null, "$", "q4", null]], "eos": "$"}`
		isParsingSuccess := pda.Open([]byte(str))
		if isParsingSuccess != true {
			t.Errorf("output for %s is \n %t; want true", str, isParsingSuccess)
		}
		if pda.GetPDAName() != "HelloPDA" {
			t.Errorf("Parsing went wrong, PDA name is different")
		}
	})
}

// Test_Reset verifies Reset empties the stack and processes the start
// state (clock advances to 1).
func Test_Reset(t *testing.T) {
	t.Run("should reset the pda ", func(t *testing.T) {
		pda := core.PdaProcessor{
			PdaConf: entity.PDAConf{
				Name:            "Test PDA",
				States:          []string{"q1", "q2", "q3", "q4"},
				InputAlphabet:   []string{"0", "1"},
				StackAlphabet:   []string{"0", "1"},
				AcceptingStates: []string{"q1", "q4"},
				StartState:      "q1",
				Transitions:     [][]string{{"q1", "", "", "q2", ""}},
				Eos:             "$",
			},
			State: "q1",
		}
		// Seed the stack so the reset visibly clears it.
		pda.Stack.Push("a")
		if pda.Stack.IsEmpty() {
			t.Errorf("initial Stack is empty")
		}
		pda.Reset()
		if !pda.Stack.IsEmpty() {
			t.Errorf("stack is not reset")
		}
		if pda.GetClock() != 1 {
			t.Errorf("Start state has not been processed")
		}
	})
}

// Test_Is_Accepted: a PDA sitting in an accepting state with an empty
// stack reports acceptance.
func Test_Is_Accepted(t *testing.T) {
	t.Run("return True if PdaProcessor is currently at an accepting state with empty stack", func(t *testing.T) {
		pda := core.PdaProcessor{}
		pda.PdaConf.AcceptingStates = append(pda.PdaConf.AcceptingStates, "q1", "q2")
		pda.State = "q1"
		accepted := pda.Is_accepted()
		if !accepted {
			t.Errorf("expecting the state to be accepting and stack to be empty but failed")
		}
	})
}

// Test_Current_State checks the Current_state accessor.
func Test_Current_State(t *testing.T) {
	t.Run("check current pda state", func(t *testing.T) {
		pda := core.PdaProcessor{}
		state := "q1"
		pda.State = state
		got := pda.Current_state()
		if got != state {
			t.Errorf("expecting the state to be q1 got %s", got)
		}
	})
}

// Test_Put feeds tokens to the PDA and checks both the number of
// transitions taken per token and the clock after each step.
func Test_Put(t *testing.T) {
	t.Run("Put token should return transitions taken - multiple transitions", func(t *testing.T) {
		pda := core.PdaProcessor{
			PdaConf: entity.PDAConf{
				Name:            "Test PDA",
				States:          []string{"q1", "q2", "q3", "q4", "q5"},
				InputAlphabet:   []string{"0", "1"},
				StackAlphabet:   []string{"0", "1"},
				AcceptingStates: []string{"q1", "q5"},
				StartState:      "q1",
				Transitions: [][]string{
					{"q1", "", "", "q2", "$"},
					{"q2", "", "", "q3", ""},
					{"q3", "0", "", "q3", "0"},
					{"q3", "1", "0", "q4", ""},
					{"q4", "1", "0", "q4", ""},
					{"q4", "", "$", "q5", ""}},
				Eos: "$",
			},
			State: "q1",
		}
		// The blank token crosses two epsilon transitions (q1->q2->q3).
		transitionCount := pda.Put(" ")
		if transitionCount != 2 || pda.GetClock() != 2 {
			t.Errorf("Expected transition count to be 2 got %d", transitionCount)
		}
		transitionCount = pda.Put("0")
		if transitionCount != 1 || pda.GetClock() != 3 {
			t.Errorf("Expected transition count to be 1 got %d", transitionCount)
		}
		transitionCount = pda.Put("0")
		if transitionCount != 1 || pda.GetClock() != 4 {
			t.Errorf("Expected transition count to be 1 got %d", transitionCount)
		}
	})
	t.Run("Put token should return transitions taken", func(t *testing.T) {
		pda := core.PdaProcessor{
			PdaConf: entity.PDAConf{
				Name:            "Test PDA",
				States:          []string{"q1", "q2", "q3", "q4"},
				InputAlphabet:   []string{"0", "1"},
				StackAlphabet:   []string{"0", "1"},
				AcceptingStates: []string{"q1", "q4"},
				StartState:      "q1",
				Transitions: [][]string{{"q1", "", "", "q2", "$"},
					{"q2", "0", "", "q2", "0"},
					{"q2", "1", "0", "q3", ""},
					{"q3", "1", "0", "q3", ""},
					{"q3", "", "$", "q4", ""}},
				Eos: "$",
			},
			State: "q1",
		}
		transitionCount := pda.Put(" ")
		if transitionCount != 1 || pda.GetClock() != 1 {
			t.Errorf("Expected transition count to be 1 got %d", transitionCount)
		}
		transitionCount = pda.Put("0")
		if transitionCount != 1 || pda.GetClock() != 2 {
			t.Errorf("Expected transition count to be 1 got %d", transitionCount)
		}
		transitionCount = pda.Put("0")
		// NOTE(review): the condition checks for count 1, but the message
		// says "to be 2" — the message looks like a copy-paste slip.
		if transitionCount != 1 || pda.GetClock() != 3 {
			t.Errorf("Expected transition count to be 2 got %d", transitionCount)
		}
	})
}
/*
 * Copyright 2015 Manish R Jain <manishrjain@gmail.com>
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package concurrent

import (
	"log"
	"math/rand"
	"sync/atomic"
	"unsafe"
)

// kv is one key/value slot; k == 0 marks the slot as free, so key 0 is
// not a valid user key.
type kv struct {
	k uint64
	v unsafe.Pointer
}

// bucket holds a fixed number of slots. Keys map to a bucket via
// k & (sz - 1); within a bucket, slots are probed linearly.
type bucket struct {
	elems [8]kv
}

// Container roles used to index Map.cs.
const (
	MUTABLE = iota
	IMMUTABLE
)

// container is one fixed-size hash-table generation.
type container struct {
	status   int32  // MUTABLE or IMMUTABLE
	sz       uint64 // number of buckets; always a power of 2
	list     []*bucket
	numElems uint32 // occupied-slot count, maintained with atomic adds
}

// Map is a lock-free uint64 -> unsafe.Pointer map holding a mutable and
// an immutable container generation.
//
// NOTE(review): cs is declared as [2]unsafe.Pointer, yet SetNilIfPresent,
// StreamUntilCap and StreamAll below range over it and access c.status /
// c.sz / c.list directly, which does not type-check against
// unsafe.Pointer elements — confirm whether cs was meant to be
// [2]*container (with atomic loads at each use).
type Map struct {
	cs   [2]unsafe.Pointer
	size uint32
}

// powOf2 reports whether sz is a positive power of two.
func powOf2(sz int) bool {
	return sz > 0 && (sz&(sz-1)) == 0
}

// initContainer marks cs mutable and pre-allocates its sz buckets.
func initContainer(cs *container, sz uint64) {
	cs.status = MUTABLE
	cs.sz = sz
	cs.list = make([]*bucket, sz)
	for i := range cs.list {
		cs.list[i] = new(bucket)
	}
}

// NewMap creates a Map with sz buckets; sz must be a power of 2
// (required by the k & (sz-1) bucket-index trick).
func NewMap(sz int) *Map {
	if !powOf2(sz) {
		log.Fatal("Map can only be created for a power of 2.")
	}
	c := new(container)
	initContainer(c, uint64(sz))
	m := new(Map)
	m.cs[MUTABLE] = unsafe.Pointer(c)
	m.cs[IMMUTABLE] = nil
	return m
}

// get returns the value stored for k in this container, or nil if the key
// is absent (or its value has been cleared).
func (c *container) get(k uint64) unsafe.Pointer {
	bi := k & (c.sz - 1)
	b := c.list[bi]
	for i := range b.elems {
		e := &b.elems[i]
		if ek := atomic.LoadUint64(&e.k); ek == k {
			return e.v
		}
	}
	return nil
}

// getOrInsert stores v for k unless a value is already present, returning
// whichever value ends up stored. A nil return means every slot in the
// target bucket is taken by other keys (the bucket is full).
func (c *container) getOrInsert(k uint64, v unsafe.Pointer) unsafe.Pointer {
	bi := k & (c.sz - 1)
	b := c.list[bi]
	for i := range b.elems {
		e := &b.elems[i]
		// Once allocated a valid key, it would never change. So, first check if
		// it's allocated. If not, then allocate it. If can't, or not allocated,
		// then check if it's k. If it is, then replace value. Otherwise continue.
		// NOTE(review): between winning the key CAS and the value CAS, other
		// threads can observe key k with a nil value; callers must tolerate
		// that window (SetNilIfPresent also produces nil values on purpose).
		if atomic.CompareAndSwapUint64(&e.k, 0, k) {
			// We claimed a free slot for k.
			atomic.AddUint32(&c.numElems, 1)
			if atomic.CompareAndSwapPointer(&e.v, nil, v) {
				return v
			}
			return atomic.LoadPointer(&e.v)
		}
		if atomic.LoadUint64(&e.k) == k {
			// Swap if previous pointer is nil.
			if atomic.CompareAndSwapPointer(&e.v, nil, v) {
				return v
			}
			return atomic.LoadPointer(&e.v)
		}
	}
	return nil
}

// GetOrInsert returns the existing value for k, or stores and returns v.
// v must not be nil (nil marks cleared slots). It checks the immutable
// generation first, then inserts into the mutable one. Returns nil only
// when the target bucket is full (growth is an unimplemented TODO).
func (m *Map) GetOrInsert(k uint64, v unsafe.Pointer) unsafe.Pointer {
	if v == nil {
		log.Fatal("GetOrInsert doesn't allow setting nil pointers.")
		return nil
	}
	// Check immutable first.
	cval := atomic.LoadPointer(&m.cs[IMMUTABLE])
	if cval != nil {
		c := (*container)(cval)
		if pv := c.get(k); pv != nil {
			return pv
		}
	}
	// Okay, deal with mutable container now.
	cval = atomic.LoadPointer(&m.cs[MUTABLE])
	if cval == nil {
		log.Fatal("This is disruptive in a bad way.")
	}
	c := (*container)(cval)
	if pv := c.getOrInsert(k, v); pv != nil {
		return pv
	}
	// We still couldn't insert the key. Time to grow.
	// TODO: Handle this case.
	return nil
}

// SetNilIfPresent clears the value stored for k (in whichever active
// container holds it) and reports whether k was found. The key slot
// itself stays allocated.
func (m *Map) SetNilIfPresent(k uint64) bool {
	for _, c := range m.cs {
		// status == 0 (zero value) means the container slot is inactive.
		if atomic.LoadInt32(&c.status) == 0 {
			continue
		}
		bi := k & (c.sz - 1)
		b := c.list[bi]
		for i := range b.elems {
			e := &b.elems[i]
			if atomic.LoadUint64(&e.k) == k {
				// Set to nil.
				atomic.StorePointer(&e.v, nil)
				return true
			}
		}
	}
	return false
}

// StreamUntilCap pushes keys from randomly chosen buckets into ch until
// ch is full (len == cap). Keys may repeat; sampling is random.
func (m *Map) StreamUntilCap(ch chan uint64) {
	for {
		ci := rand.Intn(2)
		c := m.cs[ci]
		if atomic.LoadInt32(&c.status) == 0 {
			ci += 1
			c = m.cs[ci%2] // use the other.
		}
		bi := rand.Intn(int(c.sz))
		for _, e := range c.list[bi].elems {
			if len(ch) >= cap(ch) {
				return
			}
			if k := atomic.LoadUint64(&e.k); k > 0 {
				ch <- k
			}
		}
	}
}

// StreamAll pushes every allocated key from every active container into
// ch. The caller owns ch and decides when to close it.
func (m *Map) StreamAll(ch chan uint64) {
	for _, c := range m.cs {
		if atomic.LoadInt32(&c.status) == 0 {
			continue
		}
		for i := 0; i < int(c.sz); i++ {
			for _, e := range c.list[i].elems {
				if k := atomic.LoadUint64(&e.k); k > 0 {
					ch <- k
				}
			}
		}
	}
}
package huffman import "testing" func TestByte2BinString(t *testing.T) { t.Log(Byte2BinString(143)) }
package main

import (
	"fmt"
	"io/ioutil"
	"os"
)

const (
	// debug enables verbose tracing of the path/line mapping below.
	debug = false
)

// must panics on a non-nil error.
func must(err error) {
	if err != nil {
		panic(err)
	}
}

//TODO: publish, CHANGE NAME!!!
//TODO: make a change to test_mac.sh and check that it works remotely (git stash; check; git stash pop)

// slurp reads the file at path in full, exiting the process on failure.
func slurp(path string) string {
	buf, err := ioutil.ReadFile(path)
	if err != nil {
		fmt.Fprintf(os.Stderr, "could not read %s: %s", path, err)
		os.Exit(1)
	}
	return string(buf)
}

// main compares two linter outputs ("before" and "after") in light of a
// source diff: "before" remarks are translated through the diff to their
// new positions, then any "after" remark not present in the translated
// set is printed as new. Exits 1 when new remarks exist.
// Usage: newlint <before> <after> <source-diff>
func main() {
	if len(os.Args) != 4 {
		fmt.Fprintf(os.Stderr, "wrong number of arguments\nusage: newlint <before> <after> <source-diff>\n")
		os.Exit(1)
	}
	beforePath := os.Args[1]
	afterPath := os.Args[2]
	sourceDiff := os.Args[3]
	linterOutSecond := parseLinterOut(slurp(afterPath))
	da, err := parseDiff(slurp(sourceDiff))
	if err != nil {
		fmt.Fprintf(os.Stderr, "%v\n", err)
		os.Exit(1)
	}
	linterOutFirst := parseLinterOut(slurp(beforePath))
	if debug {
		fmt.Printf("Merge base has %d linter lines in files touched by diff\n", len(linterOutFirst))
	}
	// Translate "before" positions to post-diff locations, then index
	// them for fast membership tests.
	mapToRight(linterOutFirst, da)
	linterOutFirstMap := make(map[pos]bool)
	for i := range linterOutFirst {
		linterOutFirstMap[linterOutFirst[i].pos] = true
	}
	bad := false
	for i := range linterOutSecond {
		if !linterOutFirstMap[linterOutSecond[i].pos] {
			fmt.Printf("%s:%d:%s\n", linterOutSecond[i].path, linterOutSecond[i].lineno, linterOutSecond[i].remark)
			bad = true
		}
	}
	if bad {
		os.Exit(1)
	}
}

// mapToRight rewrites each linter error's path and line number from the
// left (old) side of the diff to the right (new) side using da's
// per-file alignment table. Errors in files absent from the diff are
// left untouched.
func mapToRight(linterOut []linterError, da *diffAlignment) {
	for i := range linterOut {
		le := &linterOut[i]
		fa := da.leftToRight[le.path]
		if fa == nil {
			continue
		}
		if debug {
			fmt.Printf("%s -> %s\n", le.path, fa.toPath)
		}
		le.path = fa.toPath
		// Scan alignment ranges from the end: the last range starting at
		// or before this line supplies the line-number delta to apply.
		for j := len(fa.lines) - 1; j >= 0; j-- {
			if le.lineno >= fa.lines[j][0] {
				delta := fa.lines[j][1] - fa.lines[j][0]
				if debug {
					fmt.Printf("\t%d -> %d\n", le.lineno, le.lineno+delta)
				}
				le.lineno += delta
				break
			}
		}
	}
}
package main import ( "bytes" "fmt" "generate/execute" "generate/fetcher" "os" "text/template" ) func main() { conf := fetcher.InitConfigFromJson() tems := execute.GetTemplate(conf) //os.Mkdir("test1/haha", os.ModePerm) for _, tem := range tems { tmpl, err := template.ParseFiles("input/wxworkDao.php") if err != nil { panic(err) } //var content string var buf bytes.Buffer err = tmpl.Execute(&buf, tem) //err = tmpl.Execute(os.Stdin, tem) if err != nil { panic(err) } fileName := "output/" + tem.FileName + ".php" dstFile, err := os.Create(fileName) if err != nil { fmt.Println(err.Error()) return } defer dstFile.Close() dstFile.WriteString(buf.String()) } }
package _897_Increasing_Order_Search_Tree import "fmt" /** * Definition for a binary tree node. * type TreeNode struct { * Val int * Left *TreeNode * Right *TreeNode * } */ func increasingBST(root *TreeNode) *TreeNode { var ( s = []*TreeNode{} ol = []int{} node = root head = &TreeNode{} ) for node != nil || len(s) != 0 { for node != nil { s = append(s, node) node = node.Left } if len(s) != 0 { node = s[len(s)-1] s = s[0 : len(s)-1] ol = append(ol, node.Val) node = node.Right } } fmt.Println(ol) // 构建新的二叉树 tmp := head for _, i := range ol { tmp.Right = &TreeNode{ Val: i, } tmp = tmp.Right } return head.Right }
package main

import (
	"container/list"
	"log"
)

func main() {
	// Stack: push to the back, pop from the back (LIFO).
	stack := list.New()
	for _, v := range []int{1, 2, 3} {
		stack.PushBack(v)
	}
	log.Println(stack)
	top := stack.Back()
	stack.Remove(top)
	log.Println(top, stack)

	// Queue: push to the back, pop from the front (FIFO).
	queue := list.New()
	for _, v := range []string{"a", "b", "c"} {
		queue.PushBack(v)
	}
	front := queue.Front()
	queue.Remove(front)
	log.Println(queue, front)
}
package nacellepg import ( "fmt" "math" "github.com/jmoiron/sqlx" ) type ( PageMeta struct { Page int PageSize int } PagedResultMeta struct { NumPages int `json:"num_pages"` NumResults int `json:"num_results"` } ) func (m *PageMeta) Limit() int { return m.PageSize } func (m *PageMeta) Offset() int { return m.PageSize * (m.Page - 1) } func PagedSelect( db *LoggingDB, meta *PageMeta, baseQuery string, target interface{}, args ...interface{}, ) (*PagedResultMeta, error) { var ( total int countQuery = fmt.Sprintf("select count(*) from (%s) q", baseQuery) ) if err := sqlx.Get(db, &total, countQuery, args...); err != nil { return nil, HandleError(err, "select error") } var ( limitQuery = fmt.Sprintf("%s limit $%d offset $%d", baseQuery, len(args)+1, len(args)+2) limitArgs = append(args, meta.Limit(), meta.Offset()) ) if err := sqlx.Select(db, target, limitQuery, limitArgs...); err != nil { return nil, HandleError(err, "select error") } return &PagedResultMeta{ NumResults: total, NumPages: int(math.Ceil(float64(total) / float64(meta.Limit()))), }, nil }
// +build !windows

package internal

import (
	"github.com/sirupsen/logrus"
	logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"

	"log/syslog"
)

// setupSyslogHook builds a logrus hook that forwards log entries to a syslog
// daemon reachable via the given network protocol and host address, using the
// LOG_DAEMON facility and the "BitMaelum" tag. Compiled only on non-Windows
// platforms (see the build constraint above); the Windows build supplies its
// own implementation elsewhere.
func setupSyslogHook(proto, host string) (logrus.Hook, error) {
	return logrus_syslog.NewSyslogHook(proto, host, syslog.LOG_DAEMON, "BitMaelum")
}
package main

import (
	"encoding/json"
	"flag"
	"log"
	"sub_account_service/number_server/config"
	"sub_account_service/number_server/models"

	"github.com/nsqio/go-nsq"
	. "sub_account_service/number_server/pkg/nsq"
)

// MessageHandler buffers consumed NSQ messages on a channel so that the
// NSQ callback returns quickly and persistence happens on a separate loop.
type MessageHandler struct {
	data chan *nsq.Message
}

// HandleMessage enqueues the message for Process; it never reports an error
// so NSQ considers every delivered message handled.
func (m *MessageHandler) HandleMessage(message *nsq.Message) error {
	m.data <- message
	return nil
}

// Process drains the channel forever, decoding each message body into an
// order record and persisting it.
func (m *MessageHandler) Process() {
	for message := range m.data {
		order := models.Orders{}
		// message.Body is already []byte — no conversion needed.
		if err := json.Unmarshal(message.Body, &order); err != nil {
			// Skip malformed payloads instead of persisting a zero-value order.
			log.Printf("discarding malformed order message: %v", err)
			continue
		}
		models.AddOrder(&order)
	}
}

func main() {
	flag.Parse()
	models.Setup()
	consumer := NewConsumer("order", "finance_order", []string{config.Opts().Nsqd_Consumer_Tcp}, []string{})
	handler := new(MessageHandler)
	handler.data = make(chan *nsq.Message, 1000)
	if err := consumer.Start(nsq.HandlerFunc(handler.HandleMessage)); err != nil {
		panic(err)
	}
	// Block forever writing consumed orders to the database.
	handler.Process()
}
package auth

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"encoding/hex"
	"fmt"
	"net/http"
	"strconv"
	"time"
)

// Auther holds the credentials used to authenticate outgoing requests.
type Auther struct {
	AccessID  string
	SecretKey string
}

// UseSignAuthored selects HMAC-signature auth (true) over HTTP basic auth.
var UseSignAuthored = true

// Auth decorates req with authentication headers. When useSignAuthored is
// true it adds AccessId/TimeStamp/Sign headers computed from auth and the
// request body; otherwise it adds a basic Authorization header built from
// the receiver's own credentials.
func (a *Auther) Auth(req *http.Request, useSignAuthored bool, auth Auther, reqBody string) {
	if useSignAuthored {
		timeStamp := time.Now().Unix()
		req.Header.Add("AccessId", auth.AccessID)
		// FormatInt avoids the truncation that Itoa(int(...)) risks on
		// 32-bit platforms.
		req.Header.Add("TimeStamp", strconv.FormatInt(timeStamp, 10))
		sign := GenSign(uint64(timeStamp), auth.AccessID, auth.SecretKey, reqBody)
		req.Header.Add("Sign", sign)
	} else {
		req.Header.Add("Authorization", makeAuthHeader(a.AccessID, a.SecretKey))
	}
}

// makeAuthHeader builds an HTTP basic auth header value:
// "Basic " + base64("appID:secretKey").
func makeAuthHeader(appID, secretKey string) string {
	base64Str := base64.StdEncoding.EncodeToString(
		[]byte(
			fmt.Sprintf("%s:%s", appID, secretKey),
		),
	)
	return fmt.Sprintf("Basic %s", base64Str)
}

// GenSign computes the request signature: the HMAC-SHA256 of
// timestamp+accessId+requestBody keyed by secretKey, hex-encoded and then
// additionally base64-encoded. Peers must apply the same double encoding.
func GenSign(timeStamp uint64, accessId string, secretKey, requestBody string) string {
	// FormatUint preserves the full 64-bit timestamp on every platform.
	signBody := strconv.FormatUint(timeStamp, 10) + accessId + requestBody

	h := hmac.New(sha256.New, []byte(secretKey))
	h.Write([]byte(signBody))
	sha := hex.EncodeToString(h.Sum(nil))

	return base64.StdEncoding.EncodeToString([]byte(sha))
}
package boom

import (
	"sync"

	"go.mercari.io/datastore/v2"
)

// Batch can queue operations on Datastore and process them in batch.
// Batch does nothing until you call Exec().
// This helps to reduce the number of RPCs.
type Batch struct {
	m  sync.Mutex // guards earlyErrors
	bm *Boom
	b  *datastore.Batch
	// earlyErrors collects failures that happen before/outside the batch RPC
	// (e.g. key extraction) so Exec can merge them into its result.
	earlyErrors []error
}

// Boom object that is the source of the Batch object is returned.
func (b *Batch) Boom() *Boom {
	return b.bm
}

// Put Entity operation into the queue.
// This operation doesn't Put to Datastore immediately.
// If a h is provided, it passes the processing result to the handler, and treats the return value as the value of the result of Putting.
func (b *Batch) Put(src interface{}, h datastore.BatchPutHandler) {
	keys, err := b.bm.extractKeys([]interface{}{src})
	if err != nil {
		// Give the handler first crack at the error; it may swallow it by
		// returning nil, in which case nothing is recorded.
		if h != nil {
			err = h(nil, err)
		}
		if err != nil {
			b.m.Lock()
			b.earlyErrors = append(b.earlyErrors, err)
			b.m.Unlock()
		}
		return
	}

	b.b.Put(keys[0], src, func(key datastore.Key, err error) error {
		if err != nil {
			if h != nil {
				err = h(key, err)
			}
			return err
		}

		// Write the allocated key back into src's key field.
		err = b.bm.setStructKey(src, key)
		if err != nil {
			if h != nil {
				err = h(key, err)
			}
			if err != nil {
				b.m.Lock()
				b.earlyErrors = append(b.earlyErrors, err)
				b.m.Unlock()
			}
			return err
		}

		if h != nil {
			return h(key, nil)
		}

		return nil
	})
}

// Get Entity operation into the queue.
func (b *Batch) Get(dst interface{}, h datastore.BatchErrHandler) {
	keys, err := b.bm.extractKeys([]interface{}{dst})
	if err != nil {
		if h != nil {
			err = h(err)
		}
		if err != nil {
			b.m.Lock()
			b.earlyErrors = append(b.earlyErrors, err)
			b.m.Unlock()
		}
		return
	}

	b.b.Get(keys[0], dst, h)
}

// Delete Entity operation into the queue.
func (b *Batch) Delete(dst interface{}, h datastore.BatchErrHandler) {
	keys, err := b.bm.extractKeys([]interface{}{dst})
	if err != nil {
		if h != nil {
			err = h(err)
		}
		if err != nil {
			b.m.Lock()
			b.earlyErrors = append(b.earlyErrors, err)
			b.m.Unlock()
		}
		return
	}

	b.b.Delete(keys[0], h)
}

// Exec will perform all the processing that was queued.
// This process is done recursively until the queue is empty.
// The return value may be MultiError, but the order of contents is not guaranteed.
func (b *Batch) Exec() error {
	err := b.b.Exec(b.bm.Context)

	b.m.Lock()
	defer b.m.Unlock()

	// Merge early (pre-RPC) errors into whatever the batch itself returned,
	// clearing the queue so a later Exec starts fresh.
	if merr, ok := err.(datastore.MultiError); ok {
		merr = append(merr, b.earlyErrors...)
		b.earlyErrors = nil
		if len(merr) == 0 {
			return nil
		}
		return merr
	} else if err != nil {
		return err
	} else if len(b.earlyErrors) != 0 {
		errs := b.earlyErrors
		b.earlyErrors = nil
		return datastore.MultiError(errs)
	}

	return nil
}
package main

import (
	"fmt"
	"os"
	"sort"
	"strings"
	"text/template"
)

// component describes one UIkit component to generate a Go wrapper for.
type component struct {
	Name      string     // component name, lowercase
	Doc       string     // doc sentence inserted into the generated comment
	Modifiers []modifier // CSS modifier methods to generate
	Parts     []part     // CSS classes applied by default
	Elem      string     // go-app element constructor to render with
	Option    bool       // whether the component accepts uk-* options
}

// modifier is a CSS modifier class exposed as a chainable method.
type modifier struct {
	Name  string
	Class string
	Doc   string
}

// part is a CSS class that is always present on the component.
type part struct {
	Name     string
	Required bool
}

var components = []component{
	{
		Name:   "accordion",
		Doc:    "creates a list of items that can be shown individually by clicking an item's header.",
		Elem:   "Ul",
		Option: true,
	},
	{
		Name:      "alert",
		Doc:       "displays success, warning and error messages",
		Modifiers: modifierByPrefix("uk-alert"),
		Elem:      "Div",
		Option:    true,
	},
	{
		Name: "article",
		Doc:  "consists of the article itself, a title and meta data",
		Elem: "Article",
		Parts: []part{
			{Name: "uk-article"},
		},
	},
	{
		Name: "breadcrumb",
		Doc:  "creates breadcrumbs to show users their location within a website",
		Elem: "Ul",
		Parts: []part{
			{Name: "uk-breadcrumb"},
		},
	},
	{
		Name:      "button",
		Doc:       "creates nice looking buttons, which come in different styles",
		Modifiers: modifierByPrefix("uk-button", "uk-width-1-1"),
		Elem:      "Button",
		Parts: []part{
			{Name: "uk-button"},
		},
	},
	{
		Name:      "card",
		Doc:       "creates layout boxes with different styles",
		Modifiers: modifierByPrefix("uk-card"),
		Elem:      "Div",
		Parts: []part{
			{Name: "uk-card"},
			{Name: "uk-card-body"},
		},
	},
	{
		Name:      "container",
		Doc:       "allows you to align and center your page content",
		Modifiers: modifierByPrefix("uk-container"),
		Elem:      "Div",
		Parts: []part{
			{Name: "uk-container"},
		},
	},
	{
		Name:      "grid",
		Doc:       "creates a fully responsive, fluid and nestable grid layout",
		Modifiers: modifierByPrefix("uk-grid"),
		Elem:      "Div",
		Option:    true,
	},
	{
		Name:      "leader",
		Doc:       "creates dot leaders for pricing menus or tables of contents",
		Modifiers: modifierByPrefix("uk-leader"),
		Elem:      "Div",
		Option:    true,
	},
	{
		Name:      "lightbox",
		Doc:       "creates a responsive lightbox gallery with images and videos",
		Modifiers: modifierByPrefix("uk-lightbox"),
		Elem:      "Div",
		Option:    true,
	},
	{
		Name:      "marker",
		Doc:       "creates a marker icon that can be displayed on top of images",
		Modifiers: modifierByPrefix("uk-marker"),
		Elem:      "A",
		Option:    true,
	},
	{
		Name:      "section",
		Doc:       "creates horizontal layout sections with different background colors and styles",
		Modifiers: modifierByPrefix("uk-section", "uk-padding-remove-vertical"),
		Elem:      "Div",
		Parts: []part{
			{Name: "uk-section"},
		},
	},
}

// modifiers maps a UIkit CSS class to the method generated for it.
var modifiers = map[string]modifier{
	// Alert
	"uk-alert-primary": {
		Name:  "Primary",
		Class: "uk-alert-primary",
		Doc:   "gives the message a prominent styling.",
	},
	"uk-alert-success": {
		Name:  "Success",
		Class: "uk-alert-success",
		Doc:   "indicates success or a positive message.",
	},
	"uk-alert-warning": {
		Name:  "Warning",
		Class: "uk-alert-warning",
		Doc:   "indicates a message containing a warning.",
	},
	"uk-alert-danger": {
		Name:  "Danger",
		Class: "uk-alert-danger",
		Doc:   "indicates an important or error message.",
	},

	// Button
	"uk-button-default": {
		Name:  "Default",
		Class: "uk-button-default",
		Doc:   "button style.",
	},
	"uk-button-primary": {
		Name:  "Primary",
		Class: "uk-button-primary",
		Doc:   "indicates the primary action.",
	},
	"uk-button-secondary": {
		Name:  "Secondary",
		Class: "uk-button-secondary",
		Doc:   "indicates an important action.",
	},
	"uk-button-danger": {
		Name:  "Danger",
		Class: "uk-button-danger",
		Doc:   "indicates a dangerous or negative action.",
	},
	"uk-button-text": {
		Name:  "Text",
		Class: "uk-button-text",
		Doc:   "applies an alternative, typographic style.",
	},
	"uk-button-link": {
		Name:  "Link",
		Class: "uk-button-link",
		Doc:   "makes a <button> look like an <a> element.",
	},
	"uk-button-small": {
		Name:  "Small",
		Class: "uk-button-small",
		Doc:   "makes a <button> look smaller.",
	},
	"uk-button-large": {
		Name:  "Large",
		Class: "uk-button-large",
		Doc:   "makes a <button> look larger.",
	},

	// Card
	"uk-card-default": {
		Name:  "Default",
		Class: "uk-card-default",
		Doc:   "to create a visually styled box.",
	},
	"uk-card-primary": {
		Name:  "Primary",
		Class: "uk-card-primary",
		Doc:   "to modify the card and emphasize it with a primary color.",
	},
	"uk-card-secondary": {
		Name:  "Secondary",
		Class: "uk-card-secondary",
		Doc:   "to modify the card and give it a secondary background color.",
	},
	"uk-card-hover": {
		Name:  "Hover",
		Class: "uk-card-hover",
		Doc:   "to create a hover effect on the card.",
	},
	"uk-card-small": {
		Name:  "Small",
		Class: "uk-card-small",
		Doc:   "to apply a smaller padding.",
	},
	"uk-card-large": {
		Name:  "Large",
		Class: "uk-card-large",
		Doc:   "to apply a larger padding.",
	},

	// Container
	"uk-container-xsmall": {
		Name:  "XSmall",
		Class: "uk-container-xsmall",
		Doc:   "for a xsmall container.",
	},
	"uk-container-small": {
		Name:  "Small",
		Class: "uk-container-small",
		Doc:   "for a small container.",
	},
	"uk-container-large": {
		Name:  "Large",
		Class: "uk-container-large",
		Doc:   "for a large container.",
	},
	"uk-container-xlarge": {
		Name:  "XLarge",
		Class: "uk-container-xlarge",
		Doc:   "for a xlarge container.",
	},
	"uk-container-expand": {
		Name:  "Expand",
		Class: "uk-container-expand",
		Doc:   "if you do not want to limit the container width but still want the dynamic horizontal padding.",
	},

	// Grid
	"uk-grid-small": {
		Name:  "Small",
		Class: "uk-grid-small",
		Doc:   "to apply a small gap.",
	},
	"uk-grid-medium": {
		Name:  "Medium",
		Class: "uk-grid-medium",
		Doc:   "to apply a medium gap like the default one, but without a breakpoint.",
	},
	"uk-grid-large": {
		Name:  "Large",
		Class: "uk-grid-large",
		Doc:   "to apply a large gap with breakpoints.",
	},
	"uk-grid-collapse": {
		Name:  "Collapse",
		Class: "uk-grid-collapse",
		Doc:   "to remove the grid gap entirely.",
	},
	"uk-grid-divider": {
		Name:  "Divider",
		Class: "uk-grid-divider",
		Doc:   "to separate grid cells with lines.",
	},
	"uk-grid-match": {
		Name:  "MatchHeight",
		Class: "uk-grid-match",
		Doc:   "to match the height of the direct child of each cell.",
	},

	// Section
	"uk-section-default": {
		Name:  "Default",
		Class: "uk-section-default",
		Doc:   "adds the default background color of your site.",
	},
	"uk-section-muted": {
		Name:  "Muted",
		Class: "uk-section-muted",
		Doc:   "adds a muted background color.",
	},
	"uk-section-primary": {
		Name:  "Primary",
		Class: "uk-section-primary",
		Doc:   "adds a primary background color.",
	},
	"uk-section-secondary": {
		Name:  "Secondary",
		Class: "uk-section-secondary",
		Doc:   "adds a secondary background color.",
	},
	"uk-section-xsmall": {
		Name:  "XSmall",
		Class: "uk-section-xsmall",
		Doc:   "to decrease a section's padding to a minimum.",
	},
	"uk-section-small": {
		Name:  "Small",
		Class: "uk-section-small",
		Doc:   "to decrease a section's padding.",
	},
	"uk-section-large": {
		Name:  "Large",
		Class: "uk-section-large",
		Doc:   "to increase a section's padding.",
	},
	"uk-section-xlarge": {
		Name:  "XLarge",
		Class: "uk-section-xlarge",
		Doc:   "to further increase a section's padding.",
	},

	// Padding
	"uk-padding-remove-vertical": {
		Name:  "RemoveVerticalPadding",
		Class: "uk-padding-remove-vertical",
		Doc:   "removes top and bottom padding from an element.",
	},

	"uk-width-1-1": {
		Name:  "FullWidth",
		Class: "uk-width-1-1",
		Doc:   "fills 100% of the available width.",
	},
}

var uikitTmpl = `
// Code generated by go generate; DO NOT EDIT.

package uikit

import (
	"github.com/maxence-charriere/go-app/v7/pkg/app"
)

// UI{{title .Name}} is a component that {{.Doc}}
type UI{{title .Name}} interface {
	app.UI

	// Class adds a CSS class to the {{.Name}}.
	Class(v string) UI{{title .Name}}

	// Content sets the main content.
	Content(elems ...app.UI) UI{{title .Name}}

	{{if .Option}}
	// Option sets a component option.
	Option(k string, v interface{}) UI{{title .Name}}
	{{end}}

	{{range $value := .Modifiers}}
	// {{title $value.Name}} {{$value.Doc}}
	{{title $value.Name}}() UI{{title $.Name}}
	{{end}}
}

type {{.Name}} struct {
	app.Compo

	Iclass   string
	Icontent []app.UI
	{{if .Option}}
	Ioptions map[string]interface{}
	{{end}}
}

// {{title .Name}} returns a {{.Name}} component.
func {{title .Name}}() UI{{title .Name}} {
	{{- if .Option}}
	return &{{.Name}}{}
	{{else}}
	return &{{.Name}}{
		Iclass: "{{join .Parts}}",
	}
	{{end -}}
}

func ({{id .Name}} *{{.Name}}) Class(v string) UI{{title .Name}} {
	if {{id .Name}}.Iclass != "" {
		{{id .Name}}.Iclass += " "
	}

	{{id .Name}}.Iclass += v
	return {{id .Name}}
}

func ({{id .Name}} *{{.Name}}) Content(elems ...app.UI) UI{{title .Name}} {
	{{id .Name}}.Icontent = app.FilterUIElems(elems...)
	return {{id .Name}}
}

{{if .Option}}
func ({{id .Name}} *{{.Name}}) Option(k string, v interface{}) UI{{title .Name}} {
	if {{id .Name}}.Ioptions == nil {
		{{id .Name}}.Ioptions = make(map[string]interface{}, 0)
	}

	{{id .Name}}.Ioptions[k] = v
	return {{id .Name}}
}
{{end}}

{{range $value := .Modifiers}}
func ({{id $.Name}} *{{$.Name}}) {{title $value.Name}}() UI{{title $.Name}} {
	{{id $.Name}}.Class("{{$value.Class}}")
	return {{id $.Name}}
}
{{end}}

func ({{id .Name}} *{{.Name}}) Render() app.UI {
	{{- if .Option}}
	opts, _ := JSONString({{id .Name}}.Ioptions)
	{{end}}

	return app.{{.Elem}}().
		{{if .Option}}
		DataSet("uk-{{lower .Name}}", opts).
		{{end}}
		Class({{id .Name}}.Iclass).
		Body({{id .Name}}.Icontent...)
}
`

func main() {
	generateUIkitGo()
}

// generateUIkitGo renders one Go source file per entry in components.
// The template is parsed once and reused for every component.
func generateUIkitGo() {
	funcMap := template.FuncMap{
		"title": strings.Title,
		"lower": strings.ToLower,
		"id": func(name string) string {
			return strings.ToLower(string(name[0]))
		},
		"join": func(parts []part) string {
			items := make([]string, 0, len(parts))
			for _, v := range parts {
				items = append(items, v.Name)
			}
			return strings.Join(items, " ")
		},
	}

	tmpl, err := template.New("component").Funcs(funcMap).Parse(uikitTmpl)
	if err != nil {
		panic(err)
	}

	for _, c := range components {
		// A helper per component so each file is closed as soon as it is
		// written, instead of deferring every Close to function exit.
		if err := writeComponent(tmpl, c); err != nil {
			panic(err)
		}
	}
}

// writeComponent renders a single component into <name>.go, closing the file
// before returning.
func writeComponent(tmpl *template.Template, c component) error {
	f, err := os.Create(fmt.Sprintf("%s.go", strings.ToLower(c.Name)))
	if err != nil {
		return err
	}

	if err := tmpl.Execute(f, c); err != nil {
		f.Close()
		return err
	}
	return f.Close()
}

// modifierByNames looks up modifiers by exact class name, sorted by Name.
// It panics on an unknown name so generation fails loudly.
func modifierByNames(names ...string) []modifier {
	res := make([]modifier, 0, len(names))
	for _, n := range names {
		mod, ok := modifiers[n]
		if !ok {
			panic("unknown modifier: " + n)
		}
		res = append(res, mod)
	}

	sort.Slice(res, func(i, j int) bool {
		return res[i].Name < res[j].Name
	})
	return res
}

// modifierByPrefix collects every modifier whose class starts with one of the
// given prefixes, sorted by Name.
func modifierByPrefix(prefixes ...string) []modifier {
	res := make([]modifier, 0, len(prefixes))
	for _, prefix := range prefixes {
		for k, mod := range modifiers {
			if strings.HasPrefix(k, prefix) {
				res = append(res, mod)
			}
		}
	}

	sort.Slice(res, func(i, j int) bool {
		return res[i].Name < res[j].Name
	})
	return res
}
package main

import "fmt"

// test2 prints a fixed sequence of progress messages, one per line.
func test2() {
	messages := []string{
		"小小雪第一次提交",
		"0609初始化",
		"小雪开发第a个功能",
		"小雪开发第b个功能",
	}
	for _, msg := range messages {
		fmt.Println(msg)
	}
}
package fitbit

import (
	"testing"
)

// TestErrorResponse is an empty placeholder.
// TODO: add assertions covering the package's error-response handling.
func TestErrorResponse(t *testing.T) {

}
package main

import (
	"bufio"
	"fmt"
	"github.com/glenbolake/aoc2018"
	"math"
	"os"
	"strconv"
	"strings"
)

// Nanobot is one bot from the puzzle input: an integer position and a
// signal radius.
type Nanobot struct {
	id      int
	x, y, z int
	r       int
}

func (n Nanobot) String() string {
	return fmt.Sprintf("(%d,%d,%d)%d", n.x, n.y, n.z, n.r)
}

// distTo returns the Manhattan distance between two bots.
func (n Nanobot) distTo(other Nanobot) int {
	return aoc2018.Abs(n.x-other.x) + aoc2018.Abs(n.y-other.y) + aoc2018.Abs(n.z-other.z)
}

// part1 counts how many bots are within range of the bot that has the
// largest signal radius.
func part1(bots []Nanobot) int {
	var bestBot Nanobot
	bestRadius := 0
	for _, b := range bots {
		if b.r > bestRadius {
			bestRadius = b.r
			bestBot = b
		}
	}
	return botsInRange(bots, bestBot)
}

// botsInRange counts the bots whose own radius reaches the given point.
func botsInRange(bots []Nanobot, point Nanobot) int {
	count := 0
	for _, bot := range bots {
		if bot.distTo(point) <= bot.r {
			count += 1
		}
	}
	return count
}

// NanobotSet is a set of Nanobots backed by a map.
type NanobotSet struct {
	data map[Nanobot]struct{}
}

// String renders the member ids, e.g. "{1 4 7}" (map order, not sorted).
func (ns *NanobotSet) String() string {
	ids := make([]string, 0, len(ns.data))
	for datum := range ns.data {
		ids = append(ids, strconv.Itoa(datum.id))
	}
	return "{" + strings.Join(ids, " ") + "}"
}

// NewSet returns an empty, ready-to-use set.
func NewSet() *NanobotSet {
	s := &NanobotSet{}
	s.data = make(map[Nanobot]struct{})
	return s
}

func (ns *NanobotSet) Add(nanobot Nanobot) {
	ns.data[nanobot] = struct{}{}
}

func (ns *NanobotSet) Remove(nanobot Nanobot) {
	delete(ns.data, nanobot)
}

func (ns *NanobotSet) Contains(nanobot Nanobot) bool {
	_, ok := ns.data[nanobot]
	return ok
}

func (ns *NanobotSet) Size() int {
	return len(ns.data)
}

// Without returns a new set holding the members of ns not in other
// (set difference); ns is not modified.
func (ns *NanobotSet) Without(other *NanobotSet) *NanobotSet {
	rv := NewSet()
	for bot := range ns.data {
		if other.Contains(bot) {
			continue
		}
		rv.Add(bot)
	}
	return rv
}

// Intersect returns a new set with the members common to both sets.
func (ns *NanobotSet) Intersect(other *NanobotSet) *NanobotSet {
	rv := NewSet()
	for bot := range ns.data {
		if other.Contains(bot) {
			rv.Add(bot)
		}
	}
	return rv
}

// Union returns a new set with the members of either set.
func (ns *NanobotSet) Union(other *NanobotSet) *NanobotSet {
	rv := NewSet()
	for bot := range ns.data {
		rv.Add(bot)
	}
	for bot := range other.data {
		rv.Add(bot)
	}
	return rv
}

// BronKerbosch finds a largest clique in the given adjacency-set graph,
// seeding the recursion with R = {} (current clique), P = all vertices
// (candidates), X = {} (already-excluded vertices).
func BronKerbosch(graph map[Nanobot]*NanobotSet) *NanobotSet {
	R := NewSet()
	P := NewSet()
	X := NewSet()
	for bot := range graph {
		P.Add(bot)
	}
	return BronKerboschInner(graph, R, P, X)
}

// BronKerboschInner is the recursive step, using the pivot variant:
// only candidates outside the pivot's neighborhood are branched on,
// and the largest clique found in any branch is kept.
func BronKerboschInner(graph map[Nanobot]*NanobotSet, R, P, X *NanobotSet) *NanobotSet {
	if P.Size() == 0 && X.Size() == 0 {
		return R
	}
	// Pick the pivot with the most neighbors from P ∪ X.
	var pivot Nanobot
	var numNeighbors int
	for bot := range P.Union(X).data {
		neighbors := graph[bot]
		if neighbors.Size() > numNeighbors {
			pivot = bot
			numNeighbors = neighbors.Size()
		}
	}
	vs := P.Without(graph[pivot])
	clique := NewSet()
	for v := range vs.data {
		vSet := NewSet()
		vSet.Add(v)
		newClique := BronKerboschInner(graph, R.Union(vSet), P.Intersect(graph[v]), X.Intersect(graph[v]))
		if newClique.Size() > clique.Size() {
			clique = newClique
		}
		P.Remove(v)
		X.Add(v)
	}
	return clique
}

// part2 builds a graph whose edges join bots with overlapping ranges
// (Manhattan distance <= sum of radii), finds a maximum clique, and returns
// the distance from the origin to the nearest point covered by every clique
// member.
func part2(bots []Nanobot) int {
	graph := map[Nanobot]*NanobotSet{}
	edgeCount := 0
	for _, b := range bots {
		graph[b] = NewSet()
	}
	for i, bot1 := range bots {
		for _, bot2 := range bots[i+1:] {
			dist := bot1.distTo(bot2)
			if dist <= bot1.r+bot2.r {
				edgeCount += 1
				graph[bot1].Add(bot2)
				graph[bot2].Add(bot1)
			}
		}
	}
	clique := BronKerbosch(graph)
	//fmt.Println(clique)
	// NOTE(review): the names are confusing — "min" accumulates the LARGEST
	// lower-bound distance over clique members (the answer returned), while
	// "max" tracks the smallest upper bound and is never used afterwards.
	min, max := 0, math.MaxInt32
	origin := Nanobot{}
	for bot := range clique.data {
		minDist := origin.distTo(bot) - bot.r
		if minDist > min {
			min = minDist
		}
		maxDist := origin.distTo(bot) + bot.r
		if maxDist < max {
			max = maxDist
		}
	}
	return min
}

// main reads the day-23 puzzle input, one "pos=<x,y,z>, r=N" line per bot,
// and prints both part answers.
func main() {
	f, _ := os.Open("input/day23.txt")
	scanner := bufio.NewScanner(bufio.NewReader(f))
	var bots []Nanobot
	i := 0
	for scanner.Scan() {
		b := Nanobot{}
		fmt.Sscanf(scanner.Text(), "pos=<%d,%d,%d>, r=%d", &b.x, &b.y, &b.z, &b.r)
		b.id = i
		i++
		bots = append(bots, b)
	}
	fmt.Println(part1(bots))
	fmt.Println(part2(bots))
}
package ziface

// IRequest bundles an inbound request: the connection it arrived on and the
// raw payload the client sent, so a handler can read the data and reply on
// the same connection.
type IRequest interface {
	// GetConnection returns the connection this request arrived on.
	GetConnection() IConnect
	// GetData returns the raw request payload.
	GetData() []byte
}
package main

import (
	"context"
	"math/rand"
	"strconv"

	"github.com/aws/aws-lambda-go/lambda"
)

func main() {
	lambda.Start(handler)
}

// Event is one loan-quote request: the bank's lending limits (as strings),
// the requested amount/term, and the applicant's credit information.
type Event struct {
	BankInfo struct {
		MinCreditScore string `json:"minCreditScore,omitempty"`
		BaseRate       string `json:"baseRate,omitempty"`
		MaxLoanAmount  string `json:"maxLoanAmount,omitempty"`
	} `json:"bankInfo,omitempty"`
	Amount   float64 `json:"amount,omitempty"`
	Term     float64 `json:"term,omitempty"`
	BankName string  `json:"bankName"`
	Credit   struct {
		Score   float64 `json:"score,omitempty"`
		History float64 `json:"history,omitempty"`
	} `json:"credit,omitempty"`
}

// Output is the bank's quoted rate for the request.
type Output struct {
	Rate     float64 `json:"rate"`
	BankName string  `json:"bankName"`
}

// handler quotes a rate for one bank. It returns (nil, nil) — no offer, no
// error — when the amount exceeds the bank's limit or the credit score is
// too low, and an error when a bank limit fails to parse.
func handler(ctx context.Context, event Event) (*Output, error) {
	parse := strconv.ParseFloat

	maxLoanAmount, err := parse(event.BankInfo.MaxLoanAmount, 64)
	if err != nil {
		return nil, err
	}

	minCreditScore, err := parse(event.BankInfo.MinCreditScore, 64)
	if err != nil {
		return nil, err
	}

	declined := event.Amount > maxLoanAmount || event.Credit.Score < minCreditScore
	if declined {
		return nil, nil
	}

	baseRate, err := parse(event.BankInfo.BaseRate, 64)
	if err != nil {
		return nil, err
	}

	rate := baseRate*rand.Float64() + ((1000 - event.Credit.Score) / 100.0)
	return &Output{Rate: rate, BankName: event.BankName}, nil
}
package main

import "fmt"

// main reads an upper bound from stdin and prints every Fibonacci number
// strictly below it, one per line, starting from 1.
func main() {
	var limit int
	fmt.Scan(&limit)

	prev, cur := 0, 1
	for cur < limit {
		fmt.Println(cur)
		prev, cur = cur, prev+cur
	}
}
package main

import "log"

// State is one phase of the work day. WriteProgram either logs the phase or
// hands the (copied) Work off to the next state when the hour has passed its
// window.
type State interface {
	WriteProgram(work Work)
}

// Work is the state-machine context: current hour, active state, and whether
// today's tasks are finished.
type Work struct {
	hour    int
	current State
	finish  bool
}

func (w *Work) SetState(s State) {
	w.current = s
}

func (w *Work) SetHour(hour int) {
	w.hour = hour
}

func (w *Work) SetFinishState(finish bool) {
	w.finish = finish
}

// WriteProgram dispatches to the current state. Note the VALUE receiver:
// each state mutates and re-dispatches its own copy, so transitions only
// propagate through the recursive WriteProgram calls, never back to the
// caller's Work.
func (w Work) WriteProgram() {
	w.current.WriteProgram(w)
}

// ForenoonState: before 12:00.
type ForenoonState struct {
}

func (fs ForenoonState) WriteProgram(work Work) {
	if work.hour < 12 {
		log.Printf("上午\n")
	} else {
		// Past the window: advance to the next state and re-run.
		work.SetState(NoonState{})
		work.WriteProgram()
	}
}

// NoonState: before 13:00.
type NoonState struct {
}

func (ns NoonState) WriteProgram(work Work) {
	if work.hour < 13 {
		log.Printf("中午\n")
	} else {
		work.SetState(AfternoonState{})
		work.WriteProgram()
	}
}

// AfternoonState: before 17:00.
type AfternoonState struct {
}

func (as AfternoonState) WriteProgram(work Work) {
	if work.hour < 17 {
		log.Printf("下午\n")
	} else {
		work.SetState(EveningState{})
		work.WriteProgram()
	}
}

// EveningState: after hours — rest if finished, otherwise keep working
// until 21:00, then sleep.
type EveningState struct {
}

func (es EveningState) WriteProgram(work Work) {
	if work.finish {
		work.SetState(RestState{})
		work.WriteProgram()
	} else {
		if work.hour < 21 {
			log.Printf("晚间\n")
		} else {
			work.SetState(SleepingState{})
			work.WriteProgram()
		}
	}
}

// SleepingState: terminal state, logs that the worker fell asleep.
type SleepingState struct {
}

func (ss SleepingState) WriteProgram(work Work) {
	log.Printf("睡着了\n")
}

// RestState: terminal state, logs that the worker went home.
type RestState struct {
}

func (rs RestState) WriteProgram(work Work) {
	log.Printf("下班回家\n")
}

// main drives the state machine: hour 22 with work finished, so the chain
// runs Forenoon -> Noon -> Afternoon -> Evening -> Rest.
func main() {
	tWork := Work{}
	tState := ForenoonState{}
	tWork.SetState(tState)
	tWork.SetFinishState(true)
	tWork.SetHour(22)
	tWork.WriteProgram()
}
package chat_server

import (
	"context"
	"encoding/json"
	"log"

	"github.com/go-redis/redis/v8"
)

// Command is a client request: a command verb and its free-form payload.
type Command struct {
	Command string                      `json:"command"`
	Data    map[string](interface{}) `json:"data"`
}

// MessageListReply is the response to a "chatlog" command.
type MessageListReply struct {
	Command  string    `json:"command"`
	Messages []Message `json:"messages"`
}

// Controller handles chat commands against a Redis backend.
type Controller struct {
	rdb *redis.Client
}

// NewController returns a Controller bound to the given Redis client.
func NewController(rdb *redis.Client) *Controller {
	return &Controller{
		rdb,
	}
}

// HandleIncomingCommand dispatches one client command:
//   - "chatlog": reply to the client with the last 20 stored messages.
//   - "sendmessage": append the payload to the chat log and publish it.
//
// Failures are logged and the command is dropped rather than crashing the
// connection.
func (c *Controller) HandleIncomingCommand(command Command, client *Client) {
	ctx := context.Background()

	switch command.Command {
	case "chatlog":
		messages, err := c.rdb.LRange(ctx, "chatlog", -20, -1).Result()
		if err != nil {
			log.Printf("chatlog: LRange failed: %v", err)
			return
		}
		reply, err := json.Marshal(MessageListReply{
			Command:  command.Command,
			Messages: RedisMessagesUnmarshaller(messages),
		})
		if err != nil {
			log.Printf("chatlog: marshalling reply failed: %v", err)
			return
		}
		client.send <- reply
	case "sendmessage":
		store, err := json.Marshal(command.Data)
		if err != nil {
			log.Printf("sendmessage: marshalling payload failed: %v", err)
			return
		}
		c.rdb.RPush(ctx, "chatlog", store)
		c.rdb.Publish(ctx, "chatlog_channel", store)
	}
}
package queue

// Queue describes a node-backed FIFO queue of arbitrary values.
type Queue interface {
	// Length returns the number of elements currently stored.
	Length() int
	// Capacity returns the queue's capacity.
	// NOTE(review): presumably the maximum number of storable elements —
	// confirm against the implementations.
	Capacity() int
	// Front returns the node at the head of the queue.
	Front() *Node
	// Rear returns the node at the tail of the queue.
	Rear() *Node
	// Enqueue appends value at the rear; the bool presumably reports
	// whether the element fit — confirm against the implementations.
	Enqueue(value interface{}) bool
	// Dequeue removes and returns the value at the front.
	Dequeue() interface{}
}
// Copyright 2017 The OpenSDS Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); you may // not use this file except in compliance with the License. You may obtain // a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, WITHOUT // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the // License for the specific language governing permissions and limitations // under the License. package lvm import ( "fmt" "reflect" "testing" . "github.com/sodafoundation/dock/contrib/drivers/utils/config" "github.com/sodafoundation/dock/pkg/model" pb "github.com/sodafoundation/dock/pkg/model/proto" "github.com/sodafoundation/dock/pkg/utils/config" "github.com/sodafoundation/dock/pkg/utils/exec" ) var fp = map[string]PoolProperties{ "vg001": { StorageType: "block", AvailabilityZone: "default", MultiAttach: true, Extras: model.StoragePoolExtraSpec{ DataStorage: model.DataStorageLoS{ ProvisioningPolicy: "Thin", Compression: false, Deduplication: false, }, IOConnectivity: model.IOConnectivityLoS{ AccessProtocol: "iscsi", MaxIOPS: 7000000, MaxBWS: 600, MinIOPS: 1000000, MinBWS: 100, Latency: 100, }, Advanced: map[string]interface{}{ "diskType": "SSD", "latency": "5ms", }, }, }, } func TestSetup(t *testing.T) { var d = &Driver{} config.CONF.OsdsDock.Backends.LVM.ConfigPath = "testdata/lvm.yaml" var expectedDriver = &Driver{ conf: &LVMConfig{ Pool: fp, TgtBindIp: "192.168.56.105", TgtConfDir: "/etc/tgt/conf.d", EnableChapAuth: true, }, } if err := d.Setup(); err != nil { t.Errorf("Setup lvm driver failed: %+v\n", err) } if !reflect.DeepEqual(d.conf, expectedDriver.conf) { t.Errorf("Expected %+v, got %+v", expectedDriver.conf, d.conf) } } type FakeResp struct { out string err error } func NewFakeExecuter(respMap map[string]*FakeResp) exec.Executer { return &FakeExecuter{RespMap: 
respMap} } type FakeExecuter struct { RespMap map[string]*FakeResp } func (f *FakeExecuter) Run(name string, args ...string) (string, error) { var cmd = name if name == "env" { cmd = args[1] } v, ok := f.RespMap[cmd] if !ok { return "", fmt.Errorf("can find specified op: %s", args[1]) } return v.out, v.err } func TestCreateVolume(t *testing.T) { var fd = &Driver{} config.CONF.OsdsDock.Backends.LVM.ConfigPath = "testdata/lvm.yaml" fd.Setup() respMap := map[string]*FakeResp{ "lvcreate": {"", nil}, } fd.cli.RootExecuter = NewFakeExecuter(respMap) fd.cli.BaseExecuter = NewFakeExecuter(respMap) opt := &pb.CreateVolumeOpts{ Id: "e1bb066c-5ce7-46eb-9336-25508cee9f71", Name: "test001", Description: "volume for testing", Size: int64(1), PoolName: "vg001", } var expected = &model.VolumeSpec{ BaseModel: &model.BaseModel{}, Name: "test001", Description: "volume for testing", Size: int64(1), Identifier: &model.Identifier{DurableName: "61bb066c5ce746eb933625508cee9f71", DurableNameFormat: "NAA"}, Metadata: map[string]string{ "lvPath": "/dev/vg001/volume-e1bb066c-5ce7-46eb-9336-25508cee9f71", }, } vol, err := fd.CreateVolume(opt) if err != nil { t.Error("Failed to create volume:", err) } vol.Id = "" if !reflect.DeepEqual(vol, expected) { t.Errorf("Expected %+v, got %+v\n", expected, vol) } } func TestCreateVolumeFromSnapshot(t *testing.T) { var fd = &Driver{} config.CONF.OsdsDock.Backends.LVM.ConfigPath = "testdata/lvm.yaml" fd.Setup() respMap := map[string]*FakeResp{ "lvcreate": {"", nil}, "dd": {"", nil}, } fd.cli.RootExecuter = NewFakeExecuter(respMap) fd.cli.BaseExecuter = NewFakeExecuter(respMap) opt := &pb.CreateVolumeOpts{ Id: "e1bb066c-5ce7-46eb-9336-25508cee9f71", Name: "test001", Description: "volume for testing", Size: int64(1), PoolName: "vg001", SnapshotId: "3769855c-a102-11e7-b772-17b880d2f537", SnapshotSize: int64(1), } var expected = &model.VolumeSpec{ BaseModel: &model.BaseModel{}, Name: "test001", Description: "volume for testing", Size: int64(1), Identifier: 
&model.Identifier{DurableName: "61bb066c5ce746eb933625508cee9f71", DurableNameFormat: "NAA"}, Metadata: map[string]string{ "lvPath": "/dev/vg001/volume-e1bb066c-5ce7-46eb-9336-25508cee9f71", }, } vol, err := fd.CreateVolume(opt) if err != nil { t.Error("Failed to create volume:", err) } vol.Id = "" if !reflect.DeepEqual(vol, expected) { t.Errorf("Expected %+v, got %+v\n", expected, vol) } } func TestDeleteVolume(t *testing.T) { var fd = &Driver{} config.CONF.OsdsDock.Backends.LVM.ConfigPath = "testdata/lvm.yaml" fd.Setup() respMap := map[string]*FakeResp{ "lvdisplay": {"-wi-a-----", nil}, "lvremove": {"", nil}, } fd.cli.RootExecuter = NewFakeExecuter(respMap) fd.cli.BaseExecuter = NewFakeExecuter(respMap) opt := &pb.DeleteVolumeOpts{ Metadata: map[string]string{ "lvPath": "/dev/vg001/test001", }, } if err := fd.DeleteVolume(opt); err != nil { t.Error("Failed to delete volume:", err) } } func TestExtendVolume(t *testing.T) { var fd = &Driver{} config.CONF.OsdsDock.Backends.LVM.ConfigPath = "testdata/lvm.yaml" fd.Setup() respMap := map[string]*FakeResp{ "lvdisplay": {"-wi-a-----", nil}, "lvchange": {"", nil}, "lvextend": {"", nil}, } fd.cli.RootExecuter = NewFakeExecuter(respMap) fd.cli.BaseExecuter = NewFakeExecuter(respMap) opt := &pb.ExtendVolumeOpts{ Id: "591c43e6-1156-42f5-9fbc-161153da185c", Metadata: map[string]string{ "lvPath": "/dev/vg001/test001", }, Size: int64(1), } vol, err := fd.ExtendVolume(opt) if err != nil { t.Error("Failed to extend volume:", err) } if vol.Size != 1 { t.Errorf("Expected %+v, got %+v\n", 1, vol.Size) } } func TestCreateSnapshot(t *testing.T) { var fd = &Driver{} config.CONF.OsdsDock.Backends.LVM.ConfigPath = "testdata/lvm.yaml" fd.Setup() respMap := map[string]*FakeResp{ "lvcreate": {"-wi-a-----", nil}, } fd.cli.RootExecuter = NewFakeExecuter(respMap) fd.cli.BaseExecuter = NewFakeExecuter(respMap) opt := &pb.CreateVolumeSnapshotOpts{ Id: "d1916c49-3088-4a40-b6fb-0fda18d074c3", Name: "snap001", Description: "volume snapshot for 
testing", Size: int64(1), VolumeId: "bd5b12a8-a101-11e7-941e-d77981b584d8", Metadata: map[string]string{ "lvPath": "/dev/vg001/test001", }, } var expected = &model.VolumeSnapshotSpec{ BaseModel: &model.BaseModel{}, Name: "snap001", Description: "volume snapshot for testing", Size: int64(1), VolumeId: "bd5b12a8-a101-11e7-941e-d77981b584d8", Metadata: map[string]string{ "lvsPath": "/dev/vg001/_snapshot-d1916c49-3088-4a40-b6fb-0fda18d074c3", }, } snp, err := fd.CreateSnapshot(opt) if err != nil { t.Error("Failed to create volume snapshot:", err) } snp.Id = "" snp.Metadata["lvsPath"] = "/dev/vg001/_snapshot-d1916c49-3088-4a40-b6fb-0fda18d074c3" if !reflect.DeepEqual(snp, expected) { t.Errorf("Expected %+v, got %+v\n", expected, snp) } } func TestDeleteSnapshot(t *testing.T) { var fd = &Driver{} config.CONF.OsdsDock.Backends.LVM.ConfigPath = "testdata/lvm.yaml" fd.Setup() lvsResp := ` _snapshot-f0594d2b-ffdf-4947-8380-089f0bc17389 volume-0e2f4a9e-4a94-4d27-b1b4-83464811605c volume-591c43e6-1156-42f5-9fbc-161153da185c root swap_1 ` respMap := map[string]*FakeResp{ "lvs": {lvsResp, nil}, "lvdisplay": {"-wi-a-----", nil}, "lvremove": {"", nil}, } fd.cli.RootExecuter = NewFakeExecuter(respMap) fd.cli.BaseExecuter = NewFakeExecuter(respMap) opt := &pb.DeleteVolumeSnapshotOpts{ Metadata: map[string]string{ "lvsPath": "/dev/vg001/snap001", }, } if err := fd.DeleteSnapshot(opt); err != nil { t.Error("Failed to delete volume snapshot:", err) } } func TestListPools(t *testing.T) { var fd = &Driver{} config.CONF.OsdsDock.Backends.LVM.ConfigPath = "testdata/lvm.yaml" fd.Setup() var vgsResp = ` vg001 18.00 18.00 ahF6kS-QNOH-X63K-avat-6Kag-XLTo-c9ghQ6 ubuntu-vg 127.52 0.03 fQbqtg-3vDQ-vk3U-gfsT-50kJ-30pq-OZVSJH ` respMap := map[string]*FakeResp{ "vgs": {vgsResp, nil}, } fd.cli.RootExecuter = NewFakeExecuter(respMap) fd.cli.BaseExecuter = NewFakeExecuter(respMap) var expected = []*model.StoragePoolSpec{ { BaseModel: &model.BaseModel{}, Name: "vg001", TotalCapacity: int64(18), 
FreeCapacity: int64(18), AvailabilityZone: "default", StorageType: "block", MultiAttach: true, Extras: model.StoragePoolExtraSpec{ DataStorage: model.DataStorageLoS{ ProvisioningPolicy: "Thin", Compression: false, Deduplication: false, }, IOConnectivity: model.IOConnectivityLoS{ AccessProtocol: "iscsi", MaxIOPS: 7000000, MaxBWS: 600, MinIOPS: 1000000, MinBWS: 100, Latency: 100, }, Advanced: map[string]interface{}{ "diskType": "SSD", "latency": "5ms", }, }, }, } pols, err := fd.ListPools() if err != nil { t.Error("Failed to list pools:", err) } for i := range pols { pols[i].Id = "" } if !reflect.DeepEqual(pols, expected) { t.Errorf("Expected %+v, got %+v\n", expected[0], pols[0]) } }
// Copyright 2020 Readium Foundation. All rights reserved. // Use of this source code is governed by a BSD-style license // that can be found in the LICENSE file exposed on Github (readium) in the project repository. package transactions import ( "database/sql" "errors" "log" "strings" "time" "github.com/readium/readium-lcp-server/config" "github.com/readium/readium-lcp-server/status" ) var NotFound = errors.New("Event not found") type Transactions interface { Get(id int) (Event, error) Add(e Event, eventType int) error GetByLicenseStatusId(licenseStatusFk int) func() (Event, error) CheckDeviceStatus(licenseStatusFk int, deviceId string) (string, error) ListRegisteredDevices(licenseStatusFk int) func() (Device, error) } type RegisteredDevicesList struct { ID string `json:"id"` Devices []Device `json:"devices"` } type Device struct { DeviceId string `json:"id"` DeviceName string `json:"name"` Timestamp time.Time `json:"timestamp"` } type Event struct { ID int `json:"-"` DeviceName string `json:"name"` Timestamp time.Time `json:"timestamp"` Type string `json:"type"` DeviceId string `json:"id"` LicenseStatusFk int `json:"-"` } type dbTransactions struct { db *sql.DB get *sql.Stmt add *sql.Stmt getbylicensestatusid *sql.Stmt checkdevicestatus *sql.Stmt listregistereddevices *sql.Stmt } // Get returns an event by its id // func (i dbTransactions) Get(id int) (Event, error) { records, err := i.get.Query(id) var typeInt int defer records.Close() if records.Next() { var e Event err = records.Scan(&e.ID, &e.DeviceName, &e.Timestamp, &typeInt, &e.DeviceId, &e.LicenseStatusFk) if err == nil { e.Type = status.EventTypes[typeInt] } return e, err } return Event{}, NotFound } // Add adds an event in the database, // The parameter eventType corresponds to the field 'type' in table 'event' // func (i dbTransactions) Add(e Event, eventType int) error { add, err := i.db.Prepare("INSERT INTO event (device_name, timestamp, type, device_id, license_status_fk) VALUES (?, ?, ?, ?, ?)") if 
err != nil { return err } defer add.Close() _, err = add.Exec(e.DeviceName, e.Timestamp, eventType, e.DeviceId, e.LicenseStatusFk) return err } // GetByLicenseStatusId returns all events by license status id // func (i dbTransactions) GetByLicenseStatusId(licenseStatusFk int) func() (Event, error) { rows, err := i.getbylicensestatusid.Query(licenseStatusFk) if err != nil { return func() (Event, error) { return Event{}, err } } return func() (Event, error) { var e Event var err error var typeInt int if rows.Next() { err = rows.Scan(&e.ID, &e.DeviceName, &e.Timestamp, &typeInt, &e.DeviceId, &e.LicenseStatusFk) if err == nil { e.Type = status.EventTypes[typeInt] } } else { rows.Close() err = NotFound } return e, err } } // ListRegisteredDevices returns all devices which have an 'active' status by licensestatus id // func (i dbTransactions) ListRegisteredDevices(licenseStatusFk int) func() (Device, error) { rows, err := i.listregistereddevices.Query(licenseStatusFk) if err != nil { return func() (Device, error) { return Device{}, err } } return func() (Device, error) { var d Device var err error if rows.Next() { err = rows.Scan(&d.DeviceId, &d.DeviceName, &d.Timestamp) } else { rows.Close() err = NotFound } return d, err } } // CheckDeviceStatus gets the current status of a device // if the device has not been recorded in the 'event' table, typeString is empty. 
// func (i dbTransactions) CheckDeviceStatus(licenseStatusFk int, deviceId string) (string, error) { var typeString string var typeInt int row := i.checkdevicestatus.QueryRow(licenseStatusFk, deviceId) err := row.Scan(&typeInt) if err == nil { typeString = status.EventTypes[typeInt] } else { if err == sql.ErrNoRows { return typeString, nil } } return typeString, err } // Open defines scripts for queries & create the 'event' table if it does not exist // func Open(db *sql.DB) (t Transactions, err error) { // if sqlite, create the event table in the lsd db if it does not exist if strings.HasPrefix(config.Config.LsdServer.Database, "sqlite") { _, err = db.Exec(tableDef) if err != nil { log.Println("Error creating sqlite event table") return } } // select an event by its id get, err := db.Prepare("SELECT * FROM event WHERE id = ? LIMIT 1") if err != nil { return } getbylicensestatusid, err := db.Prepare("SELECT * FROM event WHERE license_status_fk = ?") // the status of a device corresponds to the latest event stored in the db. checkdevicestatus, err := db.Prepare(`SELECT type FROM event WHERE license_status_fk = ? AND device_id = ? ORDER BY timestamp DESC LIMIT 1`) listregistereddevices, err := db.Prepare(`SELECT device_id, device_name, timestamp FROM event WHERE license_status_fk = ? AND type = 1`) if err != nil { return } t = dbTransactions{db, get, nil, getbylicensestatusid, checkdevicestatus, listregistereddevices} return } const tableDef = "CREATE TABLE IF NOT EXISTS event (" + "id integer PRIMARY KEY," + "device_name varchar(255) DEFAULT NULL," + "timestamp datetime NOT NULL," + "type int NOT NULL," + "device_id varchar(255) DEFAULT NULL," + "license_status_fk int NOT NULL," + "FOREIGN KEY(license_status_fk) REFERENCES license_status(id)" + ");" + "CREATE INDEX IF NOT EXISTS license_status_fk_index on event (license_status_fk);"
package messaging //Queue which can send and receive message type Queue interface { //GetName give the queue name GetName() string //Receive a message from the queue Receive() (WorkItem, error) //Send a message to the destination queue Send(destination string, message WorkItem) error }
package logging import ( "io" "os" "strings" "time" logrus_stack "github.com/Gurpartap/logrus-stack" "github.com/sirupsen/logrus" "github.com/authelia/authelia/v4/internal/configuration/schema" ) // Logger returns the standard logrus logger. func Logger() *logrus.Logger { return logrus.StandardLogger() } // LoggerPrintf returns a new PrintfLogger given a level. func LoggerPrintf(level logrus.Level) (logger *PrintfLogger) { return &PrintfLogger{ level: level, logrus: logrus.StandardLogger(), } } // LoggerCtxPrintf returns a new CtxPrintfLogger given a level. func LoggerCtxPrintf(level logrus.Level) (logger *CtxPrintfLogger) { return &CtxPrintfLogger{ level: level, logrus: logrus.StandardLogger(), } } // InitializeLogger configures the default logger similar to ConfigureLogger but also configures the stack levels hook. func InitializeLogger(config schema.Log, log bool) (err error) { var callerLevels []logrus.Level stackLevels := []logrus.Level{logrus.PanicLevel, logrus.FatalLevel, logrus.ErrorLevel} logrus.AddHook(logrus_stack.NewHook(callerLevels, stackLevels)) return ConfigureLogger(config, log) } // ConfigureLogger configures the default loggers level, formatting, and the output destinations. 
func ConfigureLogger(config schema.Log, log bool) (err error) { setLevelStr(config.Level, log) switch config.Format { case FormatJSON: logrus.SetFormatter(&logrus.JSONFormatter{}) default: logrus.SetFormatter(&logrus.TextFormatter{}) } var writers []io.Writer switch { case config.FilePath != "": var file *os.File if file, err = os.OpenFile(strings.ReplaceAll(config.FilePath, "%d", time.Now().Format(time.RFC3339)), os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0600); err != nil { return err } if config.Format != FormatJSON { logrus.SetFormatter(&logrus.TextFormatter{ DisableColors: true, FullTimestamp: true, }) } writers = []io.Writer{file} if config.KeepStdout { writers = append(writers, os.Stdout) } default: writers = []io.Writer{os.Stdout} } logrus.SetOutput(io.MultiWriter(writers...)) return nil } func setLevelStr(level string, log bool) { switch level { case LevelError: logrus.SetLevel(logrus.ErrorLevel) case LevelWarn: logrus.SetLevel(logrus.WarnLevel) case LevelInfo: logrus.SetLevel(logrus.InfoLevel) case LevelDebug: logrus.SetLevel(logrus.DebugLevel) case LevelTrace: logrus.SetLevel(logrus.TraceLevel) default: level = "info (default)" logrus.SetLevel(logrus.InfoLevel) } if log { logrus.Infof("Log severity set to %s", level) } }
package container import ( "go/internal/pkg/config" "github.com/gin-gonic/gin" "go.uber.org/dig" ) func BuildConfigContainer(container *dig.Container) error { //debug mode if err := container.Provide(config.NewAppConfig); err != nil { return err } if err := container.Provide(config.NewDatabase); err != nil { return err } if err := container.Provide(func() *gin.Engine { return gin.Default() }); err != nil { return err } return nil }
package api // Skel 用于示例 type Skel struct { ID int64 `json:"id" bson:"_id"` } // NewSkel 生成skel对象 func NewSkel() *Skel { return &Skel{} }
package datastore import ( "testing" "github.com/google/uuid" "github.com/RicardoCampos/goauth/oauth2" "github.com/stretchr/testify/assert" ) func getToken() oauth2.ReferenceToken { expiry := oauth2.GetNowInEpochTime() + 300000 token, _ := oauth2.NewReferenceToken(uuid.New().String(), "myclient", expiry, "an_access_token") return token } func TestAddTokenWithValidToken(t *testing.T) { // Act repository := NewInMemoryReferenceTokenRepository() token := getToken() // Assert err := repository.AddToken(token) assert.Nil(t, err, "Should not return an error response") } func TestAddTokenWithInvalidToken(t *testing.T) { // Act repository := NewInMemoryReferenceTokenRepository() // Assert err := repository.AddToken(nil) assert.NotNil(t, err, "Should return an error response") } func TestGetTokenWithValidTokenID(t *testing.T) { // Act repository := NewInMemoryReferenceTokenRepository() token := getToken() tokenID := token.TokenID() repository.AddToken(token) // Assert result, ok, err := repository.GetToken(tokenID) assert.True(t, ok, "It should return ok") assert.Nil(t, err, "Should not return an error response") assert.NotNil(t, result, "Should return reference token") assert.Equal(t, tokenID, result.TokenID()) } func TestGetTokenWNotInRepository(t *testing.T) { // Act repository := NewInMemoryReferenceTokenRepository() token := getToken() repository.AddToken(token) // Assert result, ok, err := repository.GetToken("foobar") assert.False(t, ok, "It should return not ok") assert.NotNil(t, err, "Should return an error response") assert.Nil(t, result, "Should not return an item") } func TestGetTokenWithInvalidTokenID(t *testing.T) { // Act repository := NewInMemoryReferenceTokenRepository() token := getToken() repository.AddToken(token) // Assert result, ok, err := repository.GetToken("") assert.False(t, ok, "It should return not ok") assert.NotNil(t, err, "Should return an error response") assert.Nil(t, result, "Should not return a token") }
package main

import (
	"fmt"
	"strings"

	shared "github.com/corymurphy/adventofcode/shared"
)

// main solves both puzzle parts from the "input" file and prints the answers.
func main() {
	input := shared.ReadInput("input")

	part1 := part1(input)
	part2 := part2(input)

	fmt.Printf("\nPart 1 answer: %d\n\n", part1)
	fmt.Printf("\nPart 2 answer: %d\n\n", part2)
}

// NewGrid allocates a size x size grid of ints, zero-initialised.
// (The original also set grid[0][0] = 0 explicitly, which was a no-op,
// and carried commented-out dead code; both removed.)
func NewGrid(size int) [][]int {
	grid := make([][]int, size)
	for i := range grid {
		grid[i] = make([]int, size)
	}
	return grid
}

// PrintGrid prints the grid top-down as raw cell values (debug helper).
func PrintGrid(grid [][]int) {
	for i := 0; i < len(grid); i++ {
		fmt.Println("")
		for j := 0; j < len(grid[i]); j++ {
			fmt.Printf("%d", grid[i][j])
		}
	}
	fmt.Println("")
	fmt.Println("")
}

// PrintGridBackwards prints the grid bottom-up: '#' for positive (visited)
// cells, 'H' for negative markers, '.' otherwise (debug helper).
func PrintGridBackwards(grid [][]int) {
	for i := len(grid) - 1; i >= 0; i-- {
		fmt.Println("")
		for j := 0; j < len(grid[i]); j++ {
			switch {
			case grid[i][j] >= 1:
				fmt.Printf("%s", "#")
			case grid[i][j] <= -1:
				fmt.Printf("%s", "H")
			default:
				fmt.Printf("%s", ".")
			}
		}
	}
	fmt.Println("")
	fmt.Println("")
}

// GetBridgeSize returns the largest single-move distance in the input
// (second space-separated field of each line), used to size the grid.
func GetBridgeSize(input []string) int {
	size := 0
	for _, row := range input {
		distance := shared.ToInt(strings.Split(row, " ")[1])
		if distance > size {
			size = distance
		}
	}
	return size
}

// setStartVisited marks the rope's starting cell as visited.
func setStartVisited(grid *[][]int, coor *Coordinates) {
	(*grid)[coor.Y][coor.X] = 1
}

// CountVisited counts cells with a positive visit marker.
func CountVisited(grid [][]int) int {
	visited := 0
	for _, col := range grid {
		for _, loc := range col {
			if loc > 0 {
				visited++
			}
		}
	}
	return visited
}

// SimulateBridge replays every movement instruction over the grid using a
// rope of tailCount+1 knots (head plus tails), marking visited cells.
func SimulateBridge(input []string, grid [][]int, tailCount int) [][]int {
	rope := NewCoordinatesList(tailCount + 1)
	setStartVisited(&grid, rope[0])
	for _, item := range input {
		instruction := NewInstruction(item)
		Move(&grid, instruction, &rope)
	}
	return grid
}

// part1 simulates a rope with a single tail knot.
func part1(input []string) int {
	grid := NewGrid(GetBridgeSize(input) * 20)
	grid = SimulateBridge(input, grid, 1)
	return CountVisited(grid)
}

// part2 simulates a rope with nine tail knots.
func part2(input []string) int {
	grid := NewGrid(GetBridgeSize(input) * 20)
	grid = SimulateBridge(input, grid, 9)
	return CountVisited(grid)
}
package compute import ( "encoding/json" "encoding/xml" "fmt" "net/http" "net/url" ) // ServerAntiAffinityRule represents an anti-affinity rule between 2 servers. type ServerAntiAffinityRule struct { // The anti-affinity rule Id. ID string `json:"id"` // The 2 servers that the rule relates to. // // Only ever contains exactly 2 servers. // // This is only declared as an array because that's what the CloudControl API returns. Servers []ServerSummary `json:"serverSummary"` // The network domain's current state. State string `json:"state"` // The network domain's creation timestamp. CreateTime string `json:"created"` // The Id of the data centre in which the network domain is located. DatacenterID string `json:"datacenterId"` } // GetID returns the server anti-affinity rule's Id. func (rule *ServerAntiAffinityRule) GetID() string { return rule.ID } // GetResourceType returns the server anti-affinity rule's resource type. func (rule *ServerAntiAffinityRule) GetResourceType() ResourceType { return ResourceTypeServerAntiAffinityRule } // GetName returns the server anti-affinity rule's name. func (rule *ServerAntiAffinityRule) GetName() string { return rule.ID } // GetState returns the server anti-affinity rule's current state. func (rule *ServerAntiAffinityRule) GetState() string { return rule.State } // IsDeleted determines whether the server anti-affinity rule has been deleted (is nil). func (rule *ServerAntiAffinityRule) IsDeleted() bool { return rule == nil } // ToEntityReference creates an EntityReference representing the CustomerImage. func (rule *ServerAntiAffinityRule) ToEntityReference() EntityReference { name := "" if len(rule.Servers) == 2 { name = fmt.Sprintf("%s/%s", rule.Servers[0].Name, rule.Servers[1].Name, ) } return EntityReference{ ID: rule.ID, Name: name, } } var _ Resource = &ServerAntiAffinityRule{} // ServerAntiAffinityRules represents a page of ServerAntiAffinityRule results. 
type ServerAntiAffinityRules struct { Items []ServerAntiAffinityRule `json:"antiAffinityRule"` PagedResult } // Request body when creating a new anti-affinity rule. type newServerAntiAffinityRule struct { XMLName xml.Name `xml:"http://oec.api.opsource.net/schemas/server NewAntiAffinityRule"` // The Ids of the servers to which the rule relates. // Each rule can only apply to exactly 2 servers; we only use an array here because CloudControl (bizarrely) uses the same element name for both server Ids. ServerIds []string `xml:"serverId"` } // GetServerAntiAffinityRule retrieves the specified server anti-affinity rule (in the specified network domain). func (client *Client) GetServerAntiAffinityRule(ruleID string, networkDomainID string) (rule *ServerAntiAffinityRule, err error) { organizationID, err := client.getOrganizationID() if err != nil { return nil, err } requestURI := fmt.Sprintf("%s/server/antiAffinityRule?id=%s&networkDomainId=%s", url.QueryEscape(organizationID), url.QueryEscape(ruleID), url.QueryEscape(networkDomainID), ) request, err := client.newRequestV22(requestURI, http.MethodGet, nil) if err != nil { return nil, err } responseBody, statusCode, err := client.executeRequest(request) if err != nil { return nil, err } if statusCode != http.StatusOK { var apiResponse *APIResponseV2 apiResponse, err = readAPIResponseAsJSON(responseBody, statusCode) if err != nil { return nil, err } return nil, apiResponse.ToError("Request failed with status code %d (%s): %s", statusCode, apiResponse.ResponseCode, apiResponse.Message) } rules := &ServerAntiAffinityRules{} err = json.Unmarshal(responseBody, rules) if err != nil { return nil, err } if rules.IsEmpty() { return nil, nil // Rule not found } return &rules.Items[0], nil } // ListServerAntiAffinityRules lists the server anti-affinity rules in a network domain. 
func (client *Client) ListServerAntiAffinityRules(networkDomainID string, paging *Paging) (rules *ServerAntiAffinityRules, err error) { organizationID, err := client.getOrganizationID() if err != nil { return nil, err } requestURI := fmt.Sprintf("%s/server/antiAffinityRule?networkDomainId=%s&%s", url.QueryEscape(organizationID), url.QueryEscape(networkDomainID), paging.EnsurePaging().toQueryParameters(), ) request, err := client.newRequestV22(requestURI, http.MethodGet, nil) if err != nil { return nil, err } responseBody, statusCode, err := client.executeRequest(request) if err != nil { return nil, err } if statusCode != http.StatusOK { var apiResponse *APIResponseV2 apiResponse, err = readAPIResponseAsJSON(responseBody, statusCode) if err != nil { return nil, err } return nil, apiResponse.ToError("Request failed with status code %d (%s): %s", statusCode, apiResponse.ResponseCode, apiResponse.Message) } rules = &ServerAntiAffinityRules{} err = json.Unmarshal(responseBody, rules) if err != nil { return nil, err } return rules, nil } // CreateServerAntiAffinityRule creates an anti-affinity rule for the 2 specified servers. // server1Id is the Id of the first server. // server2Id is the Id of the second server. // // Returns the Id of the new anti-affinity rule. 
func (client *Client) CreateServerAntiAffinityRule(server1Id string, server2Id string) (ruleID string, err error) { organizationID, err := client.getOrganizationID() if err != nil { return "", err } requestURI := fmt.Sprintf("%s/antiAffinityRule", url.QueryEscape(organizationID), ) request, err := client.newRequestV1(requestURI, http.MethodPost, &newServerAntiAffinityRule{ ServerIds: []string{ server1Id, server2Id, }, }) if err != nil { return "", err } responseBody, statusCode, err := client.executeRequest(request) if err != nil { return "", err } if statusCode != http.StatusOK { var apiResponse *APIResponseV1 apiResponse, err = readAPIResponseV1(responseBody, statusCode) if err != nil { return "", err } return "", apiResponse.ToError("Request failed with status code %d (%s): %s", statusCode, apiResponse.ResultCode, apiResponse.Message) } apiResponse := &APIResponseV1{} err = xml.Unmarshal(responseBody, apiResponse) if err != nil { return "", err } newRuleID := apiResponse.GetAdditionalInformation("antiaffinityrule.id") if newRuleID == nil { return "", apiResponse.ToError("Invalid response (missing 'antiaffinityrule.id')") } return *newRuleID, nil } // DeleteServerAntiAffinityRule deletes the specified server anti-affinity rule. 
func (client *Client) DeleteServerAntiAffinityRule(ruleID string, networkDomainID string) error { organizationID, err := client.getOrganizationID() if err != nil { return err } requestURI := fmt.Sprintf("%s/antiAffinityRule/%s?delete", url.QueryEscape(organizationID), url.QueryEscape(ruleID), ) request, err := client.newRequestV1(requestURI, http.MethodGet, nil) if err != nil { return err } responseBody, statusCode, err := client.executeRequest(request) if err != nil { return err } apiResponse, err := readAPIResponseV1(responseBody, statusCode) if err != nil { return err } if apiResponse.Result != ResultSuccess { return apiResponse.ToError("Request failed with status code %d (%s): %s", statusCode, apiResponse.ResultCode, apiResponse.Message) } return nil }
package db

import (
	// mysql driver
	_ "github.com/go-sql-driver/mysql"
	// gorm mysql dialect
	_ "github.com/jinzhu/gorm/dialects/mysql"

	"github.com/golang/glog"
	"github.com/jinzhu/gorm"

	"sync"
	"time"

	"sub_account_service/app_server_v2/model"
)

// DbClient is the global database client, set by InitDb.
var DbClient *Db

// Db wraps a gorm client together with the server address.
type Db struct {
	addr   string       // the addr of db server
	Lock   sync.RWMutex // lock
	Client *gorm.DB     // mysql client
}

// InitDb opens the mysql connection, configures the pool, migrates the
// tables and starts a background keep-alive ticker.
// On any failure it only logs and returns, leaving DbClient unchanged.
func InitDb(addr string) {
	glog.Infoln("starting db", addr)
	mydb := &Db{}
	mydb.addr = addr
	db, err := gorm.Open("mysql", addr)
	if err != nil {
		glog.Errorln("db initing fail", err)
		return
	}
	err = db.DB().Ping()
	if err != nil {
		glog.Errorln("db ping fail", err)
		return
	}
	glog.Infoln("InitDB***************connecting db success!")
	mydb.Client = db
	DbClient = mydb
	db.DB().SetMaxIdleConns(10)
	db.DB().SetMaxOpenConns(100)
	db.LogMode(false)
	mydb.AutoCreate()
	// NOTE(review): every call to InitDb spawns a new timer1 goroutine; see timer1 below.
	go timer1(addr)
}

// CreateTable creates the table for the given model.
func (DB *Db) CreateTable(models interface{}) {
	DB.Client.CreateTable(models)
}

// timer1 pings the database every 5 seconds and re-runs InitDb when the
// ping fails.
// NOTE(review): a successful reconnect via InitDb starts another timer1
// goroutine while this one keeps running, so each reconnect leaks a
// ticker goroutine — confirm and consider stopping the old ticker.
func timer1(addr string) {
	timer1 := time.NewTicker(5 * time.Second)
	for {
		select {
		case <-timer1.C:
			err := DbClient.Client.DB().Ping()
			if err != nil {
				glog.Errorln("mysql connect fail,err:", err)
				InitDb(addr)
			}
		}
	}
}

// AutoCreate migrates all known tables; failures are logged but not fatal.
func (DB *Db) AutoCreate() {
	glog.Infoln("init AutoMigrate mysql db tables")
	if mydb := DbClient.Client.AutoMigrate(&Schedule{}); mydb.Error != nil {
		glog.Errorln("AutoCreate************schedule:", mydb.Error)
	}
	//DbClient.Client.AutoMigrate(&Reflash{})
	if mydb := DbClient.Client.AutoMigrate(&AppOrder{}); mydb.Error != nil {
		glog.Errorln("AutoCreate*************appOrder:", mydb, mydb.Error)
	}
	if mydb := DbClient.Client.AutoMigrate(&NumberOrder{}); mydb.Error != nil {
		glog.Errorln("AutoCreate*************NumOrder:", mydb.Error)
	}
	if mydb := DbClient.Client.AutoMigrate(&model.Rs{}); mydb.Error != nil {
		glog.Errorln("AutoCreate****************rs:", mydb.Error)
	}
	if mydb := DbClient.Client.AutoMigrate(&Reset{}); mydb.Error != nil {
		glog.Errorln("AutoCreate******************reset:", mydb.Error)
	}
	if mydb := DbClient.Client.AutoMigrate(&model.PaiBan{}); mydb.Error != nil {
		glog.Errorln("AutoCreate*********************paiban:", mydb.Error)
	}
}

// Reset models a reset-schedule row.
type Reset struct {
	gorm.Model
	SubCode   string
	UserAddr  string
	LastTime  time.Time
	Job       string
	ResetWay  int64 // 0: never reset, 1: reset daily, 2: reset on the 1st of each month
	ResetTime int64
	Publisher string
}
package main import ( "github.com/beego/beego/v2/server/web" ) func main() { web.Router("/abort", &AbortController{}) web.Run() } type AbortController struct { web.Controller } func (a *AbortController) Get() { a.Abort("401") // None of the following user := a.GetString("user") if user != "" { a.Ctx.WriteString("hello world") return } }
package main

import (
	"encoding/json"
	"er"
	"net/http"
	"strconv"
	"sutil"

	"github.com/gorilla/mux"
)

// conf holds the service configuration loaded from sgas.conf.
type conf struct {
	Port int // TCP port the HTTP server listens on
}

// main wires the /login, /logout and /vclient REST endpoints and starts
// the HTTP server on the configured port.
func main() {
	_log.Inf("Init SGAS")
	router := mux.NewRouter()

	c := conf{}
	// Only abort startup when the config error is unrecoverable.
	if e := loadConf(&c); e != nil && (e.Code()&er.E_IMPORTANCE) >= er.IMPT_UNRECOVERABLE {
		_log.Inf("Failed to start SGAS")
		return
	}

	epLogin := "/login"
	epLogout := "/logout"
	epVclient := "/vclient"
	router.HandleFunc(epLogin, loginRest).Methods("POST")
	router.HandleFunc(epLogout, logoutRest).Methods("POST")
	router.HandleFunc(epVclient, vclientRest).Methods("POST")

	_log.Ntf("SGAS start listening Port %v", c.Port)
	_log.Ntf("SGAS endpoints: %v %v %v", epLogin, epLogout, epVclient)

	err := http.ListenAndServe(":"+strconv.Itoa(c.Port), router)
	if err != nil {
		er.Throw(_E_START_WEB_SERVER, er.EInfo{
			"details":   "failed to start web",
			"fail info": err}).To(_log)
	}
}

// loginRest authenticates a user and returns "<clientID> <token>" as JSON.
// NOTE(review): credentials are read from the URL query string even though
// the route is POST; query strings commonly end up in access logs — consider
// moving them to the request body.
func loginRest(w http.ResponseWriter, r *http.Request) {
	_log.Trc("LoginRest() enter")
	defer _log.Trc("LoginRest() leave")

	sutil.EnableCors(&w)

	queries := r.URL.Query()
	un, _ := queries["username"]
	pw, _ := queries["password"]
	_log.Inf("Login request: Username %v", un)

	if len(un) != 1 || len(pw) != 1 {
		info := "Username or password is missing"
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte(info))
		_log.Ntf(info)
		return
	}

	cid, token, err := Login(un[0], pw[0])
	if err != nil {
		info := "Incorrect username or password"
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte(info))
		_log.Ntf(info)
		return
	}

	w.WriteHeader(http.StatusOK)
	json.NewEncoder(w).Encode(strconv.Itoa(cid) + " " + token)
	_log.Inf("Login successful, cid %v token %v", cid, token)
	return
}

// logoutRest invalidates a client session identified by username, client id
// and token.
// NOTE(review): the log line below says "Login request" in the logout
// handler — runtime string left unchanged here; flagging only.
func logoutRest(w http.ResponseWriter, r *http.Request) {
	_log.Trc("LogoutRest() enter")
	defer _log.Trc("LogoutRest() leave")

	sutil.EnableCors(&w)

	queries := r.URL.Query()
	un, _ := queries["username"]
	cids, _ := queries["client"]
	token, _ := queries["token"]
	_log.Inf("Login request: Username %v, client %v, token %v", un, cids, token)

	if len(un) != 1 || len(cids) != 1 || len(token) != 1 {
		info := "Username, client ID or token is missing"
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte(info))
		_log.Ntf(info)
		return
	}

	cid, err := strconv.Atoi(cids[0])
	if err != nil {
		info := "client ID is not a valid integer"
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte(info))
		_log.Ntf(info)
		return
	}

	e := Logout(un[0], cid, token[0])
	if e != nil {
		info := "Incorrect client information"
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte(info))
		_log.Ntf(info)
		return
	}

	w.WriteHeader(http.StatusOK)
	w.Write([]byte("success"))
	_log.Inf("Logout successful")
	return
}

// vclientRest validates a client id / token pair and answers with the
// associated username.
// NOTE(review): the response is 200 with the username regardless of the
// `valid` flag returned by ValidateToken — presumably an empty username
// signals an invalid token; confirm with callers.
func vclientRest(w http.ResponseWriter, r *http.Request) {
	_log.Trc("vclientRest() enter")
	defer _log.Trc("vclientRest() leave")

	sutil.EnableCors(&w)

	queries := r.URL.Query()
	cids, _ := queries["client"]
	token, _ := queries["token"]
	_log.Inf("Validate client request: client %v, token %v", cids, token)

	if len(cids) != 1 || len(token) != 1 {
		info := "Client ID or token is missing"
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte(info))
		_log.Ntf(info)
		return
	}

	cid, err := strconv.Atoi(cids[0])
	if err != nil {
		info := "Client ID is not a valid integer"
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte(info))
		_log.Ntf(info)
		return
	}

	username, valid := ValidateToken(cid, token[0])
	w.WriteHeader(http.StatusOK)
	w.Write([]byte(username))
	_log.Inf("Validate client ID finish, validity %v", valid)
	return
}

// loadConf reads the service configuration from sgas.conf into c.
func loadConf(c *conf) *er.Err {
	cfgf := "sgas.conf"
	_log.Inf("Load settings from %v", cfgf)

	if e := sutil.LoadConfFile(cfgf, c); e != nil {
		return er.Throw(_E_LOAD_SETTINGS_FAIL, er.EInfo{
			"details": "fail to load settings",
			"info":    e,
		}).To(_log)
	}
	return nil
}
// This file was generated for SObject KnowledgeableUser, API Version v43.0 at 2018-07-30 03:47:26.0731645 -0400 EDT m=+12.416236830 package sobjects import ( "fmt" "strings" ) type KnowledgeableUser struct { BaseSObject Id string `force:",omitempty"` RawRank int `force:",omitempty"` SystemModstamp string `force:",omitempty"` TopicId string `force:",omitempty"` UserId string `force:",omitempty"` } func (t *KnowledgeableUser) ApiName() string { return "KnowledgeableUser" } func (t *KnowledgeableUser) String() string { builder := strings.Builder{} builder.WriteString(fmt.Sprintf("KnowledgeableUser #%s - %s\n", t.Id, t.Name)) builder.WriteString(fmt.Sprintf("\tId: %v\n", t.Id)) builder.WriteString(fmt.Sprintf("\tRawRank: %v\n", t.RawRank)) builder.WriteString(fmt.Sprintf("\tSystemModstamp: %v\n", t.SystemModstamp)) builder.WriteString(fmt.Sprintf("\tTopicId: %v\n", t.TopicId)) builder.WriteString(fmt.Sprintf("\tUserId: %v\n", t.UserId)) return builder.String() } type KnowledgeableUserQueryResponse struct { BaseQuery Records []KnowledgeableUser `json:"Records" force:"records"` }
///////////////////////////////////////////////////////////////////// // arataca89@gmail.com // 20210417 // // func TrimLeftFunc(s string, f func(rune) bool) string // // Retorna s removendo os caracteres especificados conforme a função // f() do início. // package main import ( "fmt" "strings" "unicode" ) func main() { fmt.Print(strings.TrimLeftFunc("¡¡¡Hello, Gophers!!!", func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsNumber(r) })) } // Saída: // Hello, Gophers!!! //