text
stringlengths
11
4.05M
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"strconv"
	"time"
)

// main runs an HTTP worker on :8080. Each request body is expected to be
// "<topic> <num>"; the handler simulates work by sleeping (default 1s,
// overridable via ?sleep=N) and then reports completion to the
// knkafkaconsole service, retrying up to 3 times with linear backoff.
func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		var body []byte
		bodyStr := ""
		sleep := 1

		if r.Body != nil {
			// Best-effort read; an empty body is rejected below anyway.
			body, _ = ioutil.ReadAll(r.Body)
			if len(body) > 0 {
				bodyStr = string(body)
			}
		}

		if t := r.URL.Query().Get("sleep"); t != "" {
			// Keep the 1s default on a malformed value instead of silently
			// using 0 (the old `sleep, _ = strconv.Atoi(t)` zeroed it).
			if v, err := strconv.Atoi(t); err == nil {
				sleep = v
			}
		}

		if bodyStr == "" {
			fmt.Printf("Empty body\n")
			return
		}

		topic := ""
		num := 0
		if _, err := fmt.Sscanf(bodyStr, "%s %d", &topic, &num); err != nil {
			fmt.Printf("Error parsing: %q %s\n", bodyStr, err)
			return
		}

		// Real work goes here
		time.Sleep(time.Duration(sleep) * time.Second)

		// Report status
		url := "http://knkafkaconsole.default.svc.cluster.local"
		url += fmt.Sprintf("?done=%s", topic)
		for tries := 0; ; tries++ {
			res, err := http.Get(url)
			if res != nil {
				// Close the body on every attempt so the transport can reuse
				// the connection; the old code leaked one per retry.
				res.Body.Close()
			}
			if err == nil && res != nil && res.StatusCode/100 == 2 {
				break
			}
			if tries == 3 {
				fmt.Printf("Error: Gave up trying to send status: %s\n", err)
				w.WriteHeader(500)
				return
			}
			time.Sleep(time.Duration(tries) * time.Second)
		}
	})
	fmt.Print("Listening on port 8080\n")
	// ListenAndServe only returns on failure; don't drop that error.
	if err := http.ListenAndServe(":8080", nil); err != nil {
		fmt.Printf("Server error: %s\n", err)
	}
}
package store import ( "encoding/json" "log" "github.com/go-pg/pg" "github.com/google/uuid" "github.com/gracew/widget/graph/model" "github.com/pkg/errors" ) type Store struct { DB *pg.DB } func (s Store) NewAPI(input model.DefineAPIInput) (*model.API, error) { // janky way of converting from DefineAPIInput -> API bytes, err := json.Marshal(input) if err != nil { return nil, errors.Wrapf(err, "could not marshal input to json") } var api model.API err = json.Unmarshal(bytes, &api) if err != nil { return nil, errors.Wrapf(err, "could not unmarshal bytes as json") } api.ID = uuid.New().String() err = s.DB.Insert(&api) if err != nil { return nil, err } return &api, nil } func (s Store) UpdateAPI(input model.UpdateAPIInput) (*model.API, error) { // janky way of converting from UpdateAPIInput -> API bytes, err := json.Marshal(input) if err != nil { return nil, errors.Wrapf(err, "could not marshal input to json") } var api model.API err = json.Unmarshal(bytes, &api) if err != nil { return nil, errors.Wrapf(err, "could not unmarshal bytes as json") } m := s.DB.Model(&api) if input.Fields != nil { m.Column("fields") } if input.Operations != nil { m.Column("operations") } // TODO(gracew): figure out better way to not clobber name _, err = m.WherePK().Update() if err != nil { return nil, err } return &api, nil } // API fetches an API by ID. func (s Store) API(id string) (*model.API, error) { api := &model.API{ID: id} err := s.DB.Select(api) if err != nil { return nil, errors.Wrapf(err, "failed to fetch API %s", id) } return api, nil } // Apis fetches all APIs. 
func (s Store) Apis() ([]model.API, error) { var apis []model.API err := s.DB.Model(&apis).Select() if err != nil { return nil, errors.Wrap(err, "failed to fetch APIs") } return apis, nil } func (s Store) DeleteApi(id string) error { api := &model.API{ID: id} err := s.DB.Delete(api) if err != nil { return errors.Wrapf(err, "failed to delete API %s", id) } return nil } func (s Store) SaveAuth(input model.AuthAPIInput) error { var update = make(map[string]*model.AuthPolicy) for _, updateInput := range input.Update { update[updateInput.ActionName] = convertAuthPolicyInput(updateInput.Auth) } auth := &model.Auth{ APIID: input.APIID, Read: convertAuthPolicyInput(input.Read), Update: update, Delete: convertAuthPolicyInput(input.Delete), } _, err := s.DB.Model(auth).OnConflict("(apiid) DO UPDATE").Insert() return err } func convertAuthPolicyInput(input *model.AuthPolicyInput) *model.AuthPolicy { return &model.AuthPolicy{Type: input.Type, UserAttribute: input.UserAttribute, ObjectAttribute: input.ObjectAttribute} } // Auth fetches auth for the specified API. 
func (s Store) Auth(apiID string) (*model.Auth, error) { auth := &model.Auth{APIID: apiID} err := s.DB.Select(auth) if err != nil { if errors.Is(err, pg.ErrNoRows) { return nil, nil } return nil, errors.Wrapf(err, "failed to fetch auth for API %s", apiID) } return auth, nil } func (s Store) SaveCustomLogic(input model.SaveCustomLogicInput) error { var update = make(map[string]*model.CustomLogic) for _, updateInput := range input.Update { update[updateInput.ActionName] = convertCustomLogicInput(updateInput.CustomLogic) } customLogic := &model.AllCustomLogic{ APIID: input.APIID, Create: convertCustomLogicInput(input.Create), Update: update, Delete: convertCustomLogicInput(input.Delete), } _, err := s.DB.Model(customLogic).OnConflict("(apiid) DO UPDATE").Insert() return err } func convertCustomLogicInput(input *model.CustomLogicInput) *model.CustomLogic { return &model.CustomLogic{Language: input.Language, Before: input.Before, After: input.After} } func (s Store) CustomLogic(apiID string) (*model.AllCustomLogic, error) { customLogic := &model.AllCustomLogic{APIID: apiID} err := s.DB.Select(customLogic) if err != nil { if errors.Is(err, pg.ErrNoRows) { return nil, nil } return nil, errors.Wrapf(err, "failed to fetch custom logic for API %s", apiID) } return customLogic, nil } func (s Store) NewDeploy(deploy *model.Deploy) error { err := s.DB.Insert(deploy) if err != nil { return errors.Wrapf(err, "could not save deploy metadata for api %s", deploy.APIID) } return nil } func (s Store) DeleteDeploy(id string) error { err := s.DB.Delete(&model.Deploy{ID: id}) if err != nil { return errors.Wrapf(err, "could not delete deploy %s", id) } return nil } func (s Store) Deploys(apiID string) ([]model.Deploy, error) { var models []model.Deploy err := s.DB.Model(&models).WhereIn("apiid IN (?)", []string{apiID}).Select() if err != nil { return nil, errors.Wrapf(err, "failed to fetch deploys for API %s", apiID) } return models, nil } func (s Store) SaveDeployStepStatus(deployID 
string, step model.DeployStep, status model.DeployStatus) { stepStatus := &model.DeployStepStatus{ DeployID: deployID, Step: step, Status: status, } _, err := s.DB.Model(stepStatus).OnConflict("(deploy_id, step) DO UPDATE").Insert() if err != nil { log.Printf("failed to record status for deploy %s, step %s, status %s: %v", deployID, step, status, err) } } func (s Store) DeployStatus(deployID string) ([]model.DeployStepStatus, error) { var steps []model.DeployStepStatus err := s.DB.Model(&steps).WhereIn("deploy_id IN (?)", []string{deployID}).Select() if err != nil { return nil, errors.Wrapf(err, "failed to fetch step statuses for deploy %s", deployID) } return steps, nil }
package utils import ( "github.com/stretchr/testify/assert" "testing" ) func TestGenerateSha256(t *testing.T) { ast := assert.New(t) ast.Equal("2dce505d96a53c5768052ee90f3df2055657518dad489160df9913f66042e160", GenerateSha256("mysecret", "data"), ) }
package pkg import ( "net/http" "path/filepath" "regexp" "github.com/pkg/errors" ) type Headers []string type File struct { RelPath string `yaml:"path"` AbsPath string DirPath string Content string Links Links `yaml:"links"` Headers Headers Status bool Config *FileConfig `yaml:"config"` Stats *FileStats parser *Parser valid *Validator } func NewFile(filePath string, fileLinks Links, config FileConfig) (*File, error) { if match, _ := regexp.MatchString(`.md$`, filePath); !match { return nil, errors.New("The specified file isn't a markdown file") } absPath, _ := filepath.Abs(filePath) if err := fileExists(absPath); err != nil { return nil, err } content, err := readMarkdown(absPath) if err != nil { return nil, err } client := http.Client{} waiter := NewWaiter(config.Backoff) return &File{ RelPath: filePath, AbsPath: absPath, DirPath: filepath.Dir(filePath), Content: content, Links: fileLinks, Config: &config, parser: &Parser{}, valid: NewValidator(client, waiter), }, nil } func (f *File) Run() { f.ExtractLinks(). ExtractHeaders(). ValidateLinks(). ExtractStats() } func (f *File) ExtractLinks() *File { externalLinksToIgnore, internalLinksToIgnore := []string{}, []string{} if f.Config != nil { externalLinksToIgnore = f.Config.ExternalLinksToIgnore internalLinksToIgnore = f.Config.InternalLinksToIgnore } content := f.Content if f.Config != nil && f.Config.AllowCodeBlocks != nil && !*f.Config.AllowCodeBlocks { content = removeCodeBlocks(content) } basePath := "" if f.Config != nil { basePath = f.Config.BasePath } f.Links = f.parser. Links(basePath, content, f.DirPath). AppendConfig(f). RemoveIgnoredLinks(externalLinksToIgnore, internalLinksToIgnore). 
Filter(func(link Link) bool { if f.Config != nil && f.Config.IgnoreInternal != nil && *f.Config.IgnoreInternal && (link.TypeOf == HashInternalLink || link.TypeOf == InternalLink) { return false } if f.Config != nil && f.Config.IgnoreExternal != nil && *f.Config.IgnoreExternal && link.TypeOf == ExternalLink { return false } return true }) return f } func (f *File) ExtractHeaders() *File { f.Headers = f.parser.Headers(f.Content) return f } func (f *File) ValidateLinks() *File { f.Links = f.valid.Links(f.Links, f.Headers) f.Status = f.Links.CheckStatus() return f } func (f *File) ExtractStats() *File { f.Stats = NewFileStats(f) return f } func (f *File) WriteStats() *File { writeStats(f) return f } func (f *File) Summary() *File { summaryOfFile(f) return f }
package main import ( "encoding/json" "fmt" "github.com/liuzl/goutil/rest" ) func main() { jsonStr := `{"n":"13702032331","msg":"hello"}` jsonStr = `{"status":"ok","message":{"key":"20181009063703:4d819b10a0c780eeb2da8d6a2b2c921d","value":"eyJuIjogIjYyODE1MTkxMDM1MTYiLCAibXNnIjogInRoaXMgaXMganVzdCBhIHRlc3QifQ=="}}` var item map[string]string var ret rest.RestMessage err := json.Unmarshal([]byte(jsonStr), &ret) if err != nil { fmt.Println(err) return } else { fmt.Println(ret) } fmt.Println(ret.Message) b, _ := json.Marshal(ret.Message) err = json.Unmarshal(b, &item) fmt.Println(err, item) }
package integration import ( . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "context" "fmt" "github.com/onsi/gomega/gbytes" "github.com/onsi/gomega/gexec" "io" "os/exec" "strings" "testing" "time" ) func TestStore(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "Pachyderm Integration Suite") } var portForwardContextCancel context.CancelFunc var _ = BeforeSuite(func() { startMinikube() waitForPods() pachPortForward() }) var _ = AfterSuite(func() { if portForwardContextCancel != nil { portForwardContextCancel() } }) func startMinikube() { cmd := exec.Command("../../../../bin/start-local-pachyderm.sh") session, err := gexec.Start(cmd, GinkgoWriter, GinkgoWriter) Expect(err).ToNot(HaveOccurred()) Eventually(session, 5*time.Minute).Should(gbytes.Say("done")) } func waitForPods() { getPodsFunc := func() *gexec.Session { cmd := exec.Command("kubectl", "get", "pods") sess, err := gexec.Start(cmd, GinkgoWriter, GinkgoWriter) Expect(err).ToNot(HaveOccurred()) sess.Wait() return sess } Eventually(getPodsFunc, 5*time.Minute, 2*time.Second).Should(gbytes.Say("etcd.*1/1")) Eventually(getPodsFunc, 5*time.Minute, 2*time.Second).Should(gbytes.Say("pachd.*1/1")) } func pachPortForward() { var portForwardContext context.Context portForwardContext = context.Background() portForwardContext, portForwardContextCancel = context.WithCancel(portForwardContext) go func() { defer GinkgoRecover() sess, err := gexec.Start(exec.CommandContext(portForwardContext, "pachctl", "port-forward"), GinkgoWriter, GinkgoWriter) Expect(err).ToNot(HaveOccurred()) io.Copy(GinkgoWriter, sess.Out) select { case <-portForwardContext.Done(): sess.Kill() } }() } func ListRepos() []string { cmd := exec.Command("/bin/bash", "-c", "pachctl list-repo | tail -n +2 | awk '{print $1}'") sess, err := gexec.Start(cmd, GinkgoWriter, GinkgoWriter) Expect(err).ToNot(HaveOccurred()) return strings.Split(string(sess.Wait(2*time.Second).Out.Contents()), "\n") } func ListCommits(repoName string) []string { cmd := 
exec.Command("/bin/bash", "-c", fmt.Sprintf("pachctl list-commit %s | tail -n +2 | awk '{print $1}'", repoName)) sess, err := gexec.Start(cmd, GinkgoWriter, GinkgoWriter) Expect(err).ToNot(HaveOccurred()) return strings.Split(string(sess.Wait(2*time.Second).Out.Contents()), "\n") } func GetFile(repoName, commitId, path string) string { cmd := exec.Command("pachctl", "get-file", repoName, commitId, path) sess, err := gexec.Start(cmd, GinkgoWriter, GinkgoWriter) Expect(err).ToNot(HaveOccurred()) return string(sess.Wait(2 * time.Second).Out.Contents()) }
package main import ( "encoding/json" "net/http" ) func GetStatusEndpoint(w http.ResponseWriter, r *http.Request) { var status Status status.Msg = "Hello World Frankfurt" status.Version = "1.0.1" json.NewEncoder(w).Encode(status) }
package login

import (
	"github.com/labstack/echo"
	"main/config"
	"main/handlers/awss3"
	"net/http"
)

// LoginPage renders the static login form.
func LoginPage(c echo.Context) error {
	return c.Render(http.StatusOK, "login.html", "")
}

// Login reads the storage-provider credentials from the posted form and, when
// provider, ID and secret key are all non-empty, initializes the global
// config with a handler for that provider. It always redirects to
// /list_buckets, even when validation fails or the provider is unknown.
// Note: Region is read but never validated.
func Login(c echo.Context) error {
	storageProvider := c.FormValue("storage_provider")
	ID := c.FormValue("id")
	SecretKey := c.FormValue("secret_key")
	Region := c.FormValue("region")
	if storageProvider != "" && ID != "" && SecretKey != "" {
		switch storageProvider {
		case "s3":
			config.Conf.Init(true, awss3.Handler{}, ID, SecretKey, Region)
		case "gcs":
			// NOTE(review): this branch also passes awss3.Handler{} — looks
			// like a copy-paste placeholder for a GCS handler; confirm intent.
			config.Conf.Init(true, awss3.Handler{}, ID, SecretKey, Region)
		}
	}
	return c.Redirect(http.StatusFound, "/list_buckets")
}

// Logout destroys the stored configuration and returns to the login page.
func Logout(c echo.Context) error {
	config.Conf.DestroyConfig()
	return c.Redirect(http.StatusFound, "/login")
}
package executor import ( "github.com/meshplus/bitxhub-core/agency" "github.com/meshplus/bitxhub-kit/types" "github.com/meshplus/bitxhub-model/pb" ) type SerialExecutor struct { normalTxs []*types.Hash interchainCounter map[string][]uint64 applyTxFunc agency.ApplyTxFunc boltContracts map[string]agency.Contract } func NewSerialExecutor(f1 agency.ApplyTxFunc, f2 agency.RegisterContractFunc) agency.TxsExecutor { return &SerialExecutor{ applyTxFunc: f1, boltContracts: f2(), } } func init() { agency.RegisterExecutorConstructor("serial", NewSerialExecutor) } func (se *SerialExecutor) ApplyTransactions(txs []*pb.Transaction) []*pb.Receipt { se.interchainCounter = make(map[string][]uint64) se.normalTxs = make([]*types.Hash, 0) receipts := make([]*pb.Receipt, 0, len(txs)) for i, tx := range txs { receipts = append(receipts, se.applyTxFunc(i, tx, nil)) } return receipts } func (se *SerialExecutor) GetBoltContracts() map[string]agency.Contract { return se.boltContracts } func (se *SerialExecutor) AddNormalTx(hash *types.Hash) { se.normalTxs = append(se.normalTxs, hash) } func (se *SerialExecutor) GetNormalTxs() []*types.Hash { return se.normalTxs } func (se *SerialExecutor) AddInterchainCounter(to string, index uint64) { se.interchainCounter[to] = append(se.interchainCounter[to], index) } func (se *SerialExecutor) GetInterchainCounter() map[string][]uint64 { return se.interchainCounter }
package api

import "github.com/tedsuo/router"

// Named handler identifiers, used as keys into the route table below.
const (
	GetContainer          = "GetContainer"
	AllocateContainer     = "AllocateContainer"
	InitializeContainer   = "InitializeContainer"
	RunActions            = "RunActions"
	DeleteContainer       = "DeleteContainer"
	ListContainers        = "ListContainers"
	GetRemainingResources = "GetRemainingResources"
	GetTotalResources     = "GetTotalResources"
)

// Routes maps each executor HTTP endpoint (path + method) to its named
// handler. ":guid" is a path parameter identifying the container.
var Routes = router.Routes{
	{Path: "/containers", Method: "GET", Handler: ListContainers},
	{Path: "/containers/:guid", Method: "GET", Handler: GetContainer},
	{Path: "/containers/:guid", Method: "POST", Handler: AllocateContainer},
	{Path: "/containers/:guid/initialize", Method: "POST", Handler: InitializeContainer},
	{Path: "/containers/:guid/run", Method: "POST", Handler: RunActions},
	{Path: "/containers/:guid", Method: "DELETE", Handler: DeleteContainer},
	{Path: "/resources/remaining", Method: "GET", Handler: GetRemainingResources},
	{Path: "/resources/total", Method: "GET", Handler: GetTotalResources},
}
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"strings"
)

// partOne computes the checksum: (#ids containing some letter exactly twice)
// * (#ids containing some letter exactly three times).
func partOne(data []string) int {
	var two, three int
	for _, id := range data {
		counts := make(map[rune]int)
		for _, ch := range id {
			counts[ch]++
		}
		var hasTwo, hasThree bool
		for _, n := range counts {
			switch n {
			case 2:
				hasTwo = true
			case 3:
				hasThree = true
			}
		}
		if hasTwo {
			two++
		}
		if hasThree {
			three++
		}
	}
	return two * three
}

// partTwo finds the first pair of ids differing in exactly one position and
// returns their common letters.
//
// Fixes two defects in the previous version: (1) after a match was found the
// outer loop kept running and the very next comparison could append a second
// copy of the common letters before the builder-length check; (2) indexing
// used one id's length while reading the other, which could panic on ids of
// different length (e.g. a trailing empty line from strings.Split).
func partTwo(data []string) string {
	for i, a := range data {
		for _, b := range data[i+1:] {
			if len(a) != len(b) {
				continue
			}
			diff := 0
			for k := 0; k < len(a); k++ {
				if a[k] != b[k] {
					diff++
				}
			}
			if diff == 1 {
				var common strings.Builder
				for k := 0; k < len(a); k++ {
					if a[k] == b[k] {
						common.WriteByte(a[k])
					}
				}
				return common.String()
			}
		}
	}
	return ""
}

// main reads the puzzle input (one id per line) and prints both answers.
func main() {
	file, err := ioutil.ReadFile("input.txt")
	if err != nil {
		log.Fatal(err)
	}
	data := strings.Split(string(file), "\n")
	fmt.Println("1)", partOne(data))
	fmt.Println("2)", partTwo(data))
}
package handlers //TODO: implement handlers for your task-related resources here
package main

import "fmt"

// main exercises the Course struct and its methods, plus the auth helper.
// The commented-out lines are earlier practice exercises kept for reference.
func main() {
	Go := Course{
		Name:    "curso desde cero",
		Price:   30.9,
		IsFree:  false,
		UserIDs: []uint{10, 14, 17},
		Classes: map[uint]string{
			1: "Introduccion",
			2: "Estructuras",
			3: "Maps",
		},
	}
	// css := Course{Name: "css desde cero", IsFree: true}
	// js := Course{}
	// js.Name = "Curso de JavaScript"
	// js.UserIDs = []uint{13, 19}
	//fmt.Println(Go.Name)
	//fmt.Printf("%+v\n", css)
	//fmt.Printf("%+v\n", js)

	// PrintClasses is no longer a global function called as PrintClasses(Go);
	// it is now a method on Course.
	Go.PrintClasses()
	Go.ChangePrice(13.43)
	//fmt.Println(Go.Price)
	//PrintTypesStringToInt()
	//PrintTypesIntToString()
	//justPrint()
	//printInput()
	//PrintBufio()
	//whichNumberIsBigger()
	//helloWorldXTimes(5)
	//semiWhile(10)
	//semiWhileWithBreak(13)
	//printArray()
	//sliceCapacity()
	//createSlice()
	//copyArray()
	//newStruct(1, "inka", "mejia")
	//inka := new(User)
	//inka.set_name("sissi")
	//fmt.Println(inka.nombre)
	//crearTutor()

	admin := Admin{"tomas"}
	fmt.Println(auth(admin))
}
package main import ( "fmt" "log" "path/filepath" "regexp" "github.com/mrled/caryatid/pkg/caryatid" ) // Test whether a string is a valid URI func testValidUri(uri string) bool { matched, err := regexp.MatchString("^[a-zA-Z0-9]+://", uri) if err != nil { matched = false } return matched } func convertLocalPathToUri(path string) (uri string, err error) { abspath, err := filepath.Abs(path) uri = fmt.Sprintf("file://%v", abspath) return } func getManager(catalogUri string) (manager *caryatid.BackendManager, err error) { var uri string if testValidUri(catalogUri) { uri = catalogUri } else { // Handle a special case where the -catalog is a local path, rather than a file:// URI uri, err = convertLocalPathToUri(catalogUri) if err != nil { log.Printf("Error converting catalog path '%v' to URI: %v", catalogUri, err) return } } log.Printf("Using catalog URI of '%v'", uri) backend, err := caryatid.NewBackendFromUri(uri) if err != nil { log.Printf("Error retrieving backend: %v\n", err) return } manager = caryatid.NewBackendManager(uri, &backend) return } func showAction(catalogUri string) (result string, err error) { manager, err := getManager(catalogUri) if err != nil { return "", err } catalog, err := manager.GetCatalog() if err != nil { return "", err } result = fmt.Sprintf("%v\n", catalog) return } func createTestBoxAction(boxName string, providerName string) (err error) { err = caryatid.CreateTestBoxFile(boxName, providerName, true) if err != nil { log.Printf("Error creating a test box file: %v", err) return } else { log.Printf("Box file created at '%v'", boxName) } return } func addAction(boxPath string, boxName string, boxDescription string, boxVersion string, catalogUri string) (err error) { // TODO: Reduce code duplication between here and packer-post-processor-caryatid digestType, digest, provider, err := caryatid.DeriveArtifactInfoFromBoxFile(boxPath) if err != nil { panic(fmt.Sprintf("Could not determine artifact info: %v", err)) } manager, err := 
getManager(catalogUri) if err != nil { log.Printf("Error getting a BackendManager") return } err = manager.AddBox(boxPath, boxName, boxDescription, boxVersion, provider, digestType, digest) if err != nil { log.Printf("Error adding box metadata to catalog: %v\n", err) return } log.Println("Box successfully added to backend") catalog, err := manager.GetCatalog() if err != nil { log.Printf("Error getting catalog: %v\n", err) return } log.Printf("New catalog is:\n%v\n", catalog) return } func queryAction(catalogUri string, versionQuery string, providerQuery string) (result caryatid.Catalog, err error) { manager, err := getManager(catalogUri) if err != nil { log.Printf("Error getting a BackendManager") return } catalog, err := manager.GetCatalog() if err != nil { log.Printf("Error getting catalog: %v\n", err) return } queryParams := caryatid.CatalogQueryParams{Version: versionQuery, Provider: providerQuery} result, err = catalog.QueryCatalog(queryParams) if err != nil { log.Printf("Error querying catalog: %v\n", err) return } return } func deleteAction(catalogUri string, versionQuery string, providerQuery string) (err error) { manager, err := getManager(catalogUri) if err != nil { log.Printf("Error getting a BackendManager") return } queryParams := caryatid.CatalogQueryParams{Version: versionQuery, Provider: providerQuery} if err = manager.DeleteBox(queryParams); err != nil { return } return }
package shttp

import (
	"context"
	"log"
	"net/http"
	"os"
	"sync/atomic"
	"unsafe"

	"github.com/Packet-Clearing-House/DNSAuth/libs/utils"
)

// HttpServerConfig describes the listener: address, allowed client networks,
// and an optional password for the auth mux.
type HttpServerConfig struct {
	Addr     string   `cfg:"addr; required; netaddr"`
	Acl      []string `cfg:"acl; [\"127.0.0.1\", \"::1\"]"`
	Password string   `cfg:"password; """`
}

// server is the currently running HTTP server; swapped atomically on Start.
var server *http.Server

// mux is the shared auth-protected mux all handlers register into.
var mux *AuthMux = NewAuthMux("", "")

// Handle registers a handler on the shared auth mux.
func Handle(pattern string, handler http.Handler) {
	mux.Handle(pattern, handler)
}

// Start parses the ACL, updates the mux credentials, installs a new server
// (via an atomic pointer swap so readers never see a torn value), and begins
// serving on an ACL-filtered listener in a background goroutine.
//
// NOTE(review): the goroutine serves through the global `server` rather than
// `newServer`; a concurrent Start could swap the pointer between the swap and
// the Serve call — presumably Start/Reload are never called concurrently;
// confirm.
func Start(config HttpServerConfig) error {
	if acl, err := utils.ParseACLFromStrings(config.Acl); err != nil {
		return err
	} else {
		mux.ChangeCreds("", config.Password)
		newServer := http.Server{
			Handler:  mux,
			ErrorLog: log.New(os.Stdout, "", log.LstdFlags),
		}
		atomic.SwapPointer((*unsafe.Pointer)((unsafe.Pointer)(&server)), unsafe.Pointer(&newServer))
		if listener, err := utils.ACLListen("tcp", config.Addr, acl); err != nil {
			return err
		} else {
			go server.Serve(listener)
		}
	}
	return nil
}

// Reload stops the current server and starts a new one with config.
// NOTE(review): Start's error is discarded here — confirm that is intended.
func Reload(config HttpServerConfig) {
	Stop()
	Start(config)
}

// Stop gracefully shuts the server down, then closes it; any failure panics.
// NOTE(review): panics if called before Start (nil server) — verify callers.
func Stop() {
	if err := server.Shutdown(context.Background()); err != nil {
		log.Panic(err)
	}
	if err := server.Close(); err != nil {
		log.Panic(err)
	}
}
// This file was generated for SObject AppDefinition, API Version v43.0 at 2018-07-30 03:47:44.624862783 -0400 EDT m=+30.968631246
package sobjects

import (
	"fmt"
	"strings"
)

// AppDefinition mirrors the Salesforce AppDefinition SObject. Fields carry
// force tags so empty values are omitted on serialization.
type AppDefinition struct {
	BaseSObject
	Description                  string `force:",omitempty"`
	DeveloperName                string `force:",omitempty"`
	DurableId                    string `force:",omitempty"`
	HeaderColor                  string `force:",omitempty"`
	Id                           string `force:",omitempty"`
	IsLargeFormFactorSupported   bool   `force:",omitempty"`
	IsMediumFormFactorSupported  bool   `force:",omitempty"`
	IsNavAutoTempTabsDisabled    bool   `force:",omitempty"`
	IsNavPersonalizationDisabled bool   `force:",omitempty"`
	IsOverrideOrgTheme           bool   `force:",omitempty"`
	IsSmallFormFactorSupported   bool   `force:",omitempty"`
	Label                        string `force:",omitempty"`
	LogoUrl                      string `force:",omitempty"`
	MasterLabel                  string `force:",omitempty"`
	NamespacePrefix              string `force:",omitempty"`
	NavType                      string `force:",omitempty"`
	UiType                       string `force:",omitempty"`
	UtilityBar                   string `force:",omitempty"`
}

// ApiName returns the Salesforce API name of this SObject.
func (t *AppDefinition) ApiName() string {
	return "AppDefinition"
}

// String renders every field on its own line for debugging output.
// NOTE(review): t.Name is not declared on AppDefinition — presumably it is
// inherited from BaseSObject; verify.
func (t *AppDefinition) String() string {
	builder := strings.Builder{}
	builder.WriteString(fmt.Sprintf("AppDefinition #%s - %s\n", t.Id, t.Name))
	builder.WriteString(fmt.Sprintf("\tDescription: %v\n", t.Description))
	builder.WriteString(fmt.Sprintf("\tDeveloperName: %v\n", t.DeveloperName))
	builder.WriteString(fmt.Sprintf("\tDurableId: %v\n", t.DurableId))
	builder.WriteString(fmt.Sprintf("\tHeaderColor: %v\n", t.HeaderColor))
	builder.WriteString(fmt.Sprintf("\tId: %v\n", t.Id))
	builder.WriteString(fmt.Sprintf("\tIsLargeFormFactorSupported: %v\n", t.IsLargeFormFactorSupported))
	builder.WriteString(fmt.Sprintf("\tIsMediumFormFactorSupported: %v\n", t.IsMediumFormFactorSupported))
	builder.WriteString(fmt.Sprintf("\tIsNavAutoTempTabsDisabled: %v\n", t.IsNavAutoTempTabsDisabled))
	builder.WriteString(fmt.Sprintf("\tIsNavPersonalizationDisabled: %v\n", t.IsNavPersonalizationDisabled))
	builder.WriteString(fmt.Sprintf("\tIsOverrideOrgTheme: %v\n", t.IsOverrideOrgTheme))
	builder.WriteString(fmt.Sprintf("\tIsSmallFormFactorSupported: %v\n", t.IsSmallFormFactorSupported))
	builder.WriteString(fmt.Sprintf("\tLabel: %v\n", t.Label))
	builder.WriteString(fmt.Sprintf("\tLogoUrl: %v\n", t.LogoUrl))
	builder.WriteString(fmt.Sprintf("\tMasterLabel: %v\n", t.MasterLabel))
	builder.WriteString(fmt.Sprintf("\tNamespacePrefix: %v\n", t.NamespacePrefix))
	builder.WriteString(fmt.Sprintf("\tNavType: %v\n", t.NavType))
	builder.WriteString(fmt.Sprintf("\tUiType: %v\n", t.UiType))
	builder.WriteString(fmt.Sprintf("\tUtilityBar: %v\n", t.UtilityBar))
	return builder.String()
}

// AppDefinitionQueryResponse is the SOQL query envelope for AppDefinition.
type AppDefinitionQueryResponse struct {
	BaseQuery
	Records []AppDefinition `json:"Records" force:"records"`
}
package gitlab_test

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"io/ioutil"
	"net/http"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"

	"github.com/kryabinin/go-gitlab"
)

// TestClient_GetUsersByIDs exercises the batched user fetch at several
// concurrency levels, using a mocked HTTP client that returns one response
// per requested ID.
func TestClient_GetUsersByIDs(t *testing.T) {
	t.Run("positive case", func(t *testing.T) {
		for _, concurrency := range []int{0, 1, 2, 3, 4} {
			var (
				// Set of IDs the mocked API will answer for.
				im = map[int]struct{}{
					5:  {},
					10: {},
					15: {},
				}
				ids = make([]int, 0, len(im))
			)
			httpClient := new(gitlab.MockHTTPClient)
			for id := range im {
				ids = append(ids, id)
				httpClient.On("Do", mock.AnythingOfType("*http.Request")).
					Return(&http.Response{
						Body:       ioutil.NopCloser(bytes.NewReader([]byte(fmt.Sprintf("{\"id\": %d}", id)))),
						StatusCode: http.StatusOK,
					}, nil).
					Once()
			}
			client := gitlab.NewClient(
				"test_token",
				gitlab.WithConcurrency(concurrency),
				gitlab.WithHttpClient(httpClient),
			)
			users, err := client.GetUsersByIDs(context.Background(), ids)
			assert.NoError(t, err)
			assert.Equal(t, len(ids), len(users))
			// Every returned user must correspond to one of the requested IDs.
			for _, user := range users {
				_, ok := im[user.ID]
				assert.True(t, ok)
			}
		}
	})

	t.Run("error on getting user", func(t *testing.T) {
		expErr := errors.New("test error")
		for _, concurrency := range []int{0, 1, 2, 3, 4} {
			var (
				// One of the three requests fails; the whole batch must fail.
				im = map[int]error{
					6:  nil,
					11: expErr,
					16: nil,
				}
				ids = make([]int, 0, len(im))
			)
			httpClient := new(gitlab.MockHTTPClient)
			for id, err := range im {
				ids = append(ids, id)
				httpClient.On("Do", mock.AnythingOfType("*http.Request")).
					Return(&http.Response{
						Body:       ioutil.NopCloser(bytes.NewReader([]byte(fmt.Sprintf("{\"id\": %d}", id)))),
						StatusCode: http.StatusOK,
					}, err).
					Once()
			}
			client := gitlab.NewClient(
				"test_token",
				gitlab.WithConcurrency(concurrency),
				gitlab.WithHttpClient(httpClient),
			)
			users, err := client.GetUsersByIDs(context.Background(), ids)
			assert.True(t, errors.Is(err, expErr))
			assert.Equal(t, []gitlab.User(nil), users)
		}
	})
}

// TestClient_GetUserByID covers the single-user fetch: URL construction,
// transport errors, and malformed response bodies.
func TestClient_GetUserByID(t *testing.T) {
	t.Run("positive case", func(t *testing.T) {
		var (
			userID  = 5
			baseUrl = "http://gitlab.test.com/api/v4"
		)
		httpClient := new(gitlab.MockHTTPClient)
		httpClient.On("Do", mock.AnythingOfType("*http.Request")).Run(func(args mock.Arguments) {
			req, ok := args.Get(0).(*http.Request)
			assert.True(t, ok)
			// NOTE(review): baseUrl+"/"+"/users/5" asserts a double slash
			// ("/api/v4//users/5") — presumably it matches the client's URL
			// building; verify that is intentional.
			assert.Equal(t, baseUrl+"/"+fmt.Sprintf("/users/%d", userID), req.URL.String())
		}).Return(&http.Response{
			Body:       ioutil.NopCloser(bytes.NewReader([]byte(fmt.Sprintf("{\"id\": %d}", userID)))),
			StatusCode: http.StatusOK,
		}, nil)
		client := gitlab.NewClient(
			"test_token",
			gitlab.WithBaseUrl(baseUrl),
			gitlab.WithHttpClient(httpClient),
		)
		user, err := client.GetUserByID(context.Background(), userID)
		assert.NoError(t, err)
		assert.Equal(t, userID, user.ID)
	})

	t.Run("error on getting user", func(t *testing.T) {
		expErr := errors.New("test error")
		httpClient := new(gitlab.MockHTTPClient)
		httpClient.On("Do", mock.AnythingOfType("*http.Request")).Return(nil, expErr)
		client := gitlab.NewClient(
			"test_token",
			gitlab.WithHttpClient(httpClient),
		)
		user, err := client.GetUserByID(context.Background(), 5)
		assert.Error(t, err)
		assert.True(t, errors.Is(err, expErr))
		assert.Equal(t, gitlab.User{}, user)
	})

	t.Run("error on unmarshal response", func(t *testing.T) {
		httpClient := new(gitlab.MockHTTPClient)
		// "{" is deliberately truncated JSON.
		httpClient.On("Do", mock.AnythingOfType("*http.Request")).Return(&http.Response{
			Body:       ioutil.NopCloser(bytes.NewReader([]byte("{"))),
			StatusCode: http.StatusOK,
		}, nil)
		client := gitlab.NewClient(
			"test_token",
			gitlab.WithHttpClient(httpClient),
		)
		discussion, err := client.GetUserByID(context.Background(), 10)
		assert.Error(t, err)
		assert.Equal(t, gitlab.User{}, discussion)
	})
}
package main

import (
	"fmt"
	"main/decoder"
	"main/encoder"
	"main/projector"
	"strings"
)

// Hub tracks every known AV device by lowercased name, one map per device type.
type Hub struct {
	Projectors map[string]*projector.Device
	Encoders   map[string]*encoder.Device
	Decoders   map[string]*decoder.Device
}

// Add registers a device of the given type under the given name (both
// lowercased), starts its Run loop in a goroutine, and stores it in the
// matching map (lazily created). Unknown types are reported on stdout.
//
// NOTE(review): Add already starts dev.Run(), and RunAll starts Run() on
// every stored device again — a device added then RunAll'd gets two Run
// goroutines. Confirm Run is safe to start twice, or that only one of the
// two paths is used in practice.
func (h *Hub) Add(device string, name string, vars map[string]string) {
	device = strings.ToLower(device)
	name = strings.ToLower(name)
	switch device {
	case "projector":
		dev := &projector.Device{
			Address: vars["address"],
			Name:    name,
		}
		go dev.Run()
		if h.Projectors == nil {
			h.Projectors = make(map[string]*projector.Device)
		}
		h.Projectors[name] = dev
	case "encoder":
		// NOTE(review): the encoder's Name comes from vars["name"] while the
		// map key (and the projector's Name) use the lowercased name
		// parameter — possibly a copy-paste slip; verify intent.
		dev := &encoder.Device{
			Address: vars["address"],
			Name:    vars["name"],
			Channel: vars["channel"],
		}
		go dev.Run()
		if h.Encoders == nil {
			h.Encoders = make(map[string]*encoder.Device)
		}
		h.Encoders[name] = dev
	case "decoder":
		dev := &decoder.Device{
			Address: vars["address"],
		}
		go dev.Run()
		if h.Decoders == nil {
			h.Decoders = make(map[string]*decoder.Device)
		}
		h.Decoders[name] = dev
	default:
		fmt.Printf("hub.Add: unknown device type %s\n", device)
	}
}

// RunAll starts the Run loop of every registered device in its own goroutine.
func (h *Hub) RunAll() {
	for _, dev := range h.Projectors {
		go dev.Run()
	}
	for _, dev := range h.Encoders {
		go dev.Run()
	}
	for _, dev := range h.Decoders {
		go dev.Run()
	}
}

// GetProjector returns the projector registered under name (nil if absent).
func (h *Hub) GetProjector(name string) *projector.Device {
	return h.Projectors[name]
}

// GetEncoder returns the encoder registered under name (nil if absent).
func (h *Hub) GetEncoder(name string) *encoder.Device {
	return h.Encoders[name]
}

// GetDecoder returns the decoder registered under name (nil if absent).
func (h *Hub) GetDecoder(name string) *decoder.Device {
	return h.Decoders[name]
}
package printer

import (
	"crypto/md5"
	"encoding/hex"
	"strings"

	"github.com/davyxu/tabtoy/v2/i18n"
	"github.com/davyxu/tabtoy/v2/model"
)

// combineFileVersion is the format version written into the combined
// binary file header.
const combineFileVersion = 4

// binaryPrinter emits all exported tables as one combined binary blob.
type binaryPrinter struct {
}

// Run writes the combined binary: "TT" magic, the format version, the build
// ID, a 32-byte checksum placeholder, then every table whose tags include
// ".bin". Afterwards it backfills the placeholder with the hex MD5 of the
// data section. Returns nil when any table fails to serialize.
// (The "fileStresam" spelling is a pre-existing typo, kept verbatim.)
func (self *binaryPrinter) Run(g *Globals, outputClass int) *Stream {
	fileStresam := NewStream()
	fileStresam.WriteString("TT")
	fileStresam.WriteInt32(combineFileVersion)
	fileStresam.WriteString(g.BuildID)
	const md5base64Len = 32
	// NOTE(review): the +4 presumably skips a 4-byte length prefix that
	// WriteString emits before the placeholder bytes — confirm against
	// Stream.WriteString.
	beginPos := fileStresam.Buffer().Len() + 4
	// Reserve 32 bytes of placeholder ("Z"s) for the checksum.
	fileStresam.WriteString(strings.Repeat("Z", md5base64Len))
	dataPos := fileStresam.Buffer().Len()
	for index, tab := range g.Tables {
		// Skip tables that are not tagged for binary output.
		if !tab.LocalFD.MatchTag(".bin") {
			log.Infof("%s: %s", i18n.String(i18n.Printer_IgnoredByOutputTag), tab.Name())
			continue
		}
		if !writeTableBinary(fileStresam, tab, int32(index)) {
			return nil
		}
	}
	// MD5 over the data section only (everything after the placeholder).
	m := md5.New()
	m.Write([]byte(fileStresam.Buffer().Bytes()[dataPos:]))
	checksum := hex.EncodeToString(m.Sum(nil))
	checkSumData := fileStresam.Buffer().Bytes()[beginPos : beginPos+32]
	// Backfill the checksum over the "Z" placeholder.
	copy(checkSumData, []byte(checksum))
	return fileStresam
}

// writeTableBinary serializes every record of tab into tabStream as
// tag-prefixed, length-prefixed rows. Always returns true in the current
// implementation.
func writeTableBinary(tabStream *Stream, tab *model.Table, index int32) bool {
	// Iterate over each row.
	for _, r := range tab.Recs {
		rowStream := NewStream()
		// Iterate over each column.
		for _, node := range r.Nodes {
			if node.SugguestIgnore {
				continue
			}
			// Child-node count for repeated fields.
			if node.IsRepeated {
				rowStream.WriteInt32(int32(len(node.Child)))
			}
			// Plain (non-struct) values.
			if node.Type != model.FieldType_Struct {
				for _, valueNode := range node.Child {
					// Write the field index (tag) before each value.
					rowStream.WriteInt32(node.Tag())
					rowStream.WriteNodeValue(node.Type, valueNode)
				}
			} else {
				// Iterate over the repeated struct values.
				for _, structNode := range node.Child {
					structStream := NewStream()
					// Iterate over one struct's fields.
					for _, fieldNode := range structNode.Child {
						if fieldNode.SugguestIgnore {
							continue
						}
						// Write the field index (tag).
						structStream.WriteInt32(fieldNode.Tag())
						// The value node is always the first child.
						valueNode := fieldNode.Child[0]
						structStream.WriteNodeValue(fieldNode.Type, valueNode)
					}
					// Flush the struct into the row: tag, length, bytes.
					rowStream.WriteInt32(node.Tag())
					rowStream.WriteInt32(int32(structStream.Len()))
					rowStream.WriteRawBytes(structStream.Buffer().Bytes())
				}
			}
		}
		// Row framing: table tag, row length, row bytes.
		tabStream.WriteInt32(model.MakeTag(int32(model.FieldType_Table), index))
		tabStream.WriteInt32(int32(rowStream.Len()))
		tabStream.WriteRawBytes(rowStream.Buffer().Bytes())
	}
	return true
}

// init registers this printer under the "bin" output class.
func init() {
	RegisterPrinter("bin", &binaryPrinter{})
}
package systemd

import (
	"os"
	"syscall"
	"testing"
)

// TestListenFds exercises listenFds against a scripted OS mock: it checks
// LISTEN_PID parsing/matching, LISTEN_FDS parsing, and the F_GETFL/F_SETFL
// fcntl calls that clear FD_CLOEXEC on inherited descriptors.
// NOTE(review): the mock entries are matched in call order, so the sequence
// below must mirror listenFds' exact call sequence.
func TestListenFds(t *testing.T) {
	var testcases = []struct {
		ListenPid  string
		pid        int
		ListenFds  string
		GetflFlags uintptr
		GetflErr   syscall.Errno
		SetflFlags uintptr
		SetflErr   syscall.Errno
		NumFds     int
		Err        bool
	}{
		// No LISTEN_PID: nothing passed, no error.
		{ListenPid: "", NumFds: 0, Err: false},
		// Unparseable LISTEN_PID is an error.
		{ListenPid: "unparseable pid", NumFds: 0, Err: true},
		// LISTEN_PID for a different process: ignore silently.
		{ListenPid: "1234", pid: 5678, NumFds: 0, Err: false},
		// Empty / bad / negative / zero LISTEN_FDS.
		{ListenPid: "1234", pid: 1234, ListenFds: "", NumFds: 0, Err: false},
		{ListenPid: "1234", pid: 1234, ListenFds: "unparseable", NumFds: 0, Err: true},
		{ListenPid: "1234", pid: 1234, ListenFds: "-1", NumFds: 0, Err: true},
		{ListenPid: "1234", pid: 1234, ListenFds: "0", NumFds: 0, Err: false},
		// fcntl failures and FD_CLOEXEC handling for one fd.
		{ListenPid: "1234", pid: 1234, ListenFds: "1", GetflErr: 1, NumFds: 0, Err: true},
		{ListenPid: "1234", pid: 1234, ListenFds: "1", GetflErr: 0, GetflFlags: syscall.FD_CLOEXEC, NumFds: 1, Err: false},
		{ListenPid: "1234", pid: 1234, ListenFds: "1", GetflErr: 0, GetflFlags: 0, SetflFlags: syscall.FD_CLOEXEC, SetflErr: 1, NumFds: 0, Err: true},
		{ListenPid: "1234", pid: 1234, ListenFds: "1", GetflErr: 0, GetflFlags: 0, SetflFlags: syscall.FD_CLOEXEC, SetflErr: 0, NumFds: 1, Err: false},
	}
	for _, tc := range testcases {
		// Script the mock in the order listenFds makes its OS calls.
		// fd 3 is SD_LISTEN_FDS_START, the first passed descriptor.
		osm = &mock{
			{"Getenv", []interface{}{"LISTEN_PID"}, []interface{}{tc.ListenPid}},
			{"Getpid", nil, []interface{}{tc.pid}},
			{"Getenv", []interface{}{"LISTEN_FDS"}, []interface{}{tc.ListenFds}},
			{"Syscall", []interface{}{uintptr(syscall.SYS_FCNTL), uintptr(3), uintptr(syscall.F_GETFL), uintptr(0)}, []interface{}{tc.GetflFlags, uintptr(0), tc.GetflErr}},
			{"Syscall", []interface{}{uintptr(syscall.SYS_FCNTL), uintptr(3), uintptr(syscall.F_SETFL), tc.SetflFlags}, []interface{}{uintptr(0), uintptr(0), tc.SetflErr}},
		}
		num_fds, err := listenFds()
		if num_fds != tc.NumFds {
			t.Fail()
		}
		if tc.Err != (err != nil) {
			t.Fail()
		}
	}
}

// TestGetFile exercises getFile: fstat failures and non-stream file modes
// must error, while FIFO/regular/char-device modes yield an *os.File.
// NOTE(review): the mock stats fd 1237 for input 1234 — presumably getFile
// adds the SD_LISTEN_FDS_START offset of 3; confirm against getFile.
func TestGetFile(t *testing.T) {
	var testcases = []struct {
		Num      int
		Fstat    syscall.Stat_t
		FstatErr syscall.Errno
		Outfd    uintptr
		Err      bool
	}{
		{Num: 1234, FstatErr: 1, Err: true},
		{Num: 1234, FstatErr: 0, Fstat: syscall.Stat_t{Mode: 0}, Err: true},
		{Num: 1234, FstatErr: 0, Fstat: syscall.Stat_t{Mode: syscall.S_IFIFO}, Err: false, Outfd: 1237},
		{Num: 1234, FstatErr: 0, Fstat: syscall.Stat_t{Mode: syscall.S_IFREG}, Err: false, Outfd: 1237},
		{Num: 1234, FstatErr: 0, Fstat: syscall.Stat_t{Mode: syscall.S_IFCHR}, Err: false, Outfd: 1237},
	}
	for _, tc := range testcases {
		osm = &mock{
			{"Fstat", []interface{}{1237}, []interface{}{tc.Fstat, tc.FstatErr}},
			{"NewFile", []interface{}{uintptr(1237), ""}, []interface{}{os.NewFile(1237, "")}},
		}
		f, err := getFile(tc.Num)
		if (err != nil) != tc.Err {
			t.Fail()
		} else if err == nil && (f == nil || f.Fd() != tc.Outfd) {
			t.Fail()
		}
	}
}
package env

// GetAppName returns the application name. The appname variable is a
// package-level value set elsewhere in this package (not visible in this
// file).
func GetAppName() string {
	return appname
}

// GetNodeId returns the node identifier. The nodeid variable is a
// package-level value set elsewhere in this package (not visible in this
// file).
func GetNodeId() string {
	return nodeid
}
package core

// NodeSummary is the abbreviated JSON representation of a cluster node.
// NOTE(review): the field set (datacenter, node_class, drain) mirrors a
// Nomad-style node API — confirm against the upstream API this maps.
type NodeSummary struct {
	Id                string `json:"id"`
	Name              string `json:"name"`
	Status            string `json:"status"`
	Datacenter        string `json:"datacenter"`
	NodeClass         string `json:"node_class"`
	Version           string `json:"version"`
	Drain             bool   `json:"drain"`
	StatusDescription string `json:"status_description"`
}

// Node is the full JSON representation of a cluster node, including its
// attributes, metadata, resources, and connectivity details.
type Node struct {
	Id                string            `json:"id"`
	Name              string            `json:"name"`
	Attributes        map[string]string `json:"attributes"`
	Meta              map[string]string `json:"meta"`
	Resources         *NodeResources    `json:"resources"`
	Datacenter        string            `json:"datacenter"`
	NodeClass         string            `json:"node_class"`
	Drain             bool              `json:"drain"`
	Status            string            `json:"status"`
	StatusDescription string            `json:"status_description"`
	HTTPAddr          string            `json:"http_addr"`
	TLSEnabled        bool              `json:"tls_enabled"`
	Links             map[string]string `json:"links"`
}

// NodeResources describes a node's capacity. Pointer fields distinguish
// "absent" from an explicit zero in the JSON payload.
type NodeResources struct {
	CPU    *int `json:"cpu"`
	Memory *int `json:"memory_mb"`
	Disk   *int `json:"disk_mb"`
	IOPS   *int `json:"iops"`
}
/* ** description(""). ** copyright('open-im,www.open-im.io'). ** author("fg,Gordon@tuoyun.net"). ** time(2021/5/27 10:31). */ package http import ( "bytes" "encoding/json" "io/ioutil" "net/http" "time" ) func Get(url string) (response []byte, err error) { client := http.Client{Timeout: 5 * time.Second} resp, err := client.Get(url) if err != nil { return nil, err } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } return body, nil } //application/json; charset=utf-8 func Post(url string, data interface{}, contentType string) (content []byte, err error) { jsonStr, _ := json.Marshal(data) req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonStr)) if err != nil { return nil, err } req.Header.Add("content-type", contentType) defer req.Body.Close() client := &http.Client{Timeout: 5 * time.Second} resp, err := client.Do(req) if err != nil { return nil, err } defer resp.Body.Close() result, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err } return result, nil }
package http

import (
	"github.com/kataras/iris/v12"
	"report-api/api"
)

// LoadRouter wires every route group onto the iris application. New route
// groups should be registered here.
func LoadRouter(app *iris.Application) {
	test(app)
}

// test registers the demo routes: GET / handled by api.HelloWorld.
func test(app *iris.Application) {
	app.Get("/", api.HelloWorld)
}
// Copyright 2020 Kuei-chun Chen. All rights reserved. package mdb import ( "encoding/json" "fmt" "log" "net/http" "os" "path/filepath" "sort" "time" "github.com/simagix/gox" "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/mongo" ) // WiredTigerCache stores wiredTiger cache structure type WiredTigerCache struct { databases []Database numPoints int version string } // NewWiredTigerCache returns *WiredTigerCache func NewWiredTigerCache(version string) *WiredTigerCache { wtc := WiredTigerCache{version: version, numPoints: 10} http.HandleFunc("/wt", gox.Cors(wtc.Handler)) http.HandleFunc("/wt/", gox.Cors(wtc.Handler)) return &wtc } // Start starts a web server and a thread to collect caches func (wtc *WiredTigerCache) Start(client *mongo.Client) { var err error var ss ServerStatus if ss, err = GetServerStatus(client); err != nil { panic(ss) } proc := filepath.Base(ss.Process) if proc != "mongod" && proc != "mongod.exe" { fmt.Printf("connected to %v, exiting...\n", ss.Process) os.Exit(0) } for { if err = wtc.GetAllDatabasesStats(client); err != nil { log.Println(err) } time.Sleep(5 * time.Second) } } // GetAllDatabasesStats returns db info func (wtc *WiredTigerCache) GetAllDatabasesStats(client *mongo.Client) error { var err error dbi := NewDatabaseStats(wtc.version) wtc.databases, err = dbi.GetAllDatabasesStats(client, []string{}) return err } // Handler supports resetful calls func (wtc *WiredTigerCache) Handler(w http.ResponseWriter, r *http.Request) { if r.URL.Path[1:] == "wt/data" { wtc.GetWiredTigerCacheData(w, r) } else if r.URL.Path[1:] == "wt" || r.URL.Path[1:] == "wt/" { w.WriteHeader(http.StatusOK) w.Write([]byte(html)) } else { json.NewEncoder(w).Encode(bson.M{"ok": 1, "message": "hello keyhole!"}) } } // GetWiredTigerCacheData gets WT cache data func (wtc *WiredTigerCache) GetWiredTigerCacheData(w http.ResponseWriter, r *http.Request) { topCaches := []ChartDataPoint{} topDataCache := []ChartDataPoint{} topIndexesCache := []ChartDataPoint{} 
cacheDataSize := int64(0) cacheIndexesSize := int64(0) for _, database := range wtc.databases { for _, collection := range database.Collections { ns := collection.NS // top storage list stats := collection.Stats if stats.WiredTiger != nil { x := toInt64(stats.WiredTiger["cache"].(bson.M)["bytes currently in the cache"]) cacheDataSize += x topCaches = append(topCaches, ChartDataPoint{label: "D:" + ns, value: x}) if len(topDataCache) < wtc.numPoints { topDataCache = append(topDataCache, ChartDataPoint{label: ns, value: x}) } else if int64(x) > topDataCache[wtc.numPoints-1].value { topDataCache[wtc.numPoints-1] = ChartDataPoint{label: ns, value: int64(x)} } sort.Slice(topDataCache, func(i int, j int) bool { return topDataCache[i].value > topDataCache[j].value }) } if stats.IndexDetails != nil { indexDetails := stats.IndexDetails x := int64(0) for _, v := range indexDetails { if v.(bson.M)["cache"] != nil { x += toInt64(v.(bson.M)["cache"].(bson.M)["bytes currently in the cache"]) } } cacheIndexesSize += x topCaches = append(topCaches, ChartDataPoint{label: "I:" + ns, value: x}) if len(topIndexesCache) < wtc.numPoints { topIndexesCache = append(topIndexesCache, ChartDataPoint{label: ns, value: x}) } else if int64(x) > topIndexesCache[wtc.numPoints-1].value { topIndexesCache[wtc.numPoints-1] = ChartDataPoint{label: ns, value: int64(x)} } sort.Slice(topIndexesCache, func(i int, j int) bool { return topIndexesCache[i].value > topIndexesCache[j].value }) } if len(topCaches) > 100 { sort.Slice(topCaches, func(i int, j int) bool { return topCaches[i].value > topCaches[j].value }) topCaches = topCaches[:80] } } } // top cache usages sort.Slice(topCaches, func(i int, j int) bool { return topCaches[i].value > topCaches[j].value }) var unit string unit, topCaches = getTopChartPoints(topCaches, wtc.numPoints) data := [][]interface{}{{"Name Space", unit}} title := fmt.Sprintf("Data and Indexes in WiredTiger Cache (%s)", unit) for _, v := range topCaches { data = append(data, 
[]interface{}{v.label, v.value}) } doc := bson.M{"top_caches": bson.M{"title": title, "data": data}} // cache distr, indexes vs. data points := []ChartDataPoint{} points = append(points, ChartDataPoint{label: "Data", value: cacheDataSize}) points = append(points, ChartDataPoint{label: "Indexes", value: cacheIndexesSize}) unit, points = getTopChartPoints(points, wtc.numPoints) title = fmt.Sprintf("Data vs. Indexes in WiredTiger Cache (%s)", unit) data = [][]interface{}{{"Name Space", unit}} for _, v := range points { data = append(data, []interface{}{v.label, v.value}) } doc["cache_distr"] = bson.M{"title": title, "data": data} // top data cache unit, topDataCache = getTopChartPoints(topDataCache, wtc.numPoints) data = [][]interface{}{{"Name Space", unit}} title = fmt.Sprintf("Data in WiredTiger Cache (%s)", unit) for _, v := range topDataCache { data = append(data, []interface{}{v.label, v.value}) } doc["top_data_cache"] = bson.M{"title": title, "data": data} // top indexes cache unit, topIndexesCache = getTopChartPoints(topIndexesCache, wtc.numPoints) data = [][]interface{}{{"Name Space", unit}} title = fmt.Sprintf("Indexes in WiredTiger Cache (%s)", unit) for _, v := range topIndexesCache { data = append(data, []interface{}{v.label, v.value}) } doc["top_indexes_cache"] = bson.M{"title": title, "data": data} json.NewEncoder(w).Encode(doc) }
// Copyright (c) 2023 Target Brands, Inc. All rights reserved.
//
// Use of this source code is governed by the LICENSE file in this repository.

package vela

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/http/httptest"
	"reflect"
	"testing"

	"github.com/go-vela/server/mock/server"
	"github.com/go-vela/types/library"
)

// TestSchedule_Get verifies Schedule.Get against the mock server: a known
// schedule returns 200 with the fixture payload, an unknown one 404 with an
// empty schedule.
func TestSchedule_Get(t *testing.T) {
	// setup context
	s := httptest.NewServer(server.FakeHandler())
	c, err := NewClient(s.URL, "", nil)
	if err != nil {
		t.Errorf("unable to create test client: %v", err)
	}
	var schedule library.Schedule
	err = json.Unmarshal([]byte(server.ScheduleResp), &schedule)
	if err != nil {
		t.Errorf("unable to create test schedule: %v", err)
	}
	type args struct {
		org      string
		repo     string
		schedule string
	}
	tests := []struct {
		failure  bool
		name     string
		args     args
		want     *library.Schedule
		wantResp int
	}{
		{
			failure: false,
			name:    "success with 200",
			args: args{
				org:      "github",
				repo:     "octocat",
				schedule: "foo",
			},
			want:     &schedule,
			wantResp: http.StatusOK,
		},
		{
			failure: true,
			name:    "failure with 404",
			args: args{
				org:      "github",
				repo:     "octocat",
				schedule: "not-found",
			},
			want:     new(library.Schedule),
			wantResp: http.StatusNotFound,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			got, gotResp, err := c.Schedule.Get(test.args.org, test.args.repo, test.args.schedule)
			if test.failure {
				if err == nil {
					t.Errorf("Get for %s should have returned err", test.name)
				}
				return
			}
			if err != nil {
				t.Errorf("Get for %s returned err: %v", test.name, err)
			}
			if !reflect.DeepEqual(gotResp.StatusCode, test.wantResp) {
				t.Errorf("Get for %s is %v, want %v", test.name, gotResp.StatusCode, test.wantResp)
			}
			if !reflect.DeepEqual(got, test.want) {
				t.Errorf("Get for %s is %v, want %v", test.name, got, test.want)
			}
		})
	}
}

// TestSchedule_GetAll verifies Schedule.GetAll returns the full fixture
// list with a 200 status.
func TestSchedule_GetAll(t *testing.T) {
	// setup context
	s := httptest.NewServer(server.FakeHandler())
	c, err := NewClient(s.URL, "", nil)
	if err != nil {
		t.Errorf("unable to create test client: %v", err)
	}
	var schedules []library.Schedule
	err = json.Unmarshal([]byte(server.SchedulesResp), &schedules)
	if err != nil {
		t.Errorf("unable to create test schedules: %v", err)
	}
	type args struct {
		org  string
		repo string
		opts *ListOptions
	}
	tests := []struct {
		failure  bool
		name     string
		args     args
		want     []library.Schedule
		wantResp int
	}{
		{
			failure: false,
			name:    "success with 200",
			args: args{
				org:  "github",
				repo: "octocat",
				opts: nil,
			},
			want:     schedules,
			wantResp: http.StatusOK,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			got, gotResp, err := c.Schedule.GetAll(test.args.org, test.args.repo, test.args.opts)
			if test.failure {
				if err == nil {
					t.Errorf("GetAll for %s should have returned err", test.name)
				}
				return
			}
			if err != nil {
				t.Errorf("GetAll for %s returned err: %v", test.name, err)
			}
			if !reflect.DeepEqual(gotResp.StatusCode, test.wantResp) {
				t.Errorf("GetAll for %s is %v, want %v", test.name, gotResp.StatusCode, test.wantResp)
			}
			// GetAll returns *[]library.Schedule, hence the &test.want.
			if !reflect.DeepEqual(got, &test.want) {
				t.Errorf("GetAll for %s is %v, want %v", test.name, *got, test.want)
			}
		})
	}
}

// TestSchedule_Add verifies Schedule.Add creates a schedule and returns
// 201 with the fixture payload.
func TestSchedule_Add(t *testing.T) {
	// setup context
	s := httptest.NewServer(server.FakeHandler())
	c, err := NewClient(s.URL, "", nil)
	if err != nil {
		t.Errorf("unable to create test client: %v", err)
	}
	var schedule library.Schedule
	err = json.Unmarshal([]byte(server.ScheduleResp), &schedule)
	if err != nil {
		t.Errorf("unable to create test schedule: %v", err)
	}
	type args struct {
		org      string
		repo     string
		schedule *library.Schedule
	}
	tests := []struct {
		failure  bool
		name     string
		args     args
		want     *library.Schedule
		wantResp int
	}{
		{
			failure: false,
			name:    "success with 201",
			args: args{
				org:  "github",
				repo: "octocat",
				schedule: &library.Schedule{
					Active: Bool(true),
					Name:   String("foo"),
					Entry:  String("@weekly"),
				},
			},
			want:     &schedule,
			wantResp: http.StatusCreated,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			got, gotResp, err := c.Schedule.Add(test.args.org, test.args.repo, test.args.schedule)
			if test.failure {
				if err == nil {
					t.Errorf("Add for %s should have returned err", test.name)
				}
				return
			}
			if err != nil {
				t.Errorf("Add for %s returned err: %v", test.name, err)
			}
			if !reflect.DeepEqual(gotResp.StatusCode, test.wantResp) {
				t.Errorf("Add for %s is %v, want %v", test.name, gotResp.StatusCode, test.wantResp)
			}
			if !reflect.DeepEqual(got, test.want) {
				t.Errorf("Add for %s is %v, want %v", test.name, got, test.want)
			}
		})
	}
}

// TestSchedule_Update verifies Schedule.Update: a known schedule updates
// with 200, an unknown one fails with 404.
func TestSchedule_Update(t *testing.T) {
	// setup context
	s := httptest.NewServer(server.FakeHandler())
	c, err := NewClient(s.URL, "", nil)
	if err != nil {
		t.Errorf("unable to create test client: %v", err)
	}
	var schedule library.Schedule
	err = json.Unmarshal([]byte(server.ScheduleResp), &schedule)
	if err != nil {
		t.Errorf("unable to create test schedule: %v", err)
	}
	type args struct {
		org      string
		repo     string
		schedule *library.Schedule
	}
	tests := []struct {
		failure  bool
		name     string
		args     args
		want     *library.Schedule
		wantResp int
	}{
		{
			failure: false,
			name:    "success with 200",
			args: args{
				org:  "github",
				repo: "octocat",
				schedule: &library.Schedule{
					Active: Bool(true),
					Name:   String("foo"),
					Entry:  String("@weekly"),
				},
			},
			want:     &schedule,
			wantResp: http.StatusOK,
		},
		{
			failure: true,
			name:    "failure with 404",
			args: args{
				org:  "github",
				repo: "octocat",
				schedule: &library.Schedule{
					Name: String("not-found"),
				},
			},
			want:     new(library.Schedule),
			wantResp: http.StatusNotFound,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			got, gotResp, err := c.Schedule.Update(test.args.org, test.args.repo, test.args.schedule)
			if test.failure {
				if err == nil {
					t.Errorf("Update for %s should have returned err", test.name)
				}
				return
			}
			if err != nil {
				t.Errorf("Update for %s returned err: %v", test.name, err)
			}
			if !reflect.DeepEqual(gotResp.StatusCode, test.wantResp) {
				t.Errorf("Update for %s is %v, want %v", test.name, gotResp.StatusCode, test.wantResp)
			}
			if !reflect.DeepEqual(got, test.want) {
				t.Errorf("Update for %s is %v, want %v", test.name, got, test.want)
			}
		})
	}
}

// TestSchedule_Remove verifies Schedule.Remove: deleting a known schedule
// returns 200 with a confirmation message, an unknown one fails with 404.
func TestSchedule_Remove(t *testing.T) {
	// setup context
	s := httptest.NewServer(server.FakeHandler())
	c, err := NewClient(s.URL, "", nil)
	if err != nil {
		t.Errorf("unable to create test client: %v", err)
	}
	type args struct {
		org      string
		repo     string
		schedule string
	}
	tests := []struct {
		failure  bool
		name     string
		args     args
		want     *string
		wantResp int
	}{
		{
			failure: false,
			name:    "success with 200",
			args: args{
				org:      "github",
				repo:     "octocat",
				schedule: "foo",
			},
			want:     String("schedule foo deleted"),
			wantResp: http.StatusOK,
		},
		{
			failure: true,
			name:    "failure with 404",
			args: args{
				org:      "github",
				repo:     "octocat",
				schedule: "not-found",
			},
			want:     String("Schedule not-found does not exist"),
			wantResp: http.StatusNotFound,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			got, gotResp, err := c.Schedule.Remove(test.args.org, test.args.repo, test.args.schedule)
			if test.failure {
				if err == nil {
					t.Errorf("Remove for %s should have returned err", test.name)
				}
				return
			}
			if err != nil {
				t.Errorf("Remove for %s returned err: %v", test.name, err)
			}
			if !reflect.DeepEqual(gotResp.StatusCode, test.wantResp) {
				t.Errorf("Remove for %s is %v, want %v", test.name, gotResp.StatusCode, test.wantResp)
			}
			if !reflect.DeepEqual(got, test.want) {
				t.Errorf("Remove for %s is %v, want %v", test.name, got, test.want)
			}
		})
	}
}

// ExampleScheduleService_Get documents fetching a single schedule.
func ExampleScheduleService_Get() {
	// create a new vela client for interacting with server
	c, err := NewClient("http://localhost:8080", "", nil)
	if err != nil {
		fmt.Println(err)
	}
	// set new token in existing client
	c.Authentication.SetPersonalAccessTokenAuth("token")
	// get a schedule from a repo in the server
	schedule, resp, err := c.Schedule.Get("github", "octocat", "nightly")
	if err != nil {
		fmt.Println(err)
	}
	fmt.Printf("received response code %d, for schedule %+v", resp.StatusCode, schedule)
}

// ExampleScheduleService_GetAll documents listing a repo's schedules.
func ExampleScheduleService_GetAll() {
	// create a new vela client for interacting with server
	c, err := NewClient("http://localhost:8080", "", nil)
	if err != nil {
		fmt.Println(err)
	}
	// set new token in existing client
	c.Authentication.SetPersonalAccessTokenAuth("token")
	// get all the schedules from a repo in the server
	schedules, resp, err := c.Schedule.GetAll("github", "octocat", nil)
	if err != nil {
		fmt.Println(err)
	}
	fmt.Printf("received response code %d, for schedules %+v", resp.StatusCode, schedules)
}

// ExampleScheduleService_Add documents creating a schedule.
func ExampleScheduleService_Add() {
	// create a new vela client for interacting with server
	c, err := NewClient("http://localhost:8080", "", nil)
	if err != nil {
		fmt.Println(err)
	}
	// set new token in existing client
	c.Authentication.SetPersonalAccessTokenAuth("token")
	req := library.Schedule{
		Active: Bool(true),
		Name:   String("nightly"),
		Entry:  String("0 0 * * *"),
	}
	// create the schedule in the server
	schedule, resp, err := c.Schedule.Add("github", "octocat", &req)
	if err != nil {
		fmt.Println(err)
	}
	fmt.Printf("received response code %d, for schedule %+v", resp.StatusCode, schedule)
}

// ExampleScheduleService_Update documents updating a schedule.
func ExampleScheduleService_Update() {
	// create a new vela client for interacting with server
	c, err := NewClient("http://localhost:8080", "", nil)
	if err != nil {
		fmt.Println(err)
	}
	// set new token in existing client
	c.Authentication.SetPersonalAccessTokenAuth("token")
	req := library.Schedule{
		Active: Bool(false),
		Name:   String("nightly"),
		Entry:  String("0 0 * * *"),
	}
	// update the schedule in the server
	schedule, resp, err := c.Schedule.Update("github", "octocat", &req)
	if err != nil {
		fmt.Println(err)
	}
	fmt.Printf("received response code %d, for schedule %+v", resp.StatusCode, schedule)
}

// ExampleScheduleService_Remove documents deleting a schedule.
func ExampleScheduleService_Remove() {
	// create a new vela client for interacting with server
	c, err := NewClient("http://localhost:8080", "", nil)
	if err != nil {
		fmt.Println(err)
	}
	// set new token in existing client
	c.Authentication.SetPersonalAccessTokenAuth("token")
	// remove the schedule from the server
	schedule, resp, err := c.Schedule.Remove("github", "octocat", "nightly")
	if err != nil {
		fmt.Println(err)
	}
	fmt.Printf("received response code %d, for step %+v", resp.StatusCode, schedule)
}
package criteria

import (
	"github.com/open-policy-agent/opa/ast"
	"github.com/pomerium/pomerium/pkg/policy/generator"
	"github.com/pomerium/pomerium/pkg/policy/parser"
)

// corsPreflightBody matches a CORS preflight request: an OPTIONS request
// that carries both Access-Control-Request-Method and Origin headers.
var corsPreflightBody = ast.Body{
	ast.MustParseExpr(`input.http.method == "OPTIONS"`),
	ast.MustParseExpr(`count(object.get(input.http.headers, "Access-Control-Request-Method", [])) > 0`),
	ast.MustParseExpr(`count(object.get(input.http.headers, "Origin", [])) > 0`),
}

// corsPreflightCriterion implements the "cors_preflight" policy criterion.
type corsPreflightCriterion struct {
	g *Generator
}

// DataType reports that this criterion takes no input value.
func (corsPreflightCriterion) DataType() CriterionDataType {
	return generator.CriterionDataTypeUnused
}

// Name returns the criterion's name as used in policy files.
func (corsPreflightCriterion) Name() string {
	return "cors_preflight"
}

// GenerateRule builds the Rego rule for this criterion; the subject and
// value parameters are unused because the match is fixed.
func (c corsPreflightCriterion) GenerateRule(_ string, _ parser.Value) (*ast.Rule, []*ast.Rule, error) {
	rule := NewCriterionRule(c.g, c.Name(), ReasonCORSRequest, ReasonNonCORSRequest, corsPreflightBody)
	return rule, nil, nil
}

// CORSPreflight returns a Criterion which returns true if the input request is a CORS preflight request.
func CORSPreflight(generator *Generator) Criterion {
	return corsPreflightCriterion{g: generator}
}

// init registers the criterion with the global criteria registry.
func init() {
	Register(CORSPreflight)
}
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

const (
	ChildCount     = 3 // number of consumer goroutines
	ConsumerPeriod = 3 // messages produced per consumer
)

func main() {
	channelNotify()
	fmt.Println("main process exit!")
}

// (Translated from the original Chinese commentary.)
// Scheduling between execution units of different granularity usually
// relies on OS-level signals, and the cost of context switches makes that
// granularity matter a lot — user-space coroutines are cheap. Any language
// can have them via a runtime library; Go ships goroutines as a first-class
// language feature, which makes them friendlier to use. Using a channel as
// the shutdown signal is essentially syntactic sugar, but it greatly
// improves readability and maintainability.

// channelNotify fills a buffered channel with ChildCount*ConsumerPeriod
// messages, drains it from ChildCount consumer goroutines that count
// consumed items atomically, then shuts the consumers down through
// per-child signal channels and reports the final count.
func channelNotify() {
	var ops uint64
	var wg sync.WaitGroup
	messages := make(chan int, ChildCount*ConsumerPeriod)
	// Pre-fill the buffered channel with all the work items.
	for i := 0; i < ConsumerPeriod*ChildCount; i++ {
		messages <- i
	}
	defer close(messages)
	// One shutdown-signal channel per consumer.
	chans := make([]chan bool, ChildCount)
	for i := 0; i < ChildCount; i++ {
		chans[i] = make(chan bool)
	}
	for i := 0; i < ChildCount; i++ {
		wg.Add(1)
		child := chans[i]
		go func() {
			ticker := time.NewTicker(1 * time.Microsecond)
			// Fix: stop the ticker when the consumer exits so its timer
			// resources are released promptly.
			defer ticker.Stop()
			for range ticker.C { // fix: `for _ = range` is non-idiomatic
				select {
				case <-child:
					fmt.Println("child process interrupt...")
					wg.Done()
					return
				case m := <-messages:
					// Fix: receive directly inside the select instead of
					// the previous racy `len(messages) > 0` check followed
					// by a blocking receive — two consumers could both pass
					// the check for the last item and one would block
					// forever, deadlocking wg.Wait.
					fmt.Printf("fetch message: %d\n", m)
					// Fix: print the value returned by AddUint64 rather
					// than re-reading ops unsynchronized (a data race).
					fmt.Println("ops:", atomic.AddUint64(&ops, 1))
				default:
				}
			}
		}()
	}
	// Wait an extra second so all items get consumed, then signal shutdown.
	time.Sleep((1) * time.Second)
	for i := 0; i < ChildCount; i++ {
		close(chans[i])
	}
	wg.Wait()
	// Safe unsynchronized read: wg.Wait establishes happens-after.
	fmt.Println("final ops:", ops)
}
package main

import (
	"encoding/json"
	"fmt"
)

// person demonstrates JSON struct tags mapping Go fields to JSON keys.
type person struct {
	Name string `json:"name"`
	Age  int    `json:"age"`
}

func main() {
	// Serialize: struct -> JSON bytes.
	p1 := person{
		Name: "小明",
		Age:  18,
	}
	b, err := json.Marshal(p1)
	if err != nil {
		fmt.Printf("marshal failed,err %v", err)
		return
	}
	fmt.Println(string(b))
	fmt.Printf("%#v\n", string(b))
	// Deserialize: JSON bytes -> struct. The target must be passed by
	// address so Unmarshal can populate it.
	str := []byte(`{"name":"小红","age":18}`)
	var stud person
	// Fix: check the Unmarshal error instead of silently ignoring it —
	// a malformed payload previously left stud zero-valued with no report.
	if err := json.Unmarshal(str, &stud); err != nil {
		fmt.Printf("unmarshal failed,err %v", err)
		return
	}
	fmt.Println(stud)
	fmt.Printf("%#v\n", stud)
	fmt.Println(p1.Name)
}
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/33e8a1c9cad22a5946ac735c4fba31af2da2cec2
// NOTE(review): this file is machine-generated; hand edits (including these
// comments) will be lost on regeneration — change the generator instead.

package types

import (
	"bytes"
	"encoding/json"
	"errors"
	"io"
	"strconv"
)

// AggregationBreakdown type.
//
// https://github.com/elastic/elasticsearch-specification/blob/33e8a1c9cad22a5946ac735c4fba31af2da2cec2/specification/_global/search/_types/profile.ts#L23-L36
type AggregationBreakdown struct {
	BuildAggregation        int64  `json:"build_aggregation"`
	BuildAggregationCount   int64  `json:"build_aggregation_count"`
	BuildLeafCollector      int64  `json:"build_leaf_collector"`
	BuildLeafCollectorCount int64  `json:"build_leaf_collector_count"`
	Collect                 int64  `json:"collect"`
	CollectCount            int64  `json:"collect_count"`
	Initialize              int64  `json:"initialize"`
	InitializeCount         int64  `json:"initialize_count"`
	PostCollection          *int64 `json:"post_collection,omitempty"`
	PostCollectionCount     *int64 `json:"post_collection_count,omitempty"`
	Reduce                  int64  `json:"reduce"`
	ReduceCount             int64  `json:"reduce_count"`
}

// UnmarshalJSON decodes each known key, accepting the value either as a
// JSON number or as a numeric string (Elasticsearch may emit either form).
// NOTE(review): the inner dec.Decode errors are ignored by the generator's
// template; a non-string/non-number value silently leaves the field unset.
func (s *AggregationBreakdown) UnmarshalJSON(data []byte) error {

	dec := json.NewDecoder(bytes.NewReader(data))

	for {
		t, err := dec.Token()
		if err != nil {
			if errors.Is(err, io.EOF) {
				break
			}
			return err
		}

		switch t {

		case "build_aggregation":
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseInt(v, 10, 64)
				if err != nil {
					return err
				}
				s.BuildAggregation = value
			case float64:
				f := int64(v)
				s.BuildAggregation = f
			}

		case "build_aggregation_count":
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseInt(v, 10, 64)
				if err != nil {
					return err
				}
				s.BuildAggregationCount = value
			case float64:
				f := int64(v)
				s.BuildAggregationCount = f
			}

		case "build_leaf_collector":
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseInt(v, 10, 64)
				if err != nil {
					return err
				}
				s.BuildLeafCollector = value
			case float64:
				f := int64(v)
				s.BuildLeafCollector = f
			}

		case "build_leaf_collector_count":
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseInt(v, 10, 64)
				if err != nil {
					return err
				}
				s.BuildLeafCollectorCount = value
			case float64:
				f := int64(v)
				s.BuildLeafCollectorCount = f
			}

		case "collect":
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseInt(v, 10, 64)
				if err != nil {
					return err
				}
				s.Collect = value
			case float64:
				f := int64(v)
				s.Collect = f
			}

		case "collect_count":
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseInt(v, 10, 64)
				if err != nil {
					return err
				}
				s.CollectCount = value
			case float64:
				f := int64(v)
				s.CollectCount = f
			}

		case "initialize":
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseInt(v, 10, 64)
				if err != nil {
					return err
				}
				s.Initialize = value
			case float64:
				f := int64(v)
				s.Initialize = f
			}

		case "initialize_count":
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseInt(v, 10, 64)
				if err != nil {
					return err
				}
				s.InitializeCount = value
			case float64:
				f := int64(v)
				s.InitializeCount = f
			}

		case "post_collection":
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseInt(v, 10, 64)
				if err != nil {
					return err
				}
				s.PostCollection = &value
			case float64:
				f := int64(v)
				s.PostCollection = &f
			}

		case "post_collection_count":
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseInt(v, 10, 64)
				if err != nil {
					return err
				}
				s.PostCollectionCount = &value
			case float64:
				f := int64(v)
				s.PostCollectionCount = &f
			}

		case "reduce":
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseInt(v, 10, 64)
				if err != nil {
					return err
				}
				s.Reduce = value
			case float64:
				f := int64(v)
				s.Reduce = f
			}

		case "reduce_count":
			var tmp interface{}
			dec.Decode(&tmp)
			switch v := tmp.(type) {
			case string:
				value, err := strconv.ParseInt(v, 10, 64)
				if err != nil {
					return err
				}
				s.ReduceCount = value
			case float64:
				f := int64(v)
				s.ReduceCount = f
			}

		}
	}
	return nil
}

// NewAggregationBreakdown returns a AggregationBreakdown.
func NewAggregationBreakdown() *AggregationBreakdown {
	r := &AggregationBreakdown{}

	return r
}
package main

import "fmt"

// These values map to rlimit constants defined in linux (see setrlimit(2));
// the iota ordering below must match the kernel's resource numbering.
const (
	RlimitCPU        = iota // CPU time in sec
	RlimitFsize             // Maximum filesize
	RlimitData              // max data size
	RlimitStack             // max stack size
	RlimitCore              // max core file size
	RlimitRss               // max resident set size
	RlimitNproc             // max number of processes
	RlimitNofile            // max number of open files
	RlimitMemlock           // max locked-in-memory address space
	RlimitAs                // address space limit
	RlimitLocks             // maximum file locks held
	RlimitSigpending        // max number of pending signals
	RlimitMsgqueue          // maximum bytes in POSIX mqueues
	RlimitNice              // max nice prio allowed to raise to
	RlimitRtprio            // maximum realtime priority
	RlimitRttime            // timeout for RT tasks in us
)

// rlimitMap translates the textual RLIMIT_* names (as found in config
// files) to their numeric resource constants. Note the pre-existing
// "RLIMIT_SGPENDING" spelling is kept for compatibility with existing
// configs (the kernel name is RLIMIT_SIGPENDING).
var rlimitMap = map[string]int{
	"RLIMIT_CPU":       RlimitCPU,
	"RLIMIT_FSIZE":     RlimitFsize,
	"RLIMIT_DATA":      RlimitData,
	"RLIMIT_STACK":     RlimitStack,
	"RLIMIT_CORE":      RlimitCore,
	"RLIMIT_RSS":       RlimitRss,
	"RLIMIT_NPROC":     RlimitNproc,
	"RLIMIT_NOFILE":    RlimitNofile,
	"RLIMIT_MEMLOCK":   RlimitMemlock,
	"RLIMIT_AS":        RlimitAs,
	"RLIMIT_LOCKS":     RlimitLocks,
	"RLIMIT_SGPENDING": RlimitSigpending,
	"RLIMIT_MSGQUEUE":  RlimitMsgqueue,
	"RLIMIT_NICE":      RlimitNice,
	"RLIMIT_RTPRIO":    RlimitRtprio,
	"RLIMIT_RTTIME":    RlimitRttime,
}

// strToRlimit translates an "RLIMIT_*" name to its numeric rlimit resource
// constant, or returns an error for an unknown name.
func strToRlimit(key string) (int, error) {
	rl, ok := rlimitMap[key]
	if !ok {
		// Fix: error strings start lowercase per Go convention (ST1005);
		// %q keeps an empty or whitespace-only key visible in the message.
		return 0, fmt.Errorf("wrong rlimit value: %q", key)
	}
	return rl, nil
}
package utils

import "fmt"

// Printer is a minimal abstraction over formatted output, allowing callers
// to swap the destination (or a test double) for fmt.Printf.
type Printer interface {
	Printf(format string, args ...interface{})
}

// PrinterImpl is the default Printer; it writes to standard output.
type PrinterImpl struct{}

// NewPrinter constructs the default stdout-backed Printer.
func NewPrinter() Printer {
	impl := &PrinterImpl{}
	return impl
}

// Printf forwards the format string and arguments verbatim to fmt.Printf.
func (impl *PrinterImpl) Printf(format string, args ...interface{}) {
	fmt.Printf(format, args...)
}
package main import ( "fmt" "github.com/crosbymichael/boss/config" "github.com/hashicorp/consul/api" ) // Consul is a connection to the local consul agent type Consul struct { client *api.Client } // Register sends the provided service registration to the local agent func (c *Consul) Register(id, name, ip string, s config.Service) error { reg := c.registration(id, name, ip, s) if err := c.client.Agent().ServiceRegister(reg); err != nil { return err } return c.client.Agent().EnableServiceMaintenance(id, "created") } // Deregister sends the provided service registration to the local agent func (c *Consul) Deregister(id string) error { return c.client.Agent().ServiceDeregister(id) } // EnableMaintainance places the specific service in maintainace mode func (c *Consul) EnableMaintainance(id, reason string) error { return c.client.Agent().EnableServiceMaintenance(id, reason) } // DisableMaintainance removes the specific service out of maintainace mode func (c *Consul) DisableMaintainance(id string) error { return c.client.Agent().DisableServiceMaintenance(id) } func (c *Consul) registration(id, name, ip string, s config.Service) *api.AgentServiceRegistration { reg := &api.AgentServiceRegistration{ ID: id, Name: name, Tags: s.Labels, Port: s.Port, Address: ip, } for _, c := range s.Checks { var check api.AgentServiceCheck check.Name = name if c.Interval != 0 { check.Interval = fmt.Sprintf("%ds", c.Interval) } if c.Timeout != 0 { check.Timeout = fmt.Sprintf("%ds", c.Timeout) } addr := fmt.Sprintf("%s:%d", ip, s.Port) switch c.Type { case config.HTTP: check.HTTP = addr case config.TCP: check.TCP = addr case config.GRPC: check.GRPC = addr } reg.Checks = append(reg.Checks, &check) } return reg }
package search import ( logger "github.com/joaosoft/logger" "github.com/joaosoft/manager" ) // SearchOption ... type SearchOption func(search *Search) // Reconfigure ... func (search *Search) Reconfigure(options ...SearchOption) { for _, option := range options { option(search) } } // WithConfiguration ... func WithConfiguration(config *SearchConfig) SearchOption { return func(search *Search) { search.config = config } } // WithLogger ... func WithLogger(logger logger.ILogger) SearchOption { return func(search *Search) { search.logger = logger search.isLogExternal = true } } // WithLogLevel ... func WithLogLevel(level logger.Level) SearchOption { return func(search *Search) { search.logger.SetLevel(level) } } // WithManager ... func WithManager(mgr *manager.Manager) SearchOption { return func(search *Search) { search.pm = mgr } } // WithMaxSize ... func WithMaxSize(maxSize int) SearchOption { return func(search *Search) { search.maxSize = maxSize } }
package git import ( "testing" ) func TestRemotePush(t *testing.T) { t.Parallel() repo := createBareTestRepo(t) defer cleanupTestRepo(t, repo) localRepo := createTestRepo(t) defer cleanupTestRepo(t, localRepo) remote, err := localRepo.Remotes.Create("test_push", repo.Path()) checkFatal(t, err) defer remote.Free() seedTestRepo(t, localRepo) err = remote.Push([]string{"refs/heads/master"}, nil) checkFatal(t, err) ref, err := localRepo.References.Lookup("refs/remotes/test_push/master") checkFatal(t, err) defer ref.Free() ref, err = repo.References.Lookup("refs/heads/master") checkFatal(t, err) defer ref.Free() }
package domain_test

import (
	"encoding/json"

	"github.com/pivotal-cf-experimental/envoy/domain"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

// Verifies the JSON wire format of domain.Catalog: field names, nesting,
// and which optional fields are omitted when unset.
var _ = Describe("Catalog", func() {
	var catalog domain.Catalog

	Context("catalog with all optional fields", func() {
		// Shadows the outer catalog on purpose; this context builds its own.
		var catalog domain.Catalog

		BeforeEach(func() {
			catalog = domain.Catalog{
				Services: []domain.Service{
					{
						ID:          "test-service",
						Name:        "testing",
						Description: "A testable service",
						Bindable:    true,
						Tags:        []string{"testable", "fast"},
						Metadata: &domain.ServiceMetadata{
							DisplayName:         "Testable Service",
							ImageURL:            "data:image/png;base64,iVBORw0KGgoAAAANSUhEUg",
							LongDescription:     "This service is used to test things",
							ProviderDisplayName: "My Testing Framework",
							DocumentationURL:    "http://docs.example.com",
							SupportURL:          "http://support.example.com",
						},
						Requires: []string{"syslog_drain"},
						Plans: []domain.Plan{
							{
								ID:          "test-plan-1",
								Name:        "first",
								Description: "The first plan",
								Metadata: &domain.PlanMetadata{
									Bullets: []string{
										"Is not a real thing",
										"Runs on unicorns",
									},
									Costs: []domain.Cost{
										{
											Amount: domain.Amount{
												"usd": 12.3,
											},
											Unit: "MONTHLY",
										},
									},
									DisplayName: "This plan is first",
								},
							},
							{
								ID:          "test-plan-2",
								Name:        "second",
								Description: "The second plan",
								Free:        domain.FreeFalse,
							},
						},
						DashboardClient: &domain.DashboardClient{
							ID:          "client-1",
							Secret:      "super-secret",
							RedirectURI: "http://dashboard.example.com",
						},
					},
				},
			}
		})

		It("can be correctly represented in JSON", func() {
			document, err := json.Marshal(catalog)
			Expect(err).NotTo(HaveOccurred())
			// MatchJSON compares parsed JSON, so formatting differences are ignored.
			Expect(document).To(MatchJSON([]byte(`
				{
					"services": [
						{
							"id": "test-service",
							"name": "testing",
							"description": "A testable service",
							"bindable": true,
							"plans": [
								{
									"id": "test-plan-1",
									"name": "first",
									"description": "The first plan",
									"metadata": {
										"bullets": [
											"Is not a real thing",
											"Runs on unicorns"
										],
										"costs": [
											{
												"amount": {
													"usd": 12.3
												},
												"unit": "MONTHLY"
											}
										],
										"displayName": "This plan is first"
									}
								},
								{
									"id": "test-plan-2",
									"name": "second",
									"description": "The second plan",
									"free": false
								}
							],
							"metadata": {
								"displayName": "Testable Service",
								"imageUrl": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUg",
								"longDescription": "This service is used to test things",
								"providerDisplayName": "My Testing Framework",
								"documentationUrl": "http://docs.example.com",
								"supportUrl": "http://support.example.com"
							},
							"tags": [
								"testable",
								"fast"
							],
							"requires": [
								"syslog_drain"
							],
							"dashboard_client": {
								"id": "client-1",
								"secret": "super-secret",
								"redirect_uri": "http://dashboard.example.com"
							}
						}
					]
				}
			`)))
		})
	})

	Context("catalog with only required fields", func() {
		It("can be correctly represented in JSON", func() {
			catalog = domain.Catalog{
				Services: []domain.Service{
					{
						ID:          "test-service",
						Name:        "my-test",
						Description: "Testing the catalog",
						Bindable:    false,
						Plans: []domain.Plan{
							{
								ID:          "plan-1",
								Name:        "First plan",
								Description: "this is the first plan",
							},
						},
					},
				},
			}
			document, err := json.Marshal(catalog)
			Expect(err).NotTo(HaveOccurred())
			Expect(document).To(MatchJSON([]byte(`
				{
					"services": [
						{
							"id": "test-service",
							"name": "my-test",
							"description": "Testing the catalog",
							"bindable": false,
							"plans": [
								{
									"id": "plan-1",
									"name": "First plan",
									"description": "this is the first plan"
								}
							]
						}
					]
				}
			`)))
		})
	})
})
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"strings"
)

// deleet replaces common "leet" symbols with their vowel equivalents:
// @->a, &->e, !->i, *->o, #->u. All other characters pass through unchanged.
func deleet(msg string) string {
	r := strings.NewReplacer(
		"@", "a",
		"&", "e",
		"!", "i",
		"*", "o",
		"#", "u",
	)
	return r.Replace(msg)
}

// main reads all of stdin, de-leets it, and prints the result.
func main() {
	s, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		panic(err)
	}
	fmt.Print(deleet(string(s)))
}
package 二叉树 import "github.com/Lxy417165709/LeetCode-Golang/新刷题/util/math_util" func diameterOfBinaryTree(root *TreeNode) int { _, diameter := getHeightAndDiameterOfBinaryTree(root) return diameter } func getHeightAndDiameterOfBinaryTree(root *TreeNode) (int, int) { if root == nil { return 0, -1 } leftHeight, leftDiameter := getHeightAndDiameterOfBinaryTree(root.Left) rightHeight, rightDiameter := getHeightAndDiameterOfBinaryTree(root.Right) return math_util.Max(leftHeight, rightHeight) + 1, math_util.Max(leftHeight+rightHeight, leftDiameter, rightDiameter) }
// Generated source file.
// Edit files in 'src' folder
package maestro

// Copyright (c) 2018, Arm Limited and affiliates.
// SPDX-License-Identifier: Apache-2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import (
	"github.com/edhemphill/relaymq"
	relaymqClient "github.com/edhemphill/relaymq/client"
	"github.com/armPelionEdge/maestro/dcsAPI"
	"github.com/armPelionEdge/hashmap" // thread-safe, fast hashmaps
	"encoding/json"
	"github.com/armPelionEdge/maestro/log"
	"github.com/armPelionEdge/maestro/tasks"
	"github.com/armPelionEdge/maestro/processes"
	"github.com/armPelionEdge/maestroSpecs"
	"github.com/armPelionEdge/maestro/maestroConfig"
	"time"
	"unsafe"
	"strconv"
	IFDEBUG("fmt")
)

// Defaults used when the maestro config leaves the queue name / port unset.
const RELAYMQ_DEFAULT_QUEUE_NAME = "upgrades"
const RELAYMQ_DEFAULT_PORT = 443

const (
	// control value sent on controlChan to stop the listener goroutine
	stop_driver = 0
)

// relayMQDriver owns a RelayMQ client connection and a listener goroutine
// that turns inbound queue messages into maestro image-install tasks.
type relayMQDriver struct {
	client *relaymqClient.RelayMQClient
	clientConfig *relaymqClient.RelayMQClientConfig
	config *maestroConfig.RelayMQDriverConfig
	listenerRunning bool
	controlChan chan int
}

// NewRelayMQDriver translates the maestro driver config into a RelayMQ
// client config (applying defaults for port and queue name, copying TLS
// material) and constructs the client. The connection is not opened here;
// call Connect.
func NewRelayMQDriver(config *maestroConfig.RelayMQDriverConfig) (driver *relayMQDriver, err error) {
	clientConfig := new(relaymqClient.RelayMQClientConfig)
	if(len(config.RootCA)>0) {
		clientConfig.RootCA = []byte(config.RootCA)
	}
	if(len(config.ClientCertificate)>0) {
		clientConfig.ClientCertificate = []byte(config.ClientCertificate)
	}
	if(len(config.ClientKey)>0) {
		clientConfig.ClientKey = []byte(config.ClientKey)
	}
	clientConfig.ServerName = config.ServerName
	// NoValidate / EnableLogging are string-typed in the maestro config;
	// only the exact string "true" enables them.
	if config.NoValidate == "true" {
		clientConfig.NoValidate = true
	}
	clientConfig.Host = config.Host
	if len(config.Port) > 0 {
		port, err2 := strconv.Atoi(config.Port)
		if err2 != nil {
			log.MaestroErrorf("RelayMQDriver>> Invalid port number: \"%s\". Using default.", config.Port)
			clientConfig.Port = RELAYMQ_DEFAULT_PORT
		} else {
			clientConfig.Port = port
			// port 0 is treated the same as "unset"
			if(port == 0) {
				clientConfig.Port = RELAYMQ_DEFAULT_PORT
			}
		}
	} else {
		clientConfig.Port = RELAYMQ_DEFAULT_PORT
	}
	if config.EnableLogging == "true" {
		clientConfig.EnableLogging = true
	}
	clientConfig.PrefetchLimit = config.PrefetchLimit
	if(len(config.QueueName)>0) {
		clientConfig.QueueName = config.QueueName
	} else {
		clientConfig.QueueName = RELAYMQ_DEFAULT_QUEUE_NAME
	}
	driver = new(relayMQDriver)
	driver.clientConfig = clientConfig
	driver.config = config
	driver.controlChan = make(chan int)
	driver.client, err = relaymqClient.New(*driver.clientConfig)
	return
}

// Connect opens the RelayMQ connection and starts the listener goroutine
// if it is not already running.
// NOTE(review): nothing visibly sets listenerRunning to true before the
// goroutine starts (only listener() sets it false on exit), so repeated
// Connect calls look like they could spawn multiple listeners — confirm.
func (this *relayMQDriver) Connect() (err error) {
	err = this.client.Connect()
	if err == nil {
		if !this.listenerRunning {
			go this.listener()
		}
	}
	return
}

// Disconnect closes the client and signals the listener goroutine to stop.
// Note: the send on controlChan blocks until the listener receives it.
func (this *relayMQDriver) Disconnect() {
	this.client.Disconnect()
	this.controlChan <- stop_driver
}

// messages should be kept in these while they are being processed
// and after they are succesfully processed
// but not if they fail
var messagesProcessed *hashmap.HashMap // by Message.ID to *heldMessage
var messagesProcessedBySignature *hashmap.HashMap

// heldMessage pairs an inbound RelayMQ message with its decoded payload
// and the client it arrived on (needed later to Ack it).
type heldMessage struct {
	msg *relaymq.Message
	decodedIn *dcsAPI.GatewayConfigMessage
	recieved int64 // time as Unix epoch in nanoseconds
	signature string
	client *relaymqClient.RelayMQClient
}

// newHeldMessage allocates a heldMessage bound to this driver's client.
func (this *relayMQDriver) newHeldMessage() (ret *heldMessage) {
	ret = new(heldMessage)
	ret.client = this.client
	return
}

func init() {
	messagesProcessed = hashmap.New(10)
	messagesProcessedBySignature = hashmap.New(10)
}

// relayMQ_OpAckHandler receives task-completion callbacks for a task that
// originated from a RelayMQ message, so the message can be Acked.
type relayMQ_OpAckHandler struct {
	op string // add_image is first, then start_job (when we use JobManager to handle task launch)
	task *tasks.MaestroTask // the Task which will - hopefully - be ACKed
	mqMsg *heldMessage // originating relayMQ message
}

// transposeDCSToMaestroJob fills a maestro job definition from a DCS
// gateway config message: job name/composite ID from the first Job entry,
// exec command from the installed image location of the first Config entry.
func transposeDCSToMaestroJob(dcs *dcsAPI.GatewayConfigMessage, job *maestroSpecs.JobDefinitionPayload) {
	if len(dcs.Jobs)>0 {
		job.Job = dcs.Jobs[0].Job
		job.CompositeId = dcs.Jobs[0].CompositeId
	} else {
		job.Job = "MISSING_NAME"
	}
	if len(dcs.Configs) > 0 {
		appname := dcs.Configs[0].Name
		ret, ok := ImageManagerGetInstance().LookupImage(appname)
		if ok {
			job.ExecCmd = ret.location // the command to start for now is just the directory of the deviceJS script
		}
	}
	if len(job.ExecCmd) < 1 {
		log.MaestroErrorf("RelayMQDriver>> Can't start process b/c don't know where image is.")
	}
	job.ContainerTemplate = "deviceJS_process"
	// FIXME FIXME deal with config, composite ID, etc.
}

// SendFinishedAck is invoked when the submitted task completes: it
// registers/validates/restarts the corresponding job and Acks the
// originating RelayMQ message.
func (this *relayMQ_OpAckHandler) SendFinishedAck(task *tasks.MaestroTask) (err error) {
	DEBUG_OUT("RELAYMQ>>>>>>>>>>>>>>>>>>> GOT SendFinishedAck() call -> %s\n",this.op)
	log.MaestroInfof("RelayMQDriver - submitted Task finished: %s",task.Id)
	job := new(maestroSpecs.JobDefinitionPayload)
	transposeDCSToMaestroJob(this.mqMsg.decodedIn,job)
	DEBUG_OUT("RELAYMQ>>>>>>>>>>>>>>>>>>> transposed Job to register %+v\n",job)
	// Register the Job and start it
	err = processes.RegisterMutableJob(job)
	if err != nil {
		log.MaestroErrorf("RelayMQDriver --> Failed to register Job: %s\n",err.Error())
	} else {
		// start the Job
		err = processes.ValidateJobs()
		if err == nil {
			processes.RestartJob(job.GetJobName())
		} else {
			log.MaestroErrorf("RelayMQDriver --> Problem validating jobs: %s\n",err.Error())
		}
	}
	this.mqMsg.client.Ack(this.mqMsg.msg.ID)
	return
}

// SendFailedAck is invoked when the submitted task fails; the message is
// intentionally not Acked here.
func (this *relayMQ_OpAckHandler) SendFailedAck(task *tasks.MaestroTask) (err error) {
	DEBUG_OUT("RELAYMQ>>>>>>>>>>>>>>>>>>> [FAIL] GOT SendFailedAck() call -> %s\n",this.op)
	log.MaestroErrorf("RelayMQDriver - submitted Task failed: %s",task.Id)
	return
}

// main thread for handling inbound RelayMQ messages.
// The main use case for these messages today are:
// - Installing images
// - Starting / Stopping Jobs
func (this *relayMQDriver) listener() {
	messageCount := 0
	messageChan := this.client.Messages()
mainLoop:
	for {
		select {
		case control := <-this.controlChan:
			if control == stop_driver {
				DEBUG_OUT("RelayMQDriver.listener() got shutdown\n")
				break mainLoop
			}
		case message := <-messageChan:
			DEBUG_OUT("RELAYMQ>>>>>>>>>>>>>>>>> Got relayMQ message %+v\n",message)
			dcsUpdate := new(dcsAPI.GatewayConfigMessage)
			err := json.Unmarshal([]byte(message.Body),dcsUpdate)
			if err != nil {
				log.MaestroErrorf("Can't decode inbound RelayMQ message: %s\n",err.Error())
			} else {
				DEBUG_OUT("RELAYMQ>>>>>>>>>>>>>>>>> [unique seen: %d] decoded %+v\n",messageCount,dcsUpdate)
				// Dedup pass 1: by message ID.
				_, ok := messagesProcessed.GetStringKey(message.ID)
				if ok {
					DEBUG_OUT("RELAYMQ>>>>>>>>>>>>>>>>> Message seen and processed already.\n")
				} else {
					messageCount++
					DEBUG_OUT("RELAYMQ>>>>>>>>>>>>>>>>> Handle message\n")
					if len(dcsUpdate.Images) > 0 && len(dcsUpdate.Configs) > 0 {
						// Dedup pass 2: by the signature of the first image.
						sig := dcsUpdate.Images[0].ComputeSignature()
						_, ok := messagesProcessedBySignature.GetStringKey(sig)
						if ok {
							DEBUG_OUT("RELAYMQ>>>>>>>>>>>>>>>>> Duplicate message by signature\n")
							log.MaestroWarnf("RelayMQDriver: Saw duplicate message based on signature")
						} else {
							holder := this.newHeldMessage()
							holder.msg = &message
							holder.decodedIn = dcsUpdate
							holder.recieved = time.Now().UnixNano()
							messagesProcessed.Set(message.ID,unsafe.Pointer(holder))
							messagesProcessedBySignature.Set(sig,unsafe.Pointer(holder))
							// For now we will make a very unsafe assumption, that all
							// we need to do is install the App
							// TODO:
							// - See if a job exists with this name
							// - Shutdown if it does and remove
							//
							// - Install new image
							op := maestroSpecs.NewImageOpPayload()
							op.Op = "add"
							op.Image = &holder.decodedIn.Images[0]
							op.AppName = holder.decodedIn.Configs[0].Name
							DEBUG_OUT("RELAYMQ>>>>>>>>>>>>>>>>> formed payload - sending to TaskManager %+v\n",op)
							log.MaestroDebugf("RelayMQDriver: Have new recognized inbound payload - sending to TaskManager")
							task, err := tasks.CreateNewTask(op, TASK_SRC_RELAYMQ)
							if err == nil {
								handler := &relayMQ_OpAckHandler{op:"add_image",task:task,mqMsg:holder}
								err = tasks.EnqueTask(task,handler,false)
							}
							//
							// - Start Job
						}
					} else {
						log.MaestroErrorf(" Error in relayMQDriver: badly formed inbound 'update' - had no valid image")
						// TODO - ack this message - and fail it
					}
				}
			}
		}
	}
	this.listenerRunning = false
}
package testing import ( "fmt" "github.com/queueup-dev/qup-io/v2/writer" "sync" "testing" ) func TestAssert_Eq(t *testing.T) { a := Assert{Input: int64(1)} fmt.Print(a.Eq(int64(1))) } func TestAssert_Same(t *testing.T) { a := Assert{Input: float64(1)} fmt.Print(a.Same(int64(1))) } func TestAssertInstance(t *testing.T) { a := AssertInstance{} a.Eq("test") fmt.Print(a.Execute("test")) } func TestDummyAPI_Assert(t *testing.T) { var wg sync.WaitGroup dummyAPI := NewMockApi(t, StdLogger(1), &wg) dummyAPI.Assert().That("test_uri", "GET").RequestBody().Eq("test123") dummyAPI.Mock().When("test_uri", "GET").RespondWith( writer.NewJsonWriter(struct{ Hello string }{Hello: "world"}), nil, 200, ) }
package main

// LeetCode 48 "Rotate Image": given an n x n matrix, rotate it 90 degrees
// clockwise in place (no second matrix allowed).
//
// Example: [[1,2,3],[4,5,6],[7,8,9]] -> [[7,4,1],[8,5,2],[9,6,3]]

func main() {
	matrix := [][]int{{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}
	rotate(matrix)
}

// rotate rotates the n x n matrix 90 degrees clockwise in place by first
// transposing across the main diagonal and then reversing each row.
func rotate(matrix [][]int) {
	// 1. Transpose: swap matrix[r][c] with matrix[c][r] below the diagonal
	// (the diagonal itself needs no swap).
	for row := range matrix {
		for col := 0; col < row; col++ {
			matrix[row][col], matrix[col][row] = matrix[col][row], matrix[row][col]
		}
	}
	// 2. Reverse every row with a two-pointer tuple swap.
	for index := range matrix {
		for i, j := 0, len(matrix)-1; i < j; i, j = i+1, j-1 {
			matrix[index][i], matrix[index][j] = matrix[index][j], matrix[index][i]
		}
	}
}
package main import ( "encoding/json" "io" "net/http" "video_server/api/defs" ) func SendErrorResponse(writer http.ResponseWriter, errResp defs.ErrorResponse) { writer.WriteHeader(errResp.HttpSC) res, _ := json.Marshal(&errResp.Error) io.WriteString(writer, string(res)) } func SendNormalResponse(writer http.ResponseWriter, resp string, sc int) { writer.WriteHeader(sc) io.WriteString(writer, resp) }
//Package message used for control and display error or succes message
package message

import (
	"fmt"
	"net/http"
	"reflect"
	"strings"
)

// messages maps an application error code to its display template and the
// HTTP status to respond with. "{f}" placeholders are filled by New.
var messages = map[int]map[string]interface{}{
	0: {"message": "Page not found", "status": http.StatusNotFound},
	1: {"message": "You are not authorize", "status": http.StatusUnauthorized},
	2: {"message": "Forbidden endpoint", "status": http.StatusForbidden},
	3: {"message": "Data not found", "status": http.StatusNotFound},
	4: {"message": "Wrong parameter", "status": http.StatusBadRequest},
	5: {"message": "Data on field '{f}' not permitted", "status": http.StatusBadRequest},
	6: {"message": "{f} failed", "status": http.StatusBadRequest},
	7: {"message": "{f} success", "status": http.StatusOK},
	8: {"message": "Internal Server Error", "status": http.StatusInternalServerError},
}

//Error used for get message for display
type Error struct {
	Message    string `json:"message,omitempty"`
	Code       string `json:"code,omitempty"`
	Error      string `json:"error,omitempty"`
	StatusCode int    `json:"-"`
}

//New used for create 'Error' structure.
// code selects a template from messages; each entry of fields replaces one
// "{f}" placeholder in order. err may be nil, an error, or any other value.
// NOTE: an unknown code still panics on the template type assertions below
// (unchanged behavior) — callers must pass a defined code.
func New(code int, err interface{}, fields ...string) Error {
	// Copy the template so placeholder substitution never mutates the
	// shared messages map.
	message := make(map[string]interface{})
	for k, v := range messages[code] {
		message[k] = v
	}
	for _, field := range fields {
		message["message"] = strings.Replace(message["message"].(string), "{f}", field, 1)
	}
	// Previously this did err.(error).Error(), which panicked whenever a
	// caller passed a non-error value (e.g. a plain string). Render the
	// value defensively instead.
	errText := ""
	switch e := err.(type) {
	case nil:
		// keep empty error text
	case error:
		errText = e.Error()
	default:
		errText = fmt.Sprint(e)
	}
	return Error{
		StatusCode: message["status"].(int),
		Message:    message["message"].(string),
		Code:       fmt.Sprintf("%03d", code),
		Error:      errText,
	}
}

//IsInitial used for checking object 'Error' is empty
func (obj Error) IsInitial() bool {
	return reflect.DeepEqual(obj, Error{})
}
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package core_test

import (
	"encoding/base64"
	"fmt"
	"io"
	"os"
	"strings"
	"testing"

	"github.com/golang/snappy"
	"github.com/pingcap/tidb/config"
	"github.com/pingcap/tidb/parser/auth"
	"github.com/pingcap/tidb/sessionctx/variable"
	"github.com/pingcap/tidb/testkit"
	"github.com/pingcap/tidb/testkit/testdata"
	"github.com/pingcap/tidb/util/logutil"
	"github.com/pingcap/tidb/util/stmtsummary"
	"github.com/pingcap/tipb/go-tipb"
	"github.com/stretchr/testify/require"
)

// TestBinaryPlanSwitch asserts that the tidb_generate_binary_plan global
// variable controls whether a (base64 + snappy encoded) binary plan is
// recorded in both the slow log and the statement summary.
func TestBinaryPlanSwitch(t *testing.T) {
	// Redirect the slow log to a temp file and restore config on exit.
	originCfg := config.GetGlobalConfig()
	newCfg := *originCfg
	f, err := os.CreateTemp("", "tidb-slow-*.log")
	require.NoError(t, err)
	newCfg.Log.SlowQueryFile = f.Name()
	config.StoreGlobalConfig(&newCfg)
	defer func() {
		config.StoreGlobalConfig(originCfg)
		require.NoError(t, f.Close())
		require.NoError(t, os.Remove(newCfg.Log.SlowQueryFile))
	}()
	require.NoError(t, logutil.InitLogger(newCfg.Log.ToLogConfig()))

	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	require.NoError(t, tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil, nil))
	tk.MustExec(fmt.Sprintf("set @@tidb_slow_query_file='%v'", f.Name()))

	// Save and restore the switch under test.
	origin := tk.MustQuery("SELECT @@global.tidb_generate_binary_plan")
	originStr := origin.Rows()[0][0].(string)
	defer func() {
		tk.MustExec("set @@global.tidb_generate_binary_plan = '" + originStr + "'")
	}()

	tk.MustExec("use test")
	// 1. assert binary plan is generated if the variable is turned on
	tk.MustExec("set global tidb_generate_binary_plan = 1")
	tk.MustQuery("select sleep(1)")
	result := testdata.ConvertRowsToStrings(tk.MustQuery("select binary_plan from information_schema.slow_query "+
		`where query like "%select sleep(1)%" and query not like "%like%" `+
		"limit 1;").Rows())
	require.Len(t, result, 1)
	s := result[0]
	// The stored plan must round-trip: base64 -> snappy -> protobuf.
	b, err := base64.StdEncoding.DecodeString(s)
	require.NoError(t, err)
	b, err = snappy.Decode(nil, b)
	require.NoError(t, err)
	binary := &tipb.ExplainData{}
	err = binary.Unmarshal(b)
	require.NoError(t, err)

	result = testdata.ConvertRowsToStrings(tk.MustQuery("select binary_plan from information_schema.statements_summary "+
		`where QUERY_SAMPLE_TEXT like "%select sleep(1)%" and QUERY_SAMPLE_TEXT not like "%like%" `+
		"limit 1;").Rows())
	require.Len(t, result, 1)
	s = result[0]
	b, err = base64.StdEncoding.DecodeString(s)
	require.NoError(t, err)
	b, err = snappy.Decode(nil, b)
	require.NoError(t, err)
	binary = &tipb.ExplainData{}
	err = binary.Unmarshal(b)
	require.NoError(t, err)

	// 2. assert binary plan is not generated if the variable is turned off
	tk.MustExec("set global tidb_generate_binary_plan = 0")
	tk.MustQuery("select 1 > sleep(1)")
	result = testdata.ConvertRowsToStrings(tk.MustQuery("select binary_plan from information_schema.slow_query "+
		`where query like "%select 1 > sleep(1)%" and query not like "%like%" `+
		"limit 1;").Rows())
	require.Len(t, result, 1)
	s = result[0]
	require.Empty(t, s)

	result = testdata.ConvertRowsToStrings(tk.MustQuery("select binary_plan from information_schema.statements_summary "+
		`where QUERY_SAMPLE_TEXT like "%select 1 > sleep(1)%" and QUERY_SAMPLE_TEXT not like "%like%" `+
		"limit 1;").Rows())
	require.Len(t, result, 1)
	s = result[0]
	require.Empty(t, s)
}

// TestTooLongBinaryPlan asserts that if the binary plan is larger than 1024*1024 bytes, it should be output to slow query but not to stmt summary.
func TestTooLongBinaryPlan(t *testing.T) {
	// Redirect the slow log to a temp file and restore config on exit.
	originCfg := config.GetGlobalConfig()
	newCfg := *originCfg
	f, err := os.CreateTemp("", "tidb-slow-*.log")
	require.NoError(t, err)
	newCfg.Log.SlowQueryFile = f.Name()
	config.StoreGlobalConfig(&newCfg)
	defer func() {
		config.StoreGlobalConfig(originCfg)
		require.NoError(t, f.Close())
		require.NoError(t, os.Remove(newCfg.Log.SlowQueryFile))
	}()
	require.NoError(t, logutil.InitLogger(newCfg.Log.ToLogConfig()))

	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	require.NoError(t, tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil, nil))
	tk.MustExec(fmt.Sprintf("set @@tidb_slow_query_file='%v'", f.Name()))

	origin := tk.MustQuery("SELECT @@global.tidb_enable_stmt_summary")
	originStr := origin.Rows()[0][0].(string)
	defer func() {
		tk.MustExec("set @@global.tidb_enable_stmt_summary = '" + originStr + "'")
	}()
	// Trigger clear the stmt summary in memory to prevent this case from being affected by other cases.
	tk.MustExec("set global tidb_enable_stmt_summary = 0")
	tk.MustExec("set global tidb_enable_stmt_summary = 1")

	tk.MustExec("use test")
	tk.MustExec("drop table if exists th")
	tk.MustExec("set @@session.tidb_enable_table_partition = 1")
	tk.MustExec(`set @@tidb_partition_prune_mode='` + string(variable.Static) + `'`)
	// 8192 partitions under static pruning produce a plan > 1 MiB.
	tk.MustExec("create table th (i int, a int,b int, c int, index (a)) partition by hash (a) partitions 8192;")
	tk.MustQuery("select count(*) from th t1 join th t2 join th t3 join th t4 join th t5 join th t6 where t1.i=t2.a and t1.i=t3.i and t3.i=t4.i and t4.i=t5.i and t5.i=t6.i")

	// Slow log keeps the oversized plan intact.
	result := testdata.ConvertRowsToStrings(tk.MustQuery("select binary_plan from information_schema.slow_query "+
		`where query like "%th t1 join th t2 join th t3%" and query not like "%like%" `+
		"limit 1;").Rows())
	require.Len(t, result, 1)
	s := result[0]
	require.Greater(t, len(s), stmtsummary.MaxEncodedPlanSizeInBytes)
	b, err := base64.StdEncoding.DecodeString(s)
	require.NoError(t, err)
	b, err = snappy.Decode(nil, b)
	require.NoError(t, err)
	binary := &tipb.ExplainData{}
	err = binary.Unmarshal(b)
	require.NoError(t, err)
	require.False(t, binary.DiscardedDueToTooLong)
	require.True(t, binary.WithRuntimeStats)
	require.NotNil(t, binary.Main)

	// Statement summary stores a stub marked DiscardedDueToTooLong instead.
	result = testdata.ConvertRowsToStrings(tk.MustQuery("select binary_plan from information_schema.statements_summary "+
		`where QUERY_SAMPLE_TEXT like "%th t1 join th t2 join th t3%" and QUERY_SAMPLE_TEXT not like "%like%" `+
		"limit 1;").Rows())
	require.Len(t, result, 1)
	s = result[0]
	b, err = base64.StdEncoding.DecodeString(s)
	require.NoError(t, err)
	b, err = snappy.Decode(nil, b)
	require.NoError(t, err)
	binary = &tipb.ExplainData{}
	err = binary.Unmarshal(b)
	require.NoError(t, err)
	require.True(t, binary.DiscardedDueToTooLong)
	require.Nil(t, binary.Main)
	require.Nil(t, binary.Ctes)
}

// TestLongBinaryPlan asserts that if the binary plan is smaller than 1024*1024 bytes, it should be output to both slow query and stmt summary.
func TestLongBinaryPlan(t *testing.T) {
	// Redirect the slow log to a temp file and restore config on exit.
	originCfg := config.GetGlobalConfig()
	newCfg := *originCfg
	f, err := os.CreateTemp("", "tidb-slow-*.log")
	require.NoError(t, err)
	newCfg.Log.SlowQueryFile = f.Name()
	config.StoreGlobalConfig(&newCfg)
	defer func() {
		config.StoreGlobalConfig(originCfg)
		require.NoError(t, f.Close())
		require.NoError(t, os.Remove(newCfg.Log.SlowQueryFile))
	}()
	require.NoError(t, logutil.InitLogger(newCfg.Log.ToLogConfig()))

	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	require.NoError(t, tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil, nil))
	tk.MustExec(fmt.Sprintf("set @@tidb_slow_query_file='%v'", f.Name()))

	tk.MustExec("use test")
	tk.MustExec("drop table if exists th")
	tk.MustExec("set @@session.tidb_enable_table_partition = 1")
	tk.MustExec(`set @@tidb_partition_prune_mode='` + string(variable.Static) + `'`)
	tk.MustExec("create table th (i int, a int,b int, c int, index (a)) partition by hash (a) partitions 1000;")
	tk.MustQuery("select count(*) from th t1 join th t2 join th t3 join th t4 join th t5 join th t6 where t1.i=t2.a and t1.i=t3.i and t3.i=t4.i and t4.i=t5.i and t5.i=t6.i")

	result := testdata.ConvertRowsToStrings(tk.MustQuery("select binary_plan from information_schema.slow_query "+
		`where query like "%th t1 join th t2 join th t3%" and query not like "%like%" `+
		"limit 1;").Rows())
	require.Len(t, result, 1)
	s1 := result[0]
	// The binary plan in this test case is expected to be smaller than MaxEncodedPlanSizeInBytes.
	// If the size of the binary plan changed and this case failed in the future, you can adjust the partition numbers in the CREATE TABLE statement above.
	require.Less(t, len(s1), stmtsummary.MaxEncodedPlanSizeInBytes)
	b, err := base64.StdEncoding.DecodeString(s1)
	require.NoError(t, err)
	b, err = snappy.Decode(nil, b)
	require.NoError(t, err)
	binary := &tipb.ExplainData{}
	err = binary.Unmarshal(b)
	require.NoError(t, err)
	require.False(t, binary.DiscardedDueToTooLong)
	require.True(t, binary.WithRuntimeStats)
	require.NotNil(t, binary.Main)

	result = testdata.ConvertRowsToStrings(tk.MustQuery("select binary_plan from information_schema.statements_summary "+
		`where QUERY_SAMPLE_TEXT like "%th t1 join th t2 join th t3%" and QUERY_SAMPLE_TEXT not like "%like%" `+
		"limit 1;").Rows())
	require.Len(t, result, 1)
	s2 := result[0]
	// Under the size limit both sinks must record the identical plan.
	require.Equal(t, s1, s2)
}

// TestBinaryPlanOfPreparedStmt asserts that executing a prepared statement
// records the same binary plan in the slow log and the statement summary.
func TestBinaryPlanOfPreparedStmt(t *testing.T) {
	// Redirect the slow log to a temp file and restore config on exit.
	originCfg := config.GetGlobalConfig()
	newCfg := *originCfg
	f, err := os.CreateTemp("", "tidb-slow-*.log")
	require.NoError(t, err)
	newCfg.Log.SlowQueryFile = f.Name()
	config.StoreGlobalConfig(&newCfg)
	defer func() {
		config.StoreGlobalConfig(originCfg)
		require.NoError(t, f.Close())
		require.NoError(t, os.Remove(newCfg.Log.SlowQueryFile))
	}()
	require.NoError(t, logutil.InitLogger(newCfg.Log.ToLogConfig()))

	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	require.NoError(t, tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil, nil))
	tk.MustExec(fmt.Sprintf("set @@tidb_slow_query_file='%v'", f.Name()))

	tk.MustExec("use test")
	tk.MustExec("drop table if exists t")
	tk.MustExec("create table t (a int, b int);")
	tk.MustExec("insert into t value(30,30);")
	tk.MustExec(`prepare stmt from "select sleep(1), b from t where a > ?"`)
	tk.MustExec("set @a = 20")
	tk.MustQuery("execute stmt using @a")

	result := testdata.ConvertRowsToStrings(tk.MustQuery("select binary_plan from information_schema.slow_query "+
		`where query like "%select sleep%" and query not like "%like%" `+
		"limit 1;").Rows())
	require.Len(t, result, 1)
	s1 := result[0]
	b, err := base64.StdEncoding.DecodeString(s1)
	require.NoError(t, err)
	b, err = snappy.Decode(nil, b)
	require.NoError(t, err)
	binary := &tipb.ExplainData{}
	err = binary.Unmarshal(b)
	require.NoError(t, err)
	require.False(t, binary.DiscardedDueToTooLong)
	require.True(t, binary.WithRuntimeStats)
	require.NotNil(t, binary.Main)

	result = testdata.ConvertRowsToStrings(tk.MustQuery("select binary_plan from information_schema.statements_summary "+
		`where QUERY_SAMPLE_TEXT like "%select sleep%" and QUERY_SAMPLE_TEXT not like "%like%" `+
		"limit 1;").Rows())
	require.Len(t, result, 1)
	s2 := result[0]
	require.Equal(t, s1, s2)
}

// TestDecodeBinaryPlan asserts that the result of EXPLAIN ANALYZE FORMAT = 'verbose' is the same as tidb_decode_binary_plan().
func TestDecodeBinaryPlan(t *testing.T) {
	// Prepare the slow log: redirect it to a temp file so this test can
	// read back exactly what it produced, and restore the config afterwards.
	originCfg := config.GetGlobalConfig()
	newCfg := *originCfg
	f, err := os.CreateTemp("", "tidb-slow-*.log")
	require.NoError(t, err)
	newCfg.Log.SlowQueryFile = f.Name()
	config.StoreGlobalConfig(&newCfg)
	defer func() {
		config.StoreGlobalConfig(originCfg)
		require.NoError(t, f.Close())
		require.NoError(t, os.Remove(newCfg.Log.SlowQueryFile))
	}()
	require.NoError(t, logutil.InitLogger(newCfg.Log.ToLogConfig()))
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec(fmt.Sprintf("set @@tidb_slow_query_file='%v'", f.Name()))
	// Threshold 0 forces every statement into the slow log.
	tk.MustExec("set tidb_slow_log_threshold=0;")
	defer func() {
		tk.MustExec("set tidb_slow_log_threshold=300;")
	}()
	tk.MustExec(`create table tp (a int, b int) partition by range(a) ( partition p0 values less than (100), partition p1 values less than (200), partition p2 values less than (300), partition p3 values less than maxvalue )`)
	tk.MustExec("insert into tp value(1,1), (10,10), (123,234), (213, 234);")
	tk.MustExec("create table t(a int, b int, c int, index ia(a));")
	tk.MustExec("insert into t value(1,1,1), (10,10,10), (123,234,345), (-213, -234, -234);")
	// Non-"explain" entries are setup statements (e.g. switching the
	// partition prune mode) and are just executed, not compared.
	cases := []string{
		"explain analyze format = 'verbose' select * from t",
		"explain analyze format = 'verbose' select * from t where a > 10",
		"explain analyze format = 'verbose' select /*+ inl_join(t1) */ * from t t1 join t t2 where t1.a = t2.a",
		"explain analyze format = 'verbose' WITH RECURSIVE cte(n) AS (SELECT 1 UNION ALL SELECT n + 1 FROM cte WHERE n < 5) SELECT * FROM cte",
		"set @@tidb_partition_prune_mode='static'",
		"explain analyze format = 'verbose' select * from tp",
		"explain analyze format = 'verbose' select * from tp t1 join tp t2 on t1.b > t2.b",
		"explain analyze format = 'verbose' select * from tp where a > 400",
		"explain analyze format = 'verbose' select * from tp where a < 30",
		"explain analyze format = 'verbose' select * from tp where a > 0",
		"set @@tidb_partition_prune_mode='dynamic'",
		"explain analyze format = 'verbose' select * from tp",
		"explain analyze format = 'verbose' select * from tp t1 join tp t2 on t1.b > t2.b",
		"explain analyze format = 'verbose' select * from tp where a > 400",
		"explain analyze format = 'verbose' select * from tp where a < 30",
		"explain analyze format = 'verbose' select * from tp where a > 0",
	}
	for _, c := range cases {
		if len(c) < 7 || c[:7] != "explain" {
			tk.MustExec(c)
			continue
		}
		comment := fmt.Sprintf("sql:%s", c)
		var res1, res2 []string
		// res1: every non-empty trimmed cell of the live EXPLAIN ANALYZE output.
		explainResult := tk.MustQuery(c).Rows()
		for _, row := range explainResult {
			for _, val := range row {
				str := val.(string)
				str = strings.TrimSpace(str)
				if len(str) > 0 {
					res1 = append(res1, str)
				}
			}
		}
		// res2: the same cells, reconstructed by decoding the binary plan
		// recorded for this statement in the slow log.
		slowLogResult := testdata.ConvertRowsToStrings(tk.MustQuery("select binary_plan from information_schema.slow_query " +
			`where query = "` + c + `;" ` +
			"order by time desc limit 1").Rows())
		require.Lenf(t, slowLogResult, 1, comment)
		decoded := testdata.ConvertRowsToStrings(tk.MustQuery(`select tidb_decode_binary_plan('` + slowLogResult[0] + `')`).Rows())[0]
		decodedRows := strings.Split(decoded, "\n")
		// remove the first newline and the title row
		decodedRows = decodedRows[2:]
		for _, decodedRow := range decodedRows {
			vals := strings.Split(decodedRow, "|")
			for _, val := range vals {
				val = strings.TrimSpace(val)
				if len(val) > 0 {
					res2 = append(res2, val)
				}
			}
		}
		require.Equalf(t, res1, res2, comment)
	}
}

// TestInvalidDecodeBinaryPlan asserts that tidb_decode_binary_plan() returns an
// empty result plus a warning (rather than an error) for each layer of invalid
// input: bad base64, bad snappy payload, and bad protobuf content.
func TestInvalidDecodeBinaryPlan(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	str1 := "some random bytes"
	str2 := base64.StdEncoding.EncodeToString([]byte(str1))
	str3 := base64.StdEncoding.EncodeToString(snappy.Encode(nil, []byte(str1)))
	// Not base64 at all.
	tk.MustQuery(`select tidb_decode_binary_plan('` + str1 + `')`).Check(testkit.Rows(""))
	tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1105 illegal base64 data at input byte 4"))
	// Valid base64, but not snappy-compressed.
	tk.MustQuery(`select tidb_decode_binary_plan('` + str2 + `')`).Check(testkit.Rows(""))
	tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1105 snappy: corrupt input"))
	// Valid base64+snappy, but not a tipb.ExplainData message.
	tk.MustQuery(`select tidb_decode_binary_plan('` + str3 + `')`).Check(testkit.Rows(""))
	tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1105 proto: illegal wireType 7"))
}

// TestUnnecessaryBinaryPlanInSlowLog asserts that no empty
// tidb_decode_binary_plan('') entry is written to the slow log.
func TestUnnecessaryBinaryPlanInSlowLog(t *testing.T) {
	// Redirect the slow log to a temp file; restore the config afterwards.
	originCfg := config.GetGlobalConfig()
	newCfg := *originCfg
	f, err := os.CreateTemp("", "tidb-slow-*.log")
	require.NoError(t, err)
	newCfg.Log.SlowQueryFile = f.Name()
	config.StoreGlobalConfig(&newCfg)
	defer func() {
		config.StoreGlobalConfig(originCfg)
		require.NoError(t, f.Close())
		require.NoError(t, os.Remove(newCfg.Log.SlowQueryFile))
	}()
	require.NoError(t, logutil.InitLogger(newCfg.Log.ToLogConfig()))
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	require.NoError(t, tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil, nil))
	tk.MustExec(fmt.Sprintf("set @@tidb_slow_query_file='%v'", f.Name()))
	// Remember the global threshold so it can be restored on exit.
	origin := tk.MustQuery("SELECT @@global.tidb_slow_log_threshold")
	originStr := origin.Rows()[0][0].(string)
	defer func() {
		tk.MustExec("set @@global.tidb_slow_log_threshold = '" + originStr + "'")
	}()
	tk.MustExec("use test")
	tk.MustExec("drop table if exists th")
	tk.MustExec("set global tidb_slow_log_threshold = 1;")
	tk.MustExec("create table th (i int, a int,b int, c int, index (a)) partition by hash (a) partitions 100;")
	slowLogBytes, err := io.ReadAll(f)
	require.NoError(t, err)
	require.NotContains(t, string(slowLogBytes), `tidb_decode_binary_plan('')`)
}
package main

import (
	"fmt"

	. "leetcode"
)

// main builds a small sample list and demonstrates reorderList.
func main() {
	h := NewListNode(1, 2, 3, 4)
	reorderList(h)
	fmt.Println(h)
}

// reorderList reorders the list L0→L1→…→Ln-1→Ln in place into
// L0→Ln→L1→Ln-1→… by first collecting the nodes into a slice for O(1)
// access from both ends.
func reorderList(head *ListNode) {
	var arr []*ListNode
	for head != nil {
		next := head.Next
		arr = append(arr, head)
		head = next
	}
	// Fix: the original indexed arr[len(arr)-1] on an empty list (panic)
	// and, for a single-node list, created a self-loop and then read
	// arr[1] out of range. Lists shorter than two nodes are already
	// "reordered".
	if len(arr) < 2 {
		return
	}
	left, right := 0, len(arr)-1
	for {
		arr[left].Next = arr[right]
		left++
		if left == right {
			break
		}
		arr[right].Next = arr[left]
		right--
	}
	// The middle node becomes the new tail.
	arr[left].Next = nil
}

// reorderList2 is an alternative variant that rewires pointers while the
// two indices converge; its loop condition (left < right) already makes it
// safe for lists of length 0 and 1.
func reorderList2(head *ListNode) {
	arr := make([]*ListNode, 0)
	for head != nil {
		n := head.Next
		arr = append(arr, head)
		head = n
	}
	for left, right := 0, len(arr)-1; left < right; left, right = left+1, right-1 {
		if left < right-1 {
			arr[left].Next, arr[right].Next, arr[right-1].Next = arr[right], arr[left].Next, arr[right].Next
		} else {
			if arr[right].Next != nil {
				arr[left].Next, arr[right].Next = arr[right], arr[left].Next
			} else {
				arr[left].Next = arr[right]
			}
		}
	}
}

//leetcode submit region end(Prohibit modification and deletion)
package db

import (
	"database/sql"
	"sync"

	"github.com/textileio/go-textile/pb"
	"github.com/textileio/go-textile/repo"
	"github.com/textileio/go-textile/util"
)

// CafeClientMessagesDB persists cafe client messages in the
// cafe_client_messages table. All public methods serialize access through
// the shared modelStore lock.
type CafeClientMessagesDB struct {
	modelStore
}

// NewCafeClientMessageStore returns a CafeClientMessageStore backed by db,
// guarded by lock.
func NewCafeClientMessageStore(db *sql.DB, lock *sync.Mutex) repo.CafeClientMessageStore {
	return &CafeClientMessagesDB{modelStore{db, lock}}
}

// AddOrUpdate inserts the message, replacing any existing row with the same id.
func (c *CafeClientMessagesDB) AddOrUpdate(message *pb.CafeClientMessage) error {
	c.lock.Lock()
	defer c.lock.Unlock()
	tx, err := c.db.Begin()
	if err != nil {
		return err
	}
	stm := `insert or replace into cafe_client_messages(id, peerId, clientId, date) values(?,?,?,?)`
	stmt, err := tx.Prepare(stm)
	if err != nil {
		log.Errorf("error in tx prepare: %s", err)
		// Fix: roll back so the open transaction is not leaked when
		// Prepare fails.
		_ = tx.Rollback()
		return err
	}
	defer stmt.Close()
	_, err = stmt.Exec(
		message.Id,
		message.Peer,
		message.Client,
		util.ProtoNanos(message.Date),
	)
	if err != nil {
		_ = tx.Rollback()
		return err
	}
	return tx.Commit()
}

// ListByClient returns up to limit messages for clientId, oldest first.
func (c *CafeClientMessagesDB) ListByClient(clientId string, limit int) []pb.CafeClientMessage {
	c.lock.Lock()
	defer c.lock.Unlock()
	// Fix: use bound parameters instead of concatenating clientId/limit
	// into the SQL (injection-prone).
	stm := "select * from cafe_client_messages where clientId=? order by date asc limit ?;"
	return c.handleQuery(stm, clientId, limit)
}

// CountByClient returns the number of stored messages for clientId.
func (c *CafeClientMessagesDB) CountByClient(clientId string) int {
	c.lock.Lock()
	defer c.lock.Unlock()
	// Fix: parameterized query instead of string concatenation.
	row := c.db.QueryRow("select Count(*) from cafe_client_messages where clientId=?;", clientId)
	var count int
	_ = row.Scan(&count)
	return count
}

// Delete removes the message with the given id belonging to clientId.
func (c *CafeClientMessagesDB) Delete(id string, clientId string) error {
	c.lock.Lock()
	defer c.lock.Unlock()
	_, err := c.db.Exec("delete from cafe_client_messages where id=? and clientId=?", id, clientId)
	return err
}

// DeleteByClient removes up to limit of clientId's oldest messages;
// limit <= 0 removes them all.
func (c *CafeClientMessagesDB) DeleteByClient(clientId string, limit int) error {
	c.lock.Lock()
	defer c.lock.Unlock()
	// Fix: parameterized sub-select instead of string concatenation.
	sel := "select id from cafe_client_messages where clientId=?"
	args := []interface{}{clientId}
	if limit > 0 {
		sel += " order by date asc limit ?"
		args = append(args, limit)
	}
	query := "delete from cafe_client_messages where id in (" + sel + ");"
	_, err := c.db.Exec(query, args...)
	return err
}

// handleQuery runs stm with args and scans each row into a CafeClientMessage.
// Rows that fail to scan are logged and skipped.
func (c *CafeClientMessagesDB) handleQuery(stm string, args ...interface{}) []pb.CafeClientMessage {
	var list []pb.CafeClientMessage
	rows, err := c.db.Query(stm, args...)
	if err != nil {
		log.Errorf("error in db query: %s", err)
		return nil
	}
	// Fix: the result set was never closed, leaking the underlying
	// connection on every call.
	defer rows.Close()
	for rows.Next() {
		var id, peerId, clientId string
		var dateInt int64
		if err := rows.Scan(&id, &peerId, &clientId, &dateInt); err != nil {
			log.Errorf("error in db scan: %s", err)
			continue
		}
		list = append(list, pb.CafeClientMessage{
			Id:     id,
			Peer:   peerId,
			Client: clientId,
			Date:   util.ProtoTs(dateInt),
		})
	}
	// Fix: surface iteration errors instead of silently truncating results.
	if err := rows.Err(); err != nil {
		log.Errorf("error in db rows: %s", err)
	}
	return list
}
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

// main demonstrates basic MySQL usage: ensure a table exists, insert one
// row, then read every row back and print it.
func main() {
	// Note: sql.Open only validates its arguments; the first real
	// connection is made lazily by the queries below.
	db, err := sql.Open("mysql", "MeatTaro:9868@/test?charset=utf8")
	checkErr(err)
	defer db.Close()

	// Table definition.
	tableUser := `CREATE TABLE IF NOT EXISTS users( username VARCHAR(64) NULL DEFAULT NULL, password VARCHAR(64) NULL DEFAULT NULL );`

	// Create the table if it does not already exist.
	_, err = db.Exec(tableUser)
	checkErr(err)

	// Insert one row.
	rs, err := db.Exec(
		"INSERT INTO users(username, password) VALUES ('MeatTaro','9868')")
	checkErr(err)

	// Report how many rows the insert affected.
	rowCount, err := rs.RowsAffected()
	checkErr(err)
	log.Printf("inserted %d rows", rowCount)

	// Read all rows back.
	rows, err := db.Query("SELECT * FROM users")
	checkErr(err)
	// Fix: close the result set so its connection is returned to the pool.
	defer rows.Close()

	// rows.Next prepares each result row for Scan in turn.
	for rows.Next() {
		var username string
		var password string
		// Scan copies the row's column values into the given pointers.
		err = rows.Scan(&username, &password)
		checkErr(err)
		fmt.Println(username, password)
	}
	// Fix: Next returning false can mean "done" or "failed"; check which.
	checkErr(rows.Err())
}

// checkErr aborts the program on any error; acceptable for a demo.
func checkErr(err error) {
	if err != nil {
		panic(err)
	}
}
package repository

import (
	"encoding/json"

	"github.com/pkg/errors"
	"github.com/shharn/blog/db"
	"github.com/shharn/blog/model"
)

// MenuRepository is the persistence contract for menus. The first argument
// of every method is the repository context produced by Context().
type MenuRepository interface {
	GetAll(interface{}) ([]model.Menu, error)
	Create(interface{}, model.Menu) (string, error)
	Update(interface{}, model.Menu) error
	Get(interface{}, string) (model.Menu, error)
	Delete(interface{}, string) error
	DeleteParent(interface{}, string, string) error
	DeleteChild(interface{}, string, string) error
	AddChild(interface{}, string, string) error
	Contextual
}

const (
	// getMenusQuery fetches every node that has a url, with parent/child uids.
	getMenusQuery = ` query { menus(func: has(url)) { uid name url parent { uid } children: child { uid } } } `
	// getMenuQuery fetches a single menu node by uid.
	getMenuQuery = ` query getMenu($id: string) { menus(func: uid($id)) { uid name url parent { uid } children: child { uid } } } `
)

// getMenusPayload mirrors the JSON shape returned by getMenusQuery.
type getMenusPayload struct {
	Menus []model.Menu `json:"menus,omitempty"`
}

// getMenuPayload mirrors the JSON shape returned by getMenuQuery.
type getMenuPayload struct {
	Menus []model.Menu `json:"menus,omitempty"`
}

// DgraphMenuRepository implements MenuRepository on top of Dgraph.
type DgraphMenuRepository struct{}

// Context opens a Dgraph client and wraps it in a per-request repository
// context. It panics if the connection cannot be initialized.
func (r *DgraphMenuRepository) Context() interface{} {
	c, err := db.Init()
	if err != nil {
		panic(err)
	}
	return &dgraphRepositoryContext{c, nil}
}

// GetAll returns every menu.
func (r *DgraphMenuRepository) GetAll(ctx interface{}) ([]model.Menu, error) {
	rctx := ctx.(*dgraphRepositoryContext)
	res, err := rctx.Client.Query(getMenusQuery)
	if err != nil {
		rctx.Err = err
		return []model.Menu{}, err
	}
	var payload getMenusPayload
	if err := json.Unmarshal(res.Json, &payload); err != nil {
		rctx.Err = err
		// Fix: wrap instead of errors.New(err.Error()), which discarded
		// the original error type and stack trace.
		return []model.Menu{}, errors.Wrap(err, "could not unmarshal menus")
	}
	return payload.Menus, nil
}

// Create stores a new menu and returns the uid assigned by Dgraph.
func (r *DgraphMenuRepository) Create(ctx interface{}, menu model.Menu) (string, error) {
	rctx := ctx.(*dgraphRepositoryContext)
	// "_:new" is a blank-node label; the assigned uid comes back under "new".
	menu.ID = "_:new"
	md := db.MutationData{menu}
	assigned, err := rctx.Client.Mutate(md)
	if err != nil {
		rctx.Err = err
		return "", err
	}
	return assigned.Uids["new"], nil
}

// Update persists changes to an existing menu.
func (r *DgraphMenuRepository) Update(ctx interface{}, menu model.Menu) error {
	rctx := ctx.(*dgraphRepositoryContext)
	md := db.MutationData{menu}
	_, err := rctx.Client.Mutate(md)
	if err != nil {
		rctx.Err = err
	}
	return err
}

// Get returns the menu with the given uid, or model.EmptyMenu if absent.
func (r *DgraphMenuRepository) Get(ctx interface{}, id string) (model.Menu, error) {
	rctx := ctx.(*dgraphRepositoryContext)
	vars := map[string]string{"$id": id}
	res, err := rctx.Client.QueryWithVars(getMenuQuery, vars)
	if err != nil {
		rctx.Err = err
		return model.EmptyMenu, err
	}
	var payload getMenuPayload
	if err := json.Unmarshal(res.Json, &payload); err != nil {
		rctx.Err = err
		// Fix: wrap instead of errors.New(err.Error()) (see GetAll).
		return model.EmptyMenu, errors.Wrap(err, "could not unmarshal menu")
	}
	if len(payload.Menus) < 1 {
		return model.EmptyMenu, nil
	}
	return payload.Menus[0], nil
}

// Delete removes the menu node with the given uid.
func (r *DgraphMenuRepository) Delete(ctx interface{}, id string) error {
	rctx := ctx.(*dgraphRepositoryContext)
	_, err := rctx.Client.DeleteNode(id)
	rctx.Err = err
	return err
}

// DeleteParent removes the parent edge from menu id to pid.
func (r *DgraphMenuRepository) DeleteParent(ctx interface{}, id, pid string) error {
	rctx := ctx.(*dgraphRepositoryContext)
	_, err := rctx.Client.DeleteEdge(id, "parent", pid)
	rctx.Err = err
	return err
}

// DeleteChild removes the child edge from menu id to cid.
func (r *DgraphMenuRepository) DeleteChild(ctx interface{}, id, cid string) error {
	rctx := ctx.(*dgraphRepositoryContext)
	_, err := rctx.Client.DeleteEdge(id, "child", cid)
	rctx.Err = err
	return err
}

// AddChild creates a child edge from menu id to cid.
func (r *DgraphMenuRepository) AddChild(ctx interface{}, id, cid string) error {
	rctx := ctx.(*dgraphRepositoryContext)
	_, err := rctx.Client.AddEdge(id, "child", cid)
	rctx.Err = err
	return err
}

// NewMenuRepository returns the Dgraph-backed MenuRepository.
func NewMenuRepository() MenuRepository {
	return &DgraphMenuRepository{}
}
package handlers

import (
	"errors"
	"io"
	"log"
	"mime/multipart"
	"net/http"
	"regexp"
	"strconv"

	"github.com/krix38/gophotogallery/model"
	"github.com/krix38/gophotogallery/model/dao"
	"github.com/krix38/gophotogallery/properties"
	"github.com/krix38/gophotogallery/web/controller"
)

// AddPhoto handles a POST upload of a photo file ("filedata") into the
// gallery identified by the "galleryid" form value, then redirects back to
// the gallery page. Any other method redirects to the admin page.
func AddPhoto(w http.ResponseWriter, r *http.Request) {
	if r.Method == "POST" {
		galleryId, err := strconv.ParseInt(r.FormValue("galleryid"), 10, 64)
		if err != nil {
			log.Print(err)
			http.Error(w, err.Error(), http.StatusNotFound)
			return
		}
		// Fix: the parse error was previously ignored, letting a broken
		// multipart body fall through to FormFile.
		if err := r.ParseMultipartForm(properties.MaxFileChunk); err != nil {
			log.Print(err)
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		file, handler, err := r.FormFile("filedata")
		if err != nil {
			log.Print(err)
			http.Redirect(w, r, properties.UrlAdminShowGalleryPath+strconv.FormatInt(galleryId, 10), http.StatusFound)
			return
		}
		defer file.Close()
		data := multipartFileToByteArray(file)
		/* TODO: add fetching of the photo description */
		photo := model.NewPhoto(galleryId, data, "description", handler.Header.Get("Content-Type"))
		dao.SavePhoto(photo)
		http.Redirect(w, r, properties.UrlAdminShowGalleryPath+strconv.FormatInt(galleryId, 10), http.StatusFound)
	} else {
		http.Redirect(w, r, properties.UrlAdminPath, http.StatusFound)
	}
}

// DeletePhoto handles a POST request that removes the photo "photoId" and
// redirects back to gallery "galleryId". Any other method redirects to the
// admin page.
func DeletePhoto(w http.ResponseWriter, r *http.Request) {
	if r.Method == "POST" {
		photoIdString := r.FormValue("photoId")
		galleryIdString := r.FormValue("galleryId")
		if photoIdString == "" {
			http.Error(w, "Bad request", http.StatusBadRequest)
			return
		}
		photoId, err := strconv.ParseInt(photoIdString, 10, 64)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		galleryId, err := strconv.ParseInt(galleryIdString, 10, 64)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		err = dao.DeletePhoto(photoId)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		http.Redirect(w, r, properties.UrlAdminShowGalleryPath+strconv.FormatInt(galleryId, 10), http.StatusFound)
	} else {
		http.Redirect(w, r, properties.UrlAdminPath, http.StatusFound)
	}
}

// ShowPhoto serves the raw photo bytes for the numeric id embedded in the
// URL path, with the stored mimetype.
func ShowPhoto(w http.ResponseWriter, r *http.Request) {
	// NOTE(review): the regexp could be compiled once at package level if
	// UrlAdminShowPhotoPath is constant — confirm before hoisting.
	idString := regexp.MustCompile("^" + properties.UrlAdminShowPhotoPath + "([0-9]+)$").FindStringSubmatch(r.URL.Path)
	if idString == nil {
		controller.PrintErrorAndRedirect(errors.New("bad url"), w, r, properties.UrlAdminPath)
		return
	}
	id, err := strconv.ParseInt(idString[1], 10, 64)
	if err != nil {
		log.Print(err)
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	photo, err := dao.LoadPhoto(id)
	if err != nil {
		log.Print(err)
		http.Error(w, err.Error(), http.StatusNotFound)
		return
	}
	w.Header().Add("Content-Type", photo.Mimetype())
	w.Write(photo.Data())
}

// multipartFileToByteArray reads the uploaded file into memory in
// MaxFileChunk-sized reads, truncating once MaxFileInMemory is exceeded.
// Returns nil on a read error other than EOF.
func multipartFileToByteArray(file multipart.File) []byte {
	chunk := make([]byte, properties.MaxFileChunk)
	var data []byte
	sumSize := int64(0)
	for {
		n, err := file.Read(chunk)
		sumSize += int64(n)
		if sumSize > properties.MaxFileInMemory {
			log.Print("File too large, saving incomplete image...")
			break
		}
		// Fix: append only the n bytes actually read. The original
		// appended the entire chunk buffer, padding the image with stale
		// bytes on every short read, and dropped the bytes of a final
		// partial read that arrived together with io.EOF.
		data = append(data, chunk[:n]...)
		if err != nil {
			if err == io.EOF {
				break
			}
			log.Print(err)
			return nil
		}
	}
	return data
}
package externalservices

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
	"poliskarta/api/structs"
	"sync"
)

// CallPoliceScraping fetches the long description for policeEvent from the
// import.io scraping API and stores it in policeEvent.DescriptionLong,
// falling back to "<N/A>" on any failure. Intended to be run as a goroutine;
// it signals completion via wg.
func CallPoliceScraping(policeEvent *structs.PoliceEvent, credentials structs.Credentials, wg *sync.WaitGroup) {
	// Fix: register Done first. The original deferred it as the very last
	// statement, so a panic anywhere above would leave the WaitGroup
	// waiting forever.
	defer wg.Done()

	scrapeURL := "https://api.import.io/store/data/3c3e1355-d3c9-4047-bd2e-f86d36af29dc/_query?input/webpage/url="
	apikey := "&_user=" + credentials.Importiouser + "&_apikey=" + credentials.Importiokey

	httpResponse, httperr := http.Get(scrapeURL + policeEvent.PoliceEventURL + apikey)
	if httperr != nil {
		fmt.Println("Importio http-error: " + httperr.Error())
		policeEvent.DescriptionLong = "<N/A>"
		return
	}
	defer httpResponse.Body.Close()

	body, ioerr := ioutil.ReadAll(httpResponse.Body)
	if ioerr != nil {
		fmt.Println("Ioutilreadallerror: ", ioerr.Error())
		policeEvent.DescriptionLong = "<N/A>"
		return
	}

	var scrapedEvents ScrapedEvents
	//For unknown reasons, unmarshal fails some times, might be that the response from
	//police scraping is wrong (200OK instead of a real http-error)
	if unmarshErr := json.Unmarshal(body, &scrapedEvents); unmarshErr != nil {
		fmt.Println("Unmarshal error after police scraping (import.io): " + unmarshErr.Error())
		policeEvent.DescriptionLong = "<N/A>"
		return
	}

	// Take the first result, guarding against an empty slice (indexing
	// Results[0] unconditionally has crashed the server before).
	if len(scrapedEvents.Results) > 0 {
		policeEvent.DescriptionLong = scrapedEvents.Results[0].Result
	}
}

// ScrapedEvents mirrors the top-level import.io response payload.
type ScrapedEvents struct {
	Results []ScrapedEvent `json:"results"`
}

// ScrapedEvent is a single scraping result.
type ScrapedEvent struct {
	Result string `json:"result"`
}
package command

import (
	"fmt"
	"strings"

	"github.com/emicklei/go-restful"

	. "data-importer/dbcentral/es"
	. "data-importer/dbcentral/etcd"
	. "data-importer/dbcentral/pg"
	. "data-importer/importer"
	. "data-importer/office"
	. "data-importer/tasks"

	"grm-service/command"
	"grm-service/dbcentral/es"
	"grm-service/dbcentral/etcd"
	"grm-service/dbcentral/pg"
	"grm-service/geoserver"
	"grm-service/mq"
	"grm-service/service"
)

// ImporterCommand is the CLI command that wires up and runs the GRM data
// importer service. Each field is bound to a command-line flag in Run.
type ImporterCommand struct {
	command.Meta
	SysDB        string // system database address (host:port)
	MetaDB       string // metadata database address (host:port)
	DBuser       string // postgres user
	DBpwd        string // postgres password
	EtcdEndpoint string // etcd endpoints, ";"-separated
	MqUrl        string // RabbitMQ AMQP url
	EsUrl        string // Elasticsearch url
	GeoServer    string // geoserver endpoints, ";"-separated
	GeoUser      string
	GeoPwd       string
	OfficeServer string // office (document) server endpoint
	OfficeUser   string
	OfficePwd    string
}

// Help returns the CLI usage text for this command.
func (c *ImporterCommand) Help() string {
	helpText := ` Usage: titan-grm data-importer [registry_address] [server_address] [server_namespace] [data_dir] [config_dir] Example: titan-grm data-importer -registry_address consul:8500 -server_address :8080 -server_namespace titangrm -data_dir /opt/titangrm/data -config_dir /opt/titangrm/config `
	return strings.TrimSpace(helpText)
}

// Synopsis returns the one-line command description.
func (c *ImporterCommand) Synopsis() string {
	return "GRM Data Importer Service"
}

// Run parses the flags, connects every backing service (postgres, etcd,
// RabbitMQ, geoserver, office server, Elasticsearch), registers the REST
// routes and runs the service. Returns 0 on success, 1 on any setup failure.
// NOTE(review): the "Faile to ..." messages below contain typos ("Failed");
// they are runtime strings and are left untouched here.
func (c *ImporterCommand) Run(args []string) int {
	flags := c.Meta.FlagSet(service.DataImporterService, command.FlagSetDefault)
	flags.StringVar(&c.SysDB, "sysdb", "192.168.1.149:31771", "postgresql server address and port")
	flags.StringVar(&c.MetaDB, "metadb", "192.168.1.149:31771", "postgresql server address and port")
	flags.StringVar(&c.DBuser, "dbuser", "postgres", "postgresql user")
	flags.StringVar(&c.DBpwd, "dbpwd", "otitan123", "postgresql user password")
	flags.StringVar(&c.EtcdEndpoint, "etcd", "192.168.1.149:31686", "etcd endpoint")
	flags.StringVar(&c.MqUrl, "mq", "amqp://admin:otitan123@192.168.1.149:5672/", "rammitmq url")
	flags.StringVar(&c.EsUrl, "es", "http://192.168.1.149:9200", "elasticsearch url")
	flags.StringVar(&c.GeoServer, "geoserver", "http://192.168.1.179:8181/grm;http://192.168.1.189:8181/grm", "geo server endpoint")
	flags.StringVar(&c.GeoUser, "geouser", "admin", "geoserver user")
	flags.StringVar(&c.GeoPwd, "geopwd", "geoserver", "geoserver user password")
	flags.StringVar(&c.OfficeServer, "officeserver", "http://106.74.18.39:8313", "office server")
	flags.StringVar(&c.OfficeUser, "officeuser", "grm@otitan.com", "offie user")
	flags.StringVar(&c.OfficePwd, "officepwd", "123456", "offie user password")
	if err := flags.Parse(args); err != nil {
		c.Ui.Error(c.Help())
		return 1
	}

	service := service.NewService(service.DataImporterService, "v2")
	service.Init(&c.Meta)

	// Initialize the databases.
	sysDB, err := pg.ConnectSystemDB(c.SysDB, c.DBuser, c.DBpwd)
	if err != nil {
		fmt.Println("Faile to connect system db:", err, c.SysDB, c.DBuser, c.DBpwd)
		return 1
	}
	defer sysDB.DisConnect()
	metaDB, err := pg.ConnectMetaDB(c.MetaDB, c.DBuser, c.DBpwd)
	if err != nil {
		fmt.Println("Faile to connect meta db:", err, c.MetaDB, c.DBuser, c.DBpwd)
		return 1
	}
	defer metaDB.DisConnect()

	// Initialize the etcd connection.
	dynamic := DynamicDB{etcd.DynamicEtcd{Endpoints: strings.Split(c.EtcdEndpoint, ";")}}
	if err := dynamic.Connect(); err != nil {
		fmt.Println("Faile to connect etcd v3:", err)
		return 1
	}
	defer dynamic.DisConnect()

	// Initialize the message queue.
	mQueue := mq.RabbitMQ{URL: c.MqUrl}
	if err := mQueue.Connect(); err != nil {
		fmt.Printf("Faile to connect mq(%s):%s\n", c.MqUrl, err)
		return 1
	}
	defer mQueue.Close()

	// geoserver
	geoUtil := geoserver.GeoserverUtil{strings.Split(c.GeoServer, ";"), c.GeoUser, c.GeoPwd}

	// office server
	officeUtil := &OnlyOffice{Endpoints: c.OfficeServer, UserName: c.OfficeUser, Password: c.OfficePwd}
	if err := officeUtil.Login(); err != nil {
		fmt.Printf("Faile to login office(%s):%s\n", c.OfficeServer, err)
		return 1
	}

	// Initialize the Elasticsearch connection.
	// NOTE(review): unlike the other connections, es is never disconnected
	// on exit — confirm whether MetaEs needs an explicit DisConnect.
	es := MetaEs{es.DynamicES{Endpoints: []string{c.EsUrl}}}
	if err := es.Connect(); err != nil {
		fmt.Println("Faile to connect es:", err)
		return 1
	}

	// router
	wc := restful.NewContainer()
	importer_svc := ImporterSvc{
		SysDB:        &SystemDB{sysDB},
		MetaDB:       &MetaDB{metaDB},
		ConfigDir:    c.ConfigDir,
		DynamicDB:    &dynamic,
		MsgQueue:     &mQueue,
		GeoServer:    &geoUtil,
		OfficeServer: officeUtil,
		EsServer:     &es,
		EsUrl:        c.EsUrl,
	}
	wc.Add(importer_svc.WebService())

	// tasks
	tasks_sv := TasksSvc{
		DynamicDB: &dynamic,
		MetaDB:    &MetaDB{metaDB},
		ConfigDir: c.ConfigDir,
	}
	wc.Add(tasks_sv.WebService())

	service.Handle("/", wc)
	service.Run()
	return 0
}
package types

import (
	"testing"

	"github.com/stretchr/testify/require"
	"github.com/tidwall/gjson"
)

// TestMax verifies the "max" aggregate over mixed string- and
// number-typed arguments.
func TestMax(t *testing.T) {
	samples := []ArgsType{
		StringArgsType("1"),
		StringArgsType("2"),
		StringArgsType("3"),
		StringArgsType("2"),
		NumArgsType(7.0),
	}
	aggregate, _ := GetAggregateFunc("max")
	require.Equal(t, "7.00000000", aggregate(samples))
}

// TestMin verifies the "min" aggregate, including a negative value.
func TestMin(t *testing.T) {
	samples := []ArgsType{
		StringArgsType("1"),
		StringArgsType("2"),
		StringArgsType("3"),
		StringArgsType("2"),
		StringArgsType("-1"),
		NumArgsType(7.0),
	}
	aggregate, _ := GetAggregateFunc("min")
	require.Equal(t, "-1.00000000", aggregate(samples))
}

// TestAvg verifies the "avg" aggregate over the values 1..10.
func TestAvg(t *testing.T) {
	samples := []ArgsType{
		StringArgsType("1"),
		StringArgsType("2"),
		StringArgsType("3"),
		StringArgsType("4"),
		StringArgsType("5"),
		StringArgsType("6"),
		StringArgsType("7"),
		StringArgsType("8"),
		StringArgsType("9"),
		StringArgsType("10"),
	}
	aggregate, _ := GetAggregateFunc("avg")
	require.Equal(t, "5.50000000", aggregate(samples))
}

// StringArgsType wraps v as a string-typed ArgsType.
func StringArgsType(v string) ArgsType {
	return ArgsType{Type: gjson.String, Str: v}
}

// NumArgsType wraps f as a number-typed ArgsType.
func NumArgsType(f float64) ArgsType {
	return ArgsType{Type: gjson.Number, Num: f}
}
package train

import (
	"github.com/y4v8/errors"
)

// ParamsTrainWagons holds the request parameters for the train_wagons/
// endpoint, serialized via the `url` tags.
type ParamsTrainWagons struct {
	From        string `url:"from"`
	To          string `url:"to"`
	Date        string `url:"date"`
	Train       string `url:"train"`
	WagonTypeID string `url:"wagon_type_id"`
	GetTpl      string `url:"get_tpl"`
}

// ResultTrainWagons is the top-level response envelope.
type ResultTrainWagons struct {
	Data DataTrainWagons `json:"data"`
}

// DataTrainWagons is the payload of a train_wagons/ response.
type DataTrainWagons struct {
	Types   []WagonType `json:"types"`
	Wagons  []Wagon     `json:"wagons"`
	TplPage *string     `json:"tplPage,omitempty"`
}

// WagonType summarizes availability and cost per wagon class.
type WagonType struct {
	TypeID    string `json:"type_id"`
	Title     string `json:"title"`
	Letter    string `json:"letter"`
	Free      int    `json:"free"`
	Cost      int    `json:"cost"`
	IsOneCost bool   `json:"isOneCost"`
}

// Wagon describes a single wagon of a train.
type Wagon struct {
	Num           int            `json:"num"`
	TypeID        string         `json:"type_id"`
	Type          string         `json:"type"`
	Class         string         `json:"class"`
	Railway       int            `json:"railway"`
	Free          int            `json:"free"`
	ByWishes      bool           `json:"byWishes"`
	HasBedding    bool           `json:"hasBedding"`
	Services      []string       `json:"services"`
	Prices        map[string]int `json:"prices"`
	ReservedPrice int            `json:"reservedPrice"`
	AllowBonus    bool           `json:"allowBonus"`
}

// TrainWagons POSTs param to the train_wagons/ endpoint and returns the
// decoded wagon data, or a wrapped error on failure.
func (a *Api) TrainWagons(param ParamsTrainWagons) (*DataTrainWagons, error) {
	var data DataTrainWagons

	err := a.requestDataObject("POST", "train_wagons/", param, &data)
	if err != nil {
		return nil, errors.Wrap(err)
	}

	return &data, nil
}
// Copyright © 2021 Attestant Limited. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package advanced import ( "context" "strings" "time" "github.com/attestantio/vouch/services/metrics" "github.com/attestantio/vouch/services/scheduler" "github.com/pkg/errors" "github.com/rs/zerolog" zerologger "github.com/rs/zerolog/log" "github.com/sasha-s/go-deadlock" "go.uber.org/atomic" ) // module-wide log. var log zerolog.Logger // job contains control points for a job. type job struct { // stateLock is required for active or finalised. stateLock deadlock.Mutex active atomic.Bool finalised atomic.Bool periodic bool cancelCh chan struct{} runCh chan struct{} } // Service is a scheduler service. It uses additional per-job information to manage // the state of each job, in an attempt to ensure additional robustness in the face // of high concurrent load. type Service struct { monitor metrics.SchedulerMonitor jobs map[string]*job jobsMutex deadlock.RWMutex } // New creates a new scheduling service. func New(ctx context.Context, params ...Parameter) (*Service, error) { parameters, err := parseAndCheckParameters(params...) if err != nil { return nil, errors.Wrap(err, "problem with parameters") } // Set logging. 
log = zerologger.With().Str("service", "scheduler").Str("impl", "advanced").Logger() if parameters.logLevel != log.GetLevel() { log = log.Level(parameters.logLevel) } return &Service{ jobs: make(map[string]*job), monitor: parameters.monitor, }, nil } // ScheduleJob schedules a one-off job for a given time. // Note that if the parent context is cancelled the job wil not run. func (s *Service) ScheduleJob(ctx context.Context, name string, runtime time.Time, jobFunc scheduler.JobFunc, data interface{}) error { if name == "" { return scheduler.ErrNoJobName } if jobFunc == nil { return scheduler.ErrNoJobFunc } s.jobsMutex.Lock() _, exists := s.jobs[name] if exists { s.jobsMutex.Unlock() return scheduler.ErrJobAlreadyExists } job := &job{ cancelCh: make(chan struct{}), runCh: make(chan struct{}), } s.jobs[name] = job s.jobsMutex.Unlock() s.monitor.JobScheduled() log.Trace().Str("job", name).Time("scheduled", runtime).Msg("Scheduled job") go func() { select { case <-ctx.Done(): log.Trace().Str("job", name).Time("scheduled", runtime).Msg("Parent context done; job not running") s.jobsMutex.Lock() delete(s.jobs, name) s.jobsMutex.Unlock() finaliseJob(job) s.monitor.JobCancelled() case <-job.cancelCh: log.Trace().Str("job", name).Time("scheduled", runtime).Msg("Cancel triggered; job not running") s.jobsMutex.Lock() delete(s.jobs, name) s.jobsMutex.Unlock() finaliseJob(job) s.monitor.JobCancelled() case <-job.runCh: log.Trace().Str("job", name).Time("scheduled", runtime).Msg("Run triggered; job running") // If we receive this signal the job has already been deleted from the jobs list so no need to // do so again here. s.monitor.JobStartedOnSignal() jobFunc(ctx, data) log.Trace().Str("job", name).Time("scheduled", runtime).Msg("Job complete") finaliseJob(job) job.active.Store(false) case <-time.After(time.Until(runtime)): // It is possible that the job is already active, so check that first before proceeding. 
if job.active.Load() { log.Trace().Str("job", name).Time("scheduled", runtime).Msg("Already running; job not running") break } s.jobsMutex.Lock() delete(s.jobs, name) s.jobsMutex.Unlock() log.Trace().Str("job", name).Time("scheduled", runtime).Msg("Timer triggered; job running") job.active.Store(true) s.monitor.JobStartedOnTimer() jobFunc(ctx, data) log.Trace().Str("job", name).Time("scheduled", runtime).Msg("Job complete") job.active.Store(false) finaliseJob(job) } }() return nil } // SchedulePeriodicJob schedules a job to run in a loop. // The loop starts by calling runtimeFunc, which sets the time for the first run. // Once the time as specified by runtimeFunc is met, jobFunc is called. // Once jobFunc returns, go back to the beginning of the loop. func (s *Service) SchedulePeriodicJob(ctx context.Context, name string, runtimeFunc scheduler.RuntimeFunc, runtimeData interface{}, jobFunc scheduler.JobFunc, jobData interface{}) error { if name == "" { return scheduler.ErrNoJobName } if runtimeFunc == nil { return scheduler.ErrNoRuntimeFunc } if jobFunc == nil { return scheduler.ErrNoJobFunc } s.jobsMutex.Lock() _, exists := s.jobs[name] if exists { s.jobsMutex.Unlock() return scheduler.ErrJobAlreadyExists } job := &job{ cancelCh: make(chan struct{}), runCh: make(chan struct{}), periodic: true, } s.jobs[name] = job s.jobsMutex.Unlock() s.monitor.JobScheduled() go func() { for { runtime, err := runtimeFunc(ctx, runtimeData) if err == scheduler.ErrNoMoreInstances { log.Trace().Str("job", name).Msg("No more instances; period job stopping") s.jobsMutex.Lock() delete(s.jobs, name) s.jobsMutex.Unlock() finaliseJob(job) s.monitor.JobCancelled() return } if err != nil { log.Error().Str("job", name).Err(err).Msg("Failed to obtain runtime; periodic job stopping") s.jobsMutex.Lock() delete(s.jobs, name) s.jobsMutex.Unlock() finaliseJob(job) s.monitor.JobCancelled() return } log.Trace().Str("job", name).Time("scheduled", runtime).Msg("Scheduled job") select { case <-ctx.Done(): 
log.Trace().Str("job", name).Time("scheduled", runtime).Msg("Parent context done; job not running") s.jobsMutex.Lock() delete(s.jobs, name) s.jobsMutex.Unlock() finaliseJob(job) s.monitor.JobCancelled() return case <-job.cancelCh: log.Trace().Str("job", name).Time("scheduled", runtime).Msg("Cancel triggered; job not running") finaliseJob(job) s.monitor.JobCancelled() return case <-job.runCh: log.Trace().Str("job", name).Time("scheduled", runtime).Msg("Run triggered; job running") s.monitor.JobStartedOnSignal() jobFunc(ctx, jobData) log.Trace().Str("job", name).Time("scheduled", runtime).Msg("Job complete") job.active.Store(false) case <-time.After(time.Until(runtime)): if job.active.Load() { log.Trace().Str("job", name).Time("scheduled", runtime).Msg("Already running; job not running") continue } job.active.Store(true) log.Trace().Str("job", name).Time("scheduled", runtime).Msg("Timer triggered; job running") s.monitor.JobStartedOnTimer() jobFunc(ctx, jobData) log.Trace().Str("job", name).Time("scheduled", runtime).Msg("Job complete") job.active.Store(false) } } }() return nil } // RunJob runs a named job immediately. // If the job does not exist it will return an appropriate error. func (s *Service) RunJob(ctx context.Context, name string) error { s.jobsMutex.Lock() job, exists := s.jobs[name] if !exists { s.jobsMutex.Unlock() return scheduler.ErrNoSuchJob } if !job.periodic { // Because this job only runs once we remove it from the jobs list immediately. delete(s.jobs, name) } s.jobsMutex.Unlock() return s.runJob(ctx, job) } // RunJobIfExists runs a job if it exists. // This does not return an error if the job does not exist or is otherwise unable to run. func (s *Service) RunJobIfExists(ctx context.Context, name string) { s.jobsMutex.Lock() job, exists := s.jobs[name] if !exists { s.jobsMutex.Unlock() return } if !job.periodic { // Because this job only runs once we remove it from the jobs list immediately. 
delete(s.jobs, name) } s.jobsMutex.Unlock() //nolint s.runJob(ctx, job) } // JobExists returns true if a job exists. func (s *Service) JobExists(ctx context.Context, name string) bool { s.jobsMutex.RLock() _, exists := s.jobs[name] s.jobsMutex.RUnlock() return exists } // ListJobs returns the names of all jobs. func (s *Service) ListJobs(ctx context.Context) []string { s.jobsMutex.RLock() names := make([]string, 0, len(s.jobs)) for name := range s.jobs { names = append(names, name) } s.jobsMutex.RUnlock() return names } // CancelJob removes a named job. // If the job does not exist it will return an appropriate error. func (s *Service) CancelJob(ctx context.Context, name string) error { s.jobsMutex.Lock() job, exists := s.jobs[name] if !exists { s.jobsMutex.Unlock() return scheduler.ErrNoSuchJob } delete(s.jobs, name) s.jobsMutex.Unlock() job.stateLock.Lock() if job.finalised.Load() { // Already marked to be cancelled. job.stateLock.Unlock() return nil } job.finalised.Store(true) job.cancelCh <- struct{}{} job.stateLock.Unlock() return nil } // CancelJobIfExists cancels a job that may or may not exist. // If this is a period job then all future instances are cancelled. func (s *Service) CancelJobIfExists(ctx context.Context, name string) { //nolint s.CancelJob(ctx, name) } // CancelJobs cancels all jobs with the given prefix. // If the prefix matches a period job then all future instances are cancelled. func (s *Service) CancelJobs(ctx context.Context, prefix string) { names := make([]string, 0) s.jobsMutex.Lock() for name := range s.jobs { if strings.HasPrefix(name, prefix) { names = append(names, name) } } s.jobsMutex.Unlock() for _, name := range names { // It is possible that the job has been removed whist we were iterating, so use the non-erroring version of cancel. s.CancelJobIfExists(ctx, name) } } // finaliseJob tidies up a job that is no longer in use. 
func finaliseJob(job *job) {
	job.stateLock.Lock()
	// Mark first, under stateLock, so senders (CancelJob/runJob) that check
	// the flag before sending never hit the closed channels below.
	job.finalised.Store(true)
	// Close the channels for the job to ensure that nothing is hanging on sending a message.
	close(job.cancelCh)
	close(job.runCh)
	job.stateLock.Unlock()
}

// runJob runs the given job.
// It signals the job's goroutine via runCh. Returns ErrJobRunning if the job
// is already active and ErrJobFinalised if the job has been torn down.
// NOTE(review): ctx is currently unused; kept for signature symmetry.
func (s *Service) runJob(ctx context.Context, job *job) error {
	job.stateLock.Lock()
	if job.active.Load() {
		job.stateLock.Unlock()
		return scheduler.ErrJobRunning
	}
	if job.finalised.Load() {
		job.stateLock.Unlock()
		return scheduler.ErrJobFinalised
	}
	// Set active before signalling so the timer branch will not start a
	// second concurrent run.
	job.active.Store(true)
	// Presumably runCh is unbuffered, so this blocks until the job's
	// goroutine is at its select — confirm.
	job.runCh <- struct{}{}
	job.stateLock.Unlock()
	return nil
}
/* Copyright (c) 2017-2018 Simon Schmidt Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ // Deprecated. 
package xsqldb

import "database/sql"
import "text/template"
import "bytes"
import "time"

import "github.com/maxymania/fastnntp-polyglot-labs/bufferex"

// AuthRank is an ordered privilege level; higher values may post to more
// restricted groups (see AuthBase.GroupHeadFilter).
type AuthRank uint8

const (
	ARReader AuthRank = iota
	ARUser
	ARModerator
	ARFeeder
)

// Dialect holds the SQL type names that differ between database engines,
// used to render the schema template below.
type Dialect struct{ Binary, Int64, Date, Byte string }

// PostgreSQL
var PgDialect = &Dialect{
	Binary: "bytea",
	Int64:  "bigint",
	Date:   "date",
	Byte:   "smallint",
}

// Microsoft SQL Server
// (untested)
var MsSqlDialect = &Dialect{
	Binary: "varbinary",
	Int64:  "bigint",
	Date:   "date",
	Byte:   "tinyint",
}

// createTables renders the full schema for a Dialect:
//   ngrpnumvalue: (group, article number) -> message-id + expiry
//   msgidbkt:     message-id -> storage bucket + expiry
//   ngrpstatic:   group -> description
//   ngrpcnt:      group -> latest number, article count, posting status
//   ngrpstat:     view aggregating per-group count/low/high water marks
var createTables = template.Must(template.New("create").Parse(`
CREATE TABLE ngrpnumvalue (
	ngrp {{.Binary}},
	mnum {{.Int64}},
	msgid {{.Binary}},
	expir {{.Date}},
	PRIMARY KEY(ngrp,mnum)
);
CREATE TABLE msgidbkt (
	msgid {{.Binary}} PRIMARY KEY,
	bucket {{.Binary}},
	expir {{.Date}}
);
CREATE TABLE ngrpstatic (
	ngrp {{.Binary}} PRIMARY KEY,
	descr {{.Binary}}
);
CREATE TABLE ngrpcnt (
	ngrp {{.Binary}} PRIMARY KEY,
	latest {{.Int64}},
	gcount {{.Int64}},
	status {{.Byte}}
);
CREATE VIEW ngrpstat AS
	SELECT ngrp, count(mnum) as narts, min(mnum) as low, max(mnum) as high
	FROM ngrpnumvalue GROUP BY ngrp
;
`))

// Base wraps a *sql.DB with the article/group storage operations.
type Base struct{ DB *sql.DB }

/* This must be used to implement GroupHeadCache. */
type AuthBase struct {
	Base
	Rank AuthRank
}

/*
This is for PostgreSQL only.
This must not be used with any other Database.
*/ type PgBase struct{ Base } func (b *AuthBase) GroupHeadFilter(groups [][]byte) ([][]byte, error) { var status byte stm,err := b.DB.Prepare(` SELECT n.status FROM ngrpcnt n WHERE n.ngrp = $1 ;`) if err!=nil { return nil,err } defer stm.Close() i := 0 for _,group := range groups { row := stm.QueryRow(group) ok := false err := row.Scan(&status) if err!=nil { continue } switch status { case 'y': ok = ARUser<=b.Rank case 'm': ok = ARModerator<=b.Rank default: /* 'n' */ ok = ARFeeder<=b.Rank } if ok { groups[i] = group i++ } } return groups[:i],nil } func (b *Base) CreateTables(d *Dialect) error { buf := new(bytes.Buffer) createTables.Execute(buf, d) _,err := b.DB.Exec(buf.String()) return err } /* =========================================================================================================================== */ func (b *Base) InsertGoupMapping(group []byte, num int64, msgid []byte, expire time.Time) error { _,err := b.DB.Exec(`INSERT INTO ngrpnumvalue (ngrp,mnum,msgid,expir) VALUES ($1,$2,$3,$4);`,group,num,msgid,expire) if err==nil { _,err = b.DB.Exec(`UPDATE ngrpcnt SET gcount = gcount + 1 WHERE ngrp = $1;`,group) } return err } func (b *Base) InsertIDMapping(msgid, bucket []byte, expire time.Time) error { _,err := b.DB.Exec(`INSERT INTO msgidbkt (msgid,bucket,expir) VALUES ($1,$2,$3);`,msgid,bucket,expire) return err } func (b *Base) AdmPutDescr(group []byte, descr []byte) { _,err := b.DB.Exec(`INSERT INTO ngrpstatic (ngrp,descr) VALUES ($1,$2);`,group,descr) if err!=nil { b.DB.Exec(`UPDATE ngrpstatic SET descr=$1 WHERE ngrp=$2;` ,descr,group) } } func (b *Base) AdmPutStatus(group []byte, status byte) { _,err := b.DB.Exec(`INSERT INTO ngrpcnt (ngrp,latest,gcount,status) VALUES ($1,0,0,$2);`,group,int(status)) if err!=nil { b.DB.Exec(`UPDATE ngrpcnt SET status=$1 WHERE ngrp=$2;`,int(status),group) } } /* =========================================================================================================================== */ func (b *Base) 
QueryGroupMapping(group []byte, num int64) (msgid, bucket bufferex.Binary,err error) { var res *sql.Rows res,err = b.DB.Query(` SELECT m.msgid, m.bucket FROM ngrpnumvalue n JOIN msgidbkt m ON n.msgid = m.msgid WHERE n.ngrp = $1 AND n.mnum = $2 ;`,group,num) if err!=nil { return } defer res.Close() if !res.Next() { return } var rmsgid,rbucket sql.RawBytes err = res.Scan(&rmsgid,&rbucket) if err!=nil { return } msgid = bufferex.NewBinary(rmsgid) bucket = bufferex.NewBinary(rbucket) return } func (b *Base) QueryIDMapping(msgid []byte) (bucket bufferex.Binary,err error) { var res *sql.Rows res,err = b.DB.Query(` SELECT m.bucket FROM msgidbkt m WHERE m.msgid = $1 ;`,msgid) if err!=nil { return } defer res.Close() if !res.Next() { return } var rbucket sql.RawBytes err = res.Scan(&rbucket) if err!=nil { return } bucket = bufferex.NewBinary(rbucket) return } func (b *Base) QueryGroupShift(group []byte, num int64, backward bool) (nxt int64,msgid bufferex.Binary,err error) { var res *sql.Rows aggr,comp := "min",">" if backward { aggr,comp = "max","<" } res,err = b.DB.Query(` SELECT `+aggr+`(n.mnum) FROM ngrpnumvalue n WHERE n.ngrp = $1 AND n.mnum `+comp+` $2 ;`,group,num) if err!=nil { return } if !res.Next() { res.Close() ; return } err = res.Scan(&nxt) res.Close() if err!=nil { return } res,err = b.DB.Query(` SELECT n.msgid FROM ngrpnumvalue n WHERE n.ngrp = $1 AND n.mnum = $2 ;`,group,nxt) if err!=nil { return } defer res.Close() if !res.Next() { nxt = 0 ; return } var rmsgid sql.RawBytes err = res.Scan(&rmsgid) if err!=nil { return } msgid = bufferex.NewBinary(rmsgid) return } func (b *Base) QueryGroupList(group []byte, first, last int64, targ func(num int64, bucket, msgid bufferex.Binary)) error { row,err := b.DB.Query(` SELECT n.mnum, m.bucket, m.msgid FROM ngrpnumvalue n LEFT OUTER JOIN msgidbkt m ON n.msgid = m.msgid WHERE n.ngrp = $1 AND n.mnum >= $2 AND n.mnum <= $3 ;`,group,first,last) if err!=nil { return err } defer row.Close() var num int64 var bucket,msgid 
sql.RawBytes scan := []interface{}{&num,&bucket,&msgid} for row.Next() { err := row.Scan(scan...) if err!=nil { return err } targ(num,bufferex.NewBinary(bucket),bufferex.NewBinary(msgid)) } return nil } func (b *Base) groupExpire(group []byte, expire time.Time,errs chan <- error) { tx,e := b.DB.Begin() if e!=nil { errs <- e; return } res,e := tx.Exec(`DELETE FROM ngrpnumvalue n WHERE n.expir <= $1 AND n.ngrp = $1 ;`,expire,group) if e!=nil { errs <- e; return } i,e := res.RowsAffected() if e!=nil { errs <- e; return } /* TODO: there is a better way. */ _,e = tx.Exec(`UPDATE ngrpcnt SET gcount = gcount - $1 WHERE ngrp = $2`,i,group) if e!=nil { errs <- e; return } errs <- tx.Commit() } func (b *Base) Expire(expire time.Time) error { rows,err := b.DB.Query(`SELECT ngrp FROM ngrpcnt`) if err!=nil { return err } var group []byte errs := make(chan error,16) n := 0 for rows.Next() { rows.Scan(group) go b.groupExpire(group,expire,errs) n++ } n++ go func(){ _,e2 := b.DB.Exec(`DELETE FROM msgidbkt m WHERE m.expir <= $1 ;`,expire) errs <- e2 }() for i := 0; i<n; i++ { e := <-errs if e!=nil { err = e } } return err } /* =================================================================================== */ func (b *Base) GroupHeadInsert(groups [][]byte, buf []int64) (nums []int64, e error) { { l := len(groups) if cap(buf)<l { buf = make([]int64,l) } buf = buf[:l] nums = buf } tx,err := b.DB.Begin() if err!=nil { return nil,err } defer func() { if e!=nil { tx.Rollback() return } err := tx.Commit() if err==nil { e = err } }() for i,group := range groups { tx.Exec(` UPDATE ngrpcnt SET latest = latest + 1 WHERE ngrp = $1 ;`,group) e = tx.QueryRow(` SELECT n.latest FROM ngrpcnt n WHERE n.ngrp = $1 ;`,group).Scan(&buf[i]) if e!=nil { return } } return } /* PostgreSQL specific variant/Optimization */ func (b *PgBase) GroupHeadInsert(groups [][]byte, buf []int64) (nums []int64, e error) { { l := len(groups) if cap(buf)<l { buf = make([]int64,l) } buf = buf[:l] nums = buf } for i,group 
:= range groups { e = b.DB.QueryRow(` UPDATE ngrpcnt SET latest = latest + 1 WHERE ngrp = $1 RETURNING latest; `,group).Scan(&buf[i]) if e!=nil { return } } return } func (b *Base) GroupHeadRevert(groups [][]byte, nums []int64) error { for i,group := range groups { b.DB.Exec(` UPDATE ngrpcnt SET latest = latest - 1 WHERE latest = $1, ngrp = $2 ;`,nums[i],group) } return nil } func (b *Base) GroupRealtimeQuery(group []byte) (number int64, low int64, high int64, ok bool) { err := b.DB.QueryRow(` SELECT COALESCE(n.narts,0),COALESCE(n.low,0),COALESCE(n.high,0) FROM ngrpstat n RIGHT JOIN ngrpcnt m on n.ngrp=m.ngrp WHERE m.ngrp = $1 ;`,group).Scan(&number,&low,&high) ok = err==nil if ok { if low==0 && high >0 { low = 1 } if high ==0 { low = 0 } } return } func (b *Base) GroupRealtimeList(targ func(group []byte, high, low int64, status byte)) bool { rows,err := b.DB.Query(` SELECT m.ngrp,COALESCE(n.high,0),COALESCE(n.low,0),m.status FROM ngrpstat n RIGHT JOIN ngrpcnt m on n.ngrp=m.ngrp ;`) if err!=nil { return false } var group sql.RawBytes var high, low int64 var status uint scn := []interface{}{&group,&high,&low,&status} for rows.Next() { err := rows.Scan(scn...) if err==nil { if low==0 && high >0 { low = 1 } if high ==0 { low = 0 } targ(group,high,low,byte(status)) } } return true } func (b *Base) GroupStaticList(targ func(group []byte, descr []byte)) bool { rows,err := b.DB.Query(` SELECT ngrp,descr FROM ngrpstatic ;`) if err!=nil { return false } var group,descr sql.RawBytes scn := []interface{}{&group,&descr} for rows.Next() { if rows.Scan(scn...)==nil { targ(group,descr) } } return true }
package postgres

import (
	"final-project/model"
	"fmt"
	"log"
	"os"

	"gorm.io/driver/postgres"
	"gorm.io/gorm"
)

// Connect opens a GORM connection to PostgreSQL using the DB_HOST/DB_PORT/
// DB_USER/DB_PASSWORD/DB_NAME environment variables, configures the
// connection pool and runs the Todo migration.
// Any failure is fatal: the process cannot operate without its database.
func Connect() *gorm.DB {
	host := os.Getenv("DB_HOST")
	port := os.Getenv("DB_PORT")
	user := os.Getenv("DB_USER")
	password := os.Getenv("DB_PASSWORD")
	dbname := os.Getenv("DB_NAME")

	dsn := fmt.Sprintf("host=%s port=%s user=%s password=%s dbname=%s sslmode=disable TimeZone=Asia/Jakarta",
		host, port, user, password, dbname)
	db, err := gorm.Open(postgres.Open(dsn), &gorm.Config{})
	if err != nil {
		log.Fatal(err)
	}

	sqlDB, err := db.DB()
	if err != nil {
		log.Fatal(err)
	}
	sqlDB.SetMaxOpenConns(100)
	sqlDB.SetMaxIdleConns(1)

	// BUG FIX: the AutoMigrate error was silently discarded; a failed
	// migration would previously go unnoticed until the first query.
	if err := db.AutoMigrate(&model.Todo{}); err != nil {
		log.Fatal(err)
	}
	return db
}
package j2rpc

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"mime"
	"net/http"
	"reflect"
	"strings"
	"unsafe"

	"github.com/atcharles/gof/v2/json"
)

// parsePositionalArguments tries to parse the given args to an array of values with the
// given types. It returns the parsed values or an error when the args could not be
// parsed. Missing optional arguments are returned as reflect.Zero|reflect.New values.
func parsePositionalArguments(rawArgs json.RawMessage, types []reflect.Type) ([]reflect.Value, error) {
	dec := json.NewDecoder(bytes.NewReader(rawArgs))
	var args []reflect.Value
	tok, err := dec.Token()
	switch {
	case err == io.EOF || (err == nil && tok == nil):
		// "params" is optional and may be empty. Also allow "params":null even though it's
		// not in the spec because our own client used to send it.
	case err != nil:
		return nil, err
	case tok == json.Delim('['):
		// Read argument array.
		if args, err = parseArgumentArray(dec, types); err != nil {
			return nil, err
		}
	default:
		return nil, errors.New("non-array args")
	}
	// Set any missing args to nil.
	for i := len(args); i < len(types); i++ {
		ctp := types[i]
		addVal := reflect.Zero(ctp)
		if ctp.Kind() == reflect.Ptr {
			//addVal = reflect.New(ctp).Elem()
			// A missing pointer argument becomes a non-nil pointer to the
			// element's zero value rather than a nil pointer.
			addVal = reflect.New(ctp.Elem())
		}
		args = append(args, addVal)
	}
	return args, nil
}

// parseArgumentArray decodes the elements of a JSON array (whose '[' token
// has already been consumed) into values of the expected types. Surplus
// elements are rejected.
func parseArgumentArray(dec *json.Decoder, types []reflect.Type) ([]reflect.Value, error) {
	args := make([]reflect.Value, 0, len(types))
	for i := 0; dec.More(); i++ {
		if i >= len(types) {
			return args, fmt.Errorf("too many arguments, want at most %d", len(types))
		}
		agv := reflect.New(types[i])
		if err := dec.Decode(agv.Interface()); err != nil {
			return args, fmt.Errorf("invalid argument %d: %v", i, err)
		}
		// NOTE(review): agv is the pointer freshly returned by reflect.New,
		// so IsNil looks always-false here; presumably this was meant to
		// reject JSON null for required (non-pointer) parameters — confirm.
		if agv.IsNil() && types[i].Kind() != reflect.Ptr {
			return args, fmt.Errorf("missing value for required argument %d", i)
		}
		args = append(args, agv.Elem())
	}
	// Read end of args array.
_, err := dec.Token() return args, err } //------------------------------ [segmentation] -------------------------------- func isErrorType(t reflect.Type) bool { for t.Kind() == reflect.Ptr { t = t.Elem() } return t.Implements(errorType) } func writeError(w http.ResponseWriter, status int, msg string) { w.Header().Set("Content-Type", "text/plain; charset=utf-8") AbortWriteHeader(w, status) _, _ = fmt.Fprint(w, msg) } // AbortWriteHeader ... func AbortWriteHeader(w http.ResponseWriter, code int) { w.WriteHeader(code) w.Header().Set("Status-Written", "1") } // SnakeString converts the accepted string to a snake string (XxYy to xx_yy) func SnakeString(s string) string { data := make([]byte, 0, len(s)*2) j := false for _, d := range StringToBytes(s) { if d >= 'A' && d <= 'Z' { if j { data = append(data, '_') j = false } } else if d != '_' { j = true } data = append(data, d) } return strings.ToLower(BytesToString(data)) } // CamelString converts the accepted string to a camel string (xx_yy to XxYy) func CamelString(s string) string { data := make([]byte, 0, len(s)) j := false k := false num := len(s) - 1 for i := 0; i <= num; i++ { d := s[i] if !k && d >= 'A' && d <= 'Z' { k = true } if d >= 'a' && d <= 'z' && (j || !k) { d = d - 32 j = false k = true } if k && d == '_' && num > i && s[i+1] >= 'a' && s[i+1] <= 'z' { j = true continue } data = append(data, d) } return BytesToString(data[:]) } // StringToBytes convert string type to []byte type. // NOTE: panic if modify the member value of the []byte. func StringToBytes(s string) []byte { sp := *(*[2]uintptr)(unsafe.Pointer(&s)) bp := [3]uintptr{sp[0], sp[1], sp[1]} return *(*[]byte)(unsafe.Pointer(&bp)) } // BytesToString convert []byte type to string type. 
// BytesToString reinterprets b as a string without copying.
// The result must not outlive mutations of b.
func BytesToString(b []byte) string {
	return *(*string)(unsafe.Pointer(&b))
}

const (
	// maxRequestContentLength caps request bodies at 5 MiB.
	maxRequestContentLength = 1024 * 1024 * 5
	contentType             = "application/json"
)

// https://www.jsonrpc.org/historical/json-rpc-over-http.html#id13
var acceptedContentTypes = []string{contentType, "application/json-rpc", "application/jsonrequest"}

// validateRequest returns a non-zero response code and error message if the
// request is invalid.
func validateRequest(r *http.Request) (int, error) {
	switch {
	case r.Method == http.MethodPut || r.Method == http.MethodDelete:
		return http.StatusMethodNotAllowed, errors.New("method not allowed")
	case r.ContentLength > maxRequestContentLength:
		return http.StatusRequestEntityTooLarge,
			fmt.Errorf("content length too large (%d>%d)", r.ContentLength, maxRequestContentLength)
	case r.Method == http.MethodOptions:
		// Allow OPTIONS (regardless of content-type)
		return 0, nil
	}
	// Accept any of the recognised JSON-RPC media types.
	if mediaType, _, err := mime.ParseMediaType(r.Header.Get("content-type")); err == nil {
		for _, candidate := range acceptedContentTypes {
			if candidate == mediaType {
				return 0, nil
			}
		}
	}
	// Invalid content-type
	return http.StatusUnsupportedMediaType,
		fmt.Errorf("invalid content type, only %s is supported", contentType)
}
package main

import "testing"

// TestSum checks sum against a table of fixed cases, covering positive,
// negative and mixed-sign inputs.
func TestSum(t *testing.T) {
	type test struct {
		data     []int
		result   int
		testname string
	}

	tests := []test{
		{data: []int{1, 2, 3}, result: 6, testname: "Test 1"},
		{data: []int{1, 2}, result: 3, testname: "Test 2"},
		// BUG FIX: 1+3 is 4, not 3; the old expectation made this case fail.
		{data: []int{1, 3}, result: 4, testname: "Test 3"},
		{data: []int{-1, -2, 3}, result: 0, testname: "Test 4"},
		{data: []int{-1, -2, -3}, result: -6, testname: "Test 5"},
	}

	for _, v := range tests {
		x := sum(v.data...)
		if x != v.result {
			t.Errorf("Test: %v Expected %v, got %v", v.testname, v.result, x)
		}
	}
}
package db

import (
	"fmt"
	"strconv"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"go.mongodb.org/mongo-driver/bson"
)

// SECURITY(review): live Atlas credentials are hard-coded here; move them to
// an environment variable and rotate the password.
const DB_LINK = "mongodb+srv://ddpoll:ddpoll@test-ycw1l.mongodb.net/test?retryWrites=true&w=majority"
const TEST_DB = "test"
const TEST_COLLECTION = "testCollection_weifeng"
const POLL_DB = "Polls"

// TestBasicDB round-trips one document through the raw collection API.
func TestBasicDB(t *testing.T) {
	db, err := initializeTestEnv()
	// BUG FIX: the initialisation error was never checked; a failed Dial
	// previously surfaced as a nil-pointer panic further down.
	assert.Nil(t, err)
	defer db.Disconnect()
	ctx, cancel := db.QueryContext()
	defer cancel()
	collection := db.Client.Database(TEST_DB).Collection(TEST_COLLECTION)
	_, err = collection.InsertOne(ctx, bson.M{"name": "pi", "value": 3.14159, "desc": "I ate that pie yesterday!"})
	assert.Nil(t, err)
	ctx, cancel2 := db.QueryContext()
	defer cancel2()
	var result struct {
		Name  string
		Value float64
	}
	singRes := collection.FindOne(ctx, bson.M{"name": "pi", "value": 3.14159})
	singRes.Decode(&result)
	assert.Equal(t, result.Name, "pi")
	assert.Equal(t, result.Value, 3.14159)
}

// TestUserDB creates a user and reads it back by ID.
func TestUserDB(t *testing.T) {
	db, err := initializeTestEnv()
	// BUG FIX: check the initialisation error (see TestBasicDB).
	assert.Nil(t, err)
	defer db.Disconnect()
	_, cancel := db.QueryContext()
	defer cancel()
	usersDB := db.ToUserDB(TEST_DB, TEST_COLLECTION, "")
	id, err := usersDB.CreateNewUser("even2", "666")
	assert.Nil(t, err)
	u, err := usersDB.GetUserByID(id)
	assert.Equal(t, id, u.UID)
	assert.Equal(t, "even2", u.Name)
	assert.Nil(t, err)
}

// TestPollsDB creates one poll, then exercises lookup by PID (valid and
// invalid) and the per-user listing channel.
func TestPollsDB(t *testing.T) {
	db, err := initializeTestEnv()
	defer db.Disconnect()
	assert.Nil(t, err)
	pollsDB := db.ToPollsDB(TEST_DB, TEST_COLLECTION, "")
	id, err := pollsDB.CreatePoll("miska", "example poll", "vote for dinner", "Life Style", true, time.Hour, []string{"Chicken", "Rice"})
	fmt.Println(id)
	assert.Nil(t, err)
	// Find the poll with the id
	p, err := pollsDB.GetPollByPID(id)
	assert.Nil(t, err)
	assert.Equal(t, p.PID, id)
	assert.Equal(t, p.Owner, "miska")
	assert.Equal(t, p.Choices, []string{"Chicken", "Rice"})
	assert.Equal(t, p.Votes, []uint64{0, 0})
	assert.Equal(t, 2, len(p.Votes))
	// Find the poll with invalid id
	p, err = pollsDB.GetPollByPID("")
	assert.NotNil(t, err)
	res, err := pollsDB.GetPollsByUser("miska")
	assert.Nil(t, err)
	assert.Equal(t, id, (<-res).PID)
	// The channel must be closed after the single poll.
	_, ok := <-res
	assert.False(t, ok)
}

// TestPollsDBNewstPolls creates ten polls and verifies GetNewestPolls streams
// them newest-first and closes the channel afterwards.
func TestPollsDBNewstPolls(t *testing.T) {
	db, err := initializeTestEnv()
	defer db.Disconnect()
	assert.Nil(t, err)
	pollsDB := db.ToPollsDB(TEST_DB, TEST_COLLECTION, "")
	ids := make([]string, 10)
	for i := 0; i < 10; i++ {
		id, err := pollsDB.CreatePoll("miska", strconv.Itoa(i), "vote for dinner", "Life Style", true, time.Hour, []string{"Chicken", "Rice"})
		assert.Nil(t, err)
		ids[i] = id
	}
	ch, err := pollsDB.GetNewestPolls(10)
	assert.Nil(t, err)
	for i := 9; i >= 0; i-- {
		val, ok := <-ch
		assert.True(t, ok)
		assert.Equal(t, val.PID, ids[i])
		assert.Equal(t, strconv.Itoa(i), val.Title)
		assert.Equal(t, "miska", val.Owner)
	}
	_, ok := <-ch
	assert.False(t, ok)
}

// initializeTestEnv dials the test cluster and wipes the test collection so
// every test starts from an empty database.
func initializeTestEnv() (db *DB, err error) {
	db, err = Dial(DB_LINK, 2*time.Second, 5*time.Second)
	// BUG FIX: the Dial error was unconditionally overwritten by the wipe
	// result (and wipeDatabase was called on a possibly-nil db).
	if err != nil {
		return
	}
	err = wipeDatabase(db)
	return
}

// wipeDatabase removes every document from the shared test collection.
func wipeDatabase(db *DB) error {
	ctx, cancel := db.QueryContextEx(5 * time.Second)
	defer cancel()
	_, err := db.Client.Database(TEST_DB).Collection(TEST_COLLECTION).DeleteMany(ctx, bson.M{})
	return err
}
package schemes import "image/color" // AlphaFire is a gradient color scheme from transparent through red // to yellow then white. var AlphaFire []color.Color func init() { AlphaFire = []color.Color{ color.NRGBA{R: 0xff, G: 0xff, B: 0xff, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0xfa, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0xf5, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0xf0, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0xeb, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0xe6, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0xe1, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0xdc, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0xd7, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0xd1, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0xcc, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0xc7, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0xc2, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0xbd, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0xb8, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0xb3, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0xae, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0xa8, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0xa3, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0x9e, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0x99, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0x94, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0x8f, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0x8a, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0x85, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0x7f, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0x7a, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0x75, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0x70, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0x6b, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0x66, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0x61, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0x5c, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0x57, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0x51, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0x4c, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 
0x47, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0x42, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0x3d, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0x38, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0x33, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0x2e, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0x28, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0x23, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0x1e, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0x19, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0x14, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0xf, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0xa, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0x5, A: 0xff}, color.NRGBA{R: 0xff, G: 0xff, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0xfb, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0xf7, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0xf3, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0xee, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0xea, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0xe6, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0xe2, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0xdd, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0xd9, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0xd5, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0xd1, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0xcc, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0xc8, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0xc4, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0xbf, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0xbb, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0xb7, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0xb3, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0xae, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0xaa, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0xa6, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0xa2, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0x9d, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0x99, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0x95, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0x91, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0x8c, B: 0x0, A: 0xff}, 
color.NRGBA{R: 0xff, G: 0x88, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0x84, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0x7f, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0x7b, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0x77, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0x73, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0x6e, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0x6a, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0x66, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0x62, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0x5d, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0x59, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0x55, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0x51, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0x4c, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0x48, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0x44, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0x3f, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0x3b, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0x37, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0x33, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0x2e, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0x2a, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0x26, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0x22, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0x1d, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0x19, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0x15, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0x11, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0xc, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0x8, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0x4, B: 0x0, A: 0xff}, color.NRGBA{R: 0xff, G: 0x0, B: 0x0, A: 0xff}, color.NRGBA{R: 0xfe, G: 0x1, B: 0x1, A: 0xff}, color.NRGBA{R: 0xfd, G: 0x2, B: 0x2, A: 0xff}, color.NRGBA{R: 0xfc, G: 0x3, B: 0x3, A: 0xfe}, color.NRGBA{R: 0xfb, G: 0x4, B: 0x4, A: 0xfe}, color.NRGBA{R: 0xf9, G: 0x5, B: 0x5, A: 0xfe}, color.NRGBA{R: 0xf9, G: 0x6, B: 0x6, A: 0xfd}, color.NRGBA{R: 0xf7, G: 0x7, B: 0x7, A: 0xfd}, color.NRGBA{R: 0xf6, G: 0x8, B: 0x8, A: 0xfd}, color.NRGBA{R: 0xf5, G: 0x9, B: 0x9, A: 
0xfc}, color.NRGBA{R: 0xf4, G: 0xb, B: 0xb, A: 0xfc}, color.NRGBA{R: 0xf3, G: 0xc, B: 0xc, A: 0xfc}, color.NRGBA{R: 0xf2, G: 0xd, B: 0xd, A: 0xfb}, color.NRGBA{R: 0xf1, G: 0xe, B: 0xe, A: 0xfb}, color.NRGBA{R: 0xef, G: 0xf, B: 0xf, A: 0xfb}, color.NRGBA{R: 0xef, G: 0x10, B: 0x10, A: 0xfa}, color.NRGBA{R: 0xed, G: 0x11, B: 0x11, A: 0xfa}, color.NRGBA{R: 0xec, G: 0x12, B: 0x12, A: 0xfa}, color.NRGBA{R: 0xeb, G: 0x13, B: 0x13, A: 0xf9}, color.NRGBA{R: 0xea, G: 0x15, B: 0x15, A: 0xf9}, color.NRGBA{R: 0xe9, G: 0x16, B: 0x16, A: 0xf8}, color.NRGBA{R: 0xe8, G: 0x17, B: 0x17, A: 0xf8}, color.NRGBA{R: 0xe7, G: 0x18, B: 0x18, A: 0xf8}, color.NRGBA{R: 0xe6, G: 0x19, B: 0x19, A: 0xf7}, color.NRGBA{R: 0xe5, G: 0x1a, B: 0x1a, A: 0xf7}, color.NRGBA{R: 0xe2, G: 0x1b, B: 0x1b, A: 0xf7}, color.NRGBA{R: 0xe2, G: 0x1d, B: 0x1d, A: 0xf6}, color.NRGBA{R: 0xe0, G: 0x1e, B: 0x1e, A: 0xf6}, color.NRGBA{R: 0xdf, G: 0x20, B: 0x20, A: 0xf6}, color.NRGBA{R: 0xde, G: 0x21, B: 0x21, A: 0xf5}, color.NRGBA{R: 0xdd, G: 0x22, B: 0x22, A: 0xf5}, color.NRGBA{R: 0xdc, G: 0x23, B: 0x23, A: 0xf5}, color.NRGBA{R: 0xdb, G: 0x24, B: 0x24, A: 0xf4}, color.NRGBA{R: 0xda, G: 0x25, B: 0x25, A: 0xf4}, color.NRGBA{R: 0xd8, G: 0x26, B: 0x26, A: 0xf4}, color.NRGBA{R: 0xd7, G: 0x28, B: 0x28, A: 0xf3}, color.NRGBA{R: 0xd5, G: 0x29, B: 0x29, A: 0xf3}, color.NRGBA{R: 0xd5, G: 0x2b, B: 0x2b, A: 0xf2}, color.NRGBA{R: 0xd3, G: 0x2c, B: 0x2c, A: 0xf2}, color.NRGBA{R: 0xd2, G: 0x2d, B: 0x2d, A: 0xf2}, color.NRGBA{R: 0xd1, G: 0x2e, B: 0x2e, A: 0xf1}, color.NRGBA{R: 0xd0, G: 0x2f, B: 0x2f, A: 0xf1}, color.NRGBA{R: 0xcf, G: 0x30, B: 0x30, A: 0xf1}, color.NRGBA{R: 0xcd, G: 0x32, B: 0x32, A: 0xf0}, color.NRGBA{R: 0xcc, G: 0x33, B: 0x33, A: 0xf0}, color.NRGBA{R: 0xca, G: 0x34, B: 0x34, A: 0xf0}, color.NRGBA{R: 0xca, G: 0x35, B: 0x35, A: 0xef}, color.NRGBA{R: 0xc8, G: 0x37, B: 0x37, A: 0xef}, color.NRGBA{R: 0xc7, G: 0x38, B: 0x38, A: 0xef}, color.NRGBA{R: 0xc5, G: 0x3a, B: 0x3a, A: 0xee}, color.NRGBA{R: 0xc4, G: 0x3b, B: 0x3b, A: 
0xee}, color.NRGBA{R: 0xc2, G: 0x3c, B: 0x3c, A: 0xee}, color.NRGBA{R: 0xc2, G: 0x3d, B: 0x3d, A: 0xed}, color.NRGBA{R: 0xc1, G: 0x3e, B: 0x3e, A: 0xed}, color.NRGBA{R: 0xbf, G: 0x3f, B: 0x3f, A: 0xed}, color.NRGBA{R: 0xbe, G: 0x41, B: 0x41, A: 0xec}, color.NRGBA{R: 0xbc, G: 0x43, B: 0x43, A: 0xec}, color.NRGBA{R: 0xbc, G: 0x44, B: 0x44, A: 0xeb}, color.NRGBA{R: 0xba, G: 0x45, B: 0x45, A: 0xeb}, color.NRGBA{R: 0xb9, G: 0x46, B: 0x46, A: 0xeb}, color.NRGBA{R: 0xb7, G: 0x48, B: 0x48, A: 0xea}, color.NRGBA{R: 0xb6, G: 0x49, B: 0x49, A: 0xea}, color.NRGBA{R: 0xb5, G: 0x4a, B: 0x4a, A: 0xea}, color.NRGBA{R: 0xb4, G: 0x4b, B: 0x4b, A: 0xe9}, color.NRGBA{R: 0xb3, G: 0x4c, B: 0x4c, A: 0xe9}, color.NRGBA{R: 0xb0, G: 0x4f, B: 0x4f, A: 0xe9}, color.NRGBA{R: 0xb0, G: 0x50, B: 0x50, A: 0xe8}, color.NRGBA{R: 0xae, G: 0x51, B: 0x51, A: 0xe8}, color.NRGBA{R: 0xad, G: 0x52, B: 0x52, A: 0xe8}, color.NRGBA{R: 0xab, G: 0x54, B: 0x54, A: 0xe7}, color.NRGBA{R: 0xaa, G: 0x55, B: 0x55, A: 0xe7}, color.NRGBA{R: 0xa8, G: 0x56, B: 0x56, A: 0xe7}, color.NRGBA{R: 0xa8, G: 0x57, B: 0x57, A: 0xe6}, color.NRGBA{R: 0xa6, G: 0x59, B: 0x59, A: 0xe6}, color.NRGBA{R: 0xa5, G: 0x5b, B: 0x5b, A: 0xe5}, color.NRGBA{R: 0xa4, G: 0x5c, B: 0x5c, A: 0xe5}, color.NRGBA{R: 0xa2, G: 0x5d, B: 0x5d, A: 0xe5}, color.NRGBA{R: 0xa1, G: 0x5f, B: 0x5f, A: 0xe4}, color.NRGBA{R: 0x9f, G: 0x60, B: 0x60, A: 0xe4}, color.NRGBA{R: 0x9e, G: 0x61, B: 0x61, A: 0xe4}, color.NRGBA{R: 0x9c, G: 0x63, B: 0x63, A: 0xe3}, color.NRGBA{R: 0x9b, G: 0x64, B: 0x64, A: 0xe3}, color.NRGBA{R: 0x99, G: 0x65, B: 0x65, A: 0xe3}, color.NRGBA{R: 0x98, G: 0x68, B: 0x68, A: 0xe2}, color.NRGBA{R: 0x97, G: 0x69, B: 0x69, A: 0xe2}, color.NRGBA{R: 0x95, G: 0x6a, B: 0x6a, A: 0xe2}, color.NRGBA{R: 0x95, G: 0x6c, B: 0x6c, A: 0xe1}, color.NRGBA{R: 0x92, G: 0x6d, B: 0x6d, A: 0xe1}, color.NRGBA{R: 0x91, G: 0x6e, B: 0x6e, A: 0xe1}, color.NRGBA{R: 0x8f, G: 0x6f, B: 0x6f, A: 0xe0}, color.NRGBA{R: 0x8e, G: 0x71, B: 0x71, A: 0xe0}, color.NRGBA{R: 0x8c, G: 0x72, B: 
0x72, A: 0xe0}, color.NRGBA{R: 0x8c, G: 0x73, B: 0x73, A: 0xdf}, color.NRGBA{R: 0x8a, G: 0x76, B: 0x76, A: 0xdf}, color.NRGBA{R: 0x89, G: 0x77, B: 0x77, A: 0xde}, color.NRGBA{R: 0x88, G: 0x79, B: 0x79, A: 0xde}, color.NRGBA{R: 0x85, G: 0x7a, B: 0x7a, A: 0xde}, color.NRGBA{R: 0x85, G: 0x7b, B: 0x7b, A: 0xdd}, color.NRGBA{R: 0x82, G: 0x7d, B: 0x7d, A: 0xdd}, color.NRGBA{R: 0x81, G: 0x7e, B: 0x7e, A: 0xdd}, color.NRGBA{R: 0x7f, G: 0x7f, B: 0x7f, A: 0xdc}, color.NRGBA{R: 0x7f, G: 0x7f, B: 0x7f, A: 0xd8}, color.NRGBA{R: 0x80, G: 0x80, B: 0x80, A: 0xd3}, color.NRGBA{R: 0x7f, G: 0x7f, B: 0x7f, A: 0xce}, color.NRGBA{R: 0x80, G: 0x80, B: 0x80, A: 0xc9}, color.NRGBA{R: 0x7f, G: 0x7f, B: 0x7f, A: 0xc4}, color.NRGBA{R: 0x7f, G: 0x7f, B: 0x7f, A: 0xc0}, color.NRGBA{R: 0x7f, G: 0x7f, B: 0x7f, A: 0xbb}, color.NRGBA{R: 0x7f, G: 0x7f, B: 0x7f, A: 0xb6}, color.NRGBA{R: 0x80, G: 0x80, B: 0x80, A: 0xb1}, color.NRGBA{R: 0x7f, G: 0x7f, B: 0x7f, A: 0xac}, color.NRGBA{R: 0x7f, G: 0x7f, B: 0x7f, A: 0xa8}, color.NRGBA{R: 0x7f, G: 0x7f, B: 0x7f, A: 0xa3}, color.NRGBA{R: 0x7f, G: 0x7f, B: 0x7f, A: 0x9e}, color.NRGBA{R: 0x80, G: 0x80, B: 0x80, A: 0x99}, color.NRGBA{R: 0x7f, G: 0x7f, B: 0x7f, A: 0x94}, color.NRGBA{R: 0x7f, G: 0x7f, B: 0x7f, A: 0x90}, color.NRGBA{R: 0x7f, G: 0x7f, B: 0x7f, A: 0x8b}, color.NRGBA{R: 0x7f, G: 0x7f, B: 0x7f, A: 0x86}, color.NRGBA{R: 0x80, G: 0x80, B: 0x80, A: 0x81}, color.NRGBA{R: 0x7f, G: 0x7f, B: 0x7f, A: 0x7c}, color.NRGBA{R: 0x7f, G: 0x7f, B: 0x7f, A: 0x78}, color.NRGBA{R: 0x7e, G: 0x7e, B: 0x7e, A: 0x73}, color.NRGBA{R: 0x7f, G: 0x7f, B: 0x7f, A: 0x6e}, color.NRGBA{R: 0x81, G: 0x81, B: 0x81, A: 0x69}, color.NRGBA{R: 0x7f, G: 0x7f, B: 0x7f, A: 0x64}, color.NRGBA{R: 0x7f, G: 0x7f, B: 0x7f, A: 0x60}, color.NRGBA{R: 0x7e, G: 0x7e, B: 0x7e, A: 0x5b}, color.NRGBA{R: 0x7f, G: 0x7f, B: 0x7f, A: 0x56}, color.NRGBA{R: 0x7e, G: 0x7e, B: 0x7e, A: 0x51}, color.NRGBA{R: 0x7f, G: 0x7f, B: 0x7f, A: 0x4c}, color.NRGBA{R: 0x7f, G: 0x7f, B: 0x7f, A: 0x48}, color.NRGBA{R: 0x7e, G: 
0x7e, B: 0x7e, A: 0x43}, color.NRGBA{R: 0x7f, G: 0x7f, B: 0x7f, A: 0x3e}, color.NRGBA{R: 0x7d, G: 0x7d, B: 0x7d, A: 0x39}, color.NRGBA{R: 0x7f, G: 0x7f, B: 0x7f, A: 0x34}, color.NRGBA{R: 0x7f, G: 0x7f, B: 0x7f, A: 0x30}, color.NRGBA{R: 0x7d, G: 0x7d, B: 0x7d, A: 0x2b}, color.NRGBA{R: 0x7f, G: 0x7f, B: 0x7f, A: 0x26}, color.NRGBA{R: 0x7c, G: 0x7c, B: 0x7c, A: 0x21}, color.NRGBA{R: 0x7f, G: 0x7f, B: 0x7f, A: 0x1c}, color.NRGBA{R: 0x7f, G: 0x7f, B: 0x7f, A: 0x18}, color.NRGBA{R: 0x79, G: 0x79, B: 0x79, A: 0x13}, color.NRGBA{R: 0x7f, G: 0x7f, B: 0x7f, A: 0xe}, color.NRGBA{R: 0x71, G: 0x71, B: 0x71, A: 0x9}, color.NRGBA{R: 0x7f, G: 0x7f, B: 0x7f, A: 0x4}, } }
// Package main demonstrates that passing a struct by pointer lets the
// callee mutate the caller's value in place.
package main

import "fmt"

// person holds a single display name.
type person struct {
	name string
}

// updateStruct overwrites the name of the person behind the pointer.
func updateStruct(p1 *person) {
	p1.name = "raj"
}

func main() {
	someone := person{name: "Amit"}
	updateStruct(&someone)
	// Prints {raj}: the mutation through the pointer is visible here.
	fmt.Println(someone)
}
// Package yousign implements a client for the Yousign e-signature API.
package yousign

import (
	"encoding/json"
	"net/http"
	"time"
)

// ProceduresService handles communication with the procedure-related
// endpoints of the Yousign API.
type ProceduresService struct {
	client *Client // shared API client, declared elsewhere in this package
}

// Procedure is the API representation of a signature procedure.
// Scalar fields are pointers with `omitempty` so that fields absent from
// a payload stay nil and are omitted when marshalling.
type Procedure struct {
	ID                 *string         `json:"id,omitempty"`
	Name               *string         `json:"name,omitempty"`
	Description        *string         `json:"description,omitempty"`
	CreatedAt          *time.Time      `json:"createdAt,omitempty"`
	UpdatedAt          *time.Time      `json:"updatedAt,omitempty"`
	ExpiresAt          *time.Time      `json:"expiresAt,omitempty"`
	Status             *string         `json:"status,omitempty"`
	Creator            *string         `json:"creator,omitempty"`
	CreatorFirstName   *string         `json:"creatorFirstName,omitempty"`
	CreatorLastName    *string         `json:"creatorLastName,omitempty"`
	Company            *string         `json:"company,omitempty"`
	Template           *bool           `json:"template,omitempty"`
	Ordered            *bool           `json:"ordered,omitempty"`
	Parent             *string         `json:"parent,omitempty"`
	Metadata           json.RawMessage `json:"metadata,omitempty"` // left undecoded; schema is caller-defined
	Config             json.RawMessage `json:"config,omitempty"`   // left undecoded; see ProcedureConfig for the request shape
	Members            []Member        `json:"members,omitempty"`
	Files              []File          `json:"files,omitempty"`
	RelatedFilesEnable *bool           `json:"relatedFilesEnable,omitempty"`
	Archive            *bool           `json:"archive,omitempty"`
}

// ProcedureConfig groups the optional configuration of a procedure:
// notification e-mails, automatic reminders and webhooks.
type ProcedureConfig struct {
	Email     ProcedureConfigEmail      `json:"email,omitempty"`
	Reminders []ProcedureConfigReminder `json:"reminders,omitempty"`
	Webhook   ProcedureConfigWebhook    `json:"webhook,omitempty"`
}

// ProcedureConfigEmail maps procedure/member lifecycle events to the
// e-mail messages sent when each event fires.
type ProcedureConfigEmail struct {
	ProcedureStarted  []Msg `json:"procedure.started,omitempty"`
	ProcedureFinished []Msg `json:"procedure.finished,omitempty"`
	ProcedureRefused  []Msg `json:"procedure.refused,omitempty"`
	ProcedureExpired  []Msg `json:"procedure.expired,omitempty"`
	ProcedureDeleted  []Msg `json:"procedure.deleted,omitempty"`
	MemberStarted     []Msg `json:"member.started,omitempty"`
	MemberFinished    []Msg `json:"member.finished,omitempty"`
	CommentCreated    []Msg `json:"comment.created,omitempty"`
}

// ProcedureConfigReminder describes one automatic reminder rule.
type ProcedureConfigReminder struct {
	// Interval and Limit: presumably the spacing and maximum count of
	// reminders — confirm units against the Yousign API documentation.
	Interval *int           `json:"interval,omitempty"`
	Limit    *int           `json:"limit,omitempty"`
	Config   ReminderConfig `json:"config,omitempty"`
}

// ReminderConfig wraps the e-mail configuration of a reminder.
type ReminderConfig struct {
	Email ReminderConfigEmail `json:"email,omitempty"`
}

// ReminderConfigEmail holds the messages sent when a reminder executes.
type ReminderConfigEmail struct {
	ReminderExecuted []Msg `json:"reminder.executed,omitempty"`
}

// ProcedureConfigWebhook maps lifecycle events to webhooks to invoke.
type ProcedureConfigWebhook struct {
	ProcedureStarted  []Webhook `json:"procedure.started,omitempty"`
	ProcedureFinished []Webhook `json:"procedure.finished,omitempty"`
	ProcedureRefused  []Webhook `json:"procedure.refused,omitempty"`
	ProcedureExpired  []Webhook `json:"procedure.expired,omitempty"`
	ProcedureDeleted  []Webhook `json:"procedure.deleted,omitempty"`
	MemberStarted     []Webhook `json:"member.started,omitempty"`
	MemberFinished    []Webhook `json:"member.finished,omitempty"`
	CommentCreated    []Webhook `json:"comment.created,omitempty"`
}

// Msg describes one notification e-mail.
type Msg struct {
	To       []string `json:"to,omitempty"`
	Subject  *string  `json:"subject,omitempty"`
	Message  *string  `json:"message,omitempty"`
	FromName *string  `json:"fromName,omitempty"`
}

// Webhook describes one HTTP callback to perform on an event.
type Webhook struct {
	URL     *string        `json:"url,omitempty"`
	Method  *string        `json:"method,omitempty"`
	Headers *WebhookHeader `json:"headers,omitempty"`
}

// WebhookHeader carries the custom header attached to webhook requests.
type WebhookHeader struct {
	XYousignCustomHeader string `json:"X-Yousign-Custom-Header,omitempty"`
}

// ProcedureRequest is the payload used to create or update a procedure.
type ProcedureRequest struct {
	Name               *string           `json:"name,omitempty"`
	Description        *string           `json:"description,omitempty"`
	ExpiresAt          *string           `json:"expiresAt,omitempty"`
	Template           *bool             `json:"template,omitempty"`
	Ordered            *bool             `json:"ordered,omitempty"`
	Metadata           map[string]string `json:"metadata,omitempty"`
	Config             *ProcedureConfig  `json:"config,omitempty"`
	Members            []MemberRequest   `json:"members,omitempty"`
	Start              *bool             `json:"start,omitempty"`
	RelatedFilesEnable *bool             `json:"relatedFilesEnable,omitempty"`
	Archive            *bool             `json:"archive,omitempty"`
}

// Create posts r to the "procedures" endpoint and returns the created
// procedure together with the raw HTTP response.
func (s *ProceduresService) Create(r *ProcedureRequest) (*Procedure, *http.Response, error) {
	req, err := s.client.NewRequest("POST", "procedures", nil, r)
	if err != nil {
		return nil, nil, err
	}
	var v Procedure
	resp, err := s.client.Do(req, &v)
	return &v, resp, err
}

// Get fetches a procedure.
// NOTE(review): id is used as the entire request path, so callers
// presumably pass the full resource path (e.g. "/procedures/<uuid>")
// rather than a bare UUID — confirm against Client.NewRequest.
func (s *ProceduresService) Get(id string) (*Procedure, *http.Response, error) {
	req, err := s.client.NewRequest("GET", id, nil, nil)
	if err != nil {
		return nil, nil, err
	}
	var v Procedure
	resp, err := s.client.Do(req, &v)
	return &v, resp, err
}

// Update sends r as a full replacement (PUT) of the procedure at id.
// The same full-resource-path caveat as Get applies to id.
func (s *ProceduresService) Update(id string, r *ProcedureRequest) (*Procedure, *http.Response, error) {
	req, err := s.client.NewRequest("PUT", id, nil, r)
	if err != nil {
		return nil, nil, err
	}
	var v Procedure
	resp, err := s.client.Do(req, &v)
	return &v, resp, err
}
package server

import (
	"testing"

	"github.com/eclipse/paho.mqtt.golang/packets"
	"github.com/htdvisser/squatt/session"
	"github.com/htdvisser/squatt/topic"
	. "github.com/smartystreets/goconvey/convey"
)

// TestSubscription exercises both a single Subscription (QoS downgrade
// on delivery) and the server-level subscribe/unsubscribe bookkeeping.
func TestSubscription(t *testing.T) {
	// NOTE(review): "Subcription" is a typo for "Subscription", but it is
	// a runtime test-description string, so it is left untouched here.
	Convey(`Given a Subcription`, t, func() {
		fooSession := session.NewSession("foo")
		fooTopic := topic.NewTopic("foo")
		// Subscribe at QoS 1 so a QoS 2 publish must be downgraded.
		s := NewSubscription(fooSession, fooTopic, 1)
		ch := make(chan packets.ControlPacket, 1)
		fooSession.Connect(ch)
		Convey(`When delivering a message`, func() {
			msg := new(packets.PublishPacket)
			msg.Qos = 2
			s.Deliver(msg)
			Convey(`Then the message should be delivered to the session`, func() {
				So(ch, ShouldNotBeEmpty)
			})
			Convey(`Then the QoS should be downgraded to 1`, func() {
				So((<-ch).(*packets.PublishPacket).Qos, ShouldEqual, 1)
			})
		})
	})
	Convey(`Testing server subscriptions`, t, func() {
		s := NewServer()
		fooSession := session.NewSession("foo")
		fooTopic := topic.NewTopic("foo")
		fooSub := s.Subscribe(fooSession, fooTopic, 1)
		// A new subscription is tracked both per-topic and per-session.
		So(s.TopicSubscriptions(fooTopic), ShouldContain, fooSub)
		So(s.SessionSubscriptions(fooSession), ShouldContain, fooSub)
		// Re-subscribing the same session/topic updates the QoS in place
		// (asserted on the original subscription object below).
		s.Subscribe(fooSession, fooTopic, 2)
		So(fooSub.qos.Load(), ShouldEqual, 2)
		barTopic := topic.NewTopic("bar")
		barSession := session.NewSession("bar")
		// Unsubscribing before ever subscribing must be a harmless no-op.
		s.Unsubscribe(barSession, barTopic)
		s.Subscribe(barSession, barTopic, 0)
		barSub := s.Subscribe(fooSession, barTopic, 2)
		// Subscriptions are indexed under the correct topic only.
		So(s.TopicSubscriptions(fooTopic), ShouldNotContain, barSub)
		So(s.TopicSubscriptions(barTopic), ShouldContain, barSub)
		So(s.SessionSubscriptions(fooSession), ShouldContain, barSub)
		s.Unsubscribe(fooSession, fooTopic)
		So(s.TopicSubscriptions(fooTopic), ShouldNotContain, fooSub)
		So(s.SessionSubscriptions(fooSession), ShouldNotContain, fooSub)
		// Unsubscribe without a topic presumably drops all remaining
		// subscriptions of the session — TODO confirm; the per-topic call
		// afterwards checks that a repeat unsubscribe stays safe.
		s.Unsubscribe(fooSession)
		s.Unsubscribe(fooSession, barTopic)
	})
}
package vaku

import "fmt"

// PathUpdate merges the new data d into the data that already exists at
// the path described by i, with precedence given to the values in d,
// and writes the merged map back to Vault.
//
// It returns an error if the path cannot be read (PathUpdate only works
// on existing data) or if the merged data cannot be written back; on
// success it returns nil.
func (c *Client) PathUpdate(i *PathInput, d map[string]interface{}) error {
	// Read the existing data; a missing path is an error by design.
	read, err := c.PathRead(i)
	if err != nil {
		return fmt.Errorf("failed to read data at path %s. PathUpdate only works on existing data: %w", i.Path, err)
	}

	// Overlay the new values; keys in d win over existing keys.
	for k, v := range d {
		read[k] = v
	}

	// Write the merged result back to Vault.
	// NOTE(review): this message uses i.opPath while the read error above
	// uses i.Path — presumably opPath is the resolved operation path;
	// confirm which one is intended for user-facing messages.
	if err := c.PathWrite(i, read); err != nil {
		return fmt.Errorf("failed to write updated data back to %s: %w", i.opPath, err)
	}

	// Explicit nil on success (the original returned a stale err variable,
	// which was provably nil here but obscured the intent).
	return nil
}
package cmd

import (
	"context"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"regexp"
	"sync"
	"time"

	"github.com/briandowns/spinner"
	"github.com/giantswarm/microerror"
	log "github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
	"k8s.io/apiserver/pkg/apis/audit"

	"github.com/corest/k8s-resource-lifecycle/pkg/metaresource"
	"github.com/corest/k8s-resource-lifecycle/pkg/project"
)

// runner wires the CLI flags, logger and output streams for the command.
type runner struct {
	flag   *flag
	logger *log.Logger
	stdout io.Writer
	stderr io.Writer
}

// PersistentPreRun prints build/version information before any command runs.
func (r *runner) PersistentPreRun(cmd *cobra.Command, args []string) error {
	fmt.Printf("Version = %#q\n", project.Version())
	fmt.Printf("Git SHA = %#q\n", project.GitSHA())
	fmt.Printf("Command = %#q\n", cmd.Name())
	fmt.Println()
	return nil
}

// Run validates the flags and delegates to run.
func (r *runner) Run(cmd *cobra.Command, args []string) error {
	ctx := context.Background()
	err := r.flag.Validate()
	if err != nil {
		return microerror.Mask(err)
	}
	err = r.run(ctx, cmd, args)
	if err != nil {
		return microerror.Mask(err)
	}
	return nil
}

// run locates audit-log files, scans them concurrently for events that
// match the requested resource, and prints the matching events in order.
func (r *runner) run(ctx context.Context, cmd *cobra.Command, args []string) error {
	var err error
	s := spinner.New(spinner.CharSets[35], 100*time.Millisecond)
	var auditLogFiles []string
	{
		s.Prefix = "Searching for audit log files "
		s.Start()
		auditLogFiles, err = auditLogFilesSearch(r.flag.AuditLogPath, r.flag.SearchPattern, r.flag.RecursiveSearch)
		if err != nil {
			return microerror.Mask(err)
		}
		s.Stop()
	}
	fmt.Printf("Looking for resource events:\n\t- kind: %#q\n\t- name: %#q\n\t- apiGroup: %#q\n\n", r.flag.ResourceKind, r.flag.ResourceName, r.flag.ResourceAPIGroup)
	s.Prefix = "Processing log files "
	s.Start()
	// write constructor for this
	metaResource := metaresource.MetaResource{
		Kind:      r.flag.ResourceKind,
		Name:      r.flag.ResourceName,
		Namespace: r.flag.ResourceNamespace,
		APIGroup:  r.flag.ResourceAPIGroup,
	}
	var wg sync.WaitGroup
	storeCh := make(chan audit.Event)
	errCh := make(chan error)
	// One producer goroutine per file; each signals the WaitGroup when done.
	for _, f := range auditLogFiles {
		wg.Add(1)
		go metaResource.FindEvents(f, storeCh, errCh, &wg)
	}
	var errors []error
	// NOTE(review): this consumer goroutine loops forever, so it leaks
	// once run returns; and wg.Wait below only proves the producers are
	// done, not that the consumer has drained the channels — reading
	// `errors` and metaResource.Events afterwards is a data race. Closing
	// both channels after wg.Wait and waiting for a done signal from the
	// consumer would fix both. TODO confirm and fix.
	go func() {
		for {
			select {
			case event := <-storeCh:
				metaResource.StoreEvent(event)
			case err := <-errCh:
				errors = append(errors, err)
			}
		}
	}()
	wg.Wait()
	s.Stop()
	// NOTE(review): this prints the literal string "error" rather than
	// the error value, and returns after the first collected error.
	for _, err := range errors {
		fmt.Println("error")
		return err
	}
	if len(metaResource.Events) == 0 {
		fmt.Println("No events found")
		return nil
	}
	metaResource.SortEvents()
	for _, event := range metaResource.Events {
		fmt.Printf("Found %#q resource event (%s)\n", event.Verb, event.StageTimestamp.String())
	}
	return nil
}

// auditLogFilesSearch returns the files under path whose names match
// searchPattern; when recursive is true the whole directory tree is
// walked, otherwise only the top level of path is listed.
func auditLogFilesSearch(path, searchPattern string, recursive bool) ([]string, error) {
	var result []string
	var err error
	searchRegexp, err := regexp.Compile(searchPattern)
	if err != nil {
		return nil, microerror.Mask(err)
	}
	if recursive {
		// NOTE(review): walk errors are swallowed (the callback always
		// returns nil), and directories whose names match the pattern are
		// collected alongside regular files — confirm this is intended.
		err = filepath.Walk(path, func(path string, file os.FileInfo, err error) error {
			if err == nil && searchRegexp.MatchString(file.Name()) {
				result = append(result, path)
			}
			return nil
		})
		if err != nil {
			return nil, microerror.Mask(err)
		}
		return result, nil
	}
	files, err := ioutil.ReadDir(path)
	if err != nil {
		return nil, microerror.Mask(err)
	}
	for _, file := range files {
		if searchRegexp.MatchString(file.Name()) {
			result = append(result, filepath.Join(path, file.Name()))
		}
	}
	return result, nil
}
package model

import (
	"database/sql"
	//"github.com/CourseComment/conf"
	_ "github.com/go-sql-driver/mysql"
	//"os"
	//"time"
)

// var (
// 	db *sql.DB
// )

// func init() {
// 	db = conf.DB
// }

// Course is a row of the `course` table plus the professors teaching it.
// NOTE(review): the package-level `db` used below is commented out above;
// presumably it is declared in another file of this package — confirm.
type Course struct {
	Id         idtype
	Name       string
	Number     string
	Professors []Professor
}

// GetCourse loads a single course selected by one of the supported keys:
// "Id" (value must be an idtype), "Name" or "Number" (value must be a
// string). The zero Course is returned when the key is unknown or no row
// matches; a wrong value type panics on the type assertion.
func GetCourse(key string, value interface{}) *Course {
	var rows *sql.Rows
	res := new(Course)
	switch key {
	case "Id":
		id := value.(idtype)
		//extract from the database
		rows, _ = db.Query("select id, name, number from course where id=?", id)
	case "Name":
		name := value.(string)
		//extract from the database
		rows, _ = db.Query("select id, name, number from course where name=?", name)
	case "Number":
		number := value.(string)
		//extract from the database
		rows, _ = db.Query("select id, name, number from course where number=?", number)
	}
	// BUGFIX: for an unknown key (or a failed query) rows stays nil and
	// the original rows.Next() panicked. Also close the rows so the
	// underlying connection is released back to the pool.
	if rows == nil {
		return res
	}
	defer rows.Close()
	if rows.Next() {
		rows.Scan(&res.Id, &res.Name, &res.Number)
	}
	return res
}

// GetProfessorOfThisCourse fills c.Professors with every professor that
// has a lecture row referencing this course.
func (c *Course) GetProfessorOfThisCourse() {
	c.Professors = make([]Professor, 0)
	// NOTE(review): "cource_id" looks like a typo for "course_id" —
	// confirm against the actual lecture table schema before changing.
	rows, _ := db.Query("select professor_id from lecture where cource_id=?", c.Id)
	// BUGFIX: guard against a failed query before dereferencing rows,
	// and release them when done.
	if rows == nil {
		return
	}
	defer rows.Close()
	for rows.Next() {
		var id idtype
		rows.Scan(&id)
		tmp := GetProfessor("Id", id)
		c.Professors = append(c.Professors, *tmp)
	}
}
// Package main prints the first ten Fibonacci numbers using a closure.
package main

import "fmt"

// fibonacci returns a function that yields successive Fibonacci numbers
// on each call: 0, 1, 1, 2, 3, 5, ...
//
// The original drove a three-variable state machine that special-cased
// the first two calls; the classic two-variable rolling pair below
// produces the identical sequence with far less ceremony.
func fibonacci() func() int {
	a, b := 0, 1
	return func() int {
		// Emit the current value, then advance the pair.
		n := a
		a, b = b, a+b
		return n
	}
}

func main() {
	f := fibonacci()
	for i := 0; i < 10; i++ {
		fmt.Println(f())
	}
}
// Package config holds process-wide configuration for quacktors.
package config

import (
	"github.com/Azer0s/quacktors/logging"
)

// logger is the package-wide logger, set once in init.
var logger logging.Logger

// qpmdPort is the port used to reach qpmd; defaults to 7161 (see init).
var qpmdPort uint16

func init() {
	// Default to the logrus-backed logger implementation.
	logger = &logging.LogrusLogger{}
	logger.Init()
	qpmdPort = 7161
}
// Package main demonstrates implementing sort.Interface on a named
// slice type so the standard sort machinery can order it.
package main

import (
	"fmt"
	"sort"
)

// IntegerSlice adapts []int to sort.Interface.
type IntegerSlice []int

// Len reports the number of elements.
func (s IntegerSlice) Len() int { return len(s) }

// Less orders elements ascending by value.
func (s IntegerSlice) Less(i, j int) bool { return s[i] < s[j] }

// Swap exchanges the elements at positions i and j.
func (s IntegerSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

func main() {
	values := []int{6, 3, 4, 5, 1, 0, 2}
	// The conversion is free: IntegerSlice shares the backing array,
	// so the sort reorders values in place.
	sort.Sort(IntegerSlice(values))
	fmt.Println(values)
}
// Package cache defines the minimal cache abstraction shared by the
// concrete cache implementations in this project.
package cache

// Cache is the interface every cache backend must satisfy.
// NOTE(review): Value is declared elsewhere in this package; its
// contract (e.g. size accounting) is not visible from this file.
type Cache interface {
	// Remove evicts the entry for key, if present.
	Remove(key string)
	// Get returns the value stored under key and whether it was present.
	Get(key string) (value Value, ok bool)
	// Add inserts or replaces the value stored under key.
	Add(key string, value Value)
	// Keys returns the keys currently held by the cache.
	Keys() []string
}
package types import ( "github.com/google/uuid" "github.com/jinzhu/gorm" // HOFSTADTER_START import // HOFSTADTER_END import ) /* Name: AuthBasicUser-db-type-funcs About: */ // HOFSTADTER_START start // HOFSTADTER_END start /* Migrates (or creates) the table for AuthBasicUser */ func MigrateAuthBasicUserTable(db *gorm.DB) (err error) { // create own table err = db.Debug().AutoMigrate(&AuthBasicUser{}).Error // create index on uuid db.Model(&AuthBasicUser{}).Debug().AddUniqueIndex("idx_auth_basic_user_uuid", "uuid") // deal with foreign keys db.Model(&AuthBasicUser{}).Debug().AddForeignKey("user_id", "users(id)", "RESTRICT", "RESTRICT") return } /* GORM hook to ensure UUID is created */ func (T *AuthBasicUser) BeforeCreate(scope *gorm.Scope) error { T.UUID = uuid.New().String() return nil } /* Creates a AuthBasicUser record. */ func CreateAuthBasicUser(db *gorm.DB, T *AuthBasicUser, UserUUID string) (err error) { // user-defined // it's not a view var User User err = db. Where("user_uuid = ?", UserUUID). First(User).Error if err != nil { return err } T.UserID = User.ID // other relations? (has-one, has-many, many-to-many) err = db.Create(T).Error return } /* Find a AuthBasicUser record by ID */ func GetAuthBasicUserByID(db *gorm.DB, T *AuthBasicUser, UserUUID string) (err error) { err = db. Where("user_uuid = ?", UserUUID). First(T).Error return } /* Find a AuthBasicUser record by UUID */ func GetAuthBasicUserByUUID(db *gorm.DB, UUID string) (T *AuthBasicUser, err error) { T = NewAuthBasicUser() err = db. Where("uuid = ?", UUID). First(T).Error return } /* Updates a AuthBasicUser record */ func UpdateAuthBasicUser(db *gorm.DB, T *AuthBasicUser) (err error) { err = db.Update(T).Error return } /* Soft deletes the record (i.e. 
it has a deleted at timestamp and is not returned in queries) */ func DeleteAuthBasicUser(db *gorm.DB, T *AuthBasicUser) (err error) { err = db.Delete(T).Error return } /* Hard deletes the record, permenently deleting it from the DB */ func HardDeleteAuthBasicUser(db *gorm.DB, T *AuthBasicUser) (err error) { err = db.Unscoped().Delete(T).Error return } // HOFSTADTER_BELOW
// Package muxcodec provides a multiplexing multicodec that can choose
// among several underlying codecs.
package muxcodec

import (
	mc "gx/ipfs/QmYMiyZRYDmhMr2phMc4FGrYbsyzvR751BgeobnWroiq2z/go-multicodec"
	cbor "gx/ipfs/QmYMiyZRYDmhMr2phMc4FGrYbsyzvR751BgeobnWroiq2z/go-multicodec/cbor"
	json "gx/ipfs/QmYMiyZRYDmhMr2phMc4FGrYbsyzvR751BgeobnWroiq2z/go-multicodec/json"
)

// StandardMux returns a Multicodec that multiplexes over the standard
// set of codecs — CBOR plus two JSON variants (the boolean presumably
// toggles multiline/msgio framing; confirm with go-multicodec/json) —
// using the SelectFirst strategy to pick the codec for encoding.
func StandardMux() *Multicodec {
	return MuxMulticodec([]mc.Multicodec{
		cbor.Multicodec(),
		json.Multicodec(false),
		json.Multicodec(true),
	}, SelectFirst)
}
// Source : https://oj.leetcode.com/problems/anagrams/ // Author : Austin Vern Songer // Date : 2016-06-04 /********************************************************************************** * * Given an array of strings, group anagrams together. * * For example, given: ["eat", "tea", "tan", "ate", "nat", "bat"], * Return: * * [ * ["ate", "eat","tea"], * ["nat","tan"], * ["bat"] * ] * * Note: * * For the return value, each inner list's elements must follow the lexicographic order. * All inputs will be in lower-case. * **********************************************************************************/ package main import ( "fmt" "sort" ) type sortRunes []rune func (s sortRunes) Less(i, j int) bool { return s[i] < s[j] } func (s sortRunes) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s sortRunes) Len() int { return len(s) } func SortString(s string) string { r := []rune(s) sort.Sort(sortRunes(r)) return string(r) } func groupAnagrams(strs []string) [][]string { strlen := len(strs) result := make([][]string, strlen) m := make(map[string]int) var i, j int for i, j = 0, 0; i < strlen; i++ { word := strs[i] sortword := SortString(word) if val, ok := m[sortword]; ok { result[val] = append(result[val], word) } else { m[sortword] = j result[j] = append(result[j], word) j++ } } return result[0:j] } func main() { strs := []string{"eat", "tea", "tan", "ate", "nat", "bat"} fmt.Println(groupAnagrams(strs)) }
package helper func SafeMap(m map[string]interface{}, key string, def interface{}) interface{} { var outp interface{} = def switch def.(type) { case string: if val, ok := m[key].(string); ok { outp = val } case int: if val, ok := m[key].(int); ok { outp = val } case int64: if val, ok := m[key].(int64); ok { outp = val } case []interface{}: if val, ok := m[key].([]interface{}); ok { outp = val } } return outp }
package business_hours

import (
	"regexp"
	"strconv"
	"strings"
)

var (
	// Opening times must be strictly before 24:00.
	openTimeValidater = regexp.MustCompile(`^([0-1]?\d|2[0-3]):[0-5]\d$`)
	// Closing times may use the extended past-midnight notation beyond
	// 24:00 (e.g. 25:30 for 1:30 AM the next day).
	closeTimeValidater = regexp.MustCompile(`^[0-2]?\d:[0-5]\d$`)
)

// IsValidBusinessHours validates a flat list of open/close time pairs:
// hours[0]/hours[1] is the first pair, hours[2]/hours[3] the second, and
// so on. A fully empty pair terminates the scan, and each pair's opening
// time must come after the previous pair's closing time.
func IsValidBusinessHours(hours []string) bool {
	// BUGFIX: an odd-length slice used to panic on hours[i+1]; such
	// input cannot form pairs, so reject it instead.
	if len(hours)%2 != 0 {
		return false
	}
	var lastClose string
	for i := 0; i < len(hours); i += 2 {
		open, close := hours[i], hours[i+1]
		if open == "" && close == "" {
			break
		}
		if !isValidBusinessHourPair(open, close) {
			return false
		}
		if lastClose != "" && !isValidBusinessHourOrder(lastClose, open) {
			return false
		}
		lastClose = close
	}
	return true
}

// isValidBusinessHourPair checks one open/close pair. A blank side is
// skipped; when both are present, open must also precede close.
func isValidBusinessHourPair(open, close string) bool {
	switch {
	case open == "":
		return closeTimeValidater.MatchString(close)
	case close == "":
		return openTimeValidater.MatchString(open)
	default:
		return openTimeValidater.MatchString(open) &&
			closeTimeValidater.MatchString(close) &&
			isValidBusinessHourOrder(open, close)
	}
}

// isValidBusinessHourOrder reports whether first is strictly earlier
// than second, comparing "HH:MM" strings numerically as HHMM.
func isValidBusinessHourOrder(first, second string) bool {
	firstVal, _ := strconv.Atoi(strings.Replace(first, ":", "", -1))
	secondVal, _ := strconv.Atoi(strings.Replace(second, ":", "", -1))
	return firstVal < secondVal
}
// Package uinput creates virtual Linux input devices via /dev/uinput.
package uinput

import (
	"fmt"
	"io"
	"os"
	"time"
	"unsafe"
)

// Joystick is a virtual gamepad: digital buttons (including the dpad)
// plus two analog sticks. Close destroys the underlying uinput device.
type Joystick interface {
	BtnDown(btn uint16) error
	BtnUp(btn uint16) error
	LeftStickX(x int32) error
	LeftStickY(y int32) error
	RightStickX(x int32) error
	RightStickY(y int32) error
	io.Closer
}

// vJoystick is the concrete Joystick backed by an open uinput device file.
type vJoystick struct {
	devFile *os.File
}

// setupJoystick configures devFile as a virtual gamepad: it registers
// the button and absolute-axis capabilities via ioctls, writes the
// device description, and finally creates the device node. The min/max/
// flat/fuzz values are applied to both sticks; the dpad is exposed as
// the hat axes with range [-1, 1].
func setupJoystick(devFile *os.File, minX int32, maxX int32, minY int32, maxY int32, flat int32, fuzz int32) error {
	var uinp uinputUserDev
	buttons := []uint16{
		BtnSouth, BtnEast, BtnNorth, BtnWest, BtnTL, BtnTR, BtnTL2, BtnTR2,
		BtnSelect, BtnStart, BtnMode, BtnThumbL, BtnThumbR, BtnDpadUp,
		BtnDpadDown, BtnDpadLeft, BtnDpadRight,
	}
	hats := []uint16{
		AbsHat0X, AbsHat0Y,
	}

	// TODO: add possibility to change these values
	uinp.Name = uinputSetupNameToBytes([]byte("GoUinputDevice"))
	uinp.ID.BusType = BusVirtual
	uinp.ID.Vendor = 1
	uinp.ID.Product = 2
	uinp.ID.Version = 3

	// Sticks
	uinp.AbsMin[AbsX] = minX
	uinp.AbsMax[AbsX] = maxX
	uinp.AbsFuzz[AbsX] = fuzz
	uinp.AbsFlat[AbsX] = flat
	uinp.AbsMin[AbsY] = minY
	uinp.AbsMax[AbsY] = maxY
	uinp.AbsFuzz[AbsY] = fuzz
	uinp.AbsFlat[AbsY] = flat
	uinp.AbsMin[AbsRX] = minX
	uinp.AbsMax[AbsRX] = maxX
	uinp.AbsFuzz[AbsRX] = fuzz
	uinp.AbsFlat[AbsRX] = flat
	uinp.AbsMin[AbsRY] = minY
	uinp.AbsMax[AbsRY] = maxY
	uinp.AbsFuzz[AbsRY] = fuzz
	uinp.AbsFlat[AbsRY] = flat

	// Digital dpad buttons
	for _, i := range hats {
		uinp.AbsMax[i] = 1
		uinp.AbsMin[i] = -1
	}

	buf, err := uinputUserDevToBuffer(uinp)
	if err != nil {
		goto err
	}

	err = ioctl(devFile, uiSetEvBit, EvKey)
	if err != nil {
		err = fmt.Errorf("Could not perform UI_SET_EVBIT ioctl: %v", err)
		goto err
	}

	// Register all gamepad buttons
	for _, i := range buttons {
		err = ioctl(devFile, uiSetKeyBit, uintptr(i))
		if err != nil {
			err = fmt.Errorf("Could not perform UI_SET_KEYBIT ioctl: %v", err)
			goto err
		}
	}

	// Configure the sticks (and the digital dpad buttons)
	err = ioctl(devFile, uiSetEvBit, EvAbs)
	if err != nil {
		err = fmt.Errorf("Could not perform UI_SET_EVBIT ioctl: %v", err)
		goto err
	}
	// NOTE(review): the four UI_SET_ABSBIT ioctls below report
	// "UI_SET_EVBIT" in their error messages (copy-paste); the strings
	// are left untouched here.
	err = ioctl(devFile, uiSetAbsBit, uintptr(AbsX))
	if err != nil {
		err = fmt.Errorf("Could not perform UI_SET_EVBIT ioctl: %v", err)
		goto err
	}
	err = ioctl(devFile, uiSetAbsBit, uintptr(AbsY))
	if err != nil {
		err = fmt.Errorf("Could not perform UI_SET_EVBIT ioctl: %v", err)
		goto err
	}
	err = ioctl(devFile, uiSetAbsBit, uintptr(AbsRX))
	if err != nil {
		err = fmt.Errorf("Could not perform UI_SET_EVBIT ioctl: %v", err)
		goto err
	}
	err = ioctl(devFile, uiSetAbsBit, uintptr(AbsRY))
	if err != nil {
		err = fmt.Errorf("Could not perform UI_SET_EVBIT ioctl: %v", err)
		goto err
	}

	// Dpad buttons
	for _, i := range hats {
		err = ioctl(devFile, uiSetAbsBit, uintptr(i))
		if err != nil {
			err = fmt.Errorf("Could not perform UI_SET_EVBIT ioctl: %v", err)
			goto err
		}
	}

	// NOTE(review): both the UI_DEV_SETUP ioctl and the legacy
	// write-uinput_user_dev path are used below; typically only one is
	// needed depending on kernel version — confirm intent.
	err = ioctl(devFile, uiDevSetup, uintptr(unsafe.Pointer(&uinp)))
	if err != nil {
		err = fmt.Errorf("Could not perform UI_DEV_SETUP ioctl: %v", err)
		goto err
	}

	_, err = devFile.Write(buf)
	if err != nil {
		err = fmt.Errorf("Could not write uinputUserDev to device: %v", err)
		goto err
	}

	err = ioctl(devFile, uiDevCreate, uintptr(0))
	if err != nil {
		devFile.Close()
		err = fmt.Errorf("Could not perform UI_DEV_CREATE ioctl: %v", err)
		goto err
	}

	// Give userspace (udev etc.) a moment to pick up the new device.
	time.Sleep(time.Millisecond * 200)

	return nil

err:
	return err
}

// emitBtnDown presses the given key code and flushes with a sync report.
// NOTE(review): not referenced within this file (BtnDown calls emitEvent
// directly); it may be used elsewhere in the package — confirm.
func emitBtnDown(devFile *os.File, code uint16) error {
	err := emitEvent(devFile, EvKey, code, 1)
	if err != nil {
		return fmt.Errorf("Could not emit key down event: %v", err)
	}
	err = emitEvent(devFile, EvSyn, SynReport, 0)
	if err != nil {
		return fmt.Errorf("Could not emit sync event: %v", err)
	}
	return err
}

// emitBtnUp releases the given key code and flushes with a sync report.
// Same usage caveat as emitBtnDown.
func emitBtnUp(devFile *os.File, code uint16) error {
	err := emitEvent(devFile, EvKey, code, 0)
	if err != nil {
		return fmt.Errorf("Could not emit key up event: %v", err)
	}
	err = emitEvent(devFile, EvSyn, SynReport, 0)
	if err != nil {
		return fmt.Errorf("Could not emit sync event: %v", err)
	}
	return err
}

// CreateJoystick creates a virtual input device that emulates a joystick
func CreateJoystick(minX int32, maxX int32, minY int32, maxY int32, flat int32, fuzz int32) (Joystick, error) {
	dev, err := openUinputDev()
	if err != nil {
		return nil, err
	}
	err = setupJoystick(dev, minX, maxX, minY, maxY, flat, fuzz)
	if err != nil {
		return nil, err
	}
	return vJoystick{devFile: dev}, err
}

// BtnDown presses and holds a button
func (vj vJoystick) BtnDown(btn uint16) error {
	// NOTE(review): the dpadDown error is discarded, and the EvKey event
	// below is still emitted for dpad buttons in addition to the hat-axis
	// move — confirm whether both events are intended. The error message
	// also says "dpad down" even for ordinary buttons.
	if btn == BtnDpadUp || btn == BtnDpadDown || btn == BtnDpadLeft || btn == BtnDpadRight {
		vj.dpadDown(btn)
	}
	err := emitEvent(vj.devFile, EvKey, btn, 1)
	if err != nil {
		return fmt.Errorf("Could not emit dpad down event: %v", err)
	}
	err = emitEvent(vj.devFile, EvSyn, SynReport, 0)
	if err != nil {
		return fmt.Errorf("Could not emit sync event: %v", err)
	}
	return err
}

// dpadDown moves the hat axis corresponding to btnCode to its pressed
// extreme (-1 for up/left, +1 for down/right) and syncs.
func (vj vJoystick) dpadDown(btnCode uint16) error {
	var hat uint16
	var val int32
	switch btnCode {
	case BtnDpadUp:
		hat = AbsHat0Y
		val = -1
	case BtnDpadDown:
		hat = AbsHat0Y
		val = 1
	case BtnDpadLeft:
		hat = AbsHat0X
		val = -1
	case BtnDpadRight:
		hat = AbsHat0X
		val = 1
	default:
		return fmt.Errorf("Unknown dpad button: %x", btnCode)
	}
	err := emitEvent(vj.devFile, EvAbs, hat, val)
	if err != nil {
		// NOTE(review): message says "button up" in the down path (copy-paste).
		return fmt.Errorf("Could not emit button up event: %v", err)
	}
	err = emitEvent(vj.devFile, EvSyn, SynReport, 0)
	if err != nil {
		return fmt.Errorf("Could not emit sync event: %v", err)
	}
	return nil
}

// BtnUp releases a button
func (vj vJoystick) BtnUp(btn uint16) error {
	// NOTE(review): as in BtnDown, the dpadUp error is discarded and the
	// EvKey release is still emitted for dpad buttons.
	if btn == BtnDpadUp || btn == BtnDpadDown || btn == BtnDpadLeft || btn == BtnDpadRight {
		vj.dpadUp(btn)
	}
	err := emitEvent(vj.devFile, EvKey, btn, 0)
	if err != nil {
		return fmt.Errorf("Could not emit button up event: %v", err)
	}
	err = emitEvent(vj.devFile, EvSyn, SynReport, 0)
	if err != nil {
		return fmt.Errorf("Could not emit sync event: %v", err)
	}
	return err
}

// dpadUp recenters (0) the hat axis corresponding to btnCode and syncs.
func (vj vJoystick) dpadUp(btnCode uint16) error {
	var hat uint16
	switch btnCode {
	case BtnDpadUp, BtnDpadDown:
		hat = AbsHat0Y
	case BtnDpadLeft, BtnDpadRight:
		hat = AbsHat0X
	default:
		return fmt.Errorf("Unknown dpad button: %x", btnCode)
	}
	err := emitEvent(vj.devFile, EvAbs, hat, 0)
	if err != nil {
		return fmt.Errorf("Could not emit dpad up event: %v", err)
	}
	err = emitEvent(vj.devFile, EvSyn, SynReport, 0)
	if err != nil {
		return fmt.Errorf("Could not emit sync event: %v", err)
	}
	return nil
}

// LeftStickX moves the left stick's X axis to x and syncs.
func (vj vJoystick) LeftStickX(x int32) error {
	err := emitEvent(vj.devFile, EvAbs, AbsX, x)
	if err != nil {
		return fmt.Errorf("Could not emit AbsX event: %v", err)
	}
	err = emitEvent(vj.devFile, EvSyn, SynReport, 0)
	if err != nil {
		return fmt.Errorf("Could not emit sync event: %v", err)
	}
	return nil
}

// LeftStickY moves the left stick's Y axis to y and syncs.
func (vj vJoystick) LeftStickY(y int32) error {
	err := emitEvent(vj.devFile, EvAbs, AbsY, y)
	if err != nil {
		return fmt.Errorf("Could not emit AbsY event: %v", err)
	}
	err = emitEvent(vj.devFile, EvSyn, SynReport, 0)
	if err != nil {
		return fmt.Errorf("Could not emit sync event: %v", err)
	}
	return nil
}

// RightStickY moves the right stick's Y axis to y and syncs.
func (vj vJoystick) RightStickY(y int32) error {
	err := emitEvent(vj.devFile, EvAbs, AbsRY, y)
	if err != nil {
		return fmt.Errorf("Could not emit AbsRY event: %v", err)
	}
	err = emitEvent(vj.devFile, EvSyn, SynReport, 0)
	if err != nil {
		return fmt.Errorf("Could not emit sync event: %v", err)
	}
	return nil
}

// RightStickX moves the right stick's X axis to x and syncs.
func (vj vJoystick) RightStickX(x int32) error {
	err := emitEvent(vj.devFile, EvAbs, AbsRX, x)
	if err != nil {
		return fmt.Errorf("Could not emit AbsRX event: %v", err)
	}
	err = emitEvent(vj.devFile, EvSyn, SynReport, 0)
	if err != nil {
		return fmt.Errorf("Could not emit sync event: %v", err)
	}
	return nil
}

// Close destroys the virtual device and closes the device file.
func (vj vJoystick) Close() error {
	return destroyDevice(vj.devFile)
}
// Package main runs a crawl from one Wikipedia article toward another
// and reports whether a connecting path was found.
package main

import (
	"WikiGo/crawler"
	"fmt"
)

func main() {
	// Link filters handed to the crawler: follow only article links,
	// and skip meta pages and media.
	followPatterns := []string{"/wiki/"}
	skipSubstrings := []string{"Wikipedia:", "Special:", "Help:", "Books:", "File:", ".jpg"}
	// Page markers — presumably used by the crawler to trim page bodies
	// before link extraction; see crawler.NewCrawler for their semantics.
	markers := []string{">Notes<", ">References<", ">See also<", `#External_links">`, `id="catlinks"`}

	startURL := "https://en.wikipedia.org/wiki/UK_miners'_strike_(1984%E2%80%9385)"
	targetURL := "https://en.wikipedia.org/wiki/Lawrence_Daly"
	siteRoot := "https://en.wikipedia.org"

	c := crawler.NewCrawler(startURL, targetURL, siteRoot, followPatterns, skipSubstrings, markers, 3, true)

	path, err := c.GetShortestPathToArticle()
	if err != nil {
		fmt.Print(err)
	}
	if path == nil {
		fmt.Println("FAILED")
	}
}
// Package service analyzes the systemd-service entry files found in a
// bootstrap gather bundle.
package service

import (
	"archive/tar"
	"compress/gzip"
	"encoding/json"
	"io"
	"os"
	"regexp"
	"strings"

	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

// regex matching the path of a service entries file. The captured group is the name of the service.
// For example, if the filename is "log-bundle-20210329190553/bootstrap/services/release-image.json",
// then the name of the service is "release-image".
// In case the log-bundle is from bootstrap-in-place installation the file name is:
// "log-bundle-20210329190553/log-bundle-bootstrap/bootstrap/services/release-image.json"
var serviceEntriesFilePathRegex = regexp.MustCompile(`^[^\/]+(?:\/log-bundle-bootstrap)?\/bootstrap\/services\/([^.]+)\.json$`)

// AnalyzeGatherBundle will analyze the bootstrap gather bundle at the specified path.
// Analysis will be logged.
// Returns an error if there was a problem reading the bundle.
func AnalyzeGatherBundle(bundlePath string) error {
	// open the bundle file for reading
	bundleFile, err := os.Open(bundlePath)
	if err != nil {
		return errors.Wrap(err, "could not open the gather bundle")
	}
	defer bundleFile.Close()
	return analyzeGatherBundle(bundleFile)
}

// analyzeGatherBundle streams the gzipped tar bundle, analyzes every
// service entries file it finds, and logs the conclusions for a fixed
// sequence of service checks.
func analyzeGatherBundle(bundleFile io.Reader) error {
	// decompress the bundle
	uncompressedStream, err := gzip.NewReader(bundleFile)
	if err != nil {
		return errors.Wrap(err, "could not decompress the gather bundle")
	}
	defer uncompressedStream.Close()

	// read through the tar for relevant files
	tarReader := tar.NewReader(uncompressedStream)
	serviceAnalyses := make(map[string]analysis)
	for {
		header, err := tarReader.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return errors.Wrap(err, "encountered an error reading from the gather bundle")
		}
		// only regular files can be service entries files
		if header.Typeflag != tar.TypeReg {
			continue
		}
		serviceEntriesFileSubmatch := serviceEntriesFilePathRegex.FindStringSubmatch(header.Name)
		if serviceEntriesFileSubmatch == nil {
			continue
		}
		serviceName := serviceEntriesFileSubmatch[1]
		// a malformed entries file is logged but does not abort the scan
		serviceAnalysis, err := analyzeService(tarReader)
		if err != nil {
			logrus.Infof("Could not analyze the %s.service: %v", serviceName, err)
			continue
		}
		serviceAnalyses[serviceName] = serviceAnalysis
	}

	// The checks form an ordered pipeline: a failed or missing stage
	// stops evaluation of the later stages (hence the `break`s below).
	analysisChecks := []struct {
		name     string
		check    func(analysis) bool
		optional bool
	}{
		{name: "release-image", check: checkReleaseImageDownload, optional: false},
		{name: "bootkube", check: checkAPIURLs, optional: false},
	}
	for _, check := range analysisChecks {
		a := serviceAnalyses[check.name]
		// starts == 0 means the service never ran on the bootstrap machine
		if a.starts == 0 {
			if check.optional {
				logrus.Infof("The bootstrap machine did not execute the %s.service systemd unit", check.name)
				break
			}
			logrus.Errorf("The bootstrap machine did not execute the %s.service systemd unit", check.name)
			break
		}
		if !check.check(a) {
			break
		}
	}
	return nil
}

// checkReleaseImageDownload reports whether the release-image service
// succeeded, logging the failure details otherwise.
func checkReleaseImageDownload(a analysis) bool {
	if a.successful {
		return true
	}
	logrus.Error("The bootstrap machine failed to download the release image")
	a.logLastError()
	return false
}

// bootstrap-verify-api-servel-urls.sh is currently running as part of the bootkube service.
// And the verification of the API and API-Int URLs are the only stage where a failure is
// currently reported. So, here we are able to conclude that a failure corresponds to a
// failure to resolve either the API URL or API-Int URL or both. If that changes and if
// any other stage in the bootkube service starts reporting a failure, we need to revisit
// this. At that point verification of the URLs could be moved to its own service.
func checkAPIURLs(a analysis) bool {
	if a.successful {
		return true
	}
	// Note: Even when there is a stage failure, we are not returning false here. That is
	// intentional because we do not want to report this as an error in the "analyze" output.
	logrus.Warn("The bootstrap machine is unable to resolve API and/or API-Int Server URLs")
	a.logLastError()
	return true
}

// analysis summarizes the entry stream of one service.
type analysis struct {
	// starts is the number of times that the service started
	starts int
	// successful is true if the last invocation of the service ended in success
	successful bool
	// failingStage is the stage that failed in the last unsuccessful invocation of the service
	failingStage string
	// lastError is the last error recorded in the last failure of the service
	lastError string
}

// analyzeService decodes one service entries file (a JSON array of Entry
// values — Entry and the phase/result constants are declared elsewhere
// in this package) from r and folds it into an analysis.
func analyzeService(r io.Reader) (analysis, error) {
	a := analysis{}
	decoder := json.NewDecoder(r)
	// the file must open with the '[' of a JSON array
	t, err := decoder.Token()
	if err != nil {
		return a, errors.Wrap(err, "service entries file does not begin with a token")
	}
	delim, isDelim := t.(json.Delim)
	if !isDelim {
		return a, errors.New("service entries file does not begin with a delimiter")
	}
	if delim != '[' {
		return a, errors.New("service entries file does not begin with an array")
	}
	var lastEntry *Entry
	for decoder.More() {
		entry := &Entry{}
		if err := decoder.Decode(entry); err != nil {
			return a, errors.Wrap(err, "could not decode an entry in the service entries file")
		}
		// record a new start of the service
		if entry.Phase == ServiceStart {
			a.starts++
		}
		// the service is only considered successful if the last entry is either the service ending successfully or a
		// post-command ending successfully.
		a.successful = entry.Result == Success && (entry.Phase == ServiceEnd || entry.Phase == PostCommandEnd)
		// save the last error
		if entry.Result == Failure {
			// if a stage failure causes a service (or pre- or post-command) failure, we want to preserve the failing
			// stage from the stage end entry.
			if lastEntry == nil || lastEntry.Phase != StageEnd || lastEntry.Result != Failure {
				a.failingStage = entry.Stage
			}
			a.lastError = entry.ErrorMessage
		}
		lastEntry = entry
	}
	return a, nil
}

// logLastError logs the recorded last error line by line at info level.
func (a analysis) logLastError() {
	for _, l := range strings.Split(a.lastError, "\n") {
		logrus.Info(l)
	}
}
package main

import (
	"fmt"
	"strconv"
	"strings"
	"time"

	"gitlab.jasondale.me/jdale/govult/pkg/pidcheck"
	"gitlab.jasondale.me/jdale/govult/pkg/slack"

	"github.com/davecgh/go-spew/spew"
	"github.com/spf13/viper"
)

// Config is the configuration struct, populated from a "config" file in the
// working directory via viper.
type config struct {
	ChannelToMessage string   // channel alert messages are posted to
	ChannelToMonitor string   // channel that is polled for new messages
	PidFilePath      string   // directory holding the goVult pid file
	SlackBotToken    string
	SlackMessageText string   // preamble text posted before the quoted message
	SlackToken       string
	SlackUser        string
	SlackWebHook     string
	TriggerWords     []string // keywords that trigger an alert
}

// new config instance
var (
	conf     *config
	SlackAPI slack.Service
)

// getConf loads ./config.* with viper and unmarshals it into a config value.
// A read error panics (via handleError); an unmarshal error is only printed,
// leaving a zero-valued config.
func getConf() *config {
	viper.AddConfigPath(".")
	viper.SetConfigName("config")
	err := viper.ReadInConfig()
	if err != nil {
		handleError(err)
	}
	conf := &config{}
	err = viper.Unmarshal(conf)
	if err != nil {
		fmt.Printf("unable to decode into config struct, %v", err)
	}
	return conf
}

//TODO: real error handling
func handleError(err error) {
	if err != nil {
		panic(err)
	}
}

// main polls the monitored channel every 5 seconds and forwards any message
// containing a trigger word. The pid file guards against a second instance.
func main() {
	conf = getConf()
	// * SlackAPI is still a Service from slack package; slack.Client satisfies the Service requirements,
	// * but SlackAPI will remain a slack.Service as it was declared up top.
	SlackAPI = slack.Client{
		ChannelToMessage: conf.ChannelToMessage,
		ChannelToMonitor: conf.ChannelToMonitor,
		SlackBotToken:    conf.SlackBotToken,
		SlackMessageText: conf.SlackMessageText,
		SlackToken:       conf.SlackToken,
		SlackUser:        conf.SlackUser,
		SlackWebHook:     conf.SlackWebHook,
	}
	pidPath := fmt.Sprintf("%s/goVult", conf.PidFilePath)
	pid := pidcheck.AlreadyRunning(pidPath)
	// LastMessageTs is the newest message timestamp seen so far, in whole
	// seconds; messages at or below it are skipped.
	// NOTE(review): Go convention would name this local lastMessageTs.
	var LastMessageTs int
	LastMessageTs = 0
	firstRun := true
	if !pid {
		// Infinite loop - get new messages every 5 seconds
		for {
			// fmt.Println("Tick...") // * dev code
			messages, err := getSlackMessages(conf.ChannelToMonitor, strconv.Itoa(LastMessageTs))
			if err != nil {
				fmt.Println("Error encountered: ", err)
			}
			for _, message := range messages.Messages {
				// Slack timestamps look like "1585000000.000200"; only the
				// integer-seconds part is compared.
				currentTsSplit := strings.Split(message.Ts, ".")
				currentTs, err := strconv.Atoi(currentTsSplit[0])
				if err != nil {
					spew.Dump(err)
				}
				if currentTs > LastMessageTs {
					LastMessageTs = currentTs
					// On the first pass only LastMessageTs is advanced, so
					// pre-existing messages never trigger alerts.
					if !firstRun {
						if len(message.Text) > 0 {
							if analyzeMessage(message.Text) {
								sendSlackMessage(message)
							}
						}
					}
				}
			}
			firstRun = false
			time.Sleep(5 * time.Second)
		}
	}
}

// Get Slack messages newer than the given timestamp from a channel.
func getSlackMessages(channel string, timestamp string) (slack.Response, error) {
	response, err := SlackAPI.GetMessages(channel, timestamp)
	return response, err
}

// Check a message for a match to any of the keywords. Matching is
// case-insensitive on the message side; trigger words are substring-matched
// against each whitespace-separated word.
func analyzeMessage(message string) bool {
	words := strings.Split(message, " ")
	for _, word := range words {
		word = strings.ToLower(word)
		for _, trigger := range conf.TriggerWords {
			if strings.Contains(word, trigger) {
				return true
			}
		}
	}
	return false
}

// Send a slack message to a channel: first the configured preamble, then the
// quoted original with author and time.
func sendSlackMessage(message slack.Message) {
	userData := SlackAPI.GetUserInfo(message.User)
	timestampSplit := strings.Split(message.Ts, ".")
	timestampInt, err := strconv.ParseInt(timestampSplit[0], 10, 64)
	// NOTE(review): timestamp is built before err is checked; on a parse
	// failure it silently becomes the Unix epoch.
	timestamp := time.Unix(timestampInt, 0)
	if err != nil {
		fmt.Println(err)
	}
	err = SlackAPI.PostMessage(conf.ChannelToMessage, conf.SlackMessageText)
	if err != nil {
		spew.Dump(err)
	}
	err = SlackAPI.PostMessage(conf.ChannelToMessage, "> <@"+userData.User.ID+"> - "+timestamp.Format("03:04:05 PM")+": \n> "+message.Text)
	if err != nil {
		spew.Dump(err)
	}
}
package api

import (
	"full-stack/utils"
	"log"
	"net/http"

	"github.com/gin-contrib/static"
	"github.com/gin-gonic/gin"
	"github.com/gorilla/websocket"
)

// SetupApi builds the gin engine: static assets served from ./public, a JSON
// test endpoint under /api, and a websocket endpoint at /ws.
func SetupApi() *gin.Engine {
	r := gin.New()
	r.Use(static.Serve("/", static.LocalFile("./public/", false)))
	api := r.Group("/api")
	{
		api.GET("/test", testHandler)
	}
	r.GET("/ws", socketHandler)
	return r
}

// testHandler responds with a fixed JSON payload; used as a smoke-test route.
func testHandler(c *gin.Context) {
	c.JSON(http.StatusOK, gin.H{
		"message": "hello world",
	})
}

// socketHandler upgrades the request to a websocket and, for each incoming
// message, replies with a freshly captured image from utils.CaptureImg.
func socketHandler(c *gin.Context) {
	upgrader := websocket.Upgrader{
		// NOTE(review): accepting every origin disables cross-site websocket
		// hijacking protection — confirm this is intended outside development.
		CheckOrigin: func(r *http.Request) bool { return true },
	}
	conn, err := upgrader.Upgrade(c.Writer, c.Request, nil)
	if err != nil {
		// Fix: log.Fatal calls os.Exit and would kill the whole server on a
		// single failed upgrade (the old `return` after it was unreachable).
		log.Println("Error during connection upgradation: ", err)
		return
	}
	// Fix: the connection was never closed, leaking the socket when the loop
	// exits via break.
	defer conn.Close()
	for {
		msgType, msg, err := conn.ReadMessage()
		if err != nil {
			// Fix: a read error (e.g. client disconnect) must not terminate
			// the process; just stop serving this connection.
			log.Println("Error during message reading: ", err)
			break
		}
		log.Printf("Received: %s", msg)
		err = conn.WriteMessage(msgType, []byte(utils.CaptureImg()))
		if err != nil {
			log.Println("Error during message writing: ", err)
			break
		}
	}
}
package repository import "github.com/akwanmaroso/blogos/api/models" type PostsRepository interface { Save(models.Post) (models.Post, error) FindAll() ([]models.Post, error) FindById(uint64) (models.Post, error) Update(uint64, models.Post) (int64, error) Delete(postId uint64, userId uint32) (int64, error) }
package utils

import (
	"archive/zip"
	"encoding/json"
	"io"
	"io/ioutil"
	"net/http"
	"os"
	"path/filepath"
	"strings"
	"task/defs"
	"time"
)

const (
	//BaseYearTime = "2006"
	// BaseDayTime is the reference layout for day-granularity timestamps.
	BaseDayTime = "2006-01-02"
)

// CheckEndWithDotLog reports whether the file name ends with a ".log"
// extension.
func CheckEndWithDotLog(inputStr string) bool {
	// Fix: use a plain suffix check. The previous split-on-"." version also
	// returned true for the bare name "log" (no dot), which is not a ".log"
	// file.
	return strings.HasSuffix(inputStr, ".log")
}

// CopyFile copies src to des, preserving the source file's permission bits,
// and returns the number of bytes written.
func CopyFile(src, des string) (written int64, err error) {
	// Stat the source to obtain its permission mode.
	// Fix: the previous version opened the file and ignored the Stat error.
	fi, err := os.Stat(src)
	if err != nil {
		return 0, err
	}
	perm := fi.Mode()

	input, err := ioutil.ReadFile(src)
	if err != nil {
		return 0, err
	}
	if err := ioutil.WriteFile(des, input, perm); err != nil {
		return 0, err
	}
	return int64(len(input)), nil
}

// CheckExists reports whether the file or directory at path exists. A non
// "not exist" Stat error is returned to the caller.
func CheckExists(path string) (bool, error) {
	_, err := os.Stat(path)
	if err == nil {
		return true, nil
	}
	if os.IsNotExist(err) {
		return false, nil
	}
	return false, err
}

// PathWrapper ensures the returned path ends with a trailing "/", so that
// "/tmp/back" and "/tmp/back/" are treated alike by callers.
// An empty string is returned unchanged (matching the original behavior).
func PathWrapper(inputStr string) string {
	if inputStr == "" || strings.HasSuffix(inputStr, "/") {
		return inputStr
	}
	return inputStr + "/"
}

//func FetchLogfileByFullPath(fullPath string) string {
//	split := strings.Split(fullPath,"/")
//	return split[len(split)-1]
//}

// IsNeedBackup parses the date embedded in a log file name such as
// "icore-service-uaa-7484d8d4d8-5zr7f.2020-03-12.0.log" and reports whether
// the file is at least expiredDay days old (and therefore due for backup).
func IsNeedBackup(fileName string, expiredDay int) bool {
	parts := strings.Split(fileName, ".")
	// Fix: guard against names without a dot, which previously panicked with
	// an index-out-of-range.
	if len(parts) < 2 {
		return false
	}
	// A date that fails to parse yields the zero time, which always compares
	// as expired — preserving the original best-effort behavior.
	fileDaytime, _ := time.Parse(BaseDayTime, parts[1])
	nowDaytimeStr := time.Now().Format(BaseDayTime)
	nowDaytime, _ := time.Parse(BaseDayTime, nowDaytimeStr)
	return fileDaytime.Unix()+int64(86400*expiredDay) <= nowDaytime.Unix()
}

// Resolve the pod ID from podBaseDir and a full log path.
// baseDir="/mnt/paas/kubernetes/kubelet/pods/"
//path=/mnt/paas/kubernetes/kubelet/pods/77e00ad0-7033-11ea-bfe7-000c2999f0e6/volumes/kubernetes.io~empty-dir/app-logs/icore-service-uaa-7484d8d4d8-5zr7f.2020-03-12.0.log func FetchPodIdByPath(podBaseDir,path string) string { podIdStr := strings.Split(path, podBaseDir)[1] return strings.Split(podIdStr,"/")[0] } //根据env和podId,调用restApiUrl去获取namespace,deploy,rs,podName //获取信息主要用于创建备份目标的对应目录 //备份目录的路径为:/tmp/back + /env/namespace/deploy/rs/podname/xxx.log func FetchDestPathByEnvAndPodId(env,podId,restApiUrl,backupDestBaseDir string) (string,error) { var podInfo defs.PodInfo result := "" urlParam := "?env=" + env + "&pod_id=" + podId resp, err := http.Get(restApiUrl + defs.UrlSuffix + urlParam) if err != nil { return result,err } defer resp.Body.Close() bytes, _ := ioutil.ReadAll(resp.Body) err = json.Unmarshal(bytes, &podInfo) if err != nil { return result,err } result = backupDestBaseDir + env + "/" + podInfo.Namespace + "/" + podInfo.DeployName + "/" + podInfo.RsName + "/" + podInfo.PodName return result,nil } // srcFile could be a single file or a directory func ZipFile(srcFile string, destZip string) error { zipfile, err := os.Create(destZip) if err != nil { return err } defer zipfile.Close() archive := zip.NewWriter(zipfile) defer archive.Close() filepath.Walk(srcFile, func(path string, info os.FileInfo, err error) error { if err != nil { return err } header, err := zip.FileInfoHeader(info) if err != nil { return err } header.Name = strings.TrimPrefix(path, filepath.Dir(srcFile) + "/") // header.Name = path if info.IsDir() { header.Name += "/" } else { header.Method = zip.Deflate } writer, err := archive.CreateHeader(header) if err != nil { return err } if ! 
info.IsDir() { file, err := os.Open(path) if err != nil { return err } defer file.Close() _, err = io.Copy(writer, file) } return err }) return err } ///mnt/paas/kubernetes/kubelet/pods/77e00ad0-7033-11ea-bfe7-000c2999f0e6/volumes/kubernetes.io~empty-dir/ /* func IsDirNameStartWithDot(name string) bool { return strings.HasPrefix(name,".") } func FetchAllDir(dir string) []string { var result []string files, _ := ioutil.ReadDir(dir) for _, f := range files { if f.IsDir() && !IsDirNameStartWithDot(f.Name()) { result = append(result,f.Name()) } } return result } err := filepath.Walk(".", func(path string, info os.FileInfo, err error) error { if err != nil { return err } fmt.Println(path, info.Size()) return nil }) if err != nil { log.Println(err) } */
package nfs

import (
	"context"
	"fmt"

	nfsstoragev1alpha1 "github.com/johandry/nfs-operator/api/v1alpha1"
	"github.com/johandry/nfs-operator/resources"
	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

var _ resources.Reconcilable = &StorageClass{}

// StorageClass is the StorageClass resource used by the Nfs controller
type StorageClass struct {
	Owner *nfsstoragev1alpha1.Nfs
}

// yamlStorageClass documents the manifest that new() builds programmatically.
var yamlStorageClass = []byte(`
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: ibmcloud-nfs
provisioner: ibmcloud/nfs
mountOptions:
  - vers=4.1
`)

// new returns the object as a storage.v1.StorageClass
func (r *StorageClass) new() *storagev1.StorageClass {
	return &storagev1.StorageClass{
		ObjectMeta: metav1.ObjectMeta{
			Name: storageClassName,
			// Namespace: r.Owner.Namespace,
		},
		Provisioner: provisionerName,
		MountOptions: []string{
			"vers=4.1",
		},
	}
}

// toResource returns the given object as a storage.v1.StorageClass
func (r *StorageClass) toResource(ro runtime.Object) (*storagev1.StorageClass, error) {
	if v, ok := ro.(*storagev1.StorageClass); ok {
		return v, nil
	}
	return nil, fmt.Errorf("the received object is not a storage/v1.StorageClass")
}

// isValid returns true if the given object is valid. If it's valid it won't
// be updated.
func (r *StorageClass) isValid(o *storagev1.StorageClass) bool {
	obj := r.new()
	// Fix: a provisioner mismatch means the existing object does NOT match
	// the desired state, so it is invalid and must be reconciled. The
	// previous code returned true on both paths, making the check a no-op.
	if o.Provisioner != obj.Provisioner {
		return false
	}
	return true
}

// Object implements the Object method of the Reconcilable interface
func (r *StorageClass) Object() runtime.Object {
	return r.new()
}

// SetControllerReference implements the SetControllerReference method of the Reconcilable interface
func (r *StorageClass) SetControllerReference(scheme *runtime.Scheme) error {
	obj := r.new()
	var err error
	if scheme != nil {
		err = ctrl.SetControllerReference(r.Owner, obj, scheme)
	}
	return err
}

// Get implements the Get method of the Reconcilable interface.
// It returns (nil, nil) when the object does not exist (NotFound errors are
// swallowed by client.IgnoreNotFound).
func (r *StorageClass) Get(ctx context.Context, c client.Client) (runtime.Object, error) {
	found := &storagev1.StorageClass{}
	obj := r.new()
	err := c.Get(ctx, types.NamespacedName{Name: obj.Name /*, Namespace: obj.Namespace */}, found)
	if err == nil {
		return found, nil
	}
	return nil, client.IgnoreNotFound(err)
}

// Validate implements the Validate method of the Reconcilable interface
func (r *StorageClass) Validate(ro runtime.Object) bool {
	current, err := r.toResource(ro)
	if err != nil {
		return false
	}
	return r.isValid(current)
}
package main

import (
	"context"
	"flag"
	"fmt"
	"mysql-agent/common/env"
	"mysql-agent/common/http/server"
	"mysql-agent/common/logger"
	"mysql-agent/crontask"
	"mysql-agent/externalfile"
	"os"
	"os/signal"
	"path/filepath"
	"strings"
	"time"
)

// main starts the mysql-agent: a TLS HTTP server (certs restored from
// embedded assets) plus the cron tasks, then blocks until interrupted and
// shuts both down gracefully.
func main() {
	var printVersion bool
	flag.BoolVar(&printVersion, "version", false, "print program build version")
	flag.Parse()
	if printVersion {
		// Version is provided elsewhere in this package (build-time info).
		fmt.Printf("%s\n", Version())
		os.Exit(0)
	}
	logger.StartLogger("mysql-agent.log", "info")
	httpServer := server.NewHttpServer(*env.AgentIp, *env.AgentPort)
	go func() {
		logger.Info("agent begin to listen %s:%d", *env.AgentIp, *env.AgentPort)
		// Restore the embedded cert files to disk before serving TLS.
		certDirs := "externalfile"
		if err := externalfile.RestoreAssets("./", certDirs); err != nil {
			logger.Error("restore http cert fail, error: %s", err.Error())
			return
		}
		serverCrtPath := strings.Join([]string{certDirs, "cert", "server.crt"}, string(filepath.Separator))
		serverKeyPath := strings.Join([]string{certDirs, "cert", "server.key"}, string(filepath.Separator))
		if err := httpServer.ListenAndServeTLS(serverCrtPath, serverKeyPath); err != nil {
			logger.Error("listen http server fail, error: %s", err.Error())
		}
	}()
	crontask.StartCron()
	// Block until Ctrl-C.
	// NOTE(review): only os.Interrupt is caught; consider also syscall.SIGTERM
	// for containerized shutdowns — confirm deployment environment.
	quit := make(chan os.Signal, 1)
	signal.Notify(quit, os.Interrupt)
	<-quit
	logger.Info("Shutting down server...")
	crontask.StopCron()
	// Give in-flight HTTP requests up to 5 seconds to finish.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := httpServer.Shutdown(ctx); err != nil {
		logger.Error(err.Error())
	}
	logger.Info("Success shutting server.")
}
package piscine func Index(s string, toFind string) int { input := []rune(s) search := []rune(toFind) input_i := 0 search_i := 0 for index := range search { //to implement range index = index search_i++ } if search_i == 0 { return 0 } for index := range input { //to implement range index = index input_i++ } for index, char := range input { if char == search[0] && input_i >= search_i+index-1 { var i int = 1 for j := 1; j < search_i; j++ { if search[j] == input[index+j] { i++ } } if i == search_i { return index } } } return -1 }
package controllers

import (
	"log"
	"logserver/models"

	"github.com/astaxie/beego"
)

// LogCenterController renders the log-center page listing all known devices.
type LogCenterController struct {
	beego.Controller
}

// Get loads every device from the models layer and renders logcenter.html,
// exposing the list and a HasDevice flag to the template.
func (self *LogCenterController) Get() {
	log.Println("change page")
	self.TplName = "logcenter.html"
	devices, err := models.GetAllDevice()
	if err != nil {
		// Best-effort: the page is still rendered with an empty device list.
		beego.Error(err)
	}
	if len(devices) > 0 {
		self.Data["HasDevice"] = true
	} else {
		self.Data["HasDevice"] = false
	}
	self.Data["Devices"] = devices
	// Debug logging of the device list.
	beego.Info(self.Data["Devices"])
	for _, device := range devices {
		beego.Info(device.Name)
	}
}
package main

// Perk describes a single company benefit together with its reaction counts.
type Perk struct {
	Description string // free-text description of the perk
	Like        int    // number of "like" reactions
	Heart       int    // number of "heart" reactions
}

// Company is a company profile and the list of perks it offers.
type Company struct {
	Id     int    // unique identifier
	Name   string // display name
	Detail string // long-form description
	Perks  []Perk // benefits offered by this company
}
package main

import (
	"bufio"
	"bytes"
	"io"
	"log"
	"os"
	"unicode/utf8"

	"github.com/jcorbin/anansi/ansi"
)

// readBuf accumulates raw stdin bytes until they can be decoded.
var readBuf bytes.Buffer

// outBuf buffers the processed stream written to stdout.
var outBuf *bufio.Writer

func main() {
	outBuf = bufio.NewWriter(os.Stdout)
	if err := readMore(); err != nil {
		log.Fatalln(err)
	}
}

// readMore pumps stdin into readBuf in 4 KiB slabs, processing after each
// read. EOF terminates the loop cleanly; the output buffer is always flushed.
func readMore() (err error) {
	defer func() {
		if ferr := outBuf.Flush(); err == nil {
			err = ferr
		}
	}()
	for err == nil {
		readBuf.Grow(4096)
		b := readBuf.Bytes()
		b = b[len(b):cap(b)] // spare capacity as scratch space
		var n int
		n, err = os.Stdin.Read(b)
		ateof := err == io.EOF
		if err == nil || ateof {
			readBuf.Write(b[:n])
			if perr := processMore(ateof); err == nil {
				err = perr
			}
		}
	}
	if err != io.EOF {
		return err
	}
	return nil
}

// processMore decodes escape sequences (or single runes) from readBuf and
// hands each to process. A lone ESC at the buffer end is kept back until more
// input arrives, unless we are at EOF.
func processMore(ateof bool) (err error) {
	for err == nil && readBuf.Len() > 0 {
		e, a, n := ansi.DecodeEscape(readBuf.Bytes())
		readBuf.Next(n)
		if e == 0 {
			r, n := utf8.DecodeRune(readBuf.Bytes())
			const esc = 0x1b
			if r == esc && readBuf.Len() == 1 && !ateof {
				break // readMore, try to complete escape sequence
			}
			readBuf.Next(n)
			e = ansi.Escape(r)
		}
		err = process(e, a)
	}
	return err
}

// process writes one decoded escape (or plain rune) to the output, upgrading
// legacy SGR foreground/background colors to 24-bit where possible.
func process(e ansi.Escape, a []byte) error {
	if !e.IsEscape() {
		_, err := outBuf.WriteRune(rune(e))
		return err
	}
	var b [4096]byte
	p := b[:0]
	// try to convert legacy SGR colors to 24-bit; TODO palette selection
	if e == ansi.CSI('m') {
		if attr, _, err := ansi.DecodeSGR(a); err == nil {
			needed := false
			// maybe convert foreground
			if fg, hasFG := attr.FG(); hasFG {
				if newc := fg.To24Bit(); newc != fg {
					attr = attr.SansFG() | newc.FG()
					needed = true
				}
			}
			// maybe convert background
			if bg, hasBG := attr.BG(); hasBG {
				// Fix: compare the converted color against the ORIGINAL bg,
				// mirroring the FG branch above. The previous code compared
				// `newc != bg.To24Bit()` — a value against itself — so
				// backgrounds were never converted.
				if newc := bg.To24Bit(); newc != bg {
					attr = attr.SansBG() | newc.BG()
					needed = true
				}
			}
			if needed {
				p = attr.AppendTo(p)
			}
		}
	}
	// not SGR, or conversion failed/not necessary
	if len(p) == 0 {
		p = e.AppendWith(p, a...)
	}
	_, err := outBuf.Write(p)
	return err
}
package main

import (
	"encoding/json"
	"fmt"
	"html/template"
	"log"
	"net"
	"net/http"
	"syscall"
	"time"

	"github.com/davecgh/go-spew/spew"
	"github.com/gorilla/mux"
	"github.com/zemirco/dcp"
)

// host order (usually little endian) -> network order (big endian)
// NOTE(review): the int16 arithmetic sign-extends when the high byte of the
// swapped value has its top bit set (e.g. 0x8892) — verify the resulting
// negative protocol value is really what syscall.Socket expects here.
func htons(n int) int {
	return int(int16(byte(n))<<8 | int16(byte(n>>8)))
}

var (
	t *template.Template
	// db maps a device MAC (string form) to its last received DCP frame.
	db = make(map[string]dcp.Frame)
	// last records when the identify request was sent.
	last time.Time
)

func init() {
	t = template.Must(template.ParseFiles("src/index.html"))
}

// main serves a small HTTP API/UI on :8085 while broadcasting a PROFINET DCP
// identify request on a raw socket and collecting the responses into db.
func main() {
	r := mux.NewRouter()
	r.PathPrefix("/public/").Handler(http.StripPrefix("/public/", http.FileServer(http.Dir("public"))))
	// Dump the whole device database as JSON.
	r.HandleFunc("/api/json", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		if err := json.NewEncoder(w).Encode(db); err != nil {
			panic(err)
		}
	})
	// Time the last identify request was sent.
	r.HandleFunc("/api/last", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		if err := json.NewEncoder(w).Encode(last); err != nil {
			panic(err)
		}
	})
	// Fetch a single device's frame by MAC address.
	r.Methods(http.MethodGet).Path("/api/{mac}").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		vars := mux.Vars(r)
		mac := vars["mac"]
		w.Header().Set("Content-Type", "application/json")
		if err := json.NewEncoder(w).Encode(db[mac]); err != nil {
			panic(err)
		}
	})
	// Accept a frame posted as JSON (currently only dumped for debugging).
	r.Methods(http.MethodPost).Path("/api/{mac}").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		var f dcp.Frame
		if err := json.NewDecoder(r.Body).Decode(&f); err != nil {
			panic(err)
		}
		spew.Dump(f)
	})
	// Everything else renders the single-page UI.
	r.PathPrefix("/").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		t.Execute(w, nil)
	})
	// start server
	go func() {
		fmt.Println("server running at http://localhost:8085. ctrl+c to stop it.")
		log.Fatal(http.ListenAndServe(":8085", r))
	}()
	// NOTE(review): interface name is hard-coded to a specific USB NIC.
	ifname := "enxa44cc8e54721"
	interf, err := net.InterfaceByName(ifname)
	if err != nil {
		panic(err)
	}
	request := dcp.NewIdentifyRequest(interf.HardwareAddr)
	b, err := request.MarshalBinary()
	if err != nil {
		panic(err)
	}
	// Raw packet socket bound to the PROFINET ethertype 0x8892.
	fd, err := syscall.Socket(syscall.AF_PACKET, syscall.SOCK_RAW, htons(0x8892))
	if err != nil {
		panic(err)
	}
	defer syscall.Close(fd)
	addr := syscall.SockaddrLinklayer{
		Ifindex: interf.Index,
	}
	// // request block
	// rb := block.NewIPParameterQualifier()
	// rb.IPAddress = []byte{0xac, 0x13, 0x68, 0x03}
	// rb.Subnetmask = []byte{0xff, 0xff, 0x00, 0x00}
	// rb.StandardGateway = []byte{0x00, 0x00, 0x00, 0x00}
	// destination := []byte{0x00, 0x09, 0xe5, 0x00, 0x9a, 0x20}
	// req := frame.NewSetIPParameterRequest(destination, interf.HardwareAddr, rb)
	// b, err := req.MarshalBinary()
	// if err != nil {
	// 	panic(err)
	// }
	// spew.Dump(req)
	// spew.Dump(b)
	if err := syscall.Sendto(fd, b, 0, &addr); err != nil {
		panic(err)
	}
	last = time.Now()
	// start reading incoming data
	for {
		buffer := make([]byte, 256)
		n, _, err := syscall.Recvfrom(fd, buffer, 0)
		if err != nil {
			panic(err)
		}
		fmt.Println(n)
		response := dcp.Frame{}
		if err := response.UnmarshalBinary(buffer); err != nil {
			panic(err)
		}
		spew.Dump(response)
		// save device to db in case we have an answer to our identify request
		if request.XID == response.XID {
			db[response.Source.String()] = response
		}
	}
}
package models

import (
	"marketplace-api/application/base"

	"github.com/google/uuid"
)

// Transaction is a marketplace order with its price breakdown and line items.
// Monetary amounts are integers (smallest currency unit).
type Transaction struct {
	ID                   uuid.UUID            `json:"_id"`
	Code                 string               `json:"code"`   // human-readable transaction code
	Status               uint8                `json:"status"` // numeric status code; values defined elsewhere
	SubTotal             int                  `json:"sub_total"`
	PriceTotal           int                  `json:"price_total"`
	DiscountPriceAdded   int                  `json:"discount_price_added"`
	DiscountPriceApplied int                  `json:"discount_price_applied"`
	VoucherPriceAdded    int                  `json:"voucher_price_added"`
	VoucherPriceApplied  int                  `json:"voucher_price_applied"`
	TransactionProduct   []TransactionProduct `json:"transaction_product"` // order line items
	base.ModelHasAudit                        // embedded audit columns (created/updated metadata)
}
package auth

import (
	"github.com/kataras/golog"
	"github.com/kataras/iris/v12/sessions/sessiondb/redis"
	"time"
)

// RedisManagerOfAuth bundles the redis session database used for auth state
// together with its address and a logger.
type RedisManagerOfAuth struct {
	Database *redis.Database
	Addr     string
	Logger   *golog.Logger
}

// Init connects to the local redis instance and returns a ready-to-use
// RedisManagerOfAuth.
func Init(logger *golog.Logger) RedisManagerOfAuth {
	// Local variable renamed from "redis" so it no longer shadows the
	// imported package name.
	addr := "127.0.0.1:6379"
	db := redis.New(redis.Config{
		Network:   "tcp",
		Addr:      addr,
		Timeout:   time.Duration(30) * time.Second,
		MaxActive: 10,
		Username:  "",
		Password:  "",
		Database:  "",
		Prefix:    "Gnemes-Auth",
		Driver:    redis.GoRedis(),
	})
	logger.Info("Init redis database successfully")
	return RedisManagerOfAuth{Logger: logger, Addr: addr, Database: db}
}

// SetAuthInfo is a placeholder for storing auth information; not implemented.
func (manager RedisManagerOfAuth) SetAuthInfo() {

}
package main

import (
	"bufio"
	"fmt"
	"log"
	"net"
	"time"
)

// main runs a line-oriented ROT13 decryption service on TCP port 8080; each
// connection is handled concurrently.
func main() {
	listener, err := net.Listen("tcp", ":8080")
	if err != nil {
		log.Fatalln(err)
	}
	for {
		conn, err := listener.Accept()
		if err != nil {
			log.Println(err)
			// Fix: on an Accept error conn is nil; the old code still spawned
			// handleRequest(conn), which would panic. Skip to the next accept.
			continue
		}
		go handleRequest(conn)
	}
}

// handleRequest reads lines from the connection (with a 2-second deadline)
// and echoes each back together with its ROT13 decryption.
func handleRequest(conn net.Conn) {
	err := conn.SetDeadline(time.Now().Add(2 * time.Second))
	if err != nil {
		log.Println(err)
	}
	defer conn.Close()
	scanner := bufio.NewScanner(conn)
	for scanner.Scan() {
		cryptText := scanner.Text()
		fmt.Fprintf(conn, "%s -> %s\n", cryptText, decryptRot13([]byte(cryptText)))
	}
	// Fix: surface scan errors (e.g. deadline exceeded) instead of silently
	// dropping them.
	if err := scanner.Err(); err != nil {
		log.Println(err)
	}
}

// decryptRot13 returns a new buffer with every ASCII letter rotated by 13
// positions; all other bytes are copied unchanged.
func decryptRot13(buffer []byte) []byte {
	decryptBuffer := make([]byte, len(buffer))
	for index, b := range buffer {
		// ascii 97 - 122 a-z
		// ascii 65 - 90  A-Z
		switch {
		case 65 <= b && b <= 77, 97 <= b && b <= 109: // A-M / a-m wrap forward
			decryptBuffer[index] = b + 13
		case 78 <= b && b <= 90, 110 <= b && b <= 122: // N-Z / n-z wrap back
			decryptBuffer[index] = b - 13
		default:
			decryptBuffer[index] = b
		}
	}
	return decryptBuffer
}
package main

import (
	"flag"
	"fmt"
	"time"

	"github.com/vharitonsky/iniflags"
)

// sl is a repeatable string flag value (implements flag.Value).
type sl []string

// Mongo holds MongoDB connection settings.
type Mongo struct {
	Addresses []string // host:port pairs
	User      string
	Pass      string
}

// Config is the full program configuration assembled from flags / ini file.
type Config struct {
	Interval time.Duration // polling interval
	Mongo    Mongo
}

// String implements flag.Value.
func (s *sl) String() string {
	return fmt.Sprintf("%s", *s)
}

// Set appends one occurrence of the repeatable flag.
func (s *sl) Set(value string) error {
	*s = append(*s, value)
	return nil
}

// mongo_addresses collects every -mongo_address flag occurrence.
// NOTE(review): Go naming convention would call this mongoAddresses.
var mongo_addresses sl

// LoadConfig parses the command line (and any ini file, via iniflags) and
// returns the resulting Config, defaulting the mongo address list to
// localhost:27017 when none was supplied.
func LoadConfig() Config {
	var (
		mongo_user = flag.String("mongo_user", "", "MongoDB User")
		mongo_pass = flag.String("mongo_pass", "", "MongoDB Password")
		interval   = flag.Duration("interval", 5*time.Second, "Polling interval")
	)
	flag.Var(&mongo_addresses, "mongo_address", "List of mongo addresses in host:port format")
	iniflags.Parse()
	if len(mongo_addresses) == 0 {
		mongo_addresses = append(mongo_addresses, "localhost:27017")
	}
	cfg := Config{
		Interval: *interval,
		Mongo: Mongo{
			Addresses: mongo_addresses,
			User:      *mongo_user,
			Pass:      *mongo_pass,
		},
	}
	return cfg
}
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"

	"github.com/urfave/cli"
)

// main wires up the CLI application; the first positional argument is the
// directory to clean recursively.
func main() {
	app := cli.NewApp()
	app.Name = "clean_up"
	app.Usage = "deletes garbage files"
	app.Version = Version
	app.Author = "onody"
	app.Email = "onodera212@gmail.com"
	app.Action = func(c *cli.Context) error {
		root := c.Args().Get(0)
		doCleanUp(root, root)
		return nil
	}
	app.Run(os.Args)
}

// doCleanUp walks searchPath, removing any entry whose name matches
// targetFiles, then descends into each sub-directory.
func doCleanUp(rootPath string, searchPath string) {
	entries, err := ioutil.ReadDir(searchPath)
	if err != nil {
		panic(err)
	}
	for _, entry := range entries {
		childPath := filepath.Join(searchPath, entry.Name())
		delete(childPath, entry.Name())
		if entry.IsDir() {
			doCleanUp(rootPath, childPath)
		}
	}
}

// delete removes fullPath when name matches one of the configured
// targetFiles, reporting the outcome on stdout.
// (Name kept for compatibility with the rest of the package, although it
// shadows the built-in delete.)
func delete(fullPath string, name string) {
	for _, target := range targetFiles {
		if name != target {
			continue
		}
		if err := os.Remove(fullPath); err != nil {
			fmt.Println(err)
		} else {
			fmt.Println("deleted!! -> " + fullPath)
		}
	}
}
package main

import (
	"fmt"

	"github.com/jackytck/projecteuler/tools"
)

// solve returns the nth (1-based) lexicographic permutation of the digits
// 0-9, joined into a single integer; 0 if fewer than nth permutations exist.
func solve(nth int) int {
	digits := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
	seen := 0
	for p := range tools.Perms(digits) {
		seen++
		if seen == nth {
			return tools.JoinInts(p)
		}
	}
	return 0
}

func main() {
	fmt.Println(solve(1000000))
}

// The millionth lexicographic permutation of the digits: [0, 9].
package controllers import ( "github.com/astaxie/beego" ) type MainController struct { beego.Controller } func (this *MainController) Get() { this.Data["Title"] = "企业信用查询系统" this.Data["URL"] = URL this.Layout = "layouts/nav.html" this.TplName = "index.html" this.Render() }
package repository_test

import (
	"context"
	"testing"
	"time"

	"github.com/dheerajgopi/todo-api/models"
	"github.com/stretchr/testify/assert"

	"github.com/DATA-DOG/go-sqlmock"
	"github.com/dheerajgopi/todo-api/task/repository"
)

// TestGetByID verifies that GetByID prepares the expected SELECT, binds the
// task ID, and maps the single returned row into a task.
func TestGetByID(t *testing.T) {
	db, mock, err := sqlmock.New()
	if err != nil {
		t.Fatalf("Unexpected error while opening stub DB connection: %s", err)
	}
	defer db.Close()

	rows := sqlmock.
		NewRows([]string{"id", "title", "description", "created_by", "is_complete", "created_at", "updated_at"}).
		AddRow(1, "title", "description", 1, false, time.Now(), time.Now())

	taskID := int64(1)
	// Note: the query is a regex for sqlmock, hence the escaped "?".
	query := "SELECT id, title, description, created_by, is_complete, created_at, updated_at FROM task WHERE id=\\?"
	prep := mock.ExpectPrepare(query)
	prep.ExpectQuery().WithArgs(taskID).WillReturnRows(rows)

	repo := repository.New(db)
	task, err := repo.GetByID(context.TODO(), taskID)
	assert.NoError(t, err)
	assert.NotNil(t, task)
}

// TestCreate verifies that Create runs the INSERT inside a transaction with
// all task fields bound, and that the generated ID is written back.
func TestCreate(t *testing.T) {
	now := time.Now()
	task := &models.Task{
		Title:       "title",
		Description: "description",
		CreatedBy: &models.User{
			ID: int64(1),
		},
		IsComplete: false,
		CreatedAt:  now,
		UpdatedAt:  now,
	}

	db, mock, err := sqlmock.New()
	if err != nil {
		t.Fatalf("Unexpected error while opening stub DB connection: %s", err)
	}
	defer db.Close()

	query := "INSERT INTO task \\(title, description, created_by, is_complete, created_at, updated_at\\) VALUES \\(\\?, \\?, \\?, \\?, \\?, \\?\\)"
	mock.ExpectBegin()
	mock.ExpectExec(query).WithArgs(
		task.Title,
		task.Description,
		task.CreatedBy.ID,
		task.IsComplete,
		task.CreatedAt,
		task.UpdatedAt,
	).WillReturnResult(sqlmock.NewResult(2, 1)) // insert ID 2, 1 row affected
	mock.ExpectCommit()

	repo := repository.New(db)
	err = repo.Create(context.TODO(), task)
	assert.NoError(t, err)
	// The LastInsertId from the mock result must be copied onto the task.
	assert.Equal(t, int64(2), task.ID)
}

// TestGetAllByUserID verifies that GetAllByUserID filters on created_by and
// maps every returned row.
func TestGetAllByUserID(t *testing.T) {
	assert := assert.New(t)
	db, mock, err := sqlmock.New()
	if err != nil {
		t.Fatalf("Unexpected error while opening stub DB connection: %s", err)
	}
	defer db.Close()

	rows := sqlmock.
		NewRows([]string{"id", "title", "description", "created_by", "is_complete", "created_at", "updated_at"}).
		AddRow(1, "title", "description", 1, false, time.Now(), time.Now())

	userID := int64(1)
	query := "SELECT id, title, description, created_by, is_complete, created_at, updated_at FROM task WHERE created_by=\\?"
	prep := mock.ExpectPrepare(query)
	prep.ExpectQuery().WithArgs(userID).WillReturnRows(rows)

	repo := repository.New(db)
	tasks, err := repo.GetAllByUserID(context.TODO(), userID)
	assert.NoError(err)
	assert.NotNil(tasks)
	assert.Equal(1, len(tasks))
}
package addtwonumbers

import (
	"math/big"
	"strconv"
	"strings"
)

// ListNode represents nonzero integer number
type ListNode struct {
	Val  int
	Next *ListNode
}

// convertLinkedListToInt interprets the list as a little-endian sequence of
// digits (head = least significant) and returns the corresponding big.Int.
func convertLinkedListToInt(node *ListNode) *big.Int {
	var digits []string
	for curr := node; curr != nil; curr = curr.Next {
		digits = append(digits, strconv.Itoa(curr.Val))
	}
	value := new(big.Int)
	value.SetString(reverse(strings.Join(digits, "")), 10)
	return value
}

// reverse returns s with its bytes in the opposite order (digits are ASCII,
// so byte-wise reversal is safe here).
func reverse(stringToReverse string) string {
	size := len(stringToReverse)
	out := make([]byte, size)
	for i := 0; i < size; i++ {
		out[i] = stringToReverse[size-1-i]
	}
	return string(out)
}

// convertIntSliceToLinkedList builds a list from intSlice starting at
// elementsIndex, preserving slice order head-first.
func convertIntSliceToLinkedList(intSlice []int, elementsIndex int) *ListNode {
	var head *ListNode
	// Build back-to-front so each node can point at the one built before it.
	for i := len(intSlice) - 1; i >= elementsIndex; i-- {
		head = &ListNode{Val: intSlice[i], Next: head}
	}
	return head
}

// convertStringToLinkedList turns a decimal string into a digit list with the
// least significant digit at the head.
func convertStringToLinkedList(numAsString string) *ListNode {
	var digits []int
	for i := len(numAsString) - 1; i >= 0; i-- {
		d, _ := strconv.Atoi(string(numAsString[i]))
		digits = append(digits, d)
	}
	return convertIntSliceToLinkedList(digits, 0)
}

// addTwoNumbers adds two numbers stored as reversed-digit linked lists and
// returns the sum in the same representation.
func addTwoNumbers(l1 *ListNode, l2 *ListNode) *ListNode {
	sum := new(big.Int).Add(convertLinkedListToInt(l1), convertLinkedListToInt(l2))
	return convertStringToLinkedList(sum.String())
}
package netstat

import (
	"fmt"
	"net"
)

// SockAddr represents an ip:port pair
type SockAddr struct {
	IP   net.IP
	Port uint16
}

func (s *SockAddr) String() string {
	return fmt.Sprintf("%v:%d", s.IP, s.Port)
}

// SockTabEntry type represents each line of the /proc/net/[tcp|udp]
type SockTabEntry struct {
	ino        string // inode field from the proc table line
	LocalAddr  *SockAddr
	RemoteAddr *SockAddr
	State      SkState
	UID        uint32
	Process    *Process // may be nil when the owning process cannot be resolved
}

// Process holds the PID and process name to which each socket belongs
type Process struct {
	Pid  int
	Name string
}

func (p *Process) String() string {
	return fmt.Sprintf("%d/%s", p.Pid, p.Name)
}

// SkState type represents socket connection state
type SkState uint8

// String maps the state to its display name via skStates (defined elsewhere
// in this package).
func (s SkState) String() string {
	return skStates[s]
}

// AcceptFn is used to filter socket entries. The value returned indicates
// whether the element is to be appended to the socket list.
type AcceptFn func(*SockTabEntry) bool

// NoopFilter - a test function returning true for all elements
func NoopFilter(*SockTabEntry) bool { return true }

// The os*Socks implementations below are provided by platform-specific files
// in this package.

// TCPSocks returns a slice of active TCP sockets containing only those
// elements that satisfy the accept function
func TCPSocks(accept AcceptFn) ([]SockTabEntry, error) {
	return osTCPSocks(accept)
}

// TCP6Socks returns a slice of active TCP IPv4 sockets containing only those
// elements that satisfy the accept function
func TCP6Socks(accept AcceptFn) ([]SockTabEntry, error) {
	return osTCP6Socks(accept)
}

// UDPSocks returns a slice of active UDP sockets containing only those
// elements that satisfy the accept function
func UDPSocks(accept AcceptFn) ([]SockTabEntry, error) {
	return osUDPSocks(accept)
}

// UDP6Socks returns a slice of active UDP IPv6 sockets containing only those
// elements that satisfy the accept function
func UDP6Socks(accept AcceptFn) ([]SockTabEntry, error) {
	return osUDP6Socks(accept)
}
package cloudflare

import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// Test_toUTS46ASCII verifies the UTS-46 / punycode normalization of
// record names, including the deliberately relaxed validation modes
// (STD3, bidi, joiners) and partial results on malformed input.
func Test_toUTS46ASCII(t *testing.T) {
	tests := map[string]struct {
		domain   string
		expected string
	}{
		"empty stays empty": {
			domain:   "",
			expected: "",
		},
		"unicode gets encoded": {
			domain:   "😺.com",
			expected: "xn--138h.com",
		},
		"unicode gets mapped and encoded": {
			domain:   "ÖBB.at",
			expected: "xn--bb-eka.at",
		},
		"punycode stays punycode": {
			domain:   "xn--138h.com",
			expected: "xn--138h.com",
		},
		"hyphens are not checked": {
			domain:   "s3--s4.com",
			expected: "s3--s4.com",
		},
		"STD3 rules are not enforced": {
			domain:   "℀.com",
			expected: "a/c.com",
		},
		"bidi check is disabled": {
			domain:   "englishﻋﺮﺑﻲ.com",
			expected: "xn--english-gqjzfwd1j.com",
		},
		"invalid joiners are allowed": {
			domain:   "a\u200cb.com",
			expected: "xn--ab-j1t.com",
		},
		"partial results are used despite errors": {
			domain:   "xn--:D.xn--.😺.com",
			expected: "xn--:d..xn--138h.com",
		},
	}

	for name, tt := range tests {
		t.Run(name, func(t *testing.T) {
			actual := toUTS46ASCII(tt.domain)
			assert.Equal(t, tt.expected, actual)
		})
	}
}

// TestCreateDNSRecord checks that a unicode record name is sent to the
// API in punycode form and that the response is decoded into DNSRecord.
func TestCreateDNSRecord(t *testing.T) {
	setup()
	defer teardown()

	priority := uint16(10)
	proxied := false
	// The handler must see the ASCII (punycode) form of the 😺 name.
	asciiInput := DNSRecord{
		Type:     "A",
		Name:     "xn--138h.example.com",
		Content:  "198.51.100.4",
		TTL:      120,
		Priority: &priority,
		Proxied:  &proxied,
	}

	handler := func(w http.ResponseWriter, r *http.Request) {
		assert.Equal(t, http.MethodPost, r.Method, "Expected method 'POST', got %s", r.Method)

		var v DNSRecord
		err := json.NewDecoder(r.Body).Decode(&v)
		require.NoError(t, err)
		assert.Equal(t, asciiInput, v)

		w.Header().Set("content-type", "application/json")
		fmt.Fprint(w, `{
  "success": true,
  "errors": [],
  "messages": [],
  "result": {
    "id": "372e67954025e0ba6aaa6d586b9e0b59",
    "type": "A",
    "name": "xn--138h.example.com",
    "content": "198.51.100.4",
    "proxiable": true,
    "proxied": false,
    "ttl": 120,
    "locked": false,
    "zone_id": "d56084adb405e0b7e32c52321bf07be6",
    "zone_name": "example.com",
    "created_on": "2014-01-01T05:20:00Z",
    "modified_on": "2014-01-01T05:20:00Z",
    "data": {},
    "meta": {
      "auto_added": true,
      "source": "primary"
    }
  }
}`)
	}

	mux.HandleFunc("/zones/"+testZoneID+"/dns_records", handler)

	createdOn, _ := time.Parse(time.RFC3339, "2014-01-01T05:20:00Z")
	modifiedOn, _ := time.Parse(time.RFC3339, "2014-01-01T05:20:00Z")
	want := DNSRecord{
		ID:         "372e67954025e0ba6aaa6d586b9e0b59",
		Type:       asciiInput.Type,
		Name:       asciiInput.Name,
		Content:    asciiInput.Content,
		Proxiable:  true,
		Proxied:    asciiInput.Proxied,
		TTL:        asciiInput.TTL,
		ZoneID:     testZoneID,
		ZoneName:   "example.com",
		CreatedOn:  createdOn,
		ModifiedOn: modifiedOn,
		Data:       map[string]interface{}{},
		Meta: map[string]interface{}{
			"auto_added": true,
			"source":     "primary",
		},
	}

	// Missing zone ID must fail before any HTTP call.
	_, err := client.CreateDNSRecord(context.Background(), ZoneIdentifier(""), CreateDNSRecordParams{})
	assert.ErrorIs(t, err, ErrMissingZoneID)

	actual, err := client.CreateDNSRecord(context.Background(), ZoneIdentifier(testZoneID), CreateDNSRecordParams{
		Type:     "A",
		Name:     "😺.example.com",
		Content:  "198.51.100.4",
		TTL:      120,
		Priority: &priority,
		Proxied:  &proxied})
	require.NoError(t, err)
	assert.Equal(t, want, actual)
}

// TestListDNSRecords checks query-parameter encoding (including the
// punycode name) and response decoding for the list endpoint.
func TestListDNSRecords(t *testing.T) {
	setup()
	defer teardown()

	asciiInput := DNSRecord{
		Name:    "xn--138h.example.com",
		Type:    "A",
		Content: "198.51.100.4",
	}

	handler := func(w http.ResponseWriter, r *http.Request) {
		assert.Equal(t, http.MethodGet, r.Method, "Expected method 'GET', got %s", r.Method)
		assert.Equal(t, asciiInput.Name, r.URL.Query().Get("name"))
		assert.Equal(t, asciiInput.Type, r.URL.Query().Get("type"))
		assert.Equal(t, asciiInput.Content, r.URL.Query().Get("content"))

		w.Header().Set("content-type", "application/json")
		fmt.Fprint(w, `{
  "success": true,
  "errors": [],
  "messages": [],
  "result": [
    {
      "id": "372e67954025e0ba6aaa6d586b9e0b59",
      "type": "A",
      "name": "xn--138h.example.com",
      "content": "198.51.100.4",
      "proxiable": true,
      "proxied": false,
      "ttl": 120,
      "locked": false,
      "zone_id": "d56084adb405e0b7e32c52321bf07be6",
      "zone_name": "example.com",
      "created_on": "2014-01-01T05:20:00Z",
      "modified_on": "2014-01-01T05:20:00Z",
      "data": {},
      "meta": {
        "auto_added": true,
        "source": "primary"
      }
    }
  ],
  "result_info": {
    "count": 1,
    "page": 1,
    "per_page": 20,
    "total_count": 2000
  }
}`)
	}

	mux.HandleFunc("/zones/"+testZoneID+"/dns_records", handler)

	proxied := false
	createdOn, _ := time.Parse(time.RFC3339, "2014-01-01T05:20:00Z")
	modifiedOn, _ := time.Parse(time.RFC3339, "2014-01-01T05:20:00Z")
	want := []DNSRecord{{
		ID:         "372e67954025e0ba6aaa6d586b9e0b59",
		Type:       "A",
		Name:       asciiInput.Name,
		Content:    asciiInput.Content,
		Proxiable:  true,
		Proxied:    &proxied,
		TTL:        120,
		Locked:     false,
		ZoneID:     testZoneID,
		ZoneName:   "example.com",
		CreatedOn:  createdOn,
		ModifiedOn: modifiedOn,
		Data:       map[string]interface{}{},
		Meta: map[string]interface{}{
			"auto_added": true,
			"source":     "primary",
		},
	}}

	_, _, err := client.ListDNSRecords(context.Background(), ZoneIdentifier(""), ListDNSRecordsParams{})
	assert.ErrorIs(t, err, ErrMissingZoneID)

	actual, _, err := client.ListDNSRecords(context.Background(), ZoneIdentifier(testZoneID), ListDNSRecordsParams{
		Name:    "😺.example.com",
		Type:    "A",
		Content: "198.51.100.4",
	})
	require.NoError(t, err)

	assert.Equal(t, want, actual)
}

// TestListDNSRecordsSearch exercises the full set of search filters
// (match, order, direction, tag filters) and the result-info total.
func TestListDNSRecordsSearch(t *testing.T) {
	setup()
	defer teardown()

	recordInput := DNSRecord{
		Name:    "example.com",
		Type:    "A",
		Content: "198.51.100.4",
	}

	handler := func(w http.ResponseWriter, r *http.Request) {
		assert.Equal(t, http.MethodGet, r.Method, "Expected method 'GET', got %s", r.Method)
		assert.Equal(t, recordInput.Name, r.URL.Query().Get("name"))
		assert.Equal(t, recordInput.Type, r.URL.Query().Get("type"))
		assert.Equal(t, recordInput.Content, r.URL.Query().Get("content"))
		assert.Equal(t, "all", r.URL.Query().Get("match"))
		assert.Equal(t, "1", r.URL.Query().Get("page"))
		assert.Equal(t, "type", r.URL.Query().Get("order"))
		assert.Equal(t, "asc", r.URL.Query().Get("direction"))
		assert.Equal(t, "any", r.URL.Query().Get("tag-match"))
		assert.ElementsMatch(t, []string{"tag1", "tag2"}, r.URL.Query()["tag"])

		w.Header().Set("content-type", "application/json")
		fmt.Fprint(w, `{
  "success": true,
  "errors": [],
  "messages": [],
  "result": [
    {
      "id": "372e67954025e0ba6aaa6d586b9e0b59",
      "type": "A",
      "name": "example.com",
      "content": "198.51.100.4",
      "proxiable": true,
      "proxied": true,
      "ttl": 120,
      "locked": false,
      "zone_id": "d56084adb405e0b7e32c52321bf07be6",
      "zone_name": "example.com",
      "created_on": "2014-01-01T05:20:00Z",
      "modified_on": "2014-01-01T05:20:00Z",
      "data": {},
      "meta": {
        "auto_added": true,
        "source": "primary"
      },
      "tags": ["tag1", "tag2extended"]
    }
  ],
  "result_info": {
    "count": 1,
    "page": 1,
    "per_page": 20,
    "total_count": 2000
  }
}`)
	}

	mux.HandleFunc("/zones/"+testZoneID+"/dns_records", handler)

	proxied := true
	createdOn, _ := time.Parse(time.RFC3339, "2014-01-01T05:20:00Z")
	modifiedOn, _ := time.Parse(time.RFC3339, "2014-01-01T05:20:00Z")
	want := []DNSRecord{{
		ID:         "372e67954025e0ba6aaa6d586b9e0b59",
		Type:       "A",
		Name:       recordInput.Name,
		Content:    recordInput.Content,
		Proxiable:  true,
		Proxied:    &proxied,
		TTL:        120,
		Locked:     false,
		ZoneID:     testZoneID,
		ZoneName:   "example.com",
		CreatedOn:  createdOn,
		ModifiedOn: modifiedOn,
		Data:       map[string]interface{}{},
		Meta: map[string]interface{}{
			"auto_added": true,
			"source":     "primary",
		},
		Tags: []string{"tag1", "tag2extended"},
	}}

	actual, resultInfo, err := client.ListDNSRecords(context.Background(), ZoneIdentifier(testZoneID), ListDNSRecordsParams{
		ResultInfo: ResultInfo{
			Page: 1,
		},
		Match:     "all",
		Order:     "type",
		Direction: ListDirectionAsc,
		Name:      "example.com",
		Type:      "A",
		Content:   "198.51.100.4",
		TagMatch:  "any",
		Tags:      []string{"tag1", "tag2"},
	})
	require.NoError(t, err)
	assert.Equal(t, 2000, resultInfo.Total)

	assert.Equal(t, want, actual)
}

// TestListDNSRecordsPagination forces the client to page through two
// fixture responses and checks every returned record against them.
func TestListDNSRecordsPagination(t *testing.T) {
	// Shrink the default page size so the 5 fixture records span two
	// pages and pagination is exercised.
	listDNSRecordsDefaultPageSize = 3

	setup()
	defer teardown()

	var page1Called, page2Called bool
	handler := func(w http.ResponseWriter, r *http.Request) {
		page := r.URL.Query().Get("page")
		w.Header().Set("content-type", "application/json")
		var response string
		switch page {
		case "1":
			response = loadFixture("dns", "list_page_1")
			page1Called = true
		case "2":
			response = loadFixture("dns", "list_page_2")
			page2Called = true
		default:
			// Fix: Failf takes (failureMessage, msg, args...); the old call
			// passed page as the msg parameter so it was never interpolated.
			assert.Failf(t, "unexpected page requested", "page: %s", page)
			return
		}
		fmt.Fprint(w, response)
	}

	mux.HandleFunc("/zones/"+testZoneID+"/dns_records", handler)

	actual, _, err := client.ListDNSRecords(context.Background(), ZoneIdentifier(testZoneID), ListDNSRecordsParams{})
	require.NoError(t, err)
	assert.True(t, page1Called)
	assert.True(t, page2Called)
	assert.Len(t, actual, 5)

	// Collect the expected records from both fixture pages, keyed by ID.
	type ls struct {
		Results []map[string]interface{} `json:"result"`
	}
	expectedRecords := make(map[string]map[string]interface{})

	response1 := loadFixture("dns", "list_page_1")
	var fixtureDataPage1 ls
	err = json.Unmarshal([]byte(response1), &fixtureDataPage1)
	assert.NoError(t, err)
	for _, record := range fixtureDataPage1.Results {
		expectedRecords[record["id"].(string)] = record
	}

	response2 := loadFixture("dns", "list_page_2")
	var fixtureDataPage2 ls
	err = json.Unmarshal([]byte(response2), &fixtureDataPage2)
	assert.NoError(t, err)
	for _, record := range fixtureDataPage2.Results {
		expectedRecords[record["id"].(string)] = record
	}

	for _, actualRecord := range actual {
		expected, exist := expectedRecords[actualRecord.ID]
		assert.True(t, exist, "DNS record doesn't exist in fixtures")
		assert.Equal(t, expected["type"].(string), actualRecord.Type)
		assert.Equal(t, expected["name"].(string), actualRecord.Name)
		assert.Equal(t, expected["content"].(string), actualRecord.Content)
		assert.Equal(t, expected["proxiable"].(bool), actualRecord.Proxiable)
		assert.Equal(t, expected["proxied"].(bool), *actualRecord.Proxied)
		assert.Equal(t, int(expected["ttl"].(float64)), actualRecord.TTL)
		assert.Equal(t, expected["locked"].(bool), actualRecord.Locked)
		assert.Equal(t, expected["zone_id"].(string), actualRecord.ZoneID)
		assert.Equal(t, expected["zone_name"].(string), actualRecord.ZoneName)
		assert.Equal(t, expected["data"], actualRecord.Data)
		assert.Equal(t, expected["meta"], actualRecord.Meta)
	}
}

// TestGetDNSRecord checks single-record fetch, including comment and
// tags decoding, plus the missing-ID error paths.
func TestGetDNSRecord(t *testing.T) {
	setup()
	defer teardown()

	handler := func(w http.ResponseWriter, r *http.Request) {
		assert.Equal(t, http.MethodGet, r.Method, "Expected method 'GET', got %s", r.Method)
		w.Header().Set("content-type", "application/json")
		fmt.Fprint(w, `{
  "success": true,
  "errors": [],
  "messages": [],
  "result": {
    "id": "372e67954025e0ba6aaa6d586b9e0b59",
    "type": "A",
    "name": "example.com",
    "content": "198.51.100.4",
    "proxiable": true,
    "proxied": false,
    "ttl": 120,
    "locked": false,
    "zone_id": "d56084adb405e0b7e32c52321bf07be6",
    "zone_name": "example.com",
    "created_on": "2014-01-01T05:20:00Z",
    "modified_on": "2014-01-01T05:20:00Z",
    "data": {},
    "meta": {
      "auto_added": true,
      "source": "primary"
    },
    "comment": "This is a comment",
    "tags": ["tag1", "tag2"]
  }
}`)
	}

	dnsRecordID := "372e67954025e0ba6aaa6d586b9e0b59"
	mux.HandleFunc("/zones/"+testZoneID+"/dns_records/"+dnsRecordID, handler)

	proxied := false
	createdOn, _ := time.Parse(time.RFC3339, "2014-01-01T05:20:00Z")
	modifiedOn, _ := time.Parse(time.RFC3339, "2014-01-01T05:20:00Z")
	want := DNSRecord{
		ID:         dnsRecordID,
		Type:       "A",
		Name:       "example.com",
		Content:    "198.51.100.4",
		Proxiable:  true,
		Proxied:    &proxied,
		TTL:        120,
		ZoneID:     testZoneID,
		ZoneName:   "example.com",
		CreatedOn:  createdOn,
		ModifiedOn: modifiedOn,
		Data:       map[string]interface{}{},
		Meta: map[string]interface{}{
			"auto_added": true,
			"source":     "primary",
		},
		Comment: "This is a comment",
		Tags:    []string{"tag1", "tag2"},
	}

	_, err := client.GetDNSRecord(context.Background(), ZoneIdentifier(""), dnsRecordID)
	assert.ErrorIs(t, err, ErrMissingZoneID)

	_, err = client.GetDNSRecord(context.Background(), ZoneIdentifier(testZoneID), "")
	assert.ErrorIs(t, err, ErrMissingDNSRecordID)

	actual, err := client.GetDNSRecord(context.Background(), ZoneIdentifier(testZoneID), dnsRecordID)
	require.NoError(t, err)
	assert.Equal(t, want, actual)
}

// TestUpdateDNSRecord checks that updates are sent as PATCH with the
// punycode-normalized name, plus the missing-ID error paths.
func TestUpdateDNSRecord(t *testing.T) {
	setup()
	defer teardown()

	proxied := false
	input := DNSRecord{
		ID:      "372e67954025e0ba6aaa6d586b9e0b59",
		Type:    "A",
		Name:    "xn--138h.example.com",
		Content: "198.51.100.4",
		TTL:     120,
		Proxied: &proxied,
	}

	handler := func(w http.ResponseWriter, r *http.Request) {
		assert.Equal(t, http.MethodPatch, r.Method, "Expected method 'PATCH', got %s", r.Method)

		var v DNSRecord
		err := json.NewDecoder(r.Body).Decode(&v)
		require.NoError(t, err)
		// The request body omits the ID (it is in the URL); inject it so
		// the struct comparison below covers the remaining fields.
		v.ID = "372e67954025e0ba6aaa6d586b9e0b59"
		assert.Equal(t, input, v)

		w.Header().Set("content-type", "application/json")
		fmt.Fprint(w, `{
  "success": true,
  "errors": [],
  "messages": [],
  "result": {
    "id": "372e67954025e0ba6aaa6d586b9e0b59",
    "type": "A",
    "name": "example.com",
    "content": "198.51.100.4",
    "proxiable": true,
    "proxied": false,
    "ttl": 120,
    "locked": false,
    "zone_id": "d56084adb405e0b7e32c52321bf07be6",
    "zone_name": "example.com",
    "created_on": "2014-01-01T05:20:00Z",
    "modified_on": "2014-01-01T05:20:00Z",
    "data": {},
    "meta": {
      "auto_added": true,
      "source": "primary"
    }
  }
}`)
	}

	dnsRecordID := "372e67954025e0ba6aaa6d586b9e0b59"
	mux.HandleFunc("/zones/"+testZoneID+"/dns_records/"+dnsRecordID, handler)

	_, err := client.UpdateDNSRecord(context.Background(), ZoneIdentifier(""), UpdateDNSRecordParams{ID: dnsRecordID})
	assert.ErrorIs(t, err, ErrMissingZoneID)

	_, err = client.UpdateDNSRecord(context.Background(), ZoneIdentifier(testZoneID), UpdateDNSRecordParams{})
	assert.ErrorIs(t, err, ErrMissingDNSRecordID)

	_, err = client.UpdateDNSRecord(context.Background(), ZoneIdentifier(testZoneID), UpdateDNSRecordParams{
		ID:      dnsRecordID,
		Type:    "A",
		Name:    "😺.example.com",
		Content: "198.51.100.4",
		TTL:     120,
		Proxied: &proxied,
	})
	require.NoError(t, err)
}

// TestUpdateDNSRecord_ClearComment checks that an empty Comment is
// serialized in the PATCH body so the API clears the existing comment.
func TestUpdateDNSRecord_ClearComment(t *testing.T) {
	setup()
	defer teardown()

	input := DNSRecord{
		ID:      "372e67954025e0ba6aaa6d586b9e0b59",
		Comment: "",
	}

	handler := func(w http.ResponseWriter, r *http.Request) {
		assert.Equal(t, http.MethodPatch, r.Method, "Expected method 'PATCH', got %s", r.Method)

		var v DNSRecord
		err := json.NewDecoder(r.Body).Decode(&v)
		require.NoError(t, err)
		v.ID = "372e67954025e0ba6aaa6d586b9e0b59"
		assert.Equal(t, input, v)

		w.Header().Set("content-type", "application/json")
		fmt.Fprint(w, `{
  "success": true,
  "errors": [],
  "messages": [],
  "result": {
    "id": "372e67954025e0ba6aaa6d586b9e0b59",
    "type": "A",
    "name": "example.com",
    "content": "198.51.100.4",
    "proxiable": true,
    "proxied": false,
    "ttl": 120,
    "locked": false,
    "zone_id": "d56084adb405e0b7e32c52321bf07be6",
    "zone_name": "example.com",
    "created_on": "2014-01-01T05:20:00Z",
    "modified_on": "2014-01-01T05:20:00Z",
    "comment":null,
    "tags":[],
    "data": {},
    "meta": {
      "auto_added": true,
      "source": "primary"
    }
  }
}`)
	}

	dnsRecordID := "372e67954025e0ba6aaa6d586b9e0b59"
	mux.HandleFunc("/zones/"+testZoneID+"/dns_records/"+dnsRecordID, handler)

	_, err := client.UpdateDNSRecord(context.Background(), ZoneIdentifier(testZoneID), UpdateDNSRecordParams{
		ID:      dnsRecordID,
		Comment: "",
	})
	require.NoError(t, err)
}

// TestDeleteDNSRecord checks the DELETE call and both missing-ID paths.
func TestDeleteDNSRecord(t *testing.T) {
	setup()
	defer teardown()

	handler := func(w http.ResponseWriter, r *http.Request) {
		assert.Equal(t, http.MethodDelete, r.Method, "Expected method 'DELETE', got %s", r.Method)
		w.Header().Set("content-type", "application/json")
		fmt.Fprint(w, `{
  "success": true,
  "errors": [],
  "messages": [],
  "result": {
    "id": "372e67954025e0ba6aaa6d586b9e0b59"
  }
}`)
	}

	dnsRecordID := "372e67954025e0ba6aaa6d586b9e0b59"
	mux.HandleFunc("/zones/"+testZoneID+"/dns_records/"+dnsRecordID, handler)

	err := client.DeleteDNSRecord(context.Background(), ZoneIdentifier(""), dnsRecordID)
	assert.ErrorIs(t, err, ErrMissingZoneID)

	err = client.DeleteDNSRecord(context.Background(), ZoneIdentifier(testZoneID), "")
	assert.ErrorIs(t, err, ErrMissingDNSRecordID)

	err = client.DeleteDNSRecord(context.Background(), ZoneIdentifier(testZoneID), dnsRecordID)
	require.NoError(t, err)
}