text
stringlengths
11
4.05M
package bus import ( "fmt" "github.com/go-redis/redis" "github.com/tmtx/res-sys/pkg/bus" "github.com/tmtx/res-sys/pkg/validator" ) type redisBus struct { client *redis.Client subscriptions map[bus.MessageKey][]bus.Callback } type MessageBus interface { Dispatch(m bus.Message) DispatchSync(m bus.Message) Subscribe(key bus.MessageKey, cb bus.Callback) Listen() } func NewRedisMessageBus(options *redis.Options) (bus.MessageBus, error) { client := redis.NewClient(options) _, err := client.Ping().Result() if err != nil { return nil, err } return redisBus{ client: client, subscriptions: map[bus.MessageKey][]bus.Callback{}, }, nil } func (b redisBus) Dispatch(m bus.Message) { b.client.Publish(string(m.Key), &m) } func (b redisBus) DispatchSync(m bus.Message) (validator.Messages, error) { if b.subscriptions[m.Key] == nil { return nil, fmt.Errorf("No callbacks registered for key: " + string(m.Key)) } return b.executeCallbacks(m) } func (b redisBus) Subscribe(key bus.MessageKey, cb bus.Callback) { b.subscriptions[key] = append(b.subscriptions[key], cb) } func (b redisBus) Listen() { for key := range b.subscriptions { go b.handleSubscription(key) } } func (b redisBus) handleSubscription(key bus.MessageKey) { pubsub := b.client.Subscribe(string(key)) _, err := pubsub.Receive() if err != nil { panic(err) } for msg := range pubsub.Channel() { var m bus.Message m.UnmarshalBinary([]byte(msg.Payload)) b.executeCallbacks(m) } } func (b redisBus) executeCallbacks(m bus.Message) (validator.Messages, error) { var allMessages validator.Messages var err error var validatorMessages validator.Messages for _, cb := range b.subscriptions[m.Key] { validatorMessages, err = cb(m.Params) if err != nil { break } allMessages = validator.MergeMessages(allMessages, validatorMessages) } return allMessages, err }
package first

import "fmt"

// Person is a sample struct used to show that an empty interface can
// hold non-primitive values.
type Person struct {
	Name string
}

// Firsteg demonstrates storing values of several concrete types in an
// empty interface and recovering one with a type assertion.
func Firsteg() {
	var box interface{}

	box = 4
	fmt.Println(box)

	box = 4.5
	// Recover the concrete float64 and truncate it to an int.
	truncated := int(box.(float64))
	fmt.Println(box, truncated)

	box = "Parit"
	fmt.Println(box)

	// An interface value can wrap struct types as well.
	box = Person{Name: "Parit Sharma"}
	fmt.Println(box)
}

// Secondeg feeds values of several concrete types through checkType to
// show how a type switch discriminates between them.
func Secondeg() {
	var box interface{} = 3
	checkType(box)

	box = 3.5
	checkType(box)

	box = "Parit"
	checkType(box)

	type MyCustomeType uint
	box = MyCustomeType(5)
	checkType(box)
}

// checkType reports the dynamic type of its argument on stdout.
func checkType(i interface{}) {
	switch i.(type) {
	case int:
		fmt.Println("i am int")
	case float64:
		fmt.Println("I am float")
	case string:
		fmt.Println("I am string")
	default:
		fmt.Println("I am otherwise")
	}
}
package ibmcloud // Metadata contains IBM Cloud metadata (e.g. for uninstalling the cluster). type Metadata struct { AccountID string `json:"accountID"` BaseDomain string `json:"baseDomain"` CISInstanceCRN string `json:"cisInstanceCRN,omitempty"` DNSInstanceID string `json:"dnsInstanceID,omitempty"` Region string `json:"region,omitempty"` ResourceGroupName string `json:"resourceGroupName,omitempty"` VPC string `json:"vpc,omitempty"` Subnets []string `json:"subnets,omitempty"` }
/*
Copyright © 2022 SUSE LLC

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cmd

import (
	"context"

	"github.com/spf13/cobra"
	"golang.org/x/sync/errgroup"

	"github.com/rancher-sandbox/rancher-desktop/src/go/vtunnel/pkg/config"
	"github.com/rancher-sandbox/rancher-desktop/src/go/vtunnel/pkg/vmsock"
)

// hostCmd represents the host command. It reads the tunnel definitions
// from a YAML config file and runs one host-side connector per tunnel,
// returning the first error any connector produces.
var hostCmd = &cobra.Command{
	Use:   "host",
	Short: "vtunnel host process",
	Long:  `vtunnel host process runs on the host machine and binds to localhost and a given port acting as a host end of the tunnel.`,
	RunE: func(cmd *cobra.Command, args []string) error {
		// Errors past this point are runtime failures, not usage errors.
		cmd.SilenceUsage = true
		configPath, err := cmd.Flags().GetString("config-path")
		if err != nil {
			return err
		}
		conf, err := config.NewConfig(configPath)
		if err != nil {
			return err
		}
		// NOTE(review): the derived context is discarded, so connectors
		// are not cancelled when a sibling fails — only Wait's error
		// propagates.
		errs, _ := errgroup.WithContext(context.Background())
		for _, tun := range conf.Tunnel {
			// hostConnector is declared per iteration, so each closure
			// passed to Go captures its own copy.
			hostConnector := vmsock.HostConnector{
				UpstreamServerAddress: tun.UpstreamServerAddress,
				VsockListenPort:       tun.VsockHostPort,
				PeerHandshakePort:     tun.HandshakePort,
			}
			errs.Go(hostConnector.ListenAndDial)
		}
		return errs.Wait()
	},
}

func init() {
	hostCmd.Flags().String("config-path", "", "Path to the vtunnel's yaml configuration file")
	// NOTE(review): MarkFlagRequired's error return is ignored; it only
	// fails if the flag name is wrong, but a check would be safer.
	hostCmd.MarkFlagRequired("config-path")
	rootCmd.AddCommand(hostCmd)
}
package main

import "fmt"

// Demonstrates struct nesting: Address as an explicitly named field
// (Person) versus as an embedded field with promotion (Company).

// Address is the nested value shared by both demonstrations.
type Address struct {
	province string
	city     string
}

// workPlace mirrors Address; it is unused and kept only as part of the
// example.
type workPlace struct {
	province string
	city     string
}

// Person nests Address under an explicit field name, so access goes
// through p.addr.
type Person struct {
	name string
	age  int
	addr Address
}

// Company embeds Address anonymously, promoting its fields so that
// c.city resolves to c.Address.city.
type Company struct {
	name string
	Address
}

func main() {
	resident := Person{
		name: "lujing",
		age:  999,
		addr: Address{province: "广东", city: "韶关"},
	}
	fmt.Println(resident.name, resident.addr)

	firm := Company{
		name:    "越狱",
		Address: Address{province: "广东", city: "深圳"},
	}
	// Embedded-field promotion: firm.city reaches Address.city directly.
	fmt.Println(firm.Address, firm.city)
}
/*
Kirchhoff's current law: the signed sum of all currents at a junction
(positive entering, negative leaving) is zero. Given the list of currents
entering a junction and all but one of the currents leaving it, the
missing outgoing current is sum(entering) - sum(leaving).

Testcases:
[1, 2, 3], [1, 2] = 3
[4, 5, 6], [7, 8] = 0
[5, 7, 3, 4, 5, 2], [8, 4, 5, 2, 1] = 6

The second list always has one item less than the first list.
The output cannot be negative. Smallest program wins.
*/
package main

import "fmt"

func main() {
	fmt.Println(lc([]float64{1, 2, 3}, []float64{1, 2}))
	fmt.Println(lc([]float64{4, 5, 6}, []float64{7, 8}))
	fmt.Println(lc([]float64{5, 7, 3, 4, 5, 2}, []float64{8, 4, 5, 2, 1}))
}

// lc returns the one unlisted outgoing current: the sum of the incoming
// currents minus the sum of the listed outgoing currents.
func lc(in, out []float64) float64 {
	var missing float64
	for _, current := range in {
		missing += current
	}
	for _, current := range out {
		missing -= current
	}
	return missing
}
// Package gotten_test benchmarks request construction through the gotten
// declarative HTTP client against hand-written equivalents (form,
// multipart, and JSON bodies).
package gotten_test

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"github.com/Hexilee/gotten"
	"github.com/Hexilee/gotten/headers"
	"io"
	"mime/multipart"
	"net/http"
	"net/url"
	"os"
	"strconv"
	"testing"
)

type (
	// ChangeParam is form-encoded by gotten according to the field tags.
	ChangeParam struct {
		NewToken   string `type:"form"`
		OldToken   string `type:"form" require:"true"`
		SecureId   string `type:"form" require:"true"`
		TokenSec   string `type:"form"`
		OldSec     string `type:"form"`
		Expiration int    `type:"form" require:"true"`
	}

	// BenchService declares the two endpoints benchmarked against the
	// traditional builders below; gotten fills in the func fields.
	BenchService struct {
		Change func(param *ChangeParam) (*http.Request, error) `method:"POST" path:"change_item"`
		Upload func(param *UploadParam) (*http.Request, error) `method:"POST" path:"add_item"`
	}

	// GitService declares a JSON-bodied endpoint with path parameters.
	GitService struct {
		UpdateDeployKey func(param *UpdateParam) (*http.Request, error) `method:"PUT" path:"/projects/{id}/deploy_keys/{key_id}"`
	}

	// UploadParam is encoded as multipart form data; defaults in the
	// tags mirror the fallbacks applied in the traditional builder.
	UploadParam struct {
		PhpSession string          `type:"part" key:"PHP_SESSION_UPLOAD_PROGRESS" default:"qscbox"`
		Filecount  int             `type:"part" default:"1"`
		File       gotten.FilePath `type:"part" require:"true"`
		Callback   string          `type:"part" default:"handleUploadCallback"`
		IsIe9      int             `type:"part" default:"0"`
	}

	// UpdateParam combines path parameters with a JSON body.
	UpdateParam struct {
		Id    string `type:"path"`
		KeyId string `type:"path"`
		Key   *Key   `type:"json" require:"true"`
	}

	// Key is the JSON payload for UpdateDeployKey.
	Key struct {
		Title   string `json:"title"`
		Key     string `json:"key,omitempty"`
		CanPush bool   `json:"can_push"`
	}
)

var (
	service    = new(BenchService)
	gitService = new(GitService)

	// Shared fixture parameters reused across all benchmark iterations.
	changeParam = &ChangeParam{
		OldToken:   "test",
		SecureId:   "8nx1391907c5971n9112321d9y",
		Expiration: 86400,
	}
	uploadParam = &UploadParam{
		File: gotten.FilePath("testAssets/avatar.jpg"),
	}
	updateParam = &UpdateParam{
		Id:    "12",
		KeyId: "1234",
		Key: &Key{
			Title:   "Push Key",
			CanPush: true,
		},
	}
)

// init wires the service structs to their base URLs; any setup failure
// aborts the benchmark binary.
func init() {
	creator, err := gotten.NewBuilder().
		SetBaseUrl("https://box.zjuqsc.com/item/").
		Build()
	if err != nil {
		panic(err)
	}
	err = creator.Impl(service)
	if err != nil {
		panic(err)
	}
	gitCreator, err := gotten.NewBuilder().
		SetBaseUrl("https://git.zjuqsc.com/").
		Build()
	if err != nil {
		panic(err)
	}
	err = gitCreator.Impl(gitService)
	if err != nil {
		panic(err)
	}
}

// buildFormRequestTraditionally hand-builds the form POST that
// BenchService.Change produces declaratively.
// NOTE(review): "new_token" is added twice (first and last Add), and
// Expiration is never encoded — verify against the gotten-built request.
func buildFormRequestTraditionally(param *ChangeParam) (req *http.Request, err error) {
	form := make(url.Values)
	form.Add("new_token", param.NewToken)
	form.Add("old_token", param.OldToken)
	form.Add("secure_id", param.SecureId)
	form.Add("token_sec", param.TokenSec)
	form.Add("old_sec", param.OldSec)
	form.Add("new_token", param.NewToken)
	body := bytes.NewBufferString(form.Encode())
	req, err = http.NewRequest("POST", "https://box.zjuqsc.com/item/change_item", body)
	if err == nil {
		req.Header.Set(headers.HeaderContentType, headers.MIMEApplicationForm)
	}
	return
}

// buildMultipartRequestTraditionally hand-builds the multipart POST that
// BenchService.Upload produces declaratively, applying the same default
// values the struct tags declare.
func buildMultipartRequestTraditionally(param *UploadParam) (req *http.Request, err error) {
	var partWriter io.Writer
	body := bytes.NewBufferString("")
	writer := multipart.NewWriter(body)
	// Apply the tag defaults for zero-valued fields.
	if param.PhpSession == gotten.ZeroStr {
		param.PhpSession = "qscbox"
	}
	if param.Filecount == gotten.ZeroInt {
		param.Filecount = 1
	}
	if param.Callback == gotten.ZeroStr {
		param.Callback = "handleUploadCallback"
	}
	writer.WriteField("PHP_SESSION_UPLOAD_PROGRESS", param.PhpSession)
	writer.WriteField("filecount", strconv.Itoa(param.Filecount))
	writer.WriteField("callback", param.Callback)
	writer.WriteField("is_ie9", strconv.Itoa(param.IsIe9))
	var file *os.File
	file, err = os.Open(string(param.File))
	if err == nil {
		if partWriter, err = writer.CreateFormFile("file", "avatar.jpg"); err == nil {
			_, err = io.Copy(partWriter, file)
		}
	}
	// NOTE(review): Close is called even when Open failed; a nil *os.File
	// returns ErrInvalid rather than panicking, so this is tolerated.
	file.Close()
	writer.Close()
	if err == nil {
		req, err = http.NewRequest("POST", "https://box.zjuqsc.com/item/add_item", body)
		if err == nil {
			req.Header.Set(headers.HeaderContentType, headers.MIMEMultipartForm)
		}
	}
	return
}

// buildJSONRequestTraditionally hand-builds the JSON PUT that
// GitService.UpdateDeployKey produces declaratively, validating the
// required fields first.
func buildJSONRequestTraditionally(param *UpdateParam) (req *http.Request, err error) {
	if param.Id == gotten.ZeroStr || param.KeyId == gotten.ZeroStr || param.Key == nil {
		err = errors.New("param is invalid")
	}
	if err == nil {
		target := fmt.Sprintf("https://git.zjuqsc.com/projects/%s/deploy_keys/%s", url.QueryEscape(param.Id), url.QueryEscape(param.KeyId))
		var data []byte
		data, err = json.Marshal(param.Key)
		if err == nil {
			body := bytes.NewBuffer(data)
			req, err = http.NewRequest("PUT", target, body)
			if err == nil {
				req.Header.Set(headers.HeaderContentType, headers.MIMEApplicationJSONCharsetUTF8)
			}
		}
	}
	return
}

func BenchmarkCreateFormTraditionally(b *testing.B) {
	for i := 0; i < b.N; i++ {
		buildFormRequestTraditionally(changeParam)
	}
}

func BenchmarkCreateFormByGotten(b *testing.B) {
	for i := 0; i < b.N; i++ {
		service.Change(changeParam)
	}
}

func BenchmarkCreateMultipartTraditionally(b *testing.B) {
	for i := 0; i < b.N; i++ {
		buildMultipartRequestTraditionally(uploadParam)
	}
}

func BenchmarkCreateMultipartByGotten(b *testing.B) {
	for i := 0; i < b.N; i++ {
		service.Upload(uploadParam)
	}
}

func BenchmarkCreateJSONReqTraditionally(b *testing.B) {
	for i := 0; i < b.N; i++ {
		buildJSONRequestTraditionally(updateParam)
	}
}

func BenchmarkCreateJSONReqByGotten(b *testing.B) {
	for i := 0; i < b.N; i++ {
		gitService.UpdateDeployKey(updateParam)
	}
}
package cmd

import (
	"fmt"
	"os"
	"strings"

	"github.com/aelindeman/goname"
	"github.com/kyoh86/xdg"
	homedir "github.com/mitchellh/go-homedir"
	log "github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

var (
	// cfgFile is the explicit config path from --config, if given.
	cfgFile string
	// apiClient caches the authenticated Name.com client across calls
	// to GetClient.
	apiClient *goname.GoName
)

// rootCmd represents the base command when called without any subcommands
var rootCmd = &cobra.Command{
	Use:   "namedns",
	Short: "A command-line utility to manipulate Name.com DNS records",
	PersistentPreRun: func(cmd *cobra.Command, args []string) {
		if viper.GetBool("verbose") {
			log.SetLevel(log.DebugLevel)
		}
		// NOTE(review): the error returned here is discarded, so an
		// invalid config is only reported later, when GetClient
		// re-validates. Confirm this is intentional.
		ValidateGlobalConfig()
	},
}

// Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd.
func Execute() {
	if err := rootCmd.Execute(); err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
}

// ValidateGlobalConfig ensures required flags are set.
func ValidateGlobalConfig() error {
	if viper.GetString("username") == "" {
		return fmt.Errorf("username is not set")
	}
	if viper.GetString("api-key") == "" {
		return fmt.Errorf("API key is not set")
	}
	return nil
}

// GetClient sets up and returns the Name.com API client.
// The client is created and authenticated once, then cached in
// apiClient; configuration or login failures terminate the process.
func GetClient() *goname.GoName {
	if apiClient != nil {
		return apiClient
	}
	configErr := ValidateGlobalConfig()
	if configErr != nil {
		log.WithError(configErr).Fatal("configuration error")
	}
	client := goname.New(viper.GetString("username"), viper.GetString("api-key"))
	client.BaseURL = viper.GetString("api-url")
	loginErr := client.Login()
	if loginErr != nil {
		log.WithError(loginErr).Fatal("could not authenticate")
	}
	apiClient = client
	return client
}

// init declares the persistent flags and binds them to viper keys so
// they can also be supplied via NAMEDNS_* environment variables.
func init() {
	cobra.OnInitialize(initConfig)
	rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", `Path to config file (default "$XDG_CONFIG_HOME/namedns/.namedns.yaml")`)
	rootCmd.PersistentFlags().StringP("username", "u", "", "API username")
	rootCmd.PersistentFlags().StringP("api-key", "k", "", "API key")
	rootCmd.PersistentFlags().StringP("api-url", "", goname.NameAPIBaseURL, "API base URL")
	rootCmd.PersistentFlags().BoolP("verbose", "v", false, "Display debugging output")
	viper.SetEnvPrefix("namedns")
	// NOTE(review): BindPFlag error returns are ignored throughout; they
	// only fail on a nil flag, but checks would be safer.
	viper.BindPFlag("username", rootCmd.PersistentFlags().Lookup("username"))
	viper.BindPFlag("api-key", rootCmd.PersistentFlags().Lookup("api-key"))
	viper.BindPFlag("api-url", rootCmd.PersistentFlags().Lookup("api-url"))
	viper.BindPFlag("verbose", rootCmd.PersistentFlags().Lookup("verbose"))
}

// initConfig reads in config file and ENV variables if set.
// Search order: explicit --config path, XDG config dirs, the home
// directory, then the current working directory.
func initConfig() {
	if cfgFile != "" {
		// Use config file from the flag.
		viper.SetConfigFile(cfgFile)
	} else {
		// Find XDG config directories.
		for _, xdgConfigDir := range xdg.AllConfigDirs() {
			viper.AddConfigPath(strings.Join([]string{xdgConfigDir, "namedns"}, "/"))
		}
		// Find home directory.
		home, homeErr := homedir.Dir()
		if homeErr != nil {
			log.WithError(homeErr).Warning("could not find home directory")
		} else {
			viper.AddConfigPath(home)
		}
		// Look for one in the current directory.
		wd, wdErr := os.Getwd()
		if wdErr != nil {
			log.WithError(wdErr).Warning("could not find current directory")
		} else {
			viper.AddConfigPath(wd)
		}
		// Search config with name ".namedns" (without extension).
		viper.SetConfigName(".namedns")
	}
	viper.AutomaticEnv() // read in environment variables that match
	// If a config file is found, read it in.
	if configReadErr := viper.ReadInConfig(); configReadErr == nil {
		log.WithField("file", viper.ConfigFileUsed()).Debug("using config from file")
	}
}
package actions

import (
	"errors"
	"strings"

	"github.com/barrydev/api-3h-shop/src/common/connect"
	"github.com/barrydev/api-3h-shop/src/factories"
	"github.com/barrydev/api-3h-shop/src/model"
)

// UpdateOrder builds a dynamic SQL UPDATE for the order identified by
// orderId, setting only the fields that are non-nil in body, then
// returns the freshly re-read order. If no field is set, no write is
// performed and the existing order is returned instead (erroring if it
// does not exist).
func UpdateOrder(orderId int64, body *model.BodyOrder) (*model.Order, error) {
	queryString := ""
	var args []interface{}
	var set []string
	// Each non-nil field contributes one "column=?" fragment plus its
	// positional argument, keeping fragments and args index-aligned.
	if body.CustomerId != nil {
		set = append(set, " customer_id=?")
		args = append(args, body.CustomerId)
	}
	if body.Status != nil {
		set = append(set, " status=?")
		args = append(args, body.Status)
	}
	if body.TotalPrice != nil {
		set = append(set, " total_price=?")
		args = append(args, body.TotalPrice)
	}
	if body.PaymentStatus != nil {
		set = append(set, " payment_status=?")
		args = append(args, body.PaymentStatus)
	}
	if body.FulfillmentStatus != nil {
		// NOTE(review): column is spelled "fulfilment_status" (one "l")
		// while the Go field is "FulfillmentStatus" — confirm this
		// matches the actual schema.
		set = append(set, " fulfilment_status=?")
		args = append(args, body.FulfillmentStatus)
	}
	if body.PaidAt != nil {
		set = append(set, " paid_at=?")
		args = append(args, body.PaidAt)
	}
	if body.Note != nil {
		set = append(set, " note=?")
		args = append(args, body.Note)
	}
	if body.FulfilledAt != nil {
		set = append(set, " fulfilled_at=?")
		args = append(args, body.FulfilledAt)
	}
	if body.CancelledAt != nil {
		set = append(set, " cancelled_at=?")
		args = append(args, body.CancelledAt)
	}
	if len(set) > 0 {
		// Presumably factories.UpdateOrder prepends the "UPDATE <table>"
		// clause; only SET/WHERE are built here — verify against it.
		queryString += "SET" + strings.Join(set, ",") + "\n"
	} else {
		// Nothing to update: return the current row as-is.
		order, err := factories.FindOrderById(orderId)
		if err != nil {
			return nil, err
		}
		if order == nil {
			return nil, errors.New("order does not exists")
		}
		return order, nil
	}
	queryString += "WHERE _id=?"
	args = append(args, orderId)
	rowEffected, err := factories.UpdateOrder(&connect.QueryMySQL{
		QueryString: queryString,
		Args:        args,
	})
	if err != nil {
		return nil, err
	}
	if rowEffected == nil {
		return nil, errors.New("update error")
	}
	// Re-read so the caller gets the post-update state.
	return GetOrderById(orderId)
}
package main import ( "fmt" "strconv" "strings" ) func main() { rules := parseInput() fmt.Println(dfBagCount(rules, "shiny gold")-1) } func dfBagCount(rules map[string][]connection, node string) int { children := rules[node] bags := 1 for _, c := range children { contained := dfBagCount(rules, c.c) if contained > 0 { bags += c.n * contained } } return bags } func main_1() { rules := parseInput() var valid int for colour := range rules { if colour == "shiny gold" { continue } if dfs(rules, colour, "shiny gold") { fmt.Printf("%s contains shiny gold\n", colour) valid++ } fmt.Println("") } fmt.Println(valid) } func dfs(rules map[string][]connection, node string, search string) bool { if node == search { return true } children := rules[node] for _, c := range children { if dfs(rules, c.c, search) { return true } } return false } type connection struct { n int c string } func parseInput() map[string][]connection { rules := map[string][]connection{} ss := strings.Split(input, "\n") for _, s := range ss { rule := strings.Split(s, "contain") root := strings.TrimSuffix(rule[0], " bags ") if rule[1] == " no other bags." { rules[root] = nil continue } cxns := strings.Split(rule[1], ",") for _, c := range cxns { raw := strings.Fields(c) n, _ := strconv.Atoi(raw[0]) colour := fmt.Sprintf("%s %s", raw[1], raw[2]) rules[root] = append(rules[root], connection{ n: n, c: colour, }) } } return rules } const sample = `light red bags contain 1 bright white bag, 2 muted yellow bags. dark orange bags contain 3 bright white bags, 4 muted yellow bags. bright white bags contain 1 shiny gold bag. muted yellow bags contain 2 shiny gold bags, 9 faded blue bags. shiny gold bags contain 1 dark olive bag, 2 vibrant plum bags. dark olive bags contain 3 faded blue bags, 4 dotted black bags. vibrant plum bags contain 5 faded blue bags, 6 dotted black bags. faded blue bags contain no other bags. 
dotted black bags contain no other bags.` const input = `striped orange bags contain 1 vibrant green bag, 5 plaid yellow bags, 1 drab magenta bag. dark fuchsia bags contain 3 wavy indigo bags, 4 striped lime bags. clear maroon bags contain 2 clear gold bags, 5 bright salmon bags, 5 wavy tomato bags. faded tan bags contain 4 dim brown bags. wavy olive bags contain 3 faded gray bags, 2 posh brown bags, 3 striped cyan bags. light plum bags contain 4 vibrant plum bags. vibrant cyan bags contain 4 dotted gold bags, 4 bright indigo bags, 4 wavy lime bags, 3 clear plum bags. vibrant beige bags contain 1 wavy silver bag, 4 shiny indigo bags, 2 wavy aqua bags, 1 mirrored cyan bag. light tomato bags contain 5 muted green bags, 2 striped tomato bags, 4 faded aqua bags, 2 muted salmon bags. drab white bags contain 5 faded beige bags, 1 light purple bag, 1 striped white bag, 4 muted cyan bags. faded purple bags contain 5 posh lime bags. striped violet bags contain 1 dim bronze bag, 2 faded plum bags. pale teal bags contain 2 shiny salmon bags, 2 light tomato bags, 5 dim coral bags. dull lime bags contain 3 bright tomato bags, 3 dim magenta bags, 3 bright cyan bags, 2 dark teal bags. dim salmon bags contain 3 dark chartreuse bags. faded teal bags contain 2 shiny coral bags, 4 dark turquoise bags, 3 wavy black bags. dark green bags contain 5 vibrant green bags, 4 clear blue bags. dim black bags contain 2 vibrant bronze bags. light yellow bags contain 2 faded salmon bags, 4 muted aqua bags. drab brown bags contain 3 clear green bags, 2 pale aqua bags. dotted tomato bags contain 3 vibrant purple bags, 3 vibrant tomato bags, 3 clear lime bags, 5 dim gold bags. faded orange bags contain 1 pale brown bag. light black bags contain 1 striped silver bag, 3 dim brown bags, 2 bright cyan bags, 3 striped lime bags. plaid turquoise bags contain 5 muted fuchsia bags, 1 dull violet bag. light lime bags contain 1 clear lime bag, 5 wavy blue bags. 
plaid blue bags contain 2 dotted blue bags, 5 light brown bags, 5 posh teal bags. vibrant maroon bags contain 2 bright lavender bags. light orange bags contain 1 wavy magenta bag, 3 clear orange bags, 4 striped silver bags. bright beige bags contain 2 plaid red bags, 1 dull aqua bag, 3 bright fuchsia bags. striped crimson bags contain 3 faded maroon bags, 3 dull chartreuse bags, 4 mirrored red bags, 4 clear orange bags. posh silver bags contain 3 pale plum bags, 4 light salmon bags, 5 light purple bags. dull white bags contain 2 dark white bags. bright lime bags contain 4 clear indigo bags, 2 vibrant green bags. bright crimson bags contain 3 light lime bags. dull lavender bags contain 3 vibrant magenta bags. pale purple bags contain no other bags. vibrant coral bags contain 3 bright cyan bags, 2 bright tomato bags. posh salmon bags contain 4 vibrant magenta bags, 3 dull aqua bags. dull black bags contain 4 dotted black bags. shiny salmon bags contain 2 faded plum bags, 5 clear lavender bags, 1 light crimson bag, 2 light gray bags. vibrant red bags contain 3 dim bronze bags, 1 striped indigo bag, 1 vibrant silver bag. plaid beige bags contain 1 posh purple bag, 5 pale bronze bags, 1 dotted white bag. striped bronze bags contain 1 wavy salmon bag. shiny lavender bags contain 2 shiny salmon bags, 2 bright salmon bags, 4 posh fuchsia bags. plaid chartreuse bags contain 2 vibrant bronze bags, 1 dull chartreuse bag, 3 wavy tomato bags. bright chartreuse bags contain no other bags. wavy white bags contain 2 dim blue bags, 5 faded silver bags, 5 shiny silver bags, 2 shiny beige bags. drab beige bags contain 5 wavy tomato bags. mirrored teal bags contain 5 muted lavender bags, 4 dull chartreuse bags. dotted bronze bags contain 1 clear magenta bag, 2 muted silver bags, 5 muted cyan bags. dim red bags contain 5 shiny fuchsia bags, 3 dotted white bags. shiny crimson bags contain 2 dull white bags, 2 vibrant maroon bags. 
wavy plum bags contain 5 light crimson bags, 4 clear green bags. faded olive bags contain 5 posh cyan bags. mirrored gold bags contain 5 striped gold bags. dim cyan bags contain 5 shiny green bags, 5 wavy blue bags, 3 mirrored lime bags, 4 dark plum bags. vibrant orange bags contain 2 mirrored lime bags, 1 bright chartreuse bag, 3 pale crimson bags, 4 muted beige bags. pale beige bags contain 5 dull olive bags. posh lavender bags contain 1 faded red bag, 2 wavy maroon bags. plaid teal bags contain 3 dull black bags, 5 muted tan bags. wavy red bags contain 1 posh salmon bag, 1 light black bag, 2 drab olive bags, 4 drab beige bags. drab maroon bags contain 5 bright yellow bags. clear tan bags contain 1 light yellow bag, 2 mirrored indigo bags, 2 dotted olive bags, 3 dim magenta bags. striped turquoise bags contain 1 posh cyan bag, 1 clear crimson bag. posh turquoise bags contain 3 bright bronze bags, 5 bright orange bags. dark turquoise bags contain 3 posh yellow bags, 4 dull red bags, 5 plaid silver bags, 3 bright tomato bags. posh green bags contain 2 wavy bronze bags. muted green bags contain 2 bright fuchsia bags, 2 muted gold bags, 3 light orange bags. drab salmon bags contain 3 light black bags. dotted coral bags contain 4 pale chartreuse bags. dark yellow bags contain 5 faded red bags, 5 pale brown bags. muted teal bags contain 1 clear gold bag, 5 striped tomato bags, 2 dark teal bags. bright silver bags contain 2 bright gray bags. drab chartreuse bags contain 5 pale teal bags. muted plum bags contain 3 posh white bags, 1 bright red bag, 5 striped gold bags. dark bronze bags contain 2 bright cyan bags, 1 dim beige bag, 1 pale bronze bag, 3 light gray bags. dim fuchsia bags contain 3 dark black bags, 3 mirrored chartreuse bags, 3 faded gray bags. faded brown bags contain 3 wavy white bags, 1 dull violet bag. bright green bags contain 4 vibrant fuchsia bags, 2 shiny white bags, 3 mirrored blue bags, 4 posh aqua bags. 
dotted turquoise bags contain 1 shiny beige bag, 3 wavy blue bags. wavy chartreuse bags contain 5 dark plum bags. shiny orange bags contain 4 clear orange bags, 4 dark coral bags, 2 bright teal bags, 5 dotted brown bags. shiny gray bags contain 3 dull olive bags, 3 muted aqua bags. clear blue bags contain 5 bright salmon bags, 2 shiny gold bags. vibrant olive bags contain 5 bright salmon bags, 1 muted green bag, 4 wavy olive bags. muted gold bags contain 3 striped plum bags, 4 dull violet bags, 1 shiny green bag. plaid aqua bags contain 4 shiny purple bags, 1 wavy lime bag, 3 striped violet bags, 4 clear magenta bags. dim bronze bags contain no other bags. vibrant turquoise bags contain 5 wavy plum bags. light teal bags contain 4 clear plum bags, 2 drab crimson bags. light coral bags contain 1 drab violet bag, 3 light yellow bags, 2 faded maroon bags. bright cyan bags contain 5 dim crimson bags, 3 striped plum bags, 2 muted gold bags. wavy purple bags contain 4 vibrant tomato bags, 4 dull bronze bags, 1 dim coral bag. dull tan bags contain 5 muted fuchsia bags. bright white bags contain 5 pale blue bags. clear plum bags contain 2 muted lavender bags, 1 faded aqua bag, 1 faded plum bag, 3 dull indigo bags. vibrant tomato bags contain 1 posh purple bag, 5 mirrored lime bags, 3 vibrant turquoise bags, 4 clear maroon bags. muted tan bags contain 2 dim magenta bags, 4 dotted gray bags, 4 plaid orange bags, 2 pale green bags. dark brown bags contain 5 striped plum bags, 2 mirrored tomato bags, 4 faded plum bags, 3 light yellow bags. striped yellow bags contain 5 muted indigo bags, 4 bright black bags, 3 clear lime bags, 1 striped white bag. posh black bags contain 2 mirrored tomato bags, 4 posh gray bags. wavy black bags contain 5 pale orange bags, 3 posh violet bags, 2 dotted white bags, 2 faded red bags. muted indigo bags contain 2 bright fuchsia bags, 4 muted lavender bags. bright coral bags contain 1 posh chartreuse bag, 2 shiny gold bags. 
posh tomato bags contain 4 light violet bags, 1 mirrored olive bag. posh cyan bags contain 2 mirrored chartreuse bags, 4 dotted black bags, 5 vibrant gray bags. pale silver bags contain 1 light fuchsia bag, 4 dark red bags. plaid indigo bags contain 4 plaid red bags, 3 striped cyan bags, 1 muted silver bag, 2 wavy magenta bags. dotted indigo bags contain 1 mirrored magenta bag, 4 plaid gold bags, 3 dark coral bags, 2 vibrant silver bags. light chartreuse bags contain 5 shiny blue bags, 5 drab coral bags, 4 drab turquoise bags. pale tomato bags contain 3 wavy violet bags, 3 dotted tan bags, 4 drab brown bags. muted chartreuse bags contain 3 dim silver bags, 1 dim chartreuse bag, 3 striped green bags, 3 dim magenta bags. dull aqua bags contain 3 pale crimson bags, 2 drab aqua bags, 5 drab magenta bags, 3 clear gold bags. dark lime bags contain 3 light tan bags. dark orange bags contain 4 drab orange bags. faded black bags contain 4 dotted chartreuse bags, 4 wavy silver bags, 5 plaid olive bags, 2 pale blue bags. dull purple bags contain 1 clear gold bag, 5 wavy lavender bags. dotted gold bags contain 3 pale purple bags, 1 dark olive bag, 5 wavy beige bags, 2 mirrored aqua bags. pale bronze bags contain 2 shiny tomato bags, 5 vibrant turquoise bags. bright aqua bags contain 1 pale teal bag, 4 faded coral bags, 2 clear olive bags. faded beige bags contain 5 wavy yellow bags, 3 pale lavender bags, 1 vibrant coral bag. drab turquoise bags contain 1 shiny tomato bag, 3 clear lavender bags, 2 drab orange bags. light maroon bags contain 1 posh bronze bag, 3 dotted silver bags, 1 plaid silver bag, 1 muted teal bag. posh maroon bags contain 1 mirrored maroon bag, 1 posh black bag, 3 faded salmon bags, 4 posh gray bags. bright red bags contain 2 vibrant salmon bags. vibrant purple bags contain 2 faded violet bags. vibrant crimson bags contain 3 plaid yellow bags, 3 dotted yellow bags, 2 pale tan bags. 
dim green bags contain 3 faded plum bags, 4 posh turquoise bags, 4 muted aqua bags. striped gray bags contain 1 striped fuchsia bag. light salmon bags contain 5 dim coral bags, 5 posh brown bags, 3 shiny green bags, 4 striped beige bags. bright violet bags contain 5 dim maroon bags, 5 dark tan bags, 3 drab purple bags, 2 muted cyan bags. dotted chartreuse bags contain 2 posh lime bags. drab silver bags contain 1 dim coral bag, 3 pale chartreuse bags, 1 light salmon bag. mirrored lime bags contain 4 light orange bags, 1 clear orange bag, 3 wavy plum bags. muted violet bags contain 5 faded silver bags, 4 posh red bags, 2 drab aqua bags. dull salmon bags contain 2 dull maroon bags, 3 mirrored red bags. posh aqua bags contain 5 dim turquoise bags, 5 drab green bags, 2 striped red bags. muted white bags contain 3 light yellow bags, 3 pale yellow bags, 2 dull olive bags. wavy coral bags contain 3 wavy lime bags, 5 faded purple bags, 5 posh gray bags, 4 dark tomato bags. dark silver bags contain 1 dull chartreuse bag, 2 dull violet bags. dotted plum bags contain 1 mirrored violet bag, 3 dim crimson bags, 1 mirrored fuchsia bag, 1 light tan bag. mirrored purple bags contain 2 muted lime bags, 4 mirrored teal bags, 2 light tomato bags. drab yellow bags contain 4 drab violet bags, 3 striped orange bags. striped teal bags contain 3 muted cyan bags, 1 light fuchsia bag. drab red bags contain 2 faded coral bags, 1 bright crimson bag. pale orange bags contain 4 dull chartreuse bags, 4 clear orange bags. posh gold bags contain 2 faded gray bags, 5 striped indigo bags. light crimson bags contain no other bags. muted red bags contain 5 dim gray bags. clear cyan bags contain 5 faded salmon bags, 4 light brown bags, 3 muted purple bags, 5 striped beige bags. plaid cyan bags contain 2 clear yellow bags, 4 shiny gray bags, 5 pale green bags, 1 posh aqua bag. clear brown bags contain 4 faded lime bags, 2 dull bronze bags, 2 clear bronze bags. 
light tan bags contain 3 light bronze bags, 4 drab orange bags. dim indigo bags contain 4 shiny fuchsia bags. muted black bags contain 4 pale fuchsia bags, 3 bright maroon bags, 3 striped lavender bags. faded gray bags contain 3 wavy plum bags, 1 wavy magenta bag, 1 clear lavender bag. mirrored magenta bags contain 4 clear blue bags. dark aqua bags contain 1 vibrant blue bag, 3 pale aqua bags. dim lavender bags contain 1 clear magenta bag, 3 drab blue bags. bright gray bags contain 1 dull olive bag, 1 plaid lime bag, 5 clear blue bags. posh teal bags contain 4 pale yellow bags. pale plum bags contain 2 bright fuchsia bags, 3 dotted white bags, 5 pale crimson bags. mirrored bronze bags contain 1 faded yellow bag. dull plum bags contain 4 shiny coral bags, 3 posh turquoise bags, 2 muted teal bags. plaid silver bags contain 4 vibrant black bags, 4 dull olive bags. dull violet bags contain 1 vibrant magenta bag, 3 faded salmon bags, 1 mirrored aqua bag, 4 light crimson bags. dim yellow bags contain 5 clear coral bags, 2 striped white bags, 5 clear maroon bags. drab orange bags contain 4 dull white bags, 1 plaid magenta bag, 2 dull lavender bags, 4 dim cyan bags. wavy maroon bags contain 3 dull violet bags, 1 pale brown bag, 5 vibrant magenta bags, 5 mirrored aqua bags. wavy turquoise bags contain 5 wavy crimson bags, 1 posh plum bag, 4 faded turquoise bags, 5 striped cyan bags. clear yellow bags contain 2 faded purple bags, 2 plaid lime bags, 4 clear violet bags, 4 drab magenta bags. muted crimson bags contain 1 vibrant orange bag. drab violet bags contain 2 pale lavender bags, 2 light yellow bags. dull cyan bags contain 5 dim coral bags, 1 posh orange bag, 3 striped beige bags. shiny fuchsia bags contain 5 bright lime bags, 5 muted aqua bags, 5 dark teal bags, 5 faded indigo bags. pale chartreuse bags contain 4 dark plum bags, 5 clear gold bags, 2 wavy blue bags, 3 dim coral bags. dim orange bags contain 1 vibrant coral bag, 1 clear gold bag. 
shiny tan bags contain 4 plaid tan bags. dull bronze bags contain 5 dotted lavender bags, 2 wavy plum bags. muted bronze bags contain 3 striped white bags. wavy magenta bags contain 3 posh lime bags, 3 wavy plum bags, 5 bright chartreuse bags. muted cyan bags contain 1 pale yellow bag, 5 wavy maroon bags, 5 shiny gold bags. shiny beige bags contain 5 posh chartreuse bags, 3 muted indigo bags, 3 wavy plum bags. vibrant brown bags contain 2 shiny green bags, 2 pale purple bags, 3 posh gray bags. dim blue bags contain 4 clear silver bags, 4 muted blue bags, 2 clear lavender bags. posh chartreuse bags contain 2 wavy magenta bags. dim magenta bags contain 3 faded coral bags. plaid coral bags contain 2 drab gray bags, 4 dark orange bags, 2 dim cyan bags. dim coral bags contain 1 shiny salmon bag, 3 wavy plum bags, 1 light yellow bag, 4 light purple bags. clear violet bags contain 5 plaid olive bags, 5 mirrored violet bags, 3 muted lavender bags, 2 clear indigo bags. striped coral bags contain 3 dim fuchsia bags, 5 vibrant yellow bags, 4 vibrant plum bags, 1 mirrored turquoise bag. striped gold bags contain 5 vibrant blue bags, 4 pale blue bags, 1 drab beige bag, 3 dotted olive bags. dark beige bags contain 1 wavy gray bag. plaid red bags contain 2 faded gray bags, 5 muted olive bags, 3 dark silver bags. dark purple bags contain 4 clear maroon bags, 3 dark green bags. dull yellow bags contain 3 vibrant white bags, 5 dull indigo bags, 4 dotted teal bags. faded magenta bags contain 2 dark violet bags, 4 light indigo bags. pale green bags contain 4 striped fuchsia bags. faded blue bags contain 5 pale chartreuse bags, 5 plaid indigo bags, 5 posh fuchsia bags. dim brown bags contain 4 dim magenta bags. pale turquoise bags contain 3 bright salmon bags, 3 wavy yellow bags. dark coral bags contain 2 faded beige bags, 3 posh gold bags, 2 muted cyan bags, 3 dim lime bags. striped tan bags contain 3 vibrant bronze bags. drab gold bags contain 1 faded cyan bag. 
dotted green bags contain 1 dim green bag, 4 wavy red bags. mirrored salmon bags contain 1 posh gray bag. mirrored olive bags contain 1 dotted gold bag, 2 vibrant red bags. shiny blue bags contain 2 clear yellow bags. dotted white bags contain 1 wavy blue bag, 2 striped beige bags, 3 vibrant black bags, 5 striped fuchsia bags. dotted silver bags contain 4 dull violet bags, 3 clear magenta bags. muted lime bags contain 5 bright fuchsia bags, 4 dim violet bags. dim white bags contain 4 wavy plum bags, 4 clear coral bags, 5 faded lime bags. striped blue bags contain 2 striped violet bags, 5 clear lime bags, 4 muted tan bags. dotted fuchsia bags contain 3 pale blue bags, 5 wavy blue bags, 2 faded maroon bags. dark white bags contain 4 posh fuchsia bags, 5 dim cyan bags. pale violet bags contain 5 plaid fuchsia bags. muted aqua bags contain 3 mirrored tomato bags, 2 dim bronze bags, 1 pale purple bag, 5 mirrored aqua bags. wavy lime bags contain 1 vibrant green bag, 3 striped tomato bags, 5 dotted beige bags, 5 dark plum bags. dark salmon bags contain 3 faded plum bags, 3 pale blue bags, 1 mirrored red bag, 3 striped gold bags. dotted salmon bags contain 5 pale crimson bags, 3 dark white bags, 2 mirrored beige bags, 2 shiny salmon bags. vibrant white bags contain 4 dark brown bags, 1 faded plum bag, 1 plaid red bag, 1 dotted lime bag. muted brown bags contain 3 plaid violet bags. striped red bags contain 4 plaid bronze bags, 5 bright orange bags. vibrant teal bags contain 4 dull olive bags, 1 shiny aqua bag, 2 muted olive bags. drab green bags contain 2 muted olive bags. light blue bags contain 1 muted cyan bag, 4 pale teal bags, 5 dotted white bags, 3 posh gray bags. light gold bags contain 4 wavy black bags, 5 wavy gray bags, 4 pale teal bags, 4 pale violet bags. posh violet bags contain 5 shiny magenta bags, 4 dark white bags, 4 posh lime bags, 4 plaid silver bags. striped silver bags contain 3 pale crimson bags, 4 posh lime bags, 3 dim bronze bags. 
dull tomato bags contain 4 plaid lime bags, 3 striped lavender bags, 4 clear turquoise bags, 3 dull lime bags. dull blue bags contain 2 muted tan bags, 4 posh brown bags, 2 vibrant beige bags. light magenta bags contain 5 drab salmon bags, 1 bright red bag. dotted teal bags contain 2 pale yellow bags, 3 posh red bags, 2 drab aqua bags, 1 muted indigo bag. light beige bags contain 1 drab fuchsia bag. muted fuchsia bags contain 2 shiny cyan bags, 4 bright chartreuse bags, 2 striped indigo bags. muted gray bags contain 2 vibrant brown bags, 5 clear violet bags. bright maroon bags contain 4 dark indigo bags, 5 plaid olive bags. wavy tomato bags contain 3 vibrant gray bags. dim purple bags contain 3 dull lime bags. mirrored blue bags contain 3 pale aqua bags. drab crimson bags contain 2 muted brown bags, 1 faded fuchsia bag. muted turquoise bags contain 1 dull olive bag, 5 dull silver bags, 1 vibrant gray bag, 4 plaid violet bags. posh lime bags contain 1 pale purple bag, 1 vibrant black bag, 3 clear green bags. light lavender bags contain 5 vibrant lime bags, 1 light coral bag. striped maroon bags contain 3 wavy yellow bags, 3 faded silver bags, 5 dim olive bags, 3 muted tan bags. shiny olive bags contain 2 bright olive bags. mirrored fuchsia bags contain 3 dim gray bags. drab blue bags contain 2 light salmon bags, 3 muted olive bags, 1 dark black bag. drab magenta bags contain 4 light yellow bags, 2 muted aqua bags, 5 vibrant magenta bags, 4 vibrant silver bags. light cyan bags contain 1 dull chartreuse bag, 1 dim bronze bag. plaid lime bags contain 1 muted white bag, 2 striped fuchsia bags, 5 vibrant gray bags. wavy tan bags contain 4 dotted beige bags, 4 striped cyan bags, 4 light salmon bags, 2 shiny gray bags. faded yellow bags contain 1 bright teal bag, 1 striped salmon bag, 4 striped plum bags, 5 drab orange bags. shiny silver bags contain 2 vibrant green bags. 
mirrored aqua bags contain 4 vibrant black bags, 2 faded coral bags, 1 bright tomato bag, 1 pale purple bag. dark tomato bags contain 4 drab beige bags, 5 bright gray bags, 5 shiny teal bags, 1 pale blue bag. pale coral bags contain 5 striped orange bags, 4 muted red bags. mirrored coral bags contain 4 plaid olive bags, 1 faded indigo bag. shiny violet bags contain 5 bright tomato bags. posh beige bags contain 2 dim brown bags. dull crimson bags contain 3 mirrored chartreuse bags, 1 shiny teal bag, 3 shiny cyan bags. drab lavender bags contain 1 shiny tomato bag, 5 posh cyan bags. clear lavender bags contain 1 bright fuchsia bag, 1 pale crimson bag. wavy fuchsia bags contain 5 wavy tomato bags, 5 wavy bronze bags, 5 mirrored tomato bags. faded turquoise bags contain 3 muted crimson bags, 3 pale crimson bags, 4 drab gray bags, 1 dull white bag. pale blue bags contain 1 shiny gold bag, 5 dull silver bags, 5 posh lime bags. dim beige bags contain 1 vibrant red bag. bright tomato bags contain 2 dull olive bags, 1 dim crimson bag, 5 faded salmon bags. pale gold bags contain 2 dark blue bags. muted tomato bags contain 5 mirrored blue bags, 1 dull crimson bag, 4 mirrored cyan bags, 4 wavy blue bags. wavy beige bags contain 2 wavy lime bags, 3 dotted salmon bags. dotted violet bags contain 5 bright orange bags, 5 posh olive bags, 3 dark violet bags. vibrant black bags contain 4 bright chartreuse bags. light olive bags contain 1 dotted yellow bag, 5 shiny coral bags, 1 drab green bag, 2 vibrant lime bags. plaid crimson bags contain 3 shiny fuchsia bags. mirrored yellow bags contain 1 shiny red bag, 2 muted lime bags. pale tan bags contain 2 pale crimson bags, 4 pale green bags. pale yellow bags contain 1 clear maroon bag, 5 wavy silver bags, 4 faded purple bags, 4 faded plum bags. mirrored silver bags contain 5 pale lavender bags, 5 clear orange bags, 4 faded lime bags. drab tomato bags contain 2 dull lavender bags. 
drab bronze bags contain 5 shiny indigo bags, 3 pale purple bags. muted salmon bags contain 2 clear green bags, 4 faded coral bags, 1 faded salmon bag. posh brown bags contain 2 vibrant magenta bags, 1 muted salmon bag. striped magenta bags contain 2 shiny white bags. drab teal bags contain 1 vibrant red bag, 5 striped violet bags, 1 muted olive bag. mirrored red bags contain 4 bright chartreuse bags. muted magenta bags contain 4 dotted blue bags, 2 light bronze bags. clear orange bags contain 4 striped cyan bags. dotted maroon bags contain 1 plaid olive bag. mirrored white bags contain 1 shiny tomato bag, 1 plaid lime bag. mirrored tomato bags contain 5 pale brown bags, 4 clear green bags, 4 mirrored chartreuse bags. bright magenta bags contain 3 bright salmon bags, 4 posh fuchsia bags, 3 dotted tan bags. plaid salmon bags contain 2 bright white bags, 5 mirrored lime bags. pale black bags contain 2 faded tomato bags, 3 dotted brown bags, 2 faded turquoise bags. posh gray bags contain 2 mirrored chartreuse bags. muted beige bags contain 3 posh lime bags, 5 wavy plum bags, 2 light crimson bags, 2 pale purple bags. pale red bags contain 3 pale beige bags, 2 vibrant yellow bags. bright indigo bags contain 5 mirrored violet bags, 4 mirrored tomato bags, 3 clear lavender bags, 1 muted gold bag. shiny teal bags contain no other bags. drab plum bags contain 5 mirrored chartreuse bags, 1 light brown bag. clear tomato bags contain 4 faded coral bags. light aqua bags contain 5 bright fuchsia bags, 4 posh red bags, 2 light gray bags, 2 bright tomato bags. shiny tomato bags contain 5 muted turquoise bags. dull olive bags contain no other bags. dim chartreuse bags contain 4 posh aqua bags. bright turquoise bags contain 4 pale olive bags. pale aqua bags contain 3 muted aqua bags, 5 shiny plum bags, 3 striped indigo bags, 5 pale blue bags. shiny indigo bags contain 1 posh tan bag, 3 dotted teal bags. faded salmon bags contain no other bags. 
dark magenta bags contain 2 striped plum bags, 1 dull brown bag, 2 wavy tan bags, 2 faded olive bags. vibrant silver bags contain 5 wavy tomato bags, 3 dull chartreuse bags, 3 posh lime bags, 2 dull violet bags. wavy violet bags contain 1 dim crimson bag, 5 drab white bags, 2 drab purple bags. dull green bags contain 1 posh red bag, 5 shiny teal bags. posh tan bags contain 4 pale yellow bags, 1 dim beige bag, 1 shiny tomato bag. shiny purple bags contain 4 shiny lavender bags, 2 plaid violet bags, 2 drab beige bags, 2 pale blue bags. plaid lavender bags contain 3 faded purple bags, 1 wavy purple bag. dotted lime bags contain 5 muted white bags, 2 posh olive bags, 2 pale crimson bags, 5 dark plum bags. shiny brown bags contain 3 pale tan bags, 5 shiny green bags, 4 vibrant brown bags. clear gold bags contain 4 striped plum bags, 1 bright fuchsia bag, 3 faded salmon bags. drab cyan bags contain 2 muted purple bags, 2 dull aqua bags, 3 shiny gray bags. dark violet bags contain 1 plaid salmon bag, 3 vibrant salmon bags. shiny aqua bags contain 3 mirrored tomato bags. dotted blue bags contain 3 clear white bags, 4 dull chartreuse bags, 2 light cyan bags. dull beige bags contain 2 vibrant red bags, 5 muted olive bags. wavy blue bags contain 2 dark brown bags. wavy aqua bags contain 4 wavy teal bags, 5 muted maroon bags, 3 faded gray bags, 5 plaid blue bags. dull brown bags contain 5 posh olive bags, 2 striped violet bags, 4 mirrored lime bags. striped salmon bags contain 4 faded coral bags, 5 vibrant blue bags, 4 shiny magenta bags, 3 plaid red bags. mirrored maroon bags contain 1 mirrored lime bag, 2 wavy salmon bags, 2 wavy cyan bags. clear magenta bags contain 1 vibrant black bag, 3 vibrant coral bags. clear salmon bags contain 5 bright plum bags, 5 light beige bags, 2 plaid aqua bags. bright yellow bags contain 5 striped green bags, 3 muted violet bags. dim crimson bags contain 4 dull olive bags, 1 faded coral bag, 3 clear green bags, 4 pale purple bags. 
light violet bags contain 2 shiny indigo bags, 1 clear maroon bag, 2 dull blue bags. drab coral bags contain 2 posh blue bags, 1 dim lime bag, 3 shiny turquoise bags, 3 faded fuchsia bags. plaid bronze bags contain 2 vibrant teal bags. dull magenta bags contain 4 dull tan bags, 4 plaid yellow bags, 2 dim turquoise bags. plaid tan bags contain 1 mirrored maroon bag, 5 muted maroon bags. shiny maroon bags contain 1 shiny coral bag, 5 dotted tomato bags. faded bronze bags contain 5 bright purple bags, 4 vibrant tomato bags. pale magenta bags contain 3 vibrant green bags, 3 dotted olive bags. dark blue bags contain 3 dull gray bags. dim teal bags contain 1 dotted plum bag, 3 faded olive bags, 4 dotted orange bags. faded crimson bags contain 3 vibrant violet bags, 4 wavy plum bags, 1 wavy coral bag, 1 vibrant lime bag. dotted black bags contain 2 striped purple bags. mirrored indigo bags contain 4 wavy cyan bags, 3 dull gold bags, 1 dull lime bag. muted lavender bags contain 5 striped silver bags, 2 posh brown bags. mirrored lavender bags contain 1 pale lime bag, 2 drab olive bags, 1 wavy green bag. drab lime bags contain 2 faded plum bags, 3 muted purple bags, 1 faded teal bag, 2 vibrant plum bags. bright blue bags contain 3 drab aqua bags, 4 striped tomato bags. drab purple bags contain 2 faded purple bags, 4 shiny turquoise bags, 5 dark black bags, 1 pale plum bag. mirrored chartreuse bags contain 4 clear green bags. wavy gold bags contain 1 plaid olive bag, 5 drab teal bags, 5 dotted salmon bags, 2 dull tan bags. shiny green bags contain 1 dim crimson bag, 1 dotted olive bag. plaid tomato bags contain 1 clear gold bag. striped chartreuse bags contain 5 muted lavender bags, 5 plaid white bags. vibrant violet bags contain 1 shiny cyan bag, 5 drab magenta bags, 5 drab olive bags. vibrant gray bags contain 2 vibrant black bags, 5 pale purple bags. striped plum bags contain 1 mirrored chartreuse bag. 
mirrored brown bags contain 5 light plum bags, 2 dark blue bags, 5 shiny aqua bags, 3 vibrant turquoise bags. posh crimson bags contain 4 striped orange bags, 3 wavy salmon bags, 4 dull cyan bags. plaid purple bags contain 1 dotted beige bag, 4 shiny teal bags, 4 wavy magenta bags, 5 pale chartreuse bags. clear purple bags contain 1 faded purple bag. plaid brown bags contain 5 light brown bags, 5 drab fuchsia bags, 1 clear bronze bag, 2 dark black bags. dim violet bags contain 2 bright lavender bags, 4 muted indigo bags, 1 bright white bag. plaid fuchsia bags contain 3 muted olive bags, 2 clear olive bags. posh white bags contain 3 dull aqua bags, 2 striped silver bags. dotted gray bags contain 2 vibrant orange bags, 3 muted salmon bags. clear silver bags contain 1 faded indigo bag, 1 dim violet bag. dull fuchsia bags contain 1 vibrant turquoise bag. shiny lime bags contain 2 wavy teal bags, 1 clear blue bag. bright gold bags contain 2 dotted gray bags, 3 dark yellow bags, 3 light yellow bags, 4 faded maroon bags. dark teal bags contain 5 light crimson bags, 5 clear orange bags, 4 vibrant magenta bags. drab black bags contain 3 clear green bags. light silver bags contain 2 clear black bags. striped cyan bags contain 4 clear lavender bags, 1 faded salmon bag, 4 faded plum bags, 3 shiny teal bags. plaid yellow bags contain 5 shiny teal bags. mirrored turquoise bags contain 1 dull chartreuse bag. shiny yellow bags contain 5 dotted crimson bags. vibrant yellow bags contain 2 striped beige bags, 5 dark white bags. faded violet bags contain 4 drab tan bags. plaid orange bags contain 2 dim crimson bags, 1 faded cyan bag, 4 mirrored cyan bags, 1 bright indigo bag. muted coral bags contain 2 faded lime bags, 1 plaid blue bag, 5 posh white bags. faded lime bags contain 5 wavy lavender bags, 5 muted green bags, 3 shiny beige bags, 4 wavy lime bags. pale crimson bags contain 3 mirrored chartreuse bags. 
dull teal bags contain 4 clear blue bags, 2 mirrored red bags, 4 posh turquoise bags, 2 drab black bags. clear teal bags contain 4 clear brown bags, 4 mirrored fuchsia bags. drab fuchsia bags contain 2 mirrored black bags, 5 plaid silver bags, 4 drab tan bags. dotted purple bags contain 5 faded red bags, 5 dull bronze bags. mirrored cyan bags contain 4 clear green bags, 5 muted cyan bags, 3 dull violet bags, 5 vibrant yellow bags. pale white bags contain 1 vibrant green bag, 2 wavy magenta bags, 5 dotted olive bags, 5 drab magenta bags. faded red bags contain 5 posh turquoise bags, 3 dotted gold bags, 5 light salmon bags, 5 faded bronze bags. posh plum bags contain 1 shiny brown bag, 2 vibrant silver bags, 1 bright beige bag, 1 clear maroon bag. dotted tan bags contain 2 pale bronze bags, 3 drab tan bags. dark cyan bags contain 5 wavy fuchsia bags, 3 posh beige bags. clear gray bags contain 2 muted silver bags, 4 drab violet bags. faded aqua bags contain 5 vibrant green bags, 3 dim crimson bags. vibrant indigo bags contain 1 drab violet bag, 3 dull gray bags, 4 wavy lavender bags, 2 shiny fuchsia bags. posh red bags contain 1 clear magenta bag, 2 pale bronze bags. clear turquoise bags contain 1 plaid magenta bag, 4 dotted tan bags. mirrored plum bags contain 3 plaid purple bags, 3 dull bronze bags, 3 pale olive bags. bright salmon bags contain 2 mirrored chartreuse bags, 3 pale brown bags, 5 faded plum bags, 1 striped cyan bag. clear black bags contain 4 mirrored fuchsia bags, 5 dim bronze bags, 2 drab violet bags. striped indigo bags contain 1 vibrant black bag, 1 striped fuchsia bag, 4 shiny gray bags, 5 clear orange bags. wavy teal bags contain 4 striped lime bags. striped white bags contain 4 wavy lime bags, 3 clear magenta bags, 3 dull lavender bags. clear fuchsia bags contain 2 faded brown bags, 3 shiny coral bags, 1 vibrant blue bag, 1 posh silver bag. muted blue bags contain 3 pale fuchsia bags, 5 posh fuchsia bags, 2 dim magenta bags, 1 wavy lavender bag. 
pale brown bags contain no other bags. posh fuchsia bags contain 2 dim crimson bags, 5 dull chartreuse bags, 4 dim bronze bags, 5 vibrant orange bags. wavy salmon bags contain 4 vibrant black bags. clear aqua bags contain 2 clear tan bags, 3 dim gray bags, 1 bright lavender bag. light green bags contain 5 dim tan bags, 3 shiny chartreuse bags, 4 bright crimson bags. wavy yellow bags contain 4 pale brown bags. dull indigo bags contain 3 plaid lime bags, 5 wavy silver bags, 4 vibrant coral bags, 3 mirrored tomato bags. dim plum bags contain 4 dotted magenta bags, 5 dim crimson bags, 3 wavy blue bags, 4 mirrored black bags. dull gold bags contain 5 bright lavender bags, 2 vibrant magenta bags, 4 dull lavender bags. plaid plum bags contain 2 muted cyan bags, 3 shiny purple bags, 2 drab olive bags. pale lavender bags contain 1 vibrant magenta bag, 3 mirrored violet bags, 1 vibrant green bag, 2 faded purple bags. drab indigo bags contain 1 vibrant lavender bag, 4 drab aqua bags. dotted olive bags contain 3 faded plum bags, 5 wavy plum bags, 1 pale brown bag. dotted beige bags contain 5 bright tomato bags, 4 pale purple bags, 4 dim crimson bags, 4 dull gold bags. clear coral bags contain 1 dark teal bag, 2 bright purple bags, 4 clear blue bags, 4 pale orange bags. light bronze bags contain 4 muted cyan bags, 2 posh brown bags. dull red bags contain 2 vibrant red bags, 1 muted gold bag, 2 clear coral bags. mirrored black bags contain 2 dull olive bags. shiny cyan bags contain 1 mirrored aqua bag, 3 bright tomato bags, 5 striped indigo bags. light purple bags contain 3 bright salmon bags, 5 vibrant black bags, 2 clear orange bags, 4 plaid olive bags. dark lavender bags contain 3 dark turquoise bags, 2 dotted black bags. vibrant bronze bags contain 3 wavy olive bags. vibrant salmon bags contain 3 dull olive bags, 4 bright silver bags, 3 muted green bags. pale lime bags contain 1 wavy yellow bag, 4 dark tomato bags, 5 faded aqua bags, 4 muted beige bags. 
plaid gray bags contain 2 wavy yellow bags, 5 drab turquoise bags, 3 dim gray bags. mirrored green bags contain 5 dull cyan bags. dim tan bags contain 5 dotted teal bags, 2 clear yellow bags, 1 posh maroon bag. pale indigo bags contain 5 striped indigo bags, 5 clear indigo bags, 1 drab tan bag. mirrored violet bags contain 2 muted salmon bags, 3 dull violet bags. pale salmon bags contain 1 mirrored cyan bag, 4 pale gray bags, 3 mirrored fuchsia bags, 1 striped beige bag. plaid olive bags contain 3 dark brown bags. clear white bags contain 4 plaid violet bags, 4 dull lavender bags. posh yellow bags contain 5 light gray bags, 3 clear green bags, 3 mirrored cyan bags, 2 plaid yellow bags. bright purple bags contain 1 mirrored cyan bag, 4 dim magenta bags, 2 dotted olive bags, 1 posh tan bag. dark gray bags contain 3 dim turquoise bags, 3 mirrored tomato bags. shiny chartreuse bags contain 1 dark brown bag, 2 mirrored magenta bags, 1 bright silver bag, 5 shiny gray bags. dull silver bags contain 3 dull chartreuse bags, 2 dull violet bags, 4 muted aqua bags. clear lime bags contain 4 dull silver bags. faded lavender bags contain 1 pale tan bag, 2 clear turquoise bags, 3 muted green bags, 1 muted lime bag. light white bags contain 1 mirrored gold bag, 1 dotted gold bag, 3 pale beige bags. posh olive bags contain 3 mirrored aqua bags, 2 shiny cyan bags. striped olive bags contain 1 vibrant tomato bag, 3 dotted salmon bags, 3 plaid purple bags, 2 dim brown bags. vibrant gold bags contain 4 light plum bags. faded gold bags contain 5 shiny green bags, 4 drab orange bags, 5 faded beige bags, 5 dark blue bags. faded silver bags contain 4 drab blue bags. drab gray bags contain 1 pale gray bag. posh indigo bags contain 1 vibrant teal bag, 1 vibrant aqua bag. mirrored crimson bags contain 5 mirrored turquoise bags, 4 mirrored aqua bags, 3 wavy olive bags, 1 dark teal bag. dark olive bags contain 4 dim violet bags, 2 dotted silver bags, 2 dull crimson bags, 1 striped fuchsia bag. 
bright tan bags contain 5 shiny violet bags, 5 light gray bags. pale olive bags contain 3 faded black bags, 1 dim aqua bag. drab tan bags contain 2 faded plum bags, 2 posh fuchsia bags, 2 wavy blue bags, 5 dotted lavender bags. striped purple bags contain 4 posh gray bags, 1 shiny plum bag, 1 mirrored crimson bag, 2 plaid orange bags. dotted magenta bags contain 4 dull cyan bags, 1 bright cyan bag. faded white bags contain 2 dark tomato bags, 2 muted silver bags. pale cyan bags contain 3 light tan bags, 3 dotted brown bags, 3 vibrant aqua bags, 3 drab gray bags. vibrant tan bags contain 5 striped gold bags, 5 dull gray bags, 2 bright turquoise bags, 3 muted blue bags. dark crimson bags contain 4 vibrant magenta bags, 2 striped aqua bags, 1 muted tan bag, 3 vibrant green bags. dotted red bags contain 1 dull green bag, 3 pale green bags, 5 pale tomato bags, 2 light coral bags. dim gold bags contain 3 dark black bags, 5 dotted lavender bags, 2 faded gray bags, 5 dim beige bags. faded chartreuse bags contain 1 clear aqua bag, 2 mirrored black bags, 1 striped coral bag. dark gold bags contain 1 drab coral bag, 3 dark teal bags. light fuchsia bags contain 4 faded salmon bags, 4 shiny gray bags. posh blue bags contain 2 mirrored tomato bags, 1 drab blue bag, 4 drab tomato bags, 5 dotted lavender bags. plaid green bags contain 5 light cyan bags, 3 dim turquoise bags, 1 drab teal bag. faded tomato bags contain 4 muted blue bags, 1 striped lime bag. clear olive bags contain 2 dull silver bags, 2 vibrant yellow bags, 1 pale purple bag, 2 wavy lavender bags. shiny gold bags contain 2 muted aqua bags, 3 bright salmon bags, 4 striped violet bags, 2 posh brown bags. clear beige bags contain 5 faded plum bags, 3 dull lavender bags. pale fuchsia bags contain 2 wavy lavender bags, 2 striped indigo bags, 2 posh gold bags. wavy silver bags contain 4 wavy plum bags, 1 shiny salmon bag. dotted yellow bags contain 3 striped plum bags, 5 wavy coral bags. 
dotted crimson bags contain 1 dotted silver bag. muted olive bags contain 5 mirrored tomato bags. vibrant blue bags contain 1 clear blue bag, 2 mirrored lime bags, 2 dull aqua bags. shiny white bags contain 3 pale purple bags. clear chartreuse bags contain 4 dotted lavender bags, 3 mirrored cyan bags. faded green bags contain 4 plaid lime bags, 4 vibrant olive bags, 3 dark green bags, 4 shiny lime bags. plaid violet bags contain 1 striped silver bag, 2 bright chartreuse bags. clear green bags contain no other bags. vibrant plum bags contain 5 clear green bags, 2 shiny teal bags. bright olive bags contain 5 light plum bags, 4 drab beige bags. striped beige bags contain 5 wavy silver bags, 1 shiny teal bag, 3 light gray bags. faded plum bags contain 3 clear green bags, 5 mirrored chartreuse bags, 1 dim bronze bag, 1 faded coral bag. mirrored tan bags contain 3 bright indigo bags, 1 light yellow bag, 2 muted aqua bags, 2 muted beige bags. bright black bags contain 5 pale blue bags, 4 bright coral bags, 2 wavy plum bags, 2 posh lime bags. striped aqua bags contain 5 muted lavender bags, 1 pale blue bag, 5 vibrant gray bags. dull chartreuse bags contain 1 vibrant black bag, 4 faded salmon bags. wavy gray bags contain 3 dull gray bags, 3 light blue bags, 4 pale teal bags. dim tomato bags contain 3 mirrored chartreuse bags. bright orange bags contain 2 dull indigo bags, 5 plaid violet bags, 2 light crimson bags, 2 wavy salmon bags. wavy crimson bags contain 3 dotted maroon bags, 1 vibrant lime bag, 2 pale indigo bags, 1 striped aqua bag. dim lime bags contain 5 mirrored magenta bags, 2 plaid beige bags. posh magenta bags contain 5 dull violet bags, 3 wavy gray bags. vibrant lavender bags contain 4 muted violet bags, 3 clear crimson bags. mirrored gray bags contain 5 light crimson bags, 5 dull cyan bags. dark indigo bags contain 3 dark turquoise bags, 5 mirrored turquoise bags. muted silver bags contain 2 dim coral bags, 1 dotted olive bag. 
shiny black bags contain 1 muted salmon bag, 2 faded silver bags. faded indigo bags contain 4 vibrant gray bags, 1 wavy salmon bag, 3 bright indigo bags. dim aqua bags contain 1 drab aqua bag, 4 dotted beige bags, 4 faded coral bags. bright plum bags contain 4 drab olive bags, 5 dim crimson bags, 1 vibrant green bag. light indigo bags contain 5 mirrored chartreuse bags. posh purple bags contain 4 plaid magenta bags, 3 pale purple bags. wavy bronze bags contain 4 muted aqua bags. pale gray bags contain 5 clear orange bags, 1 bright lavender bag, 4 muted salmon bags. clear bronze bags contain 4 light black bags, 2 drab aqua bags. wavy green bags contain 1 muted lavender bag, 1 light yellow bag, 1 wavy olive bag. bright brown bags contain 1 dotted black bag, 1 plaid magenta bag, 4 posh olive bags. vibrant green bags contain 5 bright cyan bags, 3 mirrored lime bags, 5 mirrored tan bags. dull orange bags contain 2 posh chartreuse bags. dotted lavender bags contain 5 vibrant gray bags, 4 plaid silver bags. dim maroon bags contain 2 pale chartreuse bags, 4 striped beige bags, 2 shiny gold bags. shiny turquoise bags contain 2 light orange bags. striped brown bags contain 5 muted brown bags, 4 dotted bronze bags, 5 striped plum bags, 5 striped green bags. plaid magenta bags contain 5 faded salmon bags, 5 dark black bags, 5 muted indigo bags, 4 pale blue bags. faded cyan bags contain 5 muted lavender bags, 1 bright fuchsia bag, 1 mirrored tomato bag. muted yellow bags contain 1 drab salmon bag, 5 mirrored cyan bags, 5 drab coral bags. bright teal bags contain 4 dotted lime bags, 2 plaid magenta bags, 3 mirrored tomato bags, 5 striped cyan bags. wavy indigo bags contain 3 dotted silver bags, 4 clear brown bags, 3 posh gray bags, 1 dull red bag. striped tomato bags contain 1 dotted olive bag. dim olive bags contain 1 dotted indigo bag, 2 dotted lime bags, 2 muted silver bags. plaid black bags contain 3 dim green bags, 1 posh bronze bag, 2 wavy orange bags, 1 muted gold bag. 
dark red bags contain 1 dotted beige bag, 5 dotted olive bags, 1 drab turquoise bag, 1 light tan bag. light red bags contain 1 muted lavender bag, 1 muted teal bag, 5 bright white bags, 4 dotted fuchsia bags. posh coral bags contain 5 posh silver bags, 1 vibrant olive bag. wavy lavender bags contain 3 bright indigo bags, 4 dim bronze bags. striped lavender bags contain 1 striped tomato bag. wavy cyan bags contain 2 shiny salmon bags, 4 drab aqua bags, 4 faded coral bags, 1 dotted olive bag. light brown bags contain 2 faded gray bags, 5 dark brown bags. dark black bags contain 2 posh brown bags. dark tan bags contain 1 striped cyan bag, 3 plaid salmon bags. muted maroon bags contain 4 bright bronze bags, 3 dotted lavender bags, 4 pale fuchsia bags, 1 vibrant red bag. faded fuchsia bags contain 5 light purple bags, 3 dim gray bags. shiny bronze bags contain 3 mirrored magenta bags, 3 light aqua bags, 3 drab plum bags, 1 vibrant salmon bag. light gray bags contain 1 striped fuchsia bag, 1 shiny gold bag, 3 clear gold bags, 3 bright fuchsia bags. dim turquoise bags contain 5 dull aqua bags. faded coral bags contain no other bags. dark maroon bags contain 1 dark turquoise bag, 4 light fuchsia bags, 5 shiny plum bags. striped black bags contain 2 faded tomato bags, 1 vibrant turquoise bag, 4 muted bronze bags. plaid gold bags contain 4 wavy aqua bags, 4 vibrant white bags. dim gray bags contain 5 wavy silver bags. wavy brown bags contain 3 bright bronze bags, 2 dotted gray bags. bright fuchsia bags contain 2 mirrored aqua bags, 4 shiny green bags, 5 vibrant black bags, 1 bright tomato bag. dark chartreuse bags contain 5 shiny white bags, 4 dotted salmon bags, 5 shiny purple bags, 5 faded red bags. dark plum bags contain 2 clear gold bags, 2 pale gray bags. dull maroon bags contain 2 mirrored chartreuse bags, 1 clear orange bag, 4 mirrored black bags, 5 clear violet bags. plaid maroon bags contain 3 bright purple bags, 3 bright red bags, 3 faded purple bags. 
dull coral bags contain 5 drab gray bags, 2 dotted chartreuse bags. shiny coral bags contain 3 vibrant black bags, 4 bright chartreuse bags, 3 faded indigo bags. clear crimson bags contain 1 drab tomato bag, 4 mirrored white bags, 2 dim violet bags. drab aqua bags contain 2 dull chartreuse bags, 1 pale crimson bag, 4 posh lime bags. vibrant aqua bags contain 1 wavy cyan bag. striped fuchsia bags contain 5 bright tomato bags, 2 wavy plum bags, 2 faded plum bags, 4 clear green bags. shiny plum bags contain 2 posh red bags, 3 faded plum bags, 2 striped beige bags, 1 mirrored turquoise bag. muted purple bags contain 5 bright white bags, 4 dotted chartreuse bags, 1 drab black bag. posh bronze bags contain 3 muted cyan bags, 4 faded coral bags, 3 wavy plum bags. light turquoise bags contain 4 mirrored silver bags, 2 bright gray bags, 5 mirrored maroon bags. vibrant fuchsia bags contain 5 faded tomato bags, 1 posh fuchsia bag, 3 posh maroon bags, 1 plaid salmon bag. muted orange bags contain 1 striped gray bag, 2 dull bronze bags, 1 dull silver bag. dotted cyan bags contain 4 clear olive bags. dull gray bags contain 5 mirrored maroon bags, 4 dim coral bags, 2 muted olive bags. clear red bags contain 3 dark fuchsia bags, 4 mirrored fuchsia bags. clear indigo bags contain 4 muted salmon bags. striped green bags contain 4 dull maroon bags. vibrant chartreuse bags contain 1 muted aqua bag, 5 clear lime bags. mirrored beige bags contain 3 dark teal bags. vibrant lime bags contain 4 plaid fuchsia bags. dotted brown bags contain 3 bright gray bags, 5 wavy yellow bags, 1 muted maroon bag. striped lime bags contain 3 muted lavender bags. vibrant magenta bags contain 4 faded coral bags, 1 dim bronze bag. drab olive bags contain 3 shiny indigo bags, 1 dim lime bag. dotted orange bags contain 5 muted white bags, 3 striped gray bags. bright lavender bags contain 4 faded purple bags, 1 bright chartreuse bag. 
pale maroon bags contain 5 vibrant gray bags, 4 shiny red bags, 3 drab aqua bags, 4 wavy tan bags. dotted aqua bags contain 1 mirrored lime bag. wavy orange bags contain 3 dull green bags, 4 light lavender bags. faded maroon bags contain 3 vibrant gray bags, 2 muted lavender bags. dim silver bags contain 2 dotted chartreuse bags, 4 clear violet bags, 2 clear red bags. dull turquoise bags contain 5 faded olive bags, 3 drab magenta bags. shiny red bags contain 4 clear tan bags, 2 dull red bags, 5 dark tomato bags. posh orange bags contain 4 muted lavender bags, 5 dim violet bags, 5 striped aqua bags. plaid white bags contain 5 mirrored cyan bags, 2 light brown bags, 3 muted tan bags. shiny magenta bags contain 3 faded coral bags, 5 posh gray bags. bright bronze bags contain 4 wavy blue bags, 2 clear violet bags. mirrored orange bags contain 2 dim green bags, 2 striped red bags, 4 drab plum bags, 1 pale teal bag.`
//---------------------------------------------Paquetes E Imports------------------------------------------------------- package AnalisisYComandos import ( "../Metodos" "../Variables" "bufio" "bytes" "fmt" "github.com/asaskevich/govalidator" "github.com/gookit/color" "os" "strconv" "strings" "unsafe" ) //-----------------------------------------------------Métodos---------------------------------------------------------- func VerificarComandoFdisk() { //Variables var CrearParticion bool var ArregloParametros []string //Asignación CrearParticion = true //Verificación De Parametros if len(Variables.ArregloComandos) > 1 { for Contador := 1; Contador <= len(Variables.ArregloComandos) - 1; Contador++ { //Obtener Parametro Variables.ArregloComandos[Contador] = Metodos.Trim(Variables.ArregloComandos[Contador]) ArregloParametros = Metodos.SplitParametro(Variables.ArregloComandos[Contador]) ArregloParametros[0] = strings.ToLower(ArregloParametros[0]) ArregloParametros[0] = Metodos.Trim(ArregloParametros[0]) if ArregloParametros[0] == "delete" { VerificarDeleteFdisk() CrearParticion = false break } else if ArregloParametros[0] == "add" { VerificarAddFdisk() CrearParticion = false break } } if CrearParticion { VerificarCrearFdisk() } } else { color.HEX("#de4843", false).Println("Debe De Colocar Todos Los Parametros Obligatorios") fmt.Println("") } } func VerificarDeleteFdisk() { //Variables var Delete bool var Path bool var Name bool var ParametroExtra bool var ArregloParametros []string var ArregloArchivo []string var ContadorDelete int var ContadorPath int var ContadorName int //Asignación Delete = false Path = false Name = false ParametroExtra = false ContadorDelete = 0 ContadorPath = 0 ContadorName = 0 Variables.MapComandos = make(map[string]string) //Verificación De Parametros if len(Variables.ArregloComandos) > 1 { for Contador := 1; Contador <= len(Variables.ArregloComandos) - 1; Contador++ { //Obtener Parametro Variables.ArregloComandos[Contador] = 
Metodos.Trim(Variables.ArregloComandos[Contador]) ArregloParametros = Metodos.SplitParametro(Variables.ArregloComandos[Contador]) ArregloParametros[0] = strings.ToLower(ArregloParametros[0]) ArregloParametros[0] = Metodos.Trim(ArregloParametros[0]) switch ArregloParametros[0] { case "delete": if ContadorDelete == 0 { if len(ArregloParametros) > 1 { ArregloParametros[1] = strings.ToLower(Metodos.Trim(ArregloParametros[1])) if ArregloParametros[1] == "fast" { Variables.MapComandos["delete"] = "fast" Delete = true } else if ArregloParametros[1] == "full" { Variables.MapComandos["delete"] = "full" Delete = true } else { color.HEX("#de4843", false).Println("En El Parametro delete Debe De Ingresar La palabra full O fast") fmt.Println("") Delete = false } ContadorDelete++ } else { Delete = false } } else { ContadorDelete++ } case "path": if ContadorPath == 0 { if len(ArregloParametros) > 1 { ArregloParametros[1] = Metodos.QuitarComillas(ArregloParametros[1]) ArregloParametros[1] = Metodos.Trim(ArregloParametros[1]) Path = Metodos.ExisteRuta(ArregloParametros[1]) if Path { ArregloArchivo = Metodos.SplitArchivo(ArregloParametros[1]) if len(ArregloArchivo) > 1 { if ArregloArchivo[1] == "dsk" { Variables.MapComandos["path"] = ArregloParametros[1] Path = true } else { color.HEX("#de4843", false).Println("La Extension Del Archivo Debe De Ser .dsk") fmt.Println("") Path = false } } else { color.HEX("#de4843", false).Println("Debe Indicar La Extension Del Archivo") fmt.Println("") Path = false } } ContadorPath++ } else { Path = false } } else { ContadorPath++ } case "name": if ContadorName == 0 { if len(ArregloParametros) > 1 { ArregloParametros[1] = Metodos.QuitarComillas(ArregloParametros[1]) ArregloParametros[1] = Metodos.Trim(ArregloParametros[1]) Variables.MapComandos["name"] = Metodos.Trim(ArregloParametros[1]) Name = true ContadorName++ } else { Name = false } } else { ContadorName++ } default: ParametroExtra = true } } } if Path && Name && Delete && !ParametroExtra && 
ContadorPath == 1 && ContadorDelete == 1 && ContadorName == 1 { VerificarMBRDelete() } else { if ParametroExtra { color.HEX("#de4843", false).Println("Parametro Especificado No Valido") color.HEX("#de4843", false).Println("Parametros Validos: ") color.HEX("#de4843", false).Println("1). -path-> (Obligatorio)") color.HEX("#de4843", false).Println( "2). -delete-> (Obligatorio)") color.HEX("#de4843", false).Println( "3). -name-> (Obligatorio)") fmt.Println("") } if !Path { color.HEX("#de4843", false).Println("No Se Encuentra El Parametro -path-> o") color.HEX("#de4843", false).Println("El Archivo No Existe") fmt.Println("") } if !Delete { color.HEX("#de4843", false).Println("No Se Encuentra El Parametro -Delete-> o") color.HEX("#de4843", false).Println("Existe Error En La Sintaxis") fmt.Println("") } if !Name { color.HEX("#de4843", false).Println("No Se Encuentra el Parametro -name-> o") color.HEX("#de4843", false).Println("Existe Error En La Sintaxis") fmt.Println("") } if ContadorDelete > 1 || ContadorPath > 1 || ContadorName > 1 { color.HEX("#de4843", false).Println("Existen Demasiados Parametros") fmt.Println("") } } } func VerificarAddFdisk() { //Variables var Add bool var Path bool var Name bool var Unit bool var ParametroExtra bool var ArregloParametros []string var ArregloArchivo []string var ContadorAdd int var ContadorPath int var ContadorName int var ContadorUnit int var ContadorAuxiliar int //Asignación Add = false Path = false Name = false Unit = true ParametroExtra = false ContadorAdd = 0 ContadorPath = 0 ContadorName = 0 ContadorUnit = 0 ContadorAuxiliar = 0 Variables.MapComandos = make(map[string]string) Variables.MapComandos["unit"] = "1024" //Verificación De Parametros if len(Variables.ArregloComandos) > 1 { for Contador := 1; Contador <= len(Variables.ArregloComandos) - 1; Contador++ { //Obtener Parametro Variables.ArregloComandos[Contador] = Metodos.Trim(Variables.ArregloComandos[Contador]) ArregloParametros = 
Metodos.SplitParametro(Variables.ArregloComandos[Contador]) ArregloParametros[0] = strings.ToLower(ArregloParametros[0]) ArregloParametros[0] = Metodos.Trim(ArregloParametros[0]) switch ArregloParametros[0] { case "add": ContadorAuxiliar = Contador + 1 if ContadorAuxiliar < len(Variables.ArregloComandos) { //Obtener Parametro Variables.ArregloComandos[ContadorAuxiliar] = Metodos.Trim(Variables.ArregloComandos[ContadorAuxiliar]) ArregloParametros = Metodos.SplitParametro(Variables.ArregloComandos[ContadorAuxiliar]) //Verificar Si ES Digito if govalidator.IsInt(ArregloParametros[0]) { ArregloParametros = append(ArregloParametros, "-" + ArregloParametros[0]) Contador += 1 } else { //Obtener Parametro Variables.ArregloComandos[Contador] = Metodos.Trim(Variables.ArregloComandos[Contador]) ArregloParametros = Metodos.SplitParametro(Variables.ArregloComandos[Contador]) } } if ContadorAdd == 0 { if len(ArregloParametros) > 1 { ArregloParametros[1] = Metodos.Trim(ArregloParametros[1]) var Tamanio int var ErrorEntero error Tamanio, ErrorEntero = strconv.Atoi(ArregloParametros[1]) if ErrorEntero != nil { color.HEX("#de4843", false).Println("El Parametro add Debe Ser Un Número") fmt.Println("") } else { if Tamanio > 0 || Tamanio < 0 { Variables.MapComandos["add"] = ArregloParametros[1] Add = true } else { Add = false color.HEX("#de4843", false).Println("El Parametro Add No Puede Ser 0") fmt.Println("") } ContadorAdd++ } } else { Add = false } } else { ContadorAdd++ } case "path": if ContadorPath == 0 { if len(ArregloParametros) > 1 { ArregloParametros[1] = Metodos.QuitarComillas(ArregloParametros[1]) ArregloParametros[1] = Metodos.Trim(ArregloParametros[1]) Path = Metodos.ExisteRuta(ArregloParametros[1]) if Path { ArregloArchivo = Metodos.SplitArchivo(ArregloParametros[1]) if len(ArregloArchivo) > 1 { if ArregloArchivo[1] == "dsk" { Variables.MapComandos["path"] = ArregloParametros[1] Path = true } else { color.HEX("#de4843", false).Println("La Extension Del Archivo Debe De 
Ser .dsk") fmt.Println("") Path = false } } else { color.HEX("#de4843", false).Println("Debe Indicar La Extension Del Archivo") fmt.Println("") Path = false } } ContadorPath++ } else { Path = false } } else { ContadorPath++ } case "name": if ContadorName == 0 { if len(ArregloParametros) > 1 { ArregloParametros[1] = Metodos.QuitarComillas(ArregloParametros[1]) ArregloParametros[1] = Metodos.Trim(ArregloParametros[1]) Variables.MapComandos["name"] = Metodos.Trim(ArregloParametros[1]) Name = true ContadorName++ } else { Name = false } } else { ContadorName++ } case "unit": if ContadorUnit == 0 { if len(ArregloParametros) > 1 { ArregloParametros[1] = strings.ToLower(Metodos.Trim(ArregloParametros[1])) if ArregloParametros[1] == "k" { Variables.MapComandos["unit"] = "1024" Unit = true } else if ArregloParametros[1] == "m" { Variables.MapComandos["unit"] = "1048576" Unit = true } else if ArregloParametros[1] == "b" { Variables.MapComandos["unit"] = "1" Unit = true } else { color.HEX("#de4843", false).Println("En El Parametro Unit Debe De Ingresar La Letra m (Megabytes) O La Letra k (Kylobytes) O La Letra b (Bytes)") fmt.Println("") Unit = false } ContadorUnit++ } else { Unit = false } } else { ContadorUnit++ } default: ParametroExtra = true } } } if Path && Name && Add && Unit && !ParametroExtra && ContadorPath == 1 && ContadorAdd == 1 && ContadorName == 1 && (ContadorUnit == 1 || ContadorUnit == 0) { VerificarMBRAdd() } else { if ParametroExtra { color.HEX("#de4843", false).Println("Parametro Especificado No Valido") color.HEX("#de4843", false).Println("Parametros Validos: ") color.HEX("#de4843", false).Println("1). -path-> (Obligatorio)") color.HEX("#de4843", false).Println( "2). -add-> (Obligatorio)") color.HEX("#de4843", false).Println( "3). -name-> (Obligatorio)") color.HEX("#de4843", false).Println( "4). 
-unit-> (Opcional)") fmt.Println("") } if !Path { color.HEX("#de4843", false).Println("No Se Encuentra El Parametro -path-> o") color.HEX("#de4843", false).Println("El Archivo No Existe") fmt.Println("") } if !Add { color.HEX("#de4843", false).Println("No Se Encuentra El Parametro -add-> o") color.HEX("#de4843", false).Println("Existe Error En La Sintaxis") fmt.Println("") } if !Name { color.HEX("#de4843", false).Println("No Se Encuentra el Parametro -name-> o") color.HEX("#de4843", false).Println("Existe Error En La Sintaxis") fmt.Println("") } if !Unit { color.HEX("#de4843", false).Println("No Se Encuentra el Parametro -unit-> o") color.HEX("#de4843", false).Println("Existe Error En La Sintaxis") fmt.Println("") } if ContadorAdd > 1 || ContadorPath > 1 || ContadorName > 1 || ContadorUnit > 1 { color.HEX("#de4843", false).Println("Existen Demasiados Parametros") fmt.Println("") } } } func VerificarCrearFdisk() { //Variables var Size bool var Path bool var Name bool var Unit bool var Type bool var Fit bool var ParametroExtra bool var ArregloParametros []string var ArregloArchivo []string var ContadorSize int var ContadorPath int var ContadorName int var ContadorUnit int var ContadorType int var ContadorFit int //Asignación Size = false Path = false Name = false Unit = true Type = true Fit = true ParametroExtra = false ContadorSize = 0 ContadorPath = 0 ContadorName = 0 ContadorUnit = 0 ContadorType = 0 ContadorFit = 0 Variables.MapComandos = make(map[string]string) Variables.MapComandos["unit"] = "1024" Variables.MapComandos["type"] = "p" Variables.MapComandos["fit"] = "wf" //Verificación De Parametros if len(Variables.ArregloComandos) > 1 { for Contador := 1; Contador <= len(Variables.ArregloComandos) - 1; Contador++ { //Obtener Parametro Variables.ArregloComandos[Contador] = Metodos.Trim(Variables.ArregloComandos[Contador]) ArregloParametros = Metodos.SplitParametro(Variables.ArregloComandos[Contador]) ArregloParametros[0] = strings.ToLower(ArregloParametros[0]) 
ArregloParametros[0] = Metodos.Trim(ArregloParametros[0]) switch ArregloParametros[0] { case "size": if ContadorSize == 0 { if len(ArregloParametros) > 1 { ArregloParametros[1] = Metodos.Trim(ArregloParametros[1]) var Tamanio int var ErrorEntero error Tamanio, ErrorEntero = strconv.Atoi(ArregloParametros[1]) if ErrorEntero != nil { color.HEX("#de4843", false).Println("El Parametro Size Debe Ser Un Número") fmt.Println("") } else { if Tamanio > 0 { Variables.MapComandos["size"] = ArregloParametros[1] Size = true } else { Size = false color.HEX("#de4843", false).Println("El Parametro Size Debe Ser Un Número Mayor A 0") fmt.Println("") } ContadorSize++ } } else { Size = false } } else { ContadorSize++ } case "path": if ContadorPath == 0 { if len(ArregloParametros) > 1 { ArregloParametros[1] = Metodos.QuitarComillas(ArregloParametros[1]) ArregloParametros[1] = Metodos.Trim(ArregloParametros[1]) Path = Metodos.ExisteRuta(ArregloParametros[1]) if Path { ArregloArchivo = Metodos.SplitArchivo(ArregloParametros[1]) if len(ArregloArchivo) > 1 { if ArregloArchivo[1] == "dsk" { Variables.MapComandos["path"] = ArregloParametros[1] Path = true } else { color.HEX("#de4843", false).Println("La Extension Del Archivo Debe De Ser .dsk") fmt.Println("") Path = false } } else { color.HEX("#de4843", false).Println("Debe Indicar La Extension Del Archivo") fmt.Println("") Path = false } } ContadorPath++ } else { Path = false } } else { ContadorPath++ } case "name": if ContadorName == 0 { if len(ArregloParametros) > 1 { ArregloParametros[1] = Metodos.QuitarComillas(ArregloParametros[1]) ArregloParametros[1] = Metodos.Trim(ArregloParametros[1]) Variables.MapComandos["name"] = Metodos.Trim(ArregloParametros[1]) Name = true ContadorName++ } else { Name = false } } else { ContadorName++ } case "unit": if ContadorUnit == 0 { if len(ArregloParametros) > 1 { ArregloParametros[1] = strings.ToLower(Metodos.Trim(ArregloParametros[1])) if ArregloParametros[1] == "k" { Variables.MapComandos["unit"] = 
"1024" Unit = true } else if ArregloParametros[1] == "m" { Variables.MapComandos["unit"] = "1048576" Unit = true } else if ArregloParametros[1] == "b" { Variables.MapComandos["unit"] = "1" Unit = true } else { color.HEX("#de4843", false).Println("En El Parametro Unit Debe De Ingresar La Letra m (Megabytes) O La Letra k (Kylobytes) O La Letra b (Bytes)") fmt.Println("") Unit = false } ContadorUnit++ } else { Unit = false } } else { ContadorUnit++ } case "type": if ContadorType == 0 { if len(ArregloParametros) > 1 { ArregloParametros[1] = strings.ToLower(Metodos.Trim(ArregloParametros[1])) if ArregloParametros[1] == "p" { Variables.MapComandos["type"] = "p" Type = true } else if ArregloParametros[1] == "e" { Variables.MapComandos["type"] = "e" Type = true } else if ArregloParametros[1] == "l" { Variables.MapComandos["type"] = "l" Type = true } else { color.HEX("#de4843", false).Println("En El Parametro Type Debe De Ingresar La Letra p (Primaria) O La Letra e (Extendida) O La Letra l (Logica)") fmt.Println("") Type = false } ContadorType++ } else { Type = false } } else { ContadorType++ } case "fit": if ContadorFit == 0 { if len(ArregloParametros) > 1 { ArregloParametros[1] = strings.ToLower(Metodos.Trim(ArregloParametros[1])) if ArregloParametros[1] == "bf" { Variables.MapComandos["fit"] = "bf" Fit = true } else if ArregloParametros[1] == "ff" { Variables.MapComandos["fit"] = "ff" Fit = true } else if ArregloParametros[1] == "wf" { Variables.MapComandos["fit"] = "wf" Fit = true } else { color.HEX("#de4843", false).Println("En El Parametro Fit Debe De Ingresar Las Letras bf (Best Fit) O Las Letras ff (First Fit) O Las Letras wf (Worst Fit)") fmt.Println("") Fit = false } ContadorFit++ } else { Fit = false } } else { ContadorFit++ } default: ParametroExtra = true } } } if Path && Size && Name && Unit && Fit && Type && !ParametroExtra && ContadorPath == 1 && ContadorSize == 1 && ContadorName == 1 && (ContadorUnit == 1 || ContadorUnit == 0) && (ContadorFit == 1 || 
ContadorFit == 0) && (ContadorType == 1 || ContadorType == 0){ VerificarMBRCrear() } else { if ParametroExtra { color.HEX("#de4843", false).Println("Parametro Especificado No Valido") color.HEX("#de4843", false).Println("Parametros Validos: ") color.HEX("#de4843", false).Println("1). -path-> (Obligatorio)") color.HEX("#de4843", false).Println( "2). -size-> (Obligatorio)") color.HEX("#de4843", false).Println( "3). -name-> (Obligatorio)") color.HEX("#de4843", false).Println( "4). -unit-> (Opcional)") color.HEX("#de4843", false).Println( "5). -fit-> (Opcional)") color.HEX("#de4843", false).Println( "6). -type-> (Opcional)") fmt.Println("") } if !Path { color.HEX("#de4843", false).Println("No Se Encuentra El Parametro -path-> o") color.HEX("#de4843", false).Println("No Existe El Archivo") fmt.Println("") } if !Size { color.HEX("#de4843", false).Println("No Se Encuentra El Parametro -size-> o") color.HEX("#de4843", false).Println("Existe Error En La Sintaxis") fmt.Println("") } if !Name { color.HEX("#de4843", false).Println("No Se Encuentra el Parametro -name-> o") color.HEX("#de4843", false).Println("Existe Error En La Sintaxis") fmt.Println("") } if !Unit { color.HEX("#de4843", false).Println("Existe Error En La Sintaxis En El Paremtro -unit->") fmt.Println("") } if !Fit { color.HEX("#de4843", false).Println("Existe Error En La Sintaxis En El Paremtro -fit->") fmt.Println("") } if !Type { color.HEX("#de4843", false).Println("Existe Error En La Sintaxis En El Paremtro -type->") fmt.Println("") } if ContadorSize > 1 || ContadorPath > 1 || ContadorName > 1 || ContadorUnit > 1 || ContadorFit > 1 || ContadorType > 1 { color.HEX("#de4843", false).Println("Existen Demasiados Parametros") fmt.Println("") } } } func VerificarMBRCrear() { //Variables var MBRAuxiliar Variables.MBREstructura var Bandera bool var BanderaExtendida bool var InicioListaLogica int64 var SizeExtendida int64 //Asignacion MBRAuxiliar, Bandera = 
Metodos.LeerArchivoBinarioArraglo(Variables.MapComandos["path"])
	if Bandera {
		if !VerificarNombreParticionCrear(MBRAuxiliar) {
			if Variables.MapComandos["type"] == "l" {
				// Logical partitions live inside the single extended partition.
				BanderaExtendida, InicioListaLogica, SizeExtendida = VerficiarExisteParticionExtendida(MBRAuxiliar)
				if BanderaExtendida {
					if VerificarSizeParticionExtendida(InicioListaLogica, SizeExtendida) {
						ComandoFdiskCrearParticionLogica(InicioListaLogica, SizeExtendida)
					} else {
						color.HEX("#de4843", false).Println("Error No Hay Espacio Suficiente Para Crear La Partición Logica")
						fmt.Println("")
					}
				} else {
					color.HEX("#de4843", false).Println("Error Debe Existir Un Particion Extendida Para Crear Particiones Logicas")
					fmt.Println("")
				}
			} else if Variables.MapComandos["type"] == "p" || Variables.MapComandos["type"] == "e" {
				if !VerificarSizeParticionCrear(MBRAuxiliar) {
					if !VerificarTipoParticionCrear(MBRAuxiliar) {
						ComandoFdiskCrearParticion(MBRAuxiliar)
					}
				} else {
					color.HEX("#de4843", false).Println("Error No Hay Espacio Suficiente Para Crear La Partición")
					fmt.Println("")
				}
			}
		} else {
			color.HEX("#de4843", false).Println("Error Ya Existe Una Particion Con El Nombre Indicado")
			fmt.Println("")
		}
	} else {
		color.HEX("#de4843", false).Println("Error Al Ejecutar El Comando fdisk")
		color.HEX("#de4843", false).Println("El Disco Se Encuentra Corrupto")
		fmt.Println("")
	}
}

// VerificarMBRDelete reads the MBR from the disk file in MapComandos["path"]
// and dispatches the delete either to an MBR slot (primary/extended) or to
// the logical-partition chain inside the extended partition.
func VerificarMBRDelete() {
	var MBRAuxiliar Variables.MBREstructura
	var Bandera bool
	var ExisteNombre bool
	var PrimariaExtendida bool
	var NumeroParticion int
	var InicioExtendida int64
	MBRAuxiliar, Bandera = Metodos.LeerArchivoBinarioArraglo(Variables.MapComandos["path"])
	if Bandera {
		ExisteNombre, PrimariaExtendida, NumeroParticion, InicioExtendida = VerificarNombreParticionDelete(MBRAuxiliar)
		if ExisteNombre {
			if PrimariaExtendida {
				// Name matched one of the four MBR slots.
				ComandoFdiskDeleteParticion(MBRAuxiliar, NumeroParticion)
			} else {
				// Name only matched an EBR inside the extended partition.
				ComandoFdiskDeleteParticionLogica(InicioExtendida)
			}
		} else {
			color.HEX("#de4843", false).Println("No Existe La Particion Indicada En El Disco")
			fmt.Println("")
		}
	} else {
		color.HEX("#de4843", false).Println("Error Al Ejecutar El Comando fdisk")
		color.HEX("#de4843", false).Println("El Disco Se Encuentra Corrupto")
		fmt.Println("")
	}
}

// VerificarMBRAdd reads the MBR and dispatches the size change (fdisk -add)
// to an MBR-slot partition or to a logical partition, mirroring
// VerificarMBRDelete.
func VerificarMBRAdd() {
	var MBRAuxiliar Variables.MBREstructura
	var Bandera bool
	var ExisteNombre bool
	var PrimariaExtendida bool
	var NumeroParticion int
	var InicioExtendida int64
	var SizeExtendida int64
	MBRAuxiliar, Bandera = Metodos.LeerArchivoBinarioArraglo(Variables.MapComandos["path"])
	if Bandera {
		ExisteNombre, PrimariaExtendida, NumeroParticion, InicioExtendida, SizeExtendida = VerificarNombreParticionAdd(MBRAuxiliar)
		if ExisteNombre {
			if PrimariaExtendida {
				ComandoFdiskAddParticion(MBRAuxiliar, NumeroParticion)
			} else {
				ComandoFdiskAddParticionLogica(InicioExtendida, SizeExtendida)
			}
		} else {
			color.HEX("#de4843", false).Println("No Existe La Particion Indicada En El Disco")
			fmt.Println("")
		}
	} else {
		color.HEX("#de4843", false).Println("Error Al Ejecutar El Comando fdisk")
		color.HEX("#de4843", false).Println("El Disco Se Encuentra Corrupto")
		fmt.Println("")
	}
}

// VerificarNombreParticionCrear reports whether the partition name requested
// in MapComandos["name"] already exists on the disk: it checks the four MBR
// slots (case-insensitively) and, when an extended partition exists, every
// logical partition's EBR as well.
func VerificarNombreParticionCrear(MBRAuxiliar Variables.MBREstructura) bool {
	var Nombre string
	var NombreArray1 string
	var NombreArray2 string
	var NombreArray3 string
	var NombreArray4 string
	var NombreExtendida string
	var Bandera bool
	var InicioExtendida int64
	var ArregloEBR []Variables.EBREstructura
	Nombre = Metodos.Trim(strings.ToLower(Variables.MapComandos["name"]))
	Bandera = false
	InicioExtendida = 0
	// NamePart is a fixed-size byte array; strip NUL padding before comparing.
	NombreArray1 = string(bytes.Trim(MBRAuxiliar.Particion1MBR.NamePart[:], "\x00"))
	NombreArray2 = string(bytes.Trim(MBRAuxiliar.Particion2MBR.NamePart[:], "\x00"))
	NombreArray3 = string(bytes.Trim(MBRAuxiliar.Particion3MBR.NamePart[:], "\x00"))
	NombreArray4 = string(bytes.Trim(MBRAuxiliar.Particion4MBR.NamePart[:], "\x00"))
	if MBRAuxiliar.Particion1MBR.SizePart != 0 {
		if strings.EqualFold(Nombre, NombreArray1) {
			Bandera = true
		}
		// BUG FIX: this 'e' check was nested inside the name match above, so an
		// extended partition in slot 1 was only detected when its own name
		// matched; slots 2-4 perform the check unconditionally. Made slot 1
		// consistent so the EBR name scan below always runs.
		if MBRAuxiliar.Particion1MBR.TipoPart == 'e' {
			InicioExtendida = MBRAuxiliar.Particion1MBR.InicioPart
		}
	}
	if MBRAuxiliar.Particion2MBR.SizePart != 0 {
		if strings.EqualFold(Nombre, NombreArray2) {
			Bandera = true
		}
		if MBRAuxiliar.Particion2MBR.TipoPart == 'e' {
			InicioExtendida = MBRAuxiliar.Particion2MBR.InicioPart
		}
	}
	if MBRAuxiliar.Particion3MBR.SizePart != 0 {
		if strings.EqualFold(Nombre, NombreArray3) {
			Bandera = true
		}
		if MBRAuxiliar.Particion3MBR.TipoPart == 'e' {
			InicioExtendida = MBRAuxiliar.Particion3MBR.InicioPart
		}
	}
	if MBRAuxiliar.Particion4MBR.SizePart != 0 {
		if strings.EqualFold(Nombre, NombreArray4) {
			Bandera = true
		}
		if MBRAuxiliar.Particion4MBR.TipoPart == 'e' {
			InicioExtendida = MBRAuxiliar.Particion4MBR.InicioPart
		}
	}
	if InicioExtendida != 0 {
		// Also check every logical partition's EBR for a name collision.
		ArregloEBR = ObtenerEBR(InicioExtendida)
		for Contador := 0; Contador < len(ArregloEBR); Contador++ {
			NombreExtendida = string(bytes.Trim(ArregloEBR[Contador].NameEBR[:], "\x00"))
			if strings.EqualFold(Variables.MapComandos["name"], NombreExtendida) {
				Bandera = true
			}
		}
	}
	return Bandera
}

// VerificarSizeParticionCrear returns true when the disk is already full,
// i.e. the sum of the existing partitions' sizes reaches the disk size.
// NOTE(review): it does not account for the size being requested — the
// free-space scan in ComandoFdiskCrearParticion enforces the actual fit.
func VerificarSizeParticionCrear(MBRAuxiliar Variables.MBREstructura) bool {
	var Bandera bool
	var SizeUsado int64
	var SizeDisco int64
	Bandera = false
	SizeUsado = 0
	SizeDisco = MBRAuxiliar.SizeMbr
	if MBRAuxiliar.Particion1MBR.SizePart != 0 {
		SizeUsado += MBRAuxiliar.Particion1MBR.SizePart
	}
	if MBRAuxiliar.Particion2MBR.SizePart != 0 {
		SizeUsado += MBRAuxiliar.Particion2MBR.SizePart
	}
	if MBRAuxiliar.Particion3MBR.SizePart != 0 {
		SizeUsado += MBRAuxiliar.Particion3MBR.SizePart
	}
	if MBRAuxiliar.Particion4MBR.SizePart != 0 {
		SizeUsado += MBRAuxiliar.Particion4MBR.SizePart
	}
	if SizeUsado >= SizeDisco {
		Bandera = true
	} else {
		Bandera = false
	}
	return Bandera
}

// VerificarTipoParticionCrear returns true when the requested partition TYPE
// cannot be created (already 4 MBR slots used, or a second extended partition
// was requested). Body continues past this edit.
func VerificarTipoParticionCrear(MBRAuxiliar Variables.MBREstructura) bool {
	var Bandera bool
	var Extendidas int
	var Primarias int
	var Total int
	Bandera = false
	Extendidas = 0
	Primarias = 0
	Total = 0
	if
MBRAuxiliar.Particion1MBR.SizePart != 0 { if string(MBRAuxiliar.Particion1MBR.TipoPart) == "p" { Primarias++ } else if string(MBRAuxiliar.Particion1MBR.TipoPart) == "e" { Extendidas++ } } if MBRAuxiliar.Particion2MBR.SizePart != 0 { if string(MBRAuxiliar.Particion2MBR.TipoPart) == "p" { Primarias++ } else if string(MBRAuxiliar.Particion2MBR.TipoPart) == "e" { Extendidas++ } } if MBRAuxiliar.Particion3MBR.SizePart != 0 { if string(MBRAuxiliar.Particion3MBR.TipoPart) == "p" { Primarias++ } else if string(MBRAuxiliar.Particion3MBR.TipoPart) == "e" { Extendidas++ } } if MBRAuxiliar.Particion4MBR.SizePart != 0 { if string(MBRAuxiliar.Particion4MBR.TipoPart) == "p" { Primarias++ } else if string(MBRAuxiliar.Particion4MBR.TipoPart) == "e" { Extendidas++ } } Total = Primarias + Extendidas if Total < 4 { if Variables.MapComandos["type"] == "p" { Bandera = false } else if Variables.MapComandos["type"] == "e" { if Extendidas == 1 { color.HEX("#de4843", false).Println("Unicamente Se Pueden Crear Una Particion Extendida En El Disco") fmt.Println("") Bandera = true } else { Bandera = false } } } else if Total == 4 { color.HEX("#de4843", false).Println("Ya No Se Pueden Crear Mas Particiones Ya Que Existen 4") fmt.Println("") Bandera = true } return Bandera } func VerificarNombreParticionDelete(MBRAuxiliar Variables.MBREstructura) (bool, bool, int, int64) { //Variables var Nombre string var NombreArray1 string var NombreArray2 string var NombreArray3 string var NombreArray4 string var NombreExtendida string var Bandera bool var PrimariaExtendida bool var NumeroParticion int var InicioExtendida int64 var ArregloEBR []Variables.EBREstructura //Asignación Nombre = Metodos.Trim(strings.ToLower(Variables.MapComandos["name"])) Bandera = false NumeroParticion = 0 NombreArray1 = string(bytes.Trim(MBRAuxiliar.Particion1MBR.NamePart[:], "\x00")) NombreArray2 = string(bytes.Trim(MBRAuxiliar.Particion2MBR.NamePart[:], "\x00")) NombreArray3 = 
string(bytes.Trim(MBRAuxiliar.Particion3MBR.NamePart[:], "\x00")) NombreArray4 = string(bytes.Trim(MBRAuxiliar.Particion4MBR.NamePart[:], "\x00")) if MBRAuxiliar.Particion1MBR.SizePart != 0 { if strings.EqualFold(Nombre, NombreArray1) { Bandera = true PrimariaExtendida = true NumeroParticion = 1 if MBRAuxiliar.Particion1MBR.TipoPart == 'e' { InicioExtendida = MBRAuxiliar.Particion1MBR.InicioPart } } } if MBRAuxiliar.Particion2MBR.SizePart != 0 { if strings.EqualFold(Nombre, NombreArray2) { Bandera = true PrimariaExtendida = true NumeroParticion = 2 } if MBRAuxiliar.Particion2MBR.TipoPart == 'e' { InicioExtendida = MBRAuxiliar.Particion2MBR.InicioPart } } if MBRAuxiliar.Particion3MBR.SizePart != 0 { if strings.EqualFold(Nombre, NombreArray3) { Bandera = true PrimariaExtendida = true NumeroParticion = 3 } if MBRAuxiliar.Particion3MBR.TipoPart == 'e' { InicioExtendida = MBRAuxiliar.Particion3MBR.InicioPart } } if MBRAuxiliar.Particion4MBR.SizePart != 0 { if strings.EqualFold(Nombre, NombreArray4) { Bandera = true PrimariaExtendida = true NumeroParticion = 4 } if MBRAuxiliar.Particion4MBR.TipoPart == 'e' { InicioExtendida = MBRAuxiliar.Particion4MBR.InicioPart } } if InicioExtendida != 0 { ArregloEBR = ObtenerEBR(InicioExtendida) for Contador := 0; Contador < len(ArregloEBR); Contador++ { NombreExtendida = string(bytes.Trim(ArregloEBR[Contador].NameEBR[:], "\x00")) if strings.EqualFold(Variables.MapComandos["name"], NombreExtendida) { Bandera = true } } } return Bandera, PrimariaExtendida, NumeroParticion, InicioExtendida } func VerficiarExisteParticionExtendida(MBRAuxiliar Variables.MBREstructura) (bool, int64, int64) { //Variables var Bandera bool var Extendidas int var Primarias int var InicioParticion int64 var SizeParticion int64 //Asignación Bandera = false Extendidas = 0 Primarias = 0 if MBRAuxiliar.Particion1MBR.SizePart != 0 { if string(MBRAuxiliar.Particion1MBR.TipoPart) == "p" { Primarias++ } else if string(MBRAuxiliar.Particion1MBR.TipoPart) == "e" { 
InicioParticion = MBRAuxiliar.Particion1MBR.InicioPart SizeParticion = MBRAuxiliar.Particion1MBR.SizePart Extendidas++ } } if MBRAuxiliar.Particion2MBR.SizePart != 0 { if string(MBRAuxiliar.Particion2MBR.TipoPart) == "p" { Primarias++ } else if string(MBRAuxiliar.Particion2MBR.TipoPart) == "e" { InicioParticion = MBRAuxiliar.Particion2MBR.InicioPart SizeParticion = MBRAuxiliar.Particion2MBR.SizePart Extendidas++ } } if MBRAuxiliar.Particion3MBR.SizePart != 0 { if string(MBRAuxiliar.Particion3MBR.TipoPart) == "p" { Primarias++ } else if string(MBRAuxiliar.Particion3MBR.TipoPart) == "e" { InicioParticion = MBRAuxiliar.Particion3MBR.InicioPart SizeParticion = MBRAuxiliar.Particion3MBR.SizePart Extendidas++ } } if MBRAuxiliar.Particion4MBR.SizePart != 0 { if string(MBRAuxiliar.Particion4MBR.TipoPart) == "p" { Primarias++ } else if string(MBRAuxiliar.Particion4MBR.TipoPart) == "e" { InicioParticion = MBRAuxiliar.Particion4MBR.InicioPart SizeParticion = MBRAuxiliar.Particion4MBR.SizePart Extendidas++ } } if Extendidas == 1 { Bandera = true } return Bandera, InicioParticion, SizeParticion } func VerificarSizeParticionExtendida(InicioListaExtendida int64, SizeExtendida int64) bool { //Variables var Bandera bool var SizeUsado int var SizeEBR int var EBRAuxiliar Variables.EBREstructura //Asignación Bandera = true SizeEBR = int(unsafe.Sizeof(Variables.EBREstructura{})) SizeUsado = 0 for { //Leer EBR EBRAuxiliar, Bandera = Metodos.LeerArchivoBinarioEBR(Variables.MapComandos["path"], InicioListaExtendida) //Lista Corrupta if !Bandera { return false } SizeUsado += int(EBRAuxiliar.SizeEBR) + SizeEBR InicioListaExtendida = EBRAuxiliar.SiguienteEBR if EBRAuxiliar.SiguienteEBR == -1 { break } } if SizeUsado >= int(SizeExtendida) { Bandera = false } return Bandera } func ObtenerEBR(InicioListaExtendida int64) []Variables.EBREstructura { //Variables var Contador int var Bandera bool var EBRAuxiliar Variables.EBREstructura var ArregloEBR []Variables.EBREstructura //Asignación Contador 
= 0 for { //Leer EBR EBRAuxiliar, Bandera = Metodos.LeerArchivoBinarioEBR(Variables.MapComandos["path"], InicioListaExtendida) //Lista Corrupta if !Bandera { return ArregloEBR } //fmt.Println("Size: ", EBRAuxiliar.SizeEBR, "Inicio: ", EBRAuxiliar.InicioEBR, "Siguiente: ", EBRAuxiliar.SiguienteEBR, "Nombre: ", string(EBRAuxiliar.NameEBR[:])) ArregloEBR = append(ArregloEBR, EBRAuxiliar) InicioListaExtendida = ArregloEBR[Contador].SiguienteEBR Contador++ if EBRAuxiliar.SiguienteEBR == -1 { break } } return ArregloEBR } func VerificarNombreParticionAdd(MBRAuxiliar Variables.MBREstructura) (bool, bool, int, int64, int64) { //Variables var Nombre string var NombreArray1 string var NombreArray2 string var NombreArray3 string var NombreArray4 string var NombreExtendida string var Bandera bool var PrimariaExtendida bool var NumeroParticion int var InicioExtendida int64 var SizeExtendida int64 var ArregloEBR []Variables.EBREstructura //Asignación Nombre = Metodos.Trim(strings.ToLower(Variables.MapComandos["name"])) Bandera = false NumeroParticion = 0 SizeExtendida = 0 NombreArray1 = string(bytes.Trim(MBRAuxiliar.Particion1MBR.NamePart[:], "\x00")) NombreArray2 = string(bytes.Trim(MBRAuxiliar.Particion2MBR.NamePart[:], "\x00")) NombreArray3 = string(bytes.Trim(MBRAuxiliar.Particion3MBR.NamePart[:], "\x00")) NombreArray4 = string(bytes.Trim(MBRAuxiliar.Particion4MBR.NamePart[:], "\x00")) if MBRAuxiliar.Particion1MBR.SizePart != 0 { if strings.EqualFold(Nombre, NombreArray1) { Bandera = true PrimariaExtendida = true NumeroParticion = 1 if MBRAuxiliar.Particion1MBR.TipoPart == 'e' { InicioExtendida = MBRAuxiliar.Particion1MBR.InicioPart SizeExtendida = MBRAuxiliar.Particion1MBR.SizePart } } } if MBRAuxiliar.Particion2MBR.SizePart != 0 { if strings.EqualFold(Nombre, NombreArray2) { Bandera = true PrimariaExtendida = true NumeroParticion = 2 } if MBRAuxiliar.Particion2MBR.TipoPart == 'e' { InicioExtendida = MBRAuxiliar.Particion2MBR.InicioPart SizeExtendida = 
MBRAuxiliar.Particion2MBR.SizePart } } if MBRAuxiliar.Particion3MBR.SizePart != 0 { if strings.EqualFold(Nombre, NombreArray3) { Bandera = true PrimariaExtendida = true NumeroParticion = 3 } if MBRAuxiliar.Particion3MBR.TipoPart == 'e' { InicioExtendida = MBRAuxiliar.Particion3MBR.InicioPart SizeExtendida = MBRAuxiliar.Particion3MBR.SizePart } } if MBRAuxiliar.Particion4MBR.SizePart != 0 { if strings.EqualFold(Nombre, NombreArray4) { Bandera = true PrimariaExtendida = true NumeroParticion = 4 } if MBRAuxiliar.Particion4MBR.TipoPart == 'e' { InicioExtendida = MBRAuxiliar.Particion4MBR.InicioPart SizeExtendida = MBRAuxiliar.Particion4MBR.SizePart } } if InicioExtendida != 0 { ArregloEBR = ObtenerEBR(InicioExtendida) for Contador := 0; Contador < len(ArregloEBR); Contador++ { NombreExtendida = string(bytes.Trim(ArregloEBR[Contador].NameEBR[:], "\x00")) if strings.EqualFold(Variables.MapComandos["name"], NombreExtendida) { Bandera = true } } } return Bandera, PrimariaExtendida, NumeroParticion, InicioExtendida, SizeExtendida } func ComandoFdiskCrearParticion(MBRAuxiliar Variables.MBREstructura) { //Variables var ParticionAuxiliar Variables.ParticionEstructura var EBRAuxiliar Variables.EBREstructura var Bandera bool var Size int var Unit int var SizeTotal int //Asignación ParticionAuxiliar = Variables.ParticionEstructura{} EBRAuxiliar = Variables.EBREstructura{} Bandera = false Size = 0 Unit = 0 SizeTotal = 0 //Crear Nuevo Disco Metodos.LimpiaDisco() Metodos.CreaDisco(int(MBRAuxiliar.SizeMbr) + 201) Metodos.LlenaDisco(0, int(unsafe.Sizeof(Variables.MBREstructura{}))) Metodos.LLenarParticiones(MBRAuxiliar) Metodos.GeneraEspacios() //Crear Nueva Particion //Estado De La Particion ParticionAuxiliar.StatusPart = 'n' //Tipo De Particion if Variables.MapComandos["type"] == "p" { ParticionAuxiliar.TipoPart = 'p' } else if Variables.MapComandos["type"] == "e" { ParticionAuxiliar.TipoPart = 'e' } else if Variables.MapComandos["type"] == "l" { ParticionAuxiliar.TipoPart = 'l' } 
//Fit De Particion if Variables.MapComandos["fit"] == "bf" { ParticionAuxiliar.FitPart = 'b' } else if Variables.MapComandos["fit"] == "ff" { ParticionAuxiliar.FitPart = 'f' } else if Variables.MapComandos["fit"] == "wf" { ParticionAuxiliar.FitPart = 'w' } //Tamaño De La Partición Size, _ = strconv.Atoi(Variables.MapComandos["size"]) Unit, _ = strconv.Atoi(Variables.MapComandos["unit"]) SizeTotal = Size * Unit ParticionAuxiliar.SizePart = int64(SizeTotal) for Contador := 0; Contador <= 200 - 1; Contador++ { if Metodos.EspaciosDisponibles[Contador].Disponible { if SizeTotal <= Metodos.EspaciosDisponibles[Contador].Tamano { //Inicio Particion ParticionAuxiliar.InicioPart = int64(Metodos.EspaciosDisponibles[Contador].P1) Bandera = true break } } } //Nombre De La Partición copy(ParticionAuxiliar.NamePart[:], Variables.MapComandos["name"]) if Bandera { //Buscar Particion Vacia if ParticionAuxiliar.TipoPart == 'e' { //Crear EBR Particion Extendida EBRAuxiliar.StatusEBR = 'n' EBRAuxiliar.FitEBR = 'w' EBRAuxiliar.InicioEBR = ParticionAuxiliar.InicioPart EBRAuxiliar.SizeEBR = 0 EBRAuxiliar.SiguienteEBR = -1 copy(EBRAuxiliar.NameEBR[:], "none") //Escribir EBR Metodos.EscribirArchivoBinarioEBR(EBRAuxiliar, ParticionAuxiliar.InicioPart) } if MBRAuxiliar.Particion1MBR.SizePart == 0 { MBRAuxiliar.Particion1MBR = ParticionAuxiliar Metodos.EscribirArchivoBinarioArreglo(MBRAuxiliar) color.Success.Println("Particion Creada Con Exito") fmt.Println("") } else if MBRAuxiliar.Particion2MBR.SizePart == 0 { MBRAuxiliar.Particion2MBR = ParticionAuxiliar Metodos.EscribirArchivoBinarioArreglo(MBRAuxiliar) color.Success.Println("Particion Creada Con Exito") fmt.Println("") } else if MBRAuxiliar.Particion3MBR.SizePart == 0 { MBRAuxiliar.Particion3MBR = ParticionAuxiliar Metodos.EscribirArchivoBinarioArreglo(MBRAuxiliar) color.Success.Println("Particion Creada Con Exito") fmt.Println("") } else if MBRAuxiliar.Particion4MBR.SizePart == 0 { MBRAuxiliar.Particion4MBR = ParticionAuxiliar 
Metodos.EscribirArchivoBinarioArreglo(MBRAuxiliar) color.Success.Println("Particion Creada Con Exito") fmt.Println("") } } else { color.HEX("#de4843", false).Println("No Existe Espacio Disponible Para La Particion Indicada") fmt.Println("") } } func ComandoFdiskCrearParticionLogica(InicioListaLogica int64, SizeExtendida int64) { //Variables var Size int var Unit int var SizeTotal int var ContadorAuxiliar int var InicioPart int64 var InicioReal int64 var SizePart int64 var Bandera bool var SiCambio int var ParticionAuxiliar Variables.EBREstructura var ArregloEBR []Variables.EBREstructura var ArregloAntes []Variables.EBREstructura var ArregloDespues []Variables.EBREstructura //Asignación Bandera = false ParticionAuxiliar = Variables.EBREstructura{} ArregloEBR = make([]Variables.EBREstructura, 0) ArregloEBR = ObtenerEBR(InicioListaLogica) // Crear Nueva Particion // Estado De La Particion ParticionAuxiliar.StatusEBR = 'n' //Fit De Particion if Variables.MapComandos["fit"] == "bf" { ParticionAuxiliar.FitEBR = 'b' } else if Variables.MapComandos["fit"] == "ff" { ParticionAuxiliar.FitEBR = 'f' } else if Variables.MapComandos["fit"] == "wf" { ParticionAuxiliar.FitEBR = 'w' } // Siguiente Particion ParticionAuxiliar.SiguienteEBR = -1 //Nombre De La Partición copy(ParticionAuxiliar.NameEBR[:], Variables.MapComandos["name"]) // Crear Disco Virtual Metodos.LimpiaDisco() Metodos.CreaDisco(int(SizeExtendida)) // Rellenar Particiones Existentes for Contador := 0; Contador < len(ArregloEBR); Contador++ { InicioPart = ArregloEBR[Contador].InicioEBR InicioReal = InicioPart - InicioListaLogica SizePart = ArregloEBR[Contador].SizeEBR SizeTotal = 0 if SizePart != 0 { SizeTotal = int(SizePart + int64(unsafe.Sizeof(Variables.EBREstructura{}))) } Metodos.LlenaDisco(int(InicioReal), SizeTotal) } Metodos.GeneraEspacios() //Tamaño De La Partición Size, _ = strconv.Atoi(Variables.MapComandos["size"]) Unit, _ = strconv.Atoi(Variables.MapComandos["unit"]) SizeTotal = Size * Unit 
ParticionAuxiliar.SizeEBR = int64(SizeTotal)
	// The slot must also hold the EBR header itself.
	SizeTotal += int(unsafe.Sizeof(Variables.EBREstructura{}))
	// First-fit search over the free-space table (positions are relative to
	// the start of the extended partition).
	for Contador := 0; Contador < 200; Contador++ {
		if Metodos.EspaciosDisponibles[Contador].Disponible {
			if SizeTotal <= Metodos.EspaciosDisponibles[Contador].Tamano {
				ParticionAuxiliar.InicioEBR = int64(Metodos.EspaciosDisponibles[Contador].P1) + InicioListaLogica
				Bandera = true
				break
			}
		}
	}
	if Bandera {
		// Split the existing EBRs into the ones located before and after the
		// new partition's start position.
		for Contador := 0; Contador < len(ArregloEBR); Contador++ {
			if ArregloEBR[Contador].InicioEBR < ParticionAuxiliar.InicioEBR {
				SiCambio = 0
			} else if ArregloEBR[Contador].InicioEBR > ParticionAuxiliar.InicioEBR {
				SiCambio = 1
			}
			if SiCambio == 0 {
				ArregloAntes = append(ArregloAntes, ArregloEBR[Contador])
			} else if SiCambio == 1 {
				ArregloDespues = append(ArregloDespues, ArregloEBR[Contador])
			}
		}
		// Rebuild the chain: antes + new + despues, relinking SiguienteEBR.
		ArregloEBR = make([]Variables.EBREstructura, 0)
		if len(ArregloAntes) != 0 {
			for Contador := 0; Contador < len(ArregloAntes); Contador++ {
				ArregloEBR = append(ArregloEBR, ArregloAntes[Contador])
				ContadorAuxiliar = Contador
			}
			// The last EBR before the new one now points at it.
			ArregloEBR[ContadorAuxiliar].SiguienteEBR = ParticionAuxiliar.InicioEBR
		}
		if len(ArregloDespues) != 0 {
			// Link the new partition to the first EBR that follows it.
			ParticionAuxiliar.SiguienteEBR = ArregloDespues[0].InicioEBR
			ArregloEBR = append(ArregloEBR, ParticionAuxiliar)
			for Contador := 0; Contador < len(ArregloDespues); Contador++ {
				ArregloEBR = append(ArregloEBR, ArregloDespues[Contador])
			}
		} else {
			// New partition goes at the end of the chain.
			ArregloEBR = append(ArregloEBR, ParticionAuxiliar)
		}
		// Persist every EBR of the rebuilt chain.
		for Contador := 0; Contador < len(ArregloEBR); Contador++ {
			Metodos.EscribirArchivoBinarioEBR(ArregloEBR[Contador], ArregloEBR[Contador].InicioEBR)
		}
		color.Success.Println("Particion Creada Con Exito!")
		fmt.Println("")
	} else {
		color.HEX("#de4843", false).Println("No Existe Espacio Disponible Para La Particion Indicada")
		fmt.Println("")
	}
}

// ComandoFdiskDeleteParticion deletes primary/extended partition number
// NumeroParticion from the MBR after asking the user for confirmation.
// delete=full also wipes the partition's bytes on disk; delete=fast only
// clears its MBR entry.
func ComandoFdiskDeleteParticion(MBRAuxiliar Variables.MBREstructura,
	NumeroParticion int) {
	Lectura := bufio.NewReader(os.Stdin)
	// The four MBR slots, addressable by partition number (1-4). The four
	// original branches were identical except for the slot they touched.
	Particiones := []*Variables.ParticionEstructura{
		&MBRAuxiliar.Particion1MBR,
		&MBRAuxiliar.Particion2MBR,
		&MBRAuxiliar.Particion3MBR,
		&MBRAuxiliar.Particion4MBR,
	}
	// Prompt loop: repeat until the user answers s (yes) or n (no).
	for {
		color.HEX("#c9265c", false).Print("Seguro Que Desea Eliminar La Particion? s/n: ")
		fmt.Print("")
		Cadena, AvisoError := Lectura.ReadString('\n')
		_ = AvisoError
		Cadena = strings.ToLower(Cadena)
		Cadena = Metodos.Trim(Cadena)
		if Cadena == "s" {
			if NumeroParticion >= 1 && NumeroParticion <= len(Particiones) {
				Particion := Particiones[NumeroParticion-1]
				switch Variables.MapComandos["delete"] {
				case "full":
					// Wipe the partition's bytes before clearing its entry.
					PosicionFinal := Particion.InicioPart + Particion.SizePart
					Metodos.EscribirArchivoBinarioArregloDelete(MBRAuxiliar, Particion.InicioPart, PosicionFinal)
					*Particion = Variables.ParticionEstructura{}
				case "fast":
					*Particion = Variables.ParticionEstructura{}
				}
			}
			Metodos.EscribirArchivoBinarioArreglo(MBRAuxiliar)
			color.Success.Println("Particion Eliminada Con Exito")
			fmt.Println("")
			break
		} else if Cadena == "n" {
			color.HEX("#c9265c", false).Println("Particion No Eliminada!")
			fmt.Println("")
			break
		} else {
			color.HEX("#de4843", false).Println("Debe De Ingresar s o n")
			fmt.Println("")
		}
	}
}

// ComandoFdiskDeleteParticionLogica deletes the logical partition whose name
// matches the "name" command argument from the EBR chain that starts at
// InicioExtendida, after asking the user for confirmation.
func ComandoFdiskDeleteParticionLogica(InicioExtendida int64) {
	// Variables
	var Lectura *bufio.Reader
	var Cadena string
	var NombreExtendida string
	var ContadorAuxiliar int
	var SiCambio bool
	var EBREliminado bool
	var AvisoError error
	var EBRAuxiliar Variables.EBREstructura
	var EBREliminar Variables.EBREstructura
	var ArregloEBR []Variables.EBREstructura
	var ArregloAntes []Variables.EBREstructura
	var ArregloDespues []Variables.EBREstructura
	// Assignment
	Lectura = bufio.NewReader(os.Stdin)
	ArregloEBR = ObtenerEBR(InicioExtendida)
	SiCambio = true
	EBREliminado = false
	// Prompt loop: repeat until the user answers s (yes) or n (no).
	for {
		color.HEX("#c9265c", false).Print("Seguro Que Desea Eliminar La Particion? s/n: ")
		fmt.Print("")
		Cadena, AvisoError = Lectura.ReadString('\n')
		_ = AvisoError
		Cadena = strings.ToLower(Cadena)
		Cadena = Metodos.Trim(Cadena)
		if Cadena == "s" {
			// Split the chain into the EBRs before and after the one being
			// deleted; the matching EBR itself is skipped.
			for Contador := 0; Contador < len(ArregloEBR); Contador++ {
				NombreExtendida = string(bytes.Trim(ArregloEBR[Contador].NameEBR[:], "\x00"))
				if strings.EqualFold(NombreExtendida, Variables.MapComandos["name"]) {
					EBREliminar = ArregloEBR[Contador]
					SiCambio = false
					EBREliminado = true
				}
				if SiCambio {
					if !EBREliminado {
						// BUGFIX: this appended to ArregloDespues, so every
						// EBR located before the deleted one was lost from
						// the rebuilt chain.
						ArregloAntes = append(ArregloAntes, ArregloEBR[Contador])
					}
				} else {
					if !EBREliminado {
						ArregloDespues = append(ArregloDespues, ArregloEBR[Contador])
					}
				}
				EBREliminado = false
			}
			// Rebuild the chain without the deleted EBR.
			ArregloEBR = make([]Variables.EBREstructura, 0)
			if len(ArregloAntes) >= 1 {
				for Contador := 0; Contador < len(ArregloAntes); Contador++ {
					ArregloEBR = append(ArregloEBR, ArregloAntes[Contador])
					ContadorAuxiliar = Contador
				}
				if len(ArregloDespues) != 0 {
					ArregloEBR[ContadorAuxiliar].SiguienteEBR = ArregloDespues[0].InicioEBR
				} else {
					ArregloEBR[ContadorAuxiliar].SiguienteEBR = -1
				}
			} else {
				// The first logical partition was deleted: restore the empty
				// placeholder EBR at the start of the extended partition.
				EBRAuxiliar.StatusEBR = 'n'
				EBRAuxiliar.FitEBR = 'w'
				EBRAuxiliar.InicioEBR = InicioExtendida
				EBRAuxiliar.SizeEBR = 0
				if len(ArregloDespues) > 0 {
					EBRAuxiliar.SiguienteEBR = ArregloDespues[0].InicioEBR
				} else {
					EBRAuxiliar.SiguienteEBR = -1
				}
				copy(EBRAuxiliar.NameEBR[:], "none")
				ArregloEBR = append(ArregloEBR, EBRAuxiliar)
			}
			if len(ArregloDespues) != 0 {
				for Contador := 0; Contador < len(ArregloDespues); Contador++ {
					ArregloEBR = append(ArregloEBR, ArregloDespues[Contador])
				}
			}
			// delete=full also wipes the deleted partition's bytes.
			if Variables.MapComandos["delete"] == "full" {
				if len(ArregloEBR) > 1 {
					Metodos.EscribirArchivoBinarioEBRDelete(EBREliminar, EBREliminar.InicioEBR)
				}
			} else if Variables.MapComandos["delete"] == "fast" {
				// Nothing extra to do for a fast delete.
			}
			// Persist every EBR of the rebuilt chain.
			for Contador := 0; Contador < len(ArregloEBR); Contador++ {
				Metodos.EscribirArchivoBinarioEBR(ArregloEBR[Contador],
ArregloEBR[Contador].InicioEBR) } color.Success.Println("Particion Eliminada Con Exito!") fmt.Println("") break } else if Cadena == "n" { color.HEX("#c9265c", false).Println("Particion No Eliminada!") fmt.Println("") break } else { color.HEX("#de4843", false).Println("Debe De Ingresar s o n") fmt.Println("") } } } func ComandoFdiskAddParticion(MBRAuxiliar Variables.MBREstructura, NumeroParticion int) { //Variables var CantidadAdd int var UnitAdd int var NuevoSize int var Bandera bool var MenosEspacio bool var MBRModificar Variables.ParticionEstructura //Asignaciones CantidadAdd, _ = strconv.Atoi(Variables.MapComandos["add"]) UnitAdd, _ = strconv.Atoi(Variables.MapComandos["unit"]) CantidadAdd = CantidadAdd * UnitAdd MBRModificar = Variables.ParticionEstructura{} Bandera = false MenosEspacio = false if NumeroParticion == 1 { NuevoSize = int(MBRAuxiliar.Particion1MBR.SizePart) + CantidadAdd MBRModificar = MBRAuxiliar.Particion1MBR if CantidadAdd > 0 { //Crear Disco Metodos.LimpiaDisco() Metodos.CreaDisco(int(MBRAuxiliar.SizeMbr) + 200) Metodos.LlenaDisco(0, 200) Metodos.LLenarParticionesAdd(MBRAuxiliar, 1) Metodos.LlenaDisco(int(MBRAuxiliar.Particion1MBR.InicioPart), 1) Metodos.GeneraEspacios() for Contador := 0; Contador <= 200 - 1; Contador++ { if Metodos.EspaciosDisponibles[Contador].Disponible { if Metodos.EspaciosDisponibles[Contador].P1 == int(MBRAuxiliar.Particion1MBR.InicioPart) + 1 { if NuevoSize <= Metodos.EspaciosDisponibles[Contador].Tamano + 1 { MBRModificar.SizePart = int64(NuevoSize) Bandera = true break } } } } MBRAuxiliar.Particion1MBR = MBRModificar } else if CantidadAdd < 0 { if NuevoSize > 0 { MBRAuxiliar.Particion1MBR.SizePart = int64(NuevoSize) Bandera = true } else { Bandera = false MenosEspacio = true } } } else if NumeroParticion == 2 { NuevoSize = int(MBRAuxiliar.Particion2MBR.SizePart) + CantidadAdd MBRModificar = MBRAuxiliar.Particion2MBR if CantidadAdd > 0 { //Crear Disco Metodos.LimpiaDisco() Metodos.CreaDisco(int(MBRAuxiliar.SizeMbr) + 
200) Metodos.LlenaDisco(0, 200) Metodos.LLenarParticionesAdd(MBRAuxiliar, 2) Metodos.LlenaDisco(int(MBRAuxiliar.Particion2MBR.InicioPart), 1) Metodos.GeneraEspacios() for Contador := 0; Contador <= 200 - 1; Contador++ { if Metodos.EspaciosDisponibles[Contador].Disponible { if Metodos.EspaciosDisponibles[Contador].P1 == int(MBRAuxiliar.Particion2MBR.InicioPart) + 1 { if NuevoSize <= Metodos.EspaciosDisponibles[Contador].Tamano + 1 { MBRModificar.SizePart = int64(NuevoSize) Bandera = true break } } } } MBRAuxiliar.Particion2MBR = MBRModificar } else if CantidadAdd < 0 { if NuevoSize > 0 { MBRAuxiliar.Particion2MBR.SizePart = int64(NuevoSize) Bandera = true } else { Bandera = false MenosEspacio = true } } } else if NumeroParticion == 3 { NuevoSize = int(MBRAuxiliar.Particion3MBR.SizePart) + CantidadAdd MBRModificar = MBRAuxiliar.Particion3MBR if CantidadAdd > 0 { MBRModificar = MBRAuxiliar.Particion3MBR NuevoSize = int(MBRAuxiliar.Particion3MBR.SizePart) + CantidadAdd //Crear Disco Metodos.LimpiaDisco() Metodos.CreaDisco(int(MBRAuxiliar.SizeMbr) + 200) Metodos.LlenaDisco(0, 200) Metodos.LLenarParticionesAdd(MBRAuxiliar, 3) Metodos.LlenaDisco(int(MBRAuxiliar.Particion3MBR.InicioPart), 1) Metodos.GeneraEspacios() for Contador := 0; Contador <= 200 - 1; Contador++ { if Metodos.EspaciosDisponibles[Contador].Disponible { if Metodos.EspaciosDisponibles[Contador].P1 == int(MBRAuxiliar.Particion3MBR.InicioPart) + 1 { if NuevoSize <= Metodos.EspaciosDisponibles[Contador].Tamano + 1 { MBRModificar.SizePart = int64(NuevoSize) Bandera = true break } } } } MBRAuxiliar.Particion3MBR = MBRModificar } else if CantidadAdd < 0 { if NuevoSize > 0 { MBRAuxiliar.Particion3MBR.SizePart = int64(NuevoSize) Bandera = true } else { Bandera = false MenosEspacio = true } } } else if NumeroParticion == 4 { NuevoSize = int(MBRAuxiliar.Particion4MBR.SizePart) + CantidadAdd MBRModificar = MBRAuxiliar.Particion4MBR if CantidadAdd > 0 { MBRModificar = MBRAuxiliar.Particion4MBR NuevoSize = 
int(MBRAuxiliar.Particion4MBR.SizePart) + CantidadAdd //Crear Disco Metodos.LimpiaDisco() Metodos.CreaDisco(int(MBRAuxiliar.SizeMbr) + 200) Metodos.LlenaDisco(0, 200) Metodos.LLenarParticionesAdd(MBRAuxiliar, 4) Metodos.LlenaDisco(int(MBRAuxiliar.Particion4MBR.InicioPart), 1) Metodos.GeneraEspacios() for Contador := 0; Contador <= 200 - 1; Contador++ { if Metodos.EspaciosDisponibles[Contador].Disponible { if Metodos.EspaciosDisponibles[Contador].P1 == int(MBRAuxiliar.Particion4MBR.InicioPart) + 1 { if NuevoSize <= Metodos.EspaciosDisponibles[Contador].Tamano + 1 { MBRModificar.SizePart = int64(NuevoSize) Bandera = true break } } } } MBRAuxiliar.Particion4MBR = MBRModificar } else if CantidadAdd < 0 { if NuevoSize > 0 { MBRAuxiliar.Particion4MBR.SizePart = int64(NuevoSize) Bandera = true } else { Bandera = false MenosEspacio = true } } } if Bandera { Metodos.EscribirArchivoBinarioArreglo(MBRAuxiliar) color.Success.Println("Particion Modificada Con Exito") fmt.Println("") } else { if MenosEspacio { color.HEX("#de4843", false).Println("No Se Puede Quitar El Espacio Indicado Ya Que Es Mayor Al Tamaño De La Particion") fmt.Println("") } else { color.HEX("#de4843", false).Println("No Hay Espacio Disponible Para Extender La Particion") fmt.Println("") } } } func ComandoFdiskAddParticionLogica(InicioExtendida int64, SizeExtendida int64) { //Variables var CantidadAdd int var UnitAdd int var NuevoSize int var SizeTotal int var InicioPart int64 var InicioReal int64 var SizePart int64 var NombreArray string var Bandera bool var MenosEspacio bool var EBRModificar Variables.EBREstructura var ArregloEBR []Variables.EBREstructura //Asignaciones CantidadAdd, _ = strconv.Atoi(Variables.MapComandos["add"]) UnitAdd, _ = strconv.Atoi(Variables.MapComandos["unit"]) CantidadAdd = CantidadAdd * UnitAdd EBRModificar = Variables.EBREstructura{} Bandera = false MenosEspacio = false ArregloEBR = make([]Variables.EBREstructura, 0) ArregloEBR = ObtenerEBR(InicioExtendida) InicioPart = 0 
SizePart = 0 NuevoSize = 0 NombreArray = "" // Rellenar Particiones Existentes for Contador := 0; Contador < len(ArregloEBR); Contador++ { NombreArray = string(bytes.Trim(ArregloEBR[Contador].NameEBR[:], "\x00")) if strings.EqualFold(Metodos.Trim(Variables.MapComandos["name"]), NombreArray) { EBRModificar = ArregloEBR[Contador] NuevoSize = int(ArregloEBR[Contador].SizeEBR) + CantidadAdd } } if CantidadAdd > 0 { //Crear Particion Extendida Metodos.LimpiaDisco() Metodos.CreaDisco(int(SizeExtendida)) // Rellenar Particiones Existentes for Contador := 0; Contador < len(ArregloEBR); Contador++ { InicioPart = ArregloEBR[Contador].InicioEBR InicioReal = InicioPart - InicioExtendida SizePart = ArregloEBR[Contador].SizeEBR SizeTotal = 0 NombreArray = string(bytes.Trim(ArregloEBR[Contador].NameEBR[:], "\x00")) if !strings.EqualFold(Variables.MapComandos["name"], NombreArray) { if SizePart != 0 { SizeTotal = int(SizePart + int64(unsafe.Sizeof(Variables.EBREstructura{}))) } Metodos.LlenaDisco(int(InicioReal), SizeTotal) } else { EBRModificar = ArregloEBR[Contador] NuevoSize = int(EBRModificar.SizeEBR) + CantidadAdd } } Metodos.LlenaDisco(int(EBRModificar.InicioEBR - InicioExtendida), 1) Metodos.GeneraEspacios() for Contador := 0; Contador <= 200 - 1; Contador++ { if Metodos.EspaciosDisponibles[Contador].Disponible { if Metodos.EspaciosDisponibles[Contador].P1 == int(EBRModificar.InicioEBR) - int(InicioExtendida) + 1 { if NuevoSize <= Metodos.EspaciosDisponibles[Contador].Tamano + 1 { EBRModificar.SizeEBR = int64(NuevoSize) Bandera = true break } } } } } else if CantidadAdd < 0 { if NuevoSize > 0 { EBRModificar.SizeEBR = int64(NuevoSize) Bandera = true } else { Bandera = false MenosEspacio = true } } // Rellenar Particiones Existentes for Contador := 0; Contador < len(ArregloEBR); Contador++ { NombreArray = string(bytes.Trim(ArregloEBR[Contador].NameEBR[:], "\x00")) if strings.EqualFold(Variables.MapComandos["name"], NombreArray) { ArregloEBR[Contador] = EBRModificar } } // 
Escribir EBR for Contador := 0; Contador < len(ArregloEBR); Contador++ { //Escribir EBR Metodos.EscribirArchivoBinarioEBRAdd(ArregloEBR[Contador], ArregloEBR[Contador].InicioEBR) } if Bandera { color.Success.Println("Particion Modificada Con Exito") fmt.Println("") } else { if MenosEspacio { color.HEX("#de4843", false).Println("No Se Puede Quitar El Espacio Indicado Ya Que Es Mayor Al Tamaño De La Particion") fmt.Println("") } else { color.HEX("#de4843", false).Println("No Hay Espacio Disponible Para Extender La Particion") fmt.Println("") } } }
//go:generate goagen bootstrap -d github.com/odiak/MoneyForest/design

package main

import (
	"context"
	"fmt"
	"net/http"
	"time"

	"github.com/go-pg/pg"
	"github.com/go-pg/pg/orm"
	"github.com/goadesign/goa"
	"github.com/goadesign/goa/middleware"
	"github.com/odiak/MoneyForest/app"
	"github.com/odiak/MoneyForest/config"
	"github.com/odiak/MoneyForest/constants"
	"github.com/odiak/MoneyForest/controllers"
	"github.com/odiak/MoneyForest/store"
)

// main wires up the MoneyForest goa service: request middleware, a PostgreSQL
// connection with per-query logging, the API-key security middleware, and the
// resource controllers, then starts the HTTP server on config.Port.
func main() {
	// Create service
	service := goa.New("MoneyForest")

	// Mount middleware
	service.Use(middleware.RequestID())
	service.Use(middleware.LogRequest(true))
	service.Use(middleware.ErrorHandler(service, true))
	service.Use(middleware.Recover())

	db := pg.Connect(config.PgOptions)
	// Log every executed SQL query together with its elapsed time.
	db.OnQueryProcessed(func(event *pg.QueryProcessedEvent) {
		query, err := event.FormattedQuery()
		if err != nil {
			panic(err)
		}
		service.LogInfo(fmt.Sprintf("SQL Query: %s, %s", time.Since(event.StartTime), query))
	})

	app.UseAPIKeyAuthMiddleware(service, NewAPIKeyMiddleware(db))
	app.MountUserController(service, controllers.NewUserController(service, db))
	app.MountAccountController(service, controllers.NewAccountController(service, db))
	app.MountCategoryController(service, controllers.NewCategoryController(service, db))
	app.MountTransactionController(service, controllers.NewTransactionController(service, db))

	// Start service
	if err := service.ListenAndServe(fmt.Sprintf(":%d", config.Port)); err != nil {
		service.LogError("startup", "err", err)
	}
}

// NewAPIKeyMiddleware builds the goa security middleware for the API-key
// scheme: it reads the key from the scheme's header, looks up the matching
// user token (together with its User relation) in the database, and stores
// the authenticated user in the request context under
// constants.CurrentUserKey before invoking the wrapped handler.
func NewAPIKeyMiddleware(db orm.DB) goa.Middleware {
	scheme := app.NewAPIKeyAuthSecurity()
	return func(h goa.Handler) goa.Handler {
		return func(ctx context.Context, rw http.ResponseWriter, req *http.Request) error {
			key := req.Header.Get(scheme.Name)
			if len(key) == 0 {
				return goa.ErrUnauthorized("missing auth token")
			}
			ut := store.UserToken{}
			err := db.Model(&ut).
				Column("user_token.*", "User").
				Where("user_token.token = ?", key).
				Select()
			if err != nil {
				if err == pg.ErrNoRows {
					return goa.ErrUnauthorized("invalid auth token")
				}
				// NOTE(review): the underlying error is discarded here;
				// consider logging it before returning the generic response.
				return goa.ErrInternal("unknown error")
			}
			goa.LogInfo(ctx, "valid auth token", "token", key)
			ctx = context.WithValue(ctx, constants.CurrentUserKey, ut.User)
			return h(ctx, rw, req)
		}
	}
}
package fixtures

import (
	. "github.com/polydawn/refmt/tok"
)

// sequences_Bytes contains what it says on the tin -- but be warned:
// bytes are not representable in all formats.
//
// JSON can't clearly represent binary bytes; typically in practice transforms
// to b64 strings are used, but this is application specific territory.
var sequences_Bytes = []Sequence{
	{"short byte array",
		[]Token{
			{Type: TBytes, Bytes: []byte(`value`)}, // Note 'Length' field not used; would be redundant.
		},
	},
	{"long zero byte array",
		[]Token{
			{Type: TBytes, Bytes: make([]byte, 400)},
		},
	},
}
package vsphere // MachinePool stores the configuration for a machine pool installed // on vSphere. type MachinePool struct { // NumCPUs is the total number of virtual processor cores to assign a vm. // // +optional NumCPUs int32 `json:"cpus"` // NumCoresPerSocket is the number of cores per socket in a vm. The number // of vCPUs on the vm will be NumCPUs/NumCoresPerSocket. // // +optional NumCoresPerSocket int32 `json:"coresPerSocket"` // Memory is the size of a VM's memory in MB. // // +optional MemoryMiB int64 `json:"memoryMB"` // OSDisk defines the storage for instance. // // +optional OSDisk `json:"osDisk"` // Zones defines available zones // Zones is available in TechPreview. // // +omitempty Zones []string `json:"zones,omitempty"` } // OSDisk defines the disk for a virtual machine. type OSDisk struct { // DiskSizeGB defines the size of disk in GB. // // +optional DiskSizeGB int32 `json:"diskSizeGB"` } // Set sets the values from `required` to `p`. func (p *MachinePool) Set(required *MachinePool) { if required == nil || p == nil { return } if required.NumCPUs != 0 { p.NumCPUs = required.NumCPUs } if required.NumCoresPerSocket != 0 { p.NumCoresPerSocket = required.NumCoresPerSocket } if required.MemoryMiB != 0 { p.MemoryMiB = required.MemoryMiB } if required.OSDisk.DiskSizeGB != 0 { p.OSDisk.DiskSizeGB = required.OSDisk.DiskSizeGB } if len(required.Zones) > 0 { p.Zones = required.Zones } }
package types

import (
	"time"

	"github.com/jinzhu/gorm"
	_ "github.com/jinzhu/gorm/dialects/mysql" // Importing this for gorm to designate the db driver
	uuid "github.com/satori/go.uuid"

	"github.com/tespo/satya/v2/scoping"
)

//
// Barcode describes a barcode on a Pod
//
type Barcode struct {
	ID        uuid.UUID  `json:"id" gorm:"type:char(36);primary_key" scope:"barcode.id"`
	PodID     uuid.UUID  `json:"pod_id" gorm:"type:char(36);foreign_key" scope:"barcode.pod_id"`
	Sku       string     `json:"sku" scope:"barcode.sku"`
	Code      string     `json:"code" scope:"barcode.code"`
	LabelTall string     `json:"label_tall" scope:"barcode.label_tall"`
	LabelWide string     `json:"label_wide" scope:"barcode.label_wide"`
	CreatedAt time.Time  `json:"created_at" scope:"barcode.created_at"`
	UpdatedAt time.Time  `json:"updated_at" scope:"barcode.updated_at"`
	DeletedAt *time.Time `json:"deleted_at"`
}

//
// BeforeCreate will set a UUID rather than a numeric ID.
// The UUID is only assigned when the ID is still the zero UUID, so callers
// may pre-set an explicit ID.
//
func (barcode *Barcode) BeforeCreate(scope *gorm.Scope) error {
	uuid := uuid.NewV4()
	if barcode.ID.String() == "00000000-0000-0000-0000-000000000000" {
		return scope.SetColumn("ID", uuid)
	}
	return nil
}

//
// Barcodes is a slice of Barcode
//
type Barcodes []Barcode

//
// Get gets all barcodes
//
func (barcodes *Barcodes) Get(db *gorm.DB) error {
	if err := db.Find(&barcodes).Error; err != nil {
		return err
	}
	return nil
}

//
// GetByID gets one barcode by id
//
func (barcode *Barcode) GetByID(db *gorm.DB, id uuid.UUID) error {
	if err := db.Where("id = ?", id).First(&barcode).Error; err != nil {
		return err
	}
	return nil
}

//
// GetOneByQuery gets the first barcode matching the given where
// clause and arguments
//
func (barcode *Barcode) GetOneByQuery(db *gorm.DB, where string, query ...interface{}) error {
	if err := db.Where(where, query...).First(&barcode).Error; err != nil {
		return err
	}
	return nil
}

//
// GetByQuery gets all barcodes matching the given where clause and
// arguments
//
func (barcodes *Barcodes) GetByQuery(db *gorm.DB, where string, query ...interface{}) error {
	if err := db.Where(where, query...).Find(&barcodes).Error; err != nil {
		return err
	}
	return nil
}

//
// UnscopedGetByID gets one barcode by id, including soft-deleted rows
//
func (barcode *Barcode) UnscopedGetByID(db *gorm.DB, id uuid.UUID) error {
	if err := db.Unscoped().Where("id = ?", id).Find(&barcode).Error; err != nil {
		return err
	}
	return nil
}

//
// Create inserts the Barcode and reloads it from the database
//
func (barcode *Barcode) Create(db *gorm.DB) error {
	if err := db.Create(&barcode).Scan(&barcode).Error; err != nil {
		return err
	}
	return nil
}

//
// Update saves all fields of the Barcode
//
func (barcode *Barcode) Update(db *gorm.DB) error {
	if err := db.Save(&barcode).Error; err != nil {
		return err
	}
	return nil
}

//
// Delete will soft delete a barcode by id
//
func (barcode *Barcode) Delete(db *gorm.DB, id uuid.UUID) error {
	if err := db.Where("id = ?", id).Delete(&barcode).Error; err != nil {
		return err
	}
	return nil
}

//
// Scope limits the fields being returned based on the
// passed in scopes
//
func (barcode *Barcode) Scope(scopes []string) {
	*barcode = scoping.FilterByScopes(scopes, *barcode).(Barcode)
}

//
// Scope limits the fields being returned, for every barcode in the
// slice, based on the passed in scopes
//
func (barcodes *Barcodes) Scope(scopes []string) {
	barcodeSlice := *barcodes
	for i, barcode := range barcodeSlice {
		barcodeSlice[i] = scoping.FilterByScopes(scopes, barcode).(Barcode)
	}
	*barcodes = barcodeSlice
}
package g

// Known node labels.
var (
	LabelProduct = "Product"
	LabelCompany = "Company"
	LabelStock   = "Stock"
	LabelChain   = "Chain"
)

// AllLabelsMap indexes every known label for membership checks.
var AllLabelsMap = map[string]bool{
	LabelProduct: true,
	LabelCompany: true,
	LabelStock:   true,
	LabelChain:   true,
}
package queries

import (
	"context"
	"database/sql"
	"reflect"
	"strings"

	"github.com/friendsofgo/errors"
	"github.com/volatiletech/sqlboiler/v4/boil"
	"github.com/volatiletech/strmangle"
)

// loadRelationshipState carries everything needed to eager-load one
// dot-separated relationship path (e.g. "A.B.C") for a bound object.
type loadRelationshipState struct {
	ctx    context.Context
	exec   boil.Executor
	loaded map[string]struct{}    // keys produced by buildKey; marks already-loaded paths
	toLoad []string               // the current path, split on "."
	mods   map[string]Applicator  // per-path query mods, keyed like loaded
}

// hasLoaded reports whether the path prefix toLoad[0..depth] was already loaded.
func (l loadRelationshipState) hasLoaded(depth int) bool {
	_, ok := l.loaded[l.buildKey(depth)]
	return ok
}

// setLoaded marks the path prefix toLoad[0..depth] as loaded.
func (l loadRelationshipState) setLoaded(depth int) {
	l.loaded[l.buildKey(depth)] = struct{}{}
}

// buildKey joins toLoad[0..depth] with dots; e.g. depth=1 over
// ["A","B","C"] yields "A.B". Uses strmangle's buffer pool to avoid
// an allocation per call.
func (l loadRelationshipState) buildKey(depth int) string {
	buf := strmangle.GetBuffer()

	for i, piece := range l.toLoad[:depth+1] {
		if i != 0 {
			buf.WriteByte('.')
		}
		buf.WriteString(piece)
	}

	str := buf.String()
	strmangle.PutBuffer(buf)
	return str
}

// eagerLoad loads all of the model's relationships
//
// toLoad should look like:
// []string{"Relationship", "Relationship.NestedRelationship"} ... etc
// obj should be one of:
// *[]*struct or *struct
// bkind should reflect what kind of thing it is above
func eagerLoad(ctx context.Context, exec boil.Executor, toLoad []string, mods map[string]Applicator, obj interface{}, bkind bindKind) error {
	state := loadRelationshipState{
		// Storing a ctx in a struct is normally discouraged, but this
		// state object only lives for the duration of one eagerLoad call.
		// (original author: "defiant to the end, I know this is frowned upon")
		ctx:    ctx,
		exec:   exec,
		loaded: map[string]struct{}{},
		mods:   mods,
	}
	// Each entry in toLoad is an independent path; loaded is shared so
	// common prefixes (e.g. "A" of "A" and "A.B") are fetched only once.
	for _, toLoad := range toLoad {
		state.toLoad = strings.Split(toLoad, ".")
		if err := state.loadRelationships(0, obj, bkind); err != nil {
			return err
		}
	}

	return nil
}

// loadRelationships dynamically calls the template generated eager load
// functions of the form:
//
//	func (t *TableR) LoadRelationshipName(exec Executor, singular bool, obj interface{})
//
// The arguments to this function are:
//   - t is not considered here, and is always passed nil. The function exists on a loaded
//     struct to avoid a circular dependency with boil, and the receiver is ignored.
//   - exec is used to perform additional queries that might be required for loading the relationships.
//   - bkind is passed in to identify whether or not this was a single object
//     or a slice that must be loaded into.
//   - obj is the object or slice of objects, always of the type *obj or *[]*obj as per bind.
//
// We start with a normal select before eager loading anything: select * from a;
// Then we start eager loading things, it can be represented by a DAG
//
//	     a1, a2        select id, a_id from b where id in (a1, a2)
//	    /  |   \
//	   b1  b2  b3      select id, b_id from c where id in (b2, b3, b4)
//	  / |   \   \
//	 c1 c2  c3  c4
//
// That's to say that we descend the graph of relationships, and at each level
// we gather all the things up we want to load into, load them, and then move
// to the next level of the graph.
func (l loadRelationshipState) loadRelationships(depth int, obj interface{}, bkind bindKind) error {
	typ := reflect.TypeOf(obj).Elem()
	if bkind == kindPtrSliceStruct {
		typ = typ.Elem().Elem()
	}

	loadingFrom := reflect.ValueOf(obj)
	if loadingFrom.IsNil() {
		return nil
	}

	if !l.hasLoaded(depth) {
		if err := l.callLoadFunction(depth, loadingFrom, typ, bkind); err != nil {
			return err
		}
	}

	// Check if we can stop
	if depth+1 >= len(l.toLoad) {
		return nil
	}

	// *[]*struct -> []*struct
	// *struct -> struct
	loadingFrom = reflect.Indirect(loadingFrom)

	// If it's singular we can just immediately call without looping
	if bkind == kindStruct {
		return l.loadRelationshipsRecurse(depth, loadingFrom)
	}

	// If we were an empty slice to begin with, bail, probably a useless check
	if loadingFrom.Len() == 0 {
		return nil
	}

	// Collect eagerly loaded things to send into next eager load call
	slice, nextBKind, err := collectLoaded(l.toLoad[depth], loadingFrom)
	if err != nil {
		return err
	}

	// If we could collect nothing we're done
	if slice.Len() == 0 {
		return nil
	}

	// Re-wrap the collected slice in a pointer so the next level sees
	// the same *[]*struct shape bind produces.
	ptr := reflect.New(slice.Type())
	ptr.Elem().Set(slice)

	return l.loadRelationships(depth+1, ptr.Interface(), nextBKind)
}

// callLoadFunction finds the loader struct, finds the method that we need
// to call and calls it.
func (l loadRelationshipState) callLoadFunction(depth int, loadingFrom reflect.Value, typ reflect.Type, bkind bindKind) error {
	current := l.toLoad[depth]
	ln, found := typ.FieldByName(loaderStructName)
	// It's possible a Loaders struct doesn't exist on the struct.
	if !found {
		return errors.Errorf("attempted to load %s but no L struct was found", current)
	}

	// Attempt to find the LoadRelationshipName function
	loadMethod, found := ln.Type.MethodByName(loadMethodPrefix + current)
	if !found {
		return errors.Errorf("could not find %s%s method for eager loading", loadMethodPrefix, current)
	}

	ctxArg := reflect.ValueOf(l.ctx)
	// Hack to allow nil executors
	execArg := reflect.ValueOf(l.exec)
	if !execArg.IsValid() {
		execArg = reflect.ValueOf((*sql.DB)(nil))
	}

	// Get a loader instance from anything we have, *struct, or *[]*struct
	val := reflect.Indirect(loadingFrom)
	if bkind == kindPtrSliceStruct {
		if val.Len() == 0 {
			return nil
		}
		val = val.Index(0)
		if val.IsNil() {
			return nil
		}
		val = reflect.Indirect(val)
	}

	// Build the argument list: L struct, [ctx], exec, singular flag, target.
	methodArgs := make([]reflect.Value, 0, 5)
	methodArgs = append(methodArgs, val.FieldByName(loaderStructName))
	if ctxArg.IsValid() {
		methodArgs = append(methodArgs, ctxArg)
	}
	methodArgs = append(methodArgs, execArg, reflect.ValueOf(bkind == kindStruct), loadingFrom)

	// Pass the registered query mods for this path, or the typed-nil
	// sentinel so the generated method's signature is satisfied.
	if mods, ok := l.mods[l.buildKey(depth)]; ok {
		methodArgs = append(methodArgs, reflect.ValueOf(mods))
	} else {
		methodArgs = append(methodArgs, applicatorSentinelVal)
	}

	ret := loadMethod.Func.Call(methodArgs)
	if intf := ret[0].Interface(); intf != nil {
		return errors.Wrapf(intf.(error), "failed to eager load %s", current)
	}

	l.setLoaded(depth)
	return nil
}

// loadRelationshipsRecurse is a helper function for taking a reflect.Value and
// Basically calls loadRelationships with: obj.R.EagerLoadedObj
// Called with an obj of *struct
func (l loadRelationshipState) loadRelationshipsRecurse(depth int, obj reflect.Value) error {
	key := l.toLoad[depth]
	r, err := findRelationshipStruct(obj)
	if err != nil {
		return errors.Wrapf(err, "failed to append loaded %s", key)
	}

	loadedObject := reflect.Indirect(r).FieldByName(key)
	if loadedObject.IsNil() {
		return nil
	}

	bkind := kindStruct
	if derefed := reflect.Indirect(loadedObject); derefed.Kind() != reflect.Struct {
		bkind = kindPtrSliceStruct

		// Convert away any helper slice types
		// elemType is *elem (from []*elem or helperSliceType)
		// sliceType is *[]*elem
		elemType := derefed.Type().Elem()
		sliceType := reflect.PtrTo(reflect.SliceOf(elemType))
		loadedObject = loadedObject.Addr().Convert(sliceType)
	}
	return l.loadRelationships(depth+1, loadedObject.Interface(), bkind)
}

// collectLoaded traverses the next level of the graph and picks up all
// the values that we need for the next eager load query.
//
// For example when loadingFrom is [parent1, parent2]
//
//	parent1 -> child1
//	       \-> child2
//	parent2 -> child3
//
// This should return [child1, child2, child3]
func collectLoaded(key string, loadingFrom reflect.Value) (reflect.Value, bindKind, error) {
	// Pull the first one so we can get the types out of it in order to
	// create the proper type of slice.
	current := reflect.Indirect(loadingFrom.Index(0))
	lnFrom := loadingFrom.Len()

	r, err := findRelationshipStruct(current)
	if err != nil {
		return reflect.Value{}, 0, errors.Wrapf(err, "failed to collect loaded %s", key)
	}

	loadedObject := reflect.Indirect(r).FieldByName(key)
	loadedType := loadedObject.Type() // Should be *obj or []*obj

	bkind := kindPtrSliceStruct
	if loadedType.Elem().Kind() == reflect.Struct {
		bkind = kindStruct
		loadedType = reflect.SliceOf(loadedType)
	} else {
		// Ensure that we get rid of all the helper "XSlice" types
		loadedType = reflect.SliceOf(loadedType.Elem())
	}

	collection := reflect.MakeSlice(loadedType, 0, 0)

	// Walk every parent, appending its loaded child (singular) or
	// children (slice) to the flat collection.
	i := 0
	for {
		switch bkind {
		case kindStruct:
			if !loadedObject.IsNil() {
				collection = reflect.Append(collection, loadedObject)
			}
		case kindPtrSliceStruct:
			collection = reflect.AppendSlice(collection, loadedObject)
		}

		i++
		if i >= lnFrom {
			break
		}

		current = reflect.Indirect(loadingFrom.Index(i))
		r, err = findRelationshipStruct(current)
		if err != nil {
			return reflect.Value{}, 0, errors.Wrapf(err, "failed to collect loaded %s", key)
		}

		loadedObject = reflect.Indirect(r).FieldByName(key)
	}

	// The collection is always a slice, so the next level always binds
	// as a ptr-slice-struct.
	return collection, kindPtrSliceStruct, nil
}

// findRelationshipStruct returns the relationship ("R") struct field of
// obj (a struct value), erroring when the field is missing or nil.
func findRelationshipStruct(obj reflect.Value) (reflect.Value, error) {
	relationshipStruct := obj.FieldByName(relationshipStructName)
	if !relationshipStruct.IsValid() {
		return reflect.Value{}, errors.New("relationship struct was invalid")
	} else if relationshipStruct.IsNil() {
		return reflect.Value{}, errors.New("relationship struct was nil")
	}

	return relationshipStruct, nil
}

// applicatorSentinelVal is a typed-nil Applicator value passed to the
// generated Load* methods when no query mods were registered for a path.
var (
	applicatorSentinel    Applicator
	applicatorSentinelVal = reflect.ValueOf(&applicatorSentinel).Elem()
)

// SetFromEmbeddedStruct sets `to` value from embedded struct
// of the `from` struct or slice of structs.
// Expects `to` and `from` to be a pair of pre-allocated **struct or *[]*struct.
// Returns false if types do not match.
func SetFromEmbeddedStruct(to interface{}, from interface{}) bool {
	toPtrVal := reflect.ValueOf(to)
	fromPtrVal := reflect.ValueOf(from)
	// Both arguments must be pointers (**struct or *[]*struct shapes).
	if toPtrVal.Kind() != reflect.Ptr || fromPtrVal.Kind() != reflect.Ptr {
		return false
	}

	toStructTyp, ok := singularStructType(to)
	if !ok {
		return false
	}

	fromStructTyp, ok := singularStructType(from)
	if !ok {
		return false
	}

	// The `from` struct type must anonymously embed the `to` struct type;
	// fieldNum is the index of that embedded field.
	fieldNum, ok := embeddedStructFieldNum(fromStructTyp, toStructTyp)
	if !ok {
		return false
	}

	// Unwrap one interface level if the pointers point at interfaces.
	toVal := toPtrVal.Elem()
	if toVal.Kind() == reflect.Interface {
		toVal = reflect.ValueOf(toVal.Interface())
	}

	fromVal := fromPtrVal.Elem()
	if fromVal.Kind() == reflect.Interface {
		fromVal = reflect.ValueOf(fromVal.Interface())
	}

	// Case 1: **struct pair — point *to at the embedded field inside *from
	// (no copy; *to aliases the embedded portion of *from).
	if toVal.Kind() == reflect.Ptr && toVal.Elem().Kind() == reflect.Struct && fromVal.Kind() == reflect.Ptr && fromVal.Elem().Kind() == reflect.Struct {
		toVal.Set(fromVal.Elem().Field(fieldNum).Addr())
		return true
	}

	// Case 2: *[]*struct pair — build a new slice whose elements point at
	// each source element's embedded field.
	toKind := toPtrVal.Type().Elem().Kind()
	fromKind := fromPtrVal.Type().Elem().Kind()
	if toKind == reflect.Slice && fromKind == reflect.Slice {
		toSlice := reflect.MakeSlice(toVal.Type(), fromVal.Len(), fromVal.Len())
		for i := 0; i < fromVal.Len(); i++ {
			toSlice.Index(i).Set(fromVal.Index(i).Elem().Field(fieldNum).Addr())
		}
		toVal.Set(toSlice)
		return true
	}

	return false
}

// singularStructType returns singular struct type
// from **struct or *[]*struct types.
// Used for Load* methods during binding.
// singularStructType returns the singular struct type underlying obj,
// unwrapping any number of pointers and at most one level of slice
// (i.e. the **struct and *[]*struct shapes used during binding).
// The second return value is false when no struct type can be reached.
func singularStructType(obj interface{}) (reflect.Type, bool) {
	val := reflect.Indirect(reflect.ValueOf(obj))
	if val.Kind() == reflect.Interface {
		val = reflect.ValueOf(val.Interface())
	}

	typ := val.Type()
	sliceSeen := false
	for {
		switch typ.Kind() {
		case reflect.Ptr:
			typ = typ.Elem()
		case reflect.Slice:
			// Slices inside other slices are not supported
			if sliceSeen {
				return nil, false
			}
			sliceSeen = true
			typ = typ.Elem()
		case reflect.Struct:
			return typ, true
		default:
			return nil, false
		}
	}
}

// embeddedStructFieldNum returns the index of the anonymous (embedded)
// struct field of type emb inside the obj struct type, and whether such
// a field exists.
func embeddedStructFieldNum(obj reflect.Type, emb reflect.Type) (int, bool) {
	numFields := obj.NumField()
	for idx := 0; idx < numFields; idx++ {
		field := obj.Field(idx)
		if field.Anonymous && field.Type.Kind() == reflect.Struct && field.Type == emb {
			return idx, true
		}
	}
	return 0, false
}
// Copyright 2018-2019 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package k8sTest

import (
	"context"
	"fmt"
	"time"

	. "github.com/cilium/cilium/test/ginkgo-ext"
	"github.com/cilium/cilium/test/helpers"

	. "github.com/onsi/gomega"
)

// This tests the Istio 1.4.3 integration, following the configuration
// instructions specified in the Istio Getting Started Guide in
// Documentation/gettingstarted/istio.rst.
// Changes to the Getting Started Guide may require re-generating or copying
// the following manifests:
// - istio-crds.yaml
// - istio-cilium.yaml
// - bookinfo-v1-istio.yaml
// - bookinfo-v2-istio.yaml
// Cf. the comments below for each manifest.
var _ = Describe("K8sIstioTest", func() {

	var (
		// istioSystemNamespace is the default namespace into which Istio is
		// installed.
		istioSystemNamespace = "istio-system"

		// istioCRDYAMLPath is the file generated from istio-init during a
		// step in Documentation/gettingstarted/istio.rst to setup
		// Istio 1.4.3. In the GSG the file is directly piped to kubectl.
		// Resolved in BeforeAll once kubectl is available.
		istioCRDYAMLPath = ""

		// istioYAMLPath is the istio-cilium.yaml file generated following the
		// instructions in Documentation/gettingstarted/istio.rst to setup
		// Istio 1.4.3. mTLS is enabled. Resolved in BeforeAll.
		istioYAMLPath = ""

		// istioServiceNames is the subset of Istio services in the Istio
		// namespace that are accessed from sidecar proxies.
		istioServiceNames = []string{
			// All the services created by Istio are listed here, but only
			// those that we care about are uncommented.
			// "istio-citadel",
			// "istio-galley",
			"istio-ingressgateway",
			"istio-pilot",
			// "istio-policy",
			// "istio-telemetry",
			// "prometheus",
		}

		// wgetCommand is the command used in this test because the Istio apps
		// do not provide curl.
		wgetCommand = fmt.Sprintf("wget --tries=2 --connect-timeout %d", helpers.CurlConnectTimeout)

		kubectl          *helpers.Kubectl
		uptimeCancel     context.CancelFunc
		// teardownTimeout bounds how long AfterAll waits for terminating pods.
		teardownTimeout  = 10 * time.Minute

		ciliumFilename string
	)

	// BeforeAll: skip unsupported K8s versions, deploy Cilium + DNS, then
	// install the Istio CRDs and control-plane pods.
	BeforeAll(func() {
		k8sVersion := helpers.GetCurrentK8SEnv()
		switch k8sVersion {
		case "1.7", "1.8", "1.9", "1.10", "1.11", "1.12", "1.13":
			Skip(fmt.Sprintf("Istio 1.4.3 doesn't support K8S %s", k8sVersion))
		}

		// NOTE(review): logger is expected to be defined elsewhere in the
		// k8sTest package — confirm against the suite's shared setup.
		kubectl = helpers.CreateKubectl(helpers.K8s1VMName(), logger)

		istioCRDYAMLPath = helpers.ManifestGet(kubectl.BasePath(), "istio-crds.yaml")
		istioYAMLPath = helpers.ManifestGet(kubectl.BasePath(), "istio-cilium.yaml")

		ciliumFilename = helpers.TimestampFilename("cilium.yaml")
		DeployCiliumAndDNS(kubectl, ciliumFilename)

		By("Creating the istio-system namespace")
		res := kubectl.NamespaceCreate(istioSystemNamespace)
		res.ExpectSuccess("unable to create namespace %q", istioSystemNamespace)

		By("Creating the Istio CRDs")
		res = kubectl.ApplyDefault(istioCRDYAMLPath)
		res.ExpectSuccess("unable to create Istio CRDs")

		By("Waiting for Istio CRDs to be ready")
		err := kubectl.WaitForCRDCount("istio.io|certmanager.k8s.io", 23, helpers.HelperTimeout)
		Expect(err).To(BeNil(), "Istio CRDs are not ready after timeout")

		By("Creating the Istio system PODs")
		res = kubectl.ApplyDefault(istioYAMLPath)
		res.ExpectSuccess("unable to create Istio resources")
	})

	// AfterAll: tear down Istio, its CRDs and namespace, then Cilium.
	// Deletions deliberately ignore errors so cleanup proceeds as far
	// as possible.
	AfterAll(func() {
		By("Deleting the Istio resources")
		_ = kubectl.Delete(istioYAMLPath)

		By("Deleting the Istio CRDs")
		_ = kubectl.Delete(istioCRDYAMLPath)

		By("Waiting all terminating PODs to disappear")
		err := kubectl.WaitCleanAllTerminatingPods(teardownTimeout)
		ExpectWithOffset(1, err).To(BeNil(), "terminating Istio PODs are not deleted after timeout")

		By("Deleting the istio-system namespace")
		_ = kubectl.NamespaceDelete(istioSystemNamespace)

		kubectl.DeleteCiliumDS()
		kubectl.CloseSSHClient()
	})

	JustBeforeEach(func() {
		var err error
		uptimeCancel, err = kubectl.BackgroundReport("uptime")
		Expect(err).To(BeNil(), "Cannot start background report process")
	})

	JustAfterEach(func() {
		uptimeCancel()
		kubectl.ValidateNoErrorsInLogs(CurrentGinkgoTestDescription().Duration)
	})

	AfterFailed(func() {
		kubectl.CiliumReport(helpers.CiliumNamespace, "cilium endpoint list", "cilium bpf proxy list")
	})

	// This is defined as a separate function to be called from the test below
	// so that we properly capture test artifacts if any of the assertions fail
	// (see https://github.com/cilium/cilium/pull/8508).
	waitIstioReady := func() {
		// Ignore one-time jobs and Prometheus. All other pods in the
		// namespaces have an "istio" label.

		By("Waiting for Istio pods to be ready")
		// First wait for at least one POD to get into running state so that WaitforPods
		// below does not succeed if there are no PODs with the "istio" label.
		err := kubectl.WaitforNPodsRunning(istioSystemNamespace, "-l istio", 1, helpers.HelperTimeout)
		ExpectWithOffset(1, err).To(BeNil(),
			"No Istio POD is Running after timeout in namespace %q", istioSystemNamespace)

		// Then wait for all the Istio PODs to get Ready
		// Note that this succeeds if there are no PODs matching the filter (-l istio -n istio-system).
		err = kubectl.WaitforPods(istioSystemNamespace, "-l istio", helpers.HelperTimeout)
		ExpectWithOffset(1, err).To(BeNil(),
			"Istio pods are not ready after timeout in namespace %q", istioSystemNamespace)

		for _, name := range istioServiceNames {
			By("Waiting for Istio service %q to be ready", name)
			err = kubectl.WaitForServiceEndpoints(
				istioSystemNamespace, "", name, helpers.HelperTimeout)
			ExpectWithOffset(1, err).Should(BeNil(), "Service %q is not ready after timeout", name)
		}

		for _, name := range istioServiceNames {
			By("Waiting for DNS to resolve Istio service %q", name)
			err = kubectl.WaitForKubeDNSEntry(name, istioSystemNamespace)
			ExpectWithOffset(1, err).To(BeNil(), "DNS entry is not ready after timeout")
		}
	}

	// This is a subset of Services's "Bookinfo Demo" test suite, with the pods
	// injected with Istio sidecar proxies and Istio mTLS enabled.
	Context("Istio Bookinfo Demo", func() {

		var (
			resourceYAMLPaths []string
			policyPaths       []string
		)

		AfterEach(func() {
			for _, resourcePath := range resourceYAMLPaths {
				By("Deleting resource in file %q", resourcePath)
				// Explicitly do not check result to avoid having assertions in AfterEach.
				_ = kubectl.Delete(resourcePath)
			}

			for _, policyPath := range policyPaths {
				By("Deleting policy in file %q", policyPath)
				// Explicitly do not check result to avoid having assertions in AfterEach.
				_ = kubectl.Delete(policyPath)
			}
		})

		// shouldConnect checks that srcPod can connect to dstURI.
		shouldConnect := func(srcPod, dstURI string) bool {
			By("Checking that %q can connect to %q", srcPod, dstURI)
			res := kubectl.ExecPodCmd(
				helpers.DefaultNamespace, srcPod, fmt.Sprintf("%s %s", wgetCommand, dstURI))
			if !res.WasSuccessful() {
				GinkgoPrint("Unable to connect from %q to %q: %s", srcPod, dstURI, res.OutputPrettyPrint())
				return false
			}
			return true
		}

		// shouldNotConnect checks that srcPod cannot connect to dstURI.
		shouldNotConnect := func(srcPod, dstURI string) bool {
			By("Checking that %q cannot connect to %q", srcPod, dstURI)
			res := kubectl.ExecPodCmd(
				helpers.DefaultNamespace, srcPod, fmt.Sprintf("%s %s", wgetCommand, dstURI))
			if res.WasSuccessful() {
				GinkgoPrint("Was able to connect from %q to %q, but expected no connection: %s",
					srcPod, dstURI, res.OutputPrettyPrint())
				return false
			}
			return true
		}

		// formatLabelArgument formats the provided key-value pairs as labels for use in
		// querying Kubernetes.
		formatLabelArgument := func(firstKey, firstValue string, nextLabels ...string) string {
			baseString := fmt.Sprintf("-l %s=%s", firstKey, firstValue)
			if nextLabels == nil {
				return baseString
			} else if len(nextLabels)%2 != 0 {
				Fail("must provide even number of arguments for label key-value pairings")
			} else {
				for i := 0; i < len(nextLabels); i += 2 {
					baseString = fmt.Sprintf("%s,%s=%s", baseString, nextLabels[i], nextLabels[i+1])
				}
			}
			return baseString
		}

		// formatAPI is a helper function which formats a URI to access.
		formatAPI := func(service, port, resource string) string {
			target := fmt.Sprintf(
				"%s.%s.svc.cluster.local:%s",
				service, helpers.DefaultNamespace, port)
			if resource != "" {
				return fmt.Sprintf("%s/%s", target, resource)
			}
			return target
		}

		It("Tests bookinfo inter-service connectivity", func() {
			var err error
			version := "version"
			v1 := "v1"

			productPage := "productpage"
			reviews := "reviews"
			ratings := "ratings"
			details := "details"
			dnsChecks := []string{productPage, reviews, ratings, details}
			app := "app"
			health := "health"
			ratingsPath := "ratings/0"
			apiPort := "9080"
			podNameFilter := "{.items[*].metadata.name}"

			// Those YAML files are the bookinfo-v1.yaml and bookinfo-v2.yaml
			// manifests injected with Istio sidecars using those commands:
			// cd test/k8sT/manifests/
			// istioctl kube-inject -f bookinfo-v1.yaml > bookinfo-v1-istio.yaml
			// istioctl kube-inject -f bookinfo-v2.yaml > bookinfo-v2-istio.yaml
			bookinfoV1YAML := helpers.ManifestGet(kubectl.BasePath(), "bookinfo-v1-istio.yaml")
			bookinfoV2YAML := helpers.ManifestGet(kubectl.BasePath(), "bookinfo-v2-istio.yaml")
			l7PolicyPath := helpers.ManifestGet(kubectl.BasePath(), "cnp-specs.yaml")

			waitIstioReady()

			// Create the L7 policy before creating the pods, in order to test
			// that the sidecar proxy mode doesn't deadlock on endpoint
			// creation in this case.
			policyPaths = []string{l7PolicyPath}
			for _, policyPath := range policyPaths {
				By("Creating policy in file %q", policyPath)
				_, err := kubectl.CiliumPolicyAction(helpers.DefaultNamespace, policyPath, helpers.KubectlApply, helpers.HelperTimeout)
				Expect(err).Should(BeNil(), "Unable to create policy %q", policyPath)
			}

			resourceYAMLPaths = []string{bookinfoV2YAML, bookinfoV1YAML}
			for _, resourcePath := range resourceYAMLPaths {
				By("Creating resources in file %q", resourcePath)
				res := kubectl.Create(resourcePath)
				res.ExpectSuccess("Unable to create resource %q", resourcePath)
			}

			// Wait for pods and endpoints to be ready before creating the
			// next resources to reduce the load on the next pod creations,
			// in order to reduce the probability of regeneration timeout.
			By("Waiting for Bookinfo pods to be ready")
			err = kubectl.WaitforPods(helpers.DefaultNamespace, "-l zgroup=bookinfo", helpers.HelperTimeout)
			Expect(err).Should(BeNil(), "Pods are not ready after timeout")

			By("Waiting for Bookinfo endpoints to be ready")
			err = kubectl.CiliumEndpointWaitReady()
			Expect(err).Should(BeNil(), "Endpoints are not ready after timeout")

			for _, service := range []string{details, ratings, reviews, productPage} {
				By("Waiting for Bookinfo service %q to be ready", service)
				err = kubectl.WaitForServiceEndpoints(
					helpers.DefaultNamespace, "", service,
					helpers.HelperTimeout)
				Expect(err).Should(BeNil(), "Service %q is not ready after timeout", service)
			}

			for _, name := range dnsChecks {
				By("Waiting for DNS to resolve Bookinfo service %q", name)
				err = kubectl.WaitForKubeDNSEntry(name, helpers.DefaultNamespace)
				Expect(err).To(BeNil(), "DNS entry is not ready after timeout")
			}

			By("Testing L7 filtering")
			reviewsPodV1, err := kubectl.GetPods(helpers.DefaultNamespace, formatLabelArgument(app, reviews, version, v1)).Filter(podNameFilter)
			Expect(err).Should(BeNil(), "Cannot get reviewsV1 pods")
			productpagePodV1, err := kubectl.GetPods(helpers.DefaultNamespace, formatLabelArgument(app, productPage, version, v1)).Filter(podNameFilter)
			Expect(err).Should(BeNil(), "Cannot get productpageV1 pods")

			// Connectivity checks often need to be repeated because Pilot
			// is eventually consistent, i.e. it may take some time for a
			// sidecar proxy to get updated with the configuration for another
			// new endpoint and it rejects egress traffic with 503s in the
			// meantime.
			err = helpers.WithTimeout(func() bool {
				allGood := true

				allGood = shouldConnect(reviewsPodV1.String(), formatAPI(ratings, apiPort, health)) && allGood
				allGood = shouldNotConnect(reviewsPodV1.String(), formatAPI(ratings, apiPort, ratingsPath)) && allGood

				allGood = shouldConnect(productpagePodV1.String(), formatAPI(details, apiPort, health)) && allGood
				allGood = shouldNotConnect(productpagePodV1.String(), formatAPI(ratings, apiPort, health)) && allGood
				allGood = shouldNotConnect(productpagePodV1.String(), formatAPI(ratings, apiPort, ratingsPath)) && allGood

				return allGood
			}, "Istio sidecar proxies are not configured", &helpers.TimeoutConfig{Timeout: helpers.HelperTimeout})
			Expect(err).Should(BeNil(), "Cannot configure Istio sidecar proxies")
		})
	})
})
package db

import (
	"database/sql"
	"fmt"
)

// Database wraps an open *sql.DB handle with small convenience helpers.
type Database struct {
	DB *sql.DB
}

// begin starts a transaction, logging and returning nil on failure.
// Callers must nil-check the result.
func (db Database) begin() (transaction *sql.Tx) {
	transaction, err := db.DB.Begin()
	if err != nil {
		fmt.Println(err)
		return nil
	}
	return transaction
}

// prepare creates a prepared statement, logging and returning nil on
// failure. Callers must nil-check the result and Close it when done.
func (db Database) prepare(query string) (statement *sql.Stmt) {
	statement, err := db.DB.Prepare(query)
	if err != nil {
		fmt.Println(err)
		return nil
	}
	return statement
}

// Exec runs a statement that returns no rows.
func (db Database) Exec(query string, arguments ...interface{}) (result sql.Result, err error) {
	result, err = db.DB.Exec(query, arguments...)
	return
}

// Query runs a query, logging and returning nil rows on failure.
// Callers must nil-check and Close the returned rows.
func (db Database) Query(query string, arguments ...interface{}) (rows *sql.Rows) {
	rows, err := db.DB.Query(query, arguments...)
	if err != nil {
		fmt.Println(err)
		return nil
	}
	return rows
}

// queryRow runs a query expected to return at most one row; errors are
// deferred until Scan, per database/sql semantics.
func (db Database) queryRow(query string, arguments ...interface{}) (row *sql.Row) {
	return db.DB.QueryRow(query, arguments...)
}

// SingleQuery executes a single statement inside its own transaction.
// BUG FIX: the original dereferenced nil when prepare/begin failed, and
// never closed the prepared statements.
func (db Database) SingleQuery(sql string, args ...interface{}) error {
	stmt := db.prepare(sql)
	if stmt == nil {
		return fmt.Errorf("singleQuery: could not prepare statement")
	}
	defer stmt.Close()

	tx := db.begin()
	if tx == nil {
		return fmt.Errorf("singleQuery: could not begin transaction")
	}

	txStmt := tx.Stmt(stmt)
	defer txStmt.Close()

	if _, err := txStmt.Exec(args...); err != nil {
		fmt.Println("singleQuery: ", err)
		tx.Rollback()
		return err
	}

	if err := tx.Commit(); err != nil {
		fmt.Println(err)
		return err
	}
	fmt.Println("singleQuery successful")
	return nil
}

// InsertWithReturningID executes an INSERT and returns the generated id
// via a Postgres-style RETURNING clause. Returns 0 when the scan fails.
// BUG FIX: the original silently ignored the Scan error.
func (db Database) InsertWithReturningID(sql string, args ...interface{}) int {
	var lastID int64
	row := db.queryRow(sql+" RETURNING id;", args...)
	if err := row.Scan(&lastID); err != nil {
		fmt.Println("insertWithReturningID: ", err)
		return 0
	}
	id := int(lastID)
	fmt.Printf("insertWithReturningID: %d\n", id)
	return id
}

// SingleQueryWithAffected executes a single statement inside its own
// transaction and returns the number of affected rows.
// BUG FIX: the original called result.RowsAffected() BEFORE checking the
// Exec error (nil-pointer panic on failure) and shadowed that error; it
// also dereferenced nil when prepare/begin failed.
func (db Database) SingleQueryWithAffected(sql string, args ...interface{}) (int, error) {
	stmt := db.prepare(sql)
	if stmt == nil {
		return 0, fmt.Errorf("singleQueryWithAffected: could not prepare statement")
	}
	defer stmt.Close()

	tx := db.begin()
	if tx == nil {
		return 0, fmt.Errorf("singleQueryWithAffected: could not begin transaction")
	}

	txStmt := tx.Stmt(stmt)
	defer txStmt.Close()

	result, err := txStmt.Exec(args...)
	if err != nil {
		fmt.Println("singleQuery: ", err)
		tx.Rollback()
		return 0, err
	}

	affectedCount, err := result.RowsAffected()
	if err != nil {
		fmt.Println("singleQuery: ", err)
		tx.Rollback()
		return 0, err
	}

	if err = tx.Commit(); err != nil {
		fmt.Println(err)
		return 0, err
	}
	fmt.Println("singleQuery successful")
	return int(affectedCount), nil
}

// Close closes the underlying database connection.
func (db Database) Close() {
	db.DB.Close()
}
package maps import ( "fmt" "testing" ) type Vertex struct { Lat, Long float64 } func TestMaps(t *testing.T) { var m map[string]Vertex var stranger map[string]int m = make(map[string]Vertex) m["Bell Labs"] = Vertex{ 40.68433, -74.39967, } stranger = make(map[string]int) stranger["Hello World"] = 8 fmt.Println("{} {}", m, stranger) }
// In the earlier example we used explicit locking with mutexes to
// synchronize access to shared state across multiple goroutines. Another
// option is to use the built-in synchronization features of goroutines
// and channels to achieve the same result. This channel-based approach
// aligns with Go's idea of sharing memory by communicating: each piece
// of data is owned by exactly one goroutine.
package main

import (
	"fmt"
	"math/rand"
	"sync/atomic"
	"time"
)

// In this example the state is owned by a single goroutine, which
// guarantees the data is never corrupted by concurrent access. To read
// or write state, other goroutines send a request to the owning
// goroutine and receive the corresponding reply. The readOp and writeOp
// structs encapsulate those requests and give the owning goroutine a
// way to respond (via the resp channel).
type readOp struct {
	key  int
	resp chan int
}
type writeOp struct {
	key  int
	val  int
	resp chan bool
}

func main() {

	// As before, count how many operations we perform.
	var readOps uint64 = 0
	var writeOps uint64 = 0

	// The reads and writes channels are used by the other goroutines to
	// issue read and write requests, respectively.
	reads := make(chan *readOp)
	writes := make(chan *writeOp)

	// This goroutine owns the state — a map, as in the previous example,
	// but now private to this goroutine. It repeatedly selects on the
	// two request channels, serving whichever request arrives, and
	// replies on the request's resp channel to signal success (or, for
	// reads, to deliver the requested value).
	go func() {
		var state = make(map[int]int)
		for {
			select {
			case read := <-reads:
				read.resp <- state[read.key]
			case write := <-writes:
				state[write.key] = write.val
				write.resp <- true
			}
		}
	}()

	// Start 100 goroutines that issue reads to the state-owning
	// goroutine via the reads channel. Each read constructs a readOp,
	// sends it over reads, and then receives the result over the
	// provided resp channel.
	for r := 0; r < 100; r++ {
		go func() {
			for {
				read := &readOp{
					key:  rand.Intn(5),
					resp: make(chan int)}
				reads <- read
				<-read.resp
				atomic.AddUint64(&readOps, 1)
				time.Sleep(time.Millisecond)
			}
		}()
	}

	// Start 10 writers in the same fashion.
	for w := 0; w < 10; w++ {
		go func() {
			for {
				write := &writeOp{
					key:  rand.Intn(5),
					val:  rand.Intn(100),
					resp: make(chan bool)}
				writes <- write
				<-write.resp
				atomic.AddUint64(&writeOps, 1)
				time.Sleep(time.Millisecond)
			}
		}()
	}

	// Let the goroutines run for one second.
	time.Sleep(time.Second)

	// Finally, capture and report the operation counts.
	readOpsFinal := atomic.LoadUint64(&readOps)
	fmt.Println("readOps:", readOpsFinal)
	writeOpsFinal := atomic.LoadUint64(&writeOps)
	fmt.Println("writeOps:", writeOpsFinal)

	// In this particular case the goroutine-based approach is a bit more
	// involved than the mutex-based one. It can still be useful — for
	// example when other channels are already involved, or when managing
	// several mutexes would be error-prone. Use whichever approach feels
	// most natural, especially with respect to program correctness.
}
package main

import (
	"bufio"
	"crypto/rand"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net/http"
	"os"
	"strings"
	"time"
)

const (
	ACCESS_TOKEN = "SECRET"
	DEV_SITE     = "https://kapi.kakao.com"
	STORIES_PATH = "/v1/api/story/mystories"
)

var (
	err          error
	client       *http.Client
	workdir      string
	randbuf      []byte
	outputWriter *bufio.Writer
)

// Story mirrors one entry of the KakaoStory "mystories" API response.
type Story struct {
	Id        string `json:"id"`
	Content   string `json:"content"`
	MediaType string `json:"media_type"`
	CreatedAt string `json:"created_at"`
	Media     []struct {
		Xlarge   string `json:"xlarge"`
		Large    string `json:"large"`
		Medium   string `json:"medium"`
		Small    string `json:"small"`
		Original string `json:"original"`
	} `json:"media,omitempty"`
}

// init prepares the HTTP client, a per-run backup directory, and the
// buffered writer for the text index of all stories.
func init() {
	// BUG FIX (robustness): give the shared client a timeout so a stuck
	// request cannot hang the backup forever.
	client = &http.Client{Timeout: 30 * time.Second}
	randbuf = make([]byte, 20)

	workdir = "backup_" + time.Now().Format("20060102_150405")
	err = os.Mkdir(workdir, os.ModeDir)
	if err != nil {
		panic(err)
	}

	outputFilename := workdir + "/output.txt"
	outf, err := os.Create(outputFilename)
	if err != nil {
		// BUG FIX: the original built an error with fmt.Errorf and
		// discarded it, leaving outputWriter wrapping a nil file and
		// failing later. Abort immediately instead.
		log.Fatalf("error opening output file: %v, %v", outputFilename, err)
	}
	outputWriter = bufio.NewWriter(outf)
}

// nextImageName derives a collision-resistant local file name for url:
// a random prefix plus the URL's final path component.
func nextImageName(url string) string {
	// crypto/rand.Read failing means no randomness is available; abort.
	if _, err := rand.Read(randbuf); err != nil {
		panic(err)
	}
	r := base64.URLEncoding.EncodeToString(randbuf)

	name := url[strings.LastIndex(url, "/")+1:]
	// BUG FIX: the original indexed with strings.Index(url, "?") without
	// checking for -1, slicing with a negative index and panicking on
	// URLs that carry no query string.
	if q := strings.Index(name, "?"); q >= 0 {
		name = name[:q]
	}
	return fmt.Sprintf("%s_%s", r, name)
}

// fetch retrieves one page of stories (those older than lastId, or the
// first page when lastId is empty) and returns the raw JSON body.
func fetch(lastId string) []byte {
	url := DEV_SITE + STORIES_PATH
	if len(lastId) > 0 {
		url += "?last_id=" + lastId
	}

	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		panic(err)
	}
	req.Header.Add("Authorization", "Bearer "+ACCESS_TOKEN)

	resp, err := client.Do(req)
	if err != nil {
		// BUG FIX: the original deferred resp.Body.Close() before
		// checking this error, dereferencing a nil response on any
		// transport failure.
		panic(err)
	}
	defer resp.Body.Close()

	storyStr, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	return storyStr
}

// unmarshal decodes the raw JSON page into a slice of stories.
func unmarshal(raw []byte) *[]Story {
	var stories []Story
	err = json.Unmarshal(raw, &stories)
	if err != nil {
		panic(err)
	}
	return &stories
}

// downloadImage fetches one image URL into the work directory and
// returns the local file name. Split out of persist so the deferred
// Close calls run per image, not once at the end of the whole loop.
func downloadImage(url string) (string, error) {
	out, err := os.Create(workdir + "/" + nextImageName(url))
	if err != nil {
		return "", err
	}
	defer out.Close()

	// BUG FIX: the original never checked the http.Get error before
	// deferring resp.Body.Close(), panicking with a nil response.
	resp, err := http.Get(url)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	if _, err := io.Copy(out, resp.Body); err != nil {
		return "", err
	}
	return out.Name(), nil
}

// persist appends the story's metadata to the text index and downloads
// any attached media into the work directory.
func persist(story Story) {
	log.Println("persisting story...", story)
	fmt.Fprintf(outputWriter, "id: %v\n", story.Id)
	fmt.Fprintf(outputWriter, "created_at: %v\n", story.CreatedAt)
	fmt.Fprintf(outputWriter, "content: %v\n", story.Content)
	if len(story.Media) > 0 {
		fmt.Fprintf(outputWriter, "media\n")
		for _, m := range story.Media {
			log.Println("downloading image:", m.Original)
			name, err := downloadImage(m.Original)
			if err != nil {
				panic(err)
			}
			fmt.Fprintf(outputWriter, "- %v\n", name)
		}
	}
	fmt.Fprintf(outputWriter, "\n")
}

// main pages through all stories (newest first, following last_id) and
// persists each one, flushing the index at the end.
func main() {
	lastId := ""
	for {
		storyJson := fetch(lastId)
		stories := unmarshal(storyJson)
		if len(*stories) == 0 {
			break
		}
		for _, story := range *stories {
			persist(story)
			lastId = story.Id
		}
	}
	outputWriter.Flush()
}
// Package gettingstarted is a guided tour of Go fundamentals: printing,
// numeric/string basics, constants, control flow, arrays/slices/maps,
// defer, and panic/recover. Every function only prints to stdout.
package gettingstarted

import "fmt"

// jamesBond is a named integer type, used to demonstrate %T formatting.
type jamesBond int

// HandsOnEx runs every demo in this package in a fixed order.
func HandsOnEx() {
	fmt.Println("[*] Welcome From Getting Started ")
	printEx()
	fundInt()
	fundConstants()
	fundControls()
	fundArrays()
	funcDefer()
	funcPanicRecover()
}

// funcPanicRecover shows that a deferred recover() intercepts a panic and
// receives the value that was passed to panic().
func funcPanicRecover() {
	fmt.Println("[-] Panic & Recover")
	defer func() {
		// recover() return value passed to panic call
		str := recover()
		fmt.Println("recover() ", str)
	}()
	panic("PANIC")
}

// funcDefer shows that a deferred call runs after the surrounding function's
// other statements: second() is deferred, so first() prints first.
func funcDefer() {
	fmt.Println("[-] Defer Function Usage ")
	fmt.Println("Actually second() func called first place.")
	defer second()
	first()
}

// first is a sample function used by the defer demo.
func first() {
	fmt.Println("1st Sample Method")
}

// second is a sample function used by the defer demo.
func second() {
	fmt.Println("2nd Sample Method")
}

// fundArrays demonstrates fixed-size arrays, slices (append/make/copy) and
// maps (create, read, delete, comma-ok lookup).
func fundArrays() {
	fmt.Println("[-] Arrays, Slices and Maps ")
	var x [5]float64
	x[4] = 100
	x[0] = 90
	x[1] = 80
	x[3] = 70
	x[2] = 50
	fmt.Println(x)
	var total float64 = 0
	/* A single _ (underscore) is used to tell the compiler that we don't need this. */
	for _, value := range x {
		total += value
	}
	fmt.Println("Average : ", total/float64(len(x)))
	fmt.Println("*[-] Slice")
	slice1 := []int{1, 2, 3}
	slice2 := append(slice1, 4, 5)
	slice3 := make([]int, 3)
	slice4 := make([]int, 3, 9) // len(slice4)=3, cap(slice4)=9
	copy(slice3, slice2)
	fmt.Println(slice1, slice2, slice3, slice4)
	fmt.Println("*[-] Maps")
	/**
	Map is unordered collection of key-value pairs.
	- also known as associative array, hash table or a dictionary
	*/
	// xMap is a map of "string"s to "int"s.
	// xMap must be initialized before used. if not, runtime error: assignment to entry in nil map
	xMap := make(map[string]int)
	xMap["key-1"] = 10
	xMap["key-2"] = 20
	fmt.Println(xMap)
	elements := make(map[string]string)
	elements["H"] = "Hydrogen"
	elements["He"] = "Helium"
	elements["Li"] = "Lithium"
	elements["Be"] = "Beryllium"
	elements["B"] = "Boron"
	elements["C"] = "Carbon"
	elements["N"] = "Nitrogen"
	elements["O"] = "Oxygen"
	elements["F"] = "Fluorine"
	elements["Ne"] = "Neon"
	fmt.Println(elements)
	fmt.Println("delete Nitrogen")
	delete(elements, "N")
	fmt.Println(elements)
	/**
	Technically a map returns the zero value for the value type
	(which for strings is the empty string).
	*/
	fmt.Println("element with key 'UNKNOWN' : ", elements["UNKNOWN"])
	// Better way to check: the comma-ok form distinguishes a missing key
	// from a key whose value is the zero value.
	if name, ok := elements["UNKNOWN"]; ok {
		fmt.Println(name, ok)
	} else {
		fmt.Println("Sorry there is no key with 'UNKNOWN' ")
	}
	moreMapEx()
}

// moreMapEx demonstrates a nested map (map of string to map) as a tiny
// element database, then a comma-ok lookup on the outer map.
func moreMapEx() {
	elements := map[string]map[string]string{
		"H": map[string]string{
			"name":  "Hydrogen",
			"state": "gas",
		},
		"He": map[string]string{
			"name":  "Helium",
			"state": "gas",
		},
		"Li": map[string]string{
			"name":  "Lithium",
			"state": "solid",
		},
		"Be": map[string]string{
			"name":  "Beryllium",
			"state": "solid",
		},
		"B": map[string]string{
			"name":  "Boron",
			"state": "solid",
		},
		"C": map[string]string{
			"name":  "Carbon",
			"state": "solid",
		},
		"N": map[string]string{
			"name":  "Nitrogen",
			"state": "gas",
		},
		"O": map[string]string{
			"name":  "Oxygen",
			"state": "gas",
		},
		"F": map[string]string{
			"name":  "Fluorine",
			"state": "gas",
		},
		"Ne": map[string]string{
			"name":  "Neon",
			"state": "gas",
		},
	}
	if el, ok := elements["Li"]; ok {
		fmt.Println(el["name"], el["state"])
	}
}

// fundControls prints 1..10, labelling each number even or odd, using Go's
// single loop construct (for) and if/else.
func fundControls() {
	fmt.Println("[-] ControlsStructures ")
	i := 1
	fmt.Println("Print 1 to 10 Using For Loop")
	fmt.Println("Print even / odd Using If / else")
	var flag string
	for i <= 10 {
		if i%2 == 0 {
			flag = "even"
		} else {
			flag = "odd"
		}
		fmt.Print(i, flag, "\t")
		i += 1
	}
	fmt.Println()
}

/**
printEx demonstrates fmt verbs: %v (value), %T (type), %s, %q (quoted),
and %#U (Unicode) while ranging over a UTF-8 string rune by rune.
https://golang.org/pkg/fmt/
*/
func printEx() {
	fmt.Println("[-] fmt print")
	var x jamesBond
	x = 7
	s := fmt.Sprintf("00%v\t%T", x, x)
	fmt.Println("hello world\n", s)
	s = "Hello, 世界"
	// the uninterpreted bytes of the string or slice
	fmt.Printf("%s\n", s)
	// a double-quoted string safely escaped with Go syntax
	fmt.Printf("%q\n", s)
	// Unicode format; i is the byte offset of each rune, not a rune count
	for i, v := range s {
		fmt.Printf("%#U \t %d", v, i)
	}
}

/*
Integer types :
- uint8, uint16, uint32, uint64
- int8, int16, int32, int64
Float types :
- float32, float64
Generally we should stick with float64 when working with the floating point numbers.
*/
func fundInt() {
	fmt.Println("[-] Fundamental Integer, Float, String")
	fmt.Println("1 + 1 =", 1+1)
	// [.0] is a floating point number
	fmt.Println("1 + 1 =", 1.0+1.0)
	// String
	/*
		indexed is starting at 0 . 2nd character will be printed out
		however, you will see '101' (the byte value) instead of 'e'
	*/
	fmt.Println("Hello World"[1])
}

/*
Constants
- a simple, unchanging value
- only exist at compile time
- there are TYPED and UNTYPED constants
- UNTYPED
  - do not have a fixed type
  - can be implicitly converted by the compiler
*/
func fundConstants() {
	fmt.Println("[-] TYPED / UNTYPED constants")
	// UNTYPED
	const x = 40
	// TYPED
	const typedY float64 = 43.2
	type hotDog int
	type hotCat float64
	fmt.Println(x)
	fmt.Println(typedY)
	fmt.Printf("%T\n", x)
	fmt.Printf("%T\n", typedY)
	fmt.Printf("%T\n", hotDog(x))
	// hotDog(typeY) will be error, so let me comment out
	// Cannot convert an expression of the type 'float64' to the type 'hotDog'
	//fmt.Printf("%T\n", hotDog(typedY))
	// UNTYPED constant easier to use
	// no need to conversion to use
	fmt.Printf("%T\n", hotCat(x))
	fmt.Printf("%T\n", hotCat(typedY))
}
/*
 * thresh: histogram thresholding
 *
 * input:
 *   matrix: the integer matrix to be thresholded
 *   nrows, ncols: the number of rows and columns
 *   percent: the percentage of cells to retain
 *
 * output:
 *   mask: a boolean matrix filled with true for cells that are kept
 *
 */
package main

import (
	"flag"
	"fmt"
)

// is_bench suppresses matrix I/O so timing runs measure only the kernel.
var is_bench = flag.Bool("is_bench", false, "")

// ByteMatrix is a dense row-major matrix of bytes.
type ByteMatrix struct {
	Rows, Cols uint32
	array      []byte
}

// NewByteMatrix allocates a zeroed r-by-c matrix.
func NewByteMatrix(r, c uint32) *ByteMatrix {
	return &ByteMatrix{r, c, make([]byte, r*c)}
}

// WrapBytes wraps an existing byte slice (not copied) as an r-by-c matrix.
func WrapBytes(r, c uint32, bytes []byte) *ByteMatrix {
	return &ByteMatrix{r, c, bytes}
}

// Row returns a view (shared storage) of row i.
func (m *ByteMatrix) Row(i uint32) []byte {
	return m.array[i*m.Cols : (i+1)*m.Cols]
}

// Bytes returns the whole matrix as a flat slice in row-major order.
func (m *ByteMatrix) Bytes() []byte {
	return m.array[0 : m.Rows*m.Cols]
}

// mask receives the thresholding result; allocated in main.
var mask [][]bool

// thresh fills mask with true for the brightest ~percent% of cells.
// It histograms all cell values, scans the histogram from the top until
// the retained count exceeds the target, and keeps cells at or above the
// resulting threshold.
func thresh(m *ByteMatrix, nrows, ncols, percent uint32) {
	// 256 buckets: cell values are bytes (0..255). The previous [100]int
	// histogram indexed out of range for any value >= 100.
	var hist [256]int

	for _, v := range m.Bytes() {
		hist[v]++
	}

	count := (nrows * ncols * percent) / 100
	prefixsum := 0
	threshold := 255

	for ; threshold > 0; threshold-- {
		prefixsum += hist[threshold]
		if prefixsum > int(count) {
			break
		}
	}

	for i := uint32(0); i < nrows; i++ {
		row := m.Row(i)
		for j := range row {
			mask[i][j] = row[j] >= byte(threshold)
		}
	}
}

// main reads the matrix dimensions and data from stdin (unless benchmarking),
// runs thresh, and prints the 0/1 mask.
func main() {
	var nrows, ncols, percent uint32

	flag.Parse()

	fmt.Scanf("%d%d", &nrows, &ncols)

	mask = make([][]bool, nrows)
	for i := range mask {
		mask[i] = make([]bool, ncols)
	}

	m := WrapBytes(nrows, ncols, make([]byte, nrows*ncols))

	if !*is_bench {
		for i := uint32(0); i < nrows; i++ {
			row := m.Row(i)
			for j := range row {
				fmt.Scanf("%d", &row[j])
			}
			fmt.Scanf(" \n")
		}
	}

	fmt.Scanf("\n%d", &percent)

	thresh(m, nrows, ncols, percent)

	/* //input to output for debugging
	fmt.Printf("%d %d\n", nrows, ncols)
	for i := uint32(0); i < nrows; i++ {
		row := m.Row(i)
		for j := uint32(0); j < ncols; j++ {
			fmt.Printf("%d ", row[j])
		}
		fmt.Printf("\n")
	}
	fmt.Printf("%d\n", percent)
	*/

	if !*is_bench {
		fmt.Printf("%d %d\n", nrows, ncols)
		for i := uint32(0); i < nrows; i++ {
			for j := uint32(0); j < ncols; j++ {
				if mask[i][j] {
					fmt.Printf("1 ")
				} else {
					fmt.Printf("0 ")
				}
			}
			fmt.Printf("\n")
		}
		fmt.Printf("\n")
	}
}
package main import ( "strconv" "github.com/tidwall/gjson" "github.com/tidwall/sjson" ) type Property struct { Properties *Properties Name string data interface{} InUse bool OnChange func() } func NewProperty(name string, properties *Properties) *Property { return &Property{ Properties: properties, Name: name, } } func (prop *Property) IsString() bool { _, isOK := prop.data.(string) return isOK } func (prop *Property) AsString() string { if prop.data == nil { prop.data = "" } return prop.data.(string) } func (prop *Property) AsJSON() gjson.Result { if prop.data == nil { prop.data = "{}" } return gjson.Parse(prop.data.(string)) } func (prop *Property) IsBool() bool { _, isOK := prop.data.(bool) return isOK } func (prop *Property) AsBool() bool { if prop.data == nil { prop.data = false } return prop.data.(bool) } func (prop *Property) IsNumber() bool { _, isOK := prop.data.(float64) return isOK } func (prop *Property) AsFloat() float64 { if prop.data == nil { prop.data = 0.0 } return prop.data.(float64) } func (prop *Property) AsMap() map[string]interface{} { if prop.data == nil { prop.data = map[string]interface{}{} } return prop.data.(map[string]interface{}) } func (prop *Property) Set(value interface{}) { if prop.data != value { if intData, isInt := value.(int); isInt { prop.data = float64(intData) } else { prop.data = value } if prop.OnChange != nil { prop.OnChange() } if prop.Properties != nil && prop.Properties.OnChange != nil { prop.Properties.OnChange(prop) } } } func (prop *Property) AsArrayOfInts() []int64 { if !prop.IsString() { prop.data = "{}" } out := []int64{} for _, value := range prop.AsJSON().Array() { out = append(out, value.Int()) } return out } func (prop *Property) SetInts(values ...int64) { jsonStr := "[" for _, v := range values { jsonStr += strconv.Itoa(int(v)) } jsonStr += "]" prop.Set(jsonStr) } // func (prop *Property) SetAfterParse(value interface{}) { // current := prop.AsJSON() // parsed, err := sjson.Set(current.String(), "#0", value) 
// prop.Set(parsed) // fmt.Println(parsed, err) // } // SetRaw sets the property, but without triggering OnChange func (prop *Property) SetRaw(value interface{}) { prop.data = value } // Contains serializable properties for a Card. type Properties struct { Props map[string]*Property DefinitionOrder []string OnChange func(property *Property) } func NewProperties() *Properties { return &Properties{ Props: map[string]*Property{}, DefinitionOrder: []string{}, } } func (properties *Properties) Has(name string) bool { if _, exists := properties.Props[name]; exists { return true } return false } func (properties *Properties) Get(name string) *Property { if _, exists := properties.Props[name]; !exists { properties.Props[name] = NewProperty(name, properties) properties.DefinitionOrder = append(properties.DefinitionOrder, name) } prop := properties.Props[name] prop.InUse = true return prop } func (properties *Properties) GetIfExists(propertyName string) *Property { if properties.Has(propertyName) { return properties.Get(propertyName) } return nil } func (properties *Properties) Remove(propertyName string) { delete(properties.Props, propertyName) for i, p := range properties.DefinitionOrder { if p == propertyName { properties.DefinitionOrder = append(properties.DefinitionOrder[:i], properties.DefinitionOrder[i+1:]...) break } } } func (properties *Properties) Serialize() string { data := "{}" for _, name := range properties.DefinitionOrder { prop := properties.Props[name] if prop.InUse { data, _ = sjson.Set(data, name, prop.data) } } return data } func (properties *Properties) Deserialize(data string) { // All Properties contained within this object should probably be cleared before parsing...? parsed := gjson.Parse(data) parsed.ForEach(func(key, value gjson.Result) bool { properties.Get(key.String()).SetRaw(value.Value()) return true }) }
// Package detectcoll: MD5 implementation that additionally detects inputs
// crafted by known collision attacks (Stevens-style counter-cryptanalysis:
// for each candidate message-block/working-state delta, re-apply the
// compression function and compare results).
package detectcoll

import "log"

var (
	// Per-round left-rotation amounts of the MD5 compression function
	// (four rounds of 16 steps each).
	md5_shifts [64]uint = [64]uint{
		7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
		5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
		4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
		6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
	}

	// The 64 additive constants K[i] of MD5.
	md5_constants [64]uint32 = [64]uint32{
		0xd76aa478, 0xe8c7b756, 0x242070db, 0xc1bdceee,
		0xf57c0faf, 0x4787c62a, 0xa8304613, 0xfd469501,
		0x698098d8, 0x8b44f7af, 0xffff5bb1, 0x895cd7be,
		0x6b901122, 0xfd987193, 0xa679438e, 0x49b40821,
		0xf61e2562, 0xc040b340, 0x265e5a51, 0xe9b6c7aa,
		0xd62f105d, 0x02441453, 0xd8a1e681, 0xe7d3fbc8,
		0x21e1cde6, 0xc33707d6, 0xf4d50d87, 0x455a14ed,
		0xa9e3e905, 0xfcefa3f8, 0x676f02d9, 0x8d2a4c8a,
		0xfffa3942, 0x8771f681, 0x6d9d6122, 0xfde5380c,
		0xa4beea44, 0x4bdecfa9, 0xf6bb4b60, 0xbebfbc70,
		0x289b7ec6, 0xeaa127fa, 0xd4ef3085, 0x04881d05,
		0xd9d4d039, 0xe6db99e5, 0x1fa27cf8, 0xc4ac5665,
		0xf4292244, 0x432aff97, 0xab9423a7, 0xfc93a039,
		0x655b59c3, 0x8f0ccc92, 0xffeff47d, 0x85845dd1,
		0x6fa87e4f, 0xfe2ce6e0, 0xa3014314, 0x4e0811a1,
		0xf7537e82, 0xbd3af235, 0x2ad7d2bb, 0xeb86d391,
	}
)

type md5_ihv [4]uint32 // An IV/IHV/working state
type md5_mb [16]uint32 // A message block (input data converted to u32le)

// MD5 is a hash.Hash-style MD5 state that also tracks whether any processed
// block matched a known collision differential.
type MD5 struct {
	message_length uint64  // message length (in bits)
	ihv            md5_ihv // IHV (or IV if no blocks have been processed)
	buf            []byte  // Left-over data from a previous Write()
	collisions     bool    // Whether any collision was detected
}

// md5_delta describes one known collision differential (table MD5_DELTA is
// defined elsewhere in this package).
type md5_delta struct {
	round         int     // Which round do we apply these changes at
	message_block md5_mb  // Change to the message block
	working_state md5_ihv // Change to the working state
	negate        bool    // Also try the differential with negated message deltas
	zero          bool    // Try with the working state unchanged
	msb           bool    // Try with the working state's MSBs flipped
}

func append_u32le(ret []byte, n uint32) []byte {
	// Append an integer as 4 bytes in little-endian byte order
	ret = append(ret, byte(n))
	ret = append(ret, byte(n>>8))
	ret = append(ret, byte(n>>16))
	ret = append(ret, byte(n>>24))
	return ret
}

func NewMD5() *MD5 {
	// Return a new MD5 collision-detecting hash object, seeded with the
	// standard MD5 initial value.
	return &MD5{
		message_length: 0,
		ihv:            [4]uint32{0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476},
	}
}

func (s *MD5) Reset() {
	// Reset the hash object back to new
	*s = *(NewMD5())
}

func (s *MD5) Size() int {
	// How many bytes Sum() returns
	return 16
}

func (s *MD5) BlockSize() int {
	// The blocksize of the hash
	return 64
}

func (s *MD5) DetectSum(ret []byte) ([]byte, bool) {
	// Append the hash output of all data written so far to ret and
	// return that, plus false if a collision attack was detected.
	// This doesn't modify the state of the hash object.

	t := *s // Copy s

	// Standard MD5 padding: 0x80, zeros, then the 64-bit bit-length in
	// little-endian order. One extra block is needed when fewer than 8
	// length bytes would fit after the 0x80 marker.
	var padding []byte

	if len(t.buf) < 56 {
		padding = make([]byte, 64)
	} else {
		padding = make([]byte, 128)
	}

	copy(padding, t.buf)
	padding[len(t.buf)] = 0x80
	// Length bytes, least-significant first (positions len-8 .. len-1).
	padding[len(padding)-1] = byte(t.message_length >> 56)
	padding[len(padding)-2] = byte(t.message_length >> 48)
	padding[len(padding)-3] = byte(t.message_length >> 40)
	padding[len(padding)-4] = byte(t.message_length >> 32)
	padding[len(padding)-5] = byte(t.message_length >> 24)
	padding[len(padding)-6] = byte(t.message_length >> 16)
	padding[len(padding)-7] = byte(t.message_length >> 8)
	padding[len(padding)-8] = byte(t.message_length)

	for i := 0; i < len(padding); i += 64 {
		mb := create_md5_mb(padding[i : i+64])
		t.process_mb(mb)
	}

	for i := 0; i < 4; i++ {
		ret = append_u32le(ret, t.ihv[i])
	}

	return ret, !t.collisions
}

func (s *MD5) Sum(ret []byte) []byte {
	// hash.Hash-compatible Sum; collision detection is reported via the log.
	ret, ok := s.DetectSum(ret)
	if !ok {
		log.Printf("Detected collisions in hash %x", ret)
	}
	return ret
}

func (s *MD5) Write(b []byte) (n int, err error) {
	// MD5_Update() but in Go ;)
	// Buffers input and processes every complete 64-byte block.
	s.message_length += uint64(len(b)) * 8
	s.buf = append(s.buf, b...)
	for len(s.buf) >= 64 {
		mb := create_md5_mb(s.buf[0:64])
		s.process_mb(mb)
		s.buf = s.buf[64:]
	}
	return len(b), nil
}

func create_md5_mb(data []byte) *md5_mb {
	// Take 64 bytes worth of data, and convert into 32-bit little-endian
	// integers (low byte of each word first, as MD5 specifies).
	var mb md5_mb

	if len(data) != 64 {
		panic("Can only create message blocks from 64-byte data chunks")
	}

	for i := uint(0); i < 64; i++ {
		var shift uint = (i % 4) * 8
		mb[i/4] |= (uint32(data[i]) << shift)
	}

	return &mb
}

// process_mb runs the 64-step MD5 compression function on one message
// block, recording the working state after every step so the collision
// detector can rewind/replay from any round.
func (s *MD5) process_mb(message_block *md5_mb) {
	var i int
	working_states := make([]md5_ihv, 64)

	a := s.ihv[0]
	b := s.ihv[1]
	c := s.ihv[2]
	d := s.ihv[3]

	// Round 1: F = (b & c) | (^b & d), message order m[i].
	for ; i < 16; i++ {
		f := (b & c) | ((^b) & d)
		m := message_block[i]
		b, c, d, a = b+rotl32((a+f+md5_constants[i]+m), md5_shifts[i]), b, c, d
		working_states[i] = md5_ihv{a, b, c, d}
	}

	// Round 2: G function, message order m[(5i+1) mod 16].
	for ; i < 32; i++ {
		f := (d & b) | ((^d) & c)
		m := message_block[((5*i)+1)%16]
		b, c, d, a = b+rotl32((a+f+md5_constants[i]+m), md5_shifts[i]), b, c, d
		working_states[i] = md5_ihv{a, b, c, d}
	}

	// Round 3: H = b^c^d, message order m[(3i+5) mod 16].
	for ; i < 48; i++ {
		f := b ^ c ^ d
		m := message_block[((3*i)+5)%16]
		b, c, d, a = b+rotl32((a+f+md5_constants[i]+m), md5_shifts[i]), b, c, d
		working_states[i] = md5_ihv{a, b, c, d}
	}

	// Round 4: I = c ^ (b | ^d), message order m[7i mod 16].
	for ; i < 64; i++ {
		f := c ^ (b | (^d))
		m := message_block[(7*i)%16]
		b, c, d, a = b+rotl32((a+f+md5_constants[i]+m), md5_shifts[i]), b, c, d
		working_states[i] = md5_ihv{a, b, c, d}
	}

	prev_ihv := s.ihv

	// Davies-Meyer feed-forward.
	s.ihv[0] += a
	s.ihv[1] += b
	s.ihv[2] += c
	s.ihv[3] += d

	s.detect_collisions(message_block, working_states, prev_ihv)
}

// detect_collisions checks the just-processed block against every known
// collision differential: it perturbs the message block and intermediate
// working state per the delta, recomputes the compression function, and
// flags a collision if the result equals the real output.
func (s *MD5) detect_collisions(orig_message_block *md5_mb, working_states []md5_ihv, prev_ihv md5_ihv) {
	// compare_ihv: true if both states are bitwise identical.
	compare_ihv := func(ihv1, ihv2 md5_ihv) bool {
		result := (ihv1[0] ^ ihv2[0]) | (ihv1[1] ^ ihv2[1]) | (ihv1[2] ^ ihv2[2]) | (ihv1[3] ^ ihv2[3])
		return result == 0
	}

	// compare_ihv_swapped_msb: true if the states differ in exactly the
	// most significant bit of every word.
	compare_ihv_swapped_msb := func(ihv1, ihv2 md5_ihv) bool {
		result := (ihv1[0] ^ ihv2[0] ^ (1 << 31)) | (ihv1[1] ^ ihv2[1] ^ (1 << 31)) | (ihv1[2] ^ ihv2[2] ^ (1 << 31)) | (ihv1[3] ^ ihv2[3] ^ (1 << 31))
		return result == 0
	}

	for _, delta := range MD5_DELTA {
		// Apply the message-block differential.
		message_block := *orig_message_block
		for i := 0; i < 16; i++ {
			message_block[i] += delta.message_block[i]
		}

		ws := working_states[delta.round]
		ws_msb := working_states[delta.round].add_msb()

		if delta.zero {
			ihv := reapply_md5(delta.round, &message_block, &ws)
			if compare_ihv(ihv, s.ihv) {
				s.collisions = true
			}
		}
		if delta.msb {
			ihv := reapply_md5(delta.round, &message_block, &ws_msb)
			if compare_ihv(ihv, s.ihv) {
				s.collisions = true
			}
		}

		if delta.negate {
			// Retry with the message differential subtracted instead of added.
			message_block = *orig_message_block
			for i := 0; i < 16; i++ {
				message_block[i] -= delta.message_block[i]
			}
			if delta.zero {
				ihv := reapply_md5(delta.round, &message_block, &ws)
				if compare_ihv(ihv, s.ihv) {
					s.collisions = true
				}
			}
			if delta.msb {
				ihv := reapply_md5(delta.round, &message_block, &ws_msb)
				if compare_ihv(ihv, s.ihv) {
					s.collisions = true
				}
			}
		}
	}

	// check for special den Boer & Bosselaers attack (zero difference block, differential path entirely MSB differences)
	ws := working_states[44].add_msb()
	ihv := reapply_md5(44, orig_message_block, &ws) // Swap WS MSB at round 44, and reapply
	if compare_ihv(ihv, s.ihv) {                    // If this made no difference to the result
		if compare_ihv_swapped_msb(ihv, prev_ihv) { // and only flipped the msb from the previous
			log.Print("Detected possible den Boar & Bosselaers attack")
			// FIXME: Check previous block for collision attack
		} else {
			s.collisions = true
		}
	}
}

// add_msb returns a copy of the state with each word's top bit flipped
// (adding 1<<31 toggles the MSB).
func (x md5_ihv) add_msb() md5_ihv {
	x[0] += 1 << 31
	x[1] += 1 << 31
	x[2] += 1 << 31
	x[3] += 1 << 31
	return x
}

// reapply_md5 recomputes the block's final IHV from a (possibly perturbed)
// working state at `round`: run the steps backwards to recover the input
// IHV, run them forwards to the end, and apply the feed-forward addition.
func reapply_md5(round int, message_block *md5_mb, working_state *md5_ihv) md5_ihv {
	x := unprocess_md5_block(round, message_block, working_state)
	y := process_md5_block(round+1, message_block, working_state)

	return md5_ihv{x[0] + y[0], x[1] + y[1], x[2] + y[2], x[3] + y[3]}
}

// unprocess_md5_block inverts the compression steps from start_round back
// to step 0, recovering the IHV the block started from.
func unprocess_md5_block(start_round int, message_block *md5_mb, working_state *md5_ihv) md5_ihv {
	i := start_round

	a := working_state[0]
	b := working_state[1]
	c := working_state[2]
	d := working_state[3]

	// Each loop undoes one step: rotate registers back, then invert
	// the add-rotate-add of the corresponding round function.
	for ; i >= 48; i-- {
		a, b, c, d = b, c, d, a
		f := c ^ (b | (^d))
		m := message_block[(7*i)%16]
		a -= b
		a = rotl32(a, 32-md5_shifts[i])
		a -= f + m + md5_constants[i]
	}

	for ; i >= 32; i-- {
		a, b, c, d = b, c, d, a
		f := b ^ c ^ d
		m := message_block[((3*i)+5)%16]
		a -= b
		a = rotl32(a, 32-md5_shifts[i])
		a -= f + m + md5_constants[i]
	}

	for ; i >= 16; i-- {
		a, b, c, d = b, c, d, a
		f := (d & b) | ((^d) & c)
		m := message_block[((5*i)+1)%16]
		a -= b
		a = rotl32(a, 32-md5_shifts[i])
		a -= f + m + md5_constants[i]
	}

	for ; i >= 0; i-- {
		a, b, c, d = b, c, d, a
		f := (b & c) | ((^b) & d)
		m := message_block[i]
		a -= b
		a = rotl32(a, 32-md5_shifts[i])
		a -= f + m + md5_constants[i]
	}

	return md5_ihv{a, b, c, d}
}

// process_md5_block runs the compression steps forward from start_round to
// step 63 (same step functions as process_mb, without state recording).
func process_md5_block(start_round int, message_block *md5_mb, working_state *md5_ihv) md5_ihv {
	i := start_round

	a := working_state[0]
	b := working_state[1]
	c := working_state[2]
	d := working_state[3]

	for ; i < 16; i++ {
		f := (b & c) | ((^b) & d)
		m := message_block[i]
		b, c, d, a = b+rotl32((a+f+md5_constants[i]+m), md5_shifts[i]), b, c, d
	}

	for ; i < 32; i++ {
		f := (d & b) | ((^d) & c)
		m := message_block[((5*i)+1)%16]
		b, c, d, a = b+rotl32((a+f+md5_constants[i]+m), md5_shifts[i]), b, c, d
	}

	for ; i < 48; i++ {
		f := b ^ c ^ d
		m := message_block[((3*i)+5)%16]
		b, c, d, a = b+rotl32((a+f+md5_constants[i]+m), md5_shifts[i]), b, c, d
	}

	for ; i < 64; i++ {
		f := c ^ (b | (^d))
		m := message_block[(7*i)%16]
		b, c, d, a = b+rotl32((a+f+md5_constants[i]+m), md5_shifts[i]), b, c, d
	}

	return md5_ihv{a, b, c, d}
}
// Copyright © 2020, 2021 Attestant Limited. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package dirk import ( "context" "crypto/tls" "crypto/x509" "fmt" "regexp" "strconv" "strings" "sync" "time" eth2client "github.com/attestantio/go-eth2-client" api "github.com/attestantio/go-eth2-client/api/v1" "github.com/attestantio/go-eth2-client/spec/phase0" "github.com/attestantio/vouch/services/chaintime" "github.com/attestantio/vouch/services/metrics" "github.com/attestantio/vouch/services/validatorsmanager" "github.com/pkg/errors" "github.com/rs/zerolog" zerologger "github.com/rs/zerolog/log" "github.com/wealdtech/go-bytesutil" dirk "github.com/wealdtech/go-eth2-wallet-dirk" e2wtypes "github.com/wealdtech/go-eth2-wallet-types/v2" "golang.org/x/sync/semaphore" "google.golang.org/grpc/credentials" ) // Service is the manager for dirk accounts. type Service struct { mutex sync.RWMutex monitor metrics.AccountManagerMonitor clientMonitor metrics.ClientMonitor processConcurrency int64 endpoints []*dirk.Endpoint accountPaths []string credentials credentials.TransportCredentials accounts map[phase0.BLSPubKey]e2wtypes.Account validatorsManager validatorsmanager.Service domainProvider eth2client.DomainProvider farFutureEpoch phase0.Epoch currentEpochProvider chaintime.Service wallets map[string]e2wtypes.Wallet walletsMutex sync.RWMutex } // module-wide log. var log zerolog.Logger // New creates a new dirk account manager. 
func New(ctx context.Context, params ...Parameter) (*Service, error) {
	parameters, err := parseAndCheckParameters(params...)
	if err != nil {
		return nil, errors.Wrap(err, "problem with parameters")
	}

	// Set logging.
	log = zerologger.With().Str("service", "accountmanager").Str("impl", "dirk").Logger()
	if parameters.logLevel != log.GetLevel() {
		log = log.Level(parameters.logLevel)
	}

	credentials, err := credentialsFromCerts(ctx, parameters.clientCert, parameters.clientKey, parameters.caCert)
	if err != nil {
		return nil, errors.Wrap(err, "failed to build credentials")
	}

	// Parse "host:port" endpoint strings, skipping (with a warning) any
	// that are malformed; at least one valid endpoint is required.
	endpoints := make([]*dirk.Endpoint, 0, len(parameters.endpoints))
	for _, endpoint := range parameters.endpoints {
		endpointParts := strings.Split(endpoint, ":")
		if len(endpointParts) != 2 {
			log.Warn().Str("endpoint", endpoint).Msg("Malformed endpoint")
			continue
		}
		port, err := strconv.ParseUint(endpointParts[1], 10, 32)
		if err != nil {
			log.Warn().Str("endpoint", endpoint).Err(err).Msg("Malformed port")
			continue
		}
		if port == 0 {
			log.Warn().Str("endpoint", endpoint).Msg("Invalid port")
			continue
		}
		endpoints = append(endpoints, dirk.NewEndpoint(endpointParts[0], uint32(port)))
	}
	if len(endpoints) == 0 {
		return nil, errors.New("no valid endpoints specified")
	}

	farFutureEpoch, err := parameters.farFutureEpochProvider.FarFutureEpoch(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "failed to obtain far future epoch")
	}

	s := &Service{
		monitor:              parameters.monitor,
		clientMonitor:        parameters.clientMonitor,
		processConcurrency:   parameters.processConcurrency,
		endpoints:            endpoints,
		accountPaths:         parameters.accountPaths,
		credentials:          credentials,
		domainProvider:       parameters.domainProvider,
		validatorsManager:    parameters.validatorsManager,
		farFutureEpoch:       farFutureEpoch,
		currentEpochProvider: parameters.currentEpochProvider,
		wallets:              make(map[string]e2wtypes.Wallet),
	}
	log.Trace().Int64("process_concurrency", s.processConcurrency).Msg("Set process concurrency")

	// Prime the account and validator caches before returning the service.
	if err := s.refreshAccounts(ctx); err != nil {
		return nil, errors.Wrap(err, "failed to fetch initial accounts")
	}
	if err := s.refreshValidators(ctx); err != nil {
		return nil, errors.Wrap(err, "failed to fetch initial validator states")
	}

	return s, nil
}

// Refresh refreshes the accounts from Dirk, and account validator state from
// the validators provider.
// This is a relatively expensive operation, so should not be run in the validating path.
func (s *Service) Refresh(ctx context.Context) {
	if err := s.refreshAccounts(ctx); err != nil {
		log.Error().Err(err).Msg("Failed to refresh accounts")
	}
	if err := s.refreshValidators(ctx); err != nil {
		log.Error().Err(err).Msg("Failed to refresh validators")
	}
}

// refreshAccounts refreshes the accounts from Dirk.
func (s *Service) refreshAccounts(ctx context.Context) error {
	// Create the relevant wallets.
	// Group the configured account paths by their wallet (first path
	// segment), opening each wallet once.
	wallets := make([]e2wtypes.Wallet, 0, len(s.accountPaths))
	pathsByWallet := make(map[string][]string)
	for _, path := range s.accountPaths {
		pathBits := strings.Split(path, "/")

		var paths []string
		var exists bool
		if paths, exists = pathsByWallet[pathBits[0]]; !exists {
			paths = make([]string, 0)
		}
		pathsByWallet[pathBits[0]] = append(paths, path)
		wallet, err := s.openWallet(ctx, pathBits[0])
		if err != nil {
			log.Warn().Err(err).Str("wallet", pathBits[0]).Msg("Failed to open wallet")
		} else {
			wallets = append(wallets, wallet)
		}
	}
	verificationRegexes := accountPathsToVerificationRegexes(s.accountPaths)

	// Fetch accounts for each wallet in parallel.
	started := time.Now()
	accounts := make(map[phase0.BLSPubKey]e2wtypes.Account)
	var accountsMu sync.Mutex
	// Semaphore bounds the number of wallets fetched concurrently.
	sem := semaphore.NewWeighted(s.processConcurrency)
	var wg sync.WaitGroup
	for i := range wallets {
		wg.Add(1)
		go func(ctx context.Context, sem *semaphore.Weighted, wg *sync.WaitGroup, i int, mu *sync.Mutex) {
			defer wg.Done()
			if err := sem.Acquire(ctx, 1); err != nil {
				log.Error().Err(err).Msg("Failed to acquire semaphore")
				return
			}
			defer sem.Release(1)
			log := log.With().Str("wallet", wallets[i].Name()).Logger()
			log.Trace().Dur("elapsed", time.Since(started)).Msg("Obtained semaphore")
			walletAccounts := s.fetchAccountsForWallet(ctx, wallets[i], verificationRegexes)
			log.Trace().Dur("elapsed", time.Since(started)).Int("accounts", len(walletAccounts)).Msg("Obtained accounts")
			accountsMu.Lock()
			for k, v := range walletAccounts {
				accounts[k] = v
			}
			accountsMu.Unlock()
			log.Trace().Dur("elapsed", time.Since(started)).Int("accounts", len(walletAccounts)).Msg("Imported accounts")
		}(ctx, sem, &wg, i, &accountsMu)
	}
	wg.Wait()
	log.Trace().Int("accounts", len(accounts)).Msg("Obtained accounts")

	// NOTE(review): s.accounts is read here without holding s.mutex while
	// other goroutines may be reading it — confirm whether Refresh can race
	// with readers and whether this read should take the read lock.
	if len(accounts) == 0 && len(s.accounts) != 0 {
		log.Warn().Msg("No accounts obtained; retaining old list")
		return nil
	}
	s.mutex.Lock()
	s.accounts = accounts
	s.mutex.Unlock()

	return nil
}

// openWallet opens a wallet, using an existing one if present.
func (s *Service) openWallet(ctx context.Context, name string) (e2wtypes.Wallet, error) {
	s.walletsMutex.Lock()
	defer s.walletsMutex.Unlock()

	wallet, exists := s.wallets[name]
	var err error
	if !exists {
		wallet, err = dirk.OpenWallet(ctx, name, s.credentials, s.endpoints)
		if err != nil {
			return nil, err
		}
		s.wallets[name] = wallet
	}

	return wallet, nil
}

// refreshValidators refreshes the validator information for our known accounts.
func (s *Service) refreshValidators(ctx context.Context) error { accountPubKeys := make([]phase0.BLSPubKey, 0, len(s.accounts)) for pubKey := range s.accounts { accountPubKeys = append(accountPubKeys, pubKey) } if err := s.validatorsManager.RefreshValidatorsFromBeaconNode(ctx, accountPubKeys); err != nil { return errors.Wrap(err, "failed to refresh validators") } return nil } func credentialsFromCerts(ctx context.Context, clientCert []byte, clientKey []byte, caCert []byte) (credentials.TransportCredentials, error) { clientPair, err := tls.X509KeyPair(clientCert, clientKey) if err != nil { return nil, errors.Wrap(err, "failed to load client keypair") } tlsCfg := &tls.Config{ Certificates: []tls.Certificate{clientPair}, MinVersion: tls.VersionTLS13, } if caCert != nil { cp := x509.NewCertPool() if !cp.AppendCertsFromPEM(caCert) { return nil, errors.New("failed to add CA certificate") } tlsCfg.RootCAs = cp } return credentials.NewTLS(tlsCfg), nil } // ValidatingAccountsForEpoch obtains the validating accounts for a given epoch. func (s *Service) ValidatingAccountsForEpoch(ctx context.Context, epoch phase0.Epoch) (map[phase0.ValidatorIndex]e2wtypes.Account, error) { // stateCount is used to update metrics. 
stateCount := map[api.ValidatorState]uint64{ api.ValidatorStateUnknown: 0, api.ValidatorStatePendingInitialized: 0, api.ValidatorStatePendingQueued: 0, api.ValidatorStateActiveOngoing: 0, api.ValidatorStateActiveExiting: 0, api.ValidatorStateActiveSlashed: 0, api.ValidatorStateExitedUnslashed: 0, api.ValidatorStateExitedSlashed: 0, api.ValidatorStateWithdrawalPossible: 0, api.ValidatorStateWithdrawalDone: 0, } validatingAccounts := make(map[phase0.ValidatorIndex]e2wtypes.Account) pubKeys := make([]phase0.BLSPubKey, 0, len(s.accounts)) for pubKey := range s.accounts { pubKeys = append(pubKeys, pubKey) } validators := s.validatorsManager.ValidatorsByPubKey(ctx, pubKeys) for index, validator := range validators { state := api.ValidatorToState(validator, epoch, s.farFutureEpoch) stateCount[state]++ if state == api.ValidatorStateActiveOngoing || state == api.ValidatorStateActiveExiting { account := s.accounts[validator.PublicKey] log.Trace(). Str("name", account.Name()). Str("public_key", fmt.Sprintf("%x", account.PublicKey().Marshal())). Uint64("index", uint64(index)). Str("state", state.String()). Msg("Validating account") validatingAccounts[index] = account } } // Update metrics if this is the current epoch. if epoch == s.currentEpochProvider.CurrentEpoch() { stateCount[api.ValidatorStateUnknown] += uint64(len(s.accounts) - len(validators)) for state, count := range stateCount { s.monitor.Accounts(strings.ToLower(state.String()), count) } } return validatingAccounts, nil } // ValidatingAccountsForEpochByIndex obtains the specified validating accounts for a given epoch. 
func (s *Service) ValidatingAccountsForEpochByIndex(ctx context.Context, epoch phase0.Epoch, indices []phase0.ValidatorIndex) (map[phase0.ValidatorIndex]e2wtypes.Account, error) { validatingAccounts := make(map[phase0.ValidatorIndex]e2wtypes.Account) pubKeys := make([]phase0.BLSPubKey, 0, len(s.accounts)) for pubKey := range s.accounts { pubKeys = append(pubKeys, pubKey) } indexPresenceMap := make(map[phase0.ValidatorIndex]bool) for _, index := range indices { indexPresenceMap[index] = true } validators := s.validatorsManager.ValidatorsByPubKey(ctx, pubKeys) for index, validator := range validators { if _, present := indexPresenceMap[index]; !present { continue } state := api.ValidatorToState(validator, epoch, s.farFutureEpoch) if state == api.ValidatorStateActiveOngoing || state == api.ValidatorStateActiveExiting { validatingAccounts[index] = s.accounts[validator.PublicKey] } } return validatingAccounts, nil } // accountPathsToVerificationRegexes turns account paths in to regexes to allow verification. 
func accountPathsToVerificationRegexes(paths []string) []*regexp.Regexp { regexes := make([]*regexp.Regexp, 0, len(paths)) for _, path := range paths { log := log.With().Str("path", path).Logger() parts := strings.Split(path, "/") if len(parts) == 0 || len(parts[0]) == 0 { log.Debug().Msg("Invalid path") continue } if len(parts) == 1 { parts = append(parts, ".*") } if len(parts[1]) == 0 { parts[1] = ".*" } parts[0] = strings.TrimPrefix(parts[0], "^") parts[0] = strings.TrimSuffix(parts[0], "$") parts[1] = strings.TrimPrefix(parts[1], "^") parts[1] = strings.TrimSuffix(parts[1], "$") specifier := fmt.Sprintf("^%s/%s$", parts[0], parts[1]) regex, err := regexp.Compile(specifier) if err != nil { log.Warn().Str("specifier", specifier).Err(err).Msg("Invalid path regex") continue } regexes = append(regexes, regex) } return regexes } func (s *Service) fetchAccountsForWallet(ctx context.Context, wallet e2wtypes.Wallet, verificationRegexes []*regexp.Regexp) map[phase0.BLSPubKey]e2wtypes.Account { res := make(map[phase0.BLSPubKey]e2wtypes.Account) for account := range wallet.Accounts(ctx) { // Ensure the name matches one of our account paths. name := fmt.Sprintf("%s/%s", wallet.Name(), account.Name()) verified := false for _, verificationRegex := range verificationRegexes { if verificationRegex.Match([]byte(name)) { verified = true break } } if !verified { log.Debug().Str("account", name).Msg("Received unwanted account from server; ignoring") continue } var pubKey []byte if provider, isProvider := account.(e2wtypes.AccountCompositePublicKeyProvider); isProvider { pubKey = provider.CompositePublicKey().Marshal() } else { pubKey = account.PublicKey().Marshal() } res[bytesutil.ToBytes48(pubKey)] = account } return res }
package com_errors

import "errors"

// Sentinel errors for the fetcher; compare with == or errors.Is.
var (
	// ZeroMQ errors
	ErrZMQContext = errors.New("fetcher: Could not create ZeroMQ Context")
	ErrZMQConnect = errors.New("fetcher: Could not connect to port")
	// NOTE(review): "Recieve" is a typo, but the identifier is exported and
	// renaming it would break callers; kept as-is.
	ErrZMQRecieve = errors.New("fetcher: Could not receive message from server")
	ErrZMQSend    = errors.New("fetcher: Could not send message to server")

	// Message Errors
	ErrUnknownMessage      = errors.New("No known message of that name")
	ErrUnknownNotification = errors.New("Notification not known")
)
package routinghelpers

import (
	"context"
	"testing"

	peert "gx/ipfs/QmPJxxDsX2UbchSHobbYuvz7qnyJTFKvaKMzE2rZWJ4x5B/go-libp2p-peer/test"
	routing "gx/ipfs/QmRjT8Bkut84fHf9nxMQBxGsqLAkqzMdFaemDK7e61dBNZ/go-libp2p-routing"
)

// TestGetPublicKey builds a composite router out of Parallel/Tiered/Compose
// wrappers with namespace-limited value stores, then asserts that looking up
// a random peer's public key yields routing.ErrNotFound (dummyValueStore is
// defined elsewhere in this package and presumably holds no records).
func TestGetPublicKey(t *testing.T) {
	d := Parallel{
		Routers: []routing.IpfsRouting{
			// Limited to the "other" namespace: not eligible for "pk" lookups.
			Parallel{
				Routers: []routing.IpfsRouting{
					&Compose{
						ValueStore: &LimitedValueStore{
							ValueStore: new(dummyValueStore),
							Namespaces: []string{"other"},
						},
					},
				},
			},
			// Limited to "pk": eligible for the lookup, but empty.
			Tiered{
				Routers: []routing.IpfsRouting{
					&Compose{
						ValueStore: &LimitedValueStore{
							ValueStore: new(dummyValueStore),
							Namespaces: []string{"pk"},
						},
					},
				},
			},
			// Allows both namespaces.
			&Compose{
				ValueStore: &LimitedValueStore{
					ValueStore: new(dummyValueStore),
					Namespaces: []string{"other", "pk"},
				},
			},
			// Edge cases: a limiter with no namespaces over a Null store,
			// and a zero-value Compose.
			&struct{ Compose }{Compose{ValueStore: &LimitedValueStore{ValueStore: Null{}}}},
			&struct{ Compose }{},
		},
	}
	pid, _ := peert.RandPeerID()
	ctx := context.Background()
	// Note: an unexpected nil error also fails the test via t.Fatal(nil).
	if _, err := d.GetPublicKey(ctx, pid); err != routing.ErrNotFound {
		t.Fatal(err)
	}
}
package main import ( "testing" "strings" "fmt" "reflect" "github.com/stretchr/testify/assert" ) func TestSplit(t *testing.T) { fmt.Println("string array test....") got := strings.Split("a:b:c", ":") want := []string{"a", "b", "c"}; if !reflect.DeepEqual(got, want) { fmt.Println("!reflect.DeepEqual(got, want)", got,want) }else{ fmt.Println("reflect.DeepEqual(got, want)", got,want) } assert.Equal(t, got, want) }
package socket

import (
	"KServer/library/kiface/isocket"
	"KServer/library/socket"
)

// IClient wraps a raw connection together with its token.
// (Interface comments below translated from the original Chinese.)
type IClient interface {
	/*
		Send a message.
		data: []byte
		Returns: error
	*/
	Send(data []byte) error
	/*
		Send a buffered message.
		data: []byte
		Returns: error
	*/
	SendBuff(data []byte) error
	/*
		Get the client's ConnId.
		Returns: uint32
	*/
	GetConnId() uint32
	/*
		Stop the client and run the close callback.
	*/
	Stop()
	/*
		Get the client's token.
		Returns: string
	*/
	GetToken() string
	/*
		Get the client's raw Connection.
		Returns: IConnection
	*/
	GetRawConn() isocket.IConnection
}

// Client is the default IClient implementation, delegating to the wrapped
// connection.
type Client struct {
	Conn isocket.IConnection
	// pack is not used by any method visible here — presumably reserved for
	// message framing; confirm against the rest of the package.
	pack  isocket.IDataPack
	Token string
}

// NewClient builds a Client around an established connection and its token.
func NewClient(conn isocket.IConnection, Token string) IClient {
	return &Client{Conn: conn, Token: Token, pack: socket.NewDataPack()}
}

// Send forwards data to the connection as a plain message.
func (c *Client) Send(data []byte) error {
	return c.Conn.SendMsg(data)
}

// SendBuff forwards data to the connection as a buffered message.
func (c *Client) SendBuff(data []byte) error {
	return c.Conn.SendBuffMsg(data)
}

// Stop shuts down the underlying connection.
func (c *Client) Stop() {
	c.Conn.Stop()
}

// GetConnId returns the underlying connection's ID.
func (c *Client) GetConnId() uint32 {
	return c.Conn.GetConnID()
}

// GetRawConn exposes the wrapped connection.
func (c *Client) GetRawConn() isocket.IConnection {
	return c.Conn
}

// GetToken returns the token supplied at construction.
func (c *Client) GetToken() string {
	return c.Token
}
package reflection import ( "fmt" "reflect" ) func ValidateWriterFunction(writerFuncPtrValue reflect.Value, baseType reflect.Type) reflect.Type { if writerFuncPtrValue.Kind() != reflect.Ptr || writerFuncPtrValue.Elem().Kind() != reflect.Func { panic(fmt.Errorf("reader function value has to be passed by pointer")) } writerFuncType := writerFuncPtrValue.Elem().Type() if writerFuncType.NumIn() != 1 { panic(fmt.Errorf("expected 1 input argument for the baseRecord function, got: %v", writerFuncType.NumIn())) } baseReturnType := writerFuncType.In(0) isAssignable := baseType.AssignableTo(baseReturnType) if !isAssignable { panic(fmt.Errorf("the return type '%v' of the writer function is incompatible with the base type '%v", baseReturnType.String(), baseType.String())) } return writerFuncType }
// Copyright 2017 Vlad Didenko. All rights reserved.
// See the included LICENSE.md file for licensing information

package slops // import "go.didenko.com/slops"

// Common gathers same entries from two sorted slices into
// a new slice. The order is preserved. The lesser number of
// duplicates is preserved
func Common(left, right []string) []string {
	return CollectCommon(left, right, GetAll)
}

// CommonUnique gathers same entries from two sorted slices into
// a new slice. The order is preserved. Duplicates are reduced to
// a single item
func CommonUnique(left, right []string) []string {
	return CollectCommon(left, right, GetUnique)
}

// CollectCommon applies a Collector to every item which is
// in both left and right slices. Both input slices are
// expected to be sorted.
// (Collector, GetAll, GetUnique, NoOp and CollectVariety are defined
// elsewhere in this package.)
func CollectCommon(left, right []string, collect Collector) []string {
	return CollectVariety(left, right, NoOp, collect, NoOp)
}
package onepage import ( "path/filepath" "runtime" "github.com/maprost/application/generator/genmodel" ) func Data(application *genmodel.Application) (data interface{}, err error) { index, err := initData(application) if err != nil { return } data = index return } func Files() (path string, mainFile string, subFiles []string) { path = rootPath() + "/template/" mainFile = "index.tex" subFiles = []string{} return } func rootPath() string { _, curFile, _, _ := runtime.Caller(0) return filepath.Dir(curFile) }
package msgbuzz import "fmt" type QueueNameGenerator struct { topicName string clientGroup string } func NewQueueNameGenerator(topicName string, clientGroup string) *QueueNameGenerator { return &QueueNameGenerator{ topicName: topicName, clientGroup: clientGroup, } } func (q QueueNameGenerator) Exchange() string { return q.topicName } func (q QueueNameGenerator) Queue() string { return fmt.Sprintf("%s.%s", q.topicName, q.clientGroup) } func (q QueueNameGenerator) RetryExchange() string { return q.RetryQueue() } func (q QueueNameGenerator) RetryQueue() string { return fmt.Sprintf("%s__retry", q.Queue()) } func (q QueueNameGenerator) DlxExchange() string { return q.DlxQueue() } func (q QueueNameGenerator) DlxQueue() string { return fmt.Sprintf("%s__failed", q.Queue()) }
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
)

// main loads input.txt (located next to this source file) and prints how
// many lines satisfy the position-based password policy.
func main() {
	_, file, _, _ := runtime.Caller(0)
	content, err := ioutil.ReadFile(filepath.Join(filepath.Dir(file), "./input.txt"))
	if err != nil {
		log.Fatalln("load input error:", err)
	}
	valid := countValid(strings.Split(string(content), "\n"))
	fmt.Println("valid:", valid)
	fmt.Println("done")
}

// countValid returns how many lines of the form "P1-P2 C: PASSWORD" have the
// character C at exactly one of the 1-based positions P1 or P2.
// As in the original, malformed numbers abort via log.Fatalln and malformed
// line structure would panic — assumes the input has no trailing empty line;
// TODO confirm against input.txt.
func countValid(lines []string) int {
	valid := 0
	for _, line := range lines {
		fields := strings.Split(line, " ")
		positions := strings.Split(fields[0], "-")
		pos1, err := strconv.Atoi(positions[0])
		if err != nil {
			log.Fatalln("input not number", err)
		}
		pos2, err := strconv.Atoi(positions[1])
		if err != nil {
			log.Fatalln("input not number", err)
		}
		target := fields[1][0]
		// Valid iff exactly one of the two positions holds the target (XOR).
		first := fields[2][pos1-1] == target
		second := fields[2][pos2-1] == target
		if first != second {
			valid++
		}
	}
	return valid
}
package slices_test

import (
	"fmt"

	"github.com/life4/genesis/channels"
	"github.com/life4/genesis/slices"
)

// Runnable examples for the genesis slices package; the "// Output:"
// comments are verified by `go test`.

func ExampleAny() {
	even := func(item int) bool { return item%2 == 0 }
	result := slices.Any([]int{1, 2, 3}, even)
	fmt.Println(result)
	result = slices.Any([]int{1, 3, 5}, even)
	fmt.Println(result)
	// Output:
	// true
	// false
}

func ExampleFindIndex() {
	type UserId int
	index := slices.FindIndex(
		[]UserId{1, 2, 3, 4, 5},
		func(el UserId) bool { return el == 3 },
	)
	fmt.Println(index)
	// Output: 2
}

func ExampleAll() {
	even := func(item int) bool { return item%2 == 0 }
	result := slices.All([]int{2, 4, 6}, even)
	fmt.Println(result)
	result = slices.All([]int{2, 4, 5}, even)
	fmt.Println(result)
	// Output:
	// true
	// false
}

func ExampleChunkBy() {
	s := []int{1, 3, 4, 6, 8, 9}
	remainder := func(item int) int { return item % 2 }
	result := slices.ChunkBy(s, remainder)
	fmt.Println(result)
	// Output: [[1 3] [4 6 8] [9]]
}

func ExampleChunkEvery() {
	s := []int{1, 1, 2, 3, 5, 8, 13}
	result, _ := slices.ChunkEvery(s, 3)
	fmt.Println(result)
	// Output: [[1 1 2] [3 5 8] [13]]
}

func ExampleContains() {
	s := []int{2, 4, 6, 8}
	result := slices.Contains(s, 4)
	fmt.Println(result)
	result = slices.Contains(s, 3)
	fmt.Println(result)
	// Output:
	// true
	// false
}

func ExampleCount() {
	s := []int{1, 0, 1, 0, 0, 1, 1, 0, 1, 0}
	result := slices.Count(s, 1)
	fmt.Println(result)
	// Output: 5
}

func ExampleCountBy() {
	s := []int{1, 2, 3, 4, 5, 6}
	even := func(item int) bool { return item%2 == 0 }
	result := slices.CountBy(s, even)
	fmt.Println(result)
	// Output: 3
}

func ExampleCycle() {
	s := []int{1, 2, 3}
	c := slices.Cycle(s)
	c = channels.Take(c, 5)
	result := channels.ToSlice(c)
	fmt.Println(result)
	// Output: [1 2 3 1 2]
}

func ExampleDedup() {
	s := []int{1, 2, 2, 3, 3, 3, 2, 3, 1, 1}
	result := slices.Dedup(s)
	fmt.Println(result)
	// Output: [1 2 3 2 3 1]
}

func ExampleMin() {
	s := []int{42, 7, 13}
	min, _ := slices.Min(s)
	fmt.Println(min)
	// Output: 7
}

func ExampleMax() {
	s := []int{7, 42, 13}
	max, _ := slices.Max(s)
	fmt.Println(max)
	// Output: 42
}

func ExampleMap() {
	s := []int{4, 8, 15, 16, 23, 42}
	double := func(el int) int { return el * 2 }
	doubled := slices.Map(s, double)
	fmt.Println(doubled)
	// Output: [8 16 30 32 46 84]
}

func ExampleMapFilter() {
	s := []int{4, 8, 15, 16, 23, 42}
	isEven := func(t int) (string, bool) {
		if t%2 == 0 {
			s := fmt.Sprintf("%d", t)
			return s, true
		} else {
			return "", false
		}
	}
	doubled := slices.MapFilter(s, isEven)
	fmt.Println(doubled)
	// Output: [4 8 16 42]
}

func ExampleMapAsync() {
	pages := slices.MapAsync(
		[]string{"google.com", "go.dev", "golang.org"},
		0,
		func(url string) string {
			return fmt.Sprintf("<web page for %s>", url)
		},
	)
	fmt.Println(pages)
	// Note: the line below has no "Output:" directive, so go test does not
	// verify it — presumably intentional; confirm before adding one.
	// [<web page for google.com> <web page for go.dev> <web page for golang.org>]
}
package boot

import (
	// Blank import for its side effects only: importing this package pulls
	// in the packed (embedded) resources.
	_ "gf-init/packed"
)

// init is intentionally empty; the package exists to trigger the blank
// import above.
func init() {
}
package sqlmodel import ( "github.com/2liang/mcache/models/base" "errors" "strconv" ) type KeyData struct { Id int `xorm:"id int(11)" json:"id"` CaseId int `xorm:"case_id int(10)" json:"case_id"` Name string `xorm:"name varchar(250)" json:"name"` Desc string `xorm:"desc varchar(250)" json:"desc"` Prefix string `xorm:"prefix varchar(250)" json:"prefix"` KeyType string `xorm:"key_type varchar(250)" json:"key_type"` CreateTime int64 `xorm:"create_time int(11)" json:"create_time"` ModifyTime int64 `xorm:"modify_time int(11)" json:"modify_time"` } func(kd *KeyData) GetKeyById() ([]KeyData, error) { db := base.DbCache.GetSlave() r := make([]KeyData, 0) if err := db.Table("keys").Where("id = ?", kd.Id).Find(&r); err != nil { return nil, err } if len(r) < 1 { return nil, errors.New("this key (" + strconv.Itoa(kd.Id) + ") does not exists!") } return r, nil } func(kd *KeyData) GetKey(cid int, name string, page int, limit int) ([]KeyData, error) { start := (page - 1) * limit db := base.DbCache.GetSlave() // 判断cid是否存在 CaseData := new(CaseData) CaseInfo, err := CaseData.GetCaseById(cid) if err != nil { return nil, err } if len(CaseInfo) < 1 { return nil, errors.New("this is case(" + strconv.Itoa(cid) + ") does not exist!") } r := make([]KeyData, 0) if err := db.Table("keys").Where("case_id = ? 
AND name LIKE ?", cid, "%" + name + "%").Limit(limit, start).Find(&r); err != nil { return nil, err } return r, nil } func(kd *KeyData) AddKey() (int64, error) { db := base.DbCache.GetMaster() // 判断实例是否存在 cd := new(CaseData) caseInfo, err := cd.GetCaseById(kd.CaseId) if err != nil { return 1, err } if len(caseInfo) < 1 { return 1, errors.New("this is case(" + strconv.Itoa(kd.CaseId) + ") does not exist!") } r := make([]KeyData, 0) if err := db.Table("keys").Where("name = ?", kd.Name).Limit(1, 0).Find(&r); err != nil { return 1, err } if len(r) > 0 { return 1, errors.New("this is name(" + kd.Name + ") already exists!") } res, err := db.Table("keys").Insert(kd) return res, err } func(kd *KeyData) UpdateKey (id int) (int64, error) { db := base.DbCache.GetMaster() keyInfo, err := kd.GetKeyById() if err != nil { return 1, err } if len(keyInfo) < 1 { return 1, errors.New("this is key(" + strconv.Itoa(kd.Id) + ") does not exists!") } r := make([]KeyData, 0) if err := db.Table("keys").Where("name = ?", kd.Name).Limit(1, 0).Find(&r); err != nil { return 1, err } if len(r) > 0 { return 1, errors.New("this is name(" + kd.Name + ") already exists!") } res, err := db.Table("keys").Where("id = ?", id).Update(kd) if err != nil { return 1, err } return res, nil } func(kd *KeyData) DeleteKey () (int64, error) { db := base.DbCache.GetMaster() keyInfo, err := kd.GetKeyById() if err != nil { return 1, err } if len(keyInfo) < 1 { return 1, errors.New("this is key(" + strconv.Itoa(kd.Id) + ") does not exists!") } res, err := db.Table("keys").Where("id = ?", kd.Id).Delete(kd) return res, err }
package rule import ( "net/http" "strings" "github.com/sirupsen/logrus" ) // HTTPMethodSetting HTTPMethodRule Setting type HTTPMethodSetting struct { Method string Exclude bool } type httpMethodRule struct { method string exclude bool } // NewHTTPMethodRule : func NewHTTPMethodRule(setting HTTPMethodSetting) Rule { return httpMethodRule{ method: setting.Method, exclude: setting.Exclude, } } // Execute Execute HTTP Method Based Rule func (r httpMethodRule) Execute(req *http.Request) bool { if strings.ToUpper(r.method) == strings.ToUpper(req.Method) && !r.exclude { logrus.WithFields(logrus.Fields{ "type": "rule", "app": "rigis", }).Tracef("HTTPMethodRule:Success:%s", r.method) return true } logrus.WithFields(logrus.Fields{ "type": "rule", "app": "rigis", }).Tracef("HTTPMethodRule:Failed:%s", r.method) return false }
/*
Description
Ugly numbers are numbers whose only prime factors are 2, 3 or 5. The sequence
1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ...
shows the first 10 ugly numbers. By convention, 1 is included.
Given the integer n, write a program to find and print the n'th ugly number.

Input
Each line of the input contains a postisive integer n (n <= 1500). Input is
terminated by a line with n=0.

Output
For each line, output the n'th ugly number. Don't deal with the line with n=0.

Sample Input
1
2
9
0

Sample Output
1
2
10

Source
New Zealand 1990 Division I, UVA 136
*/
package main

// main sanity-checks the sieve against the sample values.
func main() {
	seq := uglysieve(10)
	assert(seq[0] == 1)
	assert(seq[1] == 2)
	assert(seq[8] == 10)
}

// assert panics when the condition does not hold.
func assert(ok bool) {
	if ok {
		return
	}
	panic("assertion failed")
}

// uglysieve returns the first n ugly numbers in ascending order using the
// classic three-pointer merge. https://oeis.org/A051037
func uglysieve(n int) []int {
	if n < 1 {
		return []int{}
	}
	seq := make([]int, n)
	seq[0] = 1
	// p2/p3/p5 index the last ugly number already multiplied by 2/3/5;
	// c2/c3/c5 are the corresponding next candidate products.
	p2, p3, p5 := 0, 0, 0
	c2, c3, c5 := 2, 3, 5
	for k := 1; k < n; k++ {
		next := min(c2, c3, c5)
		seq[k] = next
		// Advance every stream that produced this value, which also
		// removes duplicates (e.g. 6 = 2*3 = 3*2).
		if next == c2 {
			p2++
			c2 = seq[p2] * 2
		}
		if next == c3 {
			p3++
			c3 = seq[p3] * 3
		}
		if next == c5 {
			p5++
			c5 = seq[p5] * 5
		}
	}
	return seq
}
package main

import (
	"fmt"
	"dockertree"
)

// Demo of the dockertree package: builds a registry root, a parent node and
// a child node, then prints their resolved names (GetBaseName/GetFullName
// are defined in the dockertree package).
func main() {
	r := dockertree.Root{Registry: "test.com", BasePath: "/foo/bar"}
	fmt.Println(r.GetBaseName())
	p := dockertree.Node{Name: "bare", Tag: "latest", Root: &r}
	n := dockertree.Node{Name: "centos", Tag: "7", Parent: &p}
	fmt.Println(n.GetFullName())
}
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package aggfuncs

import (
	"unsafe"

	"github.com/pingcap/tidb/sessionctx"
	"github.com/pingcap/tidb/util/chunk"
)

const (
	// DefPartialResult4Ntile is the size of partialResult4Ntile
	DefPartialResult4Ntile = int64(unsafe.Sizeof(partialResult4Ntile{}))
)

// ntile divides the partition into n ranked groups and returns the group number a row belongs to.
// e.g. We have 11 rows and n = 3. They will be divided into 3 groups.
//
// First 4 rows belongs to group 1. Following 4 rows belongs to group 2. The last 3 rows belongs to group 3.
type ntile struct {
	n uint64
	baseAggFunc
}

// partialResult4Ntile tracks progress through the current partition.
type partialResult4Ntile struct {
	curIdx      uint64 // rows already emitted within the current group
	curGroupIdx uint64 // 1-based group number assigned to the next row
	remainder   uint64 // numRows % n: the first `remainder` groups get one extra row
	quotient    uint64 // numRows / n: base number of rows per group
	numRows     uint64 // total rows accumulated for the partition
}

// AllocPartialResult creates a fresh partial result; group numbering starts at 1.
func (*ntile) AllocPartialResult() (pr PartialResult, memDelta int64) {
	return PartialResult(&partialResult4Ntile{curGroupIdx: 1}), DefPartialResult4Ntile
}

// ResetPartialResult prepares pr for a new partition. quotient/remainder are
// not cleared here: they are recomputed by UpdatePartialResult (when n != 0).
func (*ntile) ResetPartialResult(pr PartialResult) {
	p := (*partialResult4Ntile)(pr)
	p.curIdx = 0
	p.curGroupIdx = 1
	p.numRows = 0
}

// UpdatePartialResult accumulates the partition row count and refreshes the
// per-group sizing (quotient/remainder).
func (n *ntile) UpdatePartialResult(_ sessionctx.Context, rowsInGroup []chunk.Row, pr PartialResult) (memDelta int64, err error) {
	p := (*partialResult4Ntile)(pr)
	p.numRows += uint64(len(rowsInGroup))
	// Update the quotient and remainder.
	if n.n != 0 {
		p.quotient = p.numRows / n.n
		p.remainder = p.numRows % n.n
	}
	return 0, nil
}

// AppendFinalResult2Chunk emits the current group number for one row and
// advances to the next group once the current one is full.
func (n *ntile) AppendFinalResult2Chunk(_ sessionctx.Context, pr PartialResult, chk *chunk.Chunk) error {
	p := (*partialResult4Ntile)(pr)

	// If the divisor is 0, the arg of NTILE would be NULL. So we just return NULL.
	if n.n == 0 {
		chk.AppendNull(n.ordinal)
		return nil
	}

	chk.AppendUint64(n.ordinal, p.curGroupIdx)

	p.curIdx++
	// The first `remainder` groups hold quotient+1 rows; the rest hold quotient.
	curMaxIdx := p.quotient
	if p.curGroupIdx <= p.remainder {
		curMaxIdx++
	}
	if p.curIdx == curMaxIdx {
		p.curIdx = 0
		p.curGroupIdx++
	}
	return nil
}
package instrument

import (
	"time"

	confluent "github.com/confluentinc/confluent-kafka-go/kafka"
)

// Collector allows to specify a collector for all the main actions of the kafka transformer.
// Main actions are : consume, transform, produce/project
// Before is called before the action and After called after.
// warning: message can be nil, check err != nil also for errors
type Collector interface {
	// Before is invoked prior to the action; start is presumably the
	// action's start timestamp — confirm with call sites.
	Before(message *confluent.Message, action Action, start time.Time)
	// After is invoked once the action has finished; err is non-nil when
	// the action failed (message may be nil in that case, per the warning
	// above).
	After(message *confluent.Message, action Action, err error, start time.Time)
}
package main

import (
	"github.com/aws/aws-lambda-go/events"
	"encoding/json"
	"bartenderAsFunction/dao"
	"bartenderAsFunction/model"
	"github.com/aws/aws-lambda-go/lambda"
	"fmt"
)

// DataConnectionManager provides access to the command store; wired up in
// main so tests can substitute a fake.
var DataConnectionManager dao.CommandConnectionInterface

// Handler serves one item (beer or food) of a command identified by the
// idCommand path parameter. This is an exercise skeleton: the numbered TODOs
// are intentionally left for the student to implement.
func Handler(request events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {
	idCommand := request.PathParameters["idCommand"]
	typeItem := request.PathParameters["type"]
	toServe := model.Item{}
	// NOTE(review): the Unmarshal error is ignored; a malformed body leaves
	// toServe zero-valued — confirm this is acceptable for the exercise.
	json.Unmarshal([]byte(request.Body), &toServe)
	fmt.Println(idCommand)
	// TODO 1. read the command by idCommand. Hint, there are a dao package with all you need. Use the DataConnectionManager var
	var command model.Command
	// TODO 2. Verify command exist. If not, return 200 but with body "not available command to serve"
	// TODO 3. search item (just implement method in TODO 3
	if typeItem == "beer" {
		serveCommand(&command.Beer, toServe.Name)
	} else {
		serveCommand(&command.Food, toServe.Name)
	}
	// TODO 4. save command. User dao package
	// And return 200 with command. Use Json marshall to transform the command in []byte
	return events.APIGatewayProxyResponse{}, nil
}

// TODO 3
// serveCommand marks every item in items whose Name equals name as served,
// mutating the slice in place.
func serveCommand(items *[]model.Item, name string) {
	for i, item := range *items {
		if item.Name == name {
			(*items)[i].Served = true
		}
	}
}

// main wires the real DAO implementation and starts the Lambda runtime.
func main() {
	DataConnectionManager = dao.CreateCommandConnection()
	lambda.Start(Handler)
}
package storage import ( "context" "fmt" "strings" "k8s.io/apimachinery/pkg/fields" "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/kubernetes/pkg/printers" printerstorage "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/kubernetes/pkg/printers/storage" apierrors "k8s.io/apimachinery/pkg/api/errors" metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" genericreq "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/apiserver/pkg/registry/rest" "github.com/operator-framework/operator-lifecycle-manager/pkg/package-server/apis/operators" "github.com/operator-framework/operator-lifecycle-manager/pkg/package-server/provider" ) type PackageManifestStorage struct { groupResource schema.GroupResource prov provider.PackageManifestProvider scheme *runtime.Scheme rest.TableConvertor } var _ rest.Storage = &PackageManifestStorage{} var _ rest.KindProvider = &PackageManifestStorage{} var _ rest.Lister = &PackageManifestStorage{} var _ rest.Getter = &PackageManifestStorage{} var _ rest.Scoper = &PackageManifestStorage{} var _ rest.TableConvertor = &PackageManifestStorage{} var _ rest.SingularNameProvider = &PackageManifestStorage{} // NewStorage returns a struct that implements methods needed for Kubernetes to satisfy API requests for the `PackageManifest` resource func NewStorage(groupResource schema.GroupResource, prov provider.PackageManifestProvider, scheme *runtime.Scheme) *PackageManifestStorage { return &PackageManifestStorage{ groupResource: groupResource, prov: prov, scheme: scheme, TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(addTableHandlers)}, } } // New satisfies the Storage interface func (m *PackageManifestStorage) New() runtime.Object { return &operators.PackageManifest{} } // Destroy satisfies the Storage 
interface // Performs a no-op func (m *PackageManifestStorage) Destroy() {} // Kind satisfies the KindProvider interface func (m *PackageManifestStorage) Kind() string { return "PackageManifest" } func (m *PackageManifestStorage) GetSingularName() string { return strings.ToLower(m.Kind()) } // NewList satisfies part of the Lister interface func (m *PackageManifestStorage) NewList() runtime.Object { return &operators.PackageManifestList{} } // List satisfies part of the Lister interface func (m *PackageManifestStorage) List(ctx context.Context, options *metainternalversion.ListOptions) (runtime.Object, error) { namespace := genericreq.NamespaceValue(ctx) labelSelector := labels.Everything() if options != nil && options.LabelSelector != nil { labelSelector = options.LabelSelector } name, err := nameFor(options.FieldSelector) if err != nil { return nil, err } res, err := m.prov.List(namespace, labelSelector) if err != nil { return nil, apierrors.NewInternalError(err) } filtered := []operators.PackageManifest{} for _, manifest := range res.Items { if matches(manifest, name) { filtered = append(filtered, manifest) } } for i := range filtered { for j := range filtered[i].Status.Channels { filtered[i].Status.Channels[j].CurrentCSVDesc.Icon = []operators.Icon{} } } res.Items = filtered return res, nil } // Get satisfies the Getter interface func (m *PackageManifestStorage) Get(ctx context.Context, name string, opts *metav1.GetOptions) (runtime.Object, error) { namespace := genericreq.NamespaceValue(ctx) manifest, err := m.prov.Get(namespace, name) if err != nil || manifest == nil { return nil, apierrors.NewNotFound(m.groupResource, name) } // Strip logo icons for i := range manifest.Status.Channels { manifest.Status.Channels[i].CurrentCSVDesc.Icon = []operators.Icon{} } return manifest, nil } // NamespaceScoped satisfies the Scoper interface func (m *PackageManifestStorage) NamespaceScoped() bool { return true } func nameFor(fs fields.Selector) (string, error) { if fs == 
nil { fs = fields.Everything() } name := "" if value, found := fs.RequiresExactMatch("metadata.name"); found { name = value } else if !fs.Empty() { return "", fmt.Errorf("field label not supported: %s", fs.Requirements()[0].Field) } return name, nil } func matches(pm operators.PackageManifest, name string) bool { if name == "" { name = pm.GetName() } return pm.GetName() == name }
// Package tfrecord is an obvious tfrecord IO implementation
//
// Format spec: https://www.tensorflow.org/tutorials/load_data/tfrecord,
// assume all numbers are little-endian although not actually defined in spec.
package tfrecord

import (
	"encoding/binary"
	"errors"
	"hash/crc32"
	"io"
)

// Record framing: an 8-byte little-endian length, a 4-byte masked CRC of
// that length, the payload, then a 4-byte masked CRC of the payload.
const (
	crcMagicNum = 0xa282ead8
	lengthSize  = 8
	crcSize     = 4
	headerSize  = lengthSize + crcSize
	footerSize  = crcSize
)

// ErrChecksum is error returned when TFRecord content doesn't pass checksum.
// It indicates data corruption or wrong file format.
var ErrChecksum = errors.New("checksum error in TFRecord")

// see TFRecord spec.
var crc32Table = crc32.MakeTable(crc32.Castagnoli)

// checksum computes the "masked" CRC32-C used by the format: the Castagnoli
// CRC rotated right by 15 bits, plus a fixed magic constant.
func checksum(p []byte) uint32 {
	crc := crc32.Checksum(p, crc32Table)
	return ((crc >> 15) | (crc << 17)) + crcMagicNum
}

// Iterator iterates TFRecords through an io.Reader
type Iterator struct {
	r            io.Reader
	checkDataCRC bool
	preBuf       []byte // reusable read buffer; records larger than it get a fresh allocation
	value        []byte // current record; may alias preBuf
	err          error  // sticky error that stopped iteration (never io.EOF)
}

// NewIterator creates a Iterator. Iterator pre-allocates and reuse buffer to avoid frequent buffer allocation,
// bufSize should be set to upper-bound of expected common record size. when checkDataCRC is true, check CRC of
// data content, this is the recommend setup because checking CRC of data won't be performance bottleneck in most cases.
func NewIterator(r io.Reader, bufSize int64, checkDataCRC bool) *Iterator {
	var buf []byte
	if bufSize > 0 {
		buf = make([]byte, bufSize)
	}
	return &Iterator{
		r:            r,
		checkDataCRC: checkDataCRC,
		preBuf:       buf,
	}
}

// Next reads in next record from underlying reader
func (it *Iterator) Next() bool {
	if it.err != nil {
		return false
	}
	withError := func(err error) bool {
		it.err = err
		return false
	}
	it.value = nil
	header := [headerSize]byte{}
	if _, err := io.ReadFull(it.r, header[:]); err != nil {
		// A clean EOF at a record boundary ends iteration without error; a
		// truncated header surfaces as io.ErrUnexpectedEOF via withError.
		if err == io.EOF {
			return false
		}
		return withError(err)
	}
	recordLen := binary.LittleEndian.Uint64(header[:lengthSize])
	lenCRC := binary.LittleEndian.Uint32(header[lengthSize:])
	if crc := checksum(header[:lengthSize]); crc != lenCRC {
		return withError(ErrChecksum)
	}
	// Reuse preBuf when the record fits, otherwise allocate a new buffer.
	var record []byte
	if recordLen > uint64(len(it.preBuf)) {
		record = make([]byte, recordLen)
	} else {
		record = it.preBuf[:recordLen]
	}
	if _, err := io.ReadFull(it.r, record); err != nil {
		return withError(err)
	}
	var footer [footerSize]byte
	if _, err := io.ReadFull(it.r, footer[:]); err != nil {
		return withError(err)
	}
	if it.checkDataCRC {
		dataCRC := binary.LittleEndian.Uint32(footer[:])
		if crc := checksum(record); crc != dataCRC {
			return withError(ErrChecksum)
		}
	}
	it.value = record
	return true
}

// Err returns any error stopping Next(), io.EOF is not considered error
func (it *Iterator) Err() error {
	return it.err
}

// Value returns the current value, returns nil when iterator not in valid state.
// NOTE(review): the returned slice may alias the iterator's internal buffer
// and is only valid until the next call to Next(); copy it to retain.
func (it *Iterator) Value() []byte {
	return it.value
}

// NewWriter creates a TFRecord writer on top of w
func NewWriter(w io.Writer) *Writer {
	return &Writer{w: w}
}

// Writer implements io.Writer that writes TFRecord
type Writer struct {
	w io.Writer
}

// Write implements io.Write
// It frames record as length + length-CRC + payload + payload-CRC and
// reports len(record) bytes written on success.
func (w *Writer) Write(record []byte) (n int, err error) {
	header := [headerSize]byte{}
	binary.LittleEndian.PutUint64(header[:lengthSize], uint64(len(record)))
	binary.LittleEndian.PutUint32(header[lengthSize:], checksum(header[:lengthSize]))
	if _, err := w.w.Write(header[:]); err != nil {
		return 0, err
	}
	if _, err := w.w.Write(record); err != nil {
		return 0, err
	}
	var footer [footerSize]byte
	binary.LittleEndian.PutUint32(footer[:], checksum(record))
	if _, err := w.w.Write(footer[:]); err != nil {
		return 0, err
	}
	return len(record), nil
}
package config import ( "github.com/caarlos0/env" log "github.com/sirupsen/logrus" ) type Config struct { Port string `env:"PORT"` } func Load() (cfg Config) { if err := env.Parse(&cfg); err != nil { log.Errorf("%s", err) } return }
package decl import ( "go/ast" "fmt" "github.com/sky0621/go-testcode-autogen/inspect/result" ) type GenDeclInspector struct{} func (i *GenDeclInspector) IsTarget(node ast.Node) bool { switch node.(type) { case *ast.GenDecl: return true } return false } func (i *GenDeclInspector) Inspect(node ast.Node, aggregater *result.Aggregater) error { gd, ok := node.(*ast.GenDecl) if !ok { return fmt.Errorf("Not target Node: %#v", node) } // FIXME fmt.Println("===== GenDeclInspector ===================================================================================") fmt.Printf("GenDecl: %#v\n", gd) return nil }
package main

import "os"

// fib is an accumulator-style recursion: fib(0, acc, prev) = acc, and each
// step shifts (acc, prev) -> (prev+acc, acc).
func fib(n uint, acc uint, prev uint) uint {
	if n == 0 {
		return acc
	}
	return fib(n-1, prev+acc, acc)
}

// main exits with the (truncated) result of the recursion.
// NOTE(review): seeding with acc=0, prev=0 makes every step produce 0, so
// the result — and hence the exit code — is always 0. Confirm whether the
// seed was meant to be (0, 1) to yield the actual Fibonacci number, or
// whether this is a deliberate recursion benchmark.
func main() {
	os.Exit(int(fib(2560000, 0, 0)))
}
package main

// main is intentionally empty: the RSA-256 signed-JWT round-trip below was
// disabled by commenting it out; the lines are kept for reference.
func main() {
	//data := data2.SampleUser
	//keyPem := utils.ReadFile("./jwtRS256.key")
	//keyPemPub := utils.ReadFile("./jwtRS256.key.pub")
	//expires := time.Now().Add(24 * time.Hour)
	//token := auth.CreateRSA256SignedToken(keyPem, data, expires, {})
	//fmt.Println(token)
	//claims := auth.ParseRSA256SignedToken(token, keyPemPub)
	//fmt.Println(utils.Stringify(claims))
	//fmt.Println(keyPem)
	//fmt.Println(data)
	//fmt.Println(expires.Unix())
}
package main

import (
	"fmt"
	"time"
)

// ---------------------------------------------------------
// EXERCISE: fix without type conversion
//
// 1. Fix the program without doing any conversion.
// 2. Explain why it doesn't work.
//
// EXPECTED OUTPUT
// 10h0m0s later...
// ---------------------------------------------------------

// Solution note: declaring `later` as an untyped constant is the fix.
// `hours` is a time.Duration, and in `hours * later` the untyped constant 10
// is implicitly converted to time.Duration — no explicit conversion needed.
// A typed variable (e.g. `var later = 10`, an int) would not compile here,
// because Go does not mix int and time.Duration in one expression.
func main() {
	const later = 10

	hours, _ := time.ParseDuration("1h")
	fmt.Printf("%s later...\n", hours*later)
}
package utils

import "golang.org/x/crypto/bcrypt"

// Hashing implements HashingInterface using bcrypt.
type Hashing struct{}

// HashingInterface abstracts password hashing so callers can mock it.
type HashingInterface interface {
	// HashPass returns the bcrypt hash of pass.
	HashPass(pass string) ([]byte, error)
	// ComparePass returns nil when pass matches hashedPass.
	ComparePass(hashedPass, pass string) error
}

// HashPass hashes pass with bcrypt at the default cost.
func (h *Hashing) HashPass(pass string) ([]byte, error) {
	return bcrypt.GenerateFromPassword([]byte(pass), bcrypt.DefaultCost)
}

// ComparePass returns nil when pass matches the stored bcrypt hash, and an
// error otherwise.
func (h *Hashing) ComparePass(hashedPass, pass string) error {
	return bcrypt.CompareHashAndPassword([]byte(hashedPass), []byte(pass))
}
package main

import "fmt"

// Demonstrates variable shadowing: the inner block declares a new `i` (a
// string) that hides the outer int `i` until the block ends.
func main() {
	var i int = 10
	{
		var i string = "Phani"
		fmt.Println(i) // the inner string is in scope here
	}
	fmt.Println(i) // the inner i is gone; the outer int prints
}
package datamodels

// NOTE(review): the field types DataParams, TaximeterState, RideData,
// SearchOptions, Passenger, Proposal, RideState, Coordinates, VehicleOption,
// VehicleType, Vehicle, Globals, Address and Payment are declared elsewhere
// in this package — presumably in a sibling file; verify before refactoring.

// Request : envelope for an incoming JSON-RPC style request.
type Request struct {
	ID     int        `json:"id"`
	Method string     `json:"method"`
	Params DataParams `json:"params"`
}

// Response : envelope for an outgoing response; mirrors Request plus a Status.
type Response struct {
	ID     int        `json:"id"`
	Method string     `json:"method"`
	Params DataParams `json:"params"`
	Status Error      `json:"status"`
}

// Error : request error status (code 0 is used as "OK" in the samples below).
type Error struct {
	ID      int    `json:"errorCode"`
	Message string `json:"errorMessage"`
}

/////////////////////////////////////////
/////////// OUTGOING MESSAGES ///////////
/////////////////////////////////////////

// Login : driver login information.
// Response is error code.
type Login struct {
	Token string         `json:"token"`
	State TaximeterState `json:"state"`
	ID    int            `json:"id"`
	Name  string         `json:"name"`
}

// CreateRide : ride proposal offered to a driver.
type CreateRide struct {
	Ride          RideData      `mapstructure:"ride" json:"ride"`
	SearchOptions SearchOptions `mapstructure:"searchOptions" json:"searchOptions"`
	Passenger     Passenger     `mapstructure:"passenger" json:"passenger"`
	Proposal      Proposal      `mapstructure:"proposal" json:"proposal"`
}

// ChangeRideState : updates the state of a ride.
// Response is the same with error code.
type ChangeRideState struct {
	ID    int64     `mapstructure:"rideId" json:"rideId"`
	State RideState `mapstructure:"state" json:"state"`
}

// UpdateDriverLocation : driver position update.
type UpdateDriverLocation struct {
	Coord          Coordinates     `mapstructure:"coordinates" json:"coordinates"`
	VehicleOptions []VehicleOption `mapstructure:"vehicleOptions" json:"vehicleOptions"`
	VehicleType    VehicleType     `mapstructure:"vehicleType" json:"vehicleType"`
}

// AcceptRide : message sent by the driver to accept a ride.
type AcceptRide struct {
	ID      int64   `mapstructure:"rideId" json:"rideId"`
	Vehicle Vehicle `mapstructure:"vehicle" json:"vehicle"`
}

// ChangeTaximeterState : change of a driver's taximeter status.
// Response is the same with error code.
type ChangeTaximeterState struct {
	State TaximeterState `mapstructure:"state" json:"state"`
}

/////////////////////////////////////////
/////////// INCOMING MESSAGES ///////////
/////////////////////////////////////////

// MonitorConfig : configuration payload received at driver login.
type MonitorConfig struct {
	Config Globals `json:"config"`
}

// PendingPaymentResponse : payment information for a ride.
type PendingPaymentResponse struct {
	Ride               RideData `mapstructure:"ride" json:"ride"`
	PickUpAddress      Address  `mapstructure:"pickUpAddress" json:"pickUpAddress"`
	Payment            Payment  `mapstructure:"payment" json:"payment"`
	CancellationReason string   `mapstructure:"cancellationReason" json:"cancellationReason"`
}

// AcceptRideResponse : confirmation payload for an accepted ride.
type AcceptRideResponse struct {
	Ride          RideData      `mapstructure:"ride" json:"ride"`
	Passenger     Passenger     `mapstructure:"passenger" json:"passenger"`
	Vehicle       Vehicle       `mapstructure:"vehicle" json:"vehicle"`
	SearchOptions SearchOptions `mapstructure:"searchOptions" json:"searchOptions"`
}

/*
Sample message payloads kept for reference (wire examples, not compiled code).

{
  "method" : "Login",
  "id" : 1,
  "params" : {
    "token" : "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2MTIyMDkwMDksImlhdCI6MTYxMjIwNjAwOSwidXNlclV1aWQiOiIwMDQwYjUzMi1lMmM1LTEwMzktOGNiMC02NWFkMTQ1ODZjMTUifQ.rl237_kl6-lj2nsNth5oBZ4fvW2UuapbdmW2NhmxPAaOJDTEcObtjHxvxuo0VxO6EvmnMa-lQs9JpA2Zn7ZfGqripx3zUYyHWrOgjL9zKLfy0QOb7NqXqwryn2HiMgqXmd0ZpwrXNjFeSr2jBZT2BWslWIO_oN3fJpFiORtf8384y6SvjjquZO4Jkwv8m44fDJyKXRFIq-koQJh5nAHj0dP7LAwEpBMFMf_6pnzUqOMvzNfVyEtmnKuK6jwSxqy98IMCJjp2UiCitjGIU88_yHJA5ZLAAmOj1yfKUJeNNtDVUdkdGTrGCaBAIgHHSBvRdk4X4M4079AcfFuerIw9yQ"
  }
}

{
  "status" : { "errorMessage" : "OK", "errorCode" : 0 },
  "method" : "LoginResponse",
  "id" : 1,
  "params" : { "id" : 12345, "name" : "Joe le Taxi" }
}

////////// UpdateDriverLocation //////////
{
  "status" : { "errorMessage" : "status", "errorCode" : 987 },
  "method" : "UpdateDriverLocation",
  "id" : 1,
  "params" : { "longitude" : 4.9878669999999996, "latitude" : 43.987867000000001 }
}

////////// NewRide //////////
{
  "id":256,
  "method":"NewRide",
  "params":{
    "ride": {
      "id" : 987,
      "externalId": "skhdf455",
      "origin" : "BOOKER",
      "state" : 1,
      "memo" : "mémo optionnel",
      "reference" : "reference optionnelle",
      "isImmediate" : true,
      "startDate" : "2020-12-16T17:20:00.00Z",
      "validUntil" : "2020-12-16T17:20:00.00Z",
      "fromAddress" : {
        "name" : "nom cours de l'adresse",
        "address" : "adresse complete",
        "coordinates" : { "latitude" : 42.9867, "longitude" : 4.9867 }
      },
      "toAddress" : {
        "name" : "nom cours de l'adresse optionelle",
        "address" : "adresse complete optionelle",
        "coordinates" : { "latitude" : 42.9867, "longitude" : 4.9867 }
      },
      "numberOfPassengers" : 1,
      "numberOfLuggages" : 0,
      "vehicleOptions" : [1, 3, 5],
      "vehicleType" : 2,
      "passenger" : {
        "id" : 89767,
        "firstname" : "prénom",
        "lastname" : "lastname",
        "phone" : "phone",
        "picture" : "url de la photo optionnelle"
      },
      "vehicle" : {
        "id" : 8976,
        "brand" : "BMW",
        "model" : "Série 3",
        "vehicleType" : 2,
        "color" : "WHITE",
        "plate" : "TY-496-CZ",
        "numberOfSeats" : 6
      },
      "pickUpAddress" : {
        "name" : "nom cours de l'adresse optionelle",
        "address" : "adresse complete optionelle",
        "coordinates" : { "latitude" : 42.9867, "longitude" : 4.9867 }
      },
      "vatValue" : 20.0,
      "stats" : [
        { "value" : 0.0, "unit" : "€", "type" : 0, "additionnalValue": 10.0 },
        { "value" : 25.0, "unit" : "km", "type" : 1 },
        { "value" : 40.0, "unit" : "min", "type" : 2 }
      ]
    },
    "proposal": {
      "saveForMe": true,
      "shareGroups": "Nom du groupe"
    }
  },
  "status" : { "errorMessage" : "status", "errorCode" : 987 }
}

////////// AcceptRide //////////
{
  "status" : { "errorMessage" : "status", "errorCode" : 987 },
  "method" : "AcceptRide",
  "id" : 1,
  "params" : { "rideId" : 8976987986 }
}

////////// AcceptRideResponse //////////
{
  "status" : { "errorMessage" : "status", "errorCode" : 987 },
  "method" : "AcceptRideResponse",
  "id" : 1,
  "params" : {
    "memo" : "Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus.
Nulla vitae elit libero, a pharetra augue.",
    "reference" : "Chambre 208A",
    "ride" : {
      "options" : { "numberOfPassengers" : 2, "numberOfLuggages" : 1 },
      "isImmediate" : true,
      "origin" : 0,
      "id" : "8976987986",
      "validUntil" : "2020-12-16T17:20:00.000Z",
      "date" : "2020-12-16T17:20:00.000Z",
      "toAddress" : {
        "coordinates" : { "longitude" : 5.4925626895974045, "latitude" : 43.471590283851015 },
        "address" : "arrivée adresse 14019 Ubcqoeu"
      },
      "fromAddress" : {
        "coordinates" : { "longitude" : 5.4925626895974045, "latitude" : 43.471590283851015 },
        "address" : "départ adresse 14019 Ubcqoeu"
      }
    },
    "passenger" : {
      "picture" : "https:\/\/media-exp1.licdn.com\/dms\/image\/C4D03AQHAPi4WceJ6rA\/profile-displayphoto-shrink_400_400\/0\/1516561939955?e=1614211200&v=beta&t=Mk1eA5tDgOODt3V9cLqITaWj9TAelHZTHDAFXVhx4vE",
      "phone" : "+330987654321",
      "firstname" : "Jérôme",
      "id" : "8976987986",
      "lastname" : "TONNELIER"
    }
  }
}

////////// StartOngoingRide //////////
{
  "status" : { "errorMessage" : "status", "errorCode" : 987 },
  "method" : "StartOngoingRide",
  "id" : 1,
  "params" : {
    "memo" : "Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus.
Nulla vitae elit libero, a pharetra augue.",
    "reference" : "Chambre 208A",
    "ride" : {
      "options" : { "numberOfPassengers" : 2, "numberOfLuggages" : 1 },
      "state" : 0,
      "origin" : 0,
      "isImmediate" : true,
      "id" : "8976987986",
      "validUntil" : "2020-12-16T17:20:00.000Z",
      "date" : "2020-12-16T17:20:00.000Z",
      "toAddress" : {
        "coordinates" : { "longitude" : 5.452597832139817, "latitude" : 43.52645372148015 },
        "address" : "arrivée adresse 14019 Ubcqoeu"
      },
      "fromAddress" : {
        "coordinates" : { "longitude" : 5.53859787072443, "latitude" : 43.47865284174063 },
        "address" : "départ adresse 14019 Ubcqoeu"
      }
    },
    "passenger" : {
      "picture" : "https:\/\/media-exp1.licdn.com\/dms\/image\/C4D03AQHAPi4WceJ6rA\/profile-displayphoto-shrink_400_400\/0\/1516561939955?e=1614211200&v=beta&t=Mk1eA5tDgOODt3V9cLqITaWj9TAelHZTHDAFXVhx4vE",
      "phone" : "+330987654321",
      "firstname" : "Jérôme",
      "id" : 12,
      "lastname" : "TONNELIER"
    }
  }
}

////////// ChangeRideStateResponse //////////
{
  "status" : { "errorMessage" : "status", "errorCode" : 987 },
  "method" : "ChangeRideStateResponse",
  "id" : 1,
  "params" : { "rideId" : 8976987986, "state" : 1 }
}

////////// ChangeRideState //////////
{
  "status" : { "errorMessage" : "status", "errorCode" : 987 },
  "method" : "ChangeRideState",
  "id" : 1,
  "params" : { "rideId" : 8976987986, "state" : 1 }
}

////////// PendingPaymentResponse //////////
{
  "status" : { "errorMessage" : "status", "errorCode" : 987 },
  "method" : "PendingPaymentResponse",
  "id" : 1,
  "params" : {
    "ride" : {
      "options" : { "numberOfPassengers" : 9, "numberOfLuggages" : 5 },
      "state" : 7,
      "origin" : 0,
      "isImmediate" : true,
      "id" : "8976987986",
      "validUntil" : "2020-12-16T17:20:00.000Z",
      "date" : "2020-12-16T17:20:00.000Z",
      "toAddress" : {
        "coordinates" : { "longitude" : 5.4925626895974045, "latitude" : 43.471590283851015 },
        "address" : "arrivée adresse 14019 Ubcqoeu"
      },
      "fromAddress" : {
        "coordinates" : { "longitude" : 5.4925626895974045, "latitude" : 43.471590283851015 },
        "address" : "départ adresse 14019 Ubcqoeu"
      }
    },
    "pickUpAddress" : {
      "coordinates" : { "longitude" : 5.53858787072443, "latitude" : 43.46865284174063 },
      "address" : "Acception de la course address"
    },
    "stats" : [
      { "value" : 0.0, "unit" : "€", "type" : 0, "additionnalValue": 10.0 },
      { "value" : 25.0, "unit" : "km", "type" : 1 },
      { "value" : 40.0, "unit" : "min", "type" : 2 }
    ]
  }
}

////////// Ended //////////
{
  "status" : { "errorMessage" : "status", "errorCode" : 987 },
  "method" : "RideEndedResponse",
  "id" : 1,
  "params" : { "rideId" : "JHGCUYGC-HCS" }
}

Vehicle options (FR labels kept from the original spec):
  Paroi de séparation COVID
  Chauffeur parlant Anglais
  MKIDS : 1 réhausseur
  MKIDS : 2 réhausseurs
  MKIDS : 1 Siège bébé
  MKIDS : 1 Siège bébé+1réhausseur
  Animal accepté.
  -ACCESS (personne à mobilité réduite).

case cpam = 1, covidShield, englishSpoken, mkids1, mkids2, mkids3, mkids4, pets, access

{
  "method" : "CreateRide",
  "id" : 1,
  "params" : {
    "start" : {
      "coordinates" : { "latitude" : 42.2324, "longitude" : 42.2324 },
      "address" : "l'adresse en texte"
    },
    "end" : {
      "coordinates" : { "latitude" : 42.2324, "longitude" : 42.2324 },
      "address" : "l'adresse d'arrivée est optionnelle"
    },
    "vehicleType" : 1,
    "options" : [1, 4],
    "date" : "2020-12-16T17:20:00.000Z",
    "shareGroups" : [123, 98327],
    "driverId" : 87586
    var passengerName: String?
    var passengerPhone: String?
  }
}
*/
package api

import (
	"bytes"
	"encoding/json"
	"io/ioutil"
	"testing"
)

// Test_RepoCreate verifies that RepoCreate issues exactly one HTTP request and
// that the GraphQL input variables carry the description and homepage URL.
func Test_RepoCreate(t *testing.T) {
	fakeHTTP := &FakeHTTP{}
	client := NewClient(ReplaceTripper(fakeHTTP))
	fakeHTTP.StubResponse(200, bytes.NewBufferString(`{}`))

	input := RepoCreateInput{
		Description: "roasted chesnuts",
		HomepageURL: "http://example.com",
	}

	if _, err := RepoCreate(client, input); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if requestCount := len(fakeHTTP.Requests); requestCount != 1 {
		t.Fatalf("expected 1 HTTP request, seen %d", requestCount)
	}

	// Decode the captured request body to inspect the GraphQL variables.
	var reqBody struct {
		Query     string
		Variables struct {
			Input map[string]interface{}
		}
	}
	bodyBytes, _ := ioutil.ReadAll(fakeHTTP.Requests[0].Body)
	_ = json.Unmarshal(bodyBytes, &reqBody)

	if description := reqBody.Variables.Input["description"].(string); description != "roasted chesnuts" {
		t.Errorf("expected description to be %q, got %q", "roasted chesnuts", description)
	}
	if homepage := reqBody.Variables.Input["homepageUrl"].(string); homepage != "http://example.com" {
		t.Errorf("expected homepageUrl to be %q, got %q", "http://example.com", homepage)
	}
}
// Copyright 2019-2023 The sakuracloud_exporter Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package platform

import (
	"context"
	"time"

	"github.com/sacloud/iaas-api-go"
	"github.com/sacloud/iaas-api-go/types"
	"github.com/sacloud/packages-go/newsfeed"
)

// VPCRouter wraps an iaas VPC router together with the zone it was found in.
type VPCRouter struct {
	*iaas.VPCRouter
	ZoneName string
}

// VPCRouterClient is the read-only client surface the exporter needs for
// VPC routers: listing, live status, NIC/CPU monitoring, and maintenance news.
type VPCRouterClient interface {
	Find(ctx context.Context) ([]*VPCRouter, error)
	Status(ctx context.Context, zone string, id types.ID) (*iaas.VPCRouterStatus, error)
	MonitorNIC(ctx context.Context, zone string, id types.ID, index int, end time.Time) (*iaas.MonitorInterfaceValue, error)
	MonitorCPU(ctx context.Context, zone string, id types.ID, end time.Time) (*iaas.MonitorCPUTimeValue, error)
	MaintenanceInfo(infoURL string) (*newsfeed.FeedItem, error)
}

// getVPCRouterClient builds the concrete multi-zone client used by the exporter.
func getVPCRouterClient(caller iaas.APICaller, zones []string) VPCRouterClient {
	return &vpcRouterClient{
		client: iaas.NewVPCRouterOp(caller),
		zones:  zones,
	}
}

type vpcRouterClient struct {
	client iaas.VPCRouterAPI
	zones  []string
}

// find lists the VPC routers of a single zone, wrapping each result with its
// zone name. The Count of 10000 is an effectively-unbounded page size.
func (c *vpcRouterClient) find(ctx context.Context, zone string) ([]interface{}, error) {
	var results []interface{}
	res, err := c.client.Find(ctx, zone, &iaas.FindCondition{
		Count: 10000,
	})
	if err != nil {
		return results, err
	}
	for _, v := range res.VPCRouters {
		results = append(results, &VPCRouter{
			VPCRouter: v,
			ZoneName:  zone,
		})
	}
	return results, err
}

// Find lists VPC routers across all configured zones.
// NOTE(review): queryToZones is defined elsewhere in this package — presumably
// it fans find out per zone and flattens the results; verify before changing.
func (c *vpcRouterClient) Find(ctx context.Context) ([]*VPCRouter, error) {
	res, err := queryToZones(ctx, c.zones, c.find)
	if err != nil {
		return nil, err
	}
	var results []*VPCRouter
	for _, s := range res {
		results = append(results, s.(*VPCRouter))
	}
	return results, nil
}

// MonitorNIC returns the latest interface metric sample (up to end) for the
// NIC at the given index.
func (c *vpcRouterClient) MonitorNIC(ctx context.Context, zone string, id types.ID, index int, end time.Time) (*iaas.MonitorInterfaceValue, error) {
	mvs, err := c.client.MonitorInterface(ctx, zone, id, index, monitorCondition(end))
	if err != nil {
		return nil, err
	}
	return monitorInterfaceValue(mvs.Values), nil
}

// MonitorCPU returns the latest CPU-time metric sample up to end.
func (c *vpcRouterClient) MonitorCPU(ctx context.Context, zone string, id types.ID, end time.Time) (*iaas.MonitorCPUTimeValue, error) {
	mvs, err := c.client.MonitorCPU(ctx, zone, id, monitorCondition(end))
	if err != nil {
		return nil, err
	}
	return monitorCPUTimeValue(mvs.Values), nil
}

// Status fetches the live status of one VPC router.
func (c *vpcRouterClient) Status(ctx context.Context, zone string, id types.ID) (*iaas.VPCRouterStatus, error) {
	return c.client.Status(ctx, zone, id)
}

// MaintenanceInfo fetches the maintenance feed item referenced by infoURL.
func (c *vpcRouterClient) MaintenanceInfo(infoURL string) (*newsfeed.FeedItem, error) {
	return newsfeed.GetByURL(infoURL)
}
// Copyright 2019 Kuei-chun Chen. All rights reserved.

package mdb

import (
	"encoding/json"
	"io/ioutil"
	"testing"

	"github.com/simagix/gox"
	"go.mongodb.org/mongo-driver/bson"
)

// TestGetIndexSuggestionFromFilter checks that a simple equality/$exists
// filter yields an index over both queried fields, ordered {ct, cs}.
// Cardinality data is loaded from a captured explain document; the JSON→BSON
// round-trip normalizes the fixture. Unmarshal errors are deliberately
// ignored (best-effort fixture loading; a bad fixture fails the assertion).
func TestGetIndexSuggestionFromFilter(t *testing.T) {
	filename := "testdata/commerceticket-replica-explain.json"
	buffer, err := ioutil.ReadFile(filename)
	if err != nil {
		t.Fatal(err)
	}
	var v bson.M
	json.Unmarshal(buffer, &v)
	data, _ := bson.Marshal(v)
	bson.Unmarshal(data, &v)
	// Extract only the "cardinality" section of the fixture.
	var summary CardinalitySummary
	data, _ = json.Marshal(v["cardinality"])
	json.Unmarshal(data, &summary)
	var explain ExplainCommand
	str := `{"filter": {"ct": "abc", "cs": {"$exists": true}}}`
	bson.UnmarshalExtJSON([]byte(str), true, &explain)
	index := GetIndexSuggestion(explain, summary.List)
	str = `{"ct":1,"cs":1}`
	if str != gox.Stringify(index) {
		t.Fatal("Expected", str, "but got", gox.Stringify(index))
	}
	t.Log(gox.Stringify(index))
}

// TestGetIndexSuggestion checks the classic equality–sort–range ordering:
// equality field (brand) first, then the sort key (color), then the range
// predicate (year).
func TestGetIndexSuggestion(t *testing.T) {
	filename := "testdata/TestGetIndexSuggestion.json"
	buffer, err := ioutil.ReadFile(filename)
	if err != nil {
		t.Fatal(err)
	}
	var v bson.M
	bson.UnmarshalExtJSON(buffer, true, &v)
	var summary CardinalitySummary
	data, _ := json.Marshal(v["cardinality"])
	json.Unmarshal(data, &summary)
	var explain ExplainCommand
	str := `{"filter": {"brand": "BMW", "year": {"$gt": 2017}}, "sort": {"color": 1}}`
	bson.UnmarshalExtJSON([]byte(str), true, &explain)
	index := GetIndexSuggestion(explain, summary.List)
	expected := `{"brand":1,"color":1,"year":1}`
	if gox.Stringify(index) != expected {
		t.Fatal("Expected", expected, "but got", gox.Stringify(index))
	}
	t.Log("index:", gox.Stringify(index))
}

// TestGetIndexSuggestionElemMatch checks suggestions for $and of $elemMatch
// sub-filters over an array of {k, v} documents.
func TestGetIndexSuggestionElemMatch(t *testing.T) {
	filename := "testdata/TestGetIndexSuggestionElemMatch.json"
	buffer, err := ioutil.ReadFile(filename)
	if err != nil {
		t.Fatal(err)
	}
	var v bson.M
	bson.UnmarshalExtJSON(buffer, true, &v)
	var summary CardinalitySummary
	data, _ := json.Marshal(v["cardinality"])
	json.Unmarshal(data, &summary)
	var explain ExplainCommand
	str := `{"filter": { "$and": [{ "filters": { "$elemMatch": { "k": "color", "v": "Red" } } }, { "filters": { "$elemMatch": { "k": "year", "v": { "$gt": 2017 } } } }] } }`
	bson.UnmarshalExtJSON([]byte(str), true, &explain)
	index := GetIndexSuggestion(explain, summary.List)
	expected := `{"filters.v":1,"filters.k":1}`
	if gox.Stringify(index) != expected {
		t.Fatal("Expected", expected, "but got", gox.Stringify(index))
	}
	t.Log("index:", gox.Stringify(index))
}
package suites

import (
	"context"
	"encoding/json"
	"fmt"
	"log"
	"strings"
	"testing"
	"time"

	mapset "github.com/deckarep/golang-set/v2"
	"github.com/stretchr/testify/suite"
)

// CustomHeadersScenario is an end-to-end browser suite verifying that
// identity headers (Remote-User/-Groups/-Name/-Email) are forwarded only
// for authenticated users.
type CustomHeadersScenario struct {
	*RodSuite
}

// NewCustomHeadersScenario constructs the suite with a fresh RodSuite.
func NewCustomHeadersScenario() *CustomHeadersScenario {
	return &CustomHeadersScenario{
		RodSuite: NewRodSuite(""),
	}
}

// SetupSuite starts the headless browser once for the whole suite.
func (s *CustomHeadersScenario) SetupSuite() {
	browser, err := StartRod()
	if err != nil {
		log.Fatal(err)
	}
	s.RodSession = browser
}

// TearDownSuite stops the browser session.
func (s *CustomHeadersScenario) TearDownSuite() {
	err := s.RodSession.Stop()
	if err != nil {
		log.Fatal(err)
	}
}

// SetupTest opens a fresh tab on the home page before each test.
func (s *CustomHeadersScenario) SetupTest() {
	s.Page = s.doCreateTab(s.T(), HomeBaseURL)
	s.verifyIsHome(s.T(), s.Page)
}

// TearDownTest collects coverage and closes the tab.
func (s *CustomHeadersScenario) TearDownTest() {
	s.collectCoverage(s.Page)
	s.MustClose()
}

// TestShouldNotForwardCustomHeaderForUnauthenticatedUser asserts that the
// /headers echo page contains no identity values when not logged in.
func (s *CustomHeadersScenario) TestShouldNotForwardCustomHeaderForUnauthenticatedUser() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer func() {
		cancel()
		// Screenshot is only captured when the context expired (see helper).
		s.collectScreenshot(ctx.Err(), s.Page)
	}()

	s.doVisit(s.T(), s.Context(ctx), fmt.Sprintf("%s/headers", PublicBaseURL))

	body, err := s.Context(ctx).Element("body")
	s.Assert().NoError(err)

	b, err := body.Text()
	s.Assert().NoError(err)
	s.Assert().NotContains(b, "john")
	s.Assert().NotContains(b, "admins")
	s.Assert().NotContains(b, "John Doe")
	s.Assert().NotContains(b, "john.doe@authelia.com")
}

// Headers mirrors the identity headers echoed back by the /headers endpoint.
type Headers struct {
	ForwardedEmail  string `json:"Remote-Email"`
	ForwardedGroups string `json:"Remote-Groups"`
	ForwardedName   string `json:"Remote-Name"`
	ForwardedUser   string `json:"Remote-User"`
}

// HeadersPayload is the JSON document returned by the /headers endpoint.
type HeadersPayload struct {
	Headers Headers `json:"headers"`
}

// TestShouldForwardCustomHeaderForAuthenticatedUser logs in as john and
// asserts every identity header (user, groups, display name, email) is
// present in the echoed payload.
func (s *CustomHeadersScenario) TestShouldForwardCustomHeaderForAuthenticatedUser() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer func() {
		cancel()
		s.collectScreenshot(ctx.Err(), s.Page)
	}()

	expectedGroups := mapset.NewSet("dev", "admins")

	targetURL := fmt.Sprintf("%s/headers", PublicBaseURL)
	s.doLoginOneFactor(s.T(), s.Context(ctx), "john", "password", false, BaseDomain, targetURL)
	s.verifyIsPublic(s.T(), s.Context(ctx))

	body, err := s.Context(ctx).Element("body")
	s.Assert().NoError(err)
	s.Assert().NotNil(body)

	content, err := body.Text()
	s.Assert().NoError(err)
	s.Assert().NotNil(content)

	payload := HeadersPayload{}
	if err := json.Unmarshal([]byte(content), &payload); err != nil {
		log.Panic(err)
	}

	// Groups arrive as a comma-separated string; compare as a set so order
	// does not matter.
	groups := strings.Split(payload.Headers.ForwardedGroups, ",")
	actualGroups := mapset.NewSet[string]()
	for _, group := range groups {
		actualGroups.Add(group)
	}

	if strings.Contains(payload.Headers.ForwardedUser, "john") && expectedGroups.Equal(actualGroups) &&
		strings.Contains(payload.Headers.ForwardedName, "John Doe") &&
		strings.Contains(payload.Headers.ForwardedEmail, "john.doe@authelia.com") {
		err = nil
	} else {
		err = fmt.Errorf("headers do not include user information")
	}

	s.Require().NoError(err)
}

// TestCustomHeadersScenario is the testing entry point; skipped with -short.
func TestCustomHeadersScenario(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping suite test in short mode")
	}

	suite.Run(t, NewCustomHeadersScenario())
}
// Copyright (c) 2020 VMware, Inc. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

package starlark

import (
	"bytes"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"
	"testing"

	"go.starlark.net/starlark"
)

// TestCaptureLocalFunc drives captureLocalFunc directly with keyword-argument
// tuples, covering the default output path, explicit workdir/file_name/append
// arguments, and a failing command whose error output is still captured.
func TestCaptureLocalFunc(t *testing.T) {
	tests := []struct {
		name string
		args func(t *testing.T) []starlark.Tuple
		eval func(t *testing.T, kwargs []starlark.Tuple)
	}{
		{
			// Only cmd is given: output lands in defaults.workdir under a
			// sanitized file name derived from the command itself.
			name: "capture with defaults",
			args: func(t *testing.T) []starlark.Tuple {
				return []starlark.Tuple{{starlark.String("cmd"), starlark.String("echo 'Hello World!'")}}
			},
			eval: func(t *testing.T, kwargs []starlark.Tuple) {
				val, err := captureLocalFunc(newTestThreadLocal(t), nil, nil, kwargs)
				if err != nil {
					t.Fatal(err)
				}
				expected := filepath.Join(defaults.workdir, fmt.Sprintf("%s.txt", sanitizeStr("echo 'Hello World!'")))
				result := ""
				if r, ok := val.(starlark.String); ok {
					result = string(r)
				}
				// Clean up both the capture file and the default workdir.
				defer func() {
					os.RemoveAll(result)
					os.RemoveAll(defaults.workdir)
				}()
				if result != expected {
					t.Errorf("unexpected result: %s", result)
				}
				file, err := os.Open(result)
				if err != nil {
					t.Fatal(err)
				}
				buf := new(bytes.Buffer)
				if _, err := io.Copy(buf, file); err != nil {
					t.Fatal(err)
				}
				expected = strings.TrimSpace(buf.String())
				if expected != "Hello World!" {
					t.Errorf("unexpected content captured: %s", expected)
				}
				if err := file.Close(); err != nil {
					t.Error(err)
				}
			},
		},
		{
			// All keyword args given; the file is pre-seeded and append=True,
			// so existing content must be preserved with desc + new output
			// appended after it.
			name: "capture with args",
			args: func(t *testing.T) []starlark.Tuple {
				return []starlark.Tuple{
					{starlark.String("cmd"), starlark.String("echo 'Hello World!'")},
					{starlark.String("workdir"), starlark.String("/tmp/capturecrashd")},
					{starlark.String("file_name"), starlark.String("echo.txt")},
					{starlark.String("desc"), starlark.String("echo command")},
					{starlark.String("append"), starlark.True},
				}
			},
			eval: func(t *testing.T, kwargs []starlark.Tuple) {
				expected := filepath.Join("/tmp/capturecrashd", "echo.txt")
				err := os.MkdirAll("/tmp/capturecrashd", 0777)
				if err != nil {
					t.Fatal(err)
				}
				// Pre-seed the capture file to verify append semantics.
				f, err := os.OpenFile(expected, os.O_RDWR|os.O_CREATE, 0644)
				if err != nil {
					t.Fatal(err)
				}
				if _, err := f.Write([]byte("Hello World!\n")); err != nil {
					t.Fatal(err)
				}
				if err := f.Close(); err != nil {
					t.Fatal(err)
				}
				val, err := captureLocalFunc(newTestThreadLocal(t), nil, nil, kwargs)
				if err != nil {
					t.Fatal(err)
				}
				result := ""
				if r, ok := val.(starlark.String); ok {
					result = string(r)
				}
				defer func() {
					os.RemoveAll(result)
					os.RemoveAll(defaults.workdir)
				}()
				if result != expected {
					t.Errorf("unexpected result: %s", result)
				}
				file, err := os.Open(result)
				if err != nil {
					t.Fatal(err)
				}
				buf := new(bytes.Buffer)
				if _, err := io.Copy(buf, file); err != nil {
					t.Fatal(err)
				}
				expected = strings.TrimSpace(buf.String())
				if expected != "Hello World!\necho command\nHello World!" {
					t.Errorf("unexpected content captured: %s", expected)
				}
				if err := file.Close(); err != nil {
					t.Error(err)
				}
			},
		},
		{
			// A nonexistent command: the call itself still succeeds and the
			// shell's "not found" diagnostic is what gets captured.
			name: "capture with error",
			args: func(t *testing.T) []starlark.Tuple {
				return []starlark.Tuple{{starlark.String("cmd"), starlark.String("nacho 'Hello World!'")}}
			},
			eval: func(t *testing.T, kwargs []starlark.Tuple) {
				val, err := captureLocalFunc(newTestThreadLocal(t), nil, nil, kwargs)
				if err != nil {
					t.Fatal(err)
				}
				result := ""
				if r, ok := val.(starlark.String); ok {
					result = string(r)
				}
				defer func() {
					os.RemoveAll(result)
					os.RemoveAll(defaults.workdir)
				}()
				file, err := os.Open(result)
				if err != nil {
					t.Fatal(err)
				}
				buf := new(bytes.Buffer)
				if _, err := io.Copy(buf, file); err != nil {
					t.Fatal(err)
				}
				expected := strings.TrimSpace(buf.String())
				if !strings.Contains(expected, "not found") {
					t.Errorf("unexpected content captured: %s", expected)
				}
				if err := file.Close(); err != nil {
					t.Error(err)
				}
			},
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			test.eval(t, test.args(t))
		})
	}
}

// TestCaptureLocalScript exercises the same behavior through Starlark scripts
// executed by the interpreter (capture_local builtin), with defaults and with
// explicit keyword arguments.
func TestCaptureLocalScript(t *testing.T) {
	tests := []struct {
		name   string
		script string
		eval   func(t *testing.T, script string)
	}{
		{
			name: "capture local defaults",
			script: `
result = capture_local("echo 'Hello World!'")
`,
			eval: func(t *testing.T, script string) {
				exe := New()
				if err := exe.Exec("test.star", strings.NewReader(script)); err != nil {
					t.Fatal(err)
				}
				expected := filepath.Join(defaults.workdir, fmt.Sprintf("%s.txt", sanitizeStr("echo 'Hello World!'")))
				var result string
				resultVal := exe.result["result"]
				if resultVal == nil {
					t.Fatal("capture_local() should be assigned to a variable for test")
				}
				res, ok := resultVal.(starlark.String)
				if !ok {
					t.Fatal("capture_local() should return a string")
				}
				result = string(res)
				defer func() {
					os.RemoveAll(result)
					os.RemoveAll(defaults.workdir)
				}()
				if result != expected {
					t.Errorf("unexpected result: %s", result)
				}
				file, err := os.Open(result)
				if err != nil {
					t.Fatal(err)
				}
				buf := new(bytes.Buffer)
				if _, err := io.Copy(buf, file); err != nil {
					t.Fatal(err)
				}
				expected = strings.TrimSpace(buf.String())
				if expected != "Hello World!" {
					t.Errorf("unexpected content captured: %s", expected)
				}
				if err := file.Close(); err != nil {
					t.Error(err)
				}
			},
		},
		{
			name: "capture local with args",
			script: `
result = capture_local(cmd="echo 'Hello World!'", workdir="/tmp/capturecrash", file_name="echo_out.txt", desc="output command", append=True)
`,
			eval: func(t *testing.T, script string) {
				expected := filepath.Join("/tmp/capturecrash", "echo_out.txt")
				err := os.MkdirAll("/tmp/capturecrash", 0777)
				if err != nil {
					t.Fatal(err)
				}
				// Pre-seed the file so append=True behavior is observable.
				f, err := os.OpenFile(expected, os.O_RDWR|os.O_CREATE, 0644)
				if err != nil {
					t.Fatal(err)
				}
				if _, err := f.Write([]byte("Hello World!\n")); err != nil {
					t.Fatal(err)
				}
				if err := f.Close(); err != nil {
					t.Fatal(err)
				}
				exe := New()
				if err := exe.Exec("test.star", strings.NewReader(script)); err != nil {
					t.Fatal(err)
				}
				var result string
				resultVal := exe.result["result"]
				if resultVal == nil {
					t.Fatal("capture_local() should be assigned to a variable for test")
				}
				res, ok := resultVal.(starlark.String)
				if !ok {
					t.Fatal("capture_local() should return a string")
				}
				result = string(res)
				defer func() {
					os.RemoveAll(result)
					os.RemoveAll(defaults.workdir)
				}()
				if result != expected {
					t.Errorf("unexpected result: %s", result)
				}
				file, err := os.Open(result)
				if err != nil {
					t.Fatal(err)
				}
				buf := new(bytes.Buffer)
				if _, err := io.Copy(buf, file); err != nil {
					t.Fatal(err)
				}
				expected = strings.TrimSpace(buf.String())
				if expected != "Hello World!\noutput command\nHello World!" {
					t.Errorf("unexpected content captured: %s", expected)
				}
				if err := file.Close(); err != nil {
					t.Error(err)
				}
			},
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			test.eval(t, test.script)
		})
	}
}
// Copyright 2015 Alexey Martseniuk. All rights reserved.
// Use of this source code is governed by a MIT license
// that can be found in the LICENSE file.

package linq

import (
	"container/list"
)

// T is the generic element type used throughout the package (pre-generics Go).
type T interface{}

// Enumerator supports a simple iteration over a collection.
type Enumerator interface {
	// Reset sets the enumerator to its initial position, which is before the first element.
	Reset() error
	// Value gets the current element in the collection.
	Value() T
	// Next advances the enumerator to the next element.
	Next() (bool, error)
}

// Enumerable exposes LINQ-style query operators over a sequence.
// Operators returning Enumerable are lazy; those returning a value/error
// enumerate immediately.
type Enumerable interface {
	// Enumerator returns an enumerator that iterates through the collection.
	Enumerator() Enumerator
	// Aggregate applies an accumulator function over a sequence.
	Aggregate(fn func(T, T) T) (T, error)
	// All determines whether all elements of a sequence satisfy a condition.
	All(fn func(T) bool) (bool, error)
	// Any determines whether any element of a sequence satisfies a condition.
	Any(fn func(T) bool) (bool, error)
	// Apply applies a function on each element of a sequence and returns the
	// result, or a default value if the sequence is empty.
	Apply(fn func(T) T, def T) (T, error)
	// Concat concatenates two sequences.
	Concat(second Enumerable) Enumerable
	// Contains determines whether a sequence contains a specified element by
	// using a specified equality comparer to compare values.
	Contains(val T, eq func(T, T) bool) (bool, error)
	// Count returns the number of elements in a sequence.
	Count() (uint, error)
	// DefaultIfEmpty returns the elements of the specified sequence or the
	// specified value in a singleton collection if the sequence is empty.
	DefaultIfEmpty(def T) Enumerable
	// Distinct returns distinct elements from a sequence by using a specified
	// equality comparer to compare values.
	Distinct(eq func(T, T) bool) Enumerable
	// ElementAtOrDefault returns the element at a specified index in a
	// sequence or a default value if the index is out of range.
	ElementAtOrDefault(index uint, def T) (T, error)
	// Except produces the set difference of two sequences by using the
	// specified equality comparer to compare values.
	Except(second Enumerable, eq func(T, T) bool) Enumerable
	// FirstOrDefault returns the first element of the sequence that satisfies
	// a condition or a default value if no such element is found.
	FirstOrDefault(fn func(T) bool, def T) (T, error)
	// GroupBy groups the elements of a sequence according to a specified key
	// selector function and creates a result value from each group and its
	// key. The keys are compared by using a specified comparer.
	GroupBy(key func(T) T, res func(T, Enumerator) T, eq func(T, T) bool) Enumerable
	// GroupJoin correlates the elements of two sequences based on key
	// equality and groups the results. A specified equality comparer is used
	// to compare keys.
	GroupJoin(inner Enumerable, outKey, inKey func(T) T, res func(T, Enumerator) T, eq func(T, T) bool) Enumerable
	// Intersect produces the set intersection of two sequences by using the
	// specified equality comparer to compare values.
	Intersect(second Enumerable, eq func(T, T) bool) Enumerable
	// Join correlates the elements of two sequences based on matching keys.
	// A specified equality comparer is used to compare keys.
	Join(inner Enumerable, outKey, inKey func(T) T, res func(T, T) T, eq func(T, T) bool) Enumerable
	// LastOrDefault returns the last element of a sequence that satisfies a
	// condition or a default value if no such element is found.
	LastOrDefault(fn func(T) bool, def T) (T, error)
	// OrderBy sorts the elements of a sequence in order by using a specified comparer.
	OrderBy(fn func(T) T, less func(T, T) bool) Enumerable
	// Reverse inverts the order of the elements in a sequence.
	Reverse() Enumerable
	// Select projects each element of a sequence into a new form.
	Select(fn func(T) T) Enumerable
	// SelectMany projects each element of a sequence to an Enumerable and
	// flattens the resulting sequences into one sequence.
	SelectMany(fn func(T) Enumerable) Enumerable
	// SequenceEqual determines whether two sequences are equal by comparing
	// their elements by using the specified equality comparer to compare values.
	SequenceEqual(second Enumerable, eq func(T, T) bool) (bool, error)
	// SkipWhile bypasses elements in a sequence as long as a specified
	// condition is true and then returns the remaining elements.
	SkipWhile(fn func(T) bool) Enumerable
	// TakeWhile returns elements from a sequence as long as a specified condition is true.
	TakeWhile(fn func(T) bool) Enumerable
	// ToSlice creates a slice from an Enumerable.
	ToSlice() ([]T, error)
	// ToMap creates a map from an Enumerable according to a specified key
	// selector function.
	ToMap(fn func(T) T) (map[T]T, error)
	// Union produces the set union of two sequences by using a specified
	// equality comparer.
	Union(second Enumerable, eq func(T, T) bool) Enumerable
	// Where filters a sequence of values based on a predicate.
	Where(fn func(T) bool) Enumerable
	// Zip applies a specified function to the corresponding elements of two
	// sequences, producing a sequence of the results.
	Zip(second Enumerable, fn func(T, T) T) Enumerable
}

// From creates an Enumerable from an Enumerator. A nil enumerator yields the
// shared empty sequence.
func From(en Enumerator) Enumerable {
	if en == nil {
		return empty
	}
	return enumerable(func() Enumerator {
		return en
	})
}

// FromSequence creates an Enumerable from a sequence of T.
func FromSequence(items ...T) Enumerable {
	if len(items) < 1 {
		return empty
	}
	return enumerable(func() Enumerator {
		return sliceIter(items)
	})
}

// FromSlice creates an Enumerable from a slice (passed as an opaque value and
// iterated reflectively by sliceObjIter).
func FromSlice(obj T) Enumerable {
	if obj == nil {
		return empty
	}
	return enumerable(func() Enumerator {
		return sliceObjIter(obj)
	})
}

// FromList creates an Enumerable from a container/list.List.
func FromList(l *list.List) Enumerable {
	if l == nil {
		return empty
	}
	return enumerable(func() Enumerator {
		return listIter(l)
	})
}

// ForEach applies a function over a sequence. A nil enumerator or function is
// a no-op. Iteration stops at the first error from Reset or Next.
func ForEach(en Enumerator, fn func(T)) error {
	if en == nil || fn == nil {
		return nil
	}
	var (
		ok  bool
		err error
	)
	if err = en.Reset(); err == nil {
		for ok, err = en.Next(); ok && err == nil; ok, err = en.Next() {
			fn(en.Value())
		}
	}
	return err
}
package dialog import ( "github.com/therecipe/qt/core" "github.com/therecipe/qt/internal/examples/showcases/wallet/wallet/dialog/controller" ) type dialogTemplate struct { core.QObject _ func() `constructor:"init"` _ func(cident string) `signal:"show,<-(controller.Controller)"` _ func(bool) `signal:"blur,->(controller.Controller)"` } func (t *dialogTemplate) init() { if controller.Controller == nil { controller.NewDialogController(nil) } } func (t *dialogTemplate) show(cident string) { if controller.Controller.IsLocked() { t.Show("unlock") } else { t.Show(cident) } }
package main import "testing" func TestCheckParenthesis(t *testing.T) { cases := []struct { input string want bool }{ {"a+(b*c)-2-a", true}, {"(a+b*(2-c)-2+a)*2", true}, {"(a*b-(2+c)", false}, {"2*(3-a))", false}, {")3+b*(2-c)(", false}, } for _, c := range cases { got := CheckParenthesis(c.input) if got != c.want { t.Errorf("Got %v but expected %v for %q", got,c.want,c.input) } } }
package sqlbuilder type Sqlizer interface { ToSql() (string, []interface{}, error) } type rawSqlizer interface { toSqlRaw() (string, []interface{}, error) }
package verr

import (
	"fmt"
	"io"
	"strings"
)

// StackEnabled toggles stack capture globally.
// NOTE(review): not referenced in this file — presumably read by callers()
// in a sibling file; confirm.
var StackEnabled = true

// ErrLevel is the error severance level type. Can be used to determine the
// importance of the error before it is handled appropriately.
type ErrLevel int

const (
	ErrError ErrLevel = iota
	ErrIgnorable
	ErrInfo
	ErrNotice
	ErrWarning
	ErrCritical
	ErrFatal
	ErrPanic
)

// VError is an error carrying a numeric code, a severity level, an optional
// wrapped cause, a captured call stack, and arbitrary key/value parameters.
type VError struct {
	Code      int
	Level     ErrLevel
	Msg       string
	Cause     error
	Stack     *stack
	ErrParams map[string]interface{}
}

// LevelMap maps each ErrLevel to its human-readable name.
var LevelMap = map[ErrLevel]string{
	ErrIgnorable: "ignorable",
	ErrInfo:      "info",
	ErrNotice:    "notice",
	ErrWarning:   "warning",
	ErrError:     "error",
	ErrCritical:  "critical",
	ErrFatal:     "fatal",
	ErrPanic:     "panic",
}

// Error renders the full message chain, walking nested causes and joining
// their messages with ": ". A non-VError tail contributes its Error() text
// and terminates the walk.
func (fe *VError) Error() string {
	text := []string{fe.Msg}
	curErr := fe.Cause
	for curErr != nil {
		// BUG FIX: this previously asserted fe.Cause on every iteration,
		// which duplicated the first cause's message and looped forever on
		// chains deeper than two links. Assert the current link instead.
		if e, ok := curErr.(*VError); ok {
			text = append(text, e.Msg)
			curErr = e.Cause
		} else {
			text = append(text, curErr.Error())
			break
		}
	}
	return strings.Join(text, ": ")
}

// WithLevel sets the severity level and returns the receiver for chaining.
func (fe *VError) WithLevel(level ErrLevel) *VError {
	fe.Level = level
	return fe
}

// WithCause attaches a wrapped cause and returns the receiver for chaining.
func (fe *VError) WithCause(cause error) *VError {
	fe.Cause = cause
	return fe
}

// WithCode sets the numeric code and returns the receiver for chaining.
func (fe *VError) WithCode(code int) *VError {
	fe.Code = code
	return fe
}

// AddParam records an arbitrary key/value pair on the error, lazily
// allocating the parameter map, and returns the receiver for chaining.
func (fe *VError) AddParam(key string, v interface{}) *VError {
	if fe.ErrParams == nil {
		fe.ErrParams = make(map[string]interface{})
	}
	fe.ErrParams[key] = v
	return fe
}

// Format implements fmt.Formatter: %+v prints the cause followed by the
// captured stack; %v and %s print the joined message chain; %q quotes it.
func (fe *VError) Format(s fmt.State, verb rune) {
	switch verb {
	case 'v':
		if s.Flag('+') {
			fmt.Fprintf(s, "%+v", fe.Cause)
			fe.Stack.Format(s, verb)
			return
		}
		fallthrough
	case 's':
		io.WriteString(s, fe.Error())
	case 'q':
		fmt.Fprintf(s, "%q", fe.Error())
	}
}

// Error creates a new VError at level ErrError with a captured stack.
func Error(message string) *VError {
	return &VError{
		Level: ErrError,
		Msg:   message,
		Stack: callers(),
	}
}

// Errorf creates a new VError at level ErrError with a formatted message
// and a captured stack.
func Errorf(format string, args ...interface{}) *VError {
	return &VError{
		Level: ErrError,
		Msg:   fmt.Sprintf(format, args...),
		Stack: callers(),
	}
}

// Wrap wraps err in a new VError with the given message and a captured
// stack. The level is left at its zero value (ErrError).
func Wrap(err error, message string) *VError {
	return &VError{
		Msg:   message,
		Stack: callers(),
		Cause: err,
	}
}

// Cause returns the wrapped cause when err is a *VError, nil otherwise.
func Cause(err error) error {
	e, ok := err.(*VError)
	if ok {
		return e.Cause
	}
	return nil
}

// Params returns the parameter map when err is a *VError, nil otherwise.
func Params(err error) map[string]interface{} {
	e, ok := err.(*VError)
	if ok {
		return e.ErrParams
	}
	return nil
}

// Level returns err's severity level, defaulting to ErrError for non-VErrors.
func Level(err error) ErrLevel {
	e, ok := err.(*VError)
	if ok {
		return e.Level
	}
	return ErrError
}

// Code returns err's numeric code, or -1 when err is not a *VError.
func Code(err error) int {
	e, ok := err.(*VError)
	if ok {
		return e.Code
	}
	return -1
}
package api import ( "fmt" "net/http" ) // Route represents a standard route object type Route struct { Method string Version int Path string HandlerFunc http.HandlerFunc Authenticate bool } // GetPattern returns the url match pattern for the route func (r Route) GetPattern() string { return fmt.Sprintf("/v%d/%s", r.Version, r.Path) }
package set import ( "sort" "golang.org/x/exp/constraints" ) type SortableSet[V constraints.Ordered] map[V]struct{} // OrderedList return an ordered list // can use AscLess, DescLess as less function func (s SortableSet[V]) OrderedList(less func(v1, v2 V) bool) []V { list := Set[V](s).List() sort.Slice(list, func(i, j int) bool { return less(list[i], list[j]) }) return list } func AscLess[V constraints.Ordered](v1, v2 V) bool { return v1 < v2 } func DescLess[V constraints.Ordered](v1, v2 V) bool { return v1 > v2 }
package Problem0383 func canConstruct(ransomNote string, magazine string) bool { mc := getCount(magazine) for _, b := range ransomNote { mc[b-'a']-- if mc[b-'a'] < 0 { return false } } return true } func getCount(s string) []int { res := make([]int, 26) for i := range s { res[s[i]-'a']++ } return res }
package main

import (
	"errors"
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"

	"github.com/is8ac/tfutils/descend"
	"github.com/is8ac/tfutils/descend/models"
	tf "github.com/tensorflow/tensorflow/tensorflow/go"
	"github.com/tensorflow/tensorflow/tensorflow/go/op"
)

// convModel builds a three-layer convolutional classifier graph:
// conv1 -> relu -> conv2 -> relu -> conv3, flattened and passed through a
// fully-connected layer and a softmax readout.
// NOTE(review): strides and "VALID" padding are hard-coded — presumably
// sized for the 938x257x1 spectrograms produced by preprocessAudio; confirm.
func convModel(s *op.Scope, input tf.Output, filter1, filter2, filter3, fcWeights, readout tf.Output) (class tf.Output) {
	batchSize := input.Shape().Size(0)
	conv1 := op.Conv2D(s.SubScope("conv1"),
		input,
		filter1,
		[]int64{1, 4, 2, 1},
		"VALID",
	)
	conv2 := op.Conv2D(s.SubScope("conv2"),
		op.Relu(s.SubScope("l2_relu"), conv1),
		filter2,
		[]int64{1, 4, 3, 1},
		"VALID",
	)
	conv3 := op.Conv2D(s.SubScope("conv3"),
		op.Relu(s.SubScope("l3_relu"), conv2),
		filter3,
		[]int64{1, 4, 3, 1},
		"VALID",
	)
	// Flatten all non-batch dimensions (-1 lets Reshape infer the width).
	flat := op.Reshape(s, conv3, op.Const(s.SubScope("flat"), []int64{batchSize, -1}))
	timeOutput := op.MatMul(s, flat, fcWeights)
	class = op.Softmax(s.SubScope("output"), op.MatMul(s.SubScope("readout"), timeOutput, readout))
	return
}

// simpleModel builds a single-convolution classifier: one Conv2D, flatten,
// one fully-connected layer, softmax.
func simpleModel(s *op.Scope, input tf.Output, filter, weights tf.Output) (class tf.Output) {
	batchSize := input.Shape().Size(0)
	// ugly hack: an unknown batch dimension (-1) is forced to 1 so the
	// Reshape below has a concrete shape (used by the export graph).
	if batchSize == -1 {
		batchSize = 1
	}
	fmt.Println("batch_size:", batchSize)
	conv1 := op.Conv2D(s.SubScope("conv1"),
		input,
		filter,
		[]int64{1, 7, 7, 1},
		"VALID",
	)
	//fmt.Println("conv1", conv1.Shape())
	flatInput := op.Reshape(s, conv1, op.Const(s.SubScope("shape"), []int64{batchSize, -1}))
	class = op.Softmax(s, op.MatMul(s, flatInput, weights))
	return
}

// simpleModelParamDefs declares the trainable parameter tensors of
// simpleModel; MakeUnflatten turns a flat parameter vector into these shapes.
var simpleModelParamDefs = []models.ParamDef{
	models.ParamDef{Name: "filter1", Shape: tf.MakeShape(23, 11, 1, 3)},
	models.ParamDef{Name: "weights", Shape: tf.MakeShape(14148, 2)},
}

// makeSimpleModel wires simpleModel into the descend optimizer: it returns
// a loss function over a flat parameter vector, the total parameter count,
// and a factory for an accuracy-evaluation graph.
func makeSimpleModel(input, target tf.Output) (
	lossFunc descend.LossFunc,
	size int64,
	makeFinalizeAccuracy func(*op.Scope, tf.Output, tf.Output, tf.Output) tf.Output,
) {
	unflatten, size := models.MakeUnflatten(simpleModelParamDefs)
	lossFunc = func(s *op.Scope, params tf.Output) (loss tf.Output) {
		layerParams := unflatten(s.SubScope("unflatten"), params)
		output := simpleModel(s.SubScope("model"), input, layerParams[0], layerParams[1])
		// Mean over the batch of the per-example sum of squared errors.
		loss = op.Mean(s,
			op.Sum(s,
				op.Square(s, op.Sub(s, output, target)),
				op.Const(s.SubScope("one"), int64(1)),
			),
			op.Const(s.SubScope("zero"), int64(0)),
		)
		return
	}
	makeFinalizeAccuracy = func(s *op.Scope,
		params tf.Output,
		testInputs, testTargets tf.Output,
	) (
		accuracy tf.Output,
	) {
		layerParams := unflatten(s.SubScope("accuracy_unflatten"), params)
		actual := simpleModel(s.SubScope("model"), testInputs, layerParams[0], layerParams[1])
		// Compare argmax of prediction vs one-hot target, mean of matches.
		actualLabels := op.ArgMax(s, actual, op.Const(s.SubScope("argmax_dim"), int32(-1)), op.ArgMaxOutputType(tf.Int32))
		targetLabels := op.ArgMax(s.SubScope("targets"), testTargets, op.Const(s.SubScope("argmax_dim"), int32(-1)), op.ArgMaxOutputType(tf.Int32))
		correct := op.Reshape(s.SubScope("correct"), op.Equal(s, actualLabels, targetLabels), op.Const(s.SubScope("all"), []int32{-1}))
		accuracy = op.Mean(s, op.Cast(s.SubScope("accuracy"), correct, tf.Float), op.Const(s.SubScope("mean_dim"), int32(0)))
		return
	}
	return
}

// preprocessAudio decodes raw little-endian u16 PCM bytes, scales to float,
// and computes an audio spectrogram with a trailing channel dimension added.
func preprocessAudio(s *op.Scope, audio tf.Output) (ffts tf.Output) {
	pcm := op.Div(s.SubScope("div_by_2_16"),
		op.Cast(s, op.DecodeRaw(s.SubScope("decode_u16"), audio, tf.Uint16), tf.Float),
		op.Const(s.SubScope("65536"), float32(65536)),
	)
	ffts = op.ExpandDims(s.SubScope("add_chan"),
		op.Squeeze(s,
			op.AudioSpectrogram(s,
				op.ExpandDims(s, pcm, op.Const(s.SubScope("one"), int64(1))),
				500,
				500,
			),
		),
		op.Const(s.SubScope("two"), int64(2)),
	)
	return
}

// makeConvModel is the convModel counterpart of makeSimpleModel: same loss
// and accuracy plumbing, with five parameter tensors instead of two.
func makeConvModel(input, target tf.Output) (
	lossFunc descend.LossFunc,
	size int64,
	makeFinalizeAccuracy func(*op.Scope, tf.Output, tf.Output, tf.Output) tf.Output,
) {
	paramDefs := []models.ParamDef{
		models.ParamDef{Name: "filter1", Shape: tf.MakeShape(7, 5, 1, 3)},
		models.ParamDef{Name: "filter2", Shape: tf.MakeShape(7, 5, 3, 5)},
		models.ParamDef{Name: "filter3", Shape: tf.MakeShape(7, 5, 5, 5)},
		models.ParamDef{Name: "fc", Shape: tf.MakeShape(845, 7)},
		models.ParamDef{Name: "readout", Shape: tf.MakeShape(7, 2)},
	}
	unflatten, size := models.MakeUnflatten(paramDefs)
	lossFunc = func(s *op.Scope, params tf.Output) (loss tf.Output) {
		layerParams := unflatten(s.SubScope("unflatten"), params)
		output := convModel(s.SubScope("model"), input, layerParams[0], layerParams[1], layerParams[2], layerParams[3], layerParams[4])
		loss = op.Mean(s,
			op.Sum(s,
				op.Square(s, op.Sub(s, output, target)),
				op.Const(s.SubScope("one"), int64(1)),
			),
			op.Const(s.SubScope("zero"), int64(0)),
		)
		return
	}
	makeFinalizeAccuracy = func(s *op.Scope,
		params tf.Output,
		testInputs, testTargets tf.Output,
	) (
		accuracy tf.Output,
	) {
		layerParams := unflatten(s.SubScope("accuracy_unflatten"), params)
		actual := convModel(s.SubScope("model"), testInputs, layerParams[0], layerParams[1], layerParams[2], layerParams[3], layerParams[4])
		actualLabels := op.ArgMax(s, actual, op.Const(s.SubScope("argmax_dim"), int32(-1)), op.ArgMaxOutputType(tf.Int32))
		targetLabels := op.ArgMax(s.SubScope("targets"), testTargets, op.Const(s.SubScope("argmax_dim"), int32(-1)), op.ArgMaxOutputType(tf.Int32))
		correct := op.Reshape(s.SubScope("correct"), op.Equal(s, actualLabels, targetLabels), op.Const(s.SubScope("all"), []int32{-1}))
		accuracy = op.Mean(s, op.Cast(s.SubScope("accuracy"), correct, tf.Float), op.Const(s.SubScope("mean_dim"), int32(0)))
		return
	}
	return
}

// initDataQueue builds a FIFO-queue ingestion pipeline: a feed graph that
// reads a file, preprocesses it, one-hot encodes its boolean label and
// enqueues the pair, plus dequeue outputs for n elements at once.
// initLoadDatum returns a closure that enqueues one labelled file path via
// the session.
func initDataQueue(s *op.Scope,
	preprocess func(*op.Scope, tf.Output) tf.Output,
	n int32,
) (
	initLoadDatum func(*tf.Session) (func(string, bool) error, error),
	closeQueue *tf.Operation,
	dequeueFFTs, dequeueLabels tf.Output,
) {
	queue := op.FIFOQueueV2(s, []tf.DataType{tf.Float, tf.Float}, op.FIFOQueueV2Shapes([]tf.Shape{tf.MakeShape(938, 257, 1), tf.MakeShape(2)}))
	// Counter variable tracking how many elements were enqueued.
	sizeVar := op.VarHandleOp(s.SubScope("n"), tf.Int32, tf.ScalarShape(), op.VarHandleOpContainer("n"))
	increment := op.AssignAddVariableOp(s, sizeVar, op.Const(s.SubScope("1i32"), int32(1)))
	reset := op.AssignVariableOp(s, sizeVar, op.Const(s.SubScope("0i32"), int32(0)))
	readSize := op.ReadVariableOp(s, sizeVar, tf.Int32)
	_ = readSize // built but currently unused
	fileNamePH := op.Placeholder(s.SubScope("filename"), tf.String, op.PlaceholderShape(tf.ScalarShape()))
	readFile := op.ReadFile(s, fileNamePH)
	fft := preprocess(s.SubScope("preprocess"), readFile)
	labelIndexPH := op.Placeholder(s.SubScope("label_index"), tf.Bool)
	// Bool label -> one-hot float vector of width 2.
	label := op.OneHot(s,
		op.Cast(s, labelIndexPH, tf.Int64),
		op.Const(s.SubScope("two"), int32(2)),
		op.Const(s.SubScope("1f32"), float32(1)),
		op.Const(s.SubScope("0f32"), float32(0)),
	)
	// Enqueue only after the counter increment has run.
	enqueue := op.QueueEnqueueV2(s.WithControlDependencies(increment), queue, []tf.Output{fft, label})
	dequeueComponents := op.QueueDequeueManyV2(s, queue, op.Const(s.SubScope("n"), n), []tf.DataType{tf.Float, tf.Float})
	fmt.Println(s.Err()) // debug: surface any graph-construction error so far
	dequeueFFTs = dequeueComponents[0]
	dequeueLabels = dequeueComponents[1]
	closeQueue = op.QueueCloseV2(s, queue)
	initLoadDatum = func(sess *tf.Session) (loadDatum func(string, bool) error, err error) {
		_, err = sess.Run(nil, nil, []*tf.Operation{reset})
		if err != nil {
			return
		}
		loadDatum = func(path string, label bool) (err error) {
			fileNameTensor, err := tf.NewTensor(path)
			if err != nil {
				return
			}
			labelTensor, err := tf.NewTensor(label)
			if err != nil {
				return
			}
			_, err = sess.Run(map[tf.Output]*tf.Tensor{fileNamePH: fileNameTensor, labelIndexPH: labelTensor}, nil, []*tf.Operation{enqueue})
			return
		}
		return
	}
	return
}

// nextBatch builds an infinitely-repeating, shuffled, batched dataset over
// the cached ffts/labels tensors and returns the batch outputs plus the
// iterator-initializer operation.
func nextBatch(s *op.Scope, ffts, labels, seed tf.Output, n int64) (batchFFTs, batchLabels tf.Output, init *tf.Operation) {
	outputTypes := []tf.DataType{ffts.DataType(), labels.DataType()}
	outputShapes := []tf.Shape{tf.MakeShape(n, 938, 257, 1), tf.MakeShape(n, 2)}
	preBatchOutputShapes := []tf.Shape{tf.MakeShape(938, 257, 1), tf.MakeShape(2)}
	dataset := op.TensorSliceDataset(s, []tf.Output{ffts, labels}, preBatchOutputShapes)
	// count = -1 repeats forever.
	repeatDataset := op.RepeatDataset(s, dataset, op.Const(s.SubScope("count"), int64(-1)), outputTypes, preBatchOutputShapes)
	shuffleDataset := op.ShuffleDataset(s,
		repeatDataset,
		op.Const(s.SubScope("buffer_size"), int64(1000)),
		seed,
		seed,
		outputTypes,
		preBatchOutputShapes,
	)
	batchDataset := op.BatchDataset(s, shuffleDataset, op.Const(s.SubScope("batch_size"), n), outputTypes, outputShapes)
	iterator := op.Iterator(s, "", "", outputTypes, outputShapes)
	next := op.IteratorGetNext(s, iterator, outputTypes, outputShapes)
	init = op.MakeIterator(s, batchDataset, iterator)
	batchFFTs = next[0]
	batchLabels = next[1]
	return
}

// loadClass applies load to every file in path, logging and skipping files
// that fail. NOTE(review): the error of the last failing file leaks into the
// returned err even though it was "skipped" — confirm whether intended.
func loadClass(path string, load func(string) error) (err error) {
	files, err := ioutil.ReadDir(path)
	if err != nil {
		return
	}
	for _, file := range files {
		err = load(filepath.Join(path, file.Name()))
		if err != nil {
			log.Println("skipping", file.Name(), ":", err.Error())
			continue
		}
	}
	return
}

// varCache stores input in a named resource variable so it can be computed
// once (via init) and re-read cheaply afterwards (via output).
func varCache(s *op.Scope, input tf.Output, shape tf.Shape, name string) (init *tf.Operation, output tf.Output) {
	variable := op.VarHandleOp(s, input.DataType(), shape, op.VarHandleOpSharedName(name))
	init = op.AssignVariableOp(s, variable, input)
	output = op.ReadVariableOp(s, variable, input.DataType())
	return
}

// namedIdentity wraps input in an Identity op with an explicit name, so the
// output can be located by name in the exported graph.
func namedIdentity(scope *op.Scope, input tf.Output, name string) (output tf.Output) {
	if scope.Err() != nil {
		return
	}
	opspec := tf.OpSpec{
		Type: "Identity",
		Input: []tf.Input{
			input,
		},
		Name: name,
	}
	// note: local `op` shadows the imported op package below this point
	op := scope.AddOperation(opspec)
	return op.Output(0)
}

// getOP looks up an operation by name in graph, returning an error when it
// does not exist.
func getOP(graph *tf.Graph, name string) (operation *tf.Operation, err error) {
	operation = graph.Operation(name)
	if operation == nil {
		err = errors.New("can't find operation " + name)
		return
	}
	return
}

// gobalSeed seeds the dataset shuffle.
// NOTE(review): name looks like a typo of "globalSeed", and main also
// declares an unused `const globalSeed = 42` — confirm which was intended.
const gobalSeed int64 = 0

// main builds the training graph, loads the labelled audio corpus through
// the queue into cached variables, trains simpleModel with the descend
// sub-dimension state machine, then re-builds an inference graph from the
// trained constants and writes it to conv1.pb.
func main() {
	const subSize = 30
	const globalSeed = 42
	const batchSize = 150
	const searchSize float32 = 0.0003
	const gradsScale float32 = 0.005
	const dataSize int64 = 1100
	fmt.Println(tf.Version())
	s := op.NewScope()
	initLoadDatum, closeQueue, dequeueFFTs, dequeueLabels := initDataQueue(s.SubScope("queue"), preprocessAudio, int32(dataSize))
	// Cache the whole dequeued dataset in variables so it is computed once.
	initFFTcache, readFFTs := varCache(s.SubScope("fft_cache"), dequeueFFTs, tf.MakeShape(dataSize, 938, 257, 1), "ffts")
	initLabelsCache, readLabels := varCache(s.SubScope("labels_cache"), dequeueLabels, tf.MakeShape(dataSize, 2), "labels")
	fmt.Println("shape:", readFFTs.Shape())
	scalarSeed := op.Const(s.SubScope("scalar_seed"), int64(gobalSeed))
	fftsBatch, labelsBatch, initOP := nextBatch(s.SubScope("dataset"), readFFTs, readLabels, scalarSeed, batchSize)
	step := op.Const(s.SubScope("search_size"), searchSize)
	lossFunc, size, makeFinalizeAccuracy := makeSimpleModel(fftsBatch, labelsBatch)
	//lossFunc, size, makeFinalizeAccuracy := makeConvModel(fftsBatch, labelsBatch)
	fmt.Println("size:", size)
	updatesPH := op.Placeholder(s.SubScope("updates"), tf.Float, op.PlaceholderShape(tf.MakeShape(subSize)))
	randomExpand := descend.MakeRandomExpand(size, 42)
	initSM, createObserveGrads, incGeneration, generation, params, perturb := descend.NewDynamicSubDimSM(s.SubScope("sm"), updatesPH, randomExpand, size) // make the state machine.
	_ = params
	_ = generation
	loss := lossFunc(s.SubScope("loss"), params)
	grads := createObserveGrads(lossFunc, step)
	updates := op.Mul(s.SubScope("scale_grads"), grads, op.Const(s.SubScope("grads_scale"), gradsScale))
	// We are reusing the training data for test. This is bad practice.
	accuracyOP := makeFinalizeAccuracy(s.SubScope("accuracy"), params, readFFTs, readLabels)
	unflatten, _ := models.MakeUnflatten(simpleModelParamDefs)
	layerParams := unflatten(s.SubScope("unflatten"), params)
	graph, err := s.Finalize()
	if err != nil {
		panic(err)
	}
	sess, err := tf.NewSession(graph, nil)
	if err != nil {
		panic(err)
	}
	loadDatum, err := initLoadDatum(sess)
	if err != nil {
		panic(err)
	}
	// Enqueue the two labelled classes from disk.
	loadClass("label/good", func(fileName string) error { return loadDatum(fileName, false) })
	loadClass("label/nongood", func(fileName string) error { return loadDatum(fileName, true) })
	_, err = sess.Run(nil, nil, []*tf.Operation{initFFTcache, initLabelsCache})
	if err != nil {
		panic(err)
	}
	_, err = sess.Run(nil, nil, []*tf.Operation{initOP})
	if err != nil {
		panic(err)
	}
	// now we can close the queue
	_, err = sess.Run(nil, nil, []*tf.Operation{closeQueue})
	if err != nil {
		panic(err)
	}
	err = initSM(sess)
	if err != nil {
		panic(err)
	}
	// Training loop: observe scaled gradients, perturb parameters, advance
	// the generation counter, and report accuracy every iteration.
	for i := 0; i < 10000; i++ {
		observedGrads, err := sess.Run(nil, []tf.Output{updates, loss}, nil)
		if err != nil {
			panic(err)
		}
		//fmt.Println(observedGrads[0].Value())
		fmt.Println("loss:", observedGrads[1].Value())
		_, err = sess.Run(map[tf.Output]*tf.Tensor{updatesPH: observedGrads[0]}, nil, []*tf.Operation{perturb})
		if err != nil {
			panic(err)
		}
		_, err = sess.Run(nil, nil, []*tf.Operation{incGeneration})
		if err != nil {
			panic(err)
		}
		if i%1 == 0 {
			acc, err := sess.Run(nil, []tf.Output{accuracyOP}, nil)
			if err != nil {
				panic(err)
			}
			fmt.Println(i, acc[0].Value().(float32)*100.0, "%")
		}
	}
	// Extract the trained parameters and rebuild a standalone inference
	// graph with them baked in as constants.
	results, err := sess.Run(nil, layerParams, nil)
	if err != nil {
		panic(err)
	}
	s = op.NewScope()
	filter := op.Const(s.SubScope("filter"), results[0])
	weights := op.Const(s.SubScope("weights"), results[1])
	dataPH := op.Placeholder(s.SubScope("input"), tf.String, op.PlaceholderShape(tf.ScalarShape()))
	ffts := preprocessAudio(s.SubScope("preprocess"), dataPH)
	fmt.Println("ffts:", ffts.Shape())
	expandedFfts := op.ExpandDims(s, ffts, op.Const(s.SubScope("one"), int64(0)))
	fmt.Println("expandedFfts:", expandedFfts.Shape())
	output := simpleModel(s.SubScope("model"), expandedFfts, filter, weights)
	label := namedIdentity(s, op.Squeeze(s.SubScope("remove_dim"), output), "output")
	_ = label
	fmt.Println(output.Shape())
	graph, err = s.Finalize()
	if err != nil {
		panic(err)
	}
	file, err := os.Create("conv1.pb")
	if err != nil {
		panic(err)
	}
	_, err = graph.WriteTo(file)
	if err != nil {
		panic(err)
	}
}
package models import ( "github.com/google/uuid" ) type Session struct { SessionId uuid.UUID User *User } type SessionService interface { Set(session *Session) error Get(sessionId uuid.UUID) (bool, error) Delete(sessionId uuid.UUID) error }
package main import ( "os" "fmt" ) func rm(message Message) { err := os.Remove(configData.STORAGE_PATH + message.Name) if err != nil { fmt.Printf("Failed to remove '%v' because '%v'\n", message.Name, err) } }
package main

import (
	"flag"
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
	"reflect"
	"strconv"
	"testing"
	"time"

	"github.com/robustirc/rafthttp"
	"github.com/robustirc/robustirc/internal/ircserver"
	"github.com/robustirc/robustirc/internal/outputstream"
	"github.com/robustirc/robustirc/internal/raftstore"
	"github.com/robustirc/robustirc/internal/robust"
	"github.com/stapelberg/glog"

	"github.com/hashicorp/raft"
)

// appendLog appends a raft command-log entry containing msg, with the index
// following the current tail.
func appendLog(logs []*raft.Log, msg string) []*raft.Log {
	return append(logs, &raft.Log{
		Type:  raft.LogCommand,
		Index: uint64(len(logs) + 1),
		Data:  []byte(msg),
	})
}

// verifyEndState asserts that, after all log messages are applied, session 1
// has nick "secure_" and is joined to exactly #chaos-hd.
func verifyEndState(t *testing.T) {
	s, err := ircServer.GetSession(robust.Id{Id: 1})
	if err != nil {
		t.Fatalf("No session found after applying log messages")
	}
	if s.Nick != "secure_" {
		t.Fatalf("session.Nick: got %q, want %q", s.Nick, "secure_")
	}
	// s.Channels is a map[lcChan]bool, so we copy it over.
	got := make(map[string]bool)
	for key, value := range s.Channels {
		got[string(key)] = value
	}
	want := make(map[string]bool)
	want["#chaos-hd"] = true
	if !reflect.DeepEqual(got, want) {
		t.Fatalf("session.Channels: got %v, want %v", got, want)
	}
}

// snapshot takes an FSM snapshot, checks it retains the last log index, and
// persists it into fss.
func snapshot(fsm raft.FSM, fss raft.SnapshotStore, numLogs uint64) error {
	snapshot, err := fsm.Snapshot()
	if err != nil {
		return fmt.Errorf("Unexpected error in fsm.Snapshot(): %v", err)
	}

	robustsnap, ok := snapshot.(*robustSnapshot)
	if !ok {
		return fmt.Errorf("fsm.Snapshot() return value is not a robustSnapshot")
	}
	if robustsnap.lastIndex != numLogs {
		return fmt.Errorf("snapshot does not retain the last message, got: %d, want: %d", robustsnap.lastIndex, numLogs)
	}

	sink, err := fss.Create(
		1,                         // snapshot version
		numLogs,                   // index
		1,                         // term
		raft.Configuration{},      // configuration (peers)
		0,                         // configurationIndex
		&rafthttp.HTTPTransport{}) // only needed for EncodePeers, which is stateless
	if err != nil {
		return fmt.Errorf("fss.Create: %v", err)
	}

	if err := snapshot.Persist(sink); err != nil {
		return fmt.Errorf("Unexpected error in snapshot.Persist(): %v", err)
	}
	sink.Close()
	return nil
}

// restore loads the most recent snapshot from fss into fsm and verifies
// that compaction actually shrank the ircstore log range.
func restore(fsm raft.FSM, fss raft.SnapshotStore, numLogs uint64) error {
	snapshots, err := fss.List()
	if err != nil {
		return fmt.Errorf("fss.List(): %v", err)
	}
	// snapshots[0] is the most recent snapshot
	snapshotId := snapshots[0].ID
	_, readcloser, err := fss.Open(snapshotId)
	if err != nil {
		return fmt.Errorf("fss.Open(%s): %v", snapshotId, err)
	}

	if err := fsm.Restore(readcloser); err != nil {
		return fmt.Errorf("fsm.Restore(): %v", err)
	}

	first, _ := fsm.(*FSM).ircstore.FirstIndex()
	last, _ := fsm.(*FSM).ircstore.LastIndex()

	if last-first >= numLogs {
		return fmt.Errorf("Compaction did not decrease log size. got: %d, want: < %d", last-first, numLogs)
	}
	return nil
}

// TestCompaction does a full snapshot, persists it to disk, restores it and
// makes sure the state matches expectations. The other test functions directly
// test what should be compacted.
func TestCompaction(t *testing.T) {
	ircServer = ircserver.NewIRCServer("testnetwork", time.Now())

	var err error
	outputStream, err = outputstream.NewOutputStream("")
	// FIX: this error was previously assigned but never checked.
	if err != nil {
		t.Fatalf("Unexpected error in NewOutputStream: %v", err)
	}

	tempdir := t.TempDir()
	flag.Set("raftdir", tempdir)

	logstore, err := raftstore.NewLevelDBStore(filepath.Join(tempdir, "raftlog"), false, false)
	if err != nil {
		t.Fatalf("Unexpected error in NewLevelDBStore: %v", err)
	}
	ircstore, err := raftstore.NewLevelDBStore(filepath.Join(tempdir, "irclog"), false, false)
	if err != nil {
		t.Fatalf("Unexpected error in NewLevelDBStore: %v", err)
	}
	fsm := FSM{
		store:                logstore,
		ircstore:             ircstore,
		lastSnapshotState:    make(map[uint64][]byte),
		sessionExpirationDur: 10 * time.Minute,
		ReplaceState: func(*ircserver.IRCServer, *raftstore.LevelDBStore, *outputstream.OutputStream) {
			// no-op for the compaction test
		},
	}

	var logs []*raft.Log
	logs = appendLog(logs, `{"Id": {"Id": 1}, "Type": 0, "Data": "auth"}`)
	logs = appendLog(logs, `{"Id": {"Id": 2}, "Session": {"Id": 1}, "Type": 2, "Data": "NICK sECuRE"}`)
	logs = appendLog(logs, `{"Id": {"Id": 3}, "Session": {"Id": 1}, "Type": 2, "Data": "USER blah 0 * :Michael Stapelberg"}`)
	logs = appendLog(logs, `{"Id": {"Id": 4}, "Session": {"Id": 1}, "Type": 2, "Data": "NICK secure_"}`)
	logs = appendLog(logs, `{"Id": {"Id": 5}, "Session": {"Id": 1}, "Type": 2, "Data": "JOIN #chaos-hd"}`)
	logs = appendLog(logs, `{"Id": {"Id": 6}, "Session": {"Id": 1}, "Type": 2, "Data": "JOIN #i3"}`)
	logs = appendLog(logs, `{"Id": {"Id": 7}, "Session": {"Id": 1}, "Type": 2, "Data": "PRIVMSG #chaos-hd :heya"}`)
	logs = appendLog(logs, `{"Id": {"Id": 8}, "Session": {"Id": 1}, "Type": 2, "Data": "PRIVMSG #chaos-hd :newer message"}`)
	logs = appendLog(logs, `{"Id": {"Id": 9}, "Session": {"Id": 1}, "Type": 2, "Data": "PART #i3"}`)

	// These messages are too new to be compacted.
	nowID := time.Now().UnixNano()
	logs = appendLog(logs, `{"Id": {"Id": 10}, "UnixNano": `+strconv.FormatInt(nowID, 10)+`, "Session": {"Id": 1}, "Type": 2, "Data": "PART #chaos-hd"}`)
	nowID++
	logs = appendLog(logs, `{"Id": {"Id": 11}, "UnixNano": `+strconv.FormatInt(nowID, 10)+`, "Session": {"Id": 1}, "Type": 2, "Data": "JOIN #chaos-hd"}`)

	if err := logstore.StoreLogs(logs); err != nil {
		t.Fatalf("Unexpected error in store.StoreLogs: %v", err)
	}
	for _, log := range logs {
		fsm.Apply(log)
	}

	verifyEndState(t)

	fss, err := raft.NewFileSnapshotStore(tempdir, 5, nil)
	if err != nil {
		t.Fatalf("%v", err)
	}

	// Snapshot twice so that we know state is carried over from one
	// snapshot to the next.
	if err := snapshot(&fsm, fss, uint64(len(logs))); err != nil {
		t.Fatal(err)
	}
	// raft uses time.Now() in the snapshot name, so advance time by 1ms to
	// guarantee we get a different filename.
	time.Sleep(1 * time.Millisecond)
	if err := snapshot(&fsm, fss, uint64(len(logs))); err != nil {
		t.Fatal(err)
	}

	ircServer = ircserver.NewIRCServer("testnetwork", time.Now())

	if err := restore(&fsm, fss, uint64(len(logs))); err != nil {
		t.Fatal(err)
	}

	verifyEndState(t)

	// Restore() a fresh FSM, then take another snapshot, restore it
	// and verify the end state. This covers the code path where the
	// previous snapshot was not done in the same process run.
	ircstore = fsm.ircstore
	fsm = FSM{
		store:             logstore,
		ircstore:          ircstore,
		lastSnapshotState: make(map[uint64][]byte),
		ReplaceState: func(*ircserver.IRCServer, *raftstore.LevelDBStore, *outputstream.OutputStream) {
			// no-op for the compaction test
		},
	}

	if err := restore(&fsm, fss, uint64(len(logs))); err != nil {
		t.Fatal(err)
	}
	if err := snapshot(&fsm, fss, uint64(len(logs))); err != nil {
		t.Fatal(err)
	}

	ircServer = ircserver.NewIRCServer("testnetwork", time.Now())

	if err := restore(&fsm, fss, uint64(len(logs))); err != nil {
		t.Fatal(err)
	}

	verifyEndState(t)
}

// TestMain sets up flags, glog flushing and a temporary raft directory for
// the package's tests.
func TestMain(m *testing.M) {
	defer glog.Flush()
	flag.Parse()

	tempdir, err := ioutil.TempDir("", "robustirc-test-raftdir-")
	if err != nil {
		log.Fatal(err)
	}
	raftDir = &tempdir

	// TODO: cleanup tmp-outputstream and permanent-compaction*
	os.Exit(m.Run())
}
package factories

import (
	sdk "github.com/identityOrg/oidcsdk"
	"net/url"
	"time"
)

type (
	// DefaultAuthenticationRequestContext is the default carrier of state
	// for a single OIDC authentication request: the parsed request values,
	// the resolved client and session, any issued tokens, and any error.
	DefaultAuthenticationRequestContext struct {
		RequestID         string
		RequestedAt       time.Time
		State             string
		RedirectURI       string
		ClientId          string
		Nonce             string
		ResponseMode      string
		RequestedScopes   sdk.Arguments
		RequestedAudience sdk.Arguments
		Claims            map[string]interface{}
		Client            sdk.IClient
		Profile           sdk.RequestProfile
		IssuedTokens      sdk.Tokens
		Error             sdk.IError
		Form              *url.Values
		ResponseType      sdk.Arguments
		UserSession       sdk.ISession
	}
)

// GetUserSession returns the user session bound to this request.
func (d *DefaultAuthenticationRequestContext) GetUserSession() sdk.ISession {
	return d.UserSession
}

// SetUserSession binds a user session to this request.
func (d *DefaultAuthenticationRequestContext) SetUserSession(sess sdk.ISession) {
	d.UserSession = sess
}

// GetError returns the error recorded on this request, if any.
func (d *DefaultAuthenticationRequestContext) GetError() sdk.IError {
	return d.Error
}

// SetError records an error on this request.
func (d *DefaultAuthenticationRequestContext) SetError(err sdk.IError) {
	d.Error = err
}

// GetRequestID returns the unique id of this request.
func (d *DefaultAuthenticationRequestContext) GetRequestID() string {
	return d.RequestID
}

// GetRequestedAt returns the time the request was received.
func (d *DefaultAuthenticationRequestContext) GetRequestedAt() time.Time {
	return d.RequestedAt
}

// GetState returns the client-supplied state parameter.
func (d *DefaultAuthenticationRequestContext) GetState() string {
	return d.State
}

// GetRedirectURI returns the redirect URI of the request.
func (d *DefaultAuthenticationRequestContext) GetRedirectURI() string {
	return d.RedirectURI
}

// GetClientID returns the requesting client's id.
func (d *DefaultAuthenticationRequestContext) GetClientID() string {
	return d.ClientId
}

// GetRequestedScopes returns the scopes asked for by the client.
func (d *DefaultAuthenticationRequestContext) GetRequestedScopes() sdk.Arguments {
	return d.RequestedScopes
}

// GetRequestedAudience returns the audiences asked for by the client.
func (d *DefaultAuthenticationRequestContext) GetRequestedAudience() sdk.Arguments {
	return d.RequestedAudience
}

// GetClaims returns the requested claims map.
func (d *DefaultAuthenticationRequestContext) GetClaims() map[string]interface{} {
	return d.Claims
}

// GetClient returns the resolved client.
func (d *DefaultAuthenticationRequestContext) GetClient() sdk.IClient {
	return d.Client
}

// SetClient records the resolved client.
func (d *DefaultAuthenticationRequestContext) SetClient(client sdk.IClient) {
	d.Client = client
}

// GetProfile returns the request profile.
func (d *DefaultAuthenticationRequestContext) GetProfile() sdk.RequestProfile {
	return d.Profile
}

// SetProfile records the request profile.
func (d *DefaultAuthenticationRequestContext) SetProfile(profile sdk.RequestProfile) {
	d.Profile = profile
}

// GetIssuedTokens returns all tokens issued so far for this request.
func (d *DefaultAuthenticationRequestContext) GetIssuedTokens() sdk.Tokens {
	return d.IssuedTokens
}

// IssueAccessToken records an issued access token with its signature and
// expiry.
func (d *DefaultAuthenticationRequestContext) IssueAccessToken(token string, signature string, expiry time.Time) {
	d.IssuedTokens.AccessToken = token
	d.IssuedTokens.AccessTokenSignature = signature
	d.IssuedTokens.AccessTokenExpiry = expiry
}

// IssueAuthorizationCode records an issued authorization code with its
// signature and expiry.
func (d *DefaultAuthenticationRequestContext) IssueAuthorizationCode(code string, signature string, expiry time.Time) {
	d.IssuedTokens.AuthorizationCode = code
	d.IssuedTokens.AuthorizationCodeSignature = signature
	d.IssuedTokens.AuthorizationCodeExpiry = expiry
}

// IssueRefreshToken records an issued refresh token with its signature and
// expiry.
func (d *DefaultAuthenticationRequestContext) IssueRefreshToken(token string, signature string, expiry time.Time) {
	d.IssuedTokens.RefreshToken = token
	d.IssuedTokens.RefreshTokenSignature = signature
	d.IssuedTokens.RefreshTokenExpiry = expiry
}

// IssueIDToken records an issued ID token.
func (d *DefaultAuthenticationRequestContext) IssueIDToken(token string) {
	d.IssuedTokens.IDToken = token
}

// GetForm returns the raw request form values.
func (d *DefaultAuthenticationRequestContext) GetForm() *url.Values {
	return d.Form
}

// GetNonce returns the client-supplied nonce.
func (d *DefaultAuthenticationRequestContext) GetNonce() string {
	return d.Nonce
}

// GetResponseMode returns the explicitly requested response mode, falling
// back to the OIDC default: fragment for implicit/hybrid response types
// (token or id_token), query otherwise.
// Style fix: flattened the previous else-after-return nesting.
func (d *DefaultAuthenticationRequestContext) GetResponseMode() string {
	if d.ResponseMode != "" {
		return d.ResponseMode
	}
	if d.ResponseType.HasOneOf(sdk.ResponseTypeToken, sdk.ResponseTypeIdToken) {
		return sdk.ResponseModeFragment
	}
	return sdk.ResponseModeQuery
}

// GetResponseType returns the requested response types.
func (d *DefaultAuthenticationRequestContext) GetResponseType() sdk.Arguments {
	return d.ResponseType
}

// SetRedirectURI records the validated redirect URI.
func (d *DefaultAuthenticationRequestContext) SetRedirectURI(uri string) {
	d.RedirectURI = uri
}
package pstoremem import pstore "gx/ipfs/QmQFFp4ntkd4C14sP3FaH9WJyBuetuGUVo6dShNHvnoEvC/go-libp2p-peerstore" // NewPeerstore creates an in-memory threadsafe collection of peers. func NewPeerstore() pstore.Peerstore { return pstore.NewPeerstore( NewKeyBook(), NewAddrBook(), NewPeerMetadata()) }
/* * EVE Swagger Interface * * An OpenAPI for EVE Online * * OpenAPI spec version: 0.4.1.dev1 * * Generated by: https://github.com/swagger-api/swagger-codegen.git */ package swagger // 200 ok object type GetUniverseBloodlines200Ok struct { // bloodline_id integer BloodlineId int32 `json:"bloodline_id,omitempty"` // charisma integer Charisma int32 `json:"charisma,omitempty"` // corporation_id integer CorporationId int32 `json:"corporation_id,omitempty"` // description string Description string `json:"description,omitempty"` // intelligence integer Intelligence int32 `json:"intelligence,omitempty"` // memory integer Memory int32 `json:"memory,omitempty"` // name string Name string `json:"name,omitempty"` // perception integer Perception int32 `json:"perception,omitempty"` // race_id integer RaceId int32 `json:"race_id,omitempty"` // ship_type_id integer ShipTypeId int32 `json:"ship_type_id,omitempty"` // willpower integer Willpower int32 `json:"willpower,omitempty"` }
package hive import ( "github.com/google/uuid" "net" ) type AConnection interface { Start() RemoteAddr() net.Addr Send([]byte) Close() } type AUserHandler interface { ConnectionAdd(uint32, AConnection) ConnectionRemove(uint32, AConnection) ConnectionMessage(uint32, []byte) } type AUserStat interface { Connected() ConnectionAdded() ConnectionRemoved() Disconnected() Received() Transmitted() } type AAppHandler interface { ConnectionAdd(uuid.UUID, AConnection) ConnectionRemove(uuid.UUID, AConnection) ConnectionMessage(uuid.UUID, []byte) } type AAppStat interface { Connected() Disconnected() Reconnected() Received() Transmitted() }
package logbridge

import (
	"bufio"
	"bytes"
	"io"
	"time"

	"github.com/rcrowley/go-metrics"
	"github.com/square/p2/pkg/logging"
	"golang.org/x/time/rate"
)

// LogBridge fans log lines from Reader out to a durable writer (never rate
// limited) and a lossy writer (rate limited, lines may be dropped), while
// tracking throughput metrics.
type LogBridge struct {
	Reader        io.Reader
	DurableWriter io.Writer
	LossyWriter   io.Writer

	// We rate limit writes to LossyWriter because we can tolerate loss.
	// writes to DurableWriter are not rate limited.
	logLineRateLimit *rate.Limiter
	logByteRateLimit *rate.Limiter
	logger           logging.Logger
	metrics          MetricsRegistry
	logLinesCount    metrics.Counter
	logBytes         metrics.Counter
	droppedLineCount metrics.Counter
	throttledMs      metrics.Counter
}

// MetricsRegistry is the subset of a metrics registry this package needs.
type MetricsRegistry interface {
	Register(metricName string, metric interface{}) error
}

// NewLogBridge constructs a LogBridge with the given rate limits (lines and
// bytes per second, each with a 5x burst) and registers its counters under
// the given metric key names. A nil metricsRegistry gets a fresh registry.
func NewLogBridge(r io.Reader,
	durableWriter io.Writer,
	lossyWriter io.Writer,
	logger logging.Logger,
	writePerSec int,
	bytesPerSec int,
	metricsRegistry MetricsRegistry,
	loglineMetricKeyName string,
	logByteMetricKeyName string,
	droppedLineMetricKeyName string,
	throttledMsKeyName string) *LogBridge {
	if metricsRegistry == nil {
		metricsRegistry = metrics.NewRegistry()
	}
	lineCount := metrics.NewCounter()
	logBytes := metrics.NewCounter()
	throttledMs := metrics.NewCounter()
	droppedLineCount := metrics.NewCounter()
	// Registration errors are deliberately ignored: duplicate registration
	// must not prevent the bridge from starting.
	_ = metricsRegistry.Register(loglineMetricKeyName, lineCount)
	_ = metricsRegistry.Register(logByteMetricKeyName, logBytes)
	_ = metricsRegistry.Register(droppedLineMetricKeyName, droppedLineCount)
	_ = metricsRegistry.Register(throttledMsKeyName, throttledMs)
	return &LogBridge{
		Reader:           r,
		DurableWriter:    durableWriter,
		LossyWriter:      lossyWriter,
		logLineRateLimit: rate.NewLimiter(rate.Limit(writePerSec), 5*writePerSec),
		logByteRateLimit: rate.NewLimiter(rate.Limit(bytesPerSec), 5*bytesPerSec),
		logger:           logger,
		metrics:          metricsRegistry,
		logLinesCount:    lineCount,
		logBytes:         logBytes,
		throttledMs:      throttledMs,
		droppedLineCount: droppedLineCount,
	}
}

// LossyCopy implements a buffered copy from r to lb.LossyWriter, dropping
// lines when the buffer of the given capacity is full. It blocks until r is
// exhausted.
func (lb *LogBridge) LossyCopy(r io.Reader, capacity int) {
	lines := make(chan []byte, capacity)
	go lb.lossyCopy(r, lines)

	var (
		n                int
		err              error
		lineReservation  *rate.Reservation
		bytesReservation *rate.Reservation
		lineDelay        time.Duration
		byteDelay        time.Duration
	)
	for line := range lines {
		lineReservation = lb.logLineRateLimit.Reserve()
		bytesReservation = lb.logByteRateLimit.ReserveN(time.Now(), len(line))
		lineDelay = lineReservation.Delay()
		byteDelay = bytesReservation.Delay()
		// Sleep for whichever limiter imposes the longer delay.
		// FIX: this collapses a previous three-way comparison whose "=="
		// branch duplicated the ">" branch; behavior is identical.
		delay := lineDelay
		if byteDelay > delay {
			delay = byteDelay
		}
		lb.throttledMs.Inc(delay.Nanoseconds() / int64(time.Millisecond))
		time.Sleep(delay)

		n, err = writeWithRetry(lb.LossyWriter, line, lb.logger)
		if err != nil {
			lb.logger.WithError(err).WithField("dropped line", line).WithField("retried", isRetriable(err)).WithField("bytes written", n).Errorln("Encountered a non-recoverable error. Proceeding.")
			continue
		}
		lb.logLinesCount.Inc(1)
		lb.logBytes.Inc(int64(n))
	}
}

// This function will scan lines from src and send them on the lines channel,
// except when the channel is full in which case it will skip the line
func (lb *LogBridge) lossyCopy(r io.Reader, lines chan []byte) {
	defer close(lines)
	droppedLines := 0
	scanner := bufio.NewScanner(r)
	scanner.Split(scanFullLines)
	var buf []byte
	for scanner.Scan() {
		rawLine := scanner.Bytes() // consume a line regardless of the state of the writer
		// The token slices returned by the Scanner are potentially backed by the same
		// array, whose contents changes over time as new input is read. Since the lines
		// will be handled asynchronously, we have to make a copy now. Copy into a large
		// buffer to prevent too much churn on the garbage collector.
		if len(rawLine) > len(buf) || len(buf) == 0 {
			buf = make([]byte, bufio.MaxScanTokenSize)
		}
		n := copy(buf, rawLine)
		line := buf[:n]
		buf = buf[n:]
		select {
		case lines <- line:
		default:
			droppedLines++
			lb.droppedLineCount.Inc(1)
			warningMessage := "Line was dropped due to full capacity. If this occurs frequently, consider increasing the capacity of this logbridge."
			lb.logger.WithField("dropped line", line).Errorln(warningMessage)
			if droppedLines%10 == 0 {
				select {
				case lines <- []byte(warningMessage):
				case <-time.After(100 * time.Millisecond):
					// best effort warning of dropped messages. If this doesn't succeed expediently, forget it and get back to work
				}
			}
		}
	}
	if err := scanner.Err(); err != nil {
		lb.logger.WithError(err).Errorln("Encountered error while reading from src. Proceeding.")
	}
}

// scanFullLines is a SplitFunc for a bufio.Scanner that splits at each newline and,
// unlike the the default splitter, returns the entire line with a trailing newline. This
// method is derived from bufio.ScanLines.
func scanFullLines(data []byte, atEOF bool) (advance int, token []byte, err error) {
	if atEOF && len(data) == 0 {
		return 0, nil, nil
	}
	if i := bytes.IndexByte(data, '\n'); i >= 0 {
		return i + 1, data[0 : i+1], nil
	}
	// If we're at EOF, we have a final, non-terminated line. Return it.
	if atEOF {
		return len(data), data, nil
	}
	// Request more data.
	return 0, nil, nil
}

// Tee will copy to durableWriter without dropping messages. Lines written to
// lossyWriter will be copied best effort with respect to latency on the
// writer. Writes to lossyWriter are buffered through a go channel.
func (lb *LogBridge) Tee() { tr := io.TeeReader(lb.Reader, lb.DurableWriter) lb.LossyCopy(tr, 1<<10) } // This is an error wrapper type that may be used to denote an error is retriable // RetriableError is exported so clients of this package can express their // error semantics to this package type retriableError struct { err error } func NewRetriableError(err error) retriableError { return retriableError{err} } func (r retriableError) Error() string { return r.err.Error() } func isRetriable(err error) bool { _, ok := err.(retriableError) return ok } var backoff = func(i int) time.Duration { return time.Duration(1 << uint(i) * time.Second) } func writeWithRetry(w io.Writer, line []byte, logger logging.Logger) (int, error) { var err error var n int totalAttempts := 5 for attempt := 1; attempt <= totalAttempts; attempt++ { n, err = w.Write(line) if err == nil || !isRetriable(err) { return n, err } logger.WithError(err).Errorf("Retriable error, retry %d of %d", attempt, totalAttempts) time.Sleep(backoff(attempt)) } return n, err }
package main

import (
	"flag"
	"log"
	"net/http"

	"github.com/gin-gonic/gin"
	"github.com/jalgoarena/skeleton-code-java/api"
)

// setupRouter builds the HTTP routing table: a health-check endpoint and the
// v1 skeleton-code API.
func setupRouter() *gin.Engine {
	router := gin.Default()

	// Leading slashes keep the registered paths explicit; gin joins "health"
	// onto "/" anyway, so this matches the grouped-route style below without
	// changing the served paths.
	router.GET("/health", api.HealthCheck)

	v1 := router.Group("/api/v1")
	{
		v1.GET("/code/java/:problemId", api.GetSkeletonCode)
	}

	return router
}

var (
	problemsURL string // base URL of the problems store service
	port        string // TCP port this server listens on
)

// init registers the command-line flags. Parsing is deferred to main so that
// importing this package (e.g. under `go test`) does not consume os.Args.
func init() {
	flag.StringVar(&problemsURL, "problems-url", "http://localhost:8080", "Problems store url")
	flag.StringVar(&port, "port", "8081", "Port to listen on")

	log.SetFlags(log.LstdFlags)
}

func main() {
	flag.Parse()

	api.SetupProblems(problemsURL, &http.Client{})

	router := setupRouter()
	// Run blocks until the server stops; surface bind/serve failures instead
	// of exiting silently as the previously ignored error did.
	if err := router.Run(":" + port); err != nil {
		log.Fatal(err)
	}
}
package monosize

import "fmt"

// GetFixedSize formats a file size (given as float64 so that very large sizes
// up to YottaBytes can be represented) as a fixed-width, human-friendly
// string: a 6-character value with leading spaces, one space, and a
// 2-character unit abbreviation (Base 2 scaling, "B." through "YB").
//
// The result is therefore 6+1+2 = 9 characters long unless the YottaByte
// range is exceeded. Negative inputs render as zero bytes.
func GetFixedSize(fileSize float64) string {
	units := []string{"B.", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}

	if fileSize < 0 {
		return fmt.Sprintf("%6.2f %v", float64(0), units[0])
	}

	// Scale down by 1024 until the value fits in the 6-character field or we
	// run out of units.
	unit := 0
	for fileSize > 999 && unit < len(units)-1 {
		fileSize /= 1024
		unit++
	}

	return fmt.Sprintf("%6.2f %v", fileSize, units[unit])
}
/*
 * Tencent is pleased to support the open source community by making Blueking Container Service available.,
 * Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
 * Licensed under the MIT License (the "License"); you may not use this file except
 * in compliance with the License. You may obtain a copy of the License at
 * http://opensource.org/licenses/MIT
 * Unless required by applicable law or agreed to in writing, software distributed under,
 * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
 * either express or implied. See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Package constant defines the status strings, pod label/annotation keys and
// finalizer names shared across the BCS netservice controller.
package constant

const (
	// BCSNetPoolInitializingStatus for BCSNetPool Initializing status
	BCSNetPoolInitializingStatus = "Initializing"
	// BCSNetPoolNormalStatus for BCSNetPool Normal status
	BCSNetPoolNormalStatus = "Normal"
	// BCSNetIPActiveStatus for BCSNetIP Active status
	BCSNetIPActiveStatus = "Active"
	// BCSNetIPAvailableStatus for BCSNetIP Available status
	BCSNetIPAvailableStatus = "Available"
	// BCSNetIPReservedStatus for BCSNetIP Reserved status
	BCSNetIPReservedStatus = "Reserved"
	// BCSNetIPClaimBoundedStatus for BCSNetIPClaim Bound status
	BCSNetIPClaimBoundedStatus = "Bound"
	// BCSNetIPClaimPendingStatus for BCSNetIPClaim Pending status
	BCSNetIPClaimPendingStatus = "Pending"
	// BCSNetIPClaimExpiredStatus for BCSNetIPClaim Expired status
	BCSNetIPClaimExpiredStatus = "Expired"
	// PodLabelKeyForPool pod label key for pool
	PodLabelKeyForPool = "pool"
	// PodAnnotationKeyForIPClaim pod annotation key for IP claim
	PodAnnotationKeyForIPClaim = "netservicecontroller.bkbcs.tencent.com/ipclaim"
	// FixIPLabel label key for fix ip
	FixIPLabel = "fixed-ip"
	// FinalizerNameBcsNetserviceController finalizer name of bcs netservice controller
	FinalizerNameBcsNetserviceController = "netservicecontroller.bkbcs.tencent.com"
)
// Copyright 2016 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package expression import ( goJSON "encoding/json" "fmt" "strconv" "strings" "sync/atomic" "github.com/gogo/protobuf/proto" "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/charset" "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tidb/parser/opcode" "github.com/pingcap/tidb/parser/terror" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/chunk" "github.com/pingcap/tidb/util/generatedexpr" "github.com/pingcap/tidb/util/logutil" "github.com/pingcap/tidb/util/size" "github.com/pingcap/tidb/util/zeropool" "github.com/pingcap/tipb/go-tipb" "go.uber.org/zap" ) // These are byte flags used for `HashCode()`. const ( constantFlag byte = 0 columnFlag byte = 1 scalarFunctionFlag byte = 3 parameterFlag byte = 4 ScalarSubQFlag byte = 5 ) // EvalAstExpr evaluates ast expression directly. // Note: initialized in planner/core // import expression and planner/core together to use EvalAstExpr var EvalAstExpr func(sctx sessionctx.Context, expr ast.ExprNode) (types.Datum, error) // RewriteAstExpr rewrites ast expression directly. 
// Note: initialized in planner/core
// import expression and planner/core together to use EvalAstExpr
var RewriteAstExpr func(sctx sessionctx.Context, expr ast.ExprNode, schema *Schema, names types.NameSlice, allowCastArray bool) (Expression, error)

// VecExpr contains all vectorized evaluation methods.
type VecExpr interface {
	// Vectorized returns if this expression supports vectorized evaluation.
	Vectorized() bool

	// VecEvalInt evaluates this expression in a vectorized manner.
	VecEvalInt(ctx sessionctx.Context, input *chunk.Chunk, result *chunk.Column) error

	// VecEvalReal evaluates this expression in a vectorized manner.
	VecEvalReal(ctx sessionctx.Context, input *chunk.Chunk, result *chunk.Column) error

	// VecEvalString evaluates this expression in a vectorized manner.
	VecEvalString(ctx sessionctx.Context, input *chunk.Chunk, result *chunk.Column) error

	// VecEvalDecimal evaluates this expression in a vectorized manner.
	VecEvalDecimal(ctx sessionctx.Context, input *chunk.Chunk, result *chunk.Column) error

	// VecEvalTime evaluates this expression in a vectorized manner.
	VecEvalTime(ctx sessionctx.Context, input *chunk.Chunk, result *chunk.Column) error

	// VecEvalDuration evaluates this expression in a vectorized manner.
	VecEvalDuration(ctx sessionctx.Context, input *chunk.Chunk, result *chunk.Column) error

	// VecEvalJSON evaluates this expression in a vectorized manner.
	VecEvalJSON(ctx sessionctx.Context, input *chunk.Chunk, result *chunk.Column) error
}

// ReverseExpr contains all reversed evaluation methods.
type ReverseExpr interface {
	// SupportReverseEval checks whether the builtinFunc supports reverse evaluation.
	SupportReverseEval() bool

	// ReverseEval evaluates the only one column value with the given function result.
	ReverseEval(sc *stmtctx.StatementContext, res types.Datum, rType types.RoundingType) (val types.Datum, err error)
}

// TraverseAction defines the interface for an action applied when traversing down an expression.
type TraverseAction interface {
	Transform(Expression) Expression
}

// Expression represents all scalar expressions in SQL.
type Expression interface {
	fmt.Stringer
	goJSON.Marshaler
	VecExpr
	ReverseExpr
	CollationInfo

	// Traverse applies the given TraverseAction to this expression and returns the result.
	Traverse(TraverseAction) Expression

	// Eval evaluates an expression through a row.
	Eval(row chunk.Row) (types.Datum, error)

	// EvalInt returns the int64 representation of expression.
	EvalInt(ctx sessionctx.Context, row chunk.Row) (val int64, isNull bool, err error)

	// EvalReal returns the float64 representation of expression.
	EvalReal(ctx sessionctx.Context, row chunk.Row) (val float64, isNull bool, err error)

	// EvalString returns the string representation of expression.
	EvalString(ctx sessionctx.Context, row chunk.Row) (val string, isNull bool, err error)

	// EvalDecimal returns the decimal representation of expression.
	EvalDecimal(ctx sessionctx.Context, row chunk.Row) (val *types.MyDecimal, isNull bool, err error)

	// EvalTime returns the DATE/DATETIME/TIMESTAMP representation of expression.
	EvalTime(ctx sessionctx.Context, row chunk.Row) (val types.Time, isNull bool, err error)

	// EvalDuration returns the duration representation of expression.
	EvalDuration(ctx sessionctx.Context, row chunk.Row) (val types.Duration, isNull bool, err error)

	// EvalJSON returns the JSON representation of expression.
	EvalJSON(ctx sessionctx.Context, row chunk.Row) (val types.BinaryJSON, isNull bool, err error)

	// GetType gets the type that the expression returns.
	GetType() *types.FieldType

	// Clone copies an expression totally.
	Clone() Expression

	// Equal checks whether two expressions are equal.
	Equal(ctx sessionctx.Context, e Expression) bool

	// IsCorrelated checks if this expression has correlated key.
	IsCorrelated() bool

	// ConstItem checks if this expression is constant item, regardless of query evaluation state.
	// An expression is constant item if it:
	// refers no tables.
	// refers no correlated column.
	// refers no subqueries that refers any tables.
	// refers no non-deterministic functions.
	// refers no statement parameters.
	// refers no param markers when prepare plan cache is enabled.
	ConstItem(sc *stmtctx.StatementContext) bool

	// Decorrelate try to decorrelate the expression by schema.
	Decorrelate(schema *Schema) Expression

	// ResolveIndices resolves indices by the given schema. It will copy the original expression and return the copied one.
	ResolveIndices(schema *Schema) (Expression, error)

	// resolveIndices is called inside the `ResolveIndices`. It will perform on the expression itself.
	resolveIndices(schema *Schema) error

	// ResolveIndicesByVirtualExpr resolves indices by the given schema in terms of virtual expression. It will copy the original expression and return the copied one.
	ResolveIndicesByVirtualExpr(schema *Schema) (Expression, bool)

	// resolveIndicesByVirtualExpr is called inside the `ResolveIndicesByVirtualExpr`. It will perform on the expression itself.
	resolveIndicesByVirtualExpr(schema *Schema) bool

	// RemapColumn remaps columns with provided mapping and returns new expression
	RemapColumn(map[int64]*Column) (Expression, error)

	// ExplainInfo returns operator information to be explained.
	ExplainInfo() string

	// ExplainNormalizedInfo returns operator normalized information for generating digest.
	ExplainNormalizedInfo() string

	// HashCode creates the hashcode for expression which can be used to identify itself from other expression.
	// It is generated as follows:
	// Constant: ConstantFlag+encoded value
	// Column: ColumnFlag+encoded value
	// ScalarFunction: SFFlag+encoded function name + encoded arg_1 + encoded arg_2 + ...
	HashCode(sc *stmtctx.StatementContext) []byte

	// MemoryUsage return the memory usage of Expression
	MemoryUsage() int64
}

// CNFExprs stands for a CNF expression.
type CNFExprs []Expression

// Clone clones itself.
func (e CNFExprs) Clone() CNFExprs { cnf := make(CNFExprs, 0, len(e)) for _, expr := range e { cnf = append(cnf, expr.Clone()) } return cnf } // Shallow makes a shallow copy of itself. func (e CNFExprs) Shallow() CNFExprs { cnf := make(CNFExprs, 0, len(e)) cnf = append(cnf, e...) return cnf } func isColumnInOperand(c *Column) bool { return c.InOperand } // IsEQCondFromIn checks if an expression is equal condition converted from `[not] in (subq)`. func IsEQCondFromIn(expr Expression) bool { sf, ok := expr.(*ScalarFunction) if !ok || sf.FuncName.L != ast.EQ { return false } cols := make([]*Column, 0, 1) cols = ExtractColumnsFromExpressions(cols, sf.GetArgs(), isColumnInOperand) return len(cols) > 0 } // ExprNotNull checks if an expression is possible to be null. func ExprNotNull(expr Expression) bool { if c, ok := expr.(*Constant); ok { return !c.Value.IsNull() } // For ScalarFunction, the result would not be correct until we support maintaining // NotNull flag for it. return mysql.HasNotNullFlag(expr.GetType().GetFlag()) } // HandleOverflowOnSelection handles Overflow errors when evaluating selection filters. // We should ignore overflow errors when evaluating selection conditions: // // INSERT INTO t VALUES ("999999999999999999"); // SELECT * FROM t WHERE v; func HandleOverflowOnSelection(sc *stmtctx.StatementContext, val int64, err error) (int64, error) { if sc.InSelectStmt && err != nil && types.ErrOverflow.Equal(err) { return -1, nil } return val, err } // EvalBool evaluates expression list to a boolean value. The first returned value // indicates bool result of the expression list, the second returned value indicates // whether the result of the expression list is null, it can only be true when the // first returned values is false. 
func EvalBool(ctx sessionctx.Context, exprList CNFExprs, row chunk.Row) (bool, bool, error) { hasNull := false for _, expr := range exprList { data, err := expr.Eval(row) if err != nil { return false, false, err } if data.IsNull() { // For queries like `select a in (select a from s where t.b = s.b) from t`, // if result of `t.a = s.a` is null, we cannot return immediately until // we have checked if `t.b = s.b` is null or false, because it means // subquery is empty, and we should return false as the result of the whole // exprList in that case, instead of null. if !IsEQCondFromIn(expr) { return false, false, nil } hasNull = true continue } i, err := data.ToBool(ctx.GetSessionVars().StmtCtx) if err != nil { i, err = HandleOverflowOnSelection(ctx.GetSessionVars().StmtCtx, i, err) if err != nil { return false, false, err } } if i == 0 { return false, false, nil } } if hasNull { return false, true, nil } return true, false, nil } var ( defaultChunkSize = 1024 selPool = zeropool.New[[]int](func() []int { return make([]int, defaultChunkSize) }) zeroPool = zeropool.New[[]int8](func() []int8 { return make([]int8, defaultChunkSize) }) ) func allocSelSlice(n int) []int { if n > defaultChunkSize { return make([]int, n) } return selPool.Get() } func deallocateSelSlice(sel []int) { if cap(sel) <= defaultChunkSize { selPool.Put(sel) } } func allocZeroSlice(n int) []int8 { if n > defaultChunkSize { return make([]int8, n) } return zeroPool.Get() } func deallocateZeroSlice(isZero []int8) { if cap(isZero) <= defaultChunkSize { zeroPool.Put(isZero) } } // VecEvalBool does the same thing as EvalBool but it works in a vectorized manner. func VecEvalBool(ctx sessionctx.Context, exprList CNFExprs, input *chunk.Chunk, selected, nulls []bool) ([]bool, []bool, error) { // If input.Sel() != nil, we will call input.SetSel(nil) to clear the sel slice in input chunk. // After the function finished, then we reset the input.Sel(). // The caller will handle the input.Sel() and selected slices. 
defer input.SetSel(input.Sel()) input.SetSel(nil) n := input.NumRows() selected = selected[:0] nulls = nulls[:0] for i := 0; i < n; i++ { selected = append(selected, false) nulls = append(nulls, false) } sel := allocSelSlice(n) defer deallocateSelSlice(sel) sel = sel[:0] for i := 0; i < n; i++ { sel = append(sel, i) } input.SetSel(sel) // In isZero slice, -1 means Null, 0 means zero, 1 means not zero isZero := allocZeroSlice(n) defer deallocateZeroSlice(isZero) for _, expr := range exprList { tp := expr.GetType() eType := tp.EvalType() if CanImplicitEvalReal(expr) { eType = types.ETReal } buf, err := globalColumnAllocator.get() if err != nil { return nil, nil, err } // Take the implicit evalReal path if possible. if CanImplicitEvalReal(expr) { if err := implicitEvalReal(ctx, expr, input, buf); err != nil { return nil, nil, err } } else if err := EvalExpr(ctx, expr, eType, input, buf); err != nil { return nil, nil, err } err = toBool(ctx.GetSessionVars().StmtCtx, tp, eType, buf, sel, isZero) if err != nil { return nil, nil, err } j := 0 isEQCondFromIn := IsEQCondFromIn(expr) for i := range sel { if isZero[i] == -1 { if eType != types.ETInt && !isEQCondFromIn { continue } // In this case, we set this row to null and let it pass this filter. // The null flag may be set to false later by other expressions in some cases. 
nulls[sel[i]] = true sel[j] = sel[i] j++ continue } if isZero[i] == 0 { continue } sel[j] = sel[i] // this row passes this filter j++ } sel = sel[:j] input.SetSel(sel) globalColumnAllocator.put(buf) } for _, i := range sel { if !nulls[i] { selected[i] = true } } return selected, nulls, nil } func toBool(sc *stmtctx.StatementContext, tp *types.FieldType, eType types.EvalType, buf *chunk.Column, sel []int, isZero []int8) error { switch eType { case types.ETInt: i64s := buf.Int64s() for i := range sel { if buf.IsNull(i) { isZero[i] = -1 } else { if i64s[i] == 0 { isZero[i] = 0 } else { isZero[i] = 1 } } } case types.ETReal: f64s := buf.Float64s() for i := range sel { if buf.IsNull(i) { isZero[i] = -1 } else { if f64s[i] == 0 { isZero[i] = 0 } else { isZero[i] = 1 } } } case types.ETDuration: d64s := buf.GoDurations() for i := range sel { if buf.IsNull(i) { isZero[i] = -1 } else { if d64s[i] == 0 { isZero[i] = 0 } else { isZero[i] = 1 } } } case types.ETDatetime, types.ETTimestamp: t64s := buf.Times() for i := range sel { if buf.IsNull(i) { isZero[i] = -1 } else { if t64s[i].IsZero() { isZero[i] = 0 } else { isZero[i] = 1 } } } case types.ETString: for i := range sel { if buf.IsNull(i) { isZero[i] = -1 } else { var fVal float64 var err error sVal := buf.GetString(i) if tp.Hybrid() { switch tp.GetType() { case mysql.TypeSet, mysql.TypeEnum: fVal = float64(len(sVal)) if fVal == 0 { // The elements listed in the column specification are assigned index numbers, beginning // with 1. The index value of the empty string error value (distinguish from a "normal" // empty string) is 0. Thus we need to check whether it's an empty string error value when // `fVal==0`. 
for idx, elem := range tp.GetElems() { if elem == sVal { fVal = float64(idx + 1) break } } } case mysql.TypeBit: var bl types.BinaryLiteral = buf.GetBytes(i) iVal, err := bl.ToInt(sc) if err != nil { return err } fVal = float64(iVal) } } else { fVal, err = types.StrToFloat(sc, sVal, false) if err != nil { return err } } if fVal == 0 { isZero[i] = 0 } else { isZero[i] = 1 } } } case types.ETDecimal: d64s := buf.Decimals() for i := range sel { if buf.IsNull(i) { isZero[i] = -1 } else { if d64s[i].IsZero() { isZero[i] = 0 } else { isZero[i] = 1 } } } case types.ETJson: for i := range sel { if buf.IsNull(i) { isZero[i] = -1 } else { if buf.GetJSON(i).IsZero() { isZero[i] = 0 } else { isZero[i] = 1 } } } } return nil } func implicitEvalReal(ctx sessionctx.Context, expr Expression, input *chunk.Chunk, result *chunk.Column) (err error) { if expr.Vectorized() && ctx.GetSessionVars().EnableVectorizedExpression { err = expr.VecEvalReal(ctx, input, result) } else { ind, n := 0, input.NumRows() iter := chunk.NewIterator4Chunk(input) result.ResizeFloat64(n, false) f64s := result.Float64s() for it := iter.Begin(); it != iter.End(); it = iter.Next() { value, isNull, err := expr.EvalReal(ctx, it) if err != nil { return err } if isNull { result.SetNull(ind, isNull) } else { f64s[ind] = value } ind++ } } return } // EvalExpr evaluates this expr according to its type. // And it selects the method for evaluating expression based on // the environment variables and whether the expression can be vectorized. // Note: the input argument `evalType` is needed because of that when `expr` is // of the hybrid type(ENUM/SET/BIT), we need the invoker decide the actual EvalType. 
func EvalExpr(ctx sessionctx.Context, expr Expression, evalType types.EvalType, input *chunk.Chunk, result *chunk.Column) (err error) { if expr.Vectorized() && ctx.GetSessionVars().EnableVectorizedExpression { switch evalType { case types.ETInt: err = expr.VecEvalInt(ctx, input, result) case types.ETReal: err = expr.VecEvalReal(ctx, input, result) case types.ETDuration: err = expr.VecEvalDuration(ctx, input, result) case types.ETDatetime, types.ETTimestamp: err = expr.VecEvalTime(ctx, input, result) case types.ETString: err = expr.VecEvalString(ctx, input, result) case types.ETJson: err = expr.VecEvalJSON(ctx, input, result) case types.ETDecimal: err = expr.VecEvalDecimal(ctx, input, result) default: err = fmt.Errorf("invalid eval type %v", expr.GetType().EvalType()) } } else { ind, n := 0, input.NumRows() iter := chunk.NewIterator4Chunk(input) switch evalType { case types.ETInt: result.ResizeInt64(n, false) i64s := result.Int64s() for it := iter.Begin(); it != iter.End(); it = iter.Next() { value, isNull, err := expr.EvalInt(ctx, it) if err != nil { return err } if isNull { result.SetNull(ind, isNull) } else { i64s[ind] = value } ind++ } case types.ETReal: result.ResizeFloat64(n, false) f64s := result.Float64s() for it := iter.Begin(); it != iter.End(); it = iter.Next() { value, isNull, err := expr.EvalReal(ctx, it) if err != nil { return err } if isNull { result.SetNull(ind, isNull) } else { f64s[ind] = value } ind++ } case types.ETDuration: result.ResizeGoDuration(n, false) d64s := result.GoDurations() for it := iter.Begin(); it != iter.End(); it = iter.Next() { value, isNull, err := expr.EvalDuration(ctx, it) if err != nil { return err } if isNull { result.SetNull(ind, isNull) } else { d64s[ind] = value.Duration } ind++ } case types.ETDatetime, types.ETTimestamp: result.ResizeTime(n, false) t64s := result.Times() for it := iter.Begin(); it != iter.End(); it = iter.Next() { value, isNull, err := expr.EvalTime(ctx, it) if err != nil { return err } if isNull { 
result.SetNull(ind, isNull) } else { t64s[ind] = value } ind++ } case types.ETString: result.ReserveString(n) for it := iter.Begin(); it != iter.End(); it = iter.Next() { value, isNull, err := expr.EvalString(ctx, it) if err != nil { return err } if isNull { result.AppendNull() } else { result.AppendString(value) } } case types.ETJson: result.ReserveJSON(n) for it := iter.Begin(); it != iter.End(); it = iter.Next() { value, isNull, err := expr.EvalJSON(ctx, it) if err != nil { return err } if isNull { result.AppendNull() } else { result.AppendJSON(value) } } case types.ETDecimal: result.ResizeDecimal(n, false) d64s := result.Decimals() for it := iter.Begin(); it != iter.End(); it = iter.Next() { value, isNull, err := expr.EvalDecimal(ctx, it) if err != nil { return err } if isNull { result.SetNull(ind, isNull) } else { d64s[ind] = *value } ind++ } default: err = fmt.Errorf("invalid eval type %v", expr.GetType().EvalType()) } } return } // composeConditionWithBinaryOp composes condition with binary operator into a balance deep tree, which benefits a lot for pb decoder/encoder. func composeConditionWithBinaryOp(ctx sessionctx.Context, conditions []Expression, funcName string) Expression { length := len(conditions) if length == 0 { return nil } if length == 1 { return conditions[0] } expr := NewFunctionInternal(ctx, funcName, types.NewFieldType(mysql.TypeTiny), composeConditionWithBinaryOp(ctx, conditions[:length/2], funcName), composeConditionWithBinaryOp(ctx, conditions[length/2:], funcName)) return expr } // ComposeCNFCondition composes CNF items into a balance deep CNF tree, which benefits a lot for pb decoder/encoder. func ComposeCNFCondition(ctx sessionctx.Context, conditions ...Expression) Expression { return composeConditionWithBinaryOp(ctx, conditions, ast.LogicAnd) } // ComposeDNFCondition composes DNF items into a balance deep DNF tree. 
// ComposeDNFCondition composes DNF items into a balance deep DNF tree.
func ComposeDNFCondition(ctx sessionctx.Context, conditions ...Expression) Expression {
	return composeConditionWithBinaryOp(ctx, conditions, ast.LogicOr)
}

// extractBinaryOpItems recursively flattens nested applications of funcName
// (e.g. LogicAnd/LogicOr) under conditions into a flat list of leaf expressions.
func extractBinaryOpItems(conditions *ScalarFunction, funcName string) []Expression {
	var ret []Expression
	for _, arg := range conditions.GetArgs() {
		if sf, ok := arg.(*ScalarFunction); ok && sf.FuncName.L == funcName {
			// Same operator: descend and splice its leaves in.
			ret = append(ret, extractBinaryOpItems(sf, funcName)...)
		} else {
			ret = append(ret, arg)
		}
	}
	return ret
}

// FlattenDNFConditions extracts DNF expression's leaf item.
// e.g. or(or(a=1, a=2), or(a=3, a=4)), we'll get [a=1, a=2, a=3, a=4].
func FlattenDNFConditions(DNFCondition *ScalarFunction) []Expression {
	return extractBinaryOpItems(DNFCondition, ast.LogicOr)
}

// FlattenCNFConditions extracts CNF expression's leaf item.
// e.g. and(and(a>1, a>2), and(a>3, a>4)), we'll get [a>1, a>2, a>3, a>4].
func FlattenCNFConditions(CNFCondition *ScalarFunction) []Expression {
	return extractBinaryOpItems(CNFCondition, ast.LogicAnd)
}

// Assignment represents a set assignment in Update, such as
// Update t set c1 = hex(12), c2 = c3 where c2 = 1
type Assignment struct {
	Col *Column
	// ColName indicates its original column name in table schema. It's used for outputting helping message when executing meets some errors.
	ColName model.CIStr
	Expr    Expression
	// LazyErr is used in statement like `INSERT INTO t1 (a) VALUES (1) ON DUPLICATE KEY UPDATE a= (SELECT b FROM source);`, ErrSubqueryMoreThan1Row
	// should be evaluated after the duplicate situation is detected in the executing procedure.
	LazyErr error
}

// MemoryUsage return the memory usage of Assignment
func (a *Assignment) MemoryUsage() (sum int64) {
	if a == nil {
		return
	}

	sum = size.SizeOfPointer + a.ColName.MemoryUsage() + size.SizeOfInterface*2
	if a.Expr != nil {
		sum += a.Expr.MemoryUsage()
	}
	return
}

// VarAssignment represents a variable assignment in Set, such as set global a = 1.
type VarAssignment struct { Name string Expr Expression IsDefault bool IsGlobal bool IsSystem bool ExtendValue *Constant } // splitNormalFormItems split CNF(conjunctive normal form) like "a and b and c", or DNF(disjunctive normal form) like "a or b or c" func splitNormalFormItems(onExpr Expression, funcName string) []Expression { //nolint: revive switch v := onExpr.(type) { case *ScalarFunction: if v.FuncName.L == funcName { var ret []Expression for _, arg := range v.GetArgs() { ret = append(ret, splitNormalFormItems(arg, funcName)...) } return ret } } return []Expression{onExpr} } // SplitCNFItems splits CNF items. // CNF means conjunctive normal form, e.g. "a and b and c". func SplitCNFItems(onExpr Expression) []Expression { return splitNormalFormItems(onExpr, ast.LogicAnd) } // SplitDNFItems splits DNF items. // DNF means disjunctive normal form, e.g. "a or b or c". func SplitDNFItems(onExpr Expression) []Expression { return splitNormalFormItems(onExpr, ast.LogicOr) } // EvaluateExprWithNull sets columns in schema as null and calculate the final result of the scalar function. // If the Expression is a non-constant value, it means the result is unknown. func EvaluateExprWithNull(ctx sessionctx.Context, schema *Schema, expr Expression) Expression { if MaybeOverOptimized4PlanCache(ctx, []Expression{expr}) { ctx.GetSessionVars().StmtCtx.SetSkipPlanCache(errors.New("%v affects null check")) } if ctx.GetSessionVars().StmtCtx.InNullRejectCheck { expr, _ = evaluateExprWithNullInNullRejectCheck(ctx, schema, expr) return expr } return evaluateExprWithNull(ctx, schema, expr) } func evaluateExprWithNull(ctx sessionctx.Context, schema *Schema, expr Expression) Expression { switch x := expr.(type) { case *ScalarFunction: args := make([]Expression, len(x.GetArgs())) for i, arg := range x.GetArgs() { args[i] = evaluateExprWithNull(ctx, schema, arg) } return NewFunctionInternal(ctx, x.FuncName.L, x.RetType.Clone(), args...) 
case *Column: if !schema.Contains(x) { return x } return &Constant{Value: types.Datum{}, RetType: types.NewFieldType(mysql.TypeNull)} case *Constant: if x.DeferredExpr != nil { return FoldConstant(x) } } return expr } // evaluateExprWithNullInNullRejectCheck sets columns in schema as null and calculate the final result of the scalar function. // If the Expression is a non-constant value, it means the result is unknown. // The returned bool values indicates whether the value is influenced by the Null Constant transformed from schema column // when the value is Null Constant. func evaluateExprWithNullInNullRejectCheck(ctx sessionctx.Context, schema *Schema, expr Expression) (Expression, bool) { switch x := expr.(type) { case *ScalarFunction: args := make([]Expression, len(x.GetArgs())) nullFromSets := make([]bool, len(x.GetArgs())) for i, arg := range x.GetArgs() { args[i], nullFromSets[i] = evaluateExprWithNullInNullRejectCheck(ctx, schema, arg) } allArgsNullFromSet := true for i := range args { if cons, ok := args[i].(*Constant); ok && cons.Value.IsNull() && !nullFromSets[i] { allArgsNullFromSet = false break } } // If one of the args of `AND` and `OR` are Null Constant from the column schema, and the another argument is Constant, then we should keep it. // Otherwise, we shouldn't let Null Constant which affected by the column schema participate in computing in `And` and `OR` // due to the result of `AND` and `OR` are uncertain if one of the arguments is NULL. 
if x.FuncName.L == ast.LogicAnd || x.FuncName.L == ast.LogicOr { hasNonConstantArg := false for _, arg := range args { if _, ok := arg.(*Constant); !ok { hasNonConstantArg = true break } } if hasNonConstantArg { for i := range args { if cons, ok := args[i].(*Constant); ok && cons.Value.IsNull() && nullFromSets[i] { if x.FuncName.L == ast.LogicAnd { args[i] = NewOne() break } if x.FuncName.L == ast.LogicOr { args[i] = NewZero() break } } } } } c := NewFunctionInternal(ctx, x.FuncName.L, x.RetType.Clone(), args...) cons, ok := c.(*Constant) // If the return expr is Null Constant, and all the Null Constant arguments are affected by column schema, // then we think the result Null Constant is also affected by the column schema return c, ok && cons.Value.IsNull() && allArgsNullFromSet case *Column: if !schema.Contains(x) { return x, false } return &Constant{Value: types.Datum{}, RetType: types.NewFieldType(mysql.TypeNull)}, true case *Constant: if x.DeferredExpr != nil { return FoldConstant(x), false } } return expr, false } // TableInfo2SchemaAndNames converts the TableInfo to the schema and name slice. 
func TableInfo2SchemaAndNames(ctx sessionctx.Context, dbName model.CIStr, tbl *model.TableInfo) (*Schema, []*types.FieldName, error) {
	cols, names, err := ColumnInfos2ColumnsAndNames(ctx, dbName, tbl.Name, tbl.Cols(), tbl)
	if err != nil {
		return nil, nil, err
	}
	// Collect unique keys: every public unique index whose columns are all
	// NOT NULL, plus the integer primary key when it is the row handle.
	keys := make([]KeyInfo, 0, len(tbl.Indices)+1)
	for _, idx := range tbl.Indices {
		if !idx.Unique || idx.State != model.StatePublic {
			continue
		}
		ok := true
		newKey := make([]*Column, 0, len(idx.Columns))
		for _, idxCol := range idx.Columns {
			find := false
			for i, col := range tbl.Columns {
				if idxCol.Name.L == col.Name.L {
					// A nullable column disqualifies the index as a unique key.
					if !mysql.HasNotNullFlag(col.GetFlag()) {
						break
					}
					newKey = append(newKey, cols[i])
					find = true
					break
				}
			}
			if !find {
				ok = false
				break
			}
		}
		if ok {
			keys = append(keys, newKey)
		}
	}
	if tbl.PKIsHandle {
		for i, col := range tbl.Columns {
			if mysql.HasPriKeyFlag(col.GetFlag()) {
				keys = append(keys, KeyInfo{cols[i]})
				break
			}
		}
	}
	schema := NewSchema(cols...)
	schema.SetUniqueKeys(keys)
	return schema, names, nil
}

// ColumnInfos2ColumnsAndNames converts the ColumnInfo to the *Column and NameSlice.
func ColumnInfos2ColumnsAndNames(ctx sessionctx.Context, dbName, tblName model.CIStr, colInfos []*model.ColumnInfo, tblInfo *model.TableInfo) ([]*Column, types.NameSlice, error) {
	columns := make([]*Column, 0, len(colInfos))
	names := make([]*types.FieldName, 0, len(colInfos))
	for i, col := range colInfos {
		names = append(names, &types.FieldName{
			OrigTblName: tblName,
			OrigColName: col.Name,
			DBName:      dbName,
			TblName:     tblName,
			ColName:     col.Name,
		})
		newCol := &Column{
			RetType:  col.FieldType.Clone(),
			ID:       col.ID,
			UniqueID: ctx.GetSessionVars().AllocPlanColumnID(),
			Index:    col.Offset,
			OrigName: names[i].String(),
			IsHidden: col.Hidden,
		}
		columns = append(columns, newCol)
	}
	// Resolve virtual generated column.
	mockSchema := NewSchema(columns...)

	// Ignore redundant warning here.
	// Truncation warnings are suppressed while rewriting generated-column
	// expressions; the previous setting is restored on return.
	save := ctx.GetSessionVars().StmtCtx.IgnoreTruncate.Load()
	defer func() {
		ctx.GetSessionVars().StmtCtx.IgnoreTruncate.Store(save)
	}()
	ctx.GetSessionVars().StmtCtx.IgnoreTruncate.Store(true)
	for i, col := range colInfos {
		if col.IsGenerated() && !col.GeneratedStored {
			// Parse the generated expression, resolve its names against the
			// table, rewrite it to an Expression and bind column indices.
			expr, err := generatedexpr.ParseExpression(col.GeneratedExprString)
			if err != nil {
				return nil, nil, errors.Trace(err)
			}
			expr, err = generatedexpr.SimpleResolveName(expr, tblInfo)
			if err != nil {
				return nil, nil, errors.Trace(err)
			}
			e, err := RewriteAstExpr(ctx, expr, mockSchema, names, true)
			if err != nil {
				return nil, nil, errors.Trace(err)
			}
			if e != nil {
				columns[i].VirtualExpr = e.Clone()
			}
			columns[i].VirtualExpr, err = columns[i].VirtualExpr.ResolveIndices(mockSchema)
			if err != nil {
				return nil, nil, errors.Trace(err)
			}
		}
	}
	return columns, names, nil
}

// NewValuesFunc creates a new values function.
func NewValuesFunc(ctx sessionctx.Context, offset int, retTp *types.FieldType) *ScalarFunction {
	fc := &valuesFunctionClass{baseFunctionClass{ast.Values, 0, 0}, offset, retTp}
	bt, err := fc.getFunction(ctx, nil)
	// NOTE(review): the error is only logged, never propagated — presumably
	// getFunction cannot fail for VALUES; confirm before relying on it.
	terror.Log(err)
	return &ScalarFunction{
		FuncName: model.NewCIStr(ast.Values),
		RetType:  retTp,
		Function: bt,
	}
}

// IsBinaryLiteral checks whether an expression is a binary literal
func IsBinaryLiteral(expr Expression) bool {
	con, ok := expr.(*Constant)
	return ok && con.Value.Kind() == types.KindBinaryLiteral
}

// supported functions tracked by https://github.com/tikv/tikv/issues/5751
func scalarExprSupportedByTiKV(sf *ScalarFunction) bool {
	switch sf.FuncName.L {
	case
		// op functions.
		ast.LogicAnd, ast.LogicOr, ast.LogicXor, ast.UnaryNot, ast.And, ast.Or, ast.Xor, ast.BitNeg, ast.LeftShift, ast.RightShift, ast.UnaryMinus,

		// compare functions.
		ast.LT, ast.LE, ast.EQ, ast.NE, ast.GE, ast.GT, ast.NullEQ, ast.In, ast.IsNull, ast.Like, ast.IsTruthWithoutNull, ast.IsTruthWithNull, ast.IsFalsity,
		// ast.Greatest, ast.Least, ast.Interval

		// arithmetical functions.
		ast.PI, /* ast.Truncate */
		ast.Plus, ast.Minus, ast.Mul, ast.Div, ast.Abs, ast.Mod,

		// math functions.
		ast.Ceil, ast.Ceiling, ast.Floor, ast.Sqrt, ast.Sign, ast.Ln, ast.Log, ast.Log2, ast.Log10, ast.Exp, ast.Pow,

		// Rust use the llvm math functions, which have different precision with Golang/MySQL(cmath)
		// open the following switchers if we implement them in coprocessor via `cmath`
		ast.Sin, ast.Asin, ast.Cos, ast.Acos /* ast.Tan */, ast.Atan, ast.Atan2, ast.Cot,
		ast.Radians, ast.Degrees, ast.Conv, ast.CRC32,

		// control flow functions.
		ast.Case, ast.If, ast.Ifnull, ast.Coalesce,

		// string functions.
		// ast.Bin, ast.Unhex, ast.Locate, ast.Ord, ast.Lpad, ast.Rpad,
		// ast.Trim, ast.FromBase64, ast.ToBase64, ast.Upper, ast.Lower, ast.InsertFunc,
		// ast.MakeSet, ast.SubstringIndex, ast.Instr, ast.Quote, ast.Oct,
		// ast.FindInSet, ast.Repeat,
		ast.Length, ast.BitLength, ast.Concat, ast.ConcatWS, ast.Replace, ast.ASCII, ast.Hex,
		ast.Reverse, ast.LTrim, ast.RTrim, ast.Strcmp, ast.Space, ast.Elt, ast.Field,
		InternalFuncFromBinary, InternalFuncToBinary, ast.Mid, ast.Substring, ast.Substr, ast.CharLength,
		ast.Right, /* ast.Left */

		// json functions.
		ast.JSONType, ast.JSONExtract, ast.JSONObject, ast.JSONArray, ast.JSONMerge, ast.JSONSet,
		ast.JSONInsert /*ast.JSONReplace,*/, ast.JSONRemove, ast.JSONLength, ast.JSONUnquote,
		ast.JSONContains, ast.JSONValid, ast.JSONMemberOf,

		// date functions.
		ast.Date, ast.Week /* ast.YearWeek, ast.ToSeconds */, ast.DateDiff,
		/* ast.TimeDiff, ast.AddTime, ast.SubTime, */
		ast.MonthName, ast.MakeDate, ast.TimeToSec, ast.MakeTime,
		ast.DateFormat,
		ast.Hour, ast.Minute, ast.Second, ast.MicroSecond, ast.Month,
		/* ast.DayName */ ast.DayOfMonth, ast.DayOfWeek, ast.DayOfYear,
		/* ast.Weekday */ ast.WeekOfYear, ast.Year,
		ast.FromDays, /* ast.ToDays */
		ast.PeriodAdd, ast.PeriodDiff, /*ast.TimestampDiff, ast.DateAdd, ast.FromUnixTime,*/
		/* ast.LastDay */
		ast.Sysdate,

		// encryption functions.
		ast.MD5, ast.SHA1, ast.UncompressedLength,

		ast.Cast,

		// misc functions.
		// TODO(#26942): enable functions below after them are fully tested in TiKV.
		/*ast.InetNtoa, ast.InetAton, ast.Inet6Ntoa, ast.Inet6Aton,
		ast.IsIPv4, ast.IsIPv4Compat, ast.IsIPv4Mapped, ast.IsIPv6,*/
		ast.UUID:

		return true
	// Functions below are pushed down only for specific signatures.
	case ast.Round:
		switch sf.Function.PbCode() {
		case tipb.ScalarFuncSig_RoundReal, tipb.ScalarFuncSig_RoundInt, tipb.ScalarFuncSig_RoundDec:
			// We don't push round with frac due to mysql's round with frac has its special behavior:
			// https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html#function_round
			return true
		}
	case ast.Rand:
		switch sf.Function.PbCode() {
		case tipb.ScalarFuncSig_RandWithSeedFirstGen:
			return true
		}
	case ast.Regexp, ast.RegexpLike, ast.RegexpSubstr, ast.RegexpInStr, ast.RegexpReplace:
		// Binary charset/collation regexps are not pushed down.
		funcCharset, funcCollation := sf.Function.CharsetAndCollation()
		if funcCharset == charset.CharsetBin && funcCollation == charset.CollationBin {
			return false
		}
		return true
	}
	return false
}

// canEnumPushdownPreliminarily reports whether an enum argument may be pushed
// down for this function: only CAST to int/real/decimal qualifies.
func canEnumPushdownPreliminarily(scalarFunc *ScalarFunction) bool {
	switch scalarFunc.FuncName.L {
	case ast.Cast:
		return scalarFunc.RetType.EvalType() == types.ETInt || scalarFunc.RetType.EvalType() == types.ETReal || scalarFunc.RetType.EvalType() == types.ETDecimal
	default:
		return false
	}
}

// scalarExprSupportedByFlash reports whether the scalar function can be
// pushed down to TiFlash.
func scalarExprSupportedByFlash(function *ScalarFunction) bool {
	switch function.FuncName.L {
	case ast.Floor, ast.Ceil, ast.Ceiling:
		switch function.Function.PbCode() {
		case tipb.ScalarFuncSig_FloorIntToDec, tipb.ScalarFuncSig_CeilIntToDec:
			return false
		default:
			return true
		}
	case ast.LogicOr, ast.LogicAnd, ast.UnaryNot, ast.BitNeg, ast.Xor, ast.And, ast.Or, ast.RightShift, ast.LeftShift,
		ast.GE, ast.LE, ast.EQ, ast.NE, ast.LT, ast.GT, ast.In, ast.IsNull, ast.Like, ast.Ilike, ast.Strcmp,
		ast.Plus, ast.Minus, ast.Div, ast.Mul, ast.Abs, ast.Mod,
		ast.If, ast.Ifnull, ast.Case,
		ast.Concat, ast.ConcatWS,
		ast.Date, ast.Year, ast.Month, ast.Day, ast.Quarter, ast.DayName, ast.MonthName,
		ast.DateDiff,
		ast.TimestampDiff, ast.DateFormat, ast.FromUnixTime,
		ast.DayOfWeek, ast.DayOfMonth, ast.DayOfYear, ast.LastDay, ast.WeekOfYear, ast.ToSeconds,
		ast.FromDays, ast.ToDays,
		ast.Sqrt, ast.Log, ast.Log2, ast.Log10, ast.Ln, ast.Exp, ast.Pow, ast.Sign,
		ast.Radians, ast.Degrees, ast.Conv, ast.CRC32,
		ast.JSONLength, ast.JSONExtract, ast.JSONUnquote,
		ast.Repeat, ast.InetNtoa, ast.InetAton, ast.Inet6Ntoa, ast.Inet6Aton,
		ast.Coalesce, ast.ASCII, ast.Length, ast.Trim, ast.Position, ast.Format, ast.Elt,
		ast.LTrim, ast.RTrim, ast.Lpad, ast.Rpad,
		ast.Hour, ast.Minute, ast.Second, ast.MicroSecond,
		ast.TimeToSec:
		// Duration-typed signatures of these functions are excluded.
		switch function.Function.PbCode() {
		case tipb.ScalarFuncSig_InDuration,
			tipb.ScalarFuncSig_CoalesceDuration,
			tipb.ScalarFuncSig_IfNullDuration,
			tipb.ScalarFuncSig_IfDuration,
			tipb.ScalarFuncSig_CaseWhenDuration:
			return false
		case tipb.ScalarFuncSig_JsonUnquoteSig:
			// TiFlash json_unquote now only supports json string generated by cast(json as string)
			if childFunc, ok := function.GetArgs()[0].(*ScalarFunction); ok {
				return childFunc.Function.PbCode() == tipb.ScalarFuncSig_CastJsonAsString
			}
			return false
		}
		return true
	case ast.Regexp, ast.RegexpLike, ast.RegexpInStr, ast.RegexpSubstr, ast.RegexpReplace:
		// Binary charset/collation regexps are not pushed down.
		funcCharset, funcCollation := function.Function.CharsetAndCollation()
		if funcCharset == charset.CharsetBin && funcCollation == charset.CollationBin {
			return false
		}
		return true
	case ast.Substr, ast.Substring, ast.Left, ast.Right, ast.CharLength, ast.SubstringIndex, ast.Reverse:
		switch function.Function.PbCode() {
		case tipb.ScalarFuncSig_LeftUTF8,
			tipb.ScalarFuncSig_RightUTF8,
			tipb.ScalarFuncSig_CharLengthUTF8,
			tipb.ScalarFuncSig_Substring2ArgsUTF8,
			tipb.ScalarFuncSig_Substring3ArgsUTF8,
			tipb.ScalarFuncSig_SubstringIndex,
			tipb.ScalarFuncSig_ReverseUTF8,
			tipb.ScalarFuncSig_Reverse:
			return true
		}
	case ast.Cast:
		sourceType := function.GetArgs()[0].GetType()
		retType := function.RetType
		switch function.Function.PbCode() {
		case tipb.ScalarFuncSig_CastDecimalAsInt,
			tipb.ScalarFuncSig_CastIntAsInt, tipb.ScalarFuncSig_CastRealAsInt, tipb.ScalarFuncSig_CastTimeAsInt,
			tipb.ScalarFuncSig_CastStringAsInt /*, tipb.ScalarFuncSig_CastDurationAsInt, tipb.ScalarFuncSig_CastJsonAsInt*/ :
			// TiFlash cast only support cast to Int64 or the source type is the same as the target type
			return (sourceType.GetType() == retType.GetType() && mysql.HasUnsignedFlag(sourceType.GetFlag()) == mysql.HasUnsignedFlag(retType.GetFlag())) || retType.GetType() == mysql.TypeLonglong
		case tipb.ScalarFuncSig_CastIntAsReal, tipb.ScalarFuncSig_CastRealAsReal, tipb.ScalarFuncSig_CastStringAsReal,
			tipb.ScalarFuncSig_CastTimeAsReal: /*, tipb.ScalarFuncSig_CastDecimalAsReal,
			tipb.ScalarFuncSig_CastDurationAsReal, tipb.ScalarFuncSig_CastJsonAsReal*/
			// TiFlash cast only support cast to Float64 or the source type is the same as the target type
			return sourceType.GetType() == retType.GetType() || retType.GetType() == mysql.TypeDouble
		case tipb.ScalarFuncSig_CastDecimalAsDecimal, tipb.ScalarFuncSig_CastIntAsDecimal, tipb.ScalarFuncSig_CastRealAsDecimal,
			tipb.ScalarFuncSig_CastTimeAsDecimal,
			tipb.ScalarFuncSig_CastStringAsDecimal /*, tipb.ScalarFuncSig_CastDurationAsDecimal, tipb.ScalarFuncSig_CastJsonAsDecimal*/ :
			return function.RetType.IsDecimalValid()
		case tipb.ScalarFuncSig_CastDecimalAsString, tipb.ScalarFuncSig_CastIntAsString, tipb.ScalarFuncSig_CastRealAsString,
			tipb.ScalarFuncSig_CastTimeAsString, tipb.ScalarFuncSig_CastStringAsString,
			tipb.ScalarFuncSig_CastJsonAsString /*, tipb.ScalarFuncSig_CastDurationAsString*/ :
			return true
		case tipb.ScalarFuncSig_CastDecimalAsTime, tipb.ScalarFuncSig_CastIntAsTime, tipb.ScalarFuncSig_CastRealAsTime,
			tipb.ScalarFuncSig_CastTimeAsTime, tipb.ScalarFuncSig_CastStringAsTime /*, tipb.ScalarFuncSig_CastDurationAsTime, tipb.ScalarFuncSig_CastJsonAsTime*/ :
			// ban the function of casting year type as time type pushing down to tiflash because of https://github.com/pingcap/tidb/issues/26215
			return function.GetArgs()[0].GetType().GetType() != mysql.TypeYear
		case tipb.ScalarFuncSig_CastTimeAsDuration:
			return retType.GetType() == mysql.TypeDuration
		}
	case ast.DateAdd, ast.AddDate:
		switch function.Function.PbCode() {
		case tipb.ScalarFuncSig_AddDateDatetimeInt, tipb.ScalarFuncSig_AddDateStringInt, tipb.ScalarFuncSig_AddDateStringReal:
			return true
		}
	case ast.DateSub, ast.SubDate:
		switch function.Function.PbCode() {
		case tipb.ScalarFuncSig_SubDateDatetimeInt, tipb.ScalarFuncSig_SubDateStringInt, tipb.ScalarFuncSig_SubDateStringReal:
			return true
		}
	case ast.UnixTimestamp:
		switch function.Function.PbCode() {
		case tipb.ScalarFuncSig_UnixTimestampInt, tipb.ScalarFuncSig_UnixTimestampDec:
			return true
		}
	case ast.Round:
		switch function.Function.PbCode() {
		case tipb.ScalarFuncSig_RoundInt, tipb.ScalarFuncSig_RoundReal, tipb.ScalarFuncSig_RoundDec,
			tipb.ScalarFuncSig_RoundWithFracInt, tipb.ScalarFuncSig_RoundWithFracReal, tipb.ScalarFuncSig_RoundWithFracDec:
			return true
		}
	case ast.Extract:
		switch function.Function.PbCode() {
		case tipb.ScalarFuncSig_ExtractDatetime, tipb.ScalarFuncSig_ExtractDuration:
			return true
		}
	case ast.Replace:
		switch function.Function.PbCode() {
		case tipb.ScalarFuncSig_Replace:
			return true
		}
	case ast.StrToDate:
		switch function.Function.PbCode() {
		case tipb.ScalarFuncSig_StrToDateDate, tipb.ScalarFuncSig_StrToDateDatetime:
			return true
		default:
			return false
		}
	case ast.Upper, ast.Ucase, ast.Lower, ast.Lcase, ast.Space:
		return true
	case ast.Sysdate:
		return true
	case ast.Least, ast.Greatest:
		switch function.Function.PbCode() {
		case tipb.ScalarFuncSig_GreatestInt, tipb.ScalarFuncSig_GreatestReal,
			tipb.ScalarFuncSig_LeastInt, tipb.ScalarFuncSig_LeastReal, tipb.ScalarFuncSig_LeastString, tipb.ScalarFuncSig_GreatestString:
			return true
		}
	case ast.IsTruthWithNull, ast.IsTruthWithoutNull, ast.IsFalsity:
		return true
	case ast.Hex, ast.Unhex, ast.Bin:
		return true
	case ast.GetFormat:
		return true
	case ast.IsIPv4, ast.IsIPv6:
		return true
	case ast.Grouping: // grouping function for grouping sets identification.
		return true
	}
	return false
}

// scalarExprSupportedByTiDB reports whether the expression can be evaluated
// by the TiDB "store" (coprocessor running inside TiDB itself).
func scalarExprSupportedByTiDB(function *ScalarFunction) bool {
	// TiDB can support all functions, but TiPB may not include some functions.
	return scalarExprSupportedByTiKV(function) || scalarExprSupportedByFlash(function)
}

// canFuncBePushed reports whether sf can be pushed down to storeType,
// consulting both per-store support and the push-down blacklist.
func canFuncBePushed(sf *ScalarFunction, storeType kv.StoreType) bool {
	// Use the failpoint to control whether to push down an expression in the integration test.
	// Push down all expression if the `failpoint expression` is `all`, otherwise, check
	// whether scalar function's name is contained in the enabled expression list (e.g.`ne,eq,lt`).
	// If neither of the above is true, switch to original logic.
	failpoint.Inject("PushDownTestSwitcher", func(val failpoint.Value) {
		enabled := val.(string)
		if enabled == "all" {
			failpoint.Return(true)
		}
		exprs := strings.Split(enabled, ",")
		for _, expr := range exprs {
			if strings.ToLower(strings.TrimSpace(expr)) == sf.FuncName.L {
				failpoint.Return(true)
			}
		}
		failpoint.Return(false)
	})

	ret := false

	switch storeType {
	case kv.TiFlash:
		ret = scalarExprSupportedByFlash(sf)
	case kv.TiKV:
		ret = scalarExprSupportedByTiKV(sf)
	case kv.TiDB:
		ret = scalarExprSupportedByTiDB(sf)
	case kv.UnSpecified:
		ret = scalarExprSupportedByTiDB(sf) || scalarExprSupportedByTiKV(sf) || scalarExprSupportedByFlash(sf)
	}

	if ret {
		ret = IsPushDownEnabled(sf.FuncName.L, storeType)
	}
	return ret
}

// storeTypeMask converts a store type into its bit in the blacklist mask.
func storeTypeMask(storeType kv.StoreType) uint32 {
	if storeType == kv.UnSpecified {
		return 1<<kv.TiKV | 1<<kv.TiFlash | 1<<kv.TiDB
	}
	return 1 << storeType
}

// IsPushDownEnabled returns true if the input expr is not in the expr_pushdown_blacklist
func IsPushDownEnabled(name string, storeType kv.StoreType) bool {
	value, exists := DefaultExprPushDownBlacklist.Load().(map[string]uint32)[name]
	if exists {
		mask := storeTypeMask(storeType)
		// Blacklisted only when every bit of the store mask is set.
		return !(value&mask == mask)
	}

	if storeType != kv.TiFlash && name == ast.AggFuncApproxCountDistinct {
		// Can not push down approx_count_distinct to other store except tiflash by now.
		return false
	}

	return true
}

// DefaultExprPushDownBlacklist indicates the expressions which can not be pushed down to TiKV.
var DefaultExprPushDownBlacklist *atomic.Value

// ExprPushDownBlackListReloadTimeStamp is used to record the last time when the push-down black list is reloaded.
// This is for plan cache, when the push-down black list is updated, we invalid all cached plans to avoid error.
var ExprPushDownBlackListReloadTimeStamp *atomic.Int64

func init() {
	DefaultExprPushDownBlacklist = new(atomic.Value)
	DefaultExprPushDownBlacklist.Store(make(map[string]uint32))
	ExprPushDownBlackListReloadTimeStamp = new(atomic.Int64)
}

// canScalarFuncPushDown reports whether scalarFunc (and, recursively, all of
// its arguments) can be pushed down to storeType; a warning is appended to
// the statement context when it cannot.
func canScalarFuncPushDown(scalarFunc *ScalarFunction, pc PbConverter, storeType kv.StoreType) bool {
	pbCode := scalarFunc.Function.PbCode()
	// Check whether this function can be pushed.
	if unspecified := pbCode <= tipb.ScalarFuncSig_Unspecified; unspecified || !canFuncBePushed(scalarFunc, storeType) {
		if unspecified {
			failpoint.Inject("PanicIfPbCodeUnspecified", func() {
				panic(errors.Errorf("unspecified PbCode: %T", scalarFunc.Function))
			})
		}
		storageName := storeType.Name()
		if storeType == kv.UnSpecified {
			storageName = "storage layer"
		}
		warnErr := errors.New("Scalar function '" + scalarFunc.FuncName.L + "'(signature: " + scalarFunc.Function.PbCode().String() + ", return type: " + scalarFunc.RetType.CompactStr() + ") is not supported to push down to " + storageName + " now.")
		if pc.sc.InExplainStmt {
			pc.sc.AppendWarning(warnErr)
		} else {
			pc.sc.AppendExtraWarning(warnErr)
		}
		return false
	}
	canEnumPush := canEnumPushdownPreliminarily(scalarFunc)
	// Check whether all of its parameters can be pushed.
	for _, arg := range scalarFunc.GetArgs() {
		if !canExprPushDown(arg, pc, storeType, canEnumPush) {
			return false
		}
	}

	// Functions carrying metadata must be serializable to protobuf.
	if metadata := scalarFunc.Function.metadata(); metadata != nil {
		var err error
		_, err = proto.Marshal(metadata)
		if err != nil {
			logutil.BgLogger().Error("encode metadata", zap.Any("metadata", metadata), zap.Error(err))
			return false
		}
	}
	return true
}

// canExprPushDown reports whether a single expression (column, constant,
// correlated column or scalar function) can be pushed down to storeType.
func canExprPushDown(expr Expression, pc PbConverter, storeType kv.StoreType, canEnumPush bool) bool {
	if storeType == kv.TiFlash {
		switch expr.GetType().GetType() {
		case mysql.TypeEnum, mysql.TypeBit, mysql.TypeSet, mysql.TypeGeometry, mysql.TypeUnspecified:
			// Enum is allowed through only when the caller pre-approved it.
			if expr.GetType().GetType() == mysql.TypeEnum && canEnumPush {
				break
			}
			warnErr := errors.New("Expression about '" + expr.String() + "' can not be pushed to TiFlash because it contains unsupported calculation of type '" + types.TypeStr(expr.GetType().GetType()) + "'.")
			if pc.sc.InExplainStmt {
				pc.sc.AppendWarning(warnErr)
			} else {
				pc.sc.AppendExtraWarning(warnErr)
			}
			return false
		case mysql.TypeNewDecimal:
			if !expr.GetType().IsDecimalValid() {
				warnErr := errors.New("Expression about '" + expr.String() + "' can not be pushed to TiFlash because it contains invalid decimal('" + strconv.Itoa(expr.GetType().GetFlen()) + "','" + strconv.Itoa(expr.GetType().GetDecimal()) + "').")
				if pc.sc.InExplainStmt {
					pc.sc.AppendWarning(warnErr)
				} else {
					pc.sc.AppendExtraWarning(warnErr)
				}
				return false
			}
		}
	}
	switch x := expr.(type) {
	case *CorrelatedColumn:
		return pc.conOrCorColToPBExpr(expr) != nil && pc.columnToPBExpr(&x.Column) != nil
	case *Constant:
		return pc.conOrCorColToPBExpr(expr) != nil
	case *Column:
		return pc.columnToPBExpr(x) != nil
	case *ScalarFunction:
		return canScalarFuncPushDown(x, pc, storeType)
	}
	return false
}

// PushDownExprsWithExtraInfo split the input exprs into pushed and remained, pushed include all the exprs that can be pushed down
func PushDownExprsWithExtraInfo(sc *stmtctx.StatementContext, exprs []Expression, client kv.Client, storeType kv.StoreType, canEnumPush bool) (pushed []Expression, remained []Expression) {
	pc := PbConverter{sc: sc, client: client}
	for _, expr := range exprs {
		if canExprPushDown(expr, pc, storeType, canEnumPush) {
			pushed = append(pushed, expr)
		} else {
			remained = append(remained, expr)
		}
	}
	return
}

// PushDownExprs split the input exprs into pushed and remained, pushed include all the exprs that can be pushed down
func PushDownExprs(sc *stmtctx.StatementContext, exprs []Expression, client kv.Client, storeType kv.StoreType) (pushed []Expression, remained []Expression) {
	return PushDownExprsWithExtraInfo(sc, exprs, client, storeType, false)
}

// CanExprsPushDownWithExtraInfo return true if all the expr in exprs can be pushed down
func CanExprsPushDownWithExtraInfo(sc *stmtctx.StatementContext, exprs []Expression, client kv.Client, storeType kv.StoreType, canEnumPush bool) bool {
	_, remained := PushDownExprsWithExtraInfo(sc, exprs, client, storeType, canEnumPush)
	return len(remained) == 0
}

// CanExprsPushDown return true if all the expr in exprs can be pushed down
func CanExprsPushDown(sc *stmtctx.StatementContext, exprs []Expression, client kv.Client, storeType kv.StoreType) bool {
	return CanExprsPushDownWithExtraInfo(sc, exprs, client, storeType, false)
}

// wrapWithIsTrue wraps `arg` with istrue function if the return type of expr is not
// type int, otherwise, returns `arg` directly.
// The `keepNull` controls what the istrue function will return when `arg` is null:
// 1. keepNull is true and arg is null, the istrue function returns null.
// 2. keepNull is false and arg is null, the istrue function returns 0.
// The `wrapForInt` indicates whether we need to wrapIsTrue for non-logical Expression with int type.
// TODO: remove this function. ScalarFunction should be newed in one place.
func wrapWithIsTrue(ctx sessionctx.Context, keepNull bool, arg Expression, wrapForInt bool) (Expression, error) {
	if arg.GetType().EvalType() == types.ETInt {
		if !wrapForInt {
			return arg, nil
		}
		// Results of logical operators are already 0/1/NULL, no wrap needed.
		if child, ok := arg.(*ScalarFunction); ok {
			if _, isLogicalOp := logicalOps[child.FuncName.L]; isLogicalOp {
				return arg, nil
			}
		}
	}
	var fc *isTrueOrFalseFunctionClass
	if keepNull {
		fc = &isTrueOrFalseFunctionClass{baseFunctionClass{ast.IsTruthWithNull, 1, 1}, opcode.IsTruth, keepNull}
	} else {
		fc = &isTrueOrFalseFunctionClass{baseFunctionClass{ast.IsTruthWithoutNull, 1, 1}, opcode.IsTruth, keepNull}
	}
	f, err := fc.getFunction(ctx, []Expression{arg})
	if err != nil {
		return nil, err
	}
	sf := &ScalarFunction{
		FuncName: model.NewCIStr(ast.IsTruthWithoutNull),
		Function: f,
		RetType:  f.getRetTp(),
	}
	if keepNull {
		sf.FuncName = model.NewCIStr(ast.IsTruthWithNull)
	}
	// Fold in case `arg` was constant so the wrapper collapses immediately.
	return FoldConstant(sf), nil
}

// PropagateType propagates the type information to the `expr`.
// Note: For now, we only propagate type for the function CastDecimalAsDouble.
//
// e.g.
// > create table t(a decimal(9, 8));
// > insert into t values(5.04600000)
// > select a/36000 from t;
// Type: NEWDECIMAL
// Length: 15
// Decimals: 12
// +------------------+
// | 5.04600000/36000 |
// +------------------+
// |   0.000140166667 |
// +------------------+
//
// > select cast(a/36000 as double) as result from t;
// Type: DOUBLE
// Length: 23
// Decimals: 31
// +----------------------+
// | result               |
// +----------------------+
// | 0.000140166666666666 |
// +----------------------+
// The expected `decimal` and `length` of the outer cast_as_double need to be
// propagated to the inner div.
func PropagateType(evalType types.EvalType, args ...Expression) {
	switch evalType {
	case types.ETReal:
		expr := args[0]
		oldFlen, oldDecimal := expr.GetType().GetFlen(), expr.GetType().GetDecimal()
		newFlen, newDecimal := setDataTypeDouble(expr.GetType().GetDecimal())
		// For float(M,D), double(M,D) or decimal(M,D), M must be >= D.
		if newFlen < newDecimal {
			newFlen = oldFlen - oldDecimal + newDecimal
		}
		if oldFlen != newFlen || oldDecimal != newDecimal {
			// Clone columns before mutating their type so shared expression
			// nodes elsewhere are not affected.
			if col, ok := args[0].(*Column); ok {
				newCol := col.Clone()
				newCol.(*Column).RetType = col.RetType.Clone()
				args[0] = newCol
			}
			if col, ok := args[0].(*CorrelatedColumn); ok {
				newCol := col.Clone()
				newCol.(*CorrelatedColumn).RetType = col.RetType.Clone()
				args[0] = newCol
			}
			if args[0].GetType().GetType() == mysql.TypeNewDecimal {
				if newDecimal > mysql.MaxDecimalScale {
					newDecimal = mysql.MaxDecimalScale
				}
			}
			args[0].GetType().SetFlenUnderLimit(newFlen)
			args[0].GetType().SetDecimalUnderLimit(newDecimal)
		}
	}
}

// Args2Expressions4Test converts these values to an expression list.
// This conversion is incomplete, so only use for test.
func Args2Expressions4Test(args ...interface{}) []Expression {
	exprs := make([]Expression, len(args))
	for i, v := range args {
		d := types.NewDatum(v)
		var ft *types.FieldType
		switch d.Kind() {
		case types.KindNull:
			ft = types.NewFieldType(mysql.TypeNull)
		case types.KindInt64:
			ft = types.NewFieldType(mysql.TypeLong)
		case types.KindUint64:
			ft = types.NewFieldType(mysql.TypeLong)
			ft.AddFlag(mysql.UnsignedFlag)
		case types.KindFloat64:
			ft = types.NewFieldType(mysql.TypeDouble)
		case types.KindString:
			ft = types.NewFieldType(mysql.TypeVarString)
		case types.KindMysqlTime:
			ft = types.NewFieldType(mysql.TypeTimestamp)
		case types.KindBytes:
			ft = types.NewFieldType(mysql.TypeBlob)
		default:
			// Unsupported kinds yield a nil slot rather than an error.
			exprs[i] = nil
			continue
		}
		exprs[i] = &Constant{Value: d, RetType: ft}
	}
	return exprs
}
package mut import ( "net" "sync" ) type Server struct { closed bool address string cfg *Config mgr *ConnMgr ln *net.TCPListener wgConn *sync.WaitGroup localAddress *net.TCPAddr } func NewServer(address string, cfg *Config) *Server { return &Server{ address: address, cfg: cfg, closed: false, mgr: newConnectionMgr(), wgConn: new(sync.WaitGroup), } } func (srv *Server) Servo() error { err := srv.cfg.validate() if err != nil { return err } //resolve tcpAddress, err := net.ResolveTCPAddr("tcp", srv.address) if err != nil { logger.Err(err, "mut# ResolveTCPAddr failed:") return err } srv.localAddress = tcpAddress //listen ln, err := net.ListenTCP("tcp", srv.localAddress) if err != nil { logger.Err(err, "mut# ListenTCP failed:") return err } srv.ln = ln logger.Info("mut# listen %+v [ok]", srv.ln.Addr()) go srv.listenLoop() return nil } func (srv *Server) listenLoop() { retry := newRetry() for !srv.closed { socket, err := srv.ln.AcceptTCP() //retry if err occur if err != nil { if ne, ok := err.(net.Error); ok && ne.Temporary() { retry.retryAfter(srv) continue } } retry.reset() srv.wgConn.Add(1) logger.Info("mut# connection arrived: %+v => %v", socket.RemoteAddr(), socket.LocalAddr()) conn := NewConnection(socket, srv.cfg) conn.server = srv srv.mgr.add(conn) //only async mode start this loop if srv.cfg.AsyncMode { go conn.ReadLoop() go conn.WriteLoop() } } } func (srv *Server) Close() { srv.closed = true srv.wgConn.Wait() } func (srv *Server) ConnMgr() *ConnMgr { return srv.mgr }
package leetcode func canThreePartsEqualSum(A []int) bool { lA := len(A) s := 0 for _, v := range A { s += v } if s%3 != 0 { return false } left := 0 var i int for i = 0; i < lA-2; i++ { left += A[i] if left == s/3 { break } } if i == lA-2 { return false } i++ mid := 0 for ; i < lA-1; i++ { mid += A[i] if mid == s/3 { break } } return i != lA-1 }
package core

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"

	"github.com/textileio/go-textile/broadcast"
	"github.com/textileio/go-textile/keypair"
	"github.com/textileio/go-textile/pb"
)

// Account returns account keypair
func (t *Textile) Account() *keypair.Full {
	return t.account
}

// Sign signs input with account seed
func (t *Textile) Sign(input []byte) ([]byte, error) {
	return t.account.Sign(input)
}

// Verify verifies input with account address
func (t *Textile) Verify(input []byte, sig []byte) error {
	return t.account.Verify(input, sig)
}

// Encrypt encrypts input with account address
func (t *Textile) Encrypt(input []byte) ([]byte, error) {
	return t.account.Encrypt(input)
}

// Decrypt decrypts input with account address
func (t *Textile) Decrypt(input []byte) ([]byte, error) {
	return t.account.Decrypt(input)
}

// AccountThread returns the account private thread
func (t *Textile) AccountThread() *Thread {
	// The account thread is keyed by the account address itself.
	return t.ThreadByKey(t.config.Account.Address)
}

// AccountContact returns a contact for this account
func (t *Textile) AccountContact() *pb.Contact {
	return t.contact(t.account.Address(), false)
}

// SyncAccount performs a thread backup search and applies the result
func (t *Textile) SyncAccount(options *pb.QueryOptions) (*broadcast.Broadcaster, error) {
	query := &pb.ThreadSnapshotQuery{
		Address: t.account.Address(),
	}
	resCh, errCh, cancel, err := t.SearchThreadSnapshots(query, options)
	if err != nil {
		return nil, err
	}

	// Apply snapshots as they arrive; the returned canceler stops the search.
	go func() {
		for {
			select {
			case res, ok := <-resCh:
				if !ok {
					return
				}
				err := t.applySnapshot(res)
				if err != nil {
					log.Errorf("error applying snap %s: %s", res.Id, err)
				}
			// NOTE(review): errors are logged but do not stop the loop; if
			// errCh is ever closed this case would spin on zero values —
			// confirm SearchThreadSnapshots never closes errCh.
			case err := <-errCh:
				log.Errorf("error during account sync: %s", err)
			}
		}
	}()

	return cancel, err
}

// maybeSyncAccount runs SyncAccount if it has not been run in the last kSyncAccountFreq
func (t *Textile) maybeSyncAccount() {
	// Stop any sync that is still in flight before starting a new one.
	if t.cancelSync != nil {
		t.cancelSync.Close()
		t.cancelSync = nil
	}

	daily, err := t.datastore.Config().GetLastDaily()
	if err != nil {
		log.Errorf("error get last daily: %s", err)
		return
	}

	if daily.Add(kSyncAccountFreq).Before(time.Now()) {
		var err error
		t.cancelSync, err = t.SyncAccount(&pb.QueryOptions{
			Wait: 10,
		})
		if err != nil {
			log.Errorf("error sync account: %s", err)
			return
		}

		// Record the run time so the next call can rate-limit itself.
		err = t.datastore.Config().SetLastDaily()
		if err != nil {
			log.Errorf("error set last daily: %s", err)
		}
	}
}

// accountPeers returns all known account peers
func (t *Textile) accountPeers() []*pb.Peer {
	// All peers sharing this account address, excluding this node itself.
	query := fmt.Sprintf("address='%s' and id!='%s'", t.account.Address(), t.node.Identity.Pretty())
	return t.datastore.Peers().List(query)
}

// isAccountPeer returns whether or not the given id is an account peer
func (t *Textile) isAccountPeer(id string) bool {
	query := fmt.Sprintf("address='%s' and id='%s'", t.account.Address(), id)
	return len(t.datastore.Peers().List(query)) > 0
}

// applySnapshot unmarshals and adds an unencrypted thread snapshot from a search result
func (t *Textile) applySnapshot(result *pb.QueryResult) error {
	snap := new(pb.Thread)
	if err := ptypes.UnmarshalAny(result.Value, snap); err != nil {
		return err
	}

	log.Debugf("applying snapshot %s", snap.Id)

	return t.AddOrUpdateThread(snap)
}
package Shuffle

import (
	"fmt"
	"sort"
	"testing"
)

// TestSolution_Shuffle verifies that repeated shuffles always return a
// permutation of the original slice.
func TestSolution_Shuffle(t *testing.T) {
	type fields struct {
		origin []int
	}
	cases := []struct {
		name   string
		fields fields
	}{
		// TODO: Add test cases.
		{
			name: "first",
			fields: fields{
				origin: []int{1, 2, 3},
			},
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			sol := &Solution{
				origin: tc.fields.origin,
			}
			for round := 0; round < 12; round++ {
				got := sol.Shuffle()
				fmt.Println(got)
				if !check(sol.origin, got) {
					t.Errorf("Solution.Shuffle() = %v", got)
				}
			}
		})
	}
}

// check reports whether got is a permutation of origin by sorting both
// slices in place and comparing element-wise.
func check(origin, got []int) bool {
	// I'm not consider the probability of call Shuffle() much more times.
	size := len(origin)
	if size != len(got) {
		return false
	}
	sort.Ints(origin)
	sort.Ints(got)
	for idx := 0; idx < size; idx++ {
		if origin[idx] != got[idx] {
			return false
		}
	}
	return true
}
package util // Replaced during build var Version = "devel"
package tonberry

import (
	"image"
)

// Axis selectors passed to the bounds-check callback.
const (
	CHKX = iota
	CHKY
)

// player is a movable, drawable game object with velocity-based motion
// constrained by a caller-supplied bounds check.
type player struct {
	spr       Sprite
	box       image.Rectangle
	xVel, yVel int
	velInc    int
	boundsChk func(int, image.Rectangle) bool
}

// NewMoveable creates a player object using the sprite loaded from file,
// the initial bounding box, and a bounds-check callback that reports
// whether a proposed position is out of bounds for the given axis.
func NewMoveable(file string, bounds image.Rectangle, bcheck func(int, image.Rectangle) bool) GameObject {
	return &player{
		spr:       NewSprite(file),
		velInc:    500,
		box:       bounds,
		boundsChk: bcheck,
	}
}

// ResetMovement zeroes both velocity components.
func (d *player) ResetMovement() {
	d.xVel = 0
	d.yVel = 0
}

// Update moves the player by velocity scaled to elapsed time, one axis at a
// time; a failed bounds check reverts that axis' move.
func (d *player) Update(deltaTicks uint32) {
	// deltaTicks is milliseconds; multiplier converts velocity to pixels.
	var multiplier float64 = (float64(deltaTicks) / 1000.0)
	d.box = d.box.Add(image.Pt(int(float64(d.xVel)*multiplier), 0))
	if d.boundsChk(CHKX, d.box) {
		d.box = d.box.Sub(image.Pt(int(float64(d.xVel)*multiplier), 0))
	}
	d.box = d.box.Add(image.Pt(0, int(float64(d.yVel)*multiplier)))
	if d.boundsChk(CHKY, d.box) {
		d.box = d.box.Sub(image.Pt(0, int(float64(d.yVel)*multiplier)))
	}
}

// HandleInput adjusts velocity on arrow-key press and reverts the
// adjustment on release.
func (d *player) HandleInput(ev KeyboardEvent) {
	if ev.Type == KEYDOWN {
		switch ev.Keysym.Sym {
		case K_UP:
			d.yVel -= d.velInc
		case K_DOWN:
			d.yVel += d.velInc
		case K_LEFT:
			d.xVel -= d.velInc
		case K_RIGHT:
			d.xVel += d.velInc
		}
	} else if ev.Type == KEYUP && (d.xVel != 0 || d.yVel != 0) {
		// Undo the corresponding key-down increment.
		switch ev.Keysym.Sym {
		case K_UP:
			d.yVel += d.velInc
		case K_DOWN:
			d.yVel -= d.velInc
		case K_LEFT:
			d.xVel += d.velInc
		case K_RIGHT:
			d.xVel -= d.velInc
		}
	}
}

// Draw renders the sprite at the player's absolute position.
func (d *player) Draw(sc Screen) {
	d.spr.Show(sc, d.box.Min.X, d.box.Min.Y)
}

// DrawCam renders the sprite at the position relative to the camera.
func (d *player) DrawCam(sc Screen, c Camera) {
	d.spr.Show(sc, d.box.Min.X-int(c.X), d.box.Min.Y-int(c.Y))
}

// SetCamera centers the camera on the player and clamps it to the bounds
// of the named level map.
func (d *player) SetCamera(c *Camera, level string) {
	c.X = int16((d.box.Min.X + d.box.Dx()/2) - int(c.W/2))
	c.Y = int16((d.box.Min.Y + d.box.Dy()/2) - int(c.H/2))
	if c.X < 0 {
		c.X = 0
	}
	if c.Y < 0 {
		c.Y = 0
	}
	if c.X > int16(maps[level].LWidth-c.W) {
		c.X = int16(maps[level].LWidth - c.W)
	}
	if c.Y > int16(maps[level].LHeight-c.H) {
		c.Y = int16(maps[level].LHeight - c.H)
	}
}
package main

import (
	"github.com/streadway/amqp"
)

// setupRabbit connects to RabbitMQ on the given vhost, declares an exclusive
// auto-expiring queue, binds it to VncExchange (empty routing key) and starts
// a consumer on it. It returns the open connection, the channel and the
// delivery stream. Every failure is fatal: log.Fatalf terminates the process.
//
// NOTE(review): `log` here is a package-level logger (it has Notice/Fatalf)
// defined elsewhere in this package, not the stdlib log package; VncExchange
// is likewise declared elsewhere.
func setupRabbit(rabbitURI string, rabbitVHost string, rabbitQueue string) (*amqp.Connection, *amqp.Channel, <-chan amqp.Delivery) {
	log.Notice("Connecting to RabbitMQ...")

	conn, err := amqp.DialConfig(rabbitURI, amqp.Config{Vhost: rabbitVHost})
	if err != nil {
		log.Fatalf("Failed to connect: %s", err)
	}

	ch, err := conn.Channel()
	if err != nil {
		log.Fatalf("Failed to open channel: %s", err)
	}

	q, err := ch.QueueDeclare(
		rabbitQueue, // name
		false,       // durable
		false,       // delete when unused
		true,        // exclusive
		false,       // no-wait
		amqp.Table{"x-expires": int32(180000)}, // arguments: queue TTL, 180000 ms (3 min)
	)
	if err != nil {
		log.Fatalf("Failed to create queue: %s", err)
	}

	err = ch.QueueBind(
		q.Name,      // queue name
		"",          // routing key
		VncExchange, // exchange
		false,
		nil,
	)
	if err != nil {
		log.Fatalf("Failed to bind queue: %s", err)
	}

	msgs, err := ch.Consume(
		q.Name, // queue
		"",     // consumer (server-generated tag)
		false,  // auto-ack is off: the caller must ack each delivery
		false,  // exclusive
		false,  // no-local
		false,  // no-wait
		nil,    // args
	)
	if err != nil {
		log.Fatalf("Failed to register consumer: %s", err)
	}

	log.Notice("Connected.")
	return conn, ch, msgs
}

// teardownRabbit unbinds and deletes the queue created by setupRabbit, then
// closes the connection (which also closes its channels). It returns the
// first error encountered.
func teardownRabbit(conn *amqp.Connection, ch *amqp.Channel, rabbitQueue string) error {
	err := ch.QueueUnbind(rabbitQueue, "", VncExchange, amqp.Table{})
	if err != nil {
		return err
	}

	// ifUnused=false, ifEmpty=false: delete unconditionally; noWait=true.
	_, err = ch.QueueDelete(rabbitQueue, false, false, true)
	if err != nil {
		return err
	}

	return conn.Close()
}
package _404_Sum_of_Left_Leaves type TreeNode struct { Val int Left *TreeNode Right *TreeNode } func sumOfLeftLeaves(root *TreeNode) int { // return sumOfLeftLeavesBFS(root) // return sumOfLeftLeavesDFSRecursion(root) return sumOfLeftLeavesDFSUnrecursion(root) } func sumOfLeftLeavesDFSUnrecursion(node *TreeNode) int { var ( s = []*TreeNode{node} ret int ) if node == nil { return 0 } for len(s) > 0 { x := s[len(s)-1] s = s[0 : len(s)-1] if x.Right != nil { s = append(s, x.Right) } if x.Left != nil { s = append(s, x.Left) if x.Left.Left == nil && x.Left.Right == nil { ret += x.Left.Val } } } return ret } func sumOfLeftLeavesDFSRecursion(node *TreeNode) int { var ret int if node == nil { return 0 } if node.Left != nil { ret += sumOfLeftLeavesDFSRecursion(node.Left) if node.Left.Left == nil && node.Left.Right == nil { ret += node.Left.Val } } if node.Right != nil { ret += sumOfLeftLeavesDFSRecursion(node.Right) } return ret } func sumOfLeftLeavesBFS(root *TreeNode) int { var ( q = []*TreeNode{root} ret int ) if root == nil { return 0 } for len(q) > 0 { x := q[0] q = q[1:] if x.Left != nil { q = append(q, x.Left) if x.Left.Left == nil && x.Left.Right == nil { ret += x.Left.Val } } if x.Right != nil { q = append(q, x.Right) } } return ret }
package main

import (
	"services"
	"log"
	"crypto/sha256"
	"crypto/x509"
)

// main is a scratch/demo program exercising the `services` crypto helpers:
// it generates two pre-master key pairs, derives a shared ephemeral key from
// each side (Diffie-Hellman style), round-trips data through
// EncryptData/DecryptData, and finally prints the length of a SHA-256 digest
// of a marshalled RSA public key. The large commented-out sections are
// earlier RSA/config experiments kept for reference.
func main() {

	/*config, err := services.NewConfigObject()
	if err != nil {
		log.Println("Error creating config object: " + err.Error())
		os.Exit(1)
	}

	// Open configuration file
	//priv, pub, err := services.ParseKeys("keypair3.pem")
	if err != nil {
		log.Println("Failed parsing sencond keys.")
	}

	// Encryption Part
	message := []byte("Hello World!")
	log.Println("Original Message: ", string(message))
	encrytedMessage, err := services.EncryptData(config.PublicKey, message)
	if err != nil {
		log.Println("Error while encrypting.")
	}
	log.Println(len(encrytedMessage))

	/*encrytedMessage2, err := services.EncryptData(pub, encrytedMessage[0:230])
	if err != nil {
		log.Println("Error while encrypting.")
	}
	log.Println("Encrypted Message: ", encrytedMessage2)

	decryptedMessage2, err := services.DecryptData(priv, encrytedMessage2)
	log.Println("Decrypted Message: ", string(decryptedMessage2))*/

	/*decryptedMessage, err := services.DecryptData(config.PrivateKey, encrytedMessage)
	log.Println("Decrypted Message: ", string(decryptedMessage))*/

	//log.Println("Key: ", priv)

	// Two independent key pairs; each side derives the shared key from the
	// other's public part.
	priv, pub, g := services.GeneratePreMasterKey()
	priv2, pub2, g2 := services.GeneratePreMasterKey()

	k := services.ComputeEphemeralKey(g, pub2, priv)
	k2 := services.ComputeEphemeralKey(g2, pub, priv2)

	// Both derivations should print the same key.
	log.Println("Key1: ", k)
	log.Println("Key2: ", k2)

	// NOTE(review): this quoted string literal spans a line break in the
	// source as received, which is not valid Go syntax — likely a formatting
	// artifact; confirm against the original file.
	data := []byte("Hello World! 
This is an awesome Day and I have no clue what else to write...")

	// Encrypt once and twice with k, then decrypt with k2 (the keys should
	// be equal, so both decryptions should succeed).
	encryptedData, _ := services.EncryptData(k, data)
	log.Println("Encrypted Data: ", encryptedData)

	encryptedData2, _ := services.EncryptData(k, encryptedData)
	log.Println("Encrypted Data 2: ", encryptedData2)

	decryptedData, _ := services.DecryptData(k2, encryptedData2)
	log.Println("Decrypted Data: ", string(decryptedData))

	decryptedData2, _ := services.DecryptData(k2, encryptedData)
	log.Println("Decrypted Data 2: ", string(decryptedData2))

	// Print the byte length of the SHA-256 digest (always 32) of the
	// marshalled RSA public key. Errors from ParseKeys are ignored here.
	_, pubRSA, _ := services.ParseKeys("keypair3.pem")
	log.Printf("Identity: %d\n", len(sha256.Sum256(x509.MarshalPKCS1PublicKey(pubRSA))))
}
package sand import ( "html/template" "io/ioutil" "log" "net/http" "os" "github.com/gorilla/mux" ) /* For Development Only , for InitDevRouter(TODO) */ func (s *Sand) addTmpl(router *mux.Router) { router.HandleFunc("/v1/{page}.html", func(w http.ResponseWriter, r *http.Request) { w.Header().Add("Vary", "Accept-Encoding") w.Header().Add("Content-Type", "text/html") ps := mux.Vars(r) //bytes, _ := Asset("tmpl/" + ps["page"] + ".tmpl") reader, err := os.Open("tmpl/" + ps["page"] + ".tmpl") if err != nil { log.Println("error reading") return } bytes, _ := ioutil.ReadAll(reader) tmpl := template.New("html") tmpl, err = tmpl.Parse(string(bytes)) if err != nil { log.Println("error parse template") return } //dir, _ := AssetDir("templates") dir, err := ioutil.ReadDir("templates") if err != nil { log.Println("error read templates") return } for _, d := range dir { //bytes, err1 := Asset("templates/" + d) reader, err = os.Open("templates/" + d.Name()) if err != nil { log.Println("error reading") return } bytes, err1 := ioutil.ReadAll(reader) if err1 != nil { log.Panicf("Unable to parse: template=%s, err=%s", d.Name(), err) return } tmpl.New(d.Name()).Parse(string(bytes)) } err = tmpl.Execute(w, s) //constant if err != nil { log.Println("error executing template") } else { log.Println("parsing", s) } }) }
package helpers import ( "encoding/json" "example.com/banking/models" ) func ConvertJsonToWallet(jsonStr string) *models.Wallet { model := models.Wallet{} json.Unmarshal([]byte(jsonStr), &model) return &model }
package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"

	"github.com/joho/godotenv"
	"github.com/sendgrid/sendgrid-go"
	"github.com/sendgrid/sendgrid-go/helpers/mail"
	"github.com/xuri/excelize/v2"
)

// main bulk-sends one email per address listed in the first column of Sheet1
// in ./emails.xlsx, via SendGrid.
// FROM_NAME, FROM_EMAIL, EMAIL_SUBJECT and EMAIL_TEXT are constants defined
// elsewhere in this package; SENDGRID_API_KEY is read from the environment.
func main() {
	// Load environment variables (SENDGRID_API_KEY) from the .env file.
	err := godotenv.Load()
	if err != nil {
		log.Fatal("Error loading .env file")
	}

	path, err := os.Getwd()
	if err != nil {
		// Non-fatal: filepath.Join below would then use an empty path.
		log.Println(err)
	}

	f, err := excelize.OpenFile(filepath.Join(path, "emails.xlsx"))
	if err != nil {
		fmt.Println(err)
		return
	}

	// Get all the rows in the Sheet1.
	rows, err := f.GetRows("Sheet1")
	if err != nil {
		fmt.Println(err)
		return
	}

	client := sendgrid.NewSendClient(os.Getenv("SENDGRID_API_KEY"))

	for _, row := range rows {
		// NOTE(review): row[0] panics on an empty spreadsheet row — confirm
		// the sheet has no blank lines, or guard with len(row) > 0.
		from := mail.NewEmail(FROM_NAME, FROM_EMAIL)
		subject := EMAIL_SUBJECT
		to := mail.NewEmail("Me", row[0])
		plainTextContent := EMAIL_TEXT
		htmlContent := "" // plain-text only; no HTML body is sent
		message := mail.NewSingleEmail(from, subject, to, plainTextContent, htmlContent)

		response, err := client.Send(message)
		if err != nil {
			// Fatal on the first transport error: remaining rows are skipped.
			log.Fatal(err)
		}

		// SendGrid answers 202 Accepted when the message is queued.
		if response.StatusCode == 202 {
			log.Println("Successfully delivered to: ", row[0])
		} else {
			log.Println("Email delivery failed: ", response.Body)
		}
	}
}
package whookie

import "encoding/json"

// Event is a webhook event envelope. Data is kept as json.RawMessage so the
// type-specific payload can be decoded lazily by the consumer, typically
// after inspecting Type.
type Event struct {
	Id        string          `json:"id"`        // unique event identifier
	Message   string          `json:"message"`   // human-readable description
	Type      string          `json:"type"`      // event type discriminator for Data
	Timestamp int64           `json:"timestamp"` // NOTE(review): unit (seconds vs millis) not evident here — confirm with the producer
	Data      json.RawMessage `json:"data"`      // raw type-specific payload
}
package main

import (
	"fmt"
	"strconv"
	"sync"
	"time"
)

var count int
var wg sync.WaitGroup
var so sync.Once
var m = sync.Map{}

// main demonstrates concurrent use of sync.Map: 21 goroutines each store
// their index under its decimal-string key and immediately read it back.
func main() {
	wg.Add(21)
	for i := 0; i < 21; i++ {
		go func(n int) {
			defer wg.Done()
			key := strconv.Itoa(n)
			m.Store(key, n)
			fmt.Println(m.Load(key))
		}(i)
	}
	wg.Wait()
}

// add increments the package-level counter 100 times with a short pause
// between steps, then signals the WaitGroup. (Currently unreferenced; kept
// as the plain-counter counterpart of the sync.Map demo.)
func add() {
	for j := 0; j < 100; j++ {
		count++
		fmt.Println(count)
		time.Sleep(50 * time.Millisecond)
	}
	wg.Done()
}
package main

import (
	"fmt"
	"math"
	"os"
)

// isPowerOfTwo reports whether n is a power of two greater than 1, using the
// classic n&(n-1) trick.
// NOTE(review): n == 1 (2^0) deliberately returns false here — confirm
// callers rely on that before changing it.
func isPowerOfTwo(n uint64) bool {
	if n == 0 || n == 1 {
		return false
	}
	return (n & (n - 1)) == 0
}

// bytesToHuman converts a byte count to a human-readable IEC string.
// Convert 1024 to '1 KiB' etc.
func bytesToHuman(src uint64) string {
	if src < 10 {
		return fmt.Sprintf("%d B", src)
	}
	s := float64(src)
	base := float64(1024)
	sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"}

	// Pick the largest unit that keeps the value >= 1.
	e := math.Floor(math.Log(s) / math.Log(base))
	suffix := sizes[int(e)]
	// Round to one decimal place.
	val := math.Floor(s/math.Pow(base, e)*10+0.5) / 10
	f := "%.0f %s"
	if val < 10 {
		f = "%.1f %s"
	}

	return fmt.Sprintf(f, val, suffix)
}

// isDirectory reports whether dir exists and is a directory.
//
// BUG FIX: previously only os.IsNotExist errors were returned; any other
// Stat failure (e.g. permission denied) fell through and called fi.IsDir()
// on a nil FileInfo, panicking. Now every Stat error is returned.
func isDirectory(dir string) (b bool, err error) {
	fi, err := os.Stat(dir)
	if err != nil {
		return false, err
	}
	if !fi.IsDir() {
		return false, fmt.Errorf(`not a directory: %v`, dir)
	}
	return true, nil
}
package main

import "math"

// Powerset1 returns all 2^len(nums) subsets of nums by interpreting each
// index i as a bitmask: bit j of i selects nums[j].
func Powerset1(nums []int) [][]int {
	if len(nums) == 0 {
		return [][]int{
			[]int{},
		}
	}

	length := int(math.Pow(2, float64(len(nums))))
	result := make([][]int, length)

	for i := 0; i < length; i++ {
		bi := i
		s := []int{}
		for _, n := range nums {
			if bi%2 != 0 {
				s = append(s, n)
			}
			bi = bi / 2
		}
		result[i] = s
	}
	return result
}

// Powerset2 builds the power set incrementally: starting from {∅}, each
// element doubles the set by appending itself to every subset seen so far.
func Powerset2(nums []int) [][]int {
	length := int(math.Pow(2, float64(len(nums))))
	result := make([][]int, length)

	index := 0
	result[index] = []int{}
	index++

	for _, n := range nums {
		max := index
		for i := 0; i < max; i++ {
			result[index] = copyAndAppend(result[i], n)
			index++
		}
	}
	return result
}

// copyAndAppend returns a fresh slice holding nums followed by n, leaving
// nums untouched (plain append could share the backing array).
func copyAndAppend(nums []int, n int) []int {
	dst := make([]int, len(nums)+1)
	copy(dst, nums)
	dst[len(nums)] = n
	return dst
}

// Powerset3 streams every subset of nums to f, using the same bitmask
// enumeration as Powerset1.
//
// BUG FIX: for empty input the special case called f with the empty set and
// then FELL THROUGH into the general loop, which (since 2^0 == 1) invoked f
// with the empty set a second time. The special case now returns.
func Powerset3(nums []int, f func([]int)) {
	if len(nums) == 0 {
		f([]int{})
		return
	}

	length := int(math.Pow(2, float64(len(nums))))
	for i := 0; i < length; i++ {
		bi := i
		s := []int{}
		for _, n := range nums {
			if bi%2 != 0 {
				s = append(s, n)
			}
			bi = bi / 2
		}
		f(s)
	}
}
/*
 * Go Library (C) 2017 Inc.
 *
 * @project Project Globo / avaliacao.com
 * @author @jeffotoni
 * @size 01/03/2018
 */

// Package handler: HTTP handlers for the curriculum CRUD endpoints.
package handler

import (
	"github.com/jeffotoni/gmongocrud/conf"
	"github.com/jeffotoni/gmongocrud/lib/context"
	"github.com/jeffotoni/gmongocrud/lib/upload"
	"github.com/jeffotoni/gmongocrud/repo"
	"log"
	"net/http"
	"strings"
)

// Buffer for the last error message logged/returned.
// NOTE(review): package-level mutable state shared across requests — not
// goroutine-safe; confirm the server serializes handler execution.
var msgerror string

// CurriculumCreate inserts a new curriculum record into the database.
// (The original comment mentioned "perguntas"/questions — apparently copied
// from the questions handler.)
//
// Two content types are handled:
//   - application/json: the raw request body is stored via repo.AddCurriculum;
//   - multipart/form-data: form fields are assembled into a JSON string, the
//     record is created, and the uploaded file is saved under the new UUID.
// Any other content type is rejected with 401.
func CurriculumCreate(ctx *context.Context) {
	// Raw JSON bytes from the request body.
	var byteJson []byte

	var err error

	// Response payload and the UUID of the newly created record.
	var msgJson, Uuid string

	// The only content type accepted by the JSON branch.
	cTypeAceito := "application/json"

	cType := ctx.Req.Header.Get("Content-Type")

	if strings.ToLower(strings.TrimSpace(cType)) == cTypeAceito {
		// Read the body as bytes (consumed by Unmarshal downstream).
		byteJson, err = ctx.Req.Body().Bytes()

		// Close the request body when the handler returns.
		defer ctx.Req.Body().ReadCloser()

		if err != nil {
			// Could not read the request body.
			msgerror = "[CurriculumCreate] Erro ao capturar Json: " + err.Error()
			log.Println(msgerror)
			// NOTE(review): this payload is missing the closing quote before
			// the brace, producing invalid JSON.
			msgJson = `{"status":"error","msg":"` + msgerror + `}`
			ctx.JSON(http.StatusUnauthorized, msgJson)
			return
		} else {
			// Reject an empty body.
			if string(byteJson) != "" {
				// Create the record in the database.
				Uuid, err = repo.AddCurriculum(byteJson)
				if err != nil {
					log.Println(err.Error())
					msgJson = `{"status":"error","msg":"` + err.Error() + `"}`
					ctx.JSON(http.StatusUnauthorized, msgJson)
					return
				} else {
					// Success.
					msgJson = `{"status":"ok","msg":"seus dados foram cadastrados com sucesso!", "uuid":"` + Uuid + `"}`
					ctx.JSON(http.StatusOK, msgJson)
				}
			} else {
				log.Println("[CurriculumCreate] Erro em sua string json nao pode ser vazia!")
				msgJson = `{"status":"error","msg":"Erro em sua string json"}`
				ctx.JSON(http.StatusUnauthorized, msgJson)
				return
			}
		} // end else
	} else {
		// Strip any parameters (e.g. boundary) from the content type.
		vetCot := strings.Split(cType, ";")
		Content := strings.ToLower(strings.TrimSpace(vetCot[0]))
		if Content == "multipart/form-data" {
			//log.Println("teste nome:: ", ctx.Req.Form.Get("nome"))
			//log.Println("teste cpf:: ", ctx.Req.Form.Get("cpf"))

			_, fh, err := ctx.GetFile(conf.NAME_FORM_FILE)
			// NOTE(review): err is not checked before fh is used — a missing
			// file field would panic here; confirm GetFile guarantees fh.
			fi := &upload.FileInfo{
				Size: fh.Size,
				Name: fh.Filename,
				Type: fh.Header.Get("Content-Type"),
			}

			// Enforce the configured upload size limit.
			if !fi.ValidateSize() {
				msgTmp := "size not allowed!"
				msgJson := `{"status":"error","msg":"` + msgTmp + `"}`
				ctx.JSON(http.StatusUnauthorized, msgJson)
				return
			}

			// Assemble the record JSON from the form fields.
			// NOTE(review): built by string concatenation — field values are
			// not escaped, so quotes in user input break the JSON.
			jsonl := `{` +
				`"nome":"` + ctx.Req.Form.Get("nome") + `",` +
				`"cpf":"` + ctx.Req.Form.Get("cpf") + `",` +
				`"rg":"` + ctx.Req.Form.Get("rg") + `",` +
				`"idade":"` + ctx.Req.Form.Get("idade") + `",` +
				`"bio":"` + ctx.Req.Form.Get("bio") + `",` +
				`"skill":"` + ctx.Req.Form.Get("skill") + `"` +
				`}`
			// log.Println(jsonl)

			byteJson := []byte(jsonl)

			// Create the curriculum record.
			Uuid, err = repo.AddCurriculum(byteJson)
			if err != nil {
				log.Println(err.Error())
				msgJson = `{"status":"error","msg":"` + err.Error() + `"}`
				ctx.JSON(http.StatusUnauthorized, msgJson)
				return
			} else {
				// Upload destination: /<PATH_UPLOAD>/<uuid> under the
				// working directory.
				pathUpload := "/" + conf.PATH_UPLOAD + "/" + Uuid
				pathNewOrg := repo.GetWdLocal(0)
				pathAbs := pathNewOrg + pathUpload
				log.Println("upload:", pathAbs)

				// Write the uploaded file to disk.
				// NOTE(review): SaveToFile's result is ignored here.
				ctx.SaveToFile(conf.NAME_FORM_FILE, pathAbs)

				// Success.
				msgJson = `{"status":"ok","msg":"seus dados foram cadastrados com sucesso!", "uuid":"` + Uuid + `"}`
				ctx.JSON(http.StatusOK, msgJson)
			}
		} else {
			log.Println("[CurriculumCreate] Erro Content-Type: aceitamos somente " + cTypeAceito)
			msgJson = `{"status":"error","msg":"error no Content-Type: ` + cType + `, aceitamos somente [Content-Type: ` + cTypeAceito + `]"}`
			ctx.JSON(http.StatusUnauthorized, msgJson)
			return
		}
	}
}

// CurriculumFind fetches a single curriculum record by the :id route
// parameter and returns it, or responds 401 with an error payload when the
// id is missing or the lookup fails.
func CurriculumFind(ctx *context.Context) {
	var msgJson string

	// Record key from the route.
	Uuid := ctx.Params(":id")
	if Uuid != "" {
		// Look the record up by its UUID.
		strJson, err := repo.GetCurriculum(Uuid)

		if err == nil {
			// Found: embed the record JSON in the response.
			// NOTE(review): strJson is embedded inside a quoted string,
			// which yields malformed JSON when it contains quotes.
			msgJson = `{"status":"ok","msg":"Encontrou o id na base de dados!", "data":"` + strJson + `"}`
			// Send the response to the client.
			ctx.JSON(http.StatusOK, msgJson)
		} else {
			msgerror = "[CurriculumFind] " + err.Error()
			log.Println(msgerror)
			msgJson = `{"status":"error","msg":"` + msgerror + `]"}`
			ctx.JSON(http.StatusUnauthorized, msgJson)
			return
		}
	} else {
		msgerror = "[CurriculumFind] Uuid é obrigatorio!"
		log.Println(msgerror)
		msgJson = `{"status":"error","msg":"` + msgerror + `]"}`
		ctx.JSON(http.StatusUnauthorized, msgJson)
		return
	}
}

// CurriculumUpdate replaces the record identified by the :id route parameter
// with the JSON sent in the request body.
func CurriculumUpdate(ctx *context.Context) {
	var err error

	// Response payload.
	var msgJson string

	// Raw JSON bytes from the request body.
	var byteJson []byte

	// Record key from the route.
	Uuid := ctx.Params(":id")

	if Uuid != "" {
		// Read the body as bytes (consumed by Unmarshal downstream).
		byteJson, err = ctx.Req.Body().Bytes()
		if err != nil {
			msgJson = `{"status":"error","msg":"` + err.Error() + `"}`
			ctx.JSON(http.StatusUnauthorized, msgJson)
			return
		}

		// Close the request body when the handler returns.
		defer ctx.Req.Body().ReadCloser()

		// Apply the update.
		err := repo.UpCurriculum(Uuid, byteJson)

		// Success: the record was updated.
		if err == nil {
			msgJson = `{"status":"ok","msg":"atualizado com sucesso seu Uuid: ` + Uuid + `!"}`
			ctx.JSON(http.StatusOK, msgJson)
		} else {
			log.Println(err.Error())
			msgJson = `{"status":"error","msg":"` + err.Error() + `]"}`
			ctx.JSON(http.StatusUnauthorized, msgJson)
			return
		}
	} else {
		// NOTE(review): message tag says [QuestionsUpdate] — copy/paste from
		// the questions handler.
		msgerror = "[QuestionsUpdate] Uuid é obrigatório!"
		log.Println(msgerror)
		msgJson = `{"status":"error","msg":"` + msgerror + `]"}`
		ctx.JSON(http.StatusUnauthorized, msgJson)
		return
	}
}
package paperswithcode_go

import (
	"github.com/stretchr/testify/assert"
	"testing"
)

// TestClient_PaperRepositoryList fetches the repository list for a known
// paper slug and checks the first result has a URL.
// NOTE(review): this is an integration test — it requires network access and
// depends on the remote data; indexing list.Results[0] panics (rather than
// failing the assertion) if the API returns zero results.
func TestClient_PaperRepositoryList(t *testing.T) {
	c := NewClient()

	list, err := c.PaperRepositoryList("generative-adversarial-networks")
	assert.NoError(t, err)
	assert.NotEmpty(t, list.Results[0].URL)
}
package server import ( "runtime" "github.com/sirupsen/logrus" "github.com/spf13/viper" ) // Init the server config for Gin func Init(config *viper.Viper, logger *logrus.Logger) { // Use all cpu cores runtime.GOMAXPROCS(runtime.NumCPU()) // Create router and listen on the configed port r := NewRouter(config, logger) r.Run(":" + config.GetString("server.port")) }