text
stringlengths
11
4.05M
// All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package v20180808 import ( "encoding/json" tchttp "github.com/tencentyun/tcecloud-sdk-go/tcecloud/common/http" ) type DescribeLogSearchRequest struct { *tchttp.BaseRequest // 日志开始时间 StartTime *string `json:"StartTime,omitempty" name:"StartTime"` // 日志结束时间 EndTime *string `json:"EndTime,omitempty" name:"EndTime"` // 服务id ServiceId *string `json:"ServiceId,omitempty" name:"ServiceId"` // 精确查询,支持apiid/reqid搜索 Filters []*Filter `json:"Filters,omitempty" name:"Filters" list` // 单次要返回的日志条数,单次返回的最大条数为100 Limit *uint64 `json:"Limit,omitempty" name:"Limit"` // 根据上次返回的ConText,获取后续的内容,最多可获取10000条 ConText *string `json:"ConText,omitempty" name:"ConText"` // 按时间排序 asc(升序)或者 desc(降序),默认为 desc Sort *string `json:"Sort,omitempty" name:"Sort"` // 模糊查询,根据关键字检索日志 Query *string `json:"Query,omitempty" name:"Query"` } func (r *DescribeLogSearchRequest) ToJsonString() string { b, _ := json.Marshal(r) return string(b) } func (r *DescribeLogSearchRequest) FromJsonString(s string) error { return json.Unmarshal([]byte(s), &r) } type DescribeLogSearchResponse struct { *tchttp.BaseResponse Response *struct { // 获取更多检索结果的游标,值为""表示无后续结果 ConText *string `json:"ConText,omitempty" name:"ConText"` // 由0或多条日志组成,每条日志格式如下: // '[$app_id][$env_name][$service_id][$http_host][$api_id][$uri][$scheme][rsp_st:$status][ups_st:$upstream_status]' // 
'[cip:$remote_addr][uip:$upstream_addr][vip:$server_addr][rsp_len:$bytes_sent][req_len:$request_length]' // '[req_t:$request_time][ups_rsp_t:$upstream_response_time][ups_conn_t:$upstream_connect_time][ups_head_t:$upstream_header_time]’ // '[err_msg:$err_msg][tcp_rtt:$tcpinfo_rtt][$pid][$time_local][req_id:$request_id]'; // // 说明: // app_id: 用户 ID。 // env_name:环境名称。 // service_id: 服务 ID。 // http_host: 域名。 // api_id: API 的 ID。 // uri:请求的路径。 // scheme: HTTP/HTTPS 协议。 // rsp_st: 请求响应状态码。 // ups_st: 后端业务服务器的响应状态码(如果请求透传到后端,改变量不为空。如果请求在 APIGW 就被拦截了,那么该变量显示为 -)。 // cip: 客户端 IP。 // uip: 后端业务服务(upstream)的 IP。 // vip: 请求访问的 VIP。 // rsp_len: 响应长度。 // req_len: 请求长度。 // req_t: 请求响应的总时间。 // ups_rsp_t: 后端响应的总时间(apigw 建立连接到接收到后端响应的时间)。 // ups_conn_t: 与后端业务服务器连接建立成功时间。 // ups_head_t:后端响应的头部到达时间。 // err_msg: 错误信息。 // tcp_rtt: 客户端 TCP 连接信息,RTT(Round Trip Time)由三部分组成:链路的传播时间(propagation delay)、末端系统的处理时间、路由器缓存中的排队和处理时间(queuing delay)。 // req_id:请求id。 LogSet []*string `json:"LogSet,omitempty" name:"LogSet" list` // 单次搜索返回的日志条数,TotalCount <= Limit TotalCount *uint64 `json:"TotalCount,omitempty" name:"TotalCount"` // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 RequestId *string `json:"RequestId,omitempty" name:"RequestId"` } `json:"Response"` } func (r *DescribeLogSearchResponse) ToJsonString() string { b, _ := json.Marshal(r) return string(b) } func (r *DescribeLogSearchResponse) FromJsonString(s string) error { return json.Unmarshal([]byte(s), &r) } type Filter struct { // 需要过滤的字段。 Name *string `json:"Name,omitempty" name:"Name"` // 字段的过滤值。 Values []*string `json:"Values,omitempty" name:"Values" list` } type ModifyApiIncrementRequest struct { *tchttp.BaseRequest // 服务ID ServiceId *string `json:"ServiceId,omitempty" name:"ServiceId"` // 接口ID ApiId *string `json:"ApiId,omitempty" name:"ApiId"` // 需要修改的API auth类型(可选择OAUTH-授权API) BusinessType *string `json:"BusinessType,omitempty" name:"BusinessType"` // oauth接口需要修改的公钥值 PublicKey *string `json:"PublicKey,omitempty" name:"PublicKey"` // 
oauth接口重定向地址 LoginRedirectUrl *string `json:"LoginRedirectUrl,omitempty" name:"LoginRedirectUrl"` } func (r *ModifyApiIncrementRequest) ToJsonString() string { b, _ := json.Marshal(r) return string(b) } func (r *ModifyApiIncrementRequest) FromJsonString(s string) error { return json.Unmarshal([]byte(s), &r) } type ModifyApiIncrementResponse struct { *tchttp.BaseResponse Response *struct { // 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 RequestId *string `json:"RequestId,omitempty" name:"RequestId"` } `json:"Response"` } func (r *ModifyApiIncrementResponse) ToJsonString() string { b, _ := json.Marshal(r) return string(b) } func (r *ModifyApiIncrementResponse) FromJsonString(s string) error { return json.Unmarshal([]byte(s), &r) }
package gps

import "encoding/json"

// Default codecs used when the caller does not configure one explicitly.
var (
	DefaultValueCodec  Codec = &JSONCodec{}
	DefaultStreamCodec Codec = &JSONCodec{}
)

// Codec serializes values to a byte slice and restores them again.
type Codec interface {
	Encode(value interface{}) ([]byte, error)
	Decode(v []byte, value interface{}) error
}

// JSONCodec is a Codec backed by encoding/json.
type JSONCodec struct{}

// Encode renders value as JSON.
func (jc *JSONCodec) Encode(value interface{}) ([]byte, error) {
	return json.Marshal(value)
}

// Decode parses the JSON bytes v into value, which must be a pointer.
func (jc *JSONCodec) Decode(v []byte, value interface{}) error {
	return json.Unmarshal(v, value)
}
package main

/**
Daily Temperatures (LeetCode 739)

Given a list of daily temperatures T, produce a list answer such that
answer[i] is the number of days you have to wait after day i until a
warmer temperature. If no future day is warmer, answer[i] is 0.

Example:
	Input:  [73, 74, 75, 71, 69, 72, 76, 73]
	Output: [1, 1, 4, 2, 1, 1, 0, 0]

(The previous file comment described the palindrome-number problem,
which does not match this code.)
*/

// DailyTemperatures returns, for every index i of T, the distance to the
// next index j > i with T[j] > T[i], or 0 when no warmer day follows.
//
// It uses a monotonic stack of indices whose temperatures are strictly
// decreasing, which resolves each index exactly once — O(n) instead of
// the naive O(n^2) scan.
func DailyTemperatures(T []int) []int {
	ans := make([]int, len(T))
	// stack holds indices of days still waiting for a warmer day;
	// the temperatures at those indices are strictly decreasing.
	var stack []int
	for i, temp := range T {
		for len(stack) > 0 && T[stack[len(stack)-1]] < temp {
			j := stack[len(stack)-1]
			stack = stack[:len(stack)-1]
			ans[j] = i - j
		}
		stack = append(stack, i)
	}
	return ans
}
package main import ( "bytes" "fmt" "net/url" "os" "strconv" "strings" "github.com/BurntSushi/toml" "github.com/docopt/docopt-go" "github.com/mitchellh/go-homedir" "github.com/root-gg/plik/plik" "github.com/root-gg/plik/server/common" ) // CliConfig object type CliConfig struct { Debug bool Quiet bool URL string OneShot bool Removable bool Stream bool Secure bool SecureMethod string SecureOptions map[string]interface{} Archive bool ArchiveMethod string ArchiveOptions map[string]interface{} DownloadBinary string Comments string Login string Password string TTL int ExtendTTL bool AutoUpdate bool Token string DisableStdin bool Insecure bool filePaths []string filenameOverride string } // NewUploadConfig construct a new configuration with default values func NewUploadConfig() (config *CliConfig) { config = new(CliConfig) config.URL = "http://127.0.0.1:8080" config.ArchiveMethod = "tar" config.ArchiveOptions = make(map[string]interface{}) config.ArchiveOptions["Tar"] = "/bin/tar" config.ArchiveOptions["Compress"] = "gzip" config.ArchiveOptions["Options"] = "" config.SecureMethod = "openssl" config.SecureOptions = make(map[string]interface{}) config.SecureOptions["Openssl"] = "/usr/bin/openssl" config.SecureOptions["Cipher"] = "aes-256-cbc" config.SecureOptions["Options"] = "-md sha512 -pbkdf2 -iter 120000" config.DownloadBinary = "curl" return } // LoadConfigFromFile load TOML config file func LoadConfigFromFile(path string) (*CliConfig, error) { config := NewUploadConfig() if _, err := toml.DecodeFile(path, config); err != nil { return nil, fmt.Errorf("Failed to deserialize ~/.plickrc : %s", err) } // Sanitize URL config.URL = strings.TrimSuffix(config.URL, "/") return config, nil } // LoadConfig creates a new default configuration and override it with .plikrc file. 
// If .plikrc does not exist, ask domain, and create a new one in user HOMEDIR func LoadConfig(opts docopt.Opts) (config *CliConfig, err error) { // Load config file from environment variable path := os.Getenv("PLIKRC") if path != "" { _, err := os.Stat(path) if err != nil { return nil, fmt.Errorf("Plikrc file %s not found", path) } return LoadConfigFromFile(path) } // Detect home dir home, err := homedir.Dir() if err != nil { home = os.Getenv("HOME") if home == "" { home = "." } } // Load config file from ~/.plikrc path = home + "/.plikrc" _, err = os.Stat(path) if err == nil { config, err = LoadConfigFromFile(path) if err == nil { return config, nil } } else { // Load global config file from /etc directory path = "/etc/plik/plikrc" _, err = os.Stat(path) if err == nil { config, err = LoadConfigFromFile(path) if err == nil { return config, nil } } } config = NewUploadConfig() // Bypass ~/.plikrc file creation if quiet mode and/or --server flag if opts["--quiet"].(bool) || (opts["--server"] != nil && opts["--server"].(string) != "") { return config, nil } // Config file not found. Create one. path = home + "/.plikrc" // Ask for domain var domain string fmt.Println("Please enter your plik domain [default:http://127.0.0.1:8080] : ") _, err = fmt.Scanf("%s", &domain) if err == nil { domain = strings.TrimRight(domain, "/") parsedDomain, err := url.Parse(domain) if err == nil { if parsedDomain.Scheme == "" { parsedDomain.Scheme = "http" } config.URL = parsedDomain.String() } } // Try to HEAD the site to see if we have a redirection client := plik.NewClient(config.URL) client.Insecure() resp, err := client.HTTPClient.Head(config.URL) if err != nil { return nil, err } finalURL := resp.Request.URL.String() if finalURL != "" && finalURL != config.URL { fmt.Printf("We have been redirected to : %s\n", finalURL) fmt.Printf("Replace current url (%s) with the new one ? 
[Y/n] ", config.URL) ok, err := common.AskConfirmation(true) if err != nil { return nil, fmt.Errorf("Unable to ask for confirmation : %s", err) } if ok { config.URL = strings.TrimSuffix(finalURL, "/") } } // Try to get server config to sync default values serverConfig, err := client.GetServerConfig() if err != nil { fmt.Printf("Unable to get server configuration : %s", err) } else { config.OneShot = common.IsFeatureDefault(serverConfig.FeatureOneShot) config.Removable = common.IsFeatureDefault(serverConfig.FeatureRemovable) config.Stream = common.IsFeatureDefault(serverConfig.FeatureStream) config.ExtendTTL = common.IsFeatureDefault(serverConfig.FeatureExtendTTL) if serverConfig.FeatureAuthentication == common.FeatureForced { fmt.Printf("Anonymous uploads are disabled on this server") fmt.Printf("Do you want to provide a user authentication token ? [Y/n] ") ok, err := common.AskConfirmation(true) if err != nil { return nil, fmt.Errorf("Unable to ask for confirmation : %s", err) } if ok { var token string fmt.Println("Please enter a valid user token : ") _, err = fmt.Scanf("%s", &token) if err == nil { config.Token = token } } } } // Enable client updates ? fmt.Println("Do you want to enable client auto update ? 
[Y/n] ") ok, err := common.AskConfirmation(true) if err != nil { return nil, fmt.Errorf("Unable to ask for confirmation : %s", err) } if ok { config.AutoUpdate = true } // Encode in TOML buf := new(bytes.Buffer) if err = toml.NewEncoder(buf).Encode(config); err != nil { return nil, fmt.Errorf("Failed to serialize ~/.plickrc : %s", err) } // Write file f, err := os.OpenFile(path, os.O_CREATE|os.O_RDWR, 0700) if err != nil { return nil, fmt.Errorf("Failed to save ~/.plickrc : %s", err) } _, _ = f.Write(buf.Bytes()) _ = f.Close() fmt.Println("Plik client settings successfully saved to " + path) return config, nil } // UnmarshalArgs turns command line arguments into upload settings // Command line arguments override config file settings func (config *CliConfig) UnmarshalArgs(opts docopt.Opts) (err error) { if opts["--debug"].(bool) { config.Debug = true } if opts["--quiet"].(bool) { config.Quiet = true } // Plik server url if opts["--server"] != nil && opts["--server"].(string) != "" { config.URL = opts["--server"].(string) } // Paths if _, ok := opts["FILE"].([]string); ok { config.filePaths = opts["FILE"].([]string) } else { return fmt.Errorf("No files specified") } for _, path := range config.filePaths { // Test if file exists fileInfo, err := os.Stat(path) if err != nil { return fmt.Errorf("File %s not found", path) } // Automatically enable archive mode is at least one file is a directory if fileInfo.IsDir() { config.Archive = true } } // Override file name if specified if opts["--name"] != nil && opts["--name"].(string) != "" { config.filenameOverride = opts["--name"].(string) } // Upload options if opts["--oneshot"].(bool) { config.OneShot = true } if opts["--removable"].(bool) { config.Removable = true } if opts["--stream"].(bool) { config.Stream = true } if opts["--comments"] != nil && opts["--comments"].(string) != "" { config.Comments = opts["--comments"].(string) } // Configure upload expire date if opts["--ttl"] != nil && opts["--ttl"].(string) != "" { 
ttlStr := opts["--ttl"].(string) mul := 1 if string(ttlStr[len(ttlStr)-1]) == "m" { mul = 60 } else if string(ttlStr[len(ttlStr)-1]) == "h" { mul = 3600 } else if string(ttlStr[len(ttlStr)-1]) == "d" { mul = 86400 } if mul != 1 { ttlStr = ttlStr[:len(ttlStr)-1] } ttl, err := strconv.Atoi(ttlStr) if err != nil { return fmt.Errorf("Invalid TTL %s", opts["--ttl"].(string)) } config.TTL = ttl * mul } if opts["--extend-ttl"].(bool) { config.ExtendTTL = true } // Enable archive mode ? if opts["-a"].(bool) || opts["--archive"] != nil || config.Archive { config.Archive = true if opts["--archive"] != nil && opts["--archive"] != "" { config.ArchiveMethod = opts["--archive"].(string) } } // Enable secure mode ? if opts["--not-secure"].(bool) { config.Secure = false } else if opts["-s"].(bool) || opts["--secure"] != nil || config.Secure { config.Secure = true if opts["--secure"] != nil && opts["--secure"].(string) != "" { config.SecureMethod = opts["--secure"].(string) } } // Enable password protection ? if opts["-p"].(bool) { fmt.Printf("Login [plik]: ") var err error _, err = fmt.Scanln(&config.Login) if err != nil && err.Error() != "unexpected newline" { return fmt.Errorf("Unable to get login : %s", err) } if config.Login == "" { config.Login = "plik" } fmt.Printf("Password: ") _, err = fmt.Scanln(&config.Password) if err != nil { return fmt.Errorf("Unable to get password : %s", err) } } else if opts["--password"] != nil && opts["--password"].(string) != "" { credentials := opts["--password"].(string) sepIndex := strings.Index(credentials, ":") var login, password string if sepIndex > 0 { login = credentials[:sepIndex] password = credentials[sepIndex+1:] } else { login = "plik" password = credentials } config.Login = login config.Password = password } // Override upload token ? 
if opts["--token"] != nil && opts["--token"].(string) != "" { config.Token = opts["--token"].(string) } // Ask for token if config.Token == "-" { fmt.Printf("Token: ") var err error _, err = fmt.Scanln(&config.Token) if err != nil { return fmt.Errorf("Unable to get token : %s", err) } } if opts["--stdin"].(bool) { config.DisableStdin = false } return }
package utility

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"

	"github.com/vallard/spark"
)

// SendJSON POSTs the given JSON payload to url and prints the response
// status, headers, and body to stdout. It panics on any transport or
// request-construction error, matching this package's best-effort style.
func SendJSON(jsonStr []byte, url string) {
	req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonStr))
	if err != nil {
		// Previously this error was silently discarded, so a malformed
		// URL would make client.Do dereference a nil request.
		panic(err)
	}
	req.Header.Set("X-Custom-Header", "myvalue")
	req.Header.Set("Content-Type", "application/json")

	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	fmt.Println("response Status:", resp.Status)
	fmt.Println("response Headers:", resp.Header)
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println("response Body:", string(body))
}

// SendSparkMessage posts sparkMessage as plain text to the Spark room
// sparkRoomId, authenticating with sparkToken. Panics if the API call
// fails.
func SendSparkMessage(sparkToken, sparkRoomId, sparkMessage string) {
	s := spark.New(sparkToken)

	m := spark.Message{
		RoomId: sparkRoomId,
		Text:   sparkMessage,
	}

	// Post the message to the room
	_, err := s.CreateMessage(m)
	if err != nil {
		panic(err)
	}
}
package types

import (
	"bytes"
	"fmt"
	"strings"
	"time"

	sdk "github.com/cosmos/cosmos-sdk/types"
	sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
	paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
)

// DefaultParamspace defines the default auth module parameter subspace
const (
	// todo: implement oracle constants as params
	DefaultParamspace = ModuleName

	AttestationPeriod = 24 * time.Hour // TODO: value????
)

var (
	// AttestationVotesPowerThreshold threshold of votes power to succeed
	AttestationVotesPowerThreshold = sdk.NewInt(66)

	// ParamsStoreKeyPeggyID stores the peggy id
	ParamsStoreKeyPeggyID = []byte("PeggyID")

	// ParamsStoreKeyContractHash stores the contract hash
	ParamsStoreKeyContractHash = []byte("ContractHash")

	// ParamsStoreKeyStartThreshold stores the start threshold
	ParamsStoreKeyStartThreshold = []byte("StartThreshold")

	// ParamsStoreKeyBridgeContractAddress stores the contract address
	ParamsStoreKeyBridgeContractAddress = []byte("BridgeContractAddress")

	// ParamsStoreKeyBridgeContractChainID stores the bridge chain id
	ParamsStoreKeyBridgeContractChainID = []byte("BridgeChainID")

	// ParamsStoreKeyInjContractAddress stores INJ ERC-20 contract address.
	ParamsStoreKeyInjContractAddress = []byte("InjContractAddress")

	// Ensure that params implements the proper interface
	_ paramtypes.ParamSet = &Params{}
)

// ValidateBasic validates genesis state by looping through the params and
// calling their validation functions
func (s GenesisState) ValidateBasic() error {
	if err := s.Params.ValidateBasic(); err != nil {
		return sdkerrors.Wrap(err, "params")
	}
	return nil
}

// DefaultGenesisState returns empty genesis state
// TODO: set some better defaults here
func DefaultGenesisState() *GenesisState {
	return &GenesisState{
		Params: DefaultParams(),
	}
}

// DefaultParams returns a copy of the default params
func DefaultParams() *Params {
	return &Params{
		// NOTE(review): "injecitve" is misspelled, but this value is a
		// consensus-relevant default — changing it would alter genesis,
		// so it is deliberately left untouched.
		PeggyId:       "injecitve-peggyid",
		BridgeChainId: 888,
	}
}

// ValidateBasic checks that the parameters have valid values.
func (p Params) ValidateBasic() error {
	if err := validatePeggyID(p.PeggyId); err != nil {
		return sdkerrors.Wrap(err, "peggy id")
	}
	if err := validateContractHash(p.ContractSourceHash); err != nil {
		return sdkerrors.Wrap(err, "contract hash")
	}
	if err := validateStartThreshold(p.StartThreshold); err != nil {
		return sdkerrors.Wrap(err, "start threshold")
	}
	if err := validateBridgeContractAddress(p.EthereumAddress); err != nil {
		return sdkerrors.Wrap(err, "bridge contract address")
	}
	if err := validateBridgeChainID(p.BridgeChainId); err != nil {
		return sdkerrors.Wrap(err, "bridge chain id")
	}
	if err := validateInjContractAddress(p.InjContractAddress); err != nil {
		// was wrapped as "bridge contract address" (copy-paste), which made
		// this failure indistinguishable from the EthereumAddress one above
		return sdkerrors.Wrap(err, "inj contract address")
	}
	return nil
}

// ParamKeyTable for auth module
func ParamKeyTable() paramtypes.KeyTable {
	return paramtypes.NewKeyTable().RegisterParamSet(&Params{})
}

// ParamSetPairs implements the ParamSet interface and returns all the key/value pairs
// pairs of auth module's parameters.
func (p *Params) ParamSetPairs() paramtypes.ParamSetPairs {
	return paramtypes.ParamSetPairs{
		paramtypes.NewParamSetPair(ParamsStoreKeyPeggyID, &p.PeggyId, validatePeggyID),
		paramtypes.NewParamSetPair(ParamsStoreKeyContractHash, &p.ContractSourceHash, validateContractHash),
		paramtypes.NewParamSetPair(ParamsStoreKeyStartThreshold, &p.StartThreshold, validateStartThreshold),
		paramtypes.NewParamSetPair(ParamsStoreKeyBridgeContractAddress, &p.EthereumAddress, validateBridgeContractAddress),
		paramtypes.NewParamSetPair(ParamsStoreKeyBridgeContractChainID, &p.BridgeChainId, validateBridgeChainID),
		paramtypes.NewParamSetPair(ParamsStoreKeyInjContractAddress, &p.InjContractAddress, validateInjContractAddress),
	}
}

// Equal returns a boolean determining if two Params types are identical.
func (p Params) Equal(p2 Params) bool {
	bz1 := ModuleCdc.MustMarshalBinaryLengthPrefixed(&p)
	bz2 := ModuleCdc.MustMarshalBinaryLengthPrefixed(&p2)
	return bytes.Equal(bz1, bz2)
}

// validatePeggyID requires a string that fits in a fixed 32-byte array.
func validatePeggyID(i interface{}) error {
	v, ok := i.(string)
	if !ok {
		return fmt.Errorf("invalid parameter type: %T", i)
	}
	if _, err := strToFixByteArray(v); err != nil {
		return err
	}
	return nil
}

// validateContractHash only checks the dynamic type (string).
func validateContractHash(i interface{}) error {
	if _, ok := i.(string); !ok {
		return fmt.Errorf("invalid parameter type: %T", i)
	}
	return nil
}

// validateStartThreshold only checks the dynamic type (uint64).
func validateStartThreshold(i interface{}) error {
	if _, ok := i.(uint64); !ok {
		return fmt.Errorf("invalid parameter type: %T", i)
	}
	return nil
}

// validateBridgeChainID only checks the dynamic type (uint64).
func validateBridgeChainID(i interface{}) error {
	if _, ok := i.(uint64); !ok {
		return fmt.Errorf("invalid parameter type: %T", i)
	}
	return nil
}

// validateBridgeContractAddress accepts a valid (or empty) Ethereum
// address string.
func validateBridgeContractAddress(i interface{}) error {
	v, ok := i.(string)
	if !ok {
		return fmt.Errorf("invalid parameter type: %T", i)
	}
	if err := ValidateEthAddress(v); err != nil {
		// TODO: ensure that empty addresses are valid in params
		if !strings.Contains(err.Error(), "empty") {
			return err
		}
	}
	return nil
}

// strToFixByteArray copies s into a fixed 32-byte array, failing when s
// does not fit.
func strToFixByteArray(s string) ([32]byte, error) {
	var out [32]byte
	if len(s) > 32 { // len(string) is already the byte length; no []byte copy needed
		return out, fmt.Errorf("string too long")
	}
	copy(out[:], s)
	return out, nil
}

// validateInjContractAddress accepts a valid (or empty) Ethereum address
// string for the INJ ERC-20 contract.
func validateInjContractAddress(i interface{}) error {
	v, ok := i.(string)
	if !ok {
		return fmt.Errorf("invalid parameter type: %T", i)
	}
	if err := ValidateEthAddress(v); err != nil {
		// TODO: ensure that empty addresses are valid in params
		if !strings.Contains(err.Error(), "empty") {
			return err
		}
	}
	return nil
}
/* * Remove a secondary network adapter from a given server. */ package main import ( "encoding/hex" "flag" "fmt" "os" "path" "github.com/grrtrr/clcv2/clcv2cli" "github.com/grrtrr/exit" ) func main() { var net = flag.String("net", "", "ID or name of the Network to use (REQUIRED)") var location = flag.String("l", "", "Data centre alias (to resolve network name if not using hex ID)") flag.Usage = func() { fmt.Fprintf(os.Stderr, "usage: %s [options] <Server-Name>\n", path.Base(os.Args[0])) flag.PrintDefaults() } flag.Parse() if flag.NArg() != 1 || *net == "" { flag.Usage() os.Exit(0) } client, err := clcv2cli.NewCLIClient() if err != nil { exit.Fatal(err.Error()) } /* net is supposed to be a (hex) ID, but allow network names, too */ if _, err := hex.DecodeString(*net); err == nil { /* already looks like a HEX ID */ } else if *location == "" { exit.Errorf("Need a location argument (-l) if not using a network ID (%s)", *net) } else { fmt.Printf("Resolving network id of %q ...\n", *net) if netw, err := client.GetNetworkIdByName(*net, *location); err != nil { exit.Errorf("failed to resolve network name %q: %s", *net, err) } else if netw == nil { exit.Errorf("No network named %q was found in %s", *net, *location) } else { *net = netw.Id } } if err = client.ServerDelNic(flag.Arg(0), *net); err != nil { exit.Fatalf("failed to remove NIC from %s: %s", flag.Arg(0), err) } fmt.Printf("Successfully removed secondary NIC from %s.\n", flag.Arg(0)) }
/* Copyright 2022 The KubeVela Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package policy import ( "context" "testing" "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" utilfeature "k8s.io/apiserver/pkg/util/feature" featuregatetesting "k8s.io/component-base/featuregate/testing" "sigs.k8s.io/controller-runtime/pkg/client/fake" clusterv1alpha1 "github.com/oam-dev/cluster-gateway/pkg/apis/cluster/v1alpha1" clustercommon "github.com/oam-dev/cluster-gateway/pkg/common" "github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha1" "github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1" "github.com/oam-dev/kubevela/apis/types" "github.com/oam-dev/kubevela/pkg/features" "github.com/oam-dev/kubevela/pkg/multicluster" "github.com/oam-dev/kubevela/pkg/utils/common" ) func TestGetClusterLabelSelectorInTopology(t *testing.T) { defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DeprecatedPolicySpec, true)() multicluster.ClusterGatewaySecretNamespace = types.DefaultKubeVelaNS cli := fake.NewClientBuilder().WithScheme(common.Scheme).WithObjects(&corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster-a", Namespace: multicluster.ClusterGatewaySecretNamespace, Labels: map[string]string{ clustercommon.LabelKeyClusterEndpointType: string(clusterv1alpha1.ClusterEndpointTypeConst), clustercommon.LabelKeyClusterCredentialType: 
string(clusterv1alpha1.CredentialTypeX509Certificate), "key": "value", }, }, }, &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster-b", Namespace: multicluster.ClusterGatewaySecretNamespace, Labels: map[string]string{ clustercommon.LabelKeyClusterEndpointType: string(clusterv1alpha1.ClusterEndpointTypeConst), clustercommon.LabelKeyClusterCredentialType: string(clusterv1alpha1.CredentialTypeX509Certificate), "key": "value", }, }, }, &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "cluster-c", Namespace: multicluster.ClusterGatewaySecretNamespace, Labels: map[string]string{ clustercommon.LabelKeyClusterEndpointType: string(clusterv1alpha1.ClusterEndpointTypeConst), clustercommon.LabelKeyClusterCredentialType: string(clusterv1alpha1.CredentialTypeX509Certificate), "key": "none", }, }, }).Build() appNs := "test" testCases := map[string]struct { Inputs []v1beta1.AppPolicy Outputs []v1alpha1.PlacementDecision Error string AllowCrossNamespace bool }{ "invalid-topology-policy": { Inputs: []v1beta1.AppPolicy{{ Name: "topology-policy", Type: "topology", Properties: &runtime.RawExtension{Raw: []byte(`{"cluster":"x"}`)}, }}, Error: "failed to parse topology policy", }, "cluster-not-found": { Inputs: []v1beta1.AppPolicy{{ Name: "topology-policy", Type: "topology", Properties: &runtime.RawExtension{Raw: []byte(`{"clusters":["cluster-x"]}`)}, }}, Error: "failed to get cluster", }, "topology-by-clusters": { Inputs: []v1beta1.AppPolicy{{ Name: "topology-policy", Type: "topology", Properties: &runtime.RawExtension{Raw: []byte(`{"clusters":["cluster-a"]}`)}, }}, Outputs: []v1alpha1.PlacementDecision{{Cluster: "cluster-a", Namespace: ""}}, }, "topology-by-cluster-selector-404": { Inputs: []v1beta1.AppPolicy{{ Name: "topology-policy", Type: "topology", Properties: &runtime.RawExtension{Raw: []byte(`{"clusterSelector":{"key":"bad-value"}}`)}, }}, Error: "failed to find any cluster matches given labels", }, "topology-by-cluster-selector-ignore-404": { Inputs: 
[]v1beta1.AppPolicy{{ Name: "topology-policy", Type: "topology", Properties: &runtime.RawExtension{Raw: []byte(`{"clusterSelector":{"key":"bad-value"},"allowEmpty":true}`)}, }}, Outputs: []v1alpha1.PlacementDecision{}, }, "topology-by-cluster-selector": { Inputs: []v1beta1.AppPolicy{{ Name: "topology-policy", Type: "topology", Properties: &runtime.RawExtension{Raw: []byte(`{"clusterSelector":{"key":"value"}}`)}, }}, Outputs: []v1alpha1.PlacementDecision{{Cluster: "cluster-a", Namespace: ""}, {Cluster: "cluster-b", Namespace: ""}}, }, "topology-by-cluster-label-selector": { Inputs: []v1beta1.AppPolicy{{ Name: "topology-policy", Type: "topology", Properties: &runtime.RawExtension{Raw: []byte(`{"clusterLabelSelector":{"key":"value"}}`)}, }}, Outputs: []v1alpha1.PlacementDecision{{Cluster: "cluster-a", Namespace: ""}, {Cluster: "cluster-b", Namespace: ""}}, }, "topology-by-cluster-selector-and-namespace-invalid": { Inputs: []v1beta1.AppPolicy{{ Name: "topology-policy", Type: "topology", Properties: &runtime.RawExtension{Raw: []byte(`{"clusterSelector":{"key":"value"},"namespace":"override"}`)}, }}, Error: "cannot cross namespace", }, "topology-by-cluster-selector-and-namespace": { Inputs: []v1beta1.AppPolicy{{ Name: "topology-policy", Type: "topology", Properties: &runtime.RawExtension{Raw: []byte(`{"clusterSelector":{"key":"value"},"namespace":"override"}`)}, }}, Outputs: []v1alpha1.PlacementDecision{{Cluster: "cluster-a", Namespace: "override"}, {Cluster: "cluster-b", Namespace: "override"}}, AllowCrossNamespace: true, }, "topology-no-clusters-and-cluster-label-selector": { Inputs: []v1beta1.AppPolicy{{ Name: "topology-policy", Type: "topology", Properties: &runtime.RawExtension{Raw: []byte(`{"namespace":"override"}`)}, }}, Outputs: []v1alpha1.PlacementDecision{{Cluster: "local", Namespace: "override"}}, AllowCrossNamespace: true, }, "no-topology-policy": { Inputs: []v1beta1.AppPolicy{}, Outputs: []v1alpha1.PlacementDecision{{Cluster: "local", Namespace: ""}}, }, 
"empty-topology-policy": { Inputs: []v1beta1.AppPolicy{{Type: "topology", Name: "some-name", Properties: nil}}, Error: "have empty properties", }, } for name, tt := range testCases { t.Run(name, func(t *testing.T) { r := require.New(t) pds, err := GetPlacementsFromTopologyPolicies(context.Background(), cli, appNs, tt.Inputs, tt.AllowCrossNamespace) if tt.Error != "" { r.NotNil(err) r.Contains(err.Error(), tt.Error) } else { r.NoError(err) r.Equal(tt.Outputs, pds) } }) } }
package main

import (
	"fmt"
	"time"
)

// FirstDataBase simulates a slow (2 s) database lookup and publishes its
// answer on out1.
func FirstDataBase(out1 chan string) {
	time.Sleep(2 * time.Second)
	out1 <- "Answer from DB1"
}

// SecondDataBase simulates a faster (1 s) database lookup and publishes
// its answer on out2.
func SecondDataBase(out2 chan string) {
	time.Sleep(1 * time.Second)
	out2 <- "Answer from DB2"
}

// main races the two lookups and prints whichever answer arrives first.
func main() {
	ch1 := make(chan string)
	ch2 := make(chan string)

	go FirstDataBase(ch1)
	go SecondDataBase(ch2)

	// Take the first reply only; the losing goroutine's send blocks
	// forever, which is acceptable for a short-lived demo process.
	select {
	case answer := <-ch1:
		fmt.Println(answer)
	case answer := <-ch2:
		fmt.Println(answer)
	}
}
package metrics // SortMetrics ... type SortMetrics []PodMetric func (ms SortMetrics) Len() int { return len(ms) } func (ms SortMetrics) Less(i, j int) bool { switch ms[0].SortBy { case "cpu": return ms[i].CPU > ms[j].CPU case "memory": return ms[i].Memory > ms[j].Memory default: return false } } func (ms SortMetrics) Swap(i, j int) { ms[i], ms[j] = ms[j], ms[i] }
package webui

import (
	"os"
	"strings"
	"time"

	"github.com/go-macaron/binding"
	"github.com/toni-moreno/snmpcollector/pkg/agent"
	"github.com/toni-moreno/snmpcollector/pkg/config"
	"github.com/toni-moreno/snmpcollector/pkg/data/snmp"
	"gopkg.in/macaron.v1"
)

// NewAPIRtAgent Runtime Agent REST API creator
func NewAPIRtAgent(m *macaron.Macaron) error {

	bind := binding.Bind

	m.Group("/api/rt/agent", func() {
		m.Get("/reload/", reqSignedIn, AgentReloadConf)
		m.Get("/shutdown/", reqSignedIn, AgentShutdown)
		m.Post("/snmpconsole/ping/", reqSignedIn, bind(config.SnmpDeviceCfg{}), PingSNMPDevice)
		m.Post("/snmpconsole/query/:getmode/:obtype/:data", reqSignedIn, bind(config.SnmpDeviceCfg{}), QuerySNMPDevice)
		m.Get("/info/version/", RTGetVersion)
	})

	return nil
}

// AgentReloadConf reloads the configuration for all devices and returns
// the value produced by agent.ReloadConf on success (405 with the error
// text otherwise).
func AgentReloadConf(ctx *Context) {
	log.Info("trying to reload configuration for all devices")
	// was named "time", which shadowed the imported time package
	reloadTime, err := agent.ReloadConf()
	if err != nil {
		ctx.JSON(405, err.Error())
		return
	}
	ctx.JSON(200, reloadTime)
}

// AgentShutdown acknowledges the request and terminates the process.
func AgentShutdown(ctx *Context) {
	log.Info("receiving shutdown")
	ctx.JSON(200, "Init shutdown....")
	os.Exit(0)
}

// PingSNMPDevice opens a throw-away SNMP session against the posted
// device config and returns the gathered system info (400 on failure).
func PingSNMPDevice(ctx *Context, cfg config.SnmpDeviceCfg) {
	log.Infof("trying to ping device %s : %+v", cfg.ID, cfg)

	_, sysinfo, err := snmp.GetClient(&cfg, log, "ping", false, 0)
	if err != nil {
		log.Debugf("ERROR on query device : %s", err)
		ctx.JSON(400, err.Error())
	} else {
		log.Debugf("OK on query device ")
		ctx.JSON(200, sysinfo)
	}
}

// QuerySNMPDevice runs an ad-hoc SNMP query (getmode) for the OID in
// :data against the posted device config and returns the timing, system
// info, and PDU results. Only obtype "oid" is supported.
func QuerySNMPDevice(ctx *Context, cfg config.SnmpDeviceCfg) {
	getmode := ctx.Params(":getmode")
	obtype := ctx.Params(":obtype")
	data := strings.TrimSpace(ctx.Params(":data"))

	log.Infof("trying to query device %s : getmode: %s objectype: %s data %s", cfg.ID, getmode, obtype, data)

	if obtype != "oid" {
		// fixed "Supperted" typo in both the log line and the response
		log.Warnf("Object Type [%s] Not Supported", obtype)
		ctx.JSON(400, "Object Type ["+obtype+"] Not Supported")
		return
	}

	snmpcli, info, err := snmp.GetClient(&cfg, log, "query", false, 0)
	if err != nil {
		log.Debugf("ERROR on open connection with device %s : %s", cfg.ID, err)
		ctx.JSON(400, err.Error())
		return
	}

	start := time.Now()
	result, err := snmp.Query(snmpcli, getmode, data)
	elapsed := time.Since(start)
	if err != nil {
		log.Debugf("ERROR on query device : %s", err)
		ctx.JSON(400, err.Error())
		return
	}
	log.Debugf("OK on query device ")

	snmpdata := struct {
		DeviceCfg   *config.SnmpDeviceCfg
		TimeTaken   float64
		PingInfo    *snmp.SysInfo
		QueryResult []snmp.EasyPDU
	}{
		&cfg,
		elapsed.Seconds(),
		info,
		result,
	}
	ctx.JSON(200, snmpdata)
}

// RTGetVersion returns the agent's runtime/version information.
func RTGetVersion(ctx *Context) {
	info := agent.GetRInfo()
	ctx.JSON(200, &info)
}
package main

import (
	"net"
	"strconv"
	"sync"
)

// Peers is a mutex-guarded list of connected peers.
type Peers struct {
	sync.Mutex
	peerList []*peerState
}

// newPeers returns an empty, ready-to-use peer list.
func newPeers() *Peers {
	return &Peers{peerList: make([]*peerState, 0)}
}

// Know reports whether we already track a peer with the given id whose
// address host matches the host part of the candidate address `peer`.
func (lp *Peers) Know(peer, id string) bool {
	lp.Lock()
	defer lp.Unlock()
	for _, p := range lp.peerList {
		if p.id != id {
			continue
		}
		phost, _, err := net.SplitHostPort(p.address)
		if err != nil {
			return false
		}
		candidateHost, _, err := net.SplitHostPort(peer)
		if err != nil {
			return false
		}
		if phost == candidateHost {
			return true
		}
	}
	return false
}

// All returns a snapshot copy of the peer list.
func (lp *Peers) All() []*peerState {
	lp.Lock()
	defer lp.Unlock()
	ret := make([]*peerState, len(lp.peerList))
	copy(ret, lp.peerList)
	return ret
}

// Len returns the number of tracked peers.
func (lp *Peers) Len() (l int) {
	lp.Lock()
	l = len(lp.peerList)
	lp.Unlock()
	return l
}

// getConnInfo extracts the remote host and the two connection ports of conn,
// ordered as (lower, upper). ok is false when either address cannot be parsed.
func getConnInfo(conn net.Conn) (remoteHost string, lower, upper int, ok bool) {
	_, localPort, err1 := net.SplitHostPort(conn.LocalAddr().String())
	remoteHost, remotePort, err2 := net.SplitHostPort(conn.RemoteAddr().String())
	if err1 != nil || err2 != nil {
		return
	}
	localPortInt, err3 := strconv.Atoi(localPort)
	remotePortInt, err4 := strconv.Atoi(remotePort)
	if err3 != nil || err4 != nil {
		return
	}
	if localPortInt < remotePortInt {
		return remoteHost, localPortInt, remotePortInt, true
	}
	return remoteHost, remotePortInt, localPortInt, true
}

// Add compares the new peer to be added, and adds it if it is the
// winner at our duplicate elimination algorithm. In that case we remove
// any other duplicate and return true.
func (lp *Peers) Add(peer *peerState) (keep bool) {
	thisHost, thisLower, thisUpper, ok := getConnInfo(peer.conn)
	if !ok {
		return
	}

	lp.Lock()
	defer lp.Unlock()

	toDelete := make([]int, 0)
	for i, p := range lp.peerList {
		host, lower, upper, ok := getConnInfo(p.conn)
		if !ok {
			continue
		}
		if thisHost != host {
			continue
		}
		// We already have one that's better. Keep it, and don't add the new
		// one.
		if lower < thisLower || (lower == thisLower && upper < thisUpper) {
			return false
		}
		// We already have one but it should be removed and the new one be
		// added.
		toDelete = append(toDelete, i)
	}

	// Remove old ones.
	// BUG FIX: the original truncated with peerList[:len(peerList)] (a no-op,
	// so losers were never actually removed) and iterated toDelete in
	// ascending order, which invalidates the later recorded indices after a
	// swap-remove. Deleting from the highest index down keeps every remaining
	// toDelete index valid and truncating by one actually drops the element.
	for i := len(toDelete) - 1; i >= 0; i-- {
		delIdx := toDelete[i]
		lp.peerList[delIdx].Close()
		// We don't care about order: swap in the last element, then shrink.
		lp.peerList[delIdx] = lp.peerList[len(lp.peerList)-1]
		lp.peerList = lp.peerList[:len(lp.peerList)-1]
	}

	lp.peerList = append(lp.peerList, peer)
	return true
}

// Delete removes the first tracked peer whose address equals peer.address.
func (lp *Peers) Delete(peer *peerState) {
	lp.Lock()
	defer lp.Unlock()
	for i, p := range lp.peerList {
		if p.address == peer.address {
			// We don't care about the order, just put the last one here
			lp.peerList[i] = lp.peerList[len(lp.peerList)-1]
			lp.peerList = lp.peerList[:len(lp.peerList)-1]
			return
		}
	}
}

// HasPeer reports whether a peer with exactly this address string is tracked.
func (lp *Peers) HasPeer(peer string) bool {
	lp.Lock()
	defer lp.Unlock()
	for _, p := range lp.peerList {
		if p.address == peer {
			return true
		}
	}
	return false
}
package loadbalance_test import ( "fmt" "log" "time" providerRestApi "code.huawei.com/cse/api/provider/rest" "code.huawei.com/cse/common" "code.huawei.com/cse/testkit" "code.huawei.com/cse/util" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) func IsRoundRoubin(cycle int, n ...string) bool { Expect((len(n) - cycle) > 0).To(BeTrue()) Expect(cycle > 1).To(BeTrue()) length := len(n) - cycle for i := 0; i < length; i++ { Expect(n[i]).NotTo(BeEmpty()) if !Expect(n[i+cycle]).To(Equal(n[i])) { return false } } return true } func isStrategySessionStickiness(cycle int, n ...string) bool { Expect((len(n) - cycle) > 0).To(BeTrue()) Expect(cycle > 1).To(BeTrue()) length := len(n) - 1 for i := 0; i < length; i++ { Expect(n[i]).NotTo(BeEmpty()) if !Expect(n[i+1]).To(Equal(n[i])) { return false } } return true } func isWeightedResponse(cycle int, n ...string) bool { Expect((len(n) - cycle) > 0).To(BeTrue()) Expect(cycle > 1).To(BeTrue()) l := len(n) - 1 ma := make(map[string]int) for i := 0; i < l; i++ { Expect(n[i]).NotTo(BeEmpty()) if _, ok := ma[n[i]]; !ok { ma[n[i]] = 1 } ma[n[i]] += 1 } for _, v := range ma { if v/len(n)*100 >= 80 { return true } } return false } func isRand(cycle int, n ...string) { Expect((len(n) - cycle) > 0).To(BeTrue()) Expect(cycle > 1).To(BeTrue()) Expect(isStrategySessionStickiness(cycle, n...)).To(BeFalse()) Expect(IsRoundRoubin(cycle, n...)).To(BeFalse()) Expect(isWeightedResponse(cycle, n...)).To(BeFalse()) } func test(consumerAddr, providerName, protocol, dimensionInfo, instanceName string, instancesLength int) { testUri := fmt.Sprintf("http://%s%s?%s", consumerAddr, providerRestApi.Svc, util.FncodeParams([]util.URLParameter{ {common.ParamProvider: providerName}, {common.ParamProtocol: protocol}, {common.ParamTimes: common.CallTimes20Str}, })) // RoundRobin test testLB("Instance name list should be round robin", dimensionInfo, consumerAddr, testUri, "RoundRobin", "", providerName, instanceName, instancesLength) // session stickiness test 
testLB("Instance name list should be session stickiness", dimensionInfo, consumerAddr, testUri, "SessionStickiness", "10", providerName, instanceName, instancesLength) // WeightedResponse test testLB("Instance name list should be WeightedResponse", dimensionInfo, consumerAddr, testUri, "WeightedResponse", "", providerName, instanceName, instancesLength) // Rand test testLB("Instance name list should be Random", dimensionInfo, consumerAddr, testUri, "Random", "", providerName, instanceName, instancesLength) } func testLB(text, dimensionInfo, consumerAddr, testUri, t, st, providerName, instanceName string, instancesLength int) { m := map[string]interface{}{ "cse.loadbalance.strategy.name": t, "cse.loadbalance.SessionStickinessRule.sessionTimeoutInSeconds": st, } testkit.Callcc(fmt.Sprintf("http://%s%s", consumerAddr, providerRestApi.ConfigCenterAdd), "add", dimensionInfo, m, nil) time.Sleep(3 * time.Second) if t == "WeightedResponse" { curl := fmt.Sprintf("http://%s%s?%s", consumerAddr, providerRestApi.Svc, util.FncodeParams([]util.URLParameter{ {common.ParamProvider: providerName}, {common.ParamProtocol: fmt.Sprintf("delayInstance/%s/1000", instanceName)}, {common.ParamTimes: common.CallTimes10Str}, })) testkit.GetResponceInstanceAliasList(curl) time.Sleep(30 * time.Second) } It(text, func() { nameList := testkit.GetResponceInstanceAliasList(testUri) log.Println(fmt.Sprintf("type:%s,---instance list:%v", t, nameList)) switch t { case "RoundRobin": IsRoundRoubin(instancesLength, nameList...) case "SessionStickiness": isStrategySessionStickiness(instancesLength, nameList...) case "WeightedResponse": isWeightedResponse(instancesLength, nameList...) case "Random": isRand(instancesLength, nameList...) default: } }) } var _ = Describe("Load balance", func() { testkit.SDKATContext(test) })
package depmain_test import ( "os" "testing" "github.com/onemedical/depmain" ) func TestEnv(t *testing.T) { os.Setenv("DEPMAIN_TEST_VALUE", "set") ext := depmain.New() if e := ext.Getenv("DEPMAIN_TEST_VALUE"); e != "set" { t.Errorf("Getenv: want %s got %s", "set", e) } if e := ext.Getenv("DEPMAIN_UNKNOWN_TEST_VALUE"); e != "" { t.Errorf("Getenv bad value: want %q got %s", "", e) } _, found := ext.LookupEnv("DEPMAIN_TEST_VALUE") if !found { t.Errorf("LookupEnv: should have found value, got false") } _, found = ext.LookupEnv("DEPMAIN_UNKNOWN_TEST_VALUE") if found { t.Errorf("LookupEnv: should not have found value, got true") } } func TestTestExt(t *testing.T) { main := func(ext *depmain.Ext) { ext.Exit(3) } te := depmain.NewTestExt() main(te.Ext) if te.Exited() != true { t.Errorf("expected Exited to be true, got false") } if te.Code() != 3 { t.Errorf("expected ExitCode to be 3, got %d", te.Code()) } }
package main import ( // "fmt" "fmt" "golang-web-api/book" "golang-web-api/handler" "log" "github.com/gin-gonic/gin" "gorm.io/driver/mysql" "gorm.io/gorm" ) func main(){ //connection database Mysql dsn := "root:@tcp(127.0.0.1:3306)/golang_web?charset=utf8mb4&parseTime=True&loc=Local" db, err := gorm.Open(mysql.Open(dsn), &gorm.Config{}) if err !=nil { log.Fatal("DB connection error") } db.AutoMigrate(&book.Book{}) // fmt.Println("Database Connected") //CRUD //Create data // book := book.Book{} // book.Title = "Belajar Golang Lanjut" // book.Description = "Go Super" // book.Price = 100000 // book.Discount = 50 // book.Rating = 5 // err = db.Create(&book).Error // if err !=nil { // fmt.Println("============"); // fmt.Println("Error Creating Data"); // fmt.Println("============"); // } var book book.Book err = db.Debug().First(&book,1).Error if err !=nil { fmt.Println("============"); fmt.Println("Error Finding Data"); fmt.Println("============"); } fmt.Println("Title :", book.Title) fmt.Println("book object %v", book) router :=gin.Default() //cara membuat versioning API V1 v1 := router.Group("/v1") //membuat root url yang disarankan v1.GET("/", handler.RootHandler) v1.GET("/artikel", handler.RootArtikel) v1.GET("/books/:id/:title/:author", handler.BookHandler) v1.GET("/query", handler.QueryHandler) v1.POST("/books", handler.PostBooksHandler) //cara membuat versioning API V2 v2 := router.Group("/v2") //membuat root url yang disarankan v2.GET("/", handler.RootHandler) v2.GET("/artikel", handler.RootArtikel) v2.GET("/books/:id/:title/:author", handler.BookHandler) v2.GET("/query", handler.QueryHandler) v2.POST("/books", handler.PostBooksHandler) //--root url yang kurang disarankan-- // router.GET("/", func(c *gin.Context) { // c.JSON(http.StatusOK, gin.H{ // "name": "Luthfi Azizi", // "bio": "A story of coding", // }) // }) // router.GET("/artikel", func(c *gin.Context) { // c.JSON(http.StatusOK, gin.H{ // "title": "Learning Golang", // "content": "Learning Go language 
programming is fund and amazing", // }) // }) router.Run(":9090") } //memperbaiki root func yang lebih disarankan
package main import ( "log" "fmt" "github.com/gotk3/gotk3/gtk" // config "./config" file_transfer "./file_transfer" // utils "./utils" video_stream "./video_stream" widgets "./widgets" ) var ( role = "server" close = false win *gtk.Window ) func addClientSide(win *gtk.Window) { stackSwitcher := widgets.StackSwitcherNew() stack := widgets.StackNew() gridFileTransfer := file_transfer.SetupSenderUI(win) gridVideoStream := video_stream.SetupSenderUI(win) stack.AddTitled(gridFileTransfer, "Page1", "File Transfer") stack.AddTitled(gridVideoStream, "Page2", "Video Stream") stackSwitcher.SetStack(stack) box := widgets.BoxNew(gtk.ORIENTATION_VERTICAL, 0) box.PackStart(stackSwitcher, false, false, 0) box.PackStart(stack, true, true, 0) win.Add(box) } func addServerSide(win *gtk.Window){ stackSwitcher := widgets.StackSwitcherNew() stack := widgets.StackNew() gridFileTransfer := file_transfer.SetupReceiverUI(win) gridVideoStream := video_stream.SetupReceiverUI(win) stack.AddTitled(gridFileTransfer, "Page1", "File Transfer") stack.AddTitled(gridVideoStream, "Page2", "Video Stream") stackSwitcher.SetStack(stack) box := widgets.BoxNew(gtk.ORIENTATION_VERTICAL, 0) box.PackStart(stackSwitcher, false, false, 0) box.PackStart(stack, true, true, 0) win.Add(box) } func setupDialog() (){ dialog := widgets.DialogNew("MPQUIC Experiment", 300, 150) dialog.AddButton("OK", gtk.RESPONSE_OK) dialog.AddButton("Cancel", gtk.RESPONSE_CLOSE) contentArea, err := dialog.GetContentArea() if err != nil { log.Fatal("Unable to fetch contentArea: ", err) } box := widgets.BoxNew(gtk.ORIENTATION_VERTICAL, 0) clientButton := widgets.RadioButtonNew(nil, "Server", func(){ role = "server" }) serverButton := widgets.RadioButtonNew(clientButton, "Client", func(){ role = "client" }) box.PackStart(clientButton, false, false, 0) box.PackStart(serverButton, false, false, 0) contentArea.PackStart(widgets.LabelNew("Which role do you want to start?", false), false, false, 0) contentArea.PackStart(box, false, false, 0) 
dialog.ShowAll() reply := dialog.Run() if reply == gtk.RESPONSE_OK { fmt.Println("OK") } else { close = true } dialog.Destroy() } func main(){ gtk.Init(nil) setupDialog() log.Printf("Selected Role: %s", role) if role == "client"{ win = widgets.WindowNew("Client", 800, 200) addClientSide(win) } else { win = widgets.WindowNew("Server", 800, 600) addServerSide(win) } if !close { win.ShowAll() gtk.Main() } }
package main import ( "context" "fmt" "net/http" "github.com/PacktPublishing/Go-Programming-Cookbook-Second-Edition/chapter7/twirp/rpc/greeter" ) func main() { // you can put in a custom client for tighter controls on timeouts etc. client := greeter.NewGreeterServiceProtobufClient("http://localhost:4444", &http.Client{}) ctx := context.Background() req := greeter.GreetRequest{Greeting: "Hello", Name: "Reader"} resp, err := client.Greet(ctx, &req) if err != nil { panic(err) } fmt.Println(resp) req.Greeting = "Goodbye" resp, err = client.Greet(ctx, &req) if err != nil { panic(err) } fmt.Println(resp) }
package space import ( "github.com/gomeetups/gomeetups/fixtures" "github.com/gomeetups/gomeetups/models" ) // ServiceMemory Address store uses an in memory store type ServiceMemory struct{} // Get Returns space details for given space id func (*ServiceMemory) Get(spaceID string) (space *models.Space, err error) { for idx, record := range fixtures.Spaces { if record.SpaceID != spaceID { continue } space = &fixtures.Spaces[idx] } return space, nil } func (*ServiceMemory) GetMultiple(spaceIds []string) (spaces map[string]*models.Space, err error) { spaceMap := make(map[string]bool, len(spaceIds)) spaces = make(map[string]*models.Space) for _, id := range spaceIds { spaceMap[id] = true } for idx, space := range fixtures.Spaces { if _, ok := spaceMap[space.SpaceID]; !ok { continue } spaces[space.SpaceID] = &fixtures.Spaces[idx] } return spaces, nil }
package collector import ( "time" "github.com/jtaczanowski/tcp-pinger/pkg/config" "github.com/jtaczanowski/tcp-pinger/pkg/models" ) func Start(config *config.Config, pingerToCollectorChan chan models.Ping, collectorToAggregatorChan chan models.PingsCollection) { collection := models.PingsCollection{} ticker := time.NewTicker(time.Second * time.Duration(config.GraphiteIntervalSecond)) for { select { case ping := <-pingerToCollectorChan: collection = append(collection, ping) case <-ticker.C: collectorToAggregatorChan <- collection collection = models.PingsCollection{} } } }
package tools import ( config "Web_Api/tools/config" "Web_Api/models/tools" "Web_Api/pkg" "Web_Api/pkg/app" "github.com/gin-gonic/gin" "net/http" ) // @Summary 分页列表数据 / page list data // @Description 数据库表分页列表 / database table page list // @Tags 工具 / Tools // @Param tableName query string false "tableName / 数据表名称" // @Param pageSize query int false "pageSize / 页条数" // @Param pageIndex query int false "pageIndex / 页码" // @Success 200 {object} models.Response "{"code": 200, "data": [...]}" // @Router /api/v1/db/tables/page [get] func GetDBTableList(c *gin.Context) { var res app.Response var data tools.DBTables var err error var pageSize = 10 var pageIndex = 1 if config.DatabaseConfig.Dbtype == "sqlite3" { res.Msg = "对不起,sqlite3 暂时不支持代码生成!" c.JSON(http.StatusOK, res.ReturnError(500)) return } if size := c.Request.FormValue("pageSzie");size != ""{ pageSize =pkg.StrToInt(err, size) } if index := c.Request.FormValue("pageIndex");index!=""{ pageIndex = pkg.StrToInt(err, index) } data.TableName = c.Request.FormValue("tableNmae") result, count, err := data.GetPage(pageSize, pageIndex) pkg.HasError(err, "",-1) var mp = make(map[string]interface{}) mp["list"] = result mp["count"] = count mp["pageIndex"] = pageIndex mp["pageSize"] =pageSize res.Data = mp c.JSON(http.StatusOK, res.ReturnOK()) }
package schema import ( "time" "gopkg.in/mgo.v2/bson" ) // MemberRole describes the Membership role type MemberRole int // Member roles const ( MemberRoleOwner MemberRole = iota MemberRoleMember MemberRoleGuest ) var roles = [...]string{"owner", "member", "guest"} func (m MemberRole) String() string { return roles[m] } // Member represents an organization membership for a user type Member struct { ID bson.ObjectId `bson:"_id,omitempty"` UserID bson.ObjectId `bson:"user_id"` OrganizationID bson.ObjectId `bson:"org_id"` State string `bson:"state"` Published bool `bson:"published"` Role MemberRole `bson:"role"` InvitedBy bson.ObjectId `bson:"invited_by,omitempty"` InviteEmail string `bson:"invite_email,omitempty"` InviteToken string `bson:"invite_token,omitempty"` InviteSentAt time.Time `bson:"invite_sent_at,omitempty"` InviteAcceptedAt time.Time `bson:"invite_accepted_at,omitempty"` CreatedAt time.Time `bson:"created_at"` UpdatedAt time.Time `bson:"updated_at"` Errors Errors `json:"-" bson:"-"` } // IsPending returns if the organization membership state is pending func (m *Member) IsPending() bool { return m.State == "pending" } // IsActive returns if the organization membership state is active func (m *Member) IsActive() bool { return m.State == "active" } // IsPublished returns if the organization membership visibility is public func (m *Member) IsPublished() bool { return m.Published } // IsPrivate returns if the organization membership visibility is concealed func (m *Member) IsPrivate() bool { return !m.Published } // IsOwner retusn if the user is part of the Owners role group func (m *Member) IsOwner() bool { return m.Role == MemberRoleOwner } // IsMember returns if the user is part of the Member role group func (m *Member) IsMember() bool { return m.Role == MemberRoleMember } // IsGuest returns if the user is part of the Guest member role group func (m *Member) IsGuest() bool { return m.Role == MemberRoleGuest }
package main import ( "fmt" "crypto/sha256" "os" "io" ) func main() { /*//hash函数第一种 sum := sha256.Sum256([]byte("audiRStony")) fmt.Printf("%X\n",sum)*/ //第二种 /* h := sha256.New() h.Write([]byte("audiRStony")) fmt.Printf("%X\n",h.Sum(nil))*/ //第三种。文件操作 h := sha256.New() file_data,err1 := os.Open("test_hash.txt") defer file_data.Close() if err1 != nil{ fmt.Println(err1) } if _,err2 := io.Copy(h,file_data); err2 != nil{ fmt.Println(err2) } fmt.Printf("%X\n",h.Sum(nil)) }
package aws import ( "context" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/ec2" "github.com/pkg/errors" ) // InstanceTypeInfo describes the instance type type InstanceTypeInfo struct { Name string vCPU int64 } // InstanceTypes returns information of the all the instance types available for a region. // It returns a map of instance type name to it's information. func InstanceTypes(ctx context.Context, sess *session.Session, region string) (map[string]InstanceTypeInfo, error) { ret := map[string]InstanceTypeInfo{} client := ec2.New(sess, aws.NewConfig().WithRegion(region)) if err := client.DescribeInstanceTypesPagesWithContext(ctx, &ec2.DescribeInstanceTypesInput{}, func(page *ec2.DescribeInstanceTypesOutput, lastPage bool) bool { for _, info := range page.InstanceTypes { ti := InstanceTypeInfo{Name: aws.StringValue(info.InstanceType)} if info.VCpuInfo == nil { continue } ti.vCPU = aws.Int64Value(info.VCpuInfo.DefaultVCpus) ret[ti.Name] = ti } return !lastPage }); err != nil { return nil, err } return ret, nil } // IsUnauthorizedOperation checks if the error is un authorized due to permission failure or lack of service availability. func IsUnauthorizedOperation(err error) bool { if err == nil { return false } var awsErr awserr.Error if errors.As(err, &awsErr) { // see reference: // https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html#CommonErrors return awsErr.Code() == "UnauthorizedOperation" || awsErr.Code() == "AuthFailure" || awsErr.Code() == "Blocked" } return false }
package authority import ( "time" "github.com/aghape/core" "github.com/aghape/core/utils" "github.com/moisespsena/go-route" ) // ClaimsContextKey authority claims key var ClaimsContextKey utils.ContextKey = "authority_claims" // Middleware authority middleware used to record activity time func (authority *Authority) Middleware() *route.Middleware { return &route.Middleware{ Name: "qor:authority", After: []string{"qor:session"}, Handler: func(chain *route.ChainHandler) { context := core.ContexFromChain(chain) sm := context.SessionManager() if claims, err := authority.Auth.Get(sm); err == nil { var zero time.Duration lastActiveAt := claims.LastActiveAt if lastActiveAt != nil { lastDistractionTime := time.Now().Sub(*lastActiveAt) if claims.LongestDistractionSinceLastLogin == nil || *claims.LongestDistractionSinceLastLogin < lastDistractionTime { claims.LongestDistractionSinceLastLogin = &lastDistractionTime } if claims.LastLoginAt != nil { if claims.LastLoginAt.After(*claims.LastActiveAt) { claims.LongestDistractionSinceLastLogin = &zero } else if loggedDuration := claims.LastActiveAt.Sub(*claims.LastLoginAt); *claims.LongestDistractionSinceLastLogin > loggedDuration { claims.LongestDistractionSinceLastLogin = &loggedDuration } } } else { claims.LongestDistractionSinceLastLogin = &zero } now := time.Now() claims.LastActiveAt = &now authority.Auth.Update(sm, claims) } chain.Pass() }, } }
package database import ( "github.com/jinzhu/gorm" ) // Package is the ORM mapping for the packages table in MySql type Package struct { gorm.Model UserID uint Name string `gorm:"type:varchar(100)"` Path string `gorm:"not null"` Version string `gorm:"type:varchar(40);not null"` IsLatest bool }
package alicloud import ( "github.com/hashicorp/terraform/helper/resource" "testing" ) func TestAccAlicloudDnsDomainGroupsDataSource_name_regex(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ { Config: testAccCheckAlicloudDomainGroupsDataSourceNameRegexConfig, Check: resource.ComposeTestCheckFunc( testAccCheckAlicloudDataSourceID("data.alicloud_dns_domain_groups.group"), resource.TestCheckResourceAttr("data.alicloud_dns_domain_groups.group", "groups.#", "1"), resource.TestCheckResourceAttr("data.alicloud_dns_domain_groups.group", "groups.0.group_id", "520fa32a-076b-4f80-854d-987046e223fe"), resource.TestCheckResourceAttr("data.alicloud_dns_domain_groups.group", "groups.0.group_name", "yuy"), ), }, }, }) } const testAccCheckAlicloudDomainGroupsDataSourceNameRegexConfig = ` data "alicloud_dns_domain_groups" "group" { name_regex = "^yu" }`
package main import ( "fmt" "html/template" "os" ) type Person struct { Name string Age int } func main() { t,err:=template.ParseFiles("E:/Project/src/GoRoutine/Web/template/index.html") //加载模板 if err!=nil{ fmt.Println("parse file err",err) return } p:=Person{"张三",19} err=t.Execute(os.Stdout,p) //渲染模板 if err!=nil{ fmt.Println("There was an err",err) } }
package dbserver_test import ( "os" "time" "io/ioutil" "gopkg.in/mgo.v2" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "themis/mockdb" ) var _ = Describe("Dbserver", func() { type M map[string]interface{} var oldCheckSessions string var server dbserver.DBServer BeforeEach(func() { oldCheckSessions = os.Getenv("CHECK_SESSIONS") os.Setenv("CHECK_SESSIONS", "1") dir, _ := ioutil.TempDir("", "themis_test") server.SetPath(dir) defer server.Stop() }) AfterEach(func() { os.Setenv("CHECK_SESSIONS", oldCheckSessions) }) Describe("Mongo database server", func() { Context("As a mock service", func() { It("should be able to wipe the database", func() { session := server.Session() err := session.DB("mydb").C("mycoll").Insert(M{"a": 1}) session.Close() Expect(err).To(BeNil()) server.Wipe() session = server.Session() names, err := session.DatabaseNames() session.Close() Expect(err).To(BeNil()) for _, name := range names { if name != "local" && name != "admin" { Fail("Wipe should have removed this database: " + name) } } }) It("should be able to stop", func() { // Server should not be running. process := server.ProcessTest() Expect(process).To(BeNil()) session := server.Session() addr := session.LiveServers()[0] session.Close() // Server should be running now. process = server.ProcessTest() p, err := os.FindProcess(process.Pid) Expect(err).To(BeNil()) p.Release() server.Stop() // Server should not be running anymore. session, err = mgo.DialWithTimeout(addr, 500*time.Millisecond) if session != nil { session.Close() Fail("Stop did not stop the server") } }) It("be able to wipe with no check sessions", func() { os.Setenv("CHECK_SESSIONS", "0") // Should not panic, although it looks to Wipe like this session will leak. session := server.Session() defer session.Close() server.Wipe() }) }) }) })
package gorms import ( "gorm.io/driver/mysql" ) func NewMySqlAdapter(settings GormSettings) *adapter { return &adapter{ dialector: mysql.Open(settings.ConnectionString), settings: settings, } }
package main import ( "encoding/json" "net" ) type udpMesh struct { conn *net.UDPConn address *net.UDPAddr onPacket func(packet *adPacket) } func newUdpMesh(udpHostAndPort string, onPacket func(packet *adPacket)) (udp *udpMesh, err error) { var udpaddr *net.UDPAddr if udpaddr, err = net.ResolveUDPAddr("udp4", udpHostAndPort); err != nil { return nil, err } var conn *net.UDPConn if conn, err = net.ListenUDP("udp4", udpaddr); err != nil { return nil, err } mesh := &udpMesh{ conn: conn, address: udpaddr, onPacket: onPacket, } go mesh.start() return mesh, nil } func (m *udpMesh) send(packet *adPacket) error { payload, err := json.Marshal(packet) if err != nil { return err } _, err = m.conn.WriteToUDP(payload, m.address) return err } func (m *udpMesh) start() { for { buffer := make([]byte, 256) if c, addr, err := m.conn.ReadFromUDP(buffer); err != nil { log.Infof("ble mesh: %d byte datagram from %s with error %s\n", c, addr.String(), err.Error()) } else { packet := &adPacket{} err := json.Unmarshal(buffer[:c], packet) if err != nil { log.Errorf("Invalid JSON: %s", err) } else { m.onPacket(packet) } } } }
package kbucket

import (
	"math/rand"
	"testing"
	"time"

	peer "gx/ipfs/QmPJxxDsX2UbchSHobbYuvz7qnyJTFKvaKMzE2rZWJ4x5B/go-libp2p-peer"
	pstore "gx/ipfs/QmQFFp4ntkd4C14sP3FaH9WJyBuetuGUVo6dShNHvnoEvC/go-libp2p-peerstore"
	tu "gx/ipfs/QmVnJMgafh5MBYiyqbvDtoCL8pcQvbEGD2k9o9GFpBWPzY/go-testutil"
)

// Test basic features of the bucket struct
func TestBucket(t *testing.T) {
	b := newBucket()

	// Fill the bucket with 100 random peer IDs.
	peers := make([]peer.ID, 100)
	for i := 0; i < 100; i++ {
		peers[i] = tu.RandPeerIDFatal(t)
		b.PushFront(peers[i])
	}

	local := tu.RandPeerIDFatal(t)
	localID := ConvertPeerID(local)

	// A randomly chosen inserted peer must be found.
	i := rand.Intn(len(peers))
	if !b.Has(peers[i]) {
		t.Errorf("Failed to find peer: %v", peers[i])
	}

	// After splitting at common-prefix-length 0, the original bucket must
	// hold only cpl==0 peers and the new bucket only cpl>0 peers.
	spl := b.Split(0, ConvertPeerID(local))
	llist := b.list
	for e := llist.Front(); e != nil; e = e.Next() {
		p := ConvertPeerID(e.Value.(peer.ID))
		cpl := commonPrefixLen(p, localID)
		if cpl > 0 {
			t.Fatalf("Split failed. found id with cpl > 0 in 0 bucket")
		}
	}

	rlist := spl.list
	for e := rlist.Front(); e != nil; e = e.Next() {
		p := ConvertPeerID(e.Value.(peer.ID))
		cpl := commonPrefixLen(p, localID)
		if cpl == 0 {
			t.Fatalf("Split failed. found id with cpl == 0 in non 0 bucket")
		}
	}
}

// TestTableCallbacks verifies that PeerAdded/PeerRemoved callbacks fire and
// that the callback-maintained set matches ListPeers exactly.
func TestTableCallbacks(t *testing.T) {
	local := tu.RandPeerIDFatal(t)
	m := pstore.NewMetrics()
	rt := NewRoutingTable(10, ConvertPeerID(local), time.Hour, m)

	peers := make([]peer.ID, 100)
	for i := 0; i < 100; i++ {
		peers[i] = tu.RandPeerIDFatal(t)
	}

	// Mirror the table's membership into pset via the callbacks.
	pset := make(map[peer.ID]struct{})
	rt.PeerAdded = func(p peer.ID) {
		pset[p] = struct{}{}
	}
	rt.PeerRemoved = func(p peer.ID) {
		delete(pset, p)
	}

	rt.Update(peers[0])
	if _, ok := pset[peers[0]]; !ok {
		t.Fatal("should have this peer")
	}

	rt.Remove(peers[0])
	if _, ok := pset[peers[0]]; ok {
		t.Fatal("should not have this peer")
	}

	for _, p := range peers {
		rt.Update(p)
	}

	out := rt.ListPeers()
	for _, outp := range out {
		if _, ok := pset[outp]; !ok {
			t.Fatal("should have peer in the peerset")
		}
		delete(pset, outp)
	}
	if len(pset) > 0 {
		t.Fatal("have peers in peerset that were not in the table", len(pset))
	}
}

// Right now, this just makes sure that it doesnt hang or crash
func TestTableUpdate(t *testing.T) {
	local := tu.RandPeerIDFatal(t)
	m := pstore.NewMetrics()
	rt := NewRoutingTable(10, ConvertPeerID(local), time.Hour, m)

	peers := make([]peer.ID, 100)
	for i := 0; i < 100; i++ {
		peers[i] = tu.RandPeerIDFatal(t)
	}

	// Testing Update
	for i := 0; i < 10000; i++ {
		rt.Update(peers[rand.Intn(len(peers))])
	}

	// Random lookups should always find some nearby peers.
	for i := 0; i < 100; i++ {
		id := ConvertPeerID(tu.RandPeerIDFatal(t))
		ret := rt.NearestPeers(id, 5)
		if len(ret) == 0 {
			t.Fatal("Failed to find node near ID.")
		}
	}
}

// TestTableFind checks that a known peer is its own nearest peer.
func TestTableFind(t *testing.T) {
	local := tu.RandPeerIDFatal(t)
	m := pstore.NewMetrics()
	rt := NewRoutingTable(10, ConvertPeerID(local), time.Hour, m)

	peers := make([]peer.ID, 100)
	for i := 0; i < 5; i++ {
		peers[i] = tu.RandPeerIDFatal(t)
		rt.Update(peers[i])
	}

	t.Logf("Searching for peer: '%s'", peers[2])
	found := rt.NearestPeer(ConvertPeerID(peers[2]))
	if !(found == peers[2]) {
		t.Fatalf("Failed to lookup known node...")
	}
}

// TestTableFindMultiple checks that NearestPeers honors the requested count.
func TestTableFindMultiple(t *testing.T) {
	local := tu.RandPeerIDFatal(t)
	m := pstore.NewMetrics()
	rt := NewRoutingTable(20, ConvertPeerID(local), time.Hour, m)

	peers := make([]peer.ID, 100)
	for i := 0; i < 18; i++ {
		peers[i] = tu.RandPeerIDFatal(t)
		rt.Update(peers[i])
	}

	t.Logf("Searching for peer: '%s'", peers[2])
	found := rt.NearestPeers(ConvertPeerID(peers[2]), 15)
	if len(found) != 15 {
		t.Fatalf("Got back different number of peers than we expected.")
	}
}

// Looks for race conditions in table operations. For a more 'certain'
// test, increase the loop counter from 1000 to a much higher number
// and set GOMAXPROCS above 1
func TestTableMultithreaded(t *testing.T) {
	local := peer.ID("localPeer")
	m := pstore.NewMetrics()
	tab := NewRoutingTable(20, ConvertPeerID(local), time.Hour, m)
	var peers []peer.ID
	for i := 0; i < 500; i++ {
		peers = append(peers, tu.RandPeerIDFatal(t))
	}

	// Two concurrent updaters plus one concurrent finder.
	done := make(chan struct{})
	go func() {
		for i := 0; i < 1000; i++ {
			n := rand.Intn(len(peers))
			tab.Update(peers[n])
		}
		done <- struct{}{}
	}()

	go func() {
		for i := 0; i < 1000; i++ {
			n := rand.Intn(len(peers))
			tab.Update(peers[n])
		}
		done <- struct{}{}
	}()

	go func() {
		for i := 0; i < 1000; i++ {
			n := rand.Intn(len(peers))
			tab.Find(peers[n])
		}
		done <- struct{}{}
	}()
	<-done
	<-done
	<-done
}

// BenchmarkUpdates measures routing-table insert/refresh throughput.
func BenchmarkUpdates(b *testing.B) {
	b.StopTimer()
	local := ConvertKey("localKey")
	m := pstore.NewMetrics()
	tab := NewRoutingTable(20, local, time.Hour, m)

	var peers []peer.ID
	for i := 0; i < b.N; i++ {
		peers = append(peers, tu.RandPeerIDFatal(b))
	}

	b.StartTimer()
	for i := 0; i < b.N; i++ {
		tab.Update(peers[i])
	}
}

// BenchmarkFinds measures lookup throughput on a pre-populated table.
func BenchmarkFinds(b *testing.B) {
	b.StopTimer()
	local := ConvertKey("localKey")
	m := pstore.NewMetrics()
	tab := NewRoutingTable(20, local, time.Hour, m)

	var peers []peer.ID
	for i := 0; i < b.N; i++ {
		peers = append(peers, tu.RandPeerIDFatal(b))
		tab.Update(peers[i])
	}

	b.StartTimer()
	for i := 0; i < b.N; i++ {
		tab.Find(peers[i])
	}
}
/* The challenge is simply; output the following six 2D integer arrays: [[ 1, 11, 21, 31, 41, 51], [ 3, 13, 23, 33, 43, 53], [ 5, 15, 25, 35, 45, 55], [ 7, 17, 27, 37, 47, 57], [ 9, 19, 29, 39, 49, 59]] [[ 2, 11, 22, 31, 42, 51], [ 3, 14, 23, 34, 43, 54], [ 6, 15, 26, 35, 46, 55], [ 7, 18, 27, 38, 47, 58], [10, 19, 30, 39, 50, 59]] [[ 4, 13, 22, 31, 44, 53], [ 5, 14, 23, 36, 45, 54], [ 6, 15, 28, 37, 46, 55], [ 7, 20, 29, 38, 47, 60], [12, 21, 30, 39, 52]] [[ 8, 13, 26, 31, 44, 57], [ 9, 14, 27, 40, 45, 58], [10, 15, 28, 41, 46, 59], [11, 24, 29, 42, 47, 60], [12, 25, 30, 43, 56]] [[16, 21, 26, 31, 52, 57], [17, 22, 27, 48, 53, 58], [18, 23, 28, 49, 54, 59], [19, 24, 29, 50, 55, 60], [20, 25, 30, 51, 56]] [[32, 37, 42, 47, 52, 57], [33, 38, 43, 48, 53, 58], [34, 39, 44, 49, 54, 59], [35, 40, 45, 50, 55, 60], [36, 41, 46, 51, 56]] What are these 2D integer arrays? These are the numbers used in a magic trick with cards containing these numbers: https://i.stack.imgur.com/OtNyd.png The magic trick asks someone to think of a number in the range [1, 60], and give the one performing the magic trick all the cards which contain this number. The one performing the magic trick can then sum the top-left numbers (all a power of 2) of the given cards to get to the number the person was thinking of. Some additional explanation of why this works can be found here. Challenge rules: You can output the six 2D integer arrays in any reasonable format. Can be printed with delimiters; can be a 3D integer array containing the six 2D integer arrays; can be a string-list of lines; etc. You are allowed to fill the bottom right position of the last four cards with a negative value in the range [-60, -1] or character '*' instead of leaving it out to make the 2D integer arrays rectangular matrices (no, you are not allowed to fill them with 0 or a non-integer like null/undefined as alternative, with the exception of * since a star is also used in the actual cards). 
The order of the numbers in the matrices is mandatory. Although it doesn't matter for the physical magic trick, I see this challenge mainly as a matrix-kolmogorov-complexity one, hence the restriction on order. The order of the matrices themselves in the output list can be in any order, since it's clear from the top-left card which matrix is which. General rules: This is code-golf, so shortest answer in bytes wins. Don't let code-golf languages discourage you from posting answers with non-codegolfing languages. Try to come up with an as short as possible answer for 'any' programming language. Standard rules apply for your answer with default I/O rules, so you are allowed to use STDIN/STDOUT, functions/method with the proper parameters and return-type, full programs. Your call. Default Loopholes are forbidden. If possible, please add a link with a test for your code (i.e. TIO). Also, adding an explanation for your answer is highly recommended. */ package main import "fmt" func main() { output() } func output() { A1 := [][]int{ {1, 11, 21, 31, 41, 51}, {3, 13, 23, 33, 43, 53}, {5, 15, 25, 35, 45, 55}, {7, 17, 27, 37, 47, 57}, {9, 19, 29, 39, 49, 59}, } A2 := [][]int{ {2, 11, 22, 31, 42, 51}, {3, 14, 23, 34, 43, 54}, {6, 15, 26, 35, 46, 55}, {7, 18, 27, 38, 47, 58}, {10, 19, 30, 39, 50, 59}, } A3 := [][]int{ {4, 13, 22, 31, 44, 53}, {5, 14, 23, 36, 45, 54}, {6, 15, 28, 37, 46, 55}, {7, 20, 29, 38, 47, 60}, {12, 21, 30, 39, 52}, } A4 := [][]int{ {8, 13, 26, 31, 44, 57}, {9, 14, 27, 40, 45, 58}, {10, 15, 28, 41, 46, 59}, {11, 24, 29, 42, 47, 60}, {12, 25, 30, 43, 56}, } A5 := [][]int{ {16, 21, 26, 31, 52, 57}, {17, 22, 27, 48, 53, 58}, {18, 23, 28, 49, 54, 59}, {19, 24, 29, 50, 55, 60}, {20, 25, 30, 51, 56}, } A6 := [][]int{ {32, 37, 42, 47, 52, 57}, {33, 38, 43, 48, 53, 58}, {34, 39, 44, 49, 54, 59}, {35, 40, 45, 50, 55, 60}, {36, 41, 46, 51, 56}, } fmt.Println(A1) fmt.Println(A2) fmt.Println(A3) fmt.Println(A4) fmt.Println(A5) fmt.Println(A6) }
package feed

import (
	"encoding/json"
	"net/http"

	"camp/lib"
	"camp/skel/api"
	"camp/skel/service"

	"github.com/simplejia/clog/api"
)

// GetReq defines the input of the Get handler.
type GetReq struct {
	ID  int    `json:"id"`
	Txt string `json:"txt"`
}

// Regular validates the request: ok only for a non-nil request with a
// positive ID and a non-empty Txt.
func (getReq *GetReq) Regular() (ok bool) {
	switch {
	case getReq == nil:
	case getReq.ID <= 0:
	case getReq.Txt == "":
	default:
		ok = true
	}
	return
}

// GetResp defines the output of the Get handler.
type GetResp struct {
	Feed *api.Feed `json:"skel,omitempty"`
}

// Get just for demo
// @postfilter("Boss")
func (feedc *FeedC) Get(w http.ResponseWriter, r *http.Request) {
	fun := "feed.Feed.Get"

	var getReq *GetReq
	err := json.Unmarshal(feedc.ReadBody(r), &getReq)
	if err != nil || !getReq.Regular() {
		clog.Error("%s param err: %v, req: %v", fun, err, getReq)
		feedc.ReplyFail(w, lib.CodePara)
		return
	}

	feedApi, err := service.NewFeed().Get(getReq.ID, getReq.Txt)
	if err != nil {
		clog.Error("%s feed.Get err: %v, req: %v", fun, err, getReq)
		feedc.ReplyFail(w, lib.CodeSrv)
		return
	}

	resp := &GetResp{
		Feed: feedApi,
	}
	feedc.ReplyOk(w, resp)

	// asynchronous post-processing
	go lib.Updates(feedApi, lib.GET, nil)

	return
}
package main func deleteDuplicates(head *ListNode) *ListNode { p := head for p != nil && p.Next != nil{ if p.Val == p.Next.Val{ p.Next = p.Next.Next }else{ p = p.Next } } return head }
package spidercore

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

// getRequest executes the prepared request and returns the response
// body as a string. The response body is always closed.
// NOTE(review): the client has no Timeout set — a hung server blocks
// forever; confirm whether callers expect that before adding one.
func getRequest(request *http.Request) (string, error) {
	client := &http.Client{}
	res, err := client.Do(request)
	if err != nil {
		fmt.Println("client.Do error")
		return "", err
	}
	defer res.Body.Close()
	content, err := ioutil.ReadAll(res.Body)
	if err != nil {
		fmt.Println("ioutil.ReadAll error")
		return "", err
	}
	return string(content), nil
}

// Request performs an HTTP request of the given type against url and
// returns the body, or "" when the type is unsupported or any step
// fails. Only "GET" is currently implemented.
func Request(reqType string, url string) string {
	if reqType != "GET" {
		return ""
	}
	request, err := BuildGetHeaders(url)
	if err != nil {
		fmt.Println("GET request error", err)
		// Bug fix: previously execution continued and the nil request
		// was handed to getRequest, which panics inside Client.Do.
		return ""
	}
	content, err := getRequest(request)
	if err != nil {
		fmt.Println("getRequest error", err)
		return ""
	}
	return content
}
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// getOpenPorts runs the helper shell script and returns its output as a
// slice of lines (one open port per line), or nil when the script
// cannot be executed.
func getOpenPorts() []string {
	// Shell script that prints the open ports.
	cmd := "./Bash Functions/getOpenPorts.sh"

	out, err := exec.Command(cmd).Output()
	if err != nil {
		// Bug fix: the error was silently discarded, so a missing or
		// non-executable script produced a confusing [""] result.
		fmt.Println("getOpenPorts error:", err)
		return nil
	}

	ports := strings.Trim(string(out), "\n")
	return strings.Split(ports, "\n")
}

func main() {
	fmt.Println(getOpenPorts())
}
package bilibili

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

const (
	headerLENGTH = 16 // header size in bytes
	deviceTYPE   = 1
	device       = 1
)

const (
	// cmd types
	danmuMSG     = "DANMU_MSG"
	danmuGIFT    = "DANMU_GIFT"
	danmuWelcome = "WELCOME"

	DANMU_MSG = "DANMU_MSG"

	// stream offline
	LIVE_OFF = 0
	// stream live
	LIVE_ON = 1
	// room replaying recorded content
	LiVE_ROTATE = 2
)

// Message is one packet of the danmu protocol: a raw body plus the
// protocol operation code it is sent with.
type Message struct {
	body     []byte
	bodyType int32
}

// NewHandshakeMessage builds the initial handshake packet (operation 7)
// joining the given room as the given user.
func NewHandshakeMessage(roomid, uid int) *Message {
	payload := fmt.Sprintf(`{"roomid":%d,"uid":%d}`, roomid, uid)
	return &Message{body: []byte(payload), bodyType: 7}
}

// NewHeartbeatMessage builds an empty keep-alive packet (operation 2).
func NewHeartbeatMessage() *Message {
	return &Message{body: []byte(""), bodyType: 2}
}

// NewMessage wraps an arbitrary body with the given operation code.
func NewMessage(b []byte, btype int) *Message {
	return &Message{body: b, bodyType: int32(btype)}
}

// Encode serializes the message into wire format: a 16-byte big-endian
// header (total length, header length, device type, operation, device)
// followed by the raw body. Writes to a bytes.Buffer cannot fail, so
// the binary.Write errors are ignored.
func (msg *Message) Encode() []byte {
	var buf bytes.Buffer
	binary.Write(&buf, binary.BigEndian, int32(len(msg.body)+headerLENGTH)) // total packet length
	binary.Write(&buf, binary.BigEndian, int16(headerLENGTH))
	binary.Write(&buf, binary.BigEndian, int16(deviceTYPE))
	binary.Write(&buf, binary.BigEndian, int32(msg.bodyType))
	binary.Write(&buf, binary.BigEndian, int32(device))
	binary.Write(&buf, binary.BigEndian, msg.body)
	return buf.Bytes()
}

// Decode is a placeholder; parsing incoming packets is not implemented.
func (msg *Message) Decode() *Message {
	// TODO
	return msg
}

// func (msg *Message) GetCmd() string {
//	jc, err := rrconfig.LoadJsonConfigFromBytes(msg.body)
//	if err != nil {
//		log.Error(err)
//		return "INVALID"
//	}
//	cmd, err := jc.GetString("cmd")
//	if err != nil {
//		log.Error(err)
//		return "ERROR"
//	}
//	return cmd
// }

// Bytes returns the raw message body.
func (msg *Message) Bytes() []byte {
	return msg.body
}
package main import ( "fmt" "log" "gopkg.in/couchbase/gocb.v1" ) func main() { // Uncomment following line to enable logging // gocb.SetLogger(gocb.VerboseStdioLogger()) endpoint := "cb.e493356f-f395-4561-a6b5-a3a1ec0aaa29.dp.cloud.couchbase.com" bucketName := "couchbasecloudbucket" username := "user" password := "password" // Initialize the Connection cluster, err := gocb.Connect("couchbases://" + endpoint + "?ssl=no_verify") if err != nil { log.Fatal(err) } _ = cluster.Authenticate(gocb.PasswordAuthenticator{ Username: username, Password: password, }) bucket, err := cluster.OpenBucket(bucketName, "") if err != nil { log.Fatal(err) } fmt.Println("Connected..") // Create a N1QL Primary Index (but ignore if it exists) err = bucket.Manager("", "").CreatePrimaryIndex("", true, false) if err != nil { log.Fatal(err) } type User struct { Name string `json:"name"` Email string `json:"email"` Interests []string `json:"interests"` } // Create and store a Document _, err = bucket.Upsert("u:kingarthur", User{ Name: "Arthur", Email: "kingarthur@couchbase.com", Interests: []string{"Holy Grail", "African Swallows"}, }, 0) if err != nil { log.Fatal(err) } // Get the document back var inUser User _, err = bucket.Get("u:kingarthur", &inUser) if err != nil { log.Fatal(err) } fmt.Printf("User: %v\n", inUser) // Perform a N1QL Query query := gocb.NewN1qlQuery(fmt.Sprintf("SELECT name FROM `%s` WHERE $1 IN interests", bucketName)) rows, err := bucket.ExecuteN1qlQuery(query, []interface{}{"African Swallows"}) if err != nil { log.Fatal(err) } // Print each found Row var row interface{} for rows.Next(&row) { fmt.Printf("Row: %v", row) } }
// ========================================================================== // 云捷GO自动生成业务逻辑层相关代码,只生成一次,按需修改,再次生成不会覆盖. // 生成日期:2020-02-18 15:44:13 // 生成路径: app/service/module/job/job_service.go // 生成人:yunjie // ========================================================================== package job import ( "errors" "github.com/gin-gonic/gin" "strings" "time" jobModel "yj-app/app/model/monitor/job" userService "yj-app/app/service/system/user" "yj-app/app/task" "yj-app/app/yjgframe/cron" "yj-app/app/yjgframe/utils/convert" "yj-app/app/yjgframe/utils/gconv" "yj-app/app/yjgframe/utils/page" ) //根据主键查询数据 func SelectRecordById(id int64) (*jobModel.Entity, error) { entity := &jobModel.Entity{JobId: id} _, err := entity.FindOne() return entity, err } //根据主键删除数据 func DeleteRecordById(id int64) bool { entity := &jobModel.Entity{JobId: id} result, err := entity.Delete() if err == nil && result > 0 { return true } return false } //批量删除数据记录 func DeleteRecordByIds(ids string) int64 { idarr := convert.ToInt64Array(ids, ",") result, _ := jobModel.DeleteBatch(idarr...) 
return result } //添加数据 func AddSave(req *jobModel.AddReq, c *gin.Context) (int64, error) { //检查任务名称是否存在 rs := cron.Get(req.JobName) if rs != nil { return 0, errors.New("任务名称已经存在") } //可以task目录下是否绑定对应的方法 f := task.GetByName(req.JobName) if f == nil { return 0, errors.New("当前task目录下没有绑定这个方法") } var entity jobModel.Entity entity.JobName = req.JobName entity.JobParams = req.JobParams entity.JobGroup = req.JobGroup entity.InvokeTarget = req.InvokeTarget entity.CronExpression = req.CronExpression entity.MisfirePolicy = req.MisfirePolicy entity.Concurrent = req.Concurrent entity.Status = req.Status entity.Remark = req.Remark entity.CreateTime = time.Now() entity.CreateBy = "" user := userService.GetProfile(c) if user != nil { entity.CreateBy = user.LoginName } entity.Insert() return entity.JobId, nil } //修改数据 func EditSave(req *jobModel.EditReq, c *gin.Context) (int64, error) { //检查任务名称是否存在 tmp := cron.Get(req.JobName) if tmp != nil { tmp.Stop() } //可以task目录下是否绑定对应的方法 f := task.GetByName(req.JobName) if f == nil { return 0, errors.New("当前task目录下没有绑定这个方法") } entity := &jobModel.Entity{JobId: req.JobId} _, err := entity.FindOne() if err != nil { return 0, err } if entity == nil { return 0, errors.New("数据不存在") } entity.InvokeTarget = req.InvokeTarget entity.JobParams = req.JobParams entity.CronExpression = req.CronExpression entity.MisfirePolicy = req.MisfirePolicy entity.Concurrent = req.Concurrent entity.Status = req.Status entity.Remark = req.Remark entity.UpdateTime = time.Now() entity.UpdateBy = "" user := userService.GetProfile(c) if user == nil { entity.UpdateBy = user.LoginName } return entity.Update() } //初始化任务状态 func Init() { list, err := jobModel.SelectListAll(nil) if err != nil { return } stopIds := "" startIds := "" for i := 0; i < len(list); i++ { if len(list[i].JobName) > 0 { rs := cron.Get(list[i].JobName) if list[i].Status == "0" && rs == nil { if stopIds == "" { stopIds = gconv.String(list[i].JobId) } else { stopIds += "," + gconv.String(list[i].JobId) } } 
if list[i].Status == "1" && rs != nil { if startIds == "" { startIds = gconv.String(list[i].JobId) } else { startIds += "," + gconv.String(list[i].JobId) } } } } if stopIds != "" { jobModel.UpdateState(stopIds, "1") } if startIds != "" { jobModel.UpdateState(startIds, "0") } } //启动任务 func Start(entity *jobModel.Entity) error { //可以task目录下是否绑定对应的方法 f := task.GetByName(entity.JobName) if f == nil { return errors.New("当前task目录下没有绑定这个方法") } //传参 paramArr := strings.Split(entity.JobParams, "|") task.EditParams(f.FuncName, paramArr) rs := cron.Get(entity.JobName) if rs == nil { if entity.MisfirePolicy == "1" { j, err := cron.New(entity, f.Run) if err != nil && j == nil { return err } entity.Status = "0" entity.Update() } else { f.Run() } } else { return errors.New("任务已存在") } return nil } //停止任务 func Stop(entity *jobModel.Entity) error { //可以task目录下是否绑定对应的方法 f := task.GetByName(entity.JobName) if f == nil { return errors.New("当前task目录下没有绑定这个方法") } rs := cron.Get(entity.JobName) if rs != nil { rs.Stop() } entity.Status = "1" entity.Update() return nil } //根据条件查询数据 func SelectListAll(params *jobModel.SelectPageReq) ([]jobModel.Entity, error) { return jobModel.SelectListAll(params) } //根据条件分页查询数据 func SelectListByPage(params *jobModel.SelectPageReq) (*[]jobModel.Entity, *page.Paging, error) { return jobModel.SelectListByPage(params) } // 导出excel func Export(param *jobModel.SelectPageReq) (string, error) { head := []string{"任务ID", "任务名称", "任务组名", "调用目标字符串", "cron执行表达式", "计划执行错误策略(1立即执行 2执行一次 3放弃执行)", "是否并发执行(0允许 1禁止)", "状态(0正常 1暂停)", "创建者", "创建时间", "更新者", "更新时间", "备注信息"} col := []string{"job_id", "job_name", "job_group", "invoke_target", "cron_expression", "misfire_policy", "concurrent", "status", "create_by", "create_time", "update_by", "update_time", "remark"} return jobModel.SelectListExport(param, head, col) }
package nginx

import (
	"sort"
	"testing"
	"time"
)

// expectDate fails the test when date a (found at index i of the slice
// under test) does not match the expected year/month/day.
func expectDate(t *testing.T, a Date, i int, yy int, mm time.Month, dd int) {
	if a.Year != yy {
		t.Errorf("Unexpected year for index %d: %d", i, a.Year)
	}
	if a.Month != mm {
		t.Errorf("Unexpected month for index %d: %s", i, a.Month)
	}
	if a.Day != dd {
		t.Errorf("Unexpected day for index %d: %d", i, a.Day)
	}
}

// Test the sorting of dates
func TestDates(t *testing.T) {
	examples := []Date{
		{2014, 3, 4},
		{2014, 2, 4},
		{2014, 3, 2},
		{2014, 4, 4},
		{2015, 3, 4},
		{2013, 3, 4},
	}
	sort.Sort(Dates(examples))
	// Bug fix: the index argument was previously always the literal 0,
	// so failure messages pointed at the wrong element. Also check all
	// six sorted positions instead of only four of them.
	expectDate(t, examples[0], 0, 2013, 3, 4)
	expectDate(t, examples[1], 1, 2014, 2, 4)
	expectDate(t, examples[2], 2, 2014, 3, 2)
	expectDate(t, examples[3], 3, 2014, 3, 4)
	expectDate(t, examples[4], 4, 2014, 4, 4)
	expectDate(t, examples[5], 5, 2015, 3, 4)
}

// expectDateCount fails the test when a DateCount entry does not match
// the expected date and count.
func expectDateCount(t *testing.T, a DateCount, d Date, c int64) {
	dd := a.Date
	cc := a.Count
	if dd.Year != d.Year {
		t.Errorf("Unexpected year %d", dd.Year)
	}
	if dd.Month != d.Month {
		t.Errorf("Unexpected month %s", dd.Month)
	}
	if dd.Day != d.Day {
		t.Errorf("Unexpected day %d", dd.Day)
	}
	if cc != c {
		t.Errorf("Unexpected count: %d", cc)
	}
}

// TestDateCounter verifies that Range fills the gap between counted
// dates with zero-count entries (Feb 28 here).
func TestDateCounter(t *testing.T) {
	feb27 := Date{2014, 2, 27}
	feb28 := Date{2014, 2, 28}
	mar1 := Date{2014, 3, 1}

	dc := DateCounter{}
	dc[feb27] += 1
	dc[mar1] += 1

	counts := dc.Range()
	if len(counts) != 3 {
		t.Fatalf("Unexpected length of counts: %d", len(counts))
	}
	expectDateCount(t, counts[0], feb27, 1)
	expectDateCount(t, counts[1], feb28, 0)
	expectDateCount(t, counts[2], mar1, 1)
}
package main

import (
	"fmt"
	"os"
	"strconv"
)

// isError reports whether err is non-nil, printing it when it is.
// Lightweight shared error handler for the file operations below.
func isError(err error) bool {
	if err != nil {
		fmt.Println(err.Error())
	}
	return err != nil
}

// createFile creates an empty file at path when it does not already
// exist; an existing file is left untouched.
func createFile(path string) {
	_, err := os.Stat(path) // check if path exists
	if os.IsNotExist(err) {
		file, err := os.Create(path)
		if isError(err) {
			// Bug fix: the success message was previously printed
			// even when creation failed.
			return
		}
		file.Close()
	}
	fmt.Println("File Created Successfully", path)
}

// createPath builds the path of the i-th generated file:
// "./files/text<i>.txt".
func createPath(i int) string {
	return "./files/" + "text" + strconv.Itoa(i) + ".txt"
}

// writeFile opens the file at path and writes its own path into it.
func writeFile(path string) {
	file, err := os.OpenFile(path, os.O_RDWR, 0644)
	if isError(err) {
		return
	}
	defer file.Close()

	_, err = file.WriteString(path) // the file's content is its own path
	if isError(err) {
		return
	}

	err = file.Sync() // flush to disk
	if isError(err) {
		return
	}
	fmt.Println("File Updated Successfully.")
}

func main() {
	var path string
	var count int
	fmt.Println("Enter the number of files you want to create: ")
	fmt.Scanln(&count)
	for i := 0; i < count; i++ { // create and fill count files
		path = createPath(i)
		createFile(path)
		writeFile(path)
	}
}
package runtime import ( "github.com/devfeel/dotweb/test" "sync" "testing" "time" ) const ( DefaultTestGCInterval = 2 TEST_CACHE_KEY = "joe" TEST_CACHE_VALUE = "zou" //int value TEST_CACHE_INT_VALUE = 1 //int64 value TEST_CACHE_INT64_VALUE = 1 ) func TestRuntimeCache_Get(t *testing.T) { cache := NewTestRuntimeCache() cache.Set(TEST_CACHE_KEY, TEST_CACHE_VALUE, 5) //check value go func(cache *RuntimeCache, t *testing.T) { time.Sleep(4 * time.Second) value, err := cache.Get(TEST_CACHE_KEY) test.Nil(t, err) test.Equal(t, TEST_CACHE_VALUE, value) }(cache, t) //check expired go func(cache *RuntimeCache, t *testing.T) { time.Sleep(5 * time.Second) value, err := cache.Exists(TEST_CACHE_KEY) test.Nil(t, err) test.Equal(t, true, value) }(cache, t) time.Sleep(5 * time.Second) } func TestRuntimeCache_GetInt(t *testing.T) { testRuntimeCache(t, TEST_CACHE_INT_VALUE, func(cache *RuntimeCache, key string) (interface{}, error) { return cache.GetInt(key) }) } func TestRuntimeCache_GetInt64(t *testing.T) { testRuntimeCache(t, TEST_CACHE_INT64_VALUE, func(cache *RuntimeCache, key string) (interface{}, error) { return cache.GetInt64(key) }) } func TestRuntimeCache_GetString(t *testing.T) { testRuntimeCache(t, TEST_CACHE_VALUE, func(cache *RuntimeCache, key string) (interface{}, error) { return cache.GetString(key) }) } func testRuntimeCache(t *testing.T, insertValue interface{}, f func(cache *RuntimeCache, key string) (interface{}, error)) { cache := NewTestRuntimeCache() cache.Set(TEST_CACHE_KEY, insertValue, 5) //check value go func(cache *RuntimeCache, t *testing.T) { time.Sleep(4 * time.Second) value, err := f(cache, TEST_CACHE_KEY) test.Nil(t, err) test.Equal(t, insertValue, value) }(cache, t) time.Sleep(5 * time.Second) } func TestRuntimeCache_Delete(t *testing.T) { cache := NewTestRuntimeCache() cache.Set(TEST_CACHE_KEY, TEST_CACHE_VALUE, 5) value, e := cache.Get(TEST_CACHE_KEY) test.Nil(t, e) test.Equal(t, TEST_CACHE_VALUE, value) cache.Delete(TEST_CACHE_KEY) value, e = 
cache.Get(TEST_CACHE_KEY) test.Nil(t, e) test.Nil(t, value) } func TestRuntimeCache_ClearAll(t *testing.T) { cache := NewTestRuntimeCache() cache.Set(TEST_CACHE_KEY, TEST_CACHE_VALUE, 5) cache.Set("2", TEST_CACHE_VALUE, 5) cache.Set("3", TEST_CACHE_VALUE, 5) test.Equal(t, 3, len(cache.items)) cache.ClearAll() test.Equal(t, 0, len(cache.items)) } func TestRuntimeCache_Incr(t *testing.T) { cache := NewTestRuntimeCache() var wg sync.WaitGroup wg.Add(2) go func(cache *RuntimeCache) { for i := 0; i < 50; i++ { cache.Incr(TEST_CACHE_KEY) } wg.Add(-1) }(cache) go func(cache *RuntimeCache) { for i := 0; i < 50; i++ { cache.Incr(TEST_CACHE_KEY) } wg.Add(-1) }(cache) wg.Wait() value, e := cache.GetInt(TEST_CACHE_KEY) test.Nil(t, e) test.Equal(t, 100, value) } func TestRuntimeCache_Decr(t *testing.T) { cache := NewTestRuntimeCache() var wg sync.WaitGroup wg.Add(2) go func(cache *RuntimeCache) { for i := 0; i < 50; i++ { cache.Decr(TEST_CACHE_KEY) } wg.Add(-1) }(cache) go func(cache *RuntimeCache) { for i := 0; i < 50; i++ { cache.Decr(TEST_CACHE_KEY) } wg.Add(-1) }(cache) wg.Wait() value, e := cache.GetInt(TEST_CACHE_KEY) test.Nil(t, e) test.Equal(t, -100, value) } func NewTestRuntimeCache() *RuntimeCache { cache := RuntimeCache{items: make(map[string]*RuntimeItem), gcInterval: DefaultTestGCInterval} go cache.gc() return &cache }
// Copyright 2023 PingCAP, Inc. Licensed under Apache-2.0.

package operator

import (
	"time"

	"github.com/pingcap/tidb/br/pkg/task"
	"github.com/spf13/pflag"
)

// PauseGcConfig carries the options of the pause-GC operator command:
// the GC safepoint to keep and how long to keep it.
type PauseGcConfig struct {
	task.Config

	SafePoint uint64        `json:"safepoint" yaml:"safepoint"`
	TTL       time.Duration `json:"ttl" yaml:"ttl"`
}

// DefineFlagsForPauseGcConfig registers the pause-GC flags on f.
// NOTE(review): the shorthands look swapped ("i" for ttl, "t" for
// safepoint) — confirm against the CLI docs before changing, since
// existing users may rely on them.
func DefineFlagsForPauseGcConfig(f *pflag.FlagSet) {
	_ = f.DurationP("ttl", "i", 5*time.Minute, "The time-to-live of the safepoint.")
	_ = f.Uint64P("safepoint", "t", 0, "The GC safepoint to be kept.")
}

// ParseFromFlags fills the config via the flags.
func (cfg *PauseGcConfig) ParseFromFlags(flags *pflag.FlagSet) error {
	// parse the embedded common task config first
	if err := cfg.Config.ParseFromFlags(flags); err != nil {
		return err
	}

	var err error
	cfg.SafePoint, err = flags.GetUint64("safepoint")
	if err != nil {
		return err
	}
	cfg.TTL, err = flags.GetDuration("ttl")
	if err != nil {
		return err
	}

	return nil
}
package pgsql import ( "testing" ) func TestInt8(t *testing.T) { testlist2{{ data: []testdata{ { input: int(-9223372036854775808), output: int(-9223372036854775808)}, { input: int(9223372036854775807), output: int(9223372036854775807)}, }, }, { data: []testdata{ { input: int8(-128), output: int8(-128)}, { input: int8(127), output: int8(127)}, }, }, { data: []testdata{ { input: int16(-32768), output: int16(-32768)}, { input: int16(32767), output: int16(32767)}, }, }, { data: []testdata{ { input: int32(-2147483648), output: int32(-2147483648)}, { input: int32(2147483647), output: int32(2147483647)}, }, }, { data: []testdata{ { input: int64(-9223372036854775808), output: int64(-9223372036854775808)}, { input: int64(9223372036854775807), output: int64(9223372036854775807)}, }, }, { data: []testdata{ { input: uint(0), output: uint(0)}, { input: uint(9223372036854775807), output: uint(9223372036854775807)}, }, }, { data: []testdata{ { input: uint8(0), output: uint8(0)}, { input: uint8(255), output: uint8(255)}, }, }, { data: []testdata{ { input: uint16(0), output: uint16(0)}, { input: uint16(65535), output: uint16(65535)}, }, }, { data: []testdata{ { input: uint32(0), output: uint32(0)}, { input: uint32(4294967295), output: uint32(4294967295)}, }, }, { data: []testdata{ { input: uint64(0), output: uint64(0)}, { input: uint64(9223372036854775807), output: uint64(9223372036854775807)}, }, }, { data: []testdata{ { input: float32(-2147483648.0), output: float32(-2147483648.0)}, { input: float32(2147483647.0), output: float32(2147483647.0)}, }, }, { data: []testdata{ { input: float64(-922337203685477580.0), output: float64(-922337203685477580.0)}, { input: float64(922337203685477580.0), output: float64(922337203685477580.0)}, }, }, { data: []testdata{ { input: string("-9223372036854775808"), output: string(`-9223372036854775808`)}, { input: string("9223372036854775807"), output: string(`9223372036854775807`)}, }, }, { data: []testdata{ { input: []byte("-9223372036854775808"), 
output: []byte(`-9223372036854775808`)}, { input: []byte("9223372036854775807"), output: []byte(`9223372036854775807`)}, }, }}.execute(t, "int8") }
package app

import "net/http"

// checkSession is an auth-check endpoint (e.g. for a reverse proxy's
// auth_request): it resolves the caller's session and replies 200 with
// an X-Auth-User header for a known, logged-in user, 403 otherwise.
// Per-outcome counters are incremented on every path.
func (a *App) checkSession(w http.ResponseWriter, r *http.Request) {
	s, err := a.getSession(w, r)
	hdrs := w.Header()
	hdrs.Set("Content-Type", "text/plain; charset=utf-8")
	if err != nil {
		a.ctrCheckSessionErr.Inc()
		internalError(w, err, "getting session on check")
		return
	}
	// no user data in the session: not logged in
	ud := getUserData(s)
	if ud == nil {
		a.ctrCheckSessionUnauthed.Inc()
		http.Error(w, http.StatusText(http.StatusForbidden), http.StatusForbidden)
		// TODO: Set a response header for the proxy to redirect to?
		return
	}
	// session references a user that no longer exists
	user := a.users.User(ud.Username)
	if user == nil {
		a.ctrCheckSessionUnknownUser.Inc()
		http.Error(w, http.StatusText(http.StatusForbidden), http.StatusForbidden)
		// TODO: Set a response header for the proxy to redirect to?
		return
	}
	a.ctrCheckSessionSuccess.Inc()
	hdrs.Set("X-Auth-User", ud.Username)
	// http.Error is used here simply to write the "OK" status text body.
	http.Error(w, http.StatusText(http.StatusOK), http.StatusOK)
}
// Package inptils contains utilities to read the input required for the // application package inptils import ( "bufio" "bytes" "log" "github.com/google/uuid" c "github.com/pedromss/kafli/config" "github.com/pedromss/kafli/model" ) func createChannel() chan *model.RecordToSend { return make(chan *model.RecordToSend, 100) } // StreamInput will stream all lines of the input that was configured in the // given AppConf. If AppConf.Loop was specified the returned channel will never // stop sending values func StreamInput(conf *c.AppConf) (chan *model.RecordToSend, error) { if conf.Loop { log.Println("Looping over the input!") return ChannelLinesForever(conf) } return ChannelLines(conf) } // ChannelLines returns a channel that will emit all the lines in the input // configured in AppConf. func ChannelLines(conf *c.AppConf) (chan *model.RecordToSend, error) { sc := bufio.NewScanner(conf.In) ch := createChannel() tokenParser := parseLine(conf, ch) noop := func(bts *[]byte) {} go func() { scan(sc, conf.Skip, conf.Limit, noop, tokenParser) close(ch) // ignoring errors for now. 
Should propagate (line, err) through // channel }() return ch, nil } // ChannelLinesForever will emit all lines of the input configured in AppConf // repeting them when it reaches the end in an infinite loop func ChannelLinesForever(conf *c.AppConf) (chan *model.RecordToSend, error) { sc := bufio.NewScanner(conf.In) ch := createChannel() contents := make([]*[]byte, 0) tokenParser := parseLine(conf, ch) go func() { scan(sc, conf.Skip, conf.Limit, func(bts *[]byte) { contents = append(contents, bts) }, tokenParser) recordCount := int64(len(contents)) for i := int64(0); (i + recordCount) < conf.Limit; i++ { tokenParser(i, contents[i%recordCount]) } close(ch) }() return ch, nil } type kvp struct { Key *[]byte Val *[]byte } func (x *kvp) Len() int { result := 0 if x.Key != nil { result++ } if x.Val != nil { result++ } return result } func parseKvp(conf *c.AppConf, bts *[]byte, keySep *[]byte) *kvp { kv := bytes.Split(*bts, *keySep) result := kvp{} parts := len(kv) if parts == 2 { result.Key = &kv[0] result.Val = &kv[1] } else if parts == 1 { result.Val = &kv[0] } return &result } func processKey(conf *c.AppConf, kvp *kvp, keyGen func() *[]byte) { if !conf.ParseKey && !conf.GenerateKeys { kvp.Key = nil return } key := kvp.Key if conf.GenerateKeys { key = keyGen() } kvp.Key = key } func processValue(conf *c.AppConf, kvp *kvp) { valueIsMissing := kvp.Val == nil || len(*kvp.Val) == 0 if valueIsMissing { kvp.Val = conf.NullValueStr } } func generateKey() *[]byte { result := []byte(uuid.New().String()) return &result } func parseLine(conf *c.AppConf, ch chan *model.RecordToSend) func(int64, *[]byte) { keySep := *conf.KeySeparator return func(line int64, bts *[]byte) { kv := parseKvp(conf, bts, &keySep) processKey(conf, kv, generateKey) processValue(conf, kv) ch <- &model.RecordToSend{Key: kv.Key, Val: kv.Val} } } func scan(sc *bufio.Scanner, skip int64, limit int64, onTokenFound func(*[]byte), onTokenProduced func(int64, *[]byte)) { count := int64(0) for sc.Scan() { bts := 
bytes.Trim(sc.Bytes(), " ") if len(bts) == 0 { continue } count++ onTokenFound(&bts) if skip >= count { continue } // This copy shouldn't be needed but without it, produced messages get // scrambled. Need to investigate later tmp := make([]byte, len(bts)) copy(tmp, bts) onTokenProduced(count, &tmp) if skip+limit <= count { break } } }
package main

import (
	"bytes"
	"crypto/sha1"
	"encoding/hex"
	"errors"
	"io/ioutil"
	"net/http"
	"sort"
	"strings"
	"time"
)

// RemoteCallWithBody sends an HTTP request with the given method, URL,
// auth headers and optional body, and returns the response, its body
// bytes, and any error. The response body is closed before returning.
func RemoteCallWithBody(method, url string, token, user string, body []byte, contentType string) (*http.Response, []byte, error) {
	var request *http.Request
	var err error
	if len(body) == 0 {
		request, err = http.NewRequest(method, url, nil)
	} else {
		request, err = http.NewRequest(method, url, bytes.NewReader(body))
	}
	if err != nil {
		return nil, nil, err
	}
	if contentType != "" {
		request.Header.Set("Content-Type", contentType)
	}
	if token != "" {
		request.Header.Set("Authorization", token)
	}
	if user != "" {
		request.Header.Set("User", user)
	}
	client := &http.Client{
		Timeout: 30 * time.Second,
	}
	response, err := client.Do(request)
	if response != nil {
		defer response.Body.Close()
	}
	if err != nil {
		return nil, nil, err
	}
	// renamed from "bytes", which shadowed the bytes package import
	data, err := ioutil.ReadAll(response.Body)
	return response, data, err
}

// GetResponseData reads and returns the full body of r, closing it.
func GetResponseData(r *http.Response) ([]byte, error) {
	// Bug fix: a nil response previously fell through to
	// ioutil.ReadAll(r.Body) and panicked.
	if r == nil {
		return nil, errors.New("nil response")
	}
	defer r.Body.Close()
	return ioutil.ReadAll(r.Body)
}

// GetRequestData reads and returns the full body of r, closing it.
// A request without a body yields (nil, nil).
func GetRequestData(r *http.Request) ([]byte, error) {
	if r.Body == nil {
		return nil, nil
	}
	defer r.Body.Close()
	data, err := ioutil.ReadAll(r.Body)
	if err != nil {
		return nil, err
	}
	return data, nil
}

// checkSignature validates a WeChat-style callback signature: the
// token, timestamp and nonce are sorted, concatenated, SHA-1 hashed,
// and the hex digest compared with the signature form value.
// NOTE(review): "winxin" looks like a typo for "weixin"; it is kept
// as-is because the remote side must be configured with the identical
// token string.
func checkSignature(r *http.Request) bool {
	signature := r.FormValue("signature")
	timestamp := r.FormValue("timestamp")
	nonce := r.FormValue("nonce")
	token := "winxin"

	tmpArr := sort.StringSlice{token, timestamp, nonce}
	sort.Sort(tmpArr)
	tmpStr := strings.Join(tmpArr, "")

	// SHA-1 over the sorted concatenation, rendered as lowercase hex.
	h := sha1.New()
	h.Write([]byte(tmpStr))
	bs := h.Sum(nil)

	return hex.EncodeToString(bs) == signature
}
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// createArr fills arr with 10 random integers in [0, 100), seeding the
// generator with the current unix time.
func createArr(arr *[10]int) {
	rand.Seed(time.Now().Unix())
	for i := 0; i < len(arr); i++ {
		(*arr)[i] = rand.Intn(100)
	}
}

// convert swaps arr[i] with its mirror element whenever the front one
// is larger, then prints the result.
// NOTE(review): despite the surrounding naming, this single mirror-swap
// pass neither fully sorts nor fully reverses the array. Kept as-is to
// preserve the program's printed output — confirm intent before fixing.
func convert(arr [10]int) {
	n := len(arr) // renamed from "len", which shadowed the builtin
	for i := 0; i < n-1; i++ {
		if arr[i] > arr[n-1-i] {
			// idiomatic parallel assignment instead of a temp variable
			arr[i], arr[n-1-i] = arr[n-1-i], arr[i]
		}
	}
	fmt.Println("arr排序后= ", arr)
}

// sumArr returns the sum of all elements of arr.
func sumArr(arr [10]int) int {
	var sum int
	for _, value := range arr {
		sum += value
	}
	return sum
}

// serch performs a recursive binary search for value in arr[left..right]
// and prints whether (and where, 1-based) it was found.
// NOTE(review): binary search requires a sorted array, but main passes
// the unsorted random array — results are unreliable; confirm intent.
func serch(arr [10]int, value, left, right, mid int) {
	if left > right {
		fmt.Printf("没找到%v! \n", value)
		return
	}
	mid = (left + right) / 2
	if value < arr[mid] {
		serch(arr, value, left, mid-1, mid)
	} else if value > arr[mid] {
		serch(arr, value, mid+1, right, mid)
	} else {
		fmt.Printf("找到了%v,位于数组的%v位。 \n", value, mid+1)
	}
}

func main() {
	var arrInt [10]int

	// generate the random input
	createArr(&arrInt)
	fmt.Println("生成的随机数组为:", arrInt)

	// mirror-swap pass
	convert(arrInt)

	// aggregate statistics
	sum := sumArr(arrInt)
	fmt.Printf("arrInt的总和为:%v;平均值是:%v \n", sum, sum/len(arrInt))

	// binary search for a fixed value: (array, value, left, right, mid)
	serch(arrInt, 6, 0, len(arrInt)-1, 0)
}
package discovery

import (
	"os"
	"time"

	"github.com/alecthomas/log4go"
	"github.com/wanghongfei/go-eureka-client/eureka"

	"github.com/wanghongfei/gogate/conf"
	"github.com/wanghongfei/gogate/utils"
)

var euClient *eureka.Client
var gogateApp *eureka.InstanceInfo

// InitEurekaClient creates the global eureka client from the configured
// config file; it panics on failure because the gateway cannot operate
// without registry access.
func InitEurekaClient() {
	c, err := eureka.NewClientFromFile(conf.App.EurekaConfigFile)
	if nil != err {
		panic(err)
	}

	euClient = c
}

// StartRegister registers this gogate instance with eureka and starts a
// background goroutine that sends a heartbeat every 20 seconds.
func StartRegister() {
	ip, err := utils.GetFirstNoneLoopIp()
	if nil != err {
		panic(err)
	}

	host, err := os.Hostname()
	if nil != err {
		panic(err)
	}

	// register
	log4go.Info("register to eureka")
	gogateApp = eureka.NewInstanceInfo(host, conf.App.ServerConfig.AppName, ip, conf.App.ServerConfig.Port, 30, false)
	gogateApp.Metadata = &eureka.MetaData{
		Class: "",
		Map:   map[string]string{"version": conf.App.Version},
	}

	err = euClient.RegisterInstance("gogate", gogateApp)
	if nil != err {
		log4go.Warn("failed to register to eureka, %v", err)
	}

	// heartbeat loop
	go func() {
		ticker := time.NewTicker(time.Second * 20)
		// Bug fix: the goroutine previously received from ticker.C
		// exactly once and then exited, so only a single heartbeat was
		// ever sent and eureka would evict the instance. Loop forever.
		for range ticker.C {
			heartbeat()
		}
	}()
}

// heartbeat sends one eureka heartbeat for the registered instance and
// logs the outcome.
func heartbeat() {
	err := euClient.SendHeartbeat(gogateApp.App, gogateApp.HostName)
	if nil != err {
		log4go.Warn("failed to send heartbeat, %v", err)
		return
	}

	log4go.Info("heartbeat sent")
}
package gogen

import (
	"go/ast"
	"strings"
)

// Import is a structure representing import string in ast tree
type Import struct {
	BaseType
	parent       *ast.ImportSpec
	importString string
}

// String will return import string (the quoted path exactly as it
// appears in the source, e.g. `"fmt"`).
func (i *Import) String() string {
	return i.importString
}

// NewImport will construct a new import. Its name is the explicit alias
// when one is present, otherwise the last segment of the import path.
func NewImport(parent *ast.ImportSpec, annotations *AnnotationMap) *Import {
	var n string
	if parent.Name != nil {
		// aliased import: use the alias as the name
		n = parent.Name.Name
	} else {
		// derive the name from the last path segment of the unquoted path
		raw := strings.Replace(parent.Path.Value, "\"", "", -1)
		split := strings.Split(raw, "/")
		n = split[len(split)-1]
	}

	return &Import{
		BaseType: BaseType{
			name:        n,
			annotations: annotations,
		},
		parent:       parent,
		importString: parent.Path.Value,
	}
}

// ParseImport will parse an import and return its structure
func ParseImport(parent *ast.ImportSpec, comments ast.CommentMap) *Import {
	i := NewImport(parent, ParseAnnotations(comments))
	return i
}
/* Copyright 2021 The Nuclio Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package loggerus import ( "bytes" "io/ioutil" "strings" "testing" "github.com/stretchr/testify/suite" ) type redactorSuite struct { suite.Suite redactor *Redactor } func (suite *redactorSuite) TestKeyValueTypeValueRedactions() { buf := new(bytes.Buffer) // prepare redactor valueRedactions := []string{"artifactVersionManifestContents", "systemConfigContents"} suite.redactor = NewRedactor(buf) suite.redactor.AddValueRedactions(valueRedactions) // read file into byte string unredactedCommand, err := ioutil.ReadFile("test/key_value.txt") suite.Assert().Nil(err) // write it using the redactor write function bytesWritten, err := suite.redactor.Write(unredactedCommand) suite.Assert().Nil(err) suite.Assert().True(bytesWritten > 0) // verify that command was indeed redacted redactedCommand := buf.String() suite.Assert().True(strings.Contains(redactedCommand, "artifactVersionManifestContents=[redacted]")) suite.Assert().True(strings.Contains(redactedCommand, "systemConfigContents=[redacted]")) } func (suite *redactorSuite) TestDictTypeValueRedactions() { buf := new(bytes.Buffer) // prepare redactor valueRedactions := []string{"java_key_store"} suite.redactor = NewRedactor(buf) suite.redactor.AddValueRedactions(valueRedactions) // read file into byte string unredactedCommand, err := ioutil.ReadFile("test/dict.txt") suite.Assert().Nil(err) // write it using the redactor write function bytesWritten, err := 
suite.redactor.Write(unredactedCommand) suite.Assert().Nil(err) suite.Assert().True(bytesWritten > 0) // verify that command was indeed redacted redactedCommand := buf.String() suite.Assert().True(strings.Contains(redactedCommand, `"java_key_store":[redacted]`)) } func (suite *redactorSuite) TestRegularRedactions() { buf := new(bytes.Buffer) // prepare redactor redactions := []string{"password"} suite.redactor = NewRedactor(buf) suite.redactor.AddRedactions(redactions) // push some string to writer unredactedCommand := "{asdhaksjd:\\ password \\ \n}" bytesWritten, err := suite.redactor.Write([]byte(unredactedCommand)) suite.Assert().Nil(err) suite.Assert().True(bytesWritten > 0) // verify that command was indeed redacted redactedCommand := buf.String() suite.Assert().True(strings.Contains(redactedCommand, "{asdhaksjd:\\ ***** \\ \n}")) } func TestRedactorTestSuite(t *testing.T) { suite.Run(t, new(redactorSuite)) }
package dalmodel import "github.com/jinzhu/gorm" type Notification struct { gorm.Model UserID uint text string }
package main import ( "github.com/hashicorp/terraform/plugin" "github.com/hashicorp/terraform/terraform" "github.com/nukosuke/terraform-provider-zendesk/zendesk" ) func main() { plugin.Serve(&plugin.ServeOpts{ ProviderFunc: func() terraform.ResourceProvider { return zendesk.Provider() }, }) }
package constants const ServerAddress = ":9090"
/* A Pythagorean triplet is a set of three natural numbers, a < b < c, for which, a^2 + b^2 = c^2 For example, 32 + 42 = 9 + 16 = 25 = 52. There exists exactly one Pythagorean triplet for which a + b + c = 1000. Find the product abc. */ package main import ( "flag" "fmt" "strconv" ) func main() { N := 1000 flag.Parse() if flag.NArg() >= 1 { N, _ = strconv.Atoi(flag.Arg(0)) } for a := 1; a <= N; a++ { for b := 1; b <= N; b++ { for c := 1; c <= N; c++ { if a*a+b*b == c*c && a+b+c == N { fmt.Printf("%d + %d + %d = %d\n", a, b, c, a+b+c) fmt.Printf("%d * %d * %d = %d\n", a, b, c, a*b*c) return } } } } }
package service import ( "context" client2 "krpc/client" "log" "net" "sync" "testing" "time" ) //type Foo int //type Args struct {Num1, Num2 int} //func (f Foo) Sum(args Args, reply *int) error { // *reply = args.Num2 + args.Num1 // return nil //} func startServer(addr chan string) { var foo Foo if err := DefaultServer.Register(&foo); err != nil { log.Fatal("register error: ", err) } // pick a free port l, err := net.Listen("tcp", ":0") if err != nil { log.Fatal("init listen error:", err) return } log.Println("start rpc server on", l.Addr()) addr <- l.Addr().String() Accept(l) } func TestServeDay3(t *testing.T) { log.SetFlags(0) addr := make(chan string) // init a server first. go startServer(addr) // kRPC client. client, _ := client2.Dial("tcp", <- addr) defer func() { _ = client.Close() }() time.Sleep(time.Second) // send request to server & receive response var wg sync.WaitGroup for i := 0; i < 5; i++ { wg.Add(1) go func(i int) { defer wg.Done() args := &Args{Num2: i, Num1: i * i} var reply int if err := client.Call(context.Background(), "Foo.Sum", args, &reply); err != nil { log.Fatal("call Foo.Sum error: ", err) } log.Printf("%d + %d = %d", args.Num1, args.Num2, reply) }(i) } wg.Wait() } func TestServeDay4(t *testing.T) { log.SetFlags(0) addr := make(chan string) // init a server first. go startServer(addr) // kRPC client. client, _ := client2.Dial("tcp", <- addr) defer func() { _ = client.Close() }() time.Sleep(time.Second) // send request to server & receive response var wg sync.WaitGroup for i := 0; i < 5; i++ { wg.Add(1) go func(i int) { // Call timeout ctx, _ := context.WithTimeout(context.Background(), time.Second * 5) defer wg.Done() args := &Args{Num2: i, Num1: i * i} var reply int if err := client.Call(ctx, "Foo.Sum", args, &reply); err != nil { log.Fatal("call Foo.Sum error: ", err) } log.Printf("%d + %d = %d", args.Num1, args.Num2, reply) }(i) } wg.Wait() }
package main import ( "github.com/notassigned/p2p-tools/cli" ) // main func main() { cli.Root.Run() }
package main import ( "testing" ) func TestAssignmentOperator(t *testing.T) { // ---------------- 赋值运算符 ------------------------ // = += -= *= /= t.Log(".......") a := 10 b := 20 c := a a = b b = c t.Logf("%d,%d \n", a, b) n, m := 10, 20 x := n + m n = x - n m = x - n t.Logf("%d,%d \n", n, m) } func TestArithmeticOperator(t *testing.T) { // ----------------- 算数运算符 ----------------- // '/' 相当于整除 t.Log(10 / 4) // 2 t.Log(10 * 1.0 / 4) // 2.5 t.Log(float32(10 / 4)) // 2 t.Log(4 / 10) // 0 t.Log(4 * 1.0 / 10) // 0.4 // a % b = a - a / b * b t.Log(10 % 4) // 2 t.Log(-10 % -3) // -1 t.Log(10 % -3) // 1 // 10 - 10 / -3 * -3 t.Log(-10 % 3) // -1 a, b := 10, 20 t.Log(a, b) t.Log(a / b) t.Log(a * 1.0 / b) // ++ -- i := 1 i++ t.Log(i) i-- t.Log(i) // 不能 --i 或者 ++i 或者 j:=i++ i=i++ // 即:++ , -- 只能单独使用 } func TestLogicalOperators(t *testing.T) { // ----------------- 逻辑运算符 ----------------- t.Log(1 == 1 && 2 > 1) // true t.Log(1 == 1 && 2 > 3) // false t.Log(1 == 1 || 2 > 3) // true t.Log(1 != 1 || 2 > 3) // false t.Log(!true) // false t.Log(1 != 1) // false t.Log(!(1 != 1)) // true // 如果 && 左边为假,则不判断右边 // 如果 || 右边为真,则不判断右边 } func TestRelationalOperator(t *testing.T) { // ----------------- 关系运算符 --------------------- t.Log(1 == 1) // true t.Log(1 != -1) // true t.Log(1 < -1) // false t.Log(1 > -1) // true t.Log(1 >= -1) // true t.Log(1 <= -1) // false flag := 1 > 1 t.Log(flag) // false flag2 := 1 == 1 t.Log(flag2) // true } func TestBitOperator(t *testing.T) { // ---------------- 位运算符 ------------------- var a int8 = 3 var b int8 = -2 // 左移 t.Log(a << 1) // 6 t.Log(b << 1) // -4 // 右移 t.Log(a >> 1) // 1 // 负数右移最小值为 -1,操作原码 t.Log(b >> 1) // -1 // 负数按照补码计算: | & ^ t.Log(a | b) // -1 t.Log(a & b) // 2 t.Log(a ^ b) // -3 t.Log(a &^ b) // 1 // 按位清零 ,右边为 1 左边无论是 0 还是 1 结果都是 0,右边为 0 左边是什么结果就是什么 t.Log(1 &^ 0) t.Log(1 &^ 1) t.Log(0 &^ 0) t.Log(0 &^ 1) }
package utils import ( "bufio" "fmt" "github.com/fatih/color" "golang.org/x/crypto/ssh/terminal" "os" "strings" "syscall" ) func GetInput(prompt string) (string, error) { reader := bufio.NewReader(os.Stdin) printPrompt(prompt) userInput, err := reader.ReadString('\n') if err != nil { return "", err } return strings.TrimSpace(userInput), nil } func GetHiddenInput(prompt string) (string, error) { printPrompt(prompt) byteInput, err := terminal.ReadPassword(int(syscall.Stdin)) if err != nil { return "", err } userInput := string(byteInput) return strings.TrimSpace(userInput), nil } func printPrompt(prompt string) { color.Set(color.FgWhite, color.Bold) fmt.Print(prompt) color.Unset() }
package server

import (
	"encoding/base64"
	"errors"
	"github.com/golang/glog"
	"net/http"
	"strconv"
	"time"
)

// CheckToken validates a base64 token with the layout
// <10-digit unix timestamp><8 bytes padding><user uuid> and returns the
// embedded user uuid. Tokens older than 24 hours are rejected, and the
// user's login status is cleared in the database.
//
// The w parameter is unused but kept so existing call sites keep
// compiling unchanged.
func CheckToken(token string, w http.ResponseWriter, req *http.Request) (user_uuid string, err error) {
	req.ParseForm()
	glog.Info("rec token:", token, " token length:", len(token))

	// Anything too short to carry timestamp + padding + uuid is invalid.
	if len(token) <= 18 {
		return "", errors.New("token error")
	}

	s, err := base64.StdEncoding.DecodeString(token)
	if err != nil {
		glog.Info(err)
		return "", err
	}
	glog.Info("token: ", string(s))

	// BUG FIX: guard the slice bounds; a short decoded payload would
	// previously panic on s[18:].
	if len(s) < 18 {
		return "", errors.New("token error")
	}

	time_token := s[0:10] // unix timestamp in seconds, as ASCII digits
	user_uuid = string(s[18:])
	glog.Info("time_token: ", string(time_token))
	glog.Info("user_uuid: ", user_uuid)

	time_now := time.Now().Unix()
	// Parse errors fall back to 0, which makes the token look ancient
	// and therefore expired — a safe failure mode.
	i, _ := strconv.ParseInt(string(time_token), 10, 64)

	// Token expired: force logout and report an error.
	if time_now-i >= 24*60*60 {
		glog.Info("token 过期: ", (time.Unix(i, 0).Format("2006-01-02 03:04:05 PM")))
		// BUG FIX: the Prepare error was previously ignored, so a nil
		// stmt would be dereferenced, and stmt.Close was skipped on the
		// error path. The trailing unreachable return is also gone.
		stmt, perr := Db.Prepare(`update proxypro.userInfo set loginStatus = ? where uuid = ?`)
		if perr != nil {
			glog.Info(perr)
			return "", errors.New("token error: token 过期 请重新登")
		}
		defer stmt.Close()
		res, eerr := stmt.Exec(0, user_uuid)
		glog.Info(eerr)
		glog.Info(res)
		glog.Info("============================================", user_uuid)
		return "", errors.New("token error: token 过期 请重新登")
	}
	return user_uuid, nil
}
package leetcode /*给定一棵二叉搜索树,请找出其中第k大的节点。*/ /** * Definition for a binary tree node. * type TreeNode struct { * Val int * Left *TreeNode * Right *TreeNode * } */ func kthLargest(root *TreeNode, k int) int { nums := make([]int, 0, k) getNums(root, &nums) return nums[k-1] } func getNums(root *TreeNode, nums *[]int) { if root.Right != nil { getNums(root.Right, nums) } if root != nil { *nums = append(*nums, root.Val) } if root.Left != nil { getNums(root.Left, nums) } }
package slack import ( "encoding/json" "testing" "github.com/stretchr/testify/assert" ) const ( dialogSubmissionCallback = `{ "type": "dialog_submission", "submission": { "name": "Sigourney Dreamweaver", "email": "sigdre@example.com", "phone": "+1 800-555-1212", "meal": "burrito", "comment": "No sour cream please", "team_channel": "C0LFFBKPB", "who_should_sing": "U0MJRG1AL" }, "callback_id": "employee_offsite_1138b", "team": { "id": "T1ABCD2E12", "domain": "coverbands" }, "user": { "id": "W12A3BCDEF", "name": "dreamweaver" }, "channel": { "id": "C1AB2C3DE", "name": "coverthon-1999" }, "action_ts": "936893340.702759", "token": "M1AqUUw3FqayAbqNtsGMch72", "response_url": "https://hooks.slack.com/app/T012AB0A1/123456789/JpmK0yzoZDeRiqfeduTBYXWQ" }` actionCallback = `{}` viewClosedCallback = `{ "type": "view_closed", "team": { "id": "T1ABCD2E12", "domain": "coverbands" }, "user": { "id": "W12A3BCDEF", "name": "dreamweaver" }, "view": { "type": "modal", "title": { "type": "plain_text", "text": "launch project" }, "blocks": [{ "type": "section", "text": { "text": "*Sally* has requested you set the deadline for the Nano launch project", "type": "mrkdwn" }, "accessory": { "type": "datepicker", "action_id": "datepicker123", "initial_date": "1990-04-28", "placeholder": { "type": "plain_text", "text": "Select a date" } } }], "app_installed_team_id": "T1ABCD2E12" }, "api_app_id": "A123ABC", "is_cleared": false }` viewSubmissionCallback = `{ "type": "view_submission", "team": { "id": "T1ABCD2E12", "domain": "coverbands" }, "user": { "id": "W12A3BCDEF", "name": "dreamweaver" }, "channel": { "id": "C1AB2C3DE", "name": "coverthon-1999" }, "view": { "type": "modal", "title": { "type": "plain_text", "text": "meal choice" }, "blocks": [ { "type": "input", "block_id": "multi-line", "label": { "type": "plain_text", "text": "dietary restrictions" }, "element": { "type": "plain_text_input", "multiline": true, "action_id": "ml-value" } }, { "type": "input", "block_id": "target_channel", 
"label": { "type": "plain_text", "text": "Select a channel to post the result on" }, "element": { "type": "conversations_select", "action_id": "target_select", "default_to_current_conversation": true, "response_url_enabled": true } } ], "state": { "values": { "multi-line": { "ml-value": { "type": "plain_text_input", "value": "No onions" } }, "target_channel": { "target_select": { "type": "conversations_select", "value": "C1AB2C3DE" } } } }, "app_installed_team_id": "T1ABCD2E12" }, "hash": "156663117.cd33ad1f", "response_urls": [ { "block_id": "target_channel", "action_id": "target_select", "channel_id": "C1AB2C3DE", "response_url": "https:\/\/hooks.slack.com\/app\/ABC12312\/1234567890\/A100B100C100d100" } ] }` ) func assertInteractionCallback(t *testing.T, callback InteractionCallback, encoded string) { var decoded InteractionCallback assert.Nil(t, json.Unmarshal([]byte(encoded), &decoded)) assert.Equal(t, decoded, callback) } func TestDialogCallback(t *testing.T) { expected := InteractionCallback{ Type: InteractionTypeDialogSubmission, Token: "M1AqUUw3FqayAbqNtsGMch72", CallbackID: "employee_offsite_1138b", ResponseURL: "https://hooks.slack.com/app/T012AB0A1/123456789/JpmK0yzoZDeRiqfeduTBYXWQ", ActionTs: "936893340.702759", Team: Team{ID: "T1ABCD2E12", Name: "", Domain: "coverbands"}, Channel: Channel{ GroupConversation: GroupConversation{ Conversation: Conversation{ ID: "C1AB2C3DE", }, Name: "coverthon-1999", }, }, User: User{ ID: "W12A3BCDEF", Name: "dreamweaver", }, DialogSubmissionCallback: DialogSubmissionCallback{ Submission: map[string]string{ "team_channel": "C0LFFBKPB", "who_should_sing": "U0MJRG1AL", "name": "Sigourney Dreamweaver", "email": "sigdre@example.com", "phone": "+1 800-555-1212", "meal": "burrito", "comment": "No sour cream please", }, }, } assertInteractionCallback(t, expected, dialogSubmissionCallback) } func TestActionCallback(t *testing.T) { assertInteractionCallback(t, InteractionCallback{}, actionCallback) } func TestViewClosedck(t 
*testing.T) { expected := InteractionCallback{ Type: InteractionTypeViewClosed, Team: Team{ID: "T1ABCD2E12", Name: "", Domain: "coverbands"}, User: User{ ID: "W12A3BCDEF", Name: "dreamweaver", }, View: View{ Type: VTModal, Title: NewTextBlockObject("plain_text", "launch project", false, false), Blocks: Blocks{ BlockSet: []Block{ NewSectionBlock( NewTextBlockObject("mrkdwn", "*Sally* has requested you set the deadline for the Nano launch project", false, false), nil, NewAccessory(&DatePickerBlockElement{ Type: METDatepicker, ActionID: "datepicker123", InitialDate: "1990-04-28", Placeholder: NewTextBlockObject("plain_text", "Select a date", false, false), }), ), }, }, AppInstalledTeamID: "T1ABCD2E12", }, APIAppID: "A123ABC", } assertInteractionCallback(t, expected, viewClosedCallback) } func TestViewSubmissionCallback(t *testing.T) { expected := InteractionCallback{ Type: InteractionTypeViewSubmission, Team: Team{ID: "T1ABCD2E12", Name: "", Domain: "coverbands"}, Channel: Channel{ GroupConversation: GroupConversation{ Conversation: Conversation{ ID: "C1AB2C3DE", }, Name: "coverthon-1999", }, }, User: User{ ID: "W12A3BCDEF", Name: "dreamweaver", }, View: View{ Type: VTModal, Title: NewTextBlockObject("plain_text", "meal choice", false, false), Blocks: Blocks{ BlockSet: []Block{ NewInputBlock( "multi-line", NewTextBlockObject( "plain_text", "dietary restrictions", false, false, ), nil, &PlainTextInputBlockElement{ Type: "plain_text_input", ActionID: "ml-value", Multiline: true, }, ), NewInputBlock( "target_channel", NewTextBlockObject( "plain_text", "Select a channel to post the result on", false, false, ), nil, &SelectBlockElement{ Type: "conversations_select", ActionID: "target_select", DefaultToCurrentConversation: true, ResponseURLEnabled: true, }, ), }, }, State: &ViewState{ Values: map[string]map[string]BlockAction{ "multi-line": { "ml-value": { Type: "plain_text_input", Value: "No onions", }, }, "target_channel": { "target_select": { Type: 
"conversations_select", Value: "C1AB2C3DE", }, }, }, }, AppInstalledTeamID: "T1ABCD2E12", }, ViewSubmissionCallback: ViewSubmissionCallback{ Hash: "156663117.cd33ad1f", ResponseURLs: []ViewSubmissionCallbackResponseURL{ { BlockID: "target_channel", ActionID: "target_select", ChannelID: "C1AB2C3DE", ResponseURL: "https://hooks.slack.com/app/ABC12312/1234567890/A100B100C100d100", }, }, }, } assertInteractionCallback(t, expected, viewSubmissionCallback) } func TestInteractionCallbackJSONMarshalAndUnmarshal(t *testing.T) { cb := &InteractionCallback{ Type: InteractionTypeBlockActions, Token: "token", CallbackID: "", ResponseURL: "responseURL", TriggerID: "triggerID", ActionTs: "actionTS", Team: Team{ ID: "teamid", Name: "teamname", }, Channel: Channel{ GroupConversation: GroupConversation{ Name: "channelname", Conversation: Conversation{ID: "channelid"}, }, }, User: User{ ID: "userid", Name: "username", Profile: UserProfile{RealName: "userrealname"}, }, OriginalMessage: Message{ Msg: Msg{ Text: "ogmsg text", Timestamp: "ogmsg ts", }, }, Message: Message{ Msg: Msg{ Text: "text", Timestamp: "ts", }, }, Name: "name", Value: "value", MessageTs: "messageTs", AttachmentID: "attachmentID", ActionCallback: ActionCallbacks{ AttachmentActions: []*AttachmentAction{ {Value: "value"}, {Value: "value2"}, }, BlockActions: []*BlockAction{ {ActionID: "id123"}, {ActionID: "id456"}, }, }, View: View{ Type: VTModal, Title: NewTextBlockObject("plain_text", "title", false, false), Blocks: Blocks{ BlockSet: []Block{NewDividerBlock()}, }, }, DialogSubmissionCallback: DialogSubmissionCallback{State: ""}, RawState: json.RawMessage(`{}`), } cbJSONBytes, err := json.Marshal(cb) assert.NoError(t, err) jsonCB := new(InteractionCallback) err = json.Unmarshal(cbJSONBytes, jsonCB) assert.NoError(t, err) assert.Equal(t, cb.Type, jsonCB.Type) assert.Equal(t, cb.Token, jsonCB.Token) assert.Equal(t, cb.CallbackID, jsonCB.CallbackID) assert.Equal(t, cb.ResponseURL, jsonCB.ResponseURL) assert.Equal(t, 
cb.TriggerID, jsonCB.TriggerID) assert.Equal(t, cb.ActionTs, jsonCB.ActionTs) assert.Equal(t, cb.Team.ID, jsonCB.Team.ID) assert.Equal(t, cb.Team.Name, jsonCB.Team.Name) assert.Equal(t, cb.Channel.ID, jsonCB.Channel.ID) assert.Equal(t, cb.Channel.Name, jsonCB.Channel.Name) assert.Equal(t, cb.Channel.Created, jsonCB.Channel.Created) assert.Equal(t, cb.User.ID, jsonCB.User.ID) assert.Equal(t, cb.User.Name, jsonCB.User.Name) assert.Equal(t, cb.User.Profile.RealName, jsonCB.User.Profile.RealName) assert.Equal(t, cb.OriginalMessage.Text, jsonCB.OriginalMessage.Text) assert.Equal(t, cb.OriginalMessage.Timestamp, jsonCB.OriginalMessage.Timestamp) assert.Equal(t, cb.Message.Text, jsonCB.Message.Text) assert.Equal(t, cb.Message.Timestamp, jsonCB.Message.Timestamp) assert.Equal(t, cb.Name, jsonCB.Name) assert.Equal(t, cb.Value, jsonCB.Value) assert.Equal(t, cb.MessageTs, jsonCB.MessageTs) assert.Equal(t, cb.AttachmentID, jsonCB.AttachmentID) assert.Equal(t, len(cb.ActionCallback.AttachmentActions), len(jsonCB.ActionCallback.AttachmentActions)) assert.Equal(t, len(cb.ActionCallback.BlockActions), len(jsonCB.ActionCallback.BlockActions)) assert.Equal(t, cb.View.Type, jsonCB.View.Type) assert.Equal(t, cb.View.Title, jsonCB.View.Title) assert.Equal(t, cb.View.Blocks, jsonCB.View.Blocks) assert.Equal(t, cb.DialogSubmissionCallback.State, jsonCB.DialogSubmissionCallback.State) } func TestInteractionCallback_InteractionTypeBlockActions_Unmarshal(t *testing.T) { raw := []byte(`{ "type": "block_actions", "actions": [ { "type": "multi_conversations_select", "action_id": "multi_convos", "block_id": "test123", "selected_conversations": ["G12345"] } ], "container": { "type": "view", "view_id": "V12345" }, "state": { "values": { "section_block_id": { "multi_convos": { "type": "multi_conversations_select", "selected_conversations": ["G12345"] } }, "other_block_id": { "other_action_id": { "type": "plain_text_input", "value": "test123" } } } } }`) var cb InteractionCallback assert.NoError(t, 
json.Unmarshal(raw, &cb)) assert.Equal(t, cb.State, "") assert.Equal(t, cb.BlockActionState.Values["section_block_id"]["multi_convos"].actionType(), ActionType(MultiOptTypeConversations)) assert.Equal(t, cb.BlockActionState.Values["section_block_id"]["multi_convos"].SelectedConversations, []string{"G12345"}) } func TestInteractionCallback_Container_Marshal_And_Unmarshal(t *testing.T) { // Contrived - you generally won't see all of the fields set in a single message raw := []byte( ` { "container": { "type": "message", "view_id": "viewID", "message_ts": "messageTS", "attachment_id": "123", "channel_id": "channelID", "is_ephemeral": false, "is_app_unfurl": false } } `) expected := &InteractionCallback{ Container: Container{ Type: "message", ViewID: "viewID", MessageTs: "messageTS", AttachmentID: "123", ChannelID: "channelID", IsEphemeral: false, IsAppUnfurl: false, }, RawState: json.RawMessage(`{}`), } actual := new(InteractionCallback) err := json.Unmarshal(raw, actual) assert.NoError(t, err) assert.Equal(t, expected.Container, actual.Container) expectedJSON := []byte(`{"type":"message","view_id":"viewID","message_ts":"messageTS","attachment_id":123,"channel_id":"channelID","is_ephemeral":false,"is_app_unfurl":false}`) actualJSON, err := json.Marshal(actual.Container) assert.NoError(t, err) assert.Equal(t, expectedJSON, actualJSON) } func TestInteractionCallback_In_Thread_Container_Marshal_And_Unmarshal(t *testing.T) { // Contrived - you generally won't see all of the fields set in a single message raw := []byte( ` { "container": { "type": "message", "view_id": "viewID", "message_ts": "messageTS", "thread_ts": "threadTS", "attachment_id": "123", "channel_id": "channelID", "is_ephemeral": false, "is_app_unfurl": false } } `) expected := &InteractionCallback{ Container: Container{ Type: "message", ViewID: "viewID", MessageTs: "messageTS", ThreadTs: "threadTS", AttachmentID: "123", ChannelID: "channelID", IsEphemeral: false, IsAppUnfurl: false, }, RawState: 
json.RawMessage(`{}`), } actual := new(InteractionCallback) err := json.Unmarshal(raw, actual) assert.NoError(t, err) assert.Equal(t, expected.Container, actual.Container) expectedJSON := []byte(`{"type":"message","view_id":"viewID","message_ts":"messageTS","thread_ts":"threadTS","attachment_id":123,"channel_id":"channelID","is_ephemeral":false,"is_app_unfurl":false}`) actualJSON, err := json.Marshal(actual.Container) assert.NoError(t, err) assert.Equal(t, expectedJSON, actualJSON) }
package mymath /**/ func GetSum(n int) int { var sum = 0 for i := 1;i<n+1;i++{ sum += i } return sum } func GetSumRecursively(n int) int { if n == 1{ return 1 } return n + GetSumRecursively(n-1) }
package romaininteger

import (
	"strconv"
	"testing"
)

// romanCases pairs a roman numeral with the integer it must convert to.
var romanCases = []struct {
	roman string
	want  int
}{
	{"IV", 4},
	{"III", 3},
	{"MM", 2000},
	{"LVIII", 58},
	{"MCMXCIV", 1994},
	{"MCCXLIX", 1249},
	{"CMXCIX", 999},
}

// TestRomanToInt checks RomanToInt against a table of known conversions,
// running each case as its own subtest named after the expected value.
func TestRomanToInt(t *testing.T) {
	for _, tc := range romanCases {
		tc := tc
		t.Run(strconv.Itoa(tc.want), func(t *testing.T) {
			if got := RomanToInt(tc.roman); got != tc.want {
				t.Errorf("Expected: %v, got: %v", tc.want, got)
			}
		})
	}
}
package types import ( "app-auth/db" "context" "fmt" "log" "github.com/mongodb/mongo-go-driver/bson" "github.com/mongodb/mongo-go-driver/mongo/options" ) type TeamMapType = map[string]interface{} type OrganisationMapType = map[string]interface{} type ScopeObjectMapping = map[string]OrganisationMapType type Scopes struct { App string Id string `json:"id"` Name string `json:"name"` Permissions []string `json:"permissions"` } type ScopeAlreadyExists struct { Name string `json:"name"` Status int `json:"status"` Message string `json:"message"` } type ScopeNotFound struct { Type string `json:"type"` Status int `json:"status"` Message string `json:"message"` Error bool `json:"error"` } type ScopeOperation struct { Type string `json:"type"` Status int `json:"status"` Message string `json:"message"` State string `json:"state"` Error bool `json:"error"` } type ScopeTeam struct { Name string `json:"name"` Id string `json:"id"` Scope string `json:"scope"` } type ScopeOrganisation struct { Name string `json:"name"` Id string `json:"id"` Admin bool `json:"admin"` Team []ScopeTeam `json:"team"` } // experimental searches for list fails with longer search times... 
type ScopeObjectList struct { Scopes []ScopeOrganisation } type ScopeObject struct { Scopes ScopeObjectMapping } type UserMemberScope struct { Id string `json:"id"` OrganisationName string `json:"organisation_name"` OrganisationId string `json:"organisation_id"` TeamName string `json:"team_name"` TeamId string `json:"team_id"` UserEmail string `json:"user_email"` UserId string `json:"user_id"` App string `json:"app"` // this state would correspond to a user invite State string `json:"state"` // this state would correspond to a user invite Scopes []string `json:"scopes"` } func (userMemberScopes UserMemberScope) String() string { return fmt.Sprintf( `<UserMemberScopes email="%s" id="%s" team_id="%s" organisation_id="%" />`, userMemberScopes.UserEmail, userMemberScopes.Id, userMemberScopes.TeamId, userMemberScopes.OrganisationId, ) } func (userMemberScopes UserMemberScope) getScopeFilter() bson.D { return bson.D{ {"id", userMemberScopes.Id}, {"app", userMemberScopes.App}, {"userid", userMemberScopes.UserId}, {"teamid", userMemberScopes.TeamId}, {"organisationid", userMemberScopes.OrganisationId}, } } func (userMemberScopes UserMemberScope) GetUserMemberScope() *UserMemberScope { userMemberScopesData := UserMemberScope{} err := db.UserScopesCollection.FindOne(context.Background(), userMemberScopes.getScopeFilter()).Decode(&userMemberScopesData) if err != nil { log.Println(err) return nil } return &userMemberScopesData } func (userMemberScopes UserMemberScope) SaveUserMemberScope() *UserMemberScope { userScopes := userMemberScopes.GetUserMemberScope() if userScopes != nil { return nil } _, err := db.UserScopesCollection.InsertOne(context.Background(), userMemberScopes) if err != nil { log.Println(err) return nil } return &userMemberScopes } func (userMemberScopes UserMemberScope) RemoveUserMemberScope() *bool { userScopes := userMemberScopes.GetUserMemberScope() if userScopes == nil { return nil } _, err := db.UserScopesCollection.DeleteOne(context.Background(), 
userMemberScopes.getScopeFilter()) if err != nil { log.Println(err) return &AndFalse } return &AndTrue } func (userMemberScopes UserMemberScope) RemoveUserMembersScope(userScopeIds []string) *bool { filter := bson.D{{ "id", bson.D{{"$in", userScopeIds}}, }} _, err := db.UserScopesCollection.DeleteMany(context.Background(), filter) if err != nil { log.Println(err) return &AndFalse } return &AndTrue } func (userMemberScopes UserMemberScope) UpdateUserMemberScope(userScopeUpdateInfo bson.D) *UserMemberScope { userScopes := userMemberScopes.GetUserMemberScope() if userScopes == nil { return nil } userScopesData := UserMemberScope{} userScopesUpdateData := bson.D{{"$set", userScopeUpdateInfo}} tr := true rd := options.After opts := &options.FindOneAndUpdateOptions{Upsert: &tr, BypassDocumentValidation: &tr, ReturnDocument: &rd} err := db.UserScopesCollection.FindOneAndUpdate( context.Background(), userMemberScopes.getScopeFilter(), userScopesUpdateData, opts, ).Decode(&userScopesData) if err != nil { log.Println(err) return nil } return &userScopesData } func (userMemberScopes UserMemberScope) AddUserMemberScopeScopes(userScopeData string) *UserMemberScope { userScope := userMemberScopes.GetUserMemberScope() if userScope == nil { return nil } return userMemberScopes.UpdateUserMemberScope(bson.D{{"scopes", append(userScope.Scopes, userScopeData)}}) } func (userMemberScopes UserMemberScope) RemoveUserMemberScopeScopes(userScopeData string) *UserMemberScope { // this scope doesn't exists, return nil userScope := userMemberScopes.GetUserMemberScope() if userScope == nil { return nil } // remove the permission from the list of permission index := FindIndex(userScope.Scopes, userScopeData) newUserScopes := RemoveIndex(userScope.Scopes, index) return userMemberScopes.UpdateUserMemberScope(bson.D{{"permissions", newUserScopes}}) } var data = ` { "StandardClaims": { "exp": 1585989679, "iat": 1585988779, "iss": "Clipsynphony" }, "age": 21, "email": 
"user_two.test@clipsynphony.com", "expires": "2020-04-04T08:41:19.520330457Z", "firstname": "Test", "id": "987ff34b-e68b-4ff2-8f2c-804a03209315", "lastname": "User", "provider": "local", "username": "test_user", "verified": true "scopes": { "987ff34b-e68b-4ff2-8f2c-804a03209315": { "id": "987ff34b-e68b-4ff2-8f2c-804a03209315", "name": "CNN" "987ff34b-e68b-4ff2-8f2c-804a03209315": { "name": "CNN AFRICA" "id": "987ff34b-e68b-4ff2-8f2c-804a03209315" "scope": ["EDITOR"] } }, } Example: "987ff34b-e68b-4ff2-8f2c-804a03209315": { "id": "987ff34b-e68b-4ff2-8f2c-804a03209315", "name": "CNN" "987ff34b-e68b-4ff2-8f2c-804a03209315": { "name": "CNN AFRICA" "id": "987ff34b-e68b-4ff2-8f2c-804a03209315" "scope": ["EDITOR"] } } }, } `
package main

import (
	"encoding/json"
	"flag"
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
	"strings"
)

var opType, from, to, date, page *string
var state, t *int

// Result is the top-level response payload of the ticket search API.
type Result struct {
	Success bool
	Content Content
}

// Content wraps the list of matching bus departures.
type Content struct {
	BusNumberList []BusNumber
}

// BusNumber describes one departure: origin station, departure time and
// remaining seat count (all returned as strings by the API).
type BusNumber struct {
	BeginStationName string
	LeaveTime        string
	RemainSeat       string
}

// init defines and parses the command line flags that drive the search.
func init() {
	opType = flag.String("type", "ticket", "please input your opeartion type!")
	from = flag.String("from", "杭州", "please input from city")
	to = flag.String("to", "绍兴", "please input to city")
	date = flag.String("date", "2017-01-25", "please input when to go")
	page = flag.String("page", "1", "please input search page")
	state = flag.Int("state", 1, "please choose which begin station")
	t = flag.Int("time", 0, "please choose which begin time range")
	flag.Parse()
}

// main posts the search form to the ticket site and prints each
// matching departure with its remaining seat count.
func main() {
	v := url.Values{}
	v.Set("type", *opType)
	v.Set("from", *from)
	v.Set("to", *to)
	v.Set("date", *date)
	v.Set("page", *page)
	if *state == 1 {
		v.Set("startStations", "010106")
	}
	// Map the numeric time-range selector onto the API's leaveTimes parameter.
	switch *t {
	case 1:
		v.Set("leaveTimes", "00:00-06:00")
	case 2:
		v.Set("leaveTimes", "06:00-12:00")
	case 3:
		v.Set("leaveTimes", "12:00-18:00")
	case 4:
		v.Set("leaveTimes", "18:00-24:00")
	}
	body := strings.NewReader(v.Encode())
	client := http.DefaultClient
	req, err := http.NewRequest("POST", "http://www.bababus.com/ticket/ticketList.htm", body)
	if err != nil {
		fmt.Printf("无法访问该网址")
		return
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=UTF-8")
	req.Header.Set("Host", "www.bababus.com")
	req.Header.Set("Origin", "http://www.bababus.com")
	req.Header.Set("Referer", "http://www.bababus.com/ticket/searchbus.htm")
	resp, er := client.Do(req)
	if er != nil {
		fmt.Printf("不能执行此请求")
		return
	}
	// BUG FIX: the response body was never closed, leaking the connection.
	defer resp.Body.Close()
	// ReadAll/Unmarshal errors are tolerated: a failed decode leaves
	// result.Success false, which falls into the error branch below —
	// same message the original printed in that case.
	ans, _ := ioutil.ReadAll(resp.Body)
	result := &Result{}
	json.Unmarshal(ans, &result)
	if result.Success {
		busList := result.Content.BusNumberList
		if len(busList) == 0 {
			fmt.Printf("no any record!")
			return
		}
		for i := 0; i < len(busList); i++ {
			bus := busList[i]
			// BUG FIX: the station name was previously used as the
			// Printf format string; a '%' in the data would misformat
			// (and `go vet` flags non-constant format strings).
			fmt.Printf("%s(%s): %s张\n", bus.BeginStationName, bus.LeaveTime, bus.RemainSeat)
		}
	} else {
		fmt.Printf("请检查你是否在短时间内执行次数过多, 需要登录网站输入图形校验码(待改进 todo)")
	}
}
package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

// parseStdin converts each argument string into a slice of its decimal
// digits, e.g. ["123", "45"] -> [[1 2 3] [4 5]].
//
// NOTE(review): the strconv.Atoi error is discarded, so any non-digit
// character silently becomes 0 — confirm callers only pass digit strings.
func parseStdin(args []string) [][]int {
	result := make([][]int, len(args))
	for i, arg := range args {
		// Split into single-character strings, one per digit.
		s := strings.Split(arg, "")
		inner := make([]int, len(s))
		for j, str := range s {
			inty, _ := strconv.Atoi(str)
			inner[j] = inty
		}
		result[i] = inner
	}
	return result
}

// main sorts the digits of the first command line argument with
// SelectionSort (defined elsewhere in this package) and prints the
// result. The commented-out calls are leftover experiments with other
// algorithms in the package.
func main() {
	args := os.Args[1:]
	stdin := parseStdin(args)
	result := SelectionSort(stdin[0], true)
	// result = InsertionSort(MakeRandIntArray(20, 300), true)
	// binsumresult, err := BinarySum(stdin[0], stdin[1])
	// if err != nil {
	// panic(err)
	// }
	fmt.Println(result)
	// fmt.Println(binsumresult)
}
package main import ( "bytes" "compress/gzip" "encoding/base64" "encoding/json" "errors" "fmt" "github.com/denisbrodbeck/machineid" "github.com/kbinani/screenshot" "golang.org/x/net/websocket" "image/png" "io/ioutil" "os" "strconv" "syscall" "time" "unsafe" ) const ( ON_SCREEN = "70" //打开监控屏幕 OFF_SCREEN = "71" //关闭监控屏幕 strKEY = "fhu84ygf8643" //字符串加密key ) var ( baseUrl string conn *websocket.Conn origin string url string Machineid = getMachineid() intervalTime = 3000 ) type Message struct { Uuid string `json:"uuid"` Machineid string `json:"machineid"` //客户端唯一识别码 Ip string `json:"ip"` Name string `json:"name"` Msg string `json:"msg"` ByteData []byte `json:"byteData"` //截屏,文件,等等大的数据 FileName string `json:"fileName"` //FileBody string `json:"fileBody"` } func creatWebsocket() (*websocket.Conn, error) { return websocket.Dial(url, token(), origin) } func token() string { Unow := time.Now().Unix() tokenBytes := encDec([]byte(fmt.Sprint(Unow) + "--" + Machineid)) //当前时间戳+系统的唯一识别码 token := base64.StdEncoding.EncodeToString(tokenBytes) return token } func getMachineid() string { //每个系统的唯一识别码 m, _ := machineid.ID() return m } func encDec(byt []byte) []byte { for i, v := range byt { byt[i] = (byte(i+95) & (^v)) | (v & (^byte(i + 95))) } return byt } //读取数据 func readMessage(conn *websocket.Conn) ([]byte, error) { again: fr, err := conn.NewFrameReader() if err != nil { return nil, err } frame, err := conn.HandleFrame(fr) if err != nil { return nil, err } if frame == nil { goto again } reqBytes, err := ioutil.ReadAll(frame) if err != nil { return reqBytes, err } reqBytes = encDec(reqBytes) //解密数据 reqBytes = UnGzipBytes(reqBytes) //解压数据 return reqBytes, nil } //发送websocket消息 func sendMessage(message Message) error { jsonBytes, _ := json.Marshal(message) //结构体转json jsonBytes = gzipBytes(jsonBytes) //压缩结构体 jsonBytes = encDec(jsonBytes) //加密 if conn != nil { _, err := conn.Write(jsonBytes) //发送消息 return err } else { return errors.New("conn is null pointer") } } //gzip压缩 func 
gzipBytes(byt []byte) []byte { var buf bytes.Buffer //zw := gzip.NewWriter(&buf) zw, _ := gzip.NewWriterLevel(&buf, gzip.BestCompression) zw.Write(byt) if err := zw.Close(); err != nil { } return buf.Bytes() } //gzip解压缩 func UnGzipBytes(byt []byte) []byte { var buf bytes.Buffer buf.Write(byt) zr, _ := gzip.NewReader(&buf) defer func() { if zr != nil { zr.Close() } }() a, _ := ioutil.ReadAll(zr) return a } func json2Message(strByte []byte) (Message, error) { var dat Message if err := json.Unmarshal(strByte, &dat); err == nil { return dat, nil } else { return dat, err } } func windowsLock() bool { kernel32, err := syscall.LoadDLL("kernel32.dll") if err != nil { return false } CreateMutexA, err := kernel32.FindProc("CreateMutexA") if err != nil { return false } _, _, lastErr := CreateMutexA.Call(uintptr(0), 0, uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr("Local\\aaaa")))) if lastErr != nil && lastErr.Error() != "The operation completed successfully." { return false } return true } func onScreen() { //监控屏幕 message := Message{Name: ON_SCREEN, Machineid: Machineid} for { img, _ := screenshot.CaptureDisplay(0) var b bytes.Buffer _ = png.Encode(&b, img) message.ByteData = b.Bytes() err := sendMessage(message) if err != nil { //连接出错 return } time.Sleep(time.Duration(intervalTime) * time.Millisecond) //间隔时间 } } //关闭退出 func settingScreen() { reqBytes, err := readMessage(conn) //会阻塞,直到收到消息或者报错 if err != nil { os.Exit(0) } reqM, err := json2Message(reqBytes) if err != nil { os.Exit(0) } if reqM.Name == OFF_SCREEN { os.Exit(0) } } func main() { if !windowsLock() { //互斥锁 return } if len(os.Args) != 3 { //屏幕比例,刷新间隔,url return } intervalTime, _ = strconv.Atoi(os.Args[1]) //截屏间隔时间毫秒 if intervalTime == 0 { //传错参数则纠正为1秒一次 intervalTime = 3000 } baseUrl = os.Args[2] origin = "http://" + baseUrl + "/" url = "ws://" + baseUrl + "/screenhfuiefdhuiwe32uhi" var err error conn, err = creatWebsocket() defer func() { if conn != nil { conn.Close() } }() if err != nil { return } go 
settingScreen() //设置 onScreen() }
package rakuten // IchibaService に メソッドを追加していく type IchibaService service
package controller import ( "net/http" "time" "feeyashop/models" "github.com/gin-gonic/gin" "gorm.io/gorm" ) type categoryInput struct { Name string `json:"name"` } // GetAllCategory godoc // @Summary Get all Category. // @Description Get a list of Category. // @Tags Category // @Produce json // @Success 200 {object} []models.Category // @Router /category [get] func GetAllCategory(c *gin.Context) { // get db from gin context db := c.MustGet("db").(*gorm.DB) var categories []models.Category db.Find(&categories) c.JSON(http.StatusOK, gin.H{"data": categories}) } // GetProductByCategoryId godoc // @Summary Get Products. // @Description Get all Products by CategoryId. // @Tags Category // @Produce json // @Param id path string true "Category id" // @Success 200 {object} []models.Product // @Router /category/{id} [get] func GetProductsByCategoryId(c *gin.Context) { // Get model if exist var categories []models.Category db := c.MustGet("db").(*gorm.DB) if err := db.Where("category_id = ?", c.Param("id")).Find(&categories).Error; err != nil { c.JSON(http.StatusBadRequest, gin.H{"error": "Record not found!"}) return } c.JSON(http.StatusOK, gin.H{"data": categories}) } // CreateCategory godoc // @Summary Create New Category. // @Description Creating a new Category. // @Tags Category // @Param Body body categoryInput true "the body to create a new Category" // @Produce json // @Success 200 {object} models.Category // @Router /category [post] // @Security ApiKeyAuth // @Param Authorization header string true "Insert your access token" default(Bearer ) func CreateCategory(c *gin.Context) { // Validate input var input categoryInput if err := c.ShouldBindJSON(&input); err != nil { c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) return } // Create Rating category := models.Category{Name: input.Name} db := c.MustGet("db").(*gorm.DB) db.Create(&category) c.JSON(http.StatusOK, gin.H{"data": category}) } // UpdateCategory godoc // @Summary Update Category. 
// @Description Update Category by id. // @Tags Category // @Produce json // @Param id path string true "Category id" // @Param Body body categoryInput true "the body to update category" // @Success 200 {object} models.Category // @Router /category/{id} [patch] // @Security ApiKeyAuth // @Param Authorization header string true "Insert your access token" default(Bearer ) func UpdateCategory(c *gin.Context) { db := c.MustGet("db").(*gorm.DB) // Get model if exist var category models.Category if err := db.Where("id = ?", c.Param("id")).First(&category).Error; err != nil { c.JSON(http.StatusBadRequest, gin.H{"error": "Record not found!"}) return } // Validate input var input categoryInput if err := c.ShouldBindJSON(&input); err != nil { c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) return } var updatedInput models.Category updatedInput.Name = input.Name updatedInput.UpdatedAt = time.Now() db.Model(&category).Updates(updatedInput) c.JSON(http.StatusOK, gin.H{"data": category}) } // DeleteCategory godoc // @Summary Delete one Category. // @Description Delete a Category by id. // @Tags Category // @Produce json // @Param id path string true "Category id" // @Success 200 {object} map[string]boolean // @Router /category/{id} [delete] // @Security ApiKeyAuth // @Param Authorization header string true "Insert your access token" default(Bearer ) func DeleteCategory(c *gin.Context) { // Get model if exist db := c.MustGet("db").(*gorm.DB) var category models.Category if err := db.Where("id = ?", c.Param("id")).First(&category).Error; err != nil { c.JSON(http.StatusBadRequest, gin.H{"error": "Record not found!"}) return } db.Delete(&category) c.JSON(http.StatusOK, gin.H{"data": true}) }
package model type Hand struct { Cards } func (h *Hand) Discard(c Card) bool { return h.Cards.Remove(c) }
package serverconfigs import ( "github.com/iwind/TeaGo/assert" "testing" ) func TestServerGroup_Protocol(t *testing.T) { a := assert.NewAssertion(t) { group := NewServerGroup("tcp://127.0.0.1:1234") a.IsTrue(group.Protocol() == ProtocolTCP) a.IsTrue(group.Addr() == "127.0.0.1:1234") } { group := NewServerGroup("http4://127.0.0.1:1234") a.IsTrue(group.Protocol() == ProtocolHTTP4) a.IsTrue(group.Addr() == "127.0.0.1:1234") } { group := NewServerGroup("127.0.0.1:1234") a.IsTrue(group.Protocol() == ProtocolHTTP) a.IsTrue(group.Addr() == "127.0.0.1:1234") } { group := NewServerGroup("unix:/tmp/my.sock") a.IsTrue(group.Protocol() == ProtocolUnix) a.IsTrue(group.Addr() == "/tmp/my.sock") } }
package lookuptree import ( "errors" "strconv" "strings" ) /* * 获得IP地址第level层的整型数(从前往后数,0至3位) */ func GetIpSection(ip string, level int) (ipsec int, err error) { ipsecs := strings.Split(ip, ".") if level < 0 || level >= len(ipsecs) { err = errors.New("Wrong index when parsing ip.") return } return strconv.Atoi(ipsecs[level]) } /* * 将ip地址转换为64位整型 */ func IpToLong(ip string) (result int64, err error) { for i := 0; i < 4; i++ { tmp, err := GetIpSection(ip, i) if err != nil { return int64(0), err } result += int64(tmp << uint8((3-i)*8)) } return result, nil } /* * 将64位整型转换为ip地址 */ func LongToIp(ip int64) (result string) { for i := 0; i < 4; i++ { tmp := ip & 0xff ip = ip >> 8 if i == 0 { result = strconv.Itoa(int(tmp)) } else { result = strconv.Itoa(int(tmp)) + "." + result } } return result } /* * 将level以下的部分都修改为255 */ func EnlargeIP(ip string, level int) (result string) { ipsecs := strings.Split(ip, ".") result = ipsecs[0] for i := 1; i < 4; i++ { if i > level { result += ".255" } else { result += "." + ipsecs[i] } } return }
package raindrops import ( "math" "strconv" ) //Convert - converts numbers to raindrops :mindblown: func Convert(num int) string { rain := "" rain += AddStringIfFactor(num, 3, "Pling") rain += AddStringIfFactor(num, 5, "Plang") rain += AddStringIfFactor(num, 7, "Plong") if rain == "" { rain = strconv.Itoa(num) } return rain } //AddStringIfFactor - returns 'raindrop' if 'num' is evenly divisible by 'divisor' func AddStringIfFactor(num int, divisor int, raindrop string) string { rain := "" if Remainder(num, divisor) == 0 { rain += raindrop } return rain } //Remainder - returns the remainder of 'num' divided by 'divisor' func Remainder(num int, divisor int) float64 { return math.Remainder(float64(num), float64(divisor)) }
// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package shoot import ( "fmt" "sync" "github.com/gardener/gardener/pkg/apis/componentconfig" gardenv1beta1 "github.com/gardener/gardener/pkg/apis/garden/v1beta1" "github.com/gardener/gardener/pkg/apis/garden/v1beta1/helper" gardeninformers "github.com/gardener/gardener/pkg/client/garden/informers/externalversions/garden/v1beta1" "github.com/gardener/gardener/pkg/client/kubernetes" "github.com/gardener/gardener/pkg/logger" "github.com/gardener/gardener/pkg/operation" botanistpkg "github.com/gardener/gardener/pkg/operation/botanist" "github.com/gardener/gardener/pkg/operation/cloudbotanist" "github.com/gardener/gardener/pkg/operation/common" "github.com/gardener/gardener/pkg/utils/imagevector" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/client-go/tools/cache" ) func (c *Controller) shootCareAdd(obj interface{}) { key, err := cache.MetaNamespaceKeyFunc(obj) if err != nil { return } c.shootCareQueue.AddAfter(key, c.config.Controllers.ShootCare.SyncPeriod.Duration) } func (c *Controller) shootCareDelete(obj interface{}) { shoot, ok := obj.(*gardenv1beta1.Shoot) if shoot == nil || !ok { return } key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) if err != nil { return } c.shootCareQueue.Done(key) } 
func (c *Controller) reconcileShootCareKey(key string) error { namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { return err } shoot, err := c.shootLister.Shoots(namespace).Get(name) if apierrors.IsNotFound(err) { logger.Logger.Debugf("[SHOOT CARE] %s - skipping because Shoot has been deleted", key) return nil } if err != nil { logger.Logger.Infof("[SHOOT CARE] %s - unable to retrieve object from store: %v", key, err) return err } defer c.shootCareAdd(shoot) if operationOngoing(shoot) { logger.Logger.Debugf("[SHOOT CARE] %s - skipping because an operation in ongoing", key) return nil } return c.careControl.Care(shoot, key) } // CareControlInterface implements the control logic for caring for Shoots. It is implemented as an interface to allow // for extensions that provide different semantics. Currently, there is only one implementation. type CareControlInterface interface { Care(shoot *gardenv1beta1.Shoot, key string) error } // NewDefaultCareControl returns a new instance of the default implementation CareControlInterface that // implements the documented semantics for caring for Shoots. updater is the UpdaterInterface used // to update the status of Shoots. You should use an instance returned from NewDefaultCareControl() for any // scenario other than testing. 
func NewDefaultCareControl(k8sGardenClient kubernetes.Client, k8sGardenInformers gardeninformers.Interface, secrets map[string]*corev1.Secret, imageVector imagevector.ImageVector, identity *gardenv1beta1.Gardener, config *componentconfig.ControllerManagerConfiguration, updater UpdaterInterface) CareControlInterface {
	return &defaultCareControl{k8sGardenClient, k8sGardenInformers, secrets, imageVector, identity, config, updater}
}

// defaultCareControl is the default CareControlInterface implementation.
type defaultCareControl struct {
	k8sGardenClient    kubernetes.Client
	k8sGardenInformers gardeninformers.Interface
	secrets            map[string]*corev1.Secret
	imageVector        imagevector.ImageVector
	identity           *gardenv1beta1.Gardener
	config             *componentconfig.ControllerManagerConfiguration
	updater            UpdaterInterface
}

// Care runs one care cycle for the given Shoot: it builds the operation,
// botanists and Shoot clients (recording a ConditionCheckError on the status
// conditions if any of those fail), triggers garbage collection and health
// checks, writes the resulting conditions to the Shoot status, and finally
// labels the Shoot healthy/unhealthy. It operates on a deep copy of shootObj.
// Errors are handled by updating status/logging; it always returns nil.
func (c *defaultCareControl) Care(shootObj *gardenv1beta1.Shoot, key string) error {
	var (
		shoot       = shootObj.DeepCopy()
		shootLogger = logger.NewShootLogger(logger.Logger, shoot.Name, shoot.Namespace, "")
	)
	shootLogger.Debugf("[SHOOT CARE] %s", key)

	// Note: this local `operation` shadows the imported operation package.
	operation, err := operation.New(shoot, shootLogger, c.k8sGardenClient, c.k8sGardenInformers, c.identity, c.secrets, c.imageVector)
	if err != nil {
		shootLogger.Errorf("could not initialize a new operation: %s", err.Error())
		return nil
	}

	// Initialize conditions based on the current status.
	var (
		newConditions                    = helper.NewConditions(shoot.Status.Conditions, gardenv1beta1.ShootControlPlaneHealthy, gardenv1beta1.ShootEveryNodeReady, gardenv1beta1.ShootSystemComponentsHealthy)
		conditionControlPlaneHealthy     = newConditions[0]
		conditionEveryNodeReady          = newConditions[1]
		conditionSystemComponentsHealthy = newConditions[2]
	)

	botanist, err := botanistpkg.New(operation)
	if err != nil {
		// Mark all three conditions unknown, persist, and bail out.
		message := fmt.Sprintf("Failed to create a botanist object to perform the care operations (%s).", err.Error())
		conditionControlPlaneHealthy = helper.ModifyCondition(conditionControlPlaneHealthy, corev1.ConditionUnknown, gardenv1beta1.ConditionCheckError, message)
		conditionEveryNodeReady = helper.ModifyCondition(conditionEveryNodeReady, corev1.ConditionUnknown, gardenv1beta1.ConditionCheckError, message)
		conditionSystemComponentsHealthy = helper.ModifyCondition(conditionSystemComponentsHealthy, corev1.ConditionUnknown, gardenv1beta1.ConditionCheckError, message)
		operation.Logger.Error(message)
		c.updateShootStatus(shoot, *conditionControlPlaneHealthy, *conditionEveryNodeReady, *conditionSystemComponentsHealthy)
		return nil
	}
	cloudBotanist, err := cloudbotanist.New(operation, common.CloudPurposeShoot)
	if err != nil {
		message := fmt.Sprintf("Failed to create a Cloud Botanist to perform the care operations (%s).", err.Error())
		conditionControlPlaneHealthy = helper.ModifyCondition(conditionControlPlaneHealthy, corev1.ConditionUnknown, gardenv1beta1.ConditionCheckError, message)
		conditionEveryNodeReady = helper.ModifyCondition(conditionEveryNodeReady, corev1.ConditionUnknown, gardenv1beta1.ConditionCheckError, message)
		conditionSystemComponentsHealthy = helper.ModifyCondition(conditionSystemComponentsHealthy, corev1.ConditionUnknown, gardenv1beta1.ConditionCheckError, message)
		operation.Logger.Error(message)
		c.updateShootStatus(shoot, *conditionControlPlaneHealthy, *conditionEveryNodeReady, *conditionSystemComponentsHealthy)
		return nil
	}
	if err := botanist.InitializeShootClients(); err != nil {
		// Only the node/system-components conditions depend on the Shoot
		// client; the control-plane condition keeps its previous value.
		message := fmt.Sprintf("Failed to create a K8SClient for the Shoot cluster to perform the care operations (%s).", err.Error())
		conditionEveryNodeReady = helper.ModifyCondition(conditionEveryNodeReady, corev1.ConditionUnknown, gardenv1beta1.ConditionCheckError, message)
		conditionSystemComponentsHealthy = helper.ModifyCondition(conditionSystemComponentsHealthy, corev1.ConditionUnknown, gardenv1beta1.ConditionCheckError, message)
		operation.Logger.Error(message)
		c.updateShootStatus(shoot, *conditionControlPlaneHealthy, *conditionEveryNodeReady, *conditionSystemComponentsHealthy)
		return nil
	}

	// Trigger garbage collection
	garbageCollection(botanist)

	// Trigger health check
	conditionControlPlaneHealthy, conditionEveryNodeReady, conditionSystemComponentsHealthy = healthCheck(botanist, cloudBotanist, conditionControlPlaneHealthy, conditionEveryNodeReady, conditionSystemComponentsHealthy)

	// Update Shoot status
	if newShoot, _ := c.updateShootStatus(shoot, *conditionControlPlaneHealthy, *conditionEveryNodeReady, *conditionSystemComponentsHealthy); newShoot != nil {
		shoot = newShoot
	}

	// Mark Shoot as healthy/unhealthy
	var (
		lastOperation = shoot.Status.LastOperation
		lastError     = shoot.Status.LastError
		healthy       = lastOperation == nil || (lastOperation.State == gardenv1beta1.ShootLastOperationStateSucceeded && lastError == nil && conditionControlPlaneHealthy.Status == corev1.ConditionTrue && conditionEveryNodeReady.Status == corev1.ConditionTrue && conditionSystemComponentsHealthy.Status == corev1.ConditionTrue)
	)
	c.labelShoot(shoot, healthy)
	return nil
}

// updateShootStatus writes the given conditions to the Shoot status if they
// differ from the stored ones, returning the updated Shoot object.
func (c *defaultCareControl) updateShootStatus(shoot *gardenv1beta1.Shoot, conditions ...gardenv1beta1.Condition) (*gardenv1beta1.Shoot, error) {
	if !helper.ConditionsNeedUpdate(shoot.Status.Conditions, conditions) {
		return shoot, nil
	}
	shoot.Status.Conditions = conditions
	newShoot, err := c.updater.UpdateShootStatusIfNoOperation(shoot)
	if err != nil {
		logger.Logger.Errorf("Could not update the Shoot status: %+v", err)
	}
	return newShoot, err
}

// labelShoot applies the healthy/unhealthy label set to the Shoot's metadata.
func (c *defaultCareControl) labelShoot(shoot *gardenv1beta1.Shoot, healthy bool) error {
	_, err := c.updater.UpdateShootLabels(shoot, computeLabelsWithShootHealthiness(healthy))
	if err != nil {
		logger.Logger.Errorf("Could not update the Shoot metadata: %s", err.Error())
	}
	return err
}

// garbageCollection cleans the Seed and the Shoot cluster from unrequired objects.
// It receives a Garden object <garden> which stores the Shoot object.
// Both collections run concurrently; the call blocks until both finish.
func garbageCollection(botanist *botanistpkg.Botanist) {
	var wg sync.WaitGroup

	wg.Add(2)
	go func() {
		defer wg.Done()
		botanist.PerformGarbageCollectionSeed()
	}()
	go func() {
		defer wg.Done()
		botanist.PerformGarbageCollectionShoot()
	}()
	wg.Wait()

	botanist.Logger.Debugf("Successfully performed garbage collection for Shoot cluster '%s'", botanist.Shoot.Info.Name)
}

// healthCheck performs several health checks and updates the status conditions.
// It receives a Garden object <garden> which stores the Shoot object.
// The current Health check verifies that the control plane running in the Seed cluster is healthy, every
// node is ready and that all system components (pods running kube-system) are healthy.
func healthCheck(botanist *botanistpkg.Botanist, cloudBotanist cloudbotanist.CloudBotanist, conditionControlPlaneHealthy, conditionEveryNodeReady, conditionSystemComponentsHealthy *gardenv1beta1.Condition) (*gardenv1beta1.Condition, *gardenv1beta1.Condition, *gardenv1beta1.Condition) { var wg sync.WaitGroup wg.Add(3) go func() { defer wg.Done() conditionControlPlaneHealthy = botanist.CheckConditionControlPlaneHealthy(conditionControlPlaneHealthy) }() go func() { defer wg.Done() conditionEveryNodeReady = botanist.CheckConditionEveryNodeReady(conditionEveryNodeReady) }() go func() { defer wg.Done() conditionSystemComponentsHealthy = botanist.CheckConditionSystemComponentsHealthy(conditionSystemComponentsHealthy) }() wg.Wait() botanist.Logger.Debugf("Successfully performed health check for Shoot cluster '%s'", botanist.Shoot.Info.Name) return conditionControlPlaneHealthy, conditionEveryNodeReady, conditionSystemComponentsHealthy }
// Copyright 2018 Clivern. All rights reserved. // Use of this source code is governed by the MIT // license that can be found in the LICENSE file. package module import ( "fmt" "github.com/clivern/walrus/core/driver" log "github.com/sirupsen/logrus" "github.com/spf13/viper" ) // Stats type type Stats struct { db driver.Database } // NewStats creates a stats instance func NewStats(db driver.Database) *Stats { result := new(Stats) result.db = db return result } // GetTotalTowers gets total towers count func (s *Stats) GetTotalTowers() (int, error) { log.Debug("Counting towers") key := fmt.Sprintf( "%s/tower", viper.GetString(fmt.Sprintf("%s.database.etcd.databaseName", viper.GetString("role"))), ) keys, err := s.db.GetKeys(key) if err != nil { log.WithFields(log.Fields{ "error": err.Error(), }).Error("Error while getting towers count") return 0, err } log.WithFields(log.Fields{ "count": len(keys), }).Debug("Current towers count") return len(keys), nil } // GetTotalHosts gets total hosts count func (s *Stats) GetTotalHosts() (int, error) { log.Debug("Counting hosts") key := fmt.Sprintf( "%s/host", viper.GetString(fmt.Sprintf("%s.database.etcd.databaseName", viper.GetString("role"))), ) keys, err := s.db.GetKeys(key) if err != nil { log.WithFields(log.Fields{ "error": err.Error(), }).Error("Error while getting hosts count") return 0, err } log.WithFields(log.Fields{ "count": len(keys), }).Debug("Current hosts count") return len(keys), nil }
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"os"
)

// Config is used to store the vars from the json config file
type Config struct {
	URL    string `json:"url"`
	APIKey string `json:"api_key"`
}

// Interface stores the interface information found from the server
type Interface struct {
	Name       string `json:"name"`
	MacAddress string `json:"mac_address"`
}

// Data stores the information to be sent to the remote API
type Data struct {
	Interfaces []Interface `json:"interfaces"`
}

// ParseInterfaces will gather the MacAddress and Name of interfaces,
// skipping interfaces without a hardware address.
func ParseInterfaces() []Interface {
	var interfaces = []Interface{}

	ifaces, err := net.Interfaces()
	if err != nil {
		fmt.Printf("Unable to parse interfaces")
		panic(err)
	}

	for _, iface := range ifaces {
		if len(iface.HardwareAddr.String()) > 0 {
			interfaces = append(interfaces, Interface{iface.Name, iface.HardwareAddr.String()})
		}
	}
	return interfaces
}

// APICall sends our interfaces to the remote API and reports whether the
// server acknowledged the payload with 201 Created.
func APICall(config *Config, data Data) bool {
	url := config.URL
	b, _ := json.Marshal(data)

	req, err := http.NewRequest("POST", url, bytes.NewBuffer(b))
	if err != nil {
		// BUG FIX: this error used to be silently overwritten, letting a nil
		// request reach client.Do on a malformed URL.
		panic("Failed to build request, Check the configured URL?")
	}
	req.Header.Set("X-API-Key", config.APIKey)
	req.Header.Set("Content-Type", "application/json")

	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		panic("Failed to send server details, Check network connection?")
	}
	defer resp.Body.Close()

	// BUG FIX: resp.Status is a string such as "201 Created", so the old
	// comparison resp.Status != "201" treated every response as a failure.
	// Compare the numeric status code instead.
	return resp.StatusCode == http.StatusCreated
}

// ParseConfig parses our configuration file; it panics when the file is
// missing, unreadable or not valid JSON.
func ParseConfig(file string) *Config {
	if _, err := os.Stat(file); os.IsNotExist(err) {
		panic("Error parsing config file, File not found")
	}

	data := new(Config)
	fileData, err := ioutil.ReadFile(file)
	if err != nil {
		panic("Error parsing config file")
	}
	err = json.Unmarshal(fileData, data)
	if err != nil {
		panic("Error parsing config file")
	}
	return data
}

// main function call
func main() {
	// Parse config
	config := ParseConfig("config.json")

	var serverLabel string
	var data Data

	// Get the server label
	fmt.Printf("Please enter server label: ")
	fmt.Scanf("%s", &serverLabel)

	// If server label is empty, We should panic
	// Later on we will instead create a new server
	if len(serverLabel) == 0 {
		panic("Server label unknown")
	}

	// Get server interfaces
	data.Interfaces = ParseInterfaces()

	// Post data
	if !APICall(config, data) {
		panic("Error occured parsing data on the remote server")
	}
}
package config import ( "strings" "github.com/layer5io/meshery-adapter-library/adapter" "github.com/layer5io/meshery-adapter-library/meshes" smp "github.com/layer5io/service-mesh-performance/spec" ) var ( ConsulOperation = strings.ToLower(smp.ServiceMesh_CONSUL.Enum().String()) ) func getOperations(dev adapter.Operations) adapter.Operations { versions, _ := GetLatestReleases(3) var versionNames []adapter.Version for _, v := range versions { versionNames = append(versionNames, v.Name) } dev[ConsulOperation] = &adapter.Operation{ Type: int32(meshes.OpCategory_INSTALL), Description: "Consul", Versions: versionNames, Templates: []adapter.Template{ "templates/consul.yaml", }, AdditionalProperties: map[string]string{}, } return dev }
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// EDIT THIS FILE!  THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required.  Any new fields you add must have json tags for the fields to be serialized.

// FunctionFile configures how the function's file is named.
type FunctionFile struct {
	// The filename format of function
	// +kubebuilder:validation:Optional
	// +kubebuilder:default="{Version}"
	Name string `json:"name,omitempty"`
}

// FunctionConfigMap configures the ConfigMap backing the function.
type FunctionConfigMap struct {
	// The ConfigMap name format (was mislabeled "filename format").
	// +kubebuilder:validation:Optional
	// +kubebuilder:default="fn-{Name}"
	Name string `json:"name,omitempty"`

	// The mount path format for the ConfigMap.
	// +kubebuilder:validation:Optional
	// +kubebuilder:default="/kess/fn/{Name}"
	Mount string `json:"mount,omitempty"`
}

// FunctionSpec defines the desired state of Function
type FunctionSpec struct {
	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
	// Important: Run "make" to regenerate code after modifying this file

	// Optional function reference (copied comment said "version" — see json tag).
	// +kubebuilder:validation:Optional
	Function string `json:"function,omitempty"`

	// Optional version of function
	// +kubebuilder:validation:Optional
	Version string `json:"version,omitempty"`

	// The runtime name of function
	// +kubebuilder:validation:Required
	Runtime string `json:"runtime,omitempty"`

	// The filename format of function
	// +kubebuilder:validation:Optional
	File FunctionFile `json:"file,omitempty"`

	// The ConfigMap settings of function
	// +kubebuilder:validation:Optional
	ConfigMap FunctionConfigMap `json:"configMap,omitempty"`

	// The string of function
	// +kubebuilder:validation:Optional
	Data string `json:"data,omitempty"`

	// The binary of function
	// +kubebuilder:validation:Optional
	BinaryData []byte `json:"binaryData,omitempty"`
}

// FunctionStatus defines the observed state of Function
type FunctionStatus struct {
	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
	// Important: Run "make" to regenerate code after modifying this file

	// Optional ready string of runtime for show
	// +kubebuilder:validation:Optional
	Ready string `json:"ready,omitempty"`
}

// +kubebuilder:resource:categories="kess",shortName="fn"
// +kubebuilder:subresource:status
// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.selector
// +kubebuilder:printcolumn:name="Ready",type=string,JSONPath=`.status.ready`,priority=0
// +kubebuilder:printcolumn:name="Function",type=string,JSONPath=`.spec.function`,priority=0
// +kubebuilder:printcolumn:name="Version",type=string,JSONPath=`.spec.version`,priority=0
// +kubebuilder:printcolumn:name="Runtime",type=string,JSONPath=`.spec.runtime`,priority=0
// +kubebuilder:object:root=true

// Function is the Schema for the functions API
type Function struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   FunctionSpec   `json:"spec,omitempty"`
	Status FunctionStatus `json:"status,omitempty"`
}

// +kubebuilder:object:root=true

// FunctionList contains a list of Function
type FunctionList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []Function `json:"items"`
}

func init() {
	SchemeBuilder.Register(&Function{}, &FunctionList{})
}
package sqlbuilder // Add a HAVING clause to your query with one or more constraints (either Expr instances or And/Or functions) func (q *Query) Having(constraints ...SQLProvider) *Query { if q.having == nil { q.having = new(constraint) q.having.gate = gate_and } q.having.children = append(q.having.children, constraints...) return q }
package ikgo const ( HIT_UNMATCH = 0x00000000 HIT_MATCH = 0x00000001 HIT_PREFIX = 0x00000010 ) type Hit struct { hitState int //该HIT当前状态,默认未匹配 matchedDictSegment *DictSegment //记录词典匹配过程中,当前匹配到的词典分支节点 beg, end int //词段起止位置 } /** * 判断是否完全匹配 */ func (h *Hit) isMatch() bool { return (h.hitState & HIT_MATCH) > 0 } func (h *Hit) setMatch() { h.hitState |= HIT_MATCH } /** * 判断是否是词的前缀 */ func (h *Hit) isPrefix() bool { return (h.hitState & HIT_PREFIX) > 0 } func (h *Hit) setPrefix() { h.hitState |= HIT_PREFIX } /** * 判断是否是不匹配 */ func (h *Hit) isUnmatch() bool { return h.hitState == HIT_UNMATCH } func (h *Hit) setUnmatch() { h.hitState = HIT_UNMATCH }
package ircserver import ( "testing" "github.com/robustirc/robustirc/internal/robust" "gopkg.in/sorcix/irc.v2" ) func TestServerQuit(t *testing.T) { i, ids := stdIRCServerWithServices() i.ProcessMessage(&robust.Message{Session: ids["services"]}, irc.ParseMessage(":services.robustirc.net NICK blorgh 1 1425542735 enforcer services.robustirc.net services.robustirc.net 0 :Services Enforcer")) mustMatchMsg(t, i.ProcessMessage(&robust.Message{Session: ids["services"]}, irc.ParseMessage(":blorgh QUIT")), ":blorgh!enforcer@robust/0x13c6cdee3e749faf QUIT :") }
package client /* #cgo CFLAGS: -std=c11 #cgo LDFLAGS: -lcomedi -lm */ import "C" const N_FLOORS = 4 const N_BUTTONS = 3 const MOTOR_SPEED = 2800 type elevMotorDirection int const ( DIRN_DOWN elevMotorDirection = -1 << iota DIRN_STOP DIRN_UP ) type elevButtonType int const ( BUTTON_CALL_UP elevButtonType = iota BUTTON_CALL_DOWN BUTTON_COMMAND ) //in port 4 const ( PORT_4_SUBDEVICE = 3 PORT_4_CHANNEL_OFFSET = 16 //PORT_4_DIRECTION = int(C.COMEDI_INPUT) OBSTRUCTION = (0x300+23) STOP = (0x300+22) BUTTON_COMMAND1 = (0x300+21) BUTTON_COMMAND2 = (0x300+20) BUTTON_COMMAND3 = (0x300+19) BUTTON_COMMAND4 = (0x300+18) BUTTON_UP1 = (0x300+17) BUTTON_UP2 = (0x300+16) ) //in port 1 const ( PORT_1_SUBDEVICE = 2 PORT_1_CHANNEL_OFFSET = 0 //PORT_1_DIRECTION = int(C.COMEDI_INPUT) BUTTON_DOWN2 = (0x200+0) BUTTON_UP3 = (0x200+1) BUTTON_DOWN3 = (0x200+2) BUTTON_DOWN4 = (0x200+3) SENSOR_FLOOR1 = (0x200+4) SENSOR_FLOOR2 = (0x200+5) SENSOR_FLOOR3 = (0x200+6) SENSOR_FLOOR4 = (0x200+7) ) //out port 3 const ( PORT_3_SUBDEVICE = 3 PORT_3_CHANNEL_OFFSET = 8 //PORT_3_DIRECTION = int(C.COMEDI_OUTPUT) MOTORDIR = (0x300+15) LIGHT_STOP = (0x300+14) LIGHT_COMMAND1 = (0x300+13) LIGHT_COMMAND2 = (0x300+12) LIGHT_COMMAND3 = (0x300+11) LIGHT_COMMAND4 = (0x300+10) LIGHT_UP1 = (0x300+9) LIGHT_UP2 = (0x300+8) ) //out port 2 const ( PORT_2_SUBDEVICE = 3 PORT_2_CHANNEL_OFFSET = 0 //PORT_2_DIRECTION = int(C.COMEDI_OUTPUT) LIGHT_DOWN2 = (0x300+7) LIGHT_UP3 = (0x300+6) LIGHT_DOWN3 = (0x300+5) LIGHT_DOWN4 = (0x300+4) LIGHT_DOOR_OPEN = (0x300+3) LIGHT_FLOOR_IND2 = (0x300+1) LIGHT_FLOOR_IND1 = (0x300+0) ) //out port 0 const ( MOTOR = (0x100+0) ) //non-existing ports (for alignment) const ( BUTTON_DOWN1 = -1 BUTTON_UP4 = -1 LIGHT_DOWN1 = -1 LIGHT_UP4 = -1 )
package sql_test

import (
	"reflect"
	"strings"
	"testing"

	"github.com/messagedb/messagedb/sql"
)

// Ensure the scanner can scan tokens correctly.
// Each case scans only the FIRST token of s and checks token type, literal
// and (where set) position. Note from the whitespace cases: the scanner
// normalizes "\r" and "\r\n" to "\n" in WS literals.
func TestScanner_Scan(t *testing.T) {
	var tests = []struct {
		s   string
		tok sql.Token
		lit string
		pos sql.Pos
	}{
		// Special tokens (EOF, ILLEGAL, WS)
		{s: ``, tok: sql.EOF},
		{s: `#`, tok: sql.ILLEGAL, lit: `#`},
		{s: ` `, tok: sql.WS, lit: " "},
		{s: "\t", tok: sql.WS, lit: "\t"},
		{s: "\n", tok: sql.WS, lit: "\n"},
		{s: "\r", tok: sql.WS, lit: "\n"},
		{s: "\r\n", tok: sql.WS, lit: "\n"},
		{s: "\rX", tok: sql.WS, lit: "\n"},
		{s: "\n\r", tok: sql.WS, lit: "\n\n"},
		{s: " \n\t \r\n\t", tok: sql.WS, lit: " \n\t \n\t"},
		{s: " foo", tok: sql.WS, lit: " "},

		// Numeric operators
		{s: `+`, tok: sql.ADD},
		{s: `-`, tok: sql.SUB},
		{s: `*`, tok: sql.MUL},
		{s: `/`, tok: sql.DIV},

		// Logical operators
		{s: `AND`, tok: sql.AND},
		{s: `and`, tok: sql.AND},
		{s: `OR`, tok: sql.OR},
		{s: `or`, tok: sql.OR},

		{s: `=`, tok: sql.EQ},
		{s: `<>`, tok: sql.NEQ},
		{s: `! `, tok: sql.ILLEGAL, lit: "!"},
		{s: `<`, tok: sql.LT},
		{s: `<=`, tok: sql.LTE},
		{s: `>`, tok: sql.GT},
		{s: `>=`, tok: sql.GTE},

		// Misc tokens
		{s: `(`, tok: sql.LPAREN},
		{s: `)`, tok: sql.RPAREN},
		{s: `,`, tok: sql.COMMA},
		{s: `;`, tok: sql.SEMICOLON},
		{s: `.`, tok: sql.DOT},
		{s: `=~`, tok: sql.EQREGEX},
		{s: `!~`, tok: sql.NEQREGEX},

		// Identifiers
		{s: `foo`, tok: sql.IDENT, lit: `foo`},
		{s: `_foo`, tok: sql.IDENT, lit: `_foo`},
		{s: `Zx12_3U_-`, tok: sql.IDENT, lit: `Zx12_3U_`},
		{s: `"foo"`, tok: sql.IDENT, lit: `foo`},
		{s: `"foo\\bar"`, tok: sql.IDENT, lit: `foo\bar`},
		{s: `"foo\bar"`, tok: sql.BADESCAPE, lit: `\b`, pos: sql.Pos{Line: 0, Char: 5}},
		{s: `"foo\"bar\""`, tok: sql.IDENT, lit: `foo"bar"`},
		{s: `test"`, tok: sql.BADSTRING, lit: "", pos: sql.Pos{Line: 0, Char: 3}},
		{s: `"test`, tok: sql.BADSTRING, lit: `test`},

		{s: `true`, tok: sql.TRUE},
		{s: `false`, tok: sql.FALSE},

		// Strings
		{s: `'testing 123!'`, tok: sql.STRING, lit: `testing 123!`},
		{s: `'foo\nbar'`, tok: sql.STRING, lit: "foo\nbar"},
		{s: `'foo\\bar'`, tok: sql.STRING, lit: "foo\\bar"},
		{s: `'test`, tok: sql.BADSTRING, lit: `test`},
		{s: "'test\nfoo", tok: sql.BADSTRING, lit: `test`},
		{s: `'test\g'`, tok: sql.BADESCAPE, lit: `\g`, pos: sql.Pos{Line: 0, Char: 6}},

		// Numbers
		{s: `100`, tok: sql.NUMBER, lit: `100`},
		{s: `100.23`, tok: sql.NUMBER, lit: `100.23`},
		{s: `+100.23`, tok: sql.NUMBER, lit: `+100.23`},
		{s: `-100.23`, tok: sql.NUMBER, lit: `-100.23`},
		{s: `-100.`, tok: sql.NUMBER, lit: `-100`},
		{s: `.23`, tok: sql.NUMBER, lit: `.23`},
		{s: `+.23`, tok: sql.NUMBER, lit: `+.23`},
		{s: `-.23`, tok: sql.NUMBER, lit: `-.23`},
		//{s: `.`, tok: sql.ILLEGAL, lit: `.`},
		{s: `-.`, tok: sql.SUB, lit: ``},
		{s: `+.`, tok: sql.ADD, lit: ``},
		{s: `10.3s`, tok: sql.NUMBER, lit: `10.3`},

		// Durations
		{s: `10u`, tok: sql.DURATION_VAL, lit: `10u`},
		{s: `10µ`, tok: sql.DURATION_VAL, lit: `10µ`},
		{s: `10ms`, tok: sql.DURATION_VAL, lit: `10ms`},
		{s: `-1s`, tok: sql.DURATION_VAL, lit: `-1s`},
		{s: `10m`, tok: sql.DURATION_VAL, lit: `10m`},
		{s: `10h`, tok: sql.DURATION_VAL, lit: `10h`},
		{s: `10d`, tok: sql.DURATION_VAL, lit: `10d`},
		{s: `10w`, tok: sql.DURATION_VAL, lit: `10w`},
		{s: `10x`, tok: sql.NUMBER, lit: `10`}, // non-duration unit

		// Keywords
		{s: `ALL`, tok: sql.ALL},
		{s: `ALTER`, tok: sql.ALTER},
		{s: `AS`, tok: sql.AS},
		{s: `ASC`, tok: sql.ASC},
		{s: `BEGIN`, tok: sql.BEGIN},
		{s: `BY`, tok: sql.BY},
		{s: `CREATE`, tok: sql.CREATE},
		{s: `CONVERSATION`, tok: sql.CONVERSATION},
		{s: `CONVERSATIONS`, tok: sql.CONVERSATIONS},
		{s: `DATABASE`, tok: sql.DATABASE},
		{s: `DATABASES`, tok: sql.DATABASES},
		{s: `DEFAULT`, tok: sql.DEFAULT},
		{s: `DELETE`, tok: sql.DELETE},
		{s: `DESC`, tok: sql.DESC},
		{s: `DROP`, tok: sql.DROP},
		{s: `DURATION`, tok: sql.DURATION},
		{s: `END`, tok: sql.END},
		{s: `EXISTS`, tok: sql.EXISTS},
		{s: `EXPLAIN`, tok: sql.EXPLAIN},
		{s: `FIELD`, tok: sql.FIELD},
		{s: `FROM`, tok: sql.FROM},
		{s: `GRANT`, tok: sql.GRANT},
		{s: `IF`, tok: sql.IF},
		// {s: `INNER`, tok: sql.INNER},
		{s: `INSERT`, tok: sql.INSERT},
		{s: `KEY`, tok: sql.KEY},
		{s: `KEYS`, tok: sql.KEYS},
		{s: `LIMIT`, tok: sql.LIMIT},
		{s: `SHOW`, tok: sql.SHOW},
		{s: `MEMBER`, tok: sql.MEMBER},
		{s: `MEMBERS`, tok: sql.MEMBERS},
		{s: `OFFSET`, tok: sql.OFFSET},
		{s: `ON`, tok: sql.ON},
		{s: `ORDER`, tok: sql.ORDER},
		{s: `ORGANIZATION`, tok: sql.ORGANIZATION},
		{s: `ORGANIZATIONS`, tok: sql.ORGANIZATIONS},
		{s: `PASSWORD`, tok: sql.PASSWORD},
		{s: `POLICY`, tok: sql.POLICY},
		{s: `POLICIES`, tok: sql.POLICIES},
		{s: `PRIVILEGES`, tok: sql.PRIVILEGES},
		{s: `QUERIES`, tok: sql.QUERIES},
		{s: `QUERY`, tok: sql.QUERY},
		{s: `READ`, tok: sql.READ},
		{s: `RETENTION`, tok: sql.RETENTION},
		{s: `REVOKE`, tok: sql.REVOKE},
		{s: `SELECT`, tok: sql.SELECT},
		{s: `TAG`, tok: sql.TAG},
		{s: `TO`, tok: sql.TO},
		{s: `USER`, tok: sql.USER},
		{s: `USERS`, tok: sql.USERS},
		{s: `VALUES`, tok: sql.VALUES},
		{s: `WHERE`, tok: sql.WHERE},
		{s: `WITH`, tok: sql.WITH},
		{s: `WRITE`, tok: sql.WRITE},
		{s: `explain`, tok: sql.EXPLAIN}, // case insensitive
		{s: `seLECT`, tok: sql.SELECT},   // case insensitive
	}

	for i, tt := range tests {
		s := sql.NewScanner(strings.NewReader(tt.s))
		tok, pos, lit := s.Scan()
		if tt.tok != tok {
			t.Errorf("%d. %q token mismatch: exp=%q got=%q <%q>", i, tt.s, tt.tok, tok, lit)
		} else if tt.pos.Line != pos.Line || tt.pos.Char != pos.Char {
			t.Errorf("%d. %q pos mismatch: exp=%#v got=%#v", i, tt.s, tt.pos, pos)
		} else if tt.lit != lit {
			t.Errorf("%d. %q literal mismatch: exp=%q got=%q", i, tt.s, tt.lit, lit)
		}
	}
}

// Ensure the scanner can scan a series of tokens correctly.
// TestScanner_Scan_Multi ensures the scanner can scan a series of tokens
// correctly: it tokenizes a full SELECT statement and compares every
// (token, position, literal) triple against a hand-written expectation list.
func TestScanner_Scan_Multi(t *testing.T) {
	// result captures one Scan() call's full output for comparison.
	type result struct {
		tok sql.Token
		pos sql.Pos
		lit string
	}
	// Expected token stream. Keyword tokens carry an empty literal; WS and
	// IDENT/STRING tokens carry the consumed text. Positions are 0-indexed
	// byte offsets within the single input line.
	// NOTE(review): the STRING entry reuses Char 36 (same as the preceding
	// WS) rather than 37 — presumably this mirrors the scanner's actual
	// reporting of quoted-string positions; confirm against the scanner.
	exp := []result{
		{tok: sql.SELECT, pos: sql.Pos{Line: 0, Char: 0}, lit: ""},
		{tok: sql.WS, pos: sql.Pos{Line: 0, Char: 6}, lit: " "},
		{tok: sql.IDENT, pos: sql.Pos{Line: 0, Char: 7}, lit: "value"},
		{tok: sql.WS, pos: sql.Pos{Line: 0, Char: 12}, lit: " "},
		{tok: sql.FROM, pos: sql.Pos{Line: 0, Char: 13}, lit: ""},
		{tok: sql.WS, pos: sql.Pos{Line: 0, Char: 17}, lit: " "},
		{tok: sql.IDENT, pos: sql.Pos{Line: 0, Char: 18}, lit: "myseries"},
		{tok: sql.WS, pos: sql.Pos{Line: 0, Char: 26}, lit: " "},
		{tok: sql.WHERE, pos: sql.Pos{Line: 0, Char: 27}, lit: ""},
		{tok: sql.WS, pos: sql.Pos{Line: 0, Char: 32}, lit: " "},
		{tok: sql.IDENT, pos: sql.Pos{Line: 0, Char: 33}, lit: "a"},
		{tok: sql.WS, pos: sql.Pos{Line: 0, Char: 34}, lit: " "},
		{tok: sql.EQ, pos: sql.Pos{Line: 0, Char: 35}, lit: ""},
		{tok: sql.WS, pos: sql.Pos{Line: 0, Char: 36}, lit: " "},
		{tok: sql.STRING, pos: sql.Pos{Line: 0, Char: 36}, lit: "b"},
		{tok: sql.EOF, pos: sql.Pos{Line: 0, Char: 40}, lit: ""},
	}

	// Create a scanner.
	v := `SELECT value from myseries WHERE a = 'b'`
	s := sql.NewScanner(strings.NewReader(v))

	// Continually scan until we reach the end.
	var act []result
	for {
		tok, pos, lit := s.Scan()
		act = append(act, result{tok, pos, lit})
		if tok == sql.EOF {
			break
		}
	}

	// Verify the token counts match.
	if len(exp) != len(act) {
		t.Fatalf("token count mismatch: exp=%d, got=%d", len(exp), len(act))
	}

	// Verify each token matches.
	for i := range exp {
		if !reflect.DeepEqual(exp[i], act[i]) {
			t.Fatalf("%d. token mismatch:\n\nexp=%#v\n\ngot=%#v", i, exp[i], act[i])
		}
	}
}
// TestScanString ensures the library can correctly scan (unquote) strings:
// it runs sql.ScanString over quoted inputs — both double- and single-quoted —
// and checks the unescaped output and any error message. errstring is a
// helper defined elsewhere in this package that stringifies a nil-able error.
func TestScanString(t *testing.T) {
	var tests = []struct {
		in  string
		out string
		err string
	}{
		{in: `""`, out: ``},
		{in: `"foo bar"`, out: `foo bar`},
		{in: `'foo bar'`, out: `foo bar`},
		{in: `"foo\nbar"`, out: "foo\nbar"},
		{in: `"foo\\bar"`, out: `foo\bar`},
		{in: `"foo\"bar"`, out: `foo"bar`},
		{in: `"foo` + "\n", out: `foo`, err: "bad string"}, // newline in string
		{in: `"foo`, out: `foo`, err: "bad string"},        // unclosed quotes
		{in: `"foo\xbar"`, out: `\x`, err: "bad escape"},   // invalid escape
	}

	for i, tt := range tests {
		out, err := sql.ScanString(strings.NewReader(tt.in))
		if tt.err != errstring(err) {
			t.Errorf("%d. %s: error: exp=%s, got=%s", i, tt.in, tt.err, err)
		} else if tt.out != out {
			t.Errorf("%d. %s: out: exp=%s, got=%s", i, tt.in, tt.out, out)
		}
	}
}

// TestScanRegex tests scanning regex literals: a /.../-delimited regex is
// scanned via ScanRegex and the token plus the de-escaped body (escaped
// slashes become plain slashes) are compared against expectations.
func TestScanRegex(t *testing.T) {
	var tests = []struct {
		in  string
		tok sql.Token
		lit string
		err string
	}{
		{in: `/^payments\./`, tok: sql.REGEX, lit: `^payments\.`},
		{in: `/foo\/bar/`, tok: sql.REGEX, lit: `foo/bar`},
		{in: `/foo\\/bar/`, tok: sql.REGEX, lit: `foo\/bar`},
		{in: `/foo\\bar/`, tok: sql.REGEX, lit: `foo\\bar`},
	}

	for i, tt := range tests {
		s := sql.NewScanner(strings.NewReader(tt.in))
		tok, _, lit := s.ScanRegex()
		if tok != tt.tok {
			t.Errorf("%d. %s: error:\n\texp=%s\n\tgot=%s\n", i, tt.in, tt.tok.String(), tok.String())
		}
		if lit != tt.lit {
			t.Errorf("%d. %s: error:\n\texp=%s\n\tgot=%s\n", i, tt.in, tt.lit, lit)
		}
	}
}
package util

import (
	"encoding/json"
	"log"
	"net/http"
)

// Handler is a handler for a request to this service. Use MakeHTTPHandler to
// wrap a Handler with the logic necessary to produce a handler which can be
// registered with the "net/http" package.
type Handler = func(ctx *Context) StatusError

// MakeHTTPHandler wraps a Handler, producing a handler which can be registered
// with the "net/http" package. The returned handler is responsible for:
//   - Adding an HSTS header and rejecting insecure (non-HTTPS) requests
//   - Constructing a *Context
//   - Converting any errors into an HTTP response
//
// Consistency fix: the parameter now uses the Handler alias declared above
// (a type alias, so this is the identical type and callers are unaffected).
func MakeHTTPHandler(handler Handler) func(http.ResponseWriter, *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		// Add HSTS header.
		addHSTS(w)

		// Reject insecure HTTP requests.
		if err := checkHTTPS(r); err != nil {
			writeStatusError(w, r, err)
			return
		}

		ctx, err := NewContext(w, r)
		if err != nil {
			writeStatusError(w, r, err)
			return
		}

		if err := handler(&ctx); err != nil {
			writeStatusError(w, r, err)
		}
	}
}

// writeStatusError renders err as a JSON body of the form {"message": ...}
// with the error's HTTP status code, and logs what was sent.
func writeStatusError(w http.ResponseWriter, r *http.Request, err StatusError) {
	type response struct {
		Message string `json:"message"`
	}

	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.WriteHeader(err.HTTPStatusCode())
	// Fix: the Encode error was previously discarded. The status line has
	// already been written at this point, so logging is all we can do.
	if encErr := json.NewEncoder(w).Encode(response{Message: err.Message()}); encErr != nil {
		log.Printf("[%v %v %v]: failed to encode error response: %v",
			r.RemoteAddr, r.Method, r.URL, encErr)
	}
	log.Printf("[%v %v %v]: responding with error code %v and message \"%v\" (error: %v)",
		r.RemoteAddr, r.Method, r.URL, err.HTTPStatusCode(), err.Message(), err)
}
package acmt

import (
	"encoding/xml"

	"github.com/thought-machine/finance-messaging/iso20022"
)

// Document02100101 is the XML document wrapper for the ISO 20022
// acmt.021.001.01 message (AccountClosingAdditionalInformationRequest).
type Document02100101 struct {
	XMLName xml.Name                                       `xml:"urn:iso:std:iso:20022:tech:xsd:acmt.021.001.01 Document"`
	Message *AccountClosingAdditionalInformationRequestV01 `xml:"AcctClsgAddtlInfReq"`
}

// AddMessage allocates the wrapped message, stores it on the document,
// and returns it so the caller can populate its fields.
func (d *Document02100101) AddMessage() *AccountClosingAdditionalInformationRequestV01 {
	d.Message = new(AccountClosingAdditionalInformationRequestV01)
	return d.Message
}

// Scope
// The AccountClosingAdditionalInformationRequest message is sent from a financial institution to an organisation as part of the account closing process.
// Usage
// This message is sent in response to an closing request message from the organisation, if the business content is valid, but additional information is required.
type AccountClosingAdditionalInformationRequestV01 struct {

	// Set of elements for the identification of the message and related references.
	References *iso20022.References3 `xml:"Refs"`

	// Identifier for an organisation.
	OrganisationIdentification []*iso20022.OrganisationIdentification6 `xml:"OrgId"`

	// Unique and unambiguous identification of the account between the account owner and the account servicer.
	AccountIdentification *iso20022.AccountForAction1 `xml:"AcctId"`

	// Unique and unambiguous identifier of a financial institution, as assigned under an internationally recognised or proprietary identification scheme.
	AccountServicerIdentification *iso20022.BranchAndFinancialInstitutionIdentification4 `xml:"AcctSvcrId"`

	// Identification of the account to which the remaining positive balance of the account to be closed must be transferred or account from which funds can be moved to the account to be closed and which balance is negative. This account must be held in the same financial institution as the account to be closed if the transfer account is used to compensate a negative balance.
	// For a positive balance to be transferred, an account in another financial institution might be used. In that case the account servicer is mandatory.
	BalanceTransferAccount *iso20022.AccountForAction1 `xml:"BalTrfAcct,omitempty"`

	// Unique and unambiguous identifier of a financial institution, as assigned under an internationally recognised or proprietary identification scheme, that is the servicer of the transfer account.
	TransferAccountServicerIdentification *iso20022.BranchAndFinancialInstitutionIdentification4 `xml:"TrfAcctSvcrId,omitempty"`

	// Contains the signature with its components, namely signed info, signature value, key info and the object.
	DigitalSignature []*iso20022.PartyAndSignature1 `xml:"DgtlSgntr,omitempty"`
}

// AddReferences allocates and returns the message references block.
func (a *AccountClosingAdditionalInformationRequestV01) AddReferences() *iso20022.References3 {
	a.References = new(iso20022.References3)
	return a.References
}

// AddOrganisationIdentification appends a new, empty organisation
// identification entry and returns it for population.
func (a *AccountClosingAdditionalInformationRequestV01) AddOrganisationIdentification() *iso20022.OrganisationIdentification6 {
	newValue := new(iso20022.OrganisationIdentification6)
	a.OrganisationIdentification = append(a.OrganisationIdentification, newValue)
	return newValue
}

// AddAccountIdentification allocates and returns the account identification.
func (a *AccountClosingAdditionalInformationRequestV01) AddAccountIdentification() *iso20022.AccountForAction1 {
	a.AccountIdentification = new(iso20022.AccountForAction1)
	return a.AccountIdentification
}

// AddAccountServicerIdentification allocates and returns the identification
// of the institution servicing the account being closed.
func (a *AccountClosingAdditionalInformationRequestV01) AddAccountServicerIdentification() *iso20022.BranchAndFinancialInstitutionIdentification4 {
	a.AccountServicerIdentification = new(iso20022.BranchAndFinancialInstitutionIdentification4)
	return a.AccountServicerIdentification
}

// AddBalanceTransferAccount allocates and returns the balance-transfer account.
func (a *AccountClosingAdditionalInformationRequestV01) AddBalanceTransferAccount() *iso20022.AccountForAction1 {
	a.BalanceTransferAccount = new(iso20022.AccountForAction1)
	return a.BalanceTransferAccount
}

// AddTransferAccountServicerIdentification allocates and returns the
// identification of the institution servicing the transfer account.
func (a *AccountClosingAdditionalInformationRequestV01) AddTransferAccountServicerIdentification() *iso20022.BranchAndFinancialInstitutionIdentification4 {
	a.TransferAccountServicerIdentification = new(iso20022.BranchAndFinancialInstitutionIdentification4)
	return a.TransferAccountServicerIdentification
}

// AddDigitalSignature appends a new, empty signature entry and returns it.
func (a *AccountClosingAdditionalInformationRequestV01) AddDigitalSignature() *iso20022.PartyAndSignature1 {
	newValue := new(iso20022.PartyAndSignature1)
	a.DigitalSignature = append(a.DigitalSignature, newValue)
	return newValue
}
package logger

import (
	"encoding/json"
	"fmt"
	"log"
	"log/syslog"
	"os"
	"strings"
)

// LoLLogger writes structured LoL log events to syslog. Initialization is
// lazy: Log calls Init automatically. Not safe for concurrent first use
// from multiple goroutines (Init performs an unguarded check-then-set).
type LoLLogger struct {
	logger      *syslog.Writer
	initialized bool
}

// LoLLogEvent is a single loggable event. Priority selects the syslog level
// and is excluded from the JSON payload (`json:"-"`).
type LoLLogEvent struct {
	Priority  syslog.Priority `json:"-"`
	Operation LoLOperation
	Outcome   LoLOutcome
	Target    uint64
	Details   string
}

type LoLOperation int
type LoLOutcome int

// A list of operations that are logging events.
const (
	FETCH_MATCH_HISTORY LoLOperation = iota
	FETCH_GAME_STATS
	FETCH_NAME
)

// A list of possible logged outcomes for the above operations.
const (
	SUCCESS LoLOutcome = iota
	API_REQUEST_FAILURE
	API_RATE_LIMIT_EXCEEDED
	HTTP_CONNECTION_ERROR
)

// Init connects to the local syslog daemon, tagging messages with the
// executable's base name. It is a no-op once initialization has succeeded.
//
// Fix over the original: the error from syslog.New is no longer discarded.
// Previously a failed connection left a nil writer that Log would later
// dereference; now the logger simply stays uninitialized.
func (l *LoLLogger) Init() {
	if l.initialized {
		return
	}

	// The tag is the executable's name.
	exeComponents := strings.Split(os.Args[0], "/")
	tag := exeComponents[len(exeComponents)-1]

	w, err := syslog.New(syslog.LOG_INFO|syslog.LOG_LOCAL0, tag)
	if err != nil {
		log.Printf("Failed to initialize syslog logging: %v", err)
		return
	}
	l.logger = w
	l.initialized = true
	log.Println("Logging service initialized.")
}

// Log serializes event to JSON and writes it to syslog at INFO level,
// prefixed with "[LOL]". Events whose Priority does not include LOG_INFO
// are reported to the standard logger and dropped, matching the original
// behavior.
func (l *LoLLogger) Log(event LoLLogEvent) {
	l.Init()
	if l.logger == nil {
		// Init failed; drop the event rather than panic on a nil writer.
		log.Println("Syslog logger unavailable; dropping log event.")
		return
	}

	if event.Priority&syslog.LOG_INFO > 0 {
		eventStr, err := json.Marshal(event)
		if err != nil {
			// Fix: marshal errors were previously ignored.
			log.Printf("Failed to marshal log event: %v", err)
			return
		}
		l.logger.Info(fmt.Sprintf("[LOL] %s", eventStr))
	} else {
		log.Println("Unknown priority for log event.")
	}
}
package fetcher

import (
	"bufio"
	"fmt"
	"golang.org/x/net/html/charset"
	"golang.org/x/text/encoding"
	"golang.org/x/text/transform"
	"io/ioutil"
	"net"
	"net/http"
	"time"
)

// timeout bounds dialing, waiting for response headers, and idle connections.
var timeout = 5 * time.Second

// dialTimeout dials using the package-wide timeout.
func dialTimeout(network, addr string) (net.Conn, error) {
	return net.DialTimeout(network, addr, timeout)
}

// client is shared by all Fetcher calls.
//
// Fix over the original: a fresh Transport and Client were built on every
// call, which defeats HTTP keep-alive/connection pooling entirely. Reusing
// a single client lets idle connections be recycled.
var client = &http.Client{
	Transport: &http.Transport{
		Dial:                  dialTimeout,
		MaxIdleConns:          10,
		IdleConnTimeout:       timeout,
		ResponseHeaderTimeout: timeout,
	},
}

// Fetcher downloads the page at url and returns its body converted to UTF-8
// (the source encoding is sniffed from the first bytes of the response).
// A non-200 status is returned as an error.
func Fetcher(url string) ([]byte, error) {
	resp, err := client.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		// Fix: previously the status was also printed to stdout and the
		// error string (built with Sprintln) carried a trailing newline.
		return nil, fmt.Errorf("http status: %d", resp.StatusCode)
	}

	// Buffer the body so determineEncoding can peek at it without
	// consuming bytes from the subsequent read.
	bufioRead := bufio.NewReader(resp.Body)
	e := determineEncoding(bufioRead)

	// Decode from the detected charset to UTF-8.
	transformReader := transform.NewReader(bufioRead, e.NewDecoder())

	// Read the transformed stream into a byte slice.
	bytes, err := ioutil.ReadAll(transformReader)
	return bytes, err
}

// determineEncoding guesses the HTML document's character encoding from up
// to the first 1024 bytes of r. A Peek error (e.g. EOF on a short body) is
// non-fatal: Peek still returns the bytes it could see, and
// charset.DetermineEncoding falls back to a default for empty input.
func determineEncoding(r *bufio.Reader) encoding.Encoding {
	bytes, err := r.Peek(1024)
	if err != nil {
		fmt.Printf("guess decode failed,err:%s", err)
	}
	e, _, _ := charset.DetermineEncoding(bytes, "")
	return e
}
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"
)

// CountingWriterImpl wraps an io.Writer and keeps a running total of the
// bytes successfully written through it.
type CountingWriterImpl struct {
	w io.Writer
	c int64
}

// Write forwards p to the underlying writer and adds the number of bytes
// actually written to the running count.
//
// Fix over the original: the underlying writer's return values were
// discarded and (len(p), nil) was always returned, violating the io.Writer
// contract on short writes and errors. Now the real (n, err) is propagated
// and only n bytes are counted.
func (c *CountingWriterImpl) Write(p []byte) (int, error) {
	n, err := c.w.Write(p)
	c.c += int64(n)
	return n, err
}

// CountingWriter wraps w and returns the wrapper together with a pointer to
// its byte counter; the counter updates as the wrapper is written to.
func CountingWriter(w io.Writer) (io.Writer, *int64) {
	cw := CountingWriterImpl{w: w, c: 0}
	return &cw, &(cw.c)
}

// main copies each file named on the command line to stdout through a
// counting writer, printing the cumulative byte count after each file.
func main() {
	writer, count := CountingWriter(os.Stdout)
	for _, filename := range os.Args[1:] {
		data, err := ioutil.ReadFile(filename)
		if err != nil {
			fmt.Fprintf(os.Stderr, "error: %s\n", err)
			continue
		}
		writer.Write(data)
		fmt.Println(*count)
	}
}
/** * ShortURL: Bijective conversion between natural numbers (IDs) and short strings * Licensed under the MIT License (https://opensource.org/licenses/MIT) * * ShortURL::encode() takes an ID and turns it into a short string * ShortURL::decode() takes a short string and turns it into an ID * * Features: * + large alphabet (51 chars) and thus very short resulting strings * + proof against offensive words (removed 'a', 'e', 'i', 'o' and 'u') * + unambiguous (removed 'I', 'l', '1', 'O' and '0') **/ package shorturl import ( "fmt" "strings" ) const ( // Alphabets is "set of allowed alphabets" Alphabets = "23456789bcdfghjkmnpqrstvwxyzBCDFGHJKLMNPQRSTVWXYZ-_" // Base is const size of alphabets string Base = len(Alphabets) ) //Reverse string assuming that its all runes. func Reverse(s string) string { runes := []rune(s) for i, j := 0, len(runes)-1; i < j; i, j = i+1, j-1 { runes[i], runes[j] = runes[j], runes[i] } return string(runes) } // Encode Given a generated number, get the URL back func Encode(n int) string { sb := strings.Builder{} for n > 0 { sb.WriteByte(Alphabets[n%Base]) n = n / Base } return Reverse(sb.String()) } // Decode Given a URL(path), the decoder decodes it to a unique number. func Decode(path string) (int, error) { n := 0 for _, c := range path { index := strings.IndexRune(Alphabets, c) if index < 0 { return 0, fmt.Errorf("Invalid character %c in input %s", c, path) } n = n*Base + index } return n, nil }
package main

import (
	"bufio"
	"fmt"
	"log"
	"os"
	"sync"
)

// getBoxID streams each line of fname (one box ID per line) into c and
// marks wg done when the file is exhausted. Fatal on I/O errors.
func getBoxID(fname string, c chan<- string, wg *sync.WaitGroup) {
	file, err := os.Open(fname)
	if err != nil {
		log.Fatal(err)
	}
	defer file.Close()

	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		c <- scanner.Text()
	}
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
	wg.Done()
}

// countRunes returns how many times each rune occurs in s.
func countRunes(s string) map[rune]int {
	countMap := map[rune]int{}
	for _, char := range s {
		countMap[char]++
	}
	return countMap
}

// checkSum consumes box IDs from c until it is closed, then sends on res:
// (number of IDs containing some letter exactly twice) multiplied by
// (number of IDs containing some letter exactly three times).
//
// Fix over the original: the "exactly three" counter was missing its
// once-per-ID guard, so an ID with two distinct letters each occurring
// three times (e.g. "ababab") was counted twice. Each ID now contributes
// at most 1 to each counter.
func checkSum(c <-chan string, res chan<- int) {
	two := 0
	three := 0

	for boxID := range c {
		hasTwo, hasThree := false, false
		for _, n := range countRunes(boxID) {
			switch n {
			case 2:
				hasTwo = true
			case 3:
				hasThree = true
			}
		}
		if hasTwo {
			two++
		}
		if hasThree {
			three++
		}
	}
	res <- two * three
}

// main reads box IDs from ./input, computes the checksum concurrently, and
// prints it. The producer signals completion via wg, after which c is
// closed so checkSum can emit its result.
func main() {
	var wg sync.WaitGroup
	c := make(chan string)
	res := make(chan int)
	defer close(res)

	wg.Add(1)
	go getBoxID("./input", c, &wg)
	go checkSum(c, res)

	wg.Wait()
	close(c)

	result := <-res
	fmt.Println("Checksum: ", result)
}
package routes

import (
	book "restfulalta/part-4-middleware/controllers/book"
	"restfulalta/part-4-middleware/middlewares"
)

// registerBookRoutes wires the /books CRUD endpoints onto the package-level
// router instance `e` (declared elsewhere in this package; presumably an
// Echo instance — confirm against the sibling file that defines it).
// Reads (GET) are public; mutations (POST/PUT/DELETE) require the
// AuthenticateUser middleware.
func registerBookRoutes() {
	e.GET("/books", book.GetBooksController)
	e.GET("/books/:id", book.GetBookByIdController)
	e.POST("/books", book.AddBookController, middlewares.AuthenticateUser)
	e.PUT("/books/:id", book.EditBookController, middlewares.AuthenticateUser)
	e.DELETE("/books/:id", book.DeleteBookController, middlewares.AuthenticateUser)
}
package postgres

import (
	"sync"

	"github.com/frk/gosql/internal/analysis"
	"github.com/frk/gosql/internal/postgres/oid"
)

////////////////////////////////////////////////////////////////////////////////
// Result Types
//

type (
	// FieldWrite holds the information needed by the generator to produce the
	// expression nodes that constitute a field-to-column write operation.
	FieldWrite struct {
		// Info on the field from which the column will be written.
		Field *analysis.FieldInfo
		// The column to which the data will be written.
		Column *Column
		// The column identifier.
		ColIdent analysis.ColIdent
		// The name of the valuer to be used for writing the column, or empty.
		Valuer string
	}

	// FieldRead holds the information needed by the generator to produce the
	// expression nodes that constitute a field-from-column read operation.
	FieldRead struct {
		// Info on the field into which the column will be read.
		Field *analysis.FieldInfo
		// The column from which the data will be read.
		Column *Column
		// The column identifier.
		ColIdent analysis.ColIdent
		// The name of the scanner to be used for reading the column, or empty.
		Scanner string
	}

	// FieldFilter holds the information needed by the generator to produce
	// a filter expression that compares a column against a field's value.
	FieldFilter struct {
		// Info on the field that holds the value to be used as the filter parameter.
		Field *analysis.FieldInfo
		// The column which to filter by.
		Column *Column
		// The column identifier.
		ColIdent analysis.ColIdent
		// The name of the valuer to be used for converting the field value.
		Valuer string
	}

	// Boolean wraps an analysis.Boolean operator (AND/OR/NOT) used to join
	// conditionals; it satisfies both the WhereConditional and
	// TableJoinConditional interfaces.
	Boolean struct {
		Value analysis.Boolean
	}

	// FieldConditional describes a SQL boolean expression that compares a
	// column against a Go field's value.
	FieldConditional struct {
		// Name of the field that holds the value to be used in the conditional.
		FieldName string
		// Type info of the field that holds the value to be used in the conditional.
		FieldType analysis.TypeInfo
		// The identifier of the column to be used in the conditional.
		ColIdent analysis.ColIdent
		// The column to be used in the conditional.
		Column *Column
		// The type of the predicate, or 0.
		Predicate analysis.Predicate
		// The predicate quantifier, or 0.
		Quantifier analysis.Quantifier
		// Name of the modifier function, or empty.
		FuncName analysis.FuncName
		// Name of the valuer to be employed, or empty.
		Valuer string
	}

	// ColumnConditional holds the information needed by the generator
	// to produce a column-specific SQL boolean expression.
	ColumnConditional struct {
		// Left hand side column id.
		LHSColIdent analysis.ColIdent
		// Left hand side column.
		LHSColumn *Column
		// Right hand side column id, or empty.
		RHSColIdent analysis.ColIdent
		// Right hand side column, or nil.
		RHSColumn *Column
		// Right hand side literal expression, or empty.
		RHSLiteral string
		// Type of the right hand side column or literal expression.
		RHSType *Type
		// The type of the predicate, or 0.
		Predicate analysis.Predicate
		// The predicate quantifier, or 0.
		Quantifier analysis.Quantifier
	}

	// BetweenConditional describes a SQL BETWEEN predicate with a primary
	// column predicand and lower/upper range bounds.
	BetweenConditional struct {
		// The name of the field containing the "between" info.
		FieldName string
		// The id of the predicand column.
		ColIdent analysis.ColIdent
		// The primary column predicand.
		Column *Column
		// The type of the between predicate.
		Predicate analysis.Predicate
		// The lower-bound range predicand.
		LowerBound RangeBound
		// The upper-bound range predicand.
		UpperBound RangeBound
	}

	// NestedConditional groups a list of conditionals under a single field,
	// producing a parenthesized sub-expression.
	NestedConditional struct {
		FieldName    string
		Conditionals []WhereConditional
	}

	// ConflictInfo describes an ON CONFLICT clause: its target and the
	// columns to update on conflict.
	ConflictInfo struct {
		Target ConflictTarget
		Update []*Column
	}

	// ConflictIndex is a ConflictTarget that targets an index.
	ConflictIndex struct {
		// The index predicate.
		Predicate string
		// The index expression.
		Expression string
	}

	// ConflictConstraint is a ConflictTarget that targets a named constraint.
	ConflictConstraint struct {
		// The name of the constraint.
		Name string
	}

	// WhereConditional is implemented by every node that can appear in a
	// WHERE clause.
	WhereConditional interface{ whereConditional() }
	// RangeBound is implemented by nodes usable as BETWEEN range predicands.
	RangeBound interface{ rangeBound() }
	// ConflictTarget is implemented by the possible ON CONFLICT targets.
	ConflictTarget interface{ conflictTarget() }
	// TableJoinConditional is implemented by nodes usable in JOIN conditions;
	// every such node is also usable in a WHERE clause.
	TableJoinConditional interface {
		tableJoinConditional()
		whereConditional()
	}
)

////////////////////////////////////////////////////////////////////////////////
// PostgreSQL Catalog
//

type (
	// Catalog holds information on various objects of the database.
	Catalog struct {
		// populated by loadCatalog
		Types     map[oid.OID]*Type
		Operators map[OpKey]*Operator
		Casts     map[CastKey]*Cast
		Procs     map[string][]*Proc

		// populated by loadRelation
		Relations map[analysis.RelIdent]*Relation

		// sync.RWMutex is needed only to guard the Relations map;
		// the rest is read-only once initialized.
		sync.RWMutex
	}

	// helper type used to map an Operator value
	OpKey struct {
		Name  string
		Left  oid.OID
		Right oid.OID
	}

	// helper type used to map a Cast value
	CastKey struct {
		Target oid.OID
		Source oid.OID
	}
)

////////////////////////////////////////////////////////////////////////////////
// PostgreSQL Catalog Objects
//

type (
	// Relation holds the info of a "pg_class" entry that represents
	// a table, view, or materialized view.
	Relation struct {
		// The object identifier of the relation.
		OID oid.OID
		// The name of the relation.
		Name string
		// The name of the schema to which the relation belongs.
		Schema string
		// The relation's kind, we're only interested in r, v, and m.
		RelKind RelKind
		// List of columns associated with the relation.
		Columns []*Column
		// List of constraints applied to the relation.
		Constraints []*Constraint
		// List of indexes applied to the relation.
		Indexes []*Index
	}

	// Column holds the info of a "pg_attribute" entry that represents
	// a column of a relation.
	Column struct {
		// The number of the column. Ordinary columns are numbered from 1 up.
		Num int16
		// The name of the member's column.
		Name string
		// Records type-specific data supplied at table creation time (for example,
		// the maximum length of a varchar column). It is passed to type-specific
		// input functions and length coercion functions. The value will generally
		// be -1 for types that do not need it.
		//
		// NOTE(mkopriva): to get the actual value subtract 4.
		// NOTE(mkopriva): in the case of NUMERIC(precision, scale) types, to
		// calculate the precision use ((typmod - 4) >> 16) & 65535 and to
		// calculate the scale use (typmod - 4) && 65535
		TypeMod int
		// Indicates whether or not the column has a NOT NULL constraint.
		// NOTE(mkopriva): this is ambiguous if the column's relation is a view.
		HasNotNull bool
		// Indicates whether or not the column has a DEFAULT value.
		HasDefault bool
		// Reports whether or not the column is a primary key.
		IsPrimary bool
		// The number of dimensions if the column is an array type, otherwise 0.
		NumDims int
		// The OID of the column's type.
		TypeOID oid.OID
		// Info about the column's type.
		Type *Type
		// The Relation to which the Column belongs.
		Relation *Relation
	}

	// Type holds the info of a "pg_type" entry that represents a column's data type.
	Type struct {
		// The object identifier of the type.
		OID oid.OID
		// The name of the type.
		Name string
		// The formatted name of the type.
		NameFmt string
		// The number of bytes for fixed-size types, negative for variable length types.
		Length int
		// The type's type.
		Type TypeType
		// An arbitrary classification of data types that is used by the parser
		// to determine which implicit casts should be "preferred".
		Category TypeCategory
		// True if the type is a preferred cast target within its category.
		IsPreferred bool
		// If this is an array type then elem identifies the element type
		// of that array type.
		Elem oid.OID
	}

	// Index holds the info of a "pg_index" entry that represents a table's index.
	Index struct {
		// The object identifier of the index.
		OID oid.OID
		// The name of the index.
		Name string
		// The total number of columns in the index; this number includes
		// both key and included attributes.
		NumAtts int
		// If true, this is a unique index.
		IsUnique bool
		// If true, this index represents the primary key of the table.
		IsPrimary bool
		// If true, this index supports an exclusion constraint.
		IsExclusion bool
		// If true, the uniqueness check is enforced immediately on insertion.
		IsImmediate bool
		// If true, the index is currently ready for inserts. False means the
		// index must be ignored by INSERT/UPDATE operations.
		IsReady bool
		// This is an array of values that indicate which table columns this index
		// indexes. For example a value of 1 3 would mean that the first
		// and the third table columns make up the index entries. Key columns come
		// before non-key (included) columns. A zero in this array indicates that
		// the corresponding index attribute is an expression over the table columns,
		// rather than a simple column reference.
		Key []int16
		// The index definition.
		Definition string
		// The index predicate (optional).
		Predicate string
		// Parsed index expression.
		Expression string
	}

	// Constraint holds the info of a "pg_constraint" entry that represents
	// a constraint's on a table.
	Constraint struct {
		// The object identifier of the constraint.
		OID oid.OID
		// Constraint name (not necessarily unique!)
		Name string
		// The type of the constraint
		Type ConstraintType
		// Indicates whether or not the constraint is deferrable
		IsDeferrable bool
		// Indicates whether or not the constraint is deferred by default
		IsDeferred bool
		// If a table constraint (including foreign keys, but not constraint triggers),
		// list of the constrained columns
		Key []int64
		// If a foreign key, list of the referenced columns
		FKey []int64
	}

	// Operator holds info on a "pg_operator" entry.
	Operator struct {
		// The object identifier of the operator.
		OID oid.OID
		// The name of the operator.
		Name string
		// The kind (infix, prefix, or postfix) of the operator.
		Kind string
		// The type oid of the left operand.
		Left oid.OID
		// The type oid of the right operand.
		Right oid.OID
		// The type oid of the result.
		Result oid.OID
	}

	// Cast holds info on a "pg_cast" entry.
	Cast struct {
		// The object identifier of the cast.
		OID oid.OID
		// The oid of the source data type.
		Source oid.OID
		// The oid of the target data type.
		Target oid.OID
		// The context in which the cast can be invoked.
		Context CastContext
	}

	// Proc holds info on a "pg_proc" entry.
	// Current support is limited to functions with 1 input argument
	// and 1 return value, hence ArgType & RetType are single OIDs.
	Proc struct {
		// The object identifier of the procedure.
		OID oid.OID
		// The name of the function.
		Name string
		// The type oid of the function's input argument.
		ArgType oid.OID
		// The type oid of the function's return value.
		RetType oid.OID
		// Indicates whether or not the function is an aggregate function.
		IsAgg bool
	}
)

// NeedsNULLIF reports whether the write must wrap the value in NULLIF: the
// target column is NULLable but the Go field is a non-pointer with no custom
// valuer, so the zero value would otherwise be written instead of NULL.
func (w FieldWrite) NeedsNULLIF() bool {
	return w.Column.IsNULLable() &&
		w.Field.Type.Kind != analysis.TypeKindPtr &&
		!w.Field.Type.ImplementsValuer() &&
		w.Valuer == ""
}

// NeedsCOALESCE reports whether the read must wrap the column in COALESCE:
// either the field explicitly opted in (UseCoalesce), or the source column is
// NULLable while the Go field is a non-pointer with no custom scanner.
func (r FieldRead) NeedsCOALESCE() bool {
	if r.Field.UseCoalesce {
		return true
	}
	return r.Column.IsNULLable() &&
		r.Field.Type.Kind != analysis.TypeKindPtr &&
		!r.Field.Type.ImplementsScanner() &&
		r.Scanner == ""
}

// is reports whether the type's OID matches any of the given oids.
func (t Type) is(oids ...oid.OID) bool {
	for _, id := range oids {
		if t.OID == id {
			return true
		}
	}
	return false
}

// ZeroValueLiteral returns the SQL literal for the type's zero value, if one
// is registered in the oid package's lookup table.
func (t Type) ZeroValueLiteral() (lit string, ok bool) {
	lit, ok = oid.TypeToZeroValue[t.OID]
	return lit, ok
}

// GetNameFmt returns the type's formatted name; it is nil-safe and returns
// "<nil>" for a nil receiver.
func (t *Type) GetNameFmt() string {
	if t == nil {
		return "<nil>"
	}
	return t.NameFmt
}

// IsNULLable reports whether the column can hold NULL: it must belong to an
// ordinary table (views are ambiguous, see HasNotNull) and lack a NOT NULL
// constraint.
func (c *Column) IsNULLable() bool {
	return c.Relation != nil && c.Relation.RelKind == RelKindOrdinaryTable && c.HasNotNull == false
}

// TableJoinConditional implementations
func (*Boolean) tableJoinConditional()           {}
func (*ColumnConditional) tableJoinConditional() {}

// WhereConditional implementations
func (*Boolean) whereConditional()            {}
func (*FieldConditional) whereConditional()   {}
func (*ColumnConditional) whereConditional()  {}
func (*BetweenConditional) whereConditional() {}
func (*NestedConditional) whereConditional()  {}

// RangeBound implementations
func (*FieldConditional) rangeBound()  {}
func (*ColumnConditional) rangeBound() {}

// ConflictTarget implementations
func (*ConflictIndex) conflictTarget()      {}
func (*ConflictConstraint) conflictTarget() {}
package main

import "fmt"

// main walks the ASCII codes 65 ('A') through 90 ('Z'), printing each
// decimal code on its own line followed by its Unicode representation
// (%#U form, e.g. "U+0041 'A'") three times, each tab-indented.
func main() {
	const (
		first = 65 // 'A'
		last  = 90 // 'Z'
	)
	for code := first; code <= last; code++ {
		fmt.Printf("%d\n", code)
		for rep := 1; rep <= 3; rep++ {
			fmt.Printf("\t%#U\n", code)
		}
	}
}
package utils

import (
	"github.com/astaxie/beegae"

	"fareastdominions.com/evepaste/eve/entity"
)

// RefineItem describes the reprocessing yield of one ore type: the minerals
// produced per unit and the efficiency category used to look up the refine
// rate from app config (see GetRefineEff).
type RefineItem struct {
	Outputs        []RefineOutput
	RefineCategory string
}

// RefineOutput is one mineral yielded by refining: the mineral's type id and
// the (pre-efficiency) quantity produced per unit of ore.
type RefineOutput struct {
	TypeId   int32
	Quantity float64
}

// REFINE_TABLE maps an ore type id to its refine yields. Categories used:
// "hs" and "lns" — presumably high-sec vs. low/null-sec rates; confirm
// against the "refine::" config keys read by GetRefineEff.
var REFINE_TABLE = map[int32]RefineItem{
	1230:  RefineItem{[]RefineOutput{RefineOutput{34, 4.15}}, "hs"},                                                              // Veldspar
	17470: RefineItem{[]RefineOutput{RefineOutput{34, 4.3575}}, "hs"},                                                            // Concentrated Veldspar
	17471: RefineItem{[]RefineOutput{RefineOutput{34, 4.565}}, "hs"},                                                             // Dense Veldspar
	1228:  RefineItem{[]RefineOutput{RefineOutput{34, 3.46}, RefineOutput{35, 1.73}}, "hs"},                                      // Scordite
	17463: RefineItem{[]RefineOutput{RefineOutput{34, 3.633}, RefineOutput{35, 1.8165}}, "hs"},                                   // Condensed Scordite
	17464: RefineItem{[]RefineOutput{RefineOutput{34, 3.806}, RefineOutput{35, 1.903}}, "hs"},                                    // Massive Scordite
	18:    RefineItem{[]RefineOutput{RefineOutput{34, 1.07}, RefineOutput{35, 2.13}, RefineOutput{36, 1.07}}, "hs"},              // Plagioclase
	17455: RefineItem{[]RefineOutput{RefineOutput{34, 1.1235}, RefineOutput{35, 2.2365}, RefineOutput{36, 1.1235}}, "hs"},        // Azure Plagioclase
	17456: RefineItem{[]RefineOutput{RefineOutput{34, 1.177}, RefineOutput{35, 2.343}, RefineOutput{36, 1.177}}, "hs"},           // Rich Plagioclase
	1224:  RefineItem{[]RefineOutput{RefineOutput{34, 3.51}, RefineOutput{35, 0.25}, RefineOutput{36, 0.5}, RefineOutput{38, 0.05}}, "hs"},       // Pyroxeres
	17459: RefineItem{[]RefineOutput{RefineOutput{34, 3.6855}, RefineOutput{35, 0.2625}, RefineOutput{36, 0.525}, RefineOutput{38, 0.0525}}, "hs"}, // Solid Pyroxeres
	17460: RefineItem{[]RefineOutput{RefineOutput{34, 3.861}, RefineOutput{35, 0.275}, RefineOutput{36, 0.55}, RefineOutput{38, 0.055}}, "hs"},   // Viscous Pyroxeres
	1227:  RefineItem{[]RefineOutput{RefineOutput{34, 8.0}, RefineOutput{35, 1.0}, RefineOutput{37, 0.85}}, "hs"},                // Omber
	17867: RefineItem{[]RefineOutput{RefineOutput{34, 8.4}, RefineOutput{35, 1.05}, RefineOutput{37, 0.8925}}, "hs"},             // Silvery Omber
	17868: RefineItem{[]RefineOutput{RefineOutput{34, 8.8}, RefineOutput{35, 1.1}, RefineOutput{37, 0.935}}, "hs"},               // Golden Omber
	1226:  RefineItem{[]RefineOutput{RefineOutput{36, 3.5}, RefineOutput{38, 0.75}, RefineOutput{39, 0.08}}, "lns"},              // Jaspet
	17448: RefineItem{[]RefineOutput{RefineOutput{36, 3.675}, RefineOutput{38, 0.7875}, RefineOutput{39, 0.084}}, "lns"},         // Pure Jaspet
	17449: RefineItem{[]RefineOutput{RefineOutput{36, 3.85}, RefineOutput{38, 0.825}, RefineOutput{39, 0.088}}, "lns"},           // Pristine Jaspet
	1231:  RefineItem{[]RefineOutput{RefineOutput{34, 22.0}, RefineOutput{37, 1.0}, RefineOutput{38, 1.2}, RefineOutput{39, 0.15}}, "lns"},       // Hemorphite
	17444: RefineItem{[]RefineOutput{RefineOutput{34, 23.1}, RefineOutput{37, 1.05}, RefineOutput{38, 1.26}, RefineOutput{39, 0.1575}}, "lns"},   // Vivid Hemorphite
	17445: RefineItem{[]RefineOutput{RefineOutput{34, 24.2}, RefineOutput{37, 1.1}, RefineOutput{38, 1.32}, RefineOutput{39, 0.165}}, "lns"},     // Radiant Hemorphite
	21:    RefineItem{[]RefineOutput{RefineOutput{35, 10.0}, RefineOutput{37, 2.0}, RefineOutput{38, 1.0}, RefineOutput{39, 0.19}}, "lns"},       // Hedbergite
	17440: RefineItem{[]RefineOutput{RefineOutput{35, 10.5}, RefineOutput{37, 2.1}, RefineOutput{38, 1.05}, RefineOutput{39, 0.1995}}, "lns"},    // Vitric Hedbergite
	17441: RefineItem{[]RefineOutput{RefineOutput{35, 11.0}, RefineOutput{37, 2.2}, RefineOutput{38, 1.1}, RefineOutput{39, 0.209}}, "lns"},      // Glazed Hedbergite
	1229:  RefineItem{[]RefineOutput{RefineOutput{35, 22.0}, RefineOutput{36, 24.0}, RefineOutput{37, 3.0}}, "lns"},              // Gneiss
	17865: RefineItem{[]RefineOutput{RefineOutput{35, 23.1}, RefineOutput{36, 25.2}, RefineOutput{37, 3.15}}, "lns"},             // Iridescent Gneiss
	17866: RefineItem{[]RefineOutput{RefineOutput{35, 24.2}, RefineOutput{36, 26.4}, RefineOutput{37, 3.3}}, "lns"},              // Prismatic Gneiss
	1232:  RefineItem{[]RefineOutput{RefineOutput{34, 100.0}, RefineOutput{37, 16.0}, RefineOutput{38, 1.2}}, "lns"},             // Dark Ochre
	17436: RefineItem{[]RefineOutput{RefineOutput{34, 105.0}, RefineOutput{37, 16.8}, RefineOutput{38, 1.26}}, "lns"},            // Onyx Ochre
	17437: RefineItem{[]RefineOutput{RefineOutput{34, 110.0}, RefineOutput{37, 17.6}, RefineOutput{38, 1.32}}, "lns"},            // Obsidian Ochre
	1225:  RefineItem{[]RefineOutput{RefineOutput{34, 210.0}, RefineOutput{38, 7.6}, RefineOutput{39, 1.35}}, "lns"},             // Crokite
	17432: RefineItem{[]RefineOutput{RefineOutput{34, 220.5}, RefineOutput{38, 7.98}, RefineOutput{39, 1.4175}}, "lns"},          // Sharp Crokite
	17433: RefineItem{[]RefineOutput{RefineOutput{34, 231.0}, RefineOutput{38, 8.36}, RefineOutput{39, 1.485}}, "lns"},           // Crystalline Crokite
	19:    RefineItem{[]RefineOutput{RefineOutput{34, 560.0}, RefineOutput{35, 120.5}, RefineOutput{36, 21.0}, RefineOutput{37, 4.5}}, "lns"},     // Spodumain
	17466: RefineItem{[]RefineOutput{RefineOutput{34, 588.0}, RefineOutput{35, 126.525}, RefineOutput{36, 22.05}, RefineOutput{37, 4.725}}, "lns"}, // Bright Spodumain
	17467: RefineItem{[]RefineOutput{RefineOutput{34, 616.0}, RefineOutput{35, 132.55}, RefineOutput{36, 23.1}, RefineOutput{37, 4.95}}, "lns"},   // Gleaming Spodumain
	1223:  RefineItem{[]RefineOutput{RefineOutput{35, 120.0}, RefineOutput{39, 4.5}, RefineOutput{40, 1.0}}, "lns"},              // Bistot
	17428: RefineItem{[]RefineOutput{RefineOutput{35, 126.0}, RefineOutput{39, 4.725}, RefineOutput{40, 1.05}}, "lns"},           // Triclinic Bistot
	17429: RefineItem{[]RefineOutput{RefineOutput{35, 132.0}, RefineOutput{39, 4.95}, RefineOutput{40, 1.1}}, "lns"},             // Monoclinic Bistot
	22:    RefineItem{[]RefineOutput{RefineOutput{34, 220.0}, RefineOutput{36, 25.0}, RefineOutput{40, 3.2}}, "lns"},             // Arkonor
	17425: RefineItem{[]RefineOutput{RefineOutput{34, 231.0}, RefineOutput{36, 26.25}, RefineOutput{40, 3.36}}, "lns"},           // Crimson Arkonor
	17426: RefineItem{[]RefineOutput{RefineOutput{34, 242.0}, RefineOutput{36, 27.5}, RefineOutput{40, 3.52}}, "lns"},            // Prime Arkonor
	11396: RefineItem{[]RefineOutput{RefineOutput{11399, 3.0}}, "lns"},                                                           // Mercoxit
	17869: RefineItem{[]RefineOutput{RefineOutput{11399, 3.15}}, "lns"},                                                          // Magma Mercoxit
	17870: RefineItem{[]RefineOutput{RefineOutput{11399, 3.3}}, "lns"},                                                           // Vitreous Mercoxit
	20:    RefineItem{[]RefineOutput{RefineOutput{34, 1.34}, RefineOutput{36, 2.67}, RefineOutput{37, 1.34}}, "hs"},              // Kernite
	17452: RefineItem{[]RefineOutput{RefineOutput{34, 1.407}, RefineOutput{36, 2.8035}, RefineOutput{37, 1.407}}, "hs"},          // Luminous Kernite
	17453: RefineItem{[]RefineOutput{RefineOutput{34, 1.474}, RefineOutput{36, 2.937}, RefineOutput{37, 1.474}}, "hs"},           // Fiery Kernite
}

/**
// minerals
mineralIds := []int64{
	34,    // Tritanium
	35,    // Pyerite
	36,    // Mexallon
	37,    // Isogen
	38,    // Nocxium
	39,    // Zydrine
	40,    // Megacyte
	11399, // Morphite
}
*/

// Refine returns the raw (pre-efficiency) refine outputs for the given ore
// type id, or an empty slice for unknown ids.
func Refine(typeId int32) []RefineOutput {
	if out, ok := REFINE_TABLE[typeId]; ok {
		return out.Outputs
	}
	return []RefineOutput{}
}

// RefineIds returns the mineral type ids yielded by refining the given ore
// type id (empty for unknown ids).
func RefineIds(typeId int32) []int32 {
	result := make([]int32, 0)
	if out, ok := REFINE_TABLE[typeId]; ok {
		for _, o := range out.Outputs {
			result = append(result, o.TypeId)
		}
	}
	return result
}

// RefineQuantity returns the efficiency-adjusted quantity of mineralId
// produced per unit of typeId, or 0 when the ore or mineral is not found.
func RefineQuantity(typeId int32, mineralId int32) float64 {
	if out, ok := REFINE_TABLE[typeId]; ok {
		for _, o := range out.Outputs {
			if o.TypeId == mineralId {
				return o.Quantity * GetRefineEff(out.RefineCategory)
			}
		}
	}
	return 0
}

// RefineItems refines a batch of items and aggregates the resulting mineral
// quantities per mineral type. Items without a table entry are skipped.
// Note: the per-mineral quantity is truncated to int per input item (not
// once at the end), so fractional yields are discarded per item.
func RefineItems(items []entity.Item) []entity.Item {
	mineralMap := make(map[int32]entity.Item)
	for _, item := range items {
		if out, ok := REFINE_TABLE[item.TypeId]; ok {
			for _, o := range out.Outputs {
				var m entity.Item
				if m, ok = mineralMap[o.TypeId]; !ok {
					m = entity.Item{
						TypeId:   o.TypeId,
						Quantity: 0,
					}
				}
				m.Quantity += int(float64(item.Quantity) * o.Quantity * GetRefineEff(out.RefineCategory))
				mineralMap[o.TypeId] = m
			}
		}
	}
	results := make([]entity.Item, 0)
	for _, r := range mineralMap {
		results = append(results, r)
	}
	return results
}

// GetRefineEff reads the refine efficiency for a category ("hs"/"lns") from
// app config under the key "refine::<category>", defaulting to 0.5.
func GetRefineEff(category string) float64 {
	return beegae.AppConfig.DefaultFloat("refine::" + category, 0.5)
}
package security

import (
	"encoding/json"
	"github.com/dintel/budget-backend/util"
	"io/ioutil"
	"log"
)

// Processor owns the security state (users and permits) and serves
// requests from a single goroutine over channels.
type Processor struct {
	dataDir   string
	users     map[string]User
	permits   Permits
	done      chan bool
	RequestCh chan Request
	ResultCh  chan Result
}

// loadUsers reads <dataDir>/users.json into a login-keyed map. If the file
// cannot be read, it seeds an empty user list on disk (best effort) and
// proceeds with no users.
func loadUsers(dataDir string) map[string]User {
	path := dataDir + "/users.json"
	raw, err := ioutil.ReadFile(path)
	if err != nil {
		// First run: create an empty user file and continue with "[]".
		ioutil.WriteFile(path, []byte("[]"), 0644)
		raw = []byte("[]")
	}
	var list []User
	util.CheckErr(json.Unmarshal(raw, &list))
	byLogin := make(map[string]User)
	for _, u := range list {
		byLogin[u.Login] = u
	}
	return byLogin
}

// loadPermits reads <dataDir>/permits.json. If the file cannot be read, it
// seeds an empty permits object on disk (best effort) and proceeds empty.
func loadPermits(dataDir string) Permits {
	path := dataDir + "/permits.json"
	raw, err := ioutil.ReadFile(path)
	if err != nil {
		// First run: create an empty permits file and continue with "{}".
		ioutil.WriteFile(path, []byte("{}"), 0644)
		raw = []byte("{}")
	}
	permits := make(Permits)
	util.CheckErr(json.Unmarshal(raw, &permits))
	return permits
}

// reload refreshes permits and users from the data directory.
func (p *Processor) reload() {
	log.Println("Loading permits")
	p.permits = loadPermits(p.dataDir)
	log.Printf("Loaded %d permits", len(p.permits))
	log.Println("Loading users")
	p.users = loadUsers(p.dataDir)
	log.Printf("Loaded %d users", len(p.users))
}

// NewProcessor builds a Processor with fresh channels. Data is loaded
// lazily when Run starts.
func NewProcessor(dataDir string) *Processor {
	return &Processor{
		dataDir:   dataDir,
		done:      make(chan bool),
		RequestCh: make(chan Request),
		ResultCh:  make(chan Result),
	}
}

// Start launches the processing goroutine.
func (p *Processor) Start() {
	go p.Run()
}

// Run loads state and then serves requests until a shutdown request
// arrives; unknown request types are logged and skipped.
func (p *Processor) Run() {
	log.Println("[Security goroutine] start")
	p.reload()
	for {
		log.Println("[Security goroutine] waiting for requests")
		req := <-p.RequestCh
		log.Println("[Security goroutine] got request -", req)
		if req.Type == REQUEST_TYPE_SHUTDOWN {
			log.Println("[Security goroutine] shutting down")
			p.done <- true
			return
		}
		log.Printf("[Security goroutine] skipping request with type %s", req.Type)
	}
}

// Shutdown asks the goroutine to stop and blocks until it has exited.
func (p *Processor) Shutdown() {
	p.RequestCh <- Request{Type: REQUEST_TYPE_SHUTDOWN}
	<-p.done
}
package multiline_test

import (
	"fmt"
	"math/rand"
	"time"

	"github.com/byounghoonkim/multiline"
)

// runJob drives one progress line through its lifecycle: a "preparing"
// phase, ten work steps with random pauses, then a done marker.
func runJob(line *multiline.Line) {
	defer line.Close()
	fmt.Fprint(line, "🚚 Preparing ...")
	time.Sleep(time.Duration(rand.Intn(3000)) * time.Millisecond)
	for step := 0; step < 10; step++ {
		fmt.Fprintf(line, "⛏️ %s", msgList[rand.Intn(len(msgList))])
		time.Sleep(time.Duration(rand.Intn(500)) * time.Millisecond)
	}
	fmt.Fprint(line, "✅ DONE")
}

// ExampleMultiLine launches ten concurrent jobs, each owning its own
// line, and renders all of them until completion. Note: GetLine runs
// synchronously (go-statement arguments are evaluated in the caller),
// so lines are registered in job order.
func ExampleMultiLine() {
	for jobID := 0; jobID < 10; jobID++ {
		go runJob(multiline.GetLine(fmt.Sprintf("%d job - ", jobID)))
	}
	multiline.Print()
}
package main

import (
	"fmt"
	"log"
	"net/http"
	"os"

	"github.com/go-chi/chi"
	"github.com/go-chi/chi/middleware"
	"github.com/spf13/cobra"

	"github.com/alehano/gobootstrap/config"
	"github.com/alehano/gobootstrap/sys/cmd"
	"github.com/alehano/gobootstrap/sys/urls"

	_ "github.com/alehano/gobootstrap/models"
	_ "github.com/alehano/gobootstrap/sys/log"

	// Import all views to enable them
	_ "github.com/alehano/gobootstrap/views/admin"
	_ "github.com/alehano/gobootstrap/views/common"
	_ "github.com/alehano/gobootstrap/views/home"
)

// main registers the "run_server" subcommand on the root command and
// executes it; a command error is logged and exits with status 1.
func main() {
	cmd.RootCmd.AddCommand(&cobra.Command{
		Use:   "run_server",
		Short: "Start Application Web Server",
		// Fixed typos in the help text ("by set ... enviroment").
		Long: fmt.Sprintf("Start Application Web Server. You have to set config file "+
			"either by setting %q environment variable with full path or place %q "+
			"into a project working directory.", config.ENVName, config.Filename),
		Run: func(cmd *cobra.Command, args []string) {
			log.Printf("Server running on :%d\n", config.Get().Port)
			runServer()
		},
	})
	if err := cmd.RootCmd.Execute(); err != nil {
		log.Println(err)
		os.Exit(1)
	}
}

// runServer builds the chi router with the standard middleware stack,
// mounts all registered URLs, and serves on the configured port.
func runServer() {
	r := chi.NewRouter()
	r.Use(middleware.Logger)
	r.Use(middleware.Recoverer)
	r.Use(middleware.RedirectSlashes)
	r.Use(middleware.GetHead)
	urls.AddAll(r)
	// ListenAndServe always returns a non-nil error; previously it was
	// silently discarded, so bind failures went unnoticed.
	if err := http.ListenAndServe(fmt.Sprintf(":%d", config.Get().Port), r); err != nil {
		log.Fatal(err)
	}
}
package gcp

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"sync"

	"github.com/AlecAivazis/survey/v2"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	googleoauth "golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

var (
	authEnvs            = []string{"GOOGLE_CREDENTIALS", "GOOGLE_CLOUD_KEYFILE_JSON", "GCLOUD_KEYFILE_JSON"}
	defaultAuthFilePath = filepath.Join(os.Getenv("HOME"), ".gcp", "osServiceAccount.json")
	// credLoaders is populated lazily on the first loadCredentials call.
	// NOTE(review): this lazy init is not synchronized — presumably
	// loadCredentials is only called from one goroutine; confirm.
	credLoaders = []credLoader{}
	onceLoggers = map[credLoader]*sync.Once{}
)

// Session is an object representing session for GCP API.
type Session struct {
	Credentials *googleoauth.Credentials
}

// GetSession returns a GCP session by using credentials found in default locations in order:
// env GOOGLE_CREDENTIALS,
// env GOOGLE_CLOUD_KEYFILE_JSON,
// env GCLOUD_KEYFILE_JSON,
// file ~/.gcp/osServiceAccount.json, and
// gcloud cli defaults
// and, if no creds are found, asks for them and stores them on disk in a config file
func GetSession(ctx context.Context) (*Session, error) {
	creds, err := loadCredentials(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "failed to load credentials")
	}
	return &Session{
		Credentials: creds,
	}, nil
}

// loadCredentials tries each configured loader in priority order and
// returns the first set of credentials that loads successfully, logging
// the winning source once per loader. When every loader fails it falls
// back to prompting the user via getCredentials.
func loadCredentials(ctx context.Context) (*googleoauth.Credentials, error) {
	if len(credLoaders) == 0 {
		for _, authEnv := range authEnvs {
			credLoaders = append(credLoaders, &envLoader{env: authEnv})
		}
		credLoaders = append(credLoaders, &fileLoader{path: defaultAuthFilePath})
		credLoaders = append(credLoaders, &cliLoader{})
		// Renamed loop variable: the old name shadowed the credLoader type.
		for _, cl := range credLoaders {
			onceLoggers[cl] = new(sync.Once)
		}
	}
	for _, loader := range credLoaders {
		creds, err := loader.Load(ctx)
		if err != nil {
			// Loader errors are non-fatal; try the next source.
			continue
		}
		onceLoggers[loader].Do(func() {
			logrus.Infof("Credentials loaded from %s", loader)
		})
		return creds, nil
	}
	return getCredentials(ctx)
}

// getCredentials interactively asks the user for credentials and persists
// the raw JSON to defaultAuthFilePath (0600, dir 0700) for future runs.
func getCredentials(ctx context.Context) (*googleoauth.Credentials, error) {
	creds, err := (&userLoader{}).Load(ctx)
	if err != nil {
		return nil, err
	}

	filePath := defaultAuthFilePath
	logrus.Infof("Saving the credentials to %q", filePath)
	if err := os.MkdirAll(filepath.Dir(filePath), 0700); err != nil {
		return nil, err
	}
	if err := os.WriteFile(filePath, creds.JSON, 0o600); err != nil {
		return nil, err
	}
	return creds, nil
}

// credLoader is one strategy for obtaining GCP credentials.
type credLoader interface {
	Load(context.Context) (*googleoauth.Credentials, error)
}

// envLoader loads credentials from a path-or-content environment variable.
type envLoader struct {
	env      string
	delegate credLoader
}

func (e *envLoader) Load(ctx context.Context) (*googleoauth.Credentials, error) {
	if val := os.Getenv(e.env); len(val) > 0 {
		e.delegate = &fileOrContentLoader{pathOrContent: val}
		return e.delegate.Load(ctx)
	}
	return nil, errors.New("empty environment variable")
}

func (e *envLoader) String() string {
	path := []string{
		fmt.Sprintf("environment variable %q", e.env),
	}
	if e.delegate != nil {
		// fmt.Sprint replaces the redundant Sprintf("%s", ...) (staticcheck S1025).
		path = append(path, fmt.Sprint(e.delegate))
	}
	return strings.Join(path, ", ")
}

// fileOrContentLoader treats its input as a file path when it stats
// successfully, otherwise as raw JSON content.
type fileOrContentLoader struct {
	pathOrContent string
	delegate      credLoader
}

func (fc *fileOrContentLoader) Load(ctx context.Context) (*googleoauth.Credentials, error) {
	// if this is a path and we can stat it, assume it's ok
	if _, err := os.Stat(fc.pathOrContent); err == nil {
		fc.delegate = &fileLoader{path: fc.pathOrContent}
	} else {
		fc.delegate = &contentLoader{content: fc.pathOrContent}
	}
	return fc.delegate.Load(ctx)
}

func (fc *fileOrContentLoader) String() string {
	if fc.delegate != nil {
		// fmt.Sprint replaces the redundant Sprintf("%s", ...) (staticcheck S1025).
		return fmt.Sprint(fc.delegate)
	}
	return "file or content"
}

// fileLoader loads credentials from a JSON file on disk.
type fileLoader struct {
	path string
}

func (f *fileLoader) Load(ctx context.Context) (*googleoauth.Credentials, error) {
	content, err := os.ReadFile(f.path)
	if err != nil {
		return nil, err
	}
	return (&contentLoader{content: string(content)}).Load(ctx)
}

func (f *fileLoader) String() string {
	return fmt.Sprintf("file %q", f.path)
}

// contentLoader parses raw service-account JSON into credentials scoped
// for the Compute/Cloud Platform API.
type contentLoader struct {
	content string
}

func (f *contentLoader) Load(ctx context.Context) (*googleoauth.Credentials, error) {
	return googleoauth.CredentialsFromJSON(ctx, []byte(f.content), compute.CloudPlatformScope)
}

func (f *contentLoader) String() string {
	return "content <redacted>"
}

// cliLoader falls back to Application Default Credentials (gcloud CLI).
type cliLoader struct{}

func (c *cliLoader) Load(ctx context.Context) (*googleoauth.Credentials, error) {
	return googleoauth.FindDefaultCredentials(ctx, compute.CloudPlatformScope)
}

func (c *cliLoader) String() string {
	return "gcloud CLI defaults"
}

// userLoader prompts the user for a service-account file path or JSON
// content and delegates parsing to fileOrContentLoader.
type userLoader struct{}

func (u *userLoader) Load(ctx context.Context) (*googleoauth.Credentials, error) {
	var content string
	err := survey.Ask([]*survey.Question{
		{
			Prompt: &survey.Multiline{
				Message: "Service Account (absolute path to file or JSON content)",
				// Due to a bug in survey pkg, help message is not rendered
				Help: "The location to file that contains the service account in JSON, or the service account in JSON format",
			},
		},
	}, &content)
	if err != nil {
		return nil, err
	}
	content = strings.TrimSpace(content)
	return (&fileOrContentLoader{pathOrContent: content}).Load(ctx)
}
package models

import (
	"fmt"
	"testing"

	"github.com/c2h5oh/datasize"
)

// TestUsage runs a table-driven smoke test over the UsageManager API for
// each tier; the "Fail" case (no usage entry created) expects every call
// to error.
func TestUsage(t *testing.T) {
	var bm = NewUsageManager(newTestDB(t, &Usage{}))
	type args struct {
		username       string
		tier           DataUsageTier
		testUploadSize uint64
	}
	tests := []struct {
		name    string
		args    args
		wantErr bool
	}{
		{"Free", args{"free", Free, datasize.GB.Bytes()}, false},
		{"Partner", args{"partner", Partner, datasize.GB.Bytes() * 10}, false},
		{"Paid", args{"paid", Paid, datasize.GB.Bytes() * 100}, false},
		{"Fail", args{"fail", Free, 1}, true},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			var (
				usage *Usage
				err   error
			)
			if !tt.wantErr {
				// test create usage
				usage, err = bm.NewUsageEntry(tt.args.username, tt.args.tier)
				if (err != nil) != tt.wantErr {
					t.Fatalf("NewUsage() err = %v, wantErr %v", err, tt.wantErr)
				}
				defer bm.DB.Unscoped().Delete(usage)
			}
			// test find by username
			if _, err := bm.FindByUserName(tt.args.username); (err != nil) != tt.wantErr {
				t.Fatalf("FindByUserName() err = %v, wantErr %v", err, tt.wantErr)
			}
			// test get upload price
			if price, err := bm.GetUploadPricePerGB(tt.args.username); (err != nil) != tt.wantErr {
				t.Fatalf("GetUploadPricePerGB() err = %v, wantErr %v", err, tt.wantErr)
			} else if !tt.wantErr && price != usage.Tier.PricePerGB() {
				t.Fatal("failed to get correct price per gb")
			}
			// test ipns publish check
			if err := bm.CanPublishIPNS(tt.args.username); (err != nil) != tt.wantErr {
				t.Fatalf("CanPublishIPNS() err = %v, wantErr %v", err, tt.wantErr)
			}
			// test pubsub publish check
			if err := bm.CanPublishPubSub(tt.args.username); (err != nil) != tt.wantErr {
				t.Fatalf("CanPublishPubSub() err = %v, wantErr %v", err, tt.wantErr)
			}
			if err := bm.CanCreateKey(tt.args.username); (err != nil) != tt.wantErr {
				t.Fatalf("CanCreateKey() err = %v, wantErr %v", err, tt.wantErr)
			}
			// test update data usage
			if err := bm.UpdateDataUsage(tt.args.username, tt.args.testUploadSize); (err != nil) != tt.wantErr {
				t.Fatalf("UpdateDataUsage() err = %v, wantErr %v", err, tt.wantErr)
			}
			// test update tiers for all tier types
			// an account may never enter free status once exiting
			tiers := []DataUsageTier{Paid, Partner}
			for _, tier := range tiers {
				if err := bm.UpdateTier(tt.args.username, tier); (err != nil) != tt.wantErr {
					t.Fatalf("UpdateTier() err = %v, wantErr %v", err, tt.wantErr)
				}
			}
			// test that the light tier was upgraded
			if tt.name == "Paid" && !tt.wantErr {
				// validate that the tier was upgraded
				usage, err = bm.FindByUserName(tt.args.username)
				if err != nil {
					t.Fatal(err)
				}
				if usage.Tier != Partner {
					t.Fatal("failed to correctly set usage tier")
				}
			}
			// test pubsub increment
			if err := bm.IncrementPubSubUsage(tt.args.username, 5); (err != nil) != tt.wantErr {
				t.Fatalf("IncrementPubSubUsage() err = %v, wantErr %v", err, tt.wantErr)
			}
			// if no error is expected, validate the pubsub count
			if !tt.wantErr {
				usage, err := bm.FindByUserName(tt.args.username)
				if err != nil {
					t.Fatal(err)
				}
				if usage.PubSubMessagesSent != 5 {
					t.Fatal("failed to count pubsub usage")
				}
			}
			// test ipns increment
			if err := bm.IncrementIPNSUsage(tt.args.username, 5); (err != nil) != tt.wantErr {
				t.Fatalf("IncrementIPNSUsage() err = %v, wantErr %v", err, tt.wantErr)
			}
			// if no error is expected, validate the ipns count
			if !tt.wantErr {
				usage, err := bm.FindByUserName(tt.args.username)
				if err != nil {
					t.Fatal(err)
				}
				if usage.IPNSRecordsPublished != 5 {
					t.Fatal("failed to count ipns usage")
				}
			}
			// test that resetting counters zeroes both counts
			if !tt.wantErr {
				if err := bm.ResetCounts(tt.args.username); err != nil {
					t.Fatal(err)
				}
				usage, err := bm.FindByUserName(tt.args.username)
				if err != nil {
					t.Fatal(err)
				}
				if usage.IPNSRecordsPublished != 0 || usage.PubSubMessagesSent != 0 {
					t.Fatal("should be 0")
				}
			}
		})
	}
}

// Test_Tier_Upgrade verifies that upgrading Free -> Paid also raises the
// monthly data limit from the free to the non-free limit.
func Test_Tier_Upgrade(t *testing.T) {
	var bm = NewUsageManager(newTestDB(t, &Usage{}))
	b, err := bm.NewUsageEntry("testuser", Free)
	if err != nil {
		t.Fatal(err)
	}
	defer bm.DB.Unscoped().Delete(b)
	if b.Tier != Free {
		t.Fatal("bad tier set")
	}
	if b.MonthlyDataLimitBytes != FreeUploadLimit {
		t.Fatal("bad upload limit set")
	}
	if err := bm.UpdateTier("testuser", Paid); err != nil {
		t.Fatal(err)
	}
	b, err = bm.FindByUserName("testuser")
	if err != nil {
		t.Fatal(err)
	}
	if b.Tier != Paid {
		t.Fatal("bad tier set")
	}
	if b.MonthlyDataLimitBytes != NonFreeUploadLimit {
		t.Fatal("bad upload limit set")
	}
}

// Test_UpdateDataUsage_Free verifies that a Free-tier account over its
// limit rejects a large upload but still accepts a small one.
func Test_UpdateDataUsage_Free(t *testing.T) {
	var bm = NewUsageManager(newTestDB(t, &Usage{}))
	b, err := bm.NewUsageEntry("testuser", Free)
	if err != nil {
		t.Fatal(err)
	}
	defer bm.DB.Unscoped().Delete(b)
	if b.Tier != Free {
		t.Fatal("bad tier set")
	}
	// Force current usage to 2GB directly in the DB.
	b.CurrentDataUsedBytes = datasize.GB.Bytes() * 2
	if err := bm.DB.Save(b).Error; err != nil {
		t.Fatal(err)
	}
	b, err = bm.FindByUserName("testuser")
	if err != nil {
		t.Fatal(err)
	}
	if b.CurrentDataUsedBytes != datasize.GB.Bytes()*2 {
		t.Fatal("bad usage set")
	}
	// Adding another 2GB must exceed the free limit.
	if err := bm.UpdateDataUsage("testuser", datasize.GB.Bytes()*2); err == nil {
		t.Fatal("error expected")
	}
	// A 100MB upload should still fit.
	if err := bm.UpdateDataUsage("testuser", datasize.MB.Bytes()*100); err != nil {
		t.Fatal(err)
	}
}

// Test_ReduceDataUsage verifies that reducing usage subtracts exactly the
// requested amount from the stored counter.
func Test_ReduceDataUsage(t *testing.T) {
	var bm = NewUsageManager(newTestDB(t, &Usage{}))
	b, err := bm.NewUsageEntry("testuser", Paid)
	if err != nil {
		t.Fatal(err)
	}
	defer bm.DB.Unscoped().Delete(b)
	if b.Tier != Paid {
		t.Fatal("bad tier set")
	}
	if err := bm.UpdateDataUsage(
		"testuser",
		datasize.GB.Bytes()+datasize.MB.Bytes()*100,
	); err != nil {
		t.Fatal(err)
	}
	b, err = bm.FindByUserName("testuser")
	if err != nil {
		t.Fatal(err)
	}
	if b.CurrentDataUsedBytes != datasize.GB.Bytes()+datasize.MB.Bytes()*100 {
		t.Fatal("bad datasize")
	}
	currentSize := b.CurrentDataUsedBytes
	expectedSize := b.CurrentDataUsedBytes - datasize.MB.Bytes()*100
	if err := bm.ReduceDataUsage("testuser", datasize.MB.Bytes()*100); err != nil {
		t.Fatal(err)
	}
	b, err = bm.FindByUserName("testuser")
	// Fix: this error was previously ignored, which could dereference a
	// nil/stale record below instead of reporting the lookup failure.
	if err != nil {
		t.Fatal(err)
	}
	if b.CurrentDataUsedBytes != expectedSize {
		fmt.Println("current size", currentSize)
		fmt.Println("got size", b.CurrentDataUsedBytes)
		fmt.Println("expected size", expectedSize)
		t.Fatal("bad reduction in datasize")
	}
}

// Test_ReduceKeyCount verifies key-count decrement behavior, including a
// reduction larger than the remaining count (presumably clamped at zero —
// the final assertion encodes that expectation).
func Test_ReduceKeyCount(t *testing.T) {
	var bm = NewUsageManager(newTestDB(t, &Usage{}))
	b, err := bm.NewUsageEntry("testuser", Paid)
	if err != nil {
		t.Fatal(err)
	}
	defer bm.DB.Unscoped().Delete(b)
	if b.Tier != Paid {
		t.Fatal("bad tier set")
	}
	if err := bm.IncrementKeyCount("testuser", 5); err != nil {
		t.Fatal(err)
	}
	if err := bm.ReduceKeyCount("testuser", 4); err != nil {
		t.Fatal(err)
	}
	b, err = bm.FindByUserName("testuser")
	if err != nil {
		t.Fatal(err)
	}
	if b.KeysCreated != 1 {
		t.Fatal("bad key count")
	}
	if err := bm.ReduceKeyCount("testuser", 3); err != nil {
		t.Fatal(err)
	}
	b, err = bm.FindByUserName("testuser")
	if err != nil {
		t.Fatal(err)
	}
	if b.KeysCreated != 0 {
		t.Fatal("bad key count")
	}
}