text
stringlengths
11
4.05M
// Copyright 2021 The ChromiumOS Authors // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. package hwsec /* This file implements the TPM clear tool in remote tast. */ import ( "context" "path/filepath" "time" "chromiumos/tast/common/hwsec" "chromiumos/tast/dut" "chromiumos/tast/errors" "chromiumos/tast/testing" ) // TPMClearer clear the TPM via crossystem, this would work on both TPM1.2 and TPM2.0. type TPMClearer struct { cmdRunner hwsec.CmdRunner daemonController *hwsec.DaemonController dut *dut.DUT } // NewTPMClearer creates a new TPMClearer object, where r is used to run the command internally. func NewTPMClearer(cmdRunner hwsec.CmdRunner, daemonController *hwsec.DaemonController, dut *dut.DUT) *TPMClearer { return &TPMClearer{cmdRunner, daemonController, dut} } // PreClearTPM backups the logs func (tc *TPMClearer) PreClearTPM(ctx context.Context) error { // Copy logs before TPM reset. Ignore errors on failure. if outDir, ok := testing.ContextOutDir(ctx); ok { dateString := time.Now().Format(time.RFC3339) if err := tc.dut.GetFile(ctx, "/var/log/chrome/", filepath.Join(outDir, "chrome-"+dateString)); err != nil { testing.ContextLog(ctx, "Failed to copy chrome logs: ", err) } } return nil } // ClearTPM sends the TPM clear request func (tc *TPMClearer) ClearTPM(ctx context.Context) error { // Reset the flag of finished clearing. if rawOutput, err := tc.cmdRunner.RunWithCombinedOutput(ctx, "crossystem", "clear_tpm_owner_done=0"); err != nil { // clear_tpm_owner_done is meaningless on VM. // We should log the error message instead of failing the test. testing.ContextLogf(ctx, "Failed to reset clear_tpm_owner_done, output: %q, %v", string(rawOutput), err) } // Fire clear TPM owner request to crossystem. 
if rawOutput, err := tc.cmdRunner.RunWithCombinedOutput(ctx, "crossystem", "clear_tpm_owner_request=1"); err != nil { return errors.Wrapf(err, "failed to fire clear_tpm_owner_request, output: %q", string(rawOutput)) } // Check we have effective value of crossystem. rawOutput, err := tc.cmdRunner.RunWithCombinedOutput(ctx, "crossystem", "clear_tpm_owner_done") output := string(rawOutput) if err != nil { // clear_tpm_owner_done is meaningless on VM. // We should log the error message instead of failing the test, the error message is for verbosity. testing.ContextLogf(ctx, "Failed to query clear_tpm_owner_done, output: %q, %v", output, err) } if output != "0" { // clear_tpm_owner_done is meaningless on VM. // We should log the error message instead of failing the test, the error message is for verbosity. testing.ContextLogf(ctx, "clear_tpm_owner_done = %q; want 0", output) } rawOutput, err = tc.cmdRunner.RunWithCombinedOutput(ctx, "crossystem", "clear_tpm_owner_request") output = string(rawOutput) if err != nil { return errors.Wrapf(err, "failed to query clear_tpm_owner_request, output: %q", output) } if output != "1" { return errors.Wrapf(err, "clear_tpm_owner_request = %q; want 1", output) } return nil } // PostClearTPM reboots and ensure every TPM daemon is up. func (tc *TPMClearer) PostClearTPM(ctx context.Context) error { if err := tc.dut.Reboot(ctx); err != nil { return errors.Wrap(err, "failed to reboot") } // Wait for services. if err := tc.daemonController.WaitForAllDBusServices(ctx); err != nil { return errors.Wrap(err, "failed to wait for hwsec D-Bus services to be ready") } // Check we have effective reset of TPM. rawOutput, err := tc.cmdRunner.RunWithCombinedOutput(ctx, "crossystem", "clear_tpm_owner_done") output := string(rawOutput) if err != nil { // clear_tpm_owner_done is meaningless on VM. // We should log the error message instead of failing the test. 
testing.ContextLogf(ctx, "Failed to query clear_tpm_owner_done, output: %q, %v", output, err) } if output != "1" { // clear_tpm_owner_done is meaningless on VM. // We should log the error message instead of failing the test. testing.ContextLogf(ctx, "clear_tpm_owner_done = %q; want 1", output) } rawOutput, err = tc.cmdRunner.RunWithCombinedOutput(ctx, "crossystem", "clear_tpm_owner_request") output = string(rawOutput) if err != nil { return errors.Wrapf(err, "failed to query clear_tpm_owner_request, output: %q", output) } if output != "0" { return errors.Wrapf(err, "clear_tpm_owner_request = %q; want 0", output) } return nil }
package main

import (
	"fmt"
)

// Person is a simple demo struct used to show value vs. pointer semantics.
type Person struct {
	Name, Gender string
}

// f1 receives a copy of Person: the function mutates only its local copy,
// so the caller's value is unchanged.
func f1(p Person) {
	p.Gender = "female"
}

// f2 receives a pointer, so it mutates the caller's Person directly.
func f2(p *Person) {
	// Syntactic sugar: p.Gender is shorthand for the explicit form
	// (*p).Gender = "female".
	p.Gender = "female"
}

func main() {
	p := Person{"tom", "male"}
	f1(p) // no effect: p was passed by value
	fmt.Println("p=", p)

	// Passing the struct's address lets f2 modify p in place.
	f2(&p)
	fmt.Println("p=", p)

	// new() instantiates the struct and returns a *Person
	// (the original comment had a typo: 直针 should be 指针, i.e. "pointer").
	p2 := new(Person)
	fmt.Printf("p2 type=%T\n", p2)
	p2.Name = "jerry"
	p2.Gender = "male"
	f2(p2)
	fmt.Println("p2=", p2)
}
package main

import (
	"github.com/spf13/cobra"

	"github.com/sp0x/torrentd/indexer"
	"github.com/sp0x/torrentd/torrent"
)

// init registers the `fetch` sub-command on the root command.
func init() {
	cmdFetchTorrents := &cobra.Command{
		Use:   "fetch",
		Short: "Fetches torrents. If no flags are given this command simply fetches the latest 10 pages of torrents.",
		Run:   fetchTorrents,
	}
	rootCmd.AddCommand(cmdFetchTorrents)
}

// fetchTorrents is the Run handler for the `fetch` command: it builds an
// indexer facade from the application configuration and starts a scrape.
// The return value of GetNewScrapeItems is deliberately discarded
// (best-effort fetch; errors are not surfaced here).
func fetchTorrents(_ *cobra.Command, _ []string) {
	facade := indexer.NewFacadeFromConfiguration(&appConfig)
	_ = torrent.GetNewScrapeItems(facade, nil)
}
// Copyright © 2018 NAME HERE <EMAIL ADDRESS>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cmd

import (
	"github.com/spf13/cobra"

	"github.com/johnnyeven/chain/blockchain"
)

// chainStateReindexCmd represents the chainStateReindex command.
// It constructs a block chain with the default genesis-block factory and
// calls Reindex on its chain state (presumably rebuilding a derived index;
// exact semantics live in the blockchain package — confirm there).
var chainStateReindexCmd = &cobra.Command{
	Use:   "chainStateReindex",
	Short: "A brief description of your command",
	Run: func(cmd *cobra.Command, args []string) {
		c := blockchain.NewBlockChain(blockchain.Config{
			NewGenesisBlockFunc: blockchain.NewGenesisBlock,
		})
		// NOTE(review): unkeyed composite literal — relies on ChainState's
		// first field being the chain; go vet may flag this.
		chainState := blockchain.ChainState{c}
		chainState.Reindex()
	},
}

// init registers the command on the root command.
func init() {
	RootCmd.AddCommand(chainStateReindexCmd)
}
package main

import (
	"testing"
)

// TestCode is a table-driven test for cavityMap (defined elsewhere in the
// package). Expected outputs replace some interior digits with "X"
// (looks like the HackerRank "Cavity Map" problem — cells strictly deeper
// than all four neighbors; confirm against cavityMap's implementation).
func TestCode(t *testing.T) {
	var tests = []struct {
		grid   []string
		output []string
	}{
		{
			grid:   []string{"1112", "1912", "1892", "1234"},
			output: []string{"1112", "1X12", "18X2", "1234"},
		},
		{
			// 2x2 grid has no interior cells, so it is unchanged.
			grid:   []string{"12", "12"},
			output: []string{"12", "12"},
		},
	}

	for _, test := range tests {
		got := cavityMap(test.grid)
		for i, v := range got {
			if v != test.output[i] {
				t.Errorf(
					"For grid=%v; Got %v while expecting %v",
					test.grid, got, test.output,
				)
			}
		}
	}
}
package chance

import (
	"bytes"
)

// String returns random string with length in range [1..100].
func (chance *Chance) String() string {
	var buffer bytes.Buffer
	l := chance.NaturalN(100)
	for i := 0; i < l; i++ {
		buffer.WriteString(string(chance.Char()))
	}
	return buffer.String()
}

// StringN returns a random string of exactly n characters.
// (The parameter was renamed from `len`, which shadowed the builtin.)
func (chance *Chance) StringN(n int) string {
	var buffer bytes.Buffer
	for i := 0; i < n; i++ {
		buffer.WriteString(string(chance.Char()))
	}
	return buffer.String()
}

// AnyString returns random string with length in range [1..100] based on
// symbols of the string passed as argument.
func (chance *Chance) AnyString(str string) string {
	var buffer bytes.Buffer
	l := chance.NaturalN(100)
	for i := 0; i < l; i++ {
		buffer.WriteString(string(chance.AnyChar(str)))
	}
	return buffer.String()
}

// AnyStringN returns a random string of exactly n characters based on
// symbols of the string passed as argument.
func (chance *Chance) AnyStringN(str string, n int) string {
	var buffer bytes.Buffer
	for i := 0; i < n; i++ {
		buffer.WriteString(string(chance.AnyChar(str)))
	}
	return buffer.String()
}
package support

import "gopkg.in/go-playground/validator.v9"

// G_validate is the package-wide shared validator instance.
// It is nil until InitValidator is called.
var G_validate *validator.Validate

// InitValidator creates the global validator instance. Call it once at
// startup, before any code dereferences G_validate.
func InitValidator() {
	G_validate = validator.New()
}
package main

import "fmt"

// Demo loan-approval calculator with hard-coded applicant and bank data.
func main() {
	// client side data
	income := 1000
	loanAmount := 1000
	loanTerm := 24

	// bank side data
	creditScore := 500

	// Guard clause: only terms that are whole years are supported.
	if loanTerm%12 != 0 {
		fmt.Println("Opps! The loan should be divisible by 12")
		return
	}

	// Approved unless a violation below flips it.
	approved := true

	// Better credit gets the lower rate.
	rate := 0.2
	if creditScore >= 450 {
		rate = 0.15
	}

	totalCost := rate * float64(loanAmount)
	monthlyPayment := (totalCost + float64(loanAmount)) / float64(loanTerm)

	// Low-credit applicants are rejected when the payment exceeds 10% of income.
	if creditScore < 450 && 0.1*float64(income) < monthlyPayment {
		approved = false
	}

	fmt.Printf("Application X\n" +
		"-----------\n"+
		"Credit Score: %v\n"+
		"Income: %v\n"+
		"Loan Amount: %v\n"+
		"Loan Term: %v\n"+
		"Monthly Payment: %v\n"+
		"Rate: %v\n"+
		"Total Cost: %v\n"+
		"Approved: %v\n",
		creditScore, income, loanAmount, loanTerm,
		monthlyPayment, int(rate*100), totalCost, approved)
}
// merge coalesces overlapping intervals and returns the merged set, sorted
// by start. Intervals are merged in place against the last result entry:
// a new interval either starts a new group (start beyond current end) or
// extends the current group's end.
//
// Note: the input's inner slices may be mutated (the result aliases them),
// matching the original behavior. An empty/nil input returns nil.
func merge(intervals [][]int) [][]int {
	var result [][]int

	// Sort by interval start so overlaps are always adjacent.
	sort.Slice(intervals, func(i, j int) bool {
		return intervals[i][0] < intervals[j][0]
	})

	for i, interval := range intervals {
		if i == 0 {
			result = append(result, interval)
			continue
		}
		// (Removed dead code: the original checked len(result) == 0 here,
		// which is unreachable — result is non-empty after the first append.)
		end := result[len(result)-1]
		if interval[0] > end[1] {
			// Disjoint: start a new merged interval.
			result = append(result, interval)
		} else if interval[1] > end[1] {
			// Overlapping and extends further: grow the current interval.
			end[1] = interval[1]
		}
	}
	return result
}
// Copyright (C) 2015-Present Pivotal Software, Inc. All rights reserved. // This program and the accompanying materials are made available under // the terms of the under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package instanceiterator import ( "errors" "fmt" "log" "time" "github.com/craigfurman/herottp" "github.com/pivotal-cf/on-demand-service-broker/authorizationheader" "github.com/pivotal-cf/on-demand-service-broker/broker/services" "github.com/pivotal-cf/on-demand-service-broker/config" "github.com/pivotal-cf/on-demand-service-broker/network" "github.com/pivotal-cf/on-demand-service-broker/tools" ) type Configurator struct { BrokerServices BrokerServices PollingInterval time.Duration AttemptInterval time.Duration AttemptLimit int MaxInFlight int Canaries int Listener Listener Sleeper sleeper Triggerer Triggerer CanarySelectionParams config.CanarySelectionParams } func NewConfigurator(conf config.InstanceIteratorConfig, logger *log.Logger, logPrefix string) (*Configurator, error) { brokerServices, err := brokerServices(conf, logger) if err != nil { return nil, err } pollingInterval, err := pollingInterval(conf) if err != nil { return nil, err } attemptInterval, err := attemptInterval(conf) if err != nil { return nil, err } attemptLimit, err := attemptLimit(conf) if err != nil { return nil, err } maxInFlight, err := maxInFlight(conf) if err != nil { return nil, err } canaries, err := canaries(conf) if err != nil { return nil, err } canarySelectionParams, err := 
canarySelectionParams(conf) if err != nil { return nil, err } listener := NewLoggingListener(logger, logPrefix) b := &Configurator{ BrokerServices: brokerServices, PollingInterval: pollingInterval, AttemptInterval: attemptInterval, AttemptLimit: attemptLimit, MaxInFlight: maxInFlight, Canaries: canaries, Listener: listener, Sleeper: &tools.RealSleeper{}, CanarySelectionParams: canarySelectionParams, } return b, nil } func (b *Configurator) SetUpgradeTriggererToBOSH() { b.Listener.UpgradeStrategy("BOSH") b.Triggerer = NewBOSHUpgradeTriggerer(b.BrokerServices) } func (b *Configurator) SetUpgradeTriggererToCF(cfClient CFClient, logger *log.Logger) { b.Listener.UpgradeStrategy("CF") b.Triggerer = NewCFTrigger(cfClient, logger) } func (b *Configurator) SetRecreateTriggerer() error { if b.BrokerServices == nil { return errors.New("unable to set triggerer, brokerServices must not be nil") } b.Triggerer = NewRecreateTriggerer(b.BrokerServices) return nil } func brokerServices(conf config.InstanceIteratorConfig, logger *log.Logger) (*services.BrokerServices, error) { if conf.BrokerAPI.Authentication.Basic.Username == "" || conf.BrokerAPI.Authentication.Basic.Password == "" || conf.BrokerAPI.URL == "" { return &services.BrokerServices{}, errors.New("the brokerUsername, brokerPassword and brokerUrl are required to function") } brokerBasicAuthHeaderBuilder := authorizationheader.NewBasicAuthHeaderBuilder( conf.BrokerAPI.Authentication.Basic.Username, conf.BrokerAPI.Authentication.Basic.Password, ) certPool, err := network.AppendCertsFromPEM(conf.BrokerAPI.TLS.CACert) if err != nil { return &services.BrokerServices{}, fmt.Errorf("error getting a certificate pool to append our trusted cert to: %s", err) } return services.NewBrokerServices( herottp.New(herottp.Config{ Timeout: time.Duration(conf.RequestTimeout) * time.Second, RootCAs: certPool, DisableTLSCertificateVerification: conf.BrokerAPI.TLS.DisableSSLCertVerification, MaxRetries: 5, }), brokerBasicAuthHeaderBuilder, 
conf.BrokerAPI.URL, logger, ), nil } func pollingInterval(conf config.InstanceIteratorConfig) (time.Duration, error) { if conf.PollingInterval <= 0 { return 0, errors.New("the pollingInterval must be greater than zero") } return time.Duration(conf.PollingInterval) * time.Second, nil } func attemptInterval(conf config.InstanceIteratorConfig) (time.Duration, error) { if conf.AttemptInterval <= 0 { return 0, errors.New("the attemptInterval must be greater than zero") } return time.Duration(conf.AttemptInterval) * time.Second, nil } func attemptLimit(conf config.InstanceIteratorConfig) (int, error) { if conf.AttemptLimit <= 0 { return 0, errors.New("the attempt limit must be greater than zero") } return conf.AttemptLimit, nil } func maxInFlight(conf config.InstanceIteratorConfig) (int, error) { if conf.MaxInFlight <= 0 { return 0, errors.New("the max in flight must be greater than zero") } return conf.MaxInFlight, nil } func canaries(conf config.InstanceIteratorConfig) (int, error) { if conf.Canaries < 0 { return 0, errors.New("the number of canaries cannot be negative") } return conf.Canaries, nil } func canarySelectionParams(conf config.InstanceIteratorConfig) (config.CanarySelectionParams, error) { return conf.CanarySelectionParams, nil }
package e var MsgFlags = map[int]string{ SUCCESS: "ok", ERROR: "fail", ErrInvalidParams: "请求参数错误", ErrUnKnownInternalError: "未知服务器内部错误,请稍后重试", ErrInvalidBasicAuthParam: "Basic认证参数错误", ErrBasicAuthFailed: "未通过Basic认证", ErrUnAuthorized: "你没有权限进行这个操作", ErrInvalidBearerAuthParams: "Bearer认证参数错误", ErrAuthCheckTokenFail: "Token鉴权失败", ErrAuthCheckTokenTimeout: "Token已超时", ErrJwtAuth: "Token生成失败", ErrUserNotFound: "用户不存在", ErrGroupNotFound: "群组不存在", ErrDeleteGroup: "解散群组错误,请检查参数", } // GetMsg get error information based on Code func GetMsg(code int) string { msg, ok := MsgFlags[code] if ok { return msg } return MsgFlags[ERROR] }
package adservertargeting const ( reqValid = `{ "id": "req_id", "imp": [ { "id": "test_imp1", "ext": {"appnexus": {"placementId": 250419771}}, "banner": {"format": [{"h": 250, "w": 300}]} }, { "id": "test_imp2", "ext": {"appnexus": {"placementId": 250419771}}, "banner": {"format": [{"h": 250, "w": 300}]} } ], "site": {"page": "test.com"} }` reqInvalid = `{ "id": "req_id", "imp": { "incorrect":true }, "site": {"page": "test.com"} }` reqNoImps = `{ "id": "req_id", "site": {"page": "test.com"} }` testUrl = "https://www.test-url.com?amp-key=testAmpKey&data-override-height=400" reqFullValid = `{ "id": "req_id", "imp": [ { "id": "test_imp1", "ext": {"appnexus": {"placementId": 123}}, "banner": {"format": [{"h": 250, "w": 300}], "w": 260, "h": 350} }, { "id": "test_imp2", "ext": {"appnexus": {"placementId": 456}}, "banner": {"format": [{"h": 400, "w": 600}], "w": 270, "h": 360} } ], "site": {"page": "test.com"} }` reqFullInvalid = `{ "imp": [ { "ext": {"appnexus": {"placementId": 123}} }, { "ext": {"appnexus": {"placementId": 456}} } ] }` reqExt = `{ "prebid": { "adservertargeting": [ { "key": "hb_amp_param", "source": "bidrequest", "value": "ext.prebid.amp.data.ampkey" }, { "key": "hb_req_imp_ext_param", "source": "bidrequest", "value": "imp.ext.prebid.test" }, { "key": "hb_req_imp_ext_bidder_param", "source": "bidrequest", "value": "imp.ext.bidder1.tagid" }, { "key": "hb_req_imp_param", "source": "bidrequest", "value": "imp.bidfloor" }, { "key": "hb_req_ext_param", "source": "bidrequest", "value": "ext.prebid.targeting.includebrandcategory" }, { "key": "hb_req_user_param", "source": "bidrequest", "value": "user.yob" }, { "key": "hb_static_thing", "source": "static", "value": "test-static-value" }, { "key": "{{BIDDER}}_custom1", "source": "bidresponse", "value": "seatbid.bid.ext.custom1" }, { "key": "custom2", "source": "bidresponse", "value": "seatbid.bid.ext.custom2" }, { "key": "{{BIDDER}}_imp", "source": "bidresponse", "value": "seatbid.bid.impid" }, { "key": 
"seat_cur", "source": "bidresponse", "value": "cur" } ], "targeting": { "includewinners": false, "includebidderkeys": true, "includebrandcategory": { "primaryadserver": 1, "publisher": "", "withcategory": true } } } }` seatBid0Bid0Ext = `{ "prebid": { "foo": "bar1", "targeting": { "appnexus_custom6": "true", "custom2": "bar1", "custom3": "10", "custom4": "barApn", "custom5": "USD", "custom6":"testResponse", "custom7":"testBidId", "custom8":"testCustomData", "custom9":"2", "hb_amp_param": "testAmpKey", "hb_imp_param": "111" } } }` seatBid0Bid1Ext = ` { "prebid": { "foo": "bar2", "targeting": { "appnexus_custom6": "true", "custom2": "bar2", "custom3": "20", "custom4": "barApn", "custom5": "USD", "custom6":"testResponse", "custom7":"testBidId", "custom8":"testCustomData", "custom9":"2", "hb_amp_param": "testAmpKey", "hb_imp_param": "222" } } }` seatBid0Bid2Ext = ` { "prebid": { "foo": "bar3", "targeting": { "appnexus_custom6": "true", "custom2": "bar3", "custom3": "30", "custom4": "barApn", "custom5": "USD", "custom6":"testResponse", "custom7":"testBidId", "custom8":"testCustomData", "custom9":"2", "hb_amp_param": "testAmpKey", "hb_imp_param": "333" } } }` seatBid1Bid0Ext = `{ "prebid": { "foo": "bar11", "targeting": { "custom2": "bar11", "custom3": "11", "custom4": "barRubicon", "custom5": "USD", "custom6":"testResponse", "custom7":"testBidId", "custom8":"testCustomData", "custom9":"2", "hb_amp_param": "testAmpKey", "hb_imp_param": "111", "rubicon_custom6": "true", "testInput": 111 } } }` seatBid1Bid1Ext = ` { "prebid": { "foo": "bar22", "targeting": { "custom2": "bar22", "custom3": "22", "custom4": "barRubicon", "custom5": "USD", "custom6":"testResponse", "custom7":"testBidId", "custom8":"testCustomData", "custom9":"2", "hb_amp_param": "testAmpKey", "hb_imp_param": "222", "rubicon_custom6": "true", "testInput": 222 } } }` seatBid1Bid2Ext = `{ "prebid": { "foo": "bar33", "targeting": { "custom2": "bar33", "custom3": "33", "custom4": "barRubicon", "custom5": "USD", 
"custom6":"testResponse", "custom7":"testBidId", "custom8":"testCustomData", "custom9":"2", "hb_amp_param": "testAmpKey", "hb_imp_param": "333", "rubicon_custom6": "true", "testInput": 333 } } }` bid0Ext = `{"prebid": {"foo":"bar1", "targeting": {"custom_attr":"bar1"}}}` bid1Ext = `{"prebid": {"foo":"bar2", "targeting": {"custom_attr":"bar2"}}}` bid2Ext = `{"prebid": {"foo":"bar3", "targeting": {"custom_attr":"bar3"}}}` apnBid0Ext = `{ "custom1": 1111, "custom2": "a1111", "prebid": { "foo": "bar1", "targeting": { "appnexus_custom1": "1111", "appnexus_imp":"imp1", "custom2": "a1111", "hb_amp_param": "testAmpKey", "hb_req_imp_ext_bidde": "111", "hb_req_imp_param": "10", "hb_req_user_param": "2000", "hb_static_thing": "test-static-value", "seat_cur":"USD" } } }` apnBid1Ext = `{ "custom1": 2222, "custom2": "a2222", "prebid": { "foo": "bar2", "targeting": { "appnexus_custom1": "2222", "appnexus_imp":"imp2", "custom2": "a2222", "hb_amp_param": "testAmpKey", "hb_req_imp_ext_bidde": "222", "hb_req_imp_param": "20", "hb_req_user_param": "2000", "hb_static_thing": "test-static-value", "seat_cur":"USD" } } }` apnBid2Ext = `{ "custom1": 3333, "custom2": "a3333", "prebid": { "foo": "bar3", "targeting": { "appnexus_custom1": "3333", "appnexus_imp":"imp3", "custom2": "a3333", "hb_amp_param": "testAmpKey", "hb_req_imp_ext_bidde": "333", "hb_req_imp_param": "30", "hb_req_user_param": "2000", "hb_static_thing": "test-static-value", "seat_cur":"USD" } } }` rbcBid0Ext = `{ "custom1": 4444, "custom2": "r4444", "prebid": { "foo": "bar11", "targeting": { "custom2": "r4444", "rubicon_imp":"imp1", "hb_amp_param": "testAmpKey", "hb_req_imp_ext_bidde": "111", "hb_req_imp_param": "10", "hb_req_user_param": "2000", "hb_static_thing": "test-static-value", "rubicon_custom1": "4444", "testInput": 111, "seat_cur":"USD" } } }` rbcBid1Ext = `{ "custom1": 5555, "custom2": "r5555", "prebid": { "foo": "bar22", "targeting": { "custom2": "r5555", "rubicon_imp":"imp2", "hb_amp_param": "testAmpKey", 
"hb_req_imp_ext_bidde": "222", "hb_req_imp_param": "20", "hb_req_user_param": "2000", "hb_static_thing": "test-static-value", "rubicon_custom1": "5555", "testInput": 222, "seat_cur":"USD" } } }` rbcBid2Ext = `{ "custom1": 6666, "custom2": "r6666", "prebid": { "foo": "bar33", "targeting": { "custom2": "r6666", "rubicon_imp":"imp3", "hb_amp_param": "testAmpKey", "hb_req_imp_ext_bidde": "333", "hb_req_imp_param": "30", "hb_req_user_param": "2000", "hb_static_thing": "test-static-value", "rubicon_custom1": "6666", "testInput": 333, "seat_cur":"USD" } } }` )
package main

// A tiny text-adventure scene: prints the scene description, then reacts
// to a hard-coded command via a switch. (Game strings are intentionally
// in Japanese and must not be altered.)
import "fmt"

func main() {
	fmt.Println("洞窟の入口だ。東に進む道もある。")

	var command = "go inside"

	switch command {
	case "go east":
		fmt.Println("君は、更に山に登る。")
	// A single case can match several commands.
	case "enter cave", "go inside":
		fmt.Println("君は薄暗い洞窟の中にいる。")
	case "read sign":
		fmt.Println("「未成年立ち入り禁止」と書いてある。")
	default:
		fmt.Println("なんだか、よくわからない。")
	}
}
package main

import "fmt"

// Demo of short variable declarations and partial redeclaration with :=.
func main() {
	const idioma = "Español"
	name, lastname := "Heriberto", "Figueroa"
	fmt.Println(name, lastname)
	// := here redeclares only edad; name is merely reassigned because at
	// least one variable on the left is new.
	name, edad := "Maaria", 20
	fmt.Println(name, lastname, edad, idioma)
}

// Typical comments
/*
Typical comments
*/
// Copyright 2020 The LUCI Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package poller import ( "context" "fmt" "strings" "time" "google.golang.org/grpc/codes" "google.golang.org/protobuf/types/known/timestamppb" gerritutil "go.chromium.org/luci/common/api/gerrit" "go.chromium.org/luci/common/clock" "go.chromium.org/luci/common/errors" "go.chromium.org/luci/common/logging" gerritpb "go.chromium.org/luci/common/proto/gerrit" "go.chromium.org/luci/common/retry/transient" "go.chromium.org/luci/common/sync/parallel" "go.chromium.org/luci/gae/service/datastore" "go.chromium.org/luci/grpc/grpcutil" "go.chromium.org/luci/cv/internal/changelist" "go.chromium.org/luci/cv/internal/common" "go.chromium.org/luci/cv/internal/gerrit" "go.chromium.org/luci/cv/internal/gerrit/updater" ) const ( // fullPollInterval is between querying Gerrit for all changes relevant to CV as // if from scratch. fullPollInterval = 30 * time.Minute // incrementalPollOverlap is safety overlap of time range of Change.Updated // between two successive polls. // // While this doesn't guarantee that CV won't miss changes in-between // incremental polls, it should mitigate the most common reasons: // * time skew between CV and Gerrit clocks, // * hopping between potentially out of sync Gerrit mirrors. incrementalPollOverlap = time.Minute // changesPerPoll is how many changes CV will process per poll. 
// // A value that's too low here will first affect full polls, since they have // to (re-)process all interesting changes watched by a LUCI project. // // 10k is OK to fetch sequentially and keep in RAM without OOM-ing, // and currently enough for each of the LUCI projects. // // Higher values may need smarter full polling techniques. changesPerPoll = 10000 // pageSize is how many changes to request in a single ListChangesRequest. pageSize = 1000 // moreChangesTrustFactor controls when CV must not trust false value of // ListChangesResponse.MoreChanges. // // Value of 0.5 combined with pageSize of 1000 means that CV will trust // MoreChanges iff Gerrit returns <= 500 CLs. // // For more info, see corresponding field in // https://godoc.org/go.chromium.org/luci/common/api/gerrit#PagingListChangesOptions moreChangesTrustFactor = 0.5 ) // subpoll queries Gerrit and updates the state of individual SubPoller. func (p *Poller) subpoll(ctx context.Context, luciProject string, sp *SubPoller) error { q := singleQuery{ luciProject: luciProject, sp: sp, p: p, } var err error if q.client, err = gerrit.CurrentClient(ctx, sp.GetHost(), luciProject); err != nil { return err } if sp.GetLastFullTime() == nil { return q.full(ctx) } nextFullAt := sp.GetLastFullTime().AsTime().Add(fullPollInterval) if clock.Now(ctx).Before(nextFullAt) { return q.incremental(ctx) } return q.full(ctx) } type singleQuery struct { luciProject string pm PM sp *SubPoller p *Poller client gerrit.QueryClient } func (q *singleQuery) full(ctx context.Context) error { ctx = logging.SetFields(ctx, logging.Fields{ "luciProject": q.luciProject, "poll": "full", }) started := clock.Now(ctx) after := started.Add(-common.MaxTriggerAge) changes, err := q.fetch(ctx, after, buildQuery(q.sp, queryLimited)) // There can be partial result even if err != nil. 
switch err2 := q.scheduleTasks(ctx, changes, true); { case err != nil: return err case err2 != nil: return err2 } cur := uniqueSortedIDsOf(changes) if diff := common.DifferenceSorted(q.sp.Changes, cur); len(diff) != 0 { // `diff` changes are no longer matching the limited query, // so they probably updated since. if err := q.p.scheduleRefreshTasks(ctx, q.luciProject, q.sp.GetHost(), diff); err != nil { return err } } q.sp.Changes = cur q.sp.LastFullTime = timestamppb.New(started) q.sp.LastIncrTime = nil return nil } func (q *singleQuery) incremental(ctx context.Context) error { ctx = logging.SetFields(ctx, logging.Fields{ "luciProject": q.luciProject, "poll": "incremental", }) started := clock.Now(ctx) lastInc := q.sp.GetLastIncrTime() if lastInc == nil { if lastInc = q.sp.GetLastFullTime(); lastInc == nil { panic("must have been a full poll") } } after := lastInc.AsTime().Add(-incrementalPollOverlap) // Unlike the full poll, query for all changes regardless of status or CQ // vote. This ensures that CV notices quickly when previously NEW & CQ-ed // change has either CQ vote removed OR status changed (e.g. submitted or // abandoned). changes, err := q.fetch(ctx, after, buildQuery(q.sp, queryAll)) // There can be partial result even if err != nil. 
switch err2 := q.scheduleTasks(ctx, changes, false); { case err != nil: return err case err2 != nil: return err2 } q.sp.Changes = common.UnionSorted(q.sp.Changes, uniqueSortedIDsOf(changes)) q.sp.LastIncrTime = timestamppb.New(started) return nil } func (q *singleQuery) fetch(ctx context.Context, after time.Time, query string) ([]*gerritpb.ChangeInfo, error) { opts := gerritutil.PagingListChangesOptions{ Limit: changesPerPoll, PageSize: pageSize, MoreChangesTrustFactor: moreChangesTrustFactor, UpdatedAfter: after, } req := gerritpb.ListChangesRequest{ Options: []gerritpb.QueryOption{ gerritpb.QueryOption_SKIP_MERGEABLE, }, Query: query, } resp, err := gerritutil.PagingListChanges(ctx, q.client, &req, opts) switch grpcutil.Code(err) { case codes.OK: if resp.GetMoreChanges() { logging.Errorf(ctx, "Ignoring oldest changes because reached max (%d) allowed to process per poll", changesPerPoll) } return resp.GetChanges(), nil // TODO(tandrii): handle 403 and 404 if CV lacks access to entire host. default: // NOTE: resp may be set if there was partial success in fetching changes // followed by a typically transient error. return resp.GetChanges(), gerrit.UnhandledError(ctx, err, "PagingListChanges failed") } } // maxLoadCLBatchSize limits how many CL entities are loaded at once for // notifying PM. const maxLoadCLBatchSize = 100 func (q *singleQuery) scheduleTasks(ctx context.Context, changes []*gerritpb.ChangeInfo, forceNotifyPM bool) error { // TODO(tandrii): optimize by checking if CV is interested in the // (host,project,ref) these changes come from before triggering tasks. logging.Debugf(ctx, "scheduling %d CLUpdate tasks", len(changes)) var clids []common.CLID if forceNotifyPM { // Objective: make PM aware of all CLs. // Optimization: avoid RefreshGerritCL with forceNotifyPM=true if possible, // as this removes ability to de-duplicate. 
var err error eids := make([]changelist.ExternalID, len(changes)) for i, c := range changes { if eids[i], err = changelist.GobID(q.sp.GetHost(), c.GetNumber()); err != nil { return err } } clids, err = changelist.Lookup(ctx, eids) if err != nil { return err } if err := q.p.notifyPMifKnown(ctx, q.luciProject, clids, maxLoadCLBatchSize); err != nil { return err } } errs := parallel.WorkPool(10, func(work chan<- func() error) { for i, c := range changes { payload := &updater.RefreshGerritCL{ LuciProject: q.luciProject, Host: q.sp.GetHost(), Change: c.GetNumber(), UpdatedHint: c.GetUpdated(), ForceNotifyPm: forceNotifyPM && (clids[i] == 0), } work <- func() error { return q.p.clUpdater.Schedule(ctx, payload) } } }) return common.MostSevereError(errs) } func (p *Poller) scheduleRefreshTasks(ctx context.Context, luciProject, host string, changes []int64) error { logging.Debugf(ctx, "scheduling %d CLUpdate tasks for no longer matched CLs", len(changes)) var err error eids := make([]changelist.ExternalID, len(changes)) for i, c := range changes { if eids[i], err = changelist.GobID(host, c); err != nil { return err } } // Objective: make PM aware of all CLs. // Optimization: avoid RefreshGerritCL with forceNotifyPM=true if possible, // as this removes ability to de-duplicate. // Since all no longer matched CLs are typically already stored in the // Datastore, get their internal CL IDs and notify PM directly. clids, err := changelist.Lookup(ctx, eids) if err != nil { return err } if err := p.notifyPMifKnown(ctx, luciProject, clids, maxLoadCLBatchSize); err != nil { return err } errs := parallel.WorkPool(10, func(work chan<- func() error) { for i, c := range changes { payload := &updater.RefreshGerritCL{ LuciProject: luciProject, Host: host, Change: c, ForceNotifyPm: 0 == clids[i], // notify iff CL ID isn't yet known. 
} work <- func() error { return p.clUpdater.Schedule(ctx, payload) } } }) return common.MostSevereError(errs) } // notifyPMifKnown notifies PM to update its CLs for each non-0 CLID. // // Obtains EVersion of each CL before notify a PM. Unfortunately, this loads a // lot of information we don't need, such as Snapshot. So, loads CLs in batches // to avoid large memory footprint. // // In practice, most of these CLs would be already dscache-ed, so loading them // is fast. func (p *Poller) notifyPMifKnown(ctx context.Context, luciProject string, clids []common.CLID, maxBatchSize int) error { cls := make([]*changelist.CL, 0, maxBatchSize) flush := func() error { if err := datastore.Get(ctx, cls); err != nil { return errors.Annotate(common.MostSevereError(err), "failed to load CLs").Tag(transient.Tag).Err() } return p.pm.NotifyCLsUpdated(ctx, luciProject, cls) } for _, clid := range clids { switch l := len(cls); { case clid == 0: case l == maxBatchSize: if err := flush(); err != nil { return err } cls = cls[:0] default: cls = append(cls, &changelist.CL{ID: clid}) } } if len(cls) > 0 { return flush() } return nil } type queryKind int const ( queryLimited queryKind = iota queryAll ) // buildQuery returns query string. // // If queryLimited, unlike queryAll, searches for NEW CLs with CQ vote. func buildQuery(sp *SubPoller, kind queryKind) string { buf := strings.Builder{} switch kind { case queryLimited: buf.WriteString("status:NEW ") // TODO(tandrii): make label optional to support Tricium use-case. buf.WriteString("label:Commit-Queue>0 ") case queryAll: default: panic(fmt.Errorf("unknown queryKind %d", kind)) } // TODO(crbug/1163177): specify `branch:` search term to restrict search to // specific refs. This requires changing partitioning poller into subpollers, // but will provide more targeted queries, reducing load on CV & Gerrit. 
emitProjectValue := func(p string) { // Even though it appears to work without, Gerrit doc says project names // containing / must be surrounded by "" or {}: // https://gerrit-review.googlesource.com/Documentation/user-search.html#_argument_quoting buf.WriteRune('"') buf.WriteString(p) buf.WriteRune('"') } // One of .OrProjects or .CommonProjectPrefix must be set. switch prs := sp.GetOrProjects(); len(prs) { case 0: if sp.GetCommonProjectPrefix() == "" { panic("partitionConfig function should have ensured this") } // project*s* means find matching projects by prefix buf.WriteString("projects:") emitProjectValue(sp.GetCommonProjectPrefix()) case 1: buf.WriteString("project:") emitProjectValue(prs[0]) default: buf.WriteRune('(') for i, p := range prs { if i > 0 { buf.WriteString(" OR ") } buf.WriteString("project:") emitProjectValue(p) } buf.WriteRune(')') } return buf.String() } func uniqueSortedIDsOf(changes []*gerritpb.ChangeInfo) []int64 { if len(changes) == 0 { return nil } out := make([]int64, len(changes)) for i, c := range changes { out[i] = c.GetNumber() } return common.UniqueSorted(out) }
package engine // Deal adds n cards to each player's hand func Deal(deck Shifter, players []Player, n int) { for j := 0; j < n; j++ { for i := 0; i < len(players); i++ { // Get player to deal to player := players[i] // Get this player's hand hand := player.Hand() // Remove card to deal from top of deck card := deck.Shift() // Push the card to deal onto the end of this player's hand hand.Cards = append(hand.Cards, card) // Update the player's hand player.SetHand(hand) players[i] = player } } }
// Copyright (c) 2016-2019 Uber Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package conn import ( "errors" "fmt" "net" "github.com/uber/kraken/core" "github.com/uber/kraken/gen/go/proto/p2p" "github.com/uber/kraken/lib/torrent/networkevent" "github.com/uber/kraken/lib/torrent/storage" "github.com/uber/kraken/utils/bandwidth" "github.com/andres-erbsen/clock" "github.com/uber-go/tally" "github.com/willf/bitset" "go.uber.org/zap" ) // RemoteBitfields represents the bitfields of an agent's peers for a given torrent. type RemoteBitfields map[core.PeerID]*bitset.BitSet func (rb RemoteBitfields) marshalBinary() (map[string][]byte, error) { rbBytes := make(map[string][]byte) for peerID, bitfield := range rb { b, err := bitfield.MarshalBinary() if err != nil { return nil, err } rbBytes[peerID.String()] = b } return rbBytes, nil } func (rb RemoteBitfields) unmarshalBinary(rbBytes map[string][]byte) error { for peerIDStr, bitfieldBytes := range rbBytes { peerID, err := core.NewPeerID(peerIDStr) if err != nil { return fmt.Errorf("peer id: %s", err) } bitfield := bitset.New(0) if err := bitfield.UnmarshalBinary(bitfieldBytes); err != nil { return err } rb[peerID] = bitfield } return nil } // handshake contains the same fields as a protobuf bitfield message, but with // the fields converted into types used within the scheduler package. As such, // in this package "handshake" and "bitfield message" are usually synonymous. 
type handshake struct { peerID core.PeerID digest core.Digest infoHash core.InfoHash bitfield *bitset.BitSet remoteBitfields RemoteBitfields namespace string } func (h *handshake) toP2PMessage() (*p2p.Message, error) { b, err := h.bitfield.MarshalBinary() if err != nil { return nil, err } rb, err := h.remoteBitfields.marshalBinary() if err != nil { return nil, err } return &p2p.Message{ Type: p2p.Message_BITFIELD, Bitfield: &p2p.BitfieldMessage{ PeerID: h.peerID.String(), Name: h.digest.Hex(), InfoHash: h.infoHash.String(), BitfieldBytes: b, RemoteBitfieldBytes: rb, Namespace: h.namespace, }, }, nil } func handshakeFromP2PMessage(m *p2p.Message) (*handshake, error) { if m.Type != p2p.Message_BITFIELD { return nil, fmt.Errorf("expected bitfield message, got %s", m.Type) } bitfieldMsg := m.GetBitfield() if bitfieldMsg == nil { return nil, fmt.Errorf("empty bit field") } peerID, err := core.NewPeerID(bitfieldMsg.PeerID) if err != nil { return nil, fmt.Errorf("peer id: %s", err) } ih, err := core.NewInfoHashFromHex(bitfieldMsg.InfoHash) if err != nil { return nil, fmt.Errorf("info hash: %s", err) } d, err := core.NewSHA256DigestFromHex(bitfieldMsg.Name) if err != nil { return nil, fmt.Errorf("name: %s", err) } bitfield := bitset.New(0) if err := bitfield.UnmarshalBinary(bitfieldMsg.BitfieldBytes); err != nil { return nil, err } remoteBitfields := make(RemoteBitfields) if err := remoteBitfields.unmarshalBinary(bitfieldMsg.RemoteBitfieldBytes); err != nil { return nil, err } return &handshake{ peerID: peerID, infoHash: ih, bitfield: bitfield, digest: d, namespace: bitfieldMsg.Namespace, remoteBitfields: remoteBitfields, }, nil } // PendingConn represents half-opened, pending connection initialized by a // remote peer. type PendingConn struct { handshake *handshake nc net.Conn } // PeerID returns the remote peer id. func (pc *PendingConn) PeerID() core.PeerID { return pc.handshake.peerID } // Digest returns the digest of the blob the remote peer wants to open. 
func (pc *PendingConn) Digest() core.Digest { return pc.handshake.digest } // InfoHash returns the info hash of the torrent the remote peer wants to open. func (pc *PendingConn) InfoHash() core.InfoHash { return pc.handshake.infoHash } // Bitfield returns the bitfield of the remote peer's torrent. func (pc *PendingConn) Bitfield() *bitset.BitSet { return pc.handshake.bitfield } // RemoteBitfields returns the bitfield of the remote peer's torrent. func (pc *PendingConn) RemoteBitfields() RemoteBitfields { return pc.handshake.remoteBitfields } // Namespace returns the namespace of the remote peer's torrent. func (pc *PendingConn) Namespace() string { return pc.handshake.namespace } // Close closes the connection. func (pc *PendingConn) Close() { pc.nc.Close() } // HandshakeResult wraps data returned from a successful handshake. type HandshakeResult struct { Conn *Conn Bitfield *bitset.BitSet RemoteBitfields RemoteBitfields } // Handshaker defines the handshake protocol for establishing connections to // other peers. type Handshaker struct { config Config stats tally.Scope clk clock.Clock bandwidth *bandwidth.Limiter networkEvents networkevent.Producer peerID core.PeerID events Events } // NewHandshaker creates a new Handshaker. func NewHandshaker( config Config, stats tally.Scope, clk clock.Clock, networkEvents networkevent.Producer, peerID core.PeerID, events Events, logger *zap.SugaredLogger) (*Handshaker, error) { config = config.applyDefaults() stats = stats.Tagged(map[string]string{ "module": "conn", }) bl, err := bandwidth.NewLimiter(config.Bandwidth, bandwidth.WithLogger(logger)) if err != nil { return nil, fmt.Errorf("bandwidth: %s", err) } return &Handshaker{ config: config, stats: stats, clk: clk, bandwidth: bl, networkEvents: networkEvents, peerID: peerID, events: events, }, nil } // Accept upgrades a raw network connection opened by a remote peer into a // PendingConn. 
func (h *Handshaker) Accept(nc net.Conn) (*PendingConn, error) { hs, err := h.readHandshake(nc) if err != nil { return nil, fmt.Errorf("read handshake: %s", err) } return &PendingConn{hs, nc}, nil } // Establish upgrades a PendingConn returned via Accept into a fully // established Conn. func (h *Handshaker) Establish( pc *PendingConn, info *storage.TorrentInfo, remoteBitfields RemoteBitfields) (*Conn, error) { // Namespace is one-directional: it is only supplied by the connection opener // and is not reciprocated by the connection acceptor. if err := h.sendHandshake(pc.nc, info, remoteBitfields, ""); err != nil { return nil, fmt.Errorf("send handshake: %s", err) } c, err := h.newConn(pc.nc, pc.handshake.peerID, info, true) if err != nil { return nil, fmt.Errorf("new conn: %s", err) } return c, nil } // Initialize returns a fully established Conn for the given torrent to the // given peer / address. Also returns the bitfield of the remote peer and // its connections for the torrent. func (h *Handshaker) Initialize( peerID core.PeerID, addr string, info *storage.TorrentInfo, remoteBitfields RemoteBitfields, namespace string) (*HandshakeResult, error) { nc, err := net.DialTimeout("tcp", addr, h.config.HandshakeTimeout) if err != nil { return nil, fmt.Errorf("dial: %s", err) } r, err := h.fullHandshake(nc, peerID, info, remoteBitfields, namespace) if err != nil { nc.Close() return nil, err } return r, nil } func (h *Handshaker) sendHandshake( nc net.Conn, info *storage.TorrentInfo, remoteBitfields RemoteBitfields, namespace string) error { hs := &handshake{ peerID: h.peerID, digest: info.Digest(), infoHash: info.InfoHash(), bitfield: info.Bitfield(), remoteBitfields: remoteBitfields, namespace: namespace, } msg, err := hs.toP2PMessage() if err != nil { return err } return sendMessageWithTimeout(nc, msg, h.config.HandshakeTimeout) } func (h *Handshaker) readHandshake(nc net.Conn) (*handshake, error) { m, err := readMessageWithTimeout(nc, h.config.HandshakeTimeout) if 
err != nil { return nil, fmt.Errorf("read message: %s", err) } hs, err := handshakeFromP2PMessage(m) if err != nil { return nil, fmt.Errorf("handshake from p2p message: %s", err) } return hs, nil } func (h *Handshaker) fullHandshake( nc net.Conn, peerID core.PeerID, info *storage.TorrentInfo, remoteBitfields RemoteBitfields, namespace string) (*HandshakeResult, error) { if err := h.sendHandshake(nc, info, remoteBitfields, namespace); err != nil { return nil, fmt.Errorf("send handshake: %s", err) } hs, err := h.readHandshake(nc) if err != nil { return nil, fmt.Errorf("read handshake: %s", err) } if hs.peerID != peerID { return nil, errors.New("unexpected peer id") } c, err := h.newConn(nc, peerID, info, false) if err != nil { return nil, fmt.Errorf("new conn: %s", err) } return &HandshakeResult{c, hs.bitfield, hs.remoteBitfields}, nil } func (h *Handshaker) newConn( nc net.Conn, peerID core.PeerID, info *storage.TorrentInfo, openedByRemote bool) (*Conn, error) { return newConn( h.config, h.stats, h.clk, h.networkEvents, h.bandwidth, h.events, nc, h.peerID, peerID, info, openedByRemote, zap.NewNop().Sugar()) }
package main

import "sort"

// 332. Reconstruct Itinerary
//
// Given airline tickets as [from, to] pairs, reconstruct the itinerary in
// order. All tickets belong to a traveller departing from JFK, so the
// itinerary must begin at JFK. When several valid itineraries exist, the
// lexically smallest one is returned. Every ticket is used exactly once,
// and the input is assumed to admit at least one valid itinerary.
//
// Approach: Hierholzer's algorithm for an Eulerian path on the directed
// graph of airports. Always follow the lexically smallest unused edge;
// emit an airport once it has no unused outgoing edges left (post-order),
// then reverse the emitted sequence.
func findItinerary(tickets [][]string) []string {
	// Adjacency list: departure airport -> destinations, sorted so the
	// smallest destination is consumed first.
	dic := make(map[string][]string, len(tickets))
	for _, t := range tickets {
		dic[t[0]] = append(dic[t[0]], t[1])
	}
	for from := range dic {
		sort.Strings(dic[from])
	}

	// Result holds every ticket plus the starting airport.
	result := make([]string, 0, len(tickets)+1)
	var dfs func(pos string)
	dfs = func(pos string) {
		// len of a missing (nil) map entry is 0, so no nil check is needed.
		for len(dic[pos]) > 0 {
			next := dic[pos][0]
			dic[pos] = dic[pos][1:] // consume the edge before recursing
			dfs(next)
		}
		// pos has no remaining outgoing edges: emit it.
		result = append(result, pos)
	}
	dfs("JFK")

	// The Eulerian path is produced in reverse; flip it in place.
	for i, j := 0, len(result)-1; i < j; i, j = i+1, j-1 {
		result[i], result[j] = result[j], result[i]
	}
	return result
}
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"os/exec"
	"strings"
	"testing"
)

// TestCC is an end-to-end test of the compiler: for every test/*.c fixture
// it compiles the C source to assembly with compileFile, assembles the
// generated .s with gcc, and runs the resulting binary. Any stage failing
// marks that fixture as failed and moves on to the next one.
func TestCC(t *testing.T) {
	files, err := ioutil.ReadDir("test")
	if err != nil {
		t.Fatal(err)
	}
	for _, finf := range files {
		if finf.IsDir() {
			continue
		}
		if !strings.HasSuffix(finf.Name(), ".c") {
			continue
		}
		tpath := "test/" + finf.Name()
		spath := "test/" + finf.Name() + ".s"
		bpath := "test/" + finf.Name() + ".bin"
		sfile, err := os.Create(spath)
		if err != nil {
			t.Fatal(err)
		}
		err = compileFile(tpath, sfile)
		// Close the assembly file before gcc reads it. The original code
		// never closed it, leaking one descriptor per fixture and risking
		// gcc seeing an unflushed file; a Close error also indicates the
		// assembly may be truncated, so surface it as a compile failure.
		if cerr := sfile.Close(); cerr != nil && err == nil {
			err = cerr
		}
		if err != nil {
			t.Errorf("compiling %s failed. %s", tpath, err)
			continue
		}
		gccout, err := exec.Command("gcc", spath, "-o", bpath).CombinedOutput()
		if err != nil {
			t.Log(string(gccout))
			t.Errorf("assembling %s failed. %s", spath, err)
			continue
		}
		bout, err := exec.Command(bpath).CombinedOutput()
		if err != nil {
			t.Log(string(bout))
			t.Errorf("running %s failed. %s", bpath, err)
			continue
		}
		if testing.Verbose() {
			fmt.Printf("%s OK\n", tpath)
		}
	}
}
package main

import "fmt"

// human holds a person's given name and family name.
type human struct {
	firstname, secondname string
}

// main builds a small list of people and prints each one with its position.
func main() {
	people := []human{
		{firstname: "Petr", secondname: "Jahoda"},
		{firstname: "Michal", secondname: "Hovorka"},
	}
	for index, element := range people {
		fmt.Println("Position: ", index, "\t first name :", element.firstname+", second name: "+element.secondname)
	}
}
package entity

import "fmt"

// Edges is a collection of graph edges.
type Edges []Edge

// Exist reports whether the collection contains an edge whose id matches id.
func (n Edges) Exist(id string) bool {
	for _, edge := range n {
		if edge.Data.Id == id {
			return true
		}
	}
	return false
}

// Edge represents one graph edge in the wire format expected by the
// consumer: all attributes nested under a "data" object.
type Edge struct {
	Data struct {
		Id     string `json:"id"`
		Source string `json:"source"`
		Target string `json:"target"`
		Label  string `json:"label"`
		Width  string `json:"width"`
	} `json:"data"`
}

// NewEdge builds an Edge from its parts; width is given in pixels and is
// rendered into the Data.Width field as "<n>px".
func NewEdge(id, source, target, label string, width int) Edge {
	var e Edge
	e.Data.Id = id
	e.Data.Source = source
	e.Data.Target = target
	e.Data.Label = label
	e.Data.Width = fmt.Sprintf("%dpx", width)
	return e
}
package main

import (
	"io"
	"log"
	"net/http"
)

// d handles the /dog route.
func d(res http.ResponseWriter, req *http.Request) {
	// Fixed typo in the response body ("si" -> "is").
	io.WriteString(res, "This is my route one")
}

// c handles the /cat route.
func c(res http.ResponseWriter, req *http.Request) {
	// Fixed typo in the response body ("si" -> "is").
	io.WriteString(res, "This is my route two")
}

// main registers the two routes on the default mux and serves on :8000.
func main() {
	http.HandleFunc("/dog", d)
	http.HandleFunc("/cat", c)

	// we can implement the following as well
	// http.Handle("/dog", http.HandlerFunc(d))
	// http.Handle("/cat", http.HandlerFunc(c))

	// ListenAndServe always returns a non-nil error; the original silently
	// discarded it (e.g. when the port is already in use).
	if err := http.ListenAndServe(":8000", nil); err != nil {
		log.Fatal(err)
	}
}
// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package span

import (
	"container/heap"
	"fmt"
	"strings"

	// Needed for roachpb.Span.String().
	_ "github.com/cockroachdb/cockroach/pkg/keys"
	"github.com/cockroachdb/cockroach/pkg/roachpb"
	"github.com/cockroachdb/cockroach/pkg/sql/covering"
	"github.com/cockroachdb/cockroach/pkg/util/hlc"
	"github.com/cockroachdb/cockroach/pkg/util/interval"
)

// frontierEntry represents a timestamped span. It is used as the nodes in both
// the interval tree and heap needed to keep the Frontier.
type frontierEntry struct {
	// id is a unique identifier allocated by the owning Frontier; it backs
	// the interval.Interface ID() method below.
	id int64
	// keys is the interval-tree range corresponding to span.
	keys interval.Range
	// span is the key span this entry covers.
	span roachpb.Span
	// ts is the timestamp currently tracked for this span.
	ts hlc.Timestamp

	// The index of the item in the frontierHeap, maintained by the
	// heap.Interface methods.
	index int
}

// ID implements interval.Interface.
func (s *frontierEntry) ID() uintptr {
	return uintptr(s.id)
}

// Range implements interval.Interface.
func (s *frontierEntry) Range() interval.Range {
	return s.keys
}

// String renders the entry as "[span @ ts]" for debugging output.
func (s *frontierEntry) String() string {
	return fmt.Sprintf("[%s @ %s]", s.span, s.ts)
}

// frontierHeap implements heap.Interface and holds `frontierEntry`s. Entries
// are sorted based on their timestamp such that the oldest will rise to the top
// of the heap.
type frontierHeap []*frontierEntry

// Len implements heap.Interface.
func (h frontierHeap) Len() int { return len(h) }

// Less implements heap.Interface. Entries are ordered by timestamp; ties are
// broken by the lesser start key so ordering is deterministic.
func (h frontierHeap) Less(i, j int) bool {
	if h[i].ts.EqOrdering(h[j].ts) {
		return h[i].span.Key.Compare(h[j].span.Key) < 0
	}
	return h[i].ts.Less(h[j].ts)
}

// Swap implements heap.Interface. It also keeps each entry's cached heap
// index in sync, which heap.Remove/heap.Fix rely on.
func (h frontierHeap) Swap(i, j int) {
	h[i], h[j] = h[j], h[i]
	h[i].index, h[j].index = i, j
}

// Push implements heap.Interface.
func (h *frontierHeap) Push(x interface{}) { n := len(*h) entry := x.(*frontierEntry) entry.index = n *h = append(*h, entry) } // Pop implements heap.Interface. func (h *frontierHeap) Pop() interface{} { old := *h n := len(old) entry := old[n-1] entry.index = -1 // for safety old[n-1] = nil // for gc *h = old[0 : n-1] return entry } // Frontier tracks the minimum timestamp of a set of spans. type Frontier struct { // tree contains `*frontierEntry` items for the entire current tracked // span set. Any tracked spans that have never been `Forward`ed will have a // zero timestamp. If any entries needed to be split along a tracking // boundary, this has already been done by `insert` before it entered the // tree. tree interval.Tree // minHeap contains the same `*frontierEntry` items as `tree`. Entries // in the heap are sorted first by minimum timestamp and then by lesser // start key. minHeap frontierHeap idAlloc int64 } // MakeFrontier returns a Frontier that tracks the given set of spans. func MakeFrontier(spans ...roachpb.Span) *Frontier { s := &Frontier{tree: interval.NewTree(interval.ExclusiveOverlapper)} for _, span := range spans { e := &frontierEntry{ id: s.idAlloc, keys: span.AsRange(), span: span, ts: hlc.Timestamp{}, } s.idAlloc++ if err := s.tree.Insert(e, true /* fast */); err != nil { panic(err) } heap.Push(&s.minHeap, e) } s.tree.AdjustRanges() return s } // Frontier returns the minimum timestamp being tracked. func (f *Frontier) Frontier() hlc.Timestamp { if f.minHeap.Len() == 0 { return hlc.Timestamp{} } return f.minHeap[0].ts } // PeekFrontierSpan returns one of the spans at the Frontier. func (f *Frontier) PeekFrontierSpan() roachpb.Span { if f.minHeap.Len() == 0 { return roachpb.Span{} } return f.minHeap[0].span } // Forward advances the timestamp for a span. Any part of the span that doesn't // overlap the tracked span set will be ignored. True is returned if the // frontier advanced as a result. 
// // Note that internally, it may be necessary to use multiple entries to // represent this timestamped span (e.g. if it overlaps with the tracked span // set boundary). Similarly, an entry created by a previous Forward may be // partially overlapped and have to be split into two entries. func (f *Frontier) Forward(span roachpb.Span, ts hlc.Timestamp) bool { prevFrontier := f.Frontier() f.insert(span, ts) return prevFrontier.Less(f.Frontier()) } func (f *Frontier) insert(span roachpb.Span, ts hlc.Timestamp) { entryKeys := span.AsRange() overlapping := f.tree.Get(entryKeys) // TODO(dan): OverlapCoveringMerge is overkill, do this without it. See // `tscache/treeImpl.Add` for inspiration. entryCov := covering.Covering{{Start: span.Key, End: span.EndKey, Payload: ts}} overlapCov := make(covering.Covering, len(overlapping)) for i, o := range overlapping { spe := o.(*frontierEntry) overlapCov[i] = covering.Range{ Start: spe.span.Key, End: spe.span.EndKey, Payload: spe, } } merged := covering.OverlapCoveringMerge([]covering.Covering{entryCov, overlapCov}) toInsert := make([]frontierEntry, 0, len(merged)) for _, m := range merged { // Compute the newest timestamp seen for this span and note whether it's // tracked. There will be either 1 or 2 payloads. If there's 2, it will // be the new span and the old entry. If it's 1 it could be either a new // span (which is untracked and should be ignored) or an old entry which // has been clipped. var mergedTs hlc.Timestamp var tracked bool for _, payload := range m.Payload.([]interface{}) { switch p := payload.(type) { case hlc.Timestamp: if mergedTs.Less(p) { mergedTs = p } case *frontierEntry: tracked = true if mergedTs.Less(p.ts) { mergedTs = p.ts } } } // TODO(dan): Collapse span-adjacent entries with the same value for // timestamp and tracked to save space. 
if tracked { toInsert = append(toInsert, frontierEntry{ id: f.idAlloc, keys: interval.Range{Start: m.Start, End: m.End}, span: roachpb.Span{Key: m.Start, EndKey: m.End}, ts: mergedTs, }) f.idAlloc++ } } // All the entries in `overlapping` have been replaced by updated ones in // `toInsert`, so remove them all from the tree and heap. needAdjust := false if len(overlapping) == 1 { spe := overlapping[0].(*frontierEntry) if err := f.tree.Delete(spe, false /* fast */); err != nil { panic(err) } heap.Remove(&f.minHeap, spe.index) } else { for i := range overlapping { spe := overlapping[i].(*frontierEntry) if err := f.tree.Delete(spe, true /* fast */); err != nil { panic(err) } heap.Remove(&f.minHeap, spe.index) } needAdjust = true } // Then insert! if len(toInsert) == 1 { if err := f.tree.Insert(&toInsert[0], false /* fast */); err != nil { panic(err) } heap.Push(&f.minHeap, &toInsert[0]) } else { for i := range toInsert { if err := f.tree.Insert(&toInsert[i], true /* fast */); err != nil { panic(err) } heap.Push(&f.minHeap, &toInsert[i]) } needAdjust = true } if needAdjust { f.tree.AdjustRanges() } } // Entries invokes the given callback with the current timestamp for each // component span in the tracked span set. func (f *Frontier) Entries(fn func(roachpb.Span, hlc.Timestamp)) { f.tree.Do(func(i interval.Interface) bool { spe := i.(*frontierEntry) fn(spe.span, spe.ts) return false }) } func (f *Frontier) String() string { var buf strings.Builder f.tree.Do(func(i interval.Interface) bool { if buf.Len() != 0 { buf.WriteString(` `) } buf.WriteString(i.(*frontierEntry).String()) return false }) return buf.String() }
package model

import "time"

// Message is a model for messages.
type Message struct {
	ID         uint      `gorm:"column:id" json:"id"`
	CreateTime time.Time `gorm:"column:time" json:"time"`
	Body       string    `gorm:"column:body" json:"body"`

	// Foreign keys
	SenderID uint `gorm:"column:sender_id" json:"sender_id"`
	RoomID   uint `gorm:"column:room_id" json:"room_id"`
}

// MessageHistory contains a message and a sender name.
type MessageHistory struct {
	Message
	SenderName string `gorm:"column:name" json:"userName"`
}

// TableName tells GORM where to find this record.
func (Message) TableName() string {
	return "messages"
}

// Validate performs validation on message model: the body must be non-empty
// and both foreign keys must be set. It returns the first violation found.
func (m *Message) Validate() error {
	switch {
	case len(m.Body) == 0:
		return &ValidationError{Field: "body", Message: "cannot be empty"}
	case m.SenderID == 0:
		return &ValidationError{Field: "userID", Message: "required"}
	case m.RoomID == 0:
		return &ValidationError{Field: "roomID", Message: "required"}
	default:
		return nil
	}
}
package operations // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command import ( "time" "github.com/go-openapi/errors" "github.com/go-openapi/runtime" cr "github.com/go-openapi/runtime/client" "github.com/go-openapi/swag" strfmt "github.com/go-openapi/strfmt" ) // NewGetResourcesParams creates a new GetResourcesParams object // with the default values initialized. func NewGetResourcesParams() *GetResourcesParams { var ( forceFullPageDefault bool = bool(false) forceTotalCountDefault bool = bool(false) recursiveDefault bool = bool(true) showHiddenItemsDefault bool = bool(false) ) return &GetResourcesParams{ ForceFullPage: &forceFullPageDefault, ForceTotalCount: &forceTotalCountDefault, Recursive: &recursiveDefault, ShowHiddenItems: &showHiddenItemsDefault, timeout: cr.DefaultTimeout, } } // NewGetResourcesParamsWithTimeout creates a new GetResourcesParams object // with the default values initialized, and the ability to set a timeout on a request func NewGetResourcesParamsWithTimeout(timeout time.Duration) *GetResourcesParams { var ( forceFullPageDefault bool = bool(false) forceTotalCountDefault bool = bool(false) recursiveDefault bool = bool(true) showHiddenItemsDefault bool = bool(false) ) return &GetResourcesParams{ ForceFullPage: &forceFullPageDefault, ForceTotalCount: &forceTotalCountDefault, Recursive: &recursiveDefault, ShowHiddenItems: &showHiddenItemsDefault, timeout: timeout, } } /*GetResourcesParams contains all the parameters to send to the API endpoint for the get resources operation typically these are written to a http.Request */ type GetResourcesParams struct { /*Accept*/ Accept *string /*AccessType*/ AccessType *string /*ExcludeFolder*/ ExcludeFolder *string /*Expanded*/ Expanded *bool /*FolderURI*/ FolderURI *string /*ForceFullPage*/ ForceFullPage *bool /*ForceTotalCount*/ ForceTotalCount *bool /*Limit*/ Limit *int32 /*Offset*/ Offset *int32 /*Q*/ Q *string /*Recursive*/ 
Recursive *bool /*ShowHiddenItems*/ ShowHiddenItems *bool /*SortBy*/ SortBy *string /*Type*/ Type *string timeout time.Duration } // WithAccept adds the accept to the get resources params func (o *GetResourcesParams) WithAccept(Accept *string) *GetResourcesParams { o.Accept = Accept return o } // WithAccessType adds the accessType to the get resources params func (o *GetResourcesParams) WithAccessType(AccessType *string) *GetResourcesParams { o.AccessType = AccessType return o } // WithExcludeFolder adds the excludeFolder to the get resources params func (o *GetResourcesParams) WithExcludeFolder(ExcludeFolder *string) *GetResourcesParams { o.ExcludeFolder = ExcludeFolder return o } // WithExpanded adds the expanded to the get resources params func (o *GetResourcesParams) WithExpanded(Expanded *bool) *GetResourcesParams { o.Expanded = Expanded return o } // WithFolderURI adds the folderUri to the get resources params func (o *GetResourcesParams) WithFolderURI(FolderURI *string) *GetResourcesParams { o.FolderURI = FolderURI return o } // WithForceFullPage adds the forceFullPage to the get resources params func (o *GetResourcesParams) WithForceFullPage(ForceFullPage *bool) *GetResourcesParams { o.ForceFullPage = ForceFullPage return o } // WithForceTotalCount adds the forceTotalCount to the get resources params func (o *GetResourcesParams) WithForceTotalCount(ForceTotalCount *bool) *GetResourcesParams { o.ForceTotalCount = ForceTotalCount return o } // WithLimit adds the limit to the get resources params func (o *GetResourcesParams) WithLimit(Limit *int32) *GetResourcesParams { o.Limit = Limit return o } // WithOffset adds the offset to the get resources params func (o *GetResourcesParams) WithOffset(Offset *int32) *GetResourcesParams { o.Offset = Offset return o } // WithQ adds the q to the get resources params func (o *GetResourcesParams) WithQ(Q *string) *GetResourcesParams { o.Q = Q return o } // WithRecursive adds the recursive to the get resources params func (o 
*GetResourcesParams) WithRecursive(Recursive *bool) *GetResourcesParams { o.Recursive = Recursive return o } // WithShowHiddenItems adds the showHiddenItems to the get resources params func (o *GetResourcesParams) WithShowHiddenItems(ShowHiddenItems *bool) *GetResourcesParams { o.ShowHiddenItems = ShowHiddenItems return o } // WithSortBy adds the sortBy to the get resources params func (o *GetResourcesParams) WithSortBy(SortBy *string) *GetResourcesParams { o.SortBy = SortBy return o } // WithType adds the type to the get resources params func (o *GetResourcesParams) WithType(Type *string) *GetResourcesParams { o.Type = Type return o } // WriteToRequest writes these params to a swagger request func (o *GetResourcesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { r.SetTimeout(o.timeout) var res []error if o.Accept != nil { // query param Accept var qrAccept string if o.Accept != nil { qrAccept = *o.Accept } qAccept := qrAccept if qAccept != "" { if err := r.SetQueryParam("Accept", qAccept); err != nil { return err } } } if o.AccessType != nil { // query param accessType var qrAccessType string if o.AccessType != nil { qrAccessType = *o.AccessType } qAccessType := qrAccessType if qAccessType != "" { if err := r.SetQueryParam("accessType", qAccessType); err != nil { return err } } } if o.ExcludeFolder != nil { // query param excludeFolder var qrExcludeFolder string if o.ExcludeFolder != nil { qrExcludeFolder = *o.ExcludeFolder } qExcludeFolder := qrExcludeFolder if qExcludeFolder != "" { if err := r.SetQueryParam("excludeFolder", qExcludeFolder); err != nil { return err } } } if o.Expanded != nil { // query param expanded var qrExpanded bool if o.Expanded != nil { qrExpanded = *o.Expanded } qExpanded := swag.FormatBool(qrExpanded) if qExpanded != "" { if err := r.SetQueryParam("expanded", qExpanded); err != nil { return err } } } if o.FolderURI != nil { // query param folderUri var qrFolderURI string if o.FolderURI != nil { qrFolderURI = 
*o.FolderURI } qFolderURI := qrFolderURI if qFolderURI != "" { if err := r.SetQueryParam("folderUri", qFolderURI); err != nil { return err } } } if o.ForceFullPage != nil { // query param forceFullPage var qrForceFullPage bool if o.ForceFullPage != nil { qrForceFullPage = *o.ForceFullPage } qForceFullPage := swag.FormatBool(qrForceFullPage) if qForceFullPage != "" { if err := r.SetQueryParam("forceFullPage", qForceFullPage); err != nil { return err } } } if o.ForceTotalCount != nil { // query param forceTotalCount var qrForceTotalCount bool if o.ForceTotalCount != nil { qrForceTotalCount = *o.ForceTotalCount } qForceTotalCount := swag.FormatBool(qrForceTotalCount) if qForceTotalCount != "" { if err := r.SetQueryParam("forceTotalCount", qForceTotalCount); err != nil { return err } } } if o.Limit != nil { // query param limit var qrLimit int32 if o.Limit != nil { qrLimit = *o.Limit } qLimit := swag.FormatInt32(qrLimit) if qLimit != "" { if err := r.SetQueryParam("limit", qLimit); err != nil { return err } } } if o.Offset != nil { // query param offset var qrOffset int32 if o.Offset != nil { qrOffset = *o.Offset } qOffset := swag.FormatInt32(qrOffset) if qOffset != "" { if err := r.SetQueryParam("offset", qOffset); err != nil { return err } } } if o.Q != nil { // query param q var qrQ string if o.Q != nil { qrQ = *o.Q } qQ := qrQ if qQ != "" { if err := r.SetQueryParam("q", qQ); err != nil { return err } } } if o.Recursive != nil { // query param recursive var qrRecursive bool if o.Recursive != nil { qrRecursive = *o.Recursive } qRecursive := swag.FormatBool(qrRecursive) if qRecursive != "" { if err := r.SetQueryParam("recursive", qRecursive); err != nil { return err } } } if o.ShowHiddenItems != nil { // query param showHiddenItems var qrShowHiddenItems bool if o.ShowHiddenItems != nil { qrShowHiddenItems = *o.ShowHiddenItems } qShowHiddenItems := swag.FormatBool(qrShowHiddenItems) if qShowHiddenItems != "" { if err := r.SetQueryParam("showHiddenItems", 
qShowHiddenItems); err != nil { return err } } } if o.SortBy != nil { // query param sortBy var qrSortBy string if o.SortBy != nil { qrSortBy = *o.SortBy } qSortBy := qrSortBy if qSortBy != "" { if err := r.SetQueryParam("sortBy", qSortBy); err != nil { return err } } } if o.Type != nil { // query param type var qrType string if o.Type != nil { qrType = *o.Type } qType := qrType if qType != "" { if err := r.SetQueryParam("type", qType); err != nil { return err } } } if len(res) > 0 { return errors.CompositeValidationError(res...) } return nil }
package main

// 139. Word Break
//
// Given a non-empty string s and a dictionary wordDict of non-empty words,
// report whether s can be segmented into a space-separated sequence of one
// or more dictionary words. Dictionary words may be reused; the dictionary
// contains no duplicates.
//
// Example: s = "applepenapple", wordDict = ["apple", "pen"] -> true
// Example: s = "catsandog", wordDict = ["cats", "dog", "sand", "and", "cat"] -> false
//
// Approach: top-down recursion memoized by the remaining suffix. For each
// prefix of the suffix that is a dictionary word, recurse on the rest.
func wordBreak(s string, wordDict []string) bool {
	// Dictionary as a set; struct{} values carry no payload, replacing the
	// original map[string]int used only for membership.
	dic := make(map[string]struct{}, len(wordDict))
	for _, w := range wordDict {
		dic[w] = struct{}{}
	}
	// memo caches whether each suffix is breakable.
	memo := make(map[string]bool)
	var check func(str string) bool
	check = func(str string) bool {
		if len(str) == 0 {
			return true
		}
		// Single lookup instead of the original check-then-index pair.
		if v, ok := memo[str]; ok {
			return v
		}
		for i := 1; i <= len(str); i++ {
			if _, ok := dic[str[:i]]; ok && check(str[i:]) {
				memo[str] = true
				return true
			}
		}
		memo[str] = false
		return false
	}
	return check(s)
}

func main() {
	println(wordBreak("cars", []string{"car", "ca", "rs"}))
}
/* * Copyright 2019 Nalej * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ // Test utils to avoid external dependencies. package derrors import ( "reflect" "testing" ) // AssertEquals utility function for the tests to avoid external dependencies. func assertEquals(t *testing.T, expected interface{}, current interface{}, message string) { if !reflect.DeepEqual(expected, current) { t.Errorf("%s\nExpected: %s, Current: %s", message, expected, current) } } // AssertTrue utility function for the tests to avoid external dependencies. func assertTrue(t *testing.T, condition bool, message string) { if !condition { t.Error(message) } }
package main import ( "context" "fmt" "time" "github.com/kasworld/h4o/appbase" "github.com/kasworld/h4o/appbase/appwindow" "github.com/kasworld/h4o/camera" "github.com/kasworld/h4o/eventtype" "github.com/kasworld/h4o/gls" "github.com/kasworld/h4o/gui" "github.com/kasworld/h4o/light" "github.com/kasworld/h4o/math32" "github.com/kasworld/h4o/node" "github.com/kasworld/h4o/renderer" "github.com/kasworld/h4o/util/framerater" "github.com/kasworld/h4o/util/helper" ) func main() { NewMtLogic().Run() } const ( BufferSize = 10 ) // multi thead logic type MtLogic struct { doClose func() `prettystring:"hide"` // logic to view channel l2vCh chan interface{} // view to logic channel v2lCh chan interface{} } func NewMtLogic() *MtLogic { rtn := &MtLogic{ l2vCh: make(chan interface{}, BufferSize), v2lCh: make(chan interface{}, BufferSize), } return rtn } func (ls *MtLogic) handleV2LCh() { for fromView := range ls.v2lCh { switch pk := fromView.(type) { default: fmt.Printf("unknown packet %v", pk) ls.doClose() // handle known packet from view } } } func (ls *MtLogic) Run() { ctx, closeCtx := context.WithCancel(context.Background()) ls.doClose = closeCtx defer closeCtx() go func() { // now run single thread view err := NewStView(ls.l2vCh, ls.v2lCh).Run() if err != nil { fmt.Printf("view err %v", err) } ls.doClose() }() go ls.handleV2LCh() timerInfoTk := time.NewTicker(1 * time.Second) defer timerInfoTk.Stop() loop: for { select { case <-ctx.Done(): break loop case <-timerInfoTk.C: if len(ls.v2lCh) >= cap(ls.v2lCh) { fmt.Printf("v2lCh full %v/%v", len(ls.v2lCh), cap(ls.v2lCh)) break loop } if len(ls.l2vCh) >= cap(ls.l2vCh) { fmt.Printf("l2vCh full %v/%v", len(ls.l2vCh), cap(ls.l2vCh)) break loop } } } } // single thread view // runtime.LockOSThread type StView struct { // logic to view channel l2vCh chan interface{} // view to logic channel v2lCh chan interface{} appBase *appbase.AppBase scene *node.Node cam *camera.Camera camZpos float32 pLight *light.Point frameRater 
*framerater.FrameRater // Render loop frame rater labelFPS *gui.Label // header FPS label } func NewStView(l2vCh chan interface{}, v2lCh chan interface{}) *StView { rtn := &StView{ l2vCh: l2vCh, v2lCh: v2lCh, } return rtn } func (cv *StView) Run() error { if err := cv.glInit(); err != nil { return err } // Set background color to gray cv.appBase.Gls().ClearColor(0.5, 0.5, 0.5, 1.0) cv.appBase.Run(cv.updateGL) return nil } func (cv *StView) updateGL(renderer *renderer.Renderer, deltaTime time.Duration) { // Start measuring this frame cv.frameRater.Start() cv.appBase.Gls().Clear(gls.DEPTH_BUFFER_BIT | gls.STENCIL_BUFFER_BIT | gls.COLOR_BUFFER_BIT) renderer.Render(cv.scene, cv.cam) cv.handle_l2vCh() // Control and update FPS cv.frameRater.Wait() cv.updateFPS() } func (cv *StView) glInit() error { // Create application and scene cv.appBase = appbase.New("h4o clock calendar", 1920, 1080) cv.scene = node.NewNode() // Set the scene to be managed by the gui manager gui.Manager().Set(cv.scene) // Create perspective camera cv.cam = camera.New(1) cv.cam.SetFar(1400) cv.camZpos = 100 cv.cam.SetPosition(0, 0, cv.camZpos) cv.scene.Add(cv.cam) // Set up orbit control for the camera // camera.NewOrbitControl(cv.cam) cv.appBase.Subscribe(eventtype.OnWindowSize, cv.onResize) cv.onResize(eventtype.OnResize, nil) // Create and add lights to the scene cv.scene.Add(light.NewAmbient(&math32.Color{1.0, 1.0, 1.0}, 0.8)) cv.pLight = light.NewPoint(&math32.Color{1, 1, 1}, 5.0) cv.pLight.SetPosition(1, 0, 2) cv.scene.Add(cv.pLight) // Create and add an axis helper to the scene cv.scene.Add(helper.NewAxes(100)) cv.frameRater = framerater.NewFrameRater(60) cv.labelFPS = gui.NewLabel(" ") cv.labelFPS.SetFontSize(20) cv.labelFPS.SetLayoutParams(&gui.HBoxLayoutParams{AlignV: gui.AlignCenter}) lightTextColor := math32.Color4{0.8, 0.8, 0.8, 1} cv.labelFPS.SetColor4(&lightTextColor) cv.scene.Add(cv.labelFPS) gui.Manager().SubscribeID(eventtype.OnMouseUp, cv, cv.onMouse) 
gui.Manager().SubscribeID(eventtype.OnMouseDown, cv, cv.onMouse) gui.Manager().SubscribeID(eventtype.OnScroll, &cv, cv.onScroll) return nil } func (cv *StView) handle_l2vCh() { for len(cv.l2vCh) > 0 { fromLogic := <-cv.l2vCh switch pk := fromLogic.(type) { default: fmt.Printf("unknown packet %v", pk) // handle known packet from logic } } } // Set up callback to update viewport and camera aspect ratio when the window is resized func (cv *StView) onResize(evname eventtype.EventType, ev interface{}) { // Get framebuffer size and update viewport accordingly width, height := cv.appBase.GetSize() cv.appBase.Gls().Viewport(0, 0, int32(width), int32(height)) // Update the camera's aspect ratio cv.cam.SetAspect(float32(width) / float32(height)) } // UpdateFPS updates the fps value in the window title or header label func (cv *StView) updateFPS() { // Get the FPS and potential FPS from the frameRater fps, pfps, ok := cv.frameRater.FPS(time.Duration(60) * time.Millisecond) if !ok { return } // Show the FPS in the header label cv.labelFPS.SetText(fmt.Sprintf("%3.1f / %3.1f", fps, pfps)) } // onMouse is called when an OnMouseDown/OnMouseUp event is received. func (cv *StView) onMouse(evname eventtype.EventType, ev interface{}) { switch evname { case eventtype.OnMouseDown: // gui.Manager().SetCursorFocus(cv) mev := ev.(*appwindow.MouseEvent) switch mev.Button { case appwindow.MouseButtonLeft: // Rotate case appwindow.MouseButtonMiddle: // Zoom case appwindow.MouseButtonRight: // Pan } case eventtype.OnMouseUp: // gui.Manager().SetCursorFocus(nil) } } // onScroll is called when an OnScroll event is received. func (cv *StView) onScroll(evname eventtype.EventType, ev interface{}) { zF := float32(1.5) sev := ev.(*appwindow.ScrollEvent) if sev.Yoffset > 0 { cv.camZpos *= zF if cv.camZpos > 1000 { cv.camZpos = 1000 } } else if sev.Yoffset < 0 { cv.camZpos /= zF if cv.camZpos < 10 { cv.camZpos = 10 } } }
package main

import "fmt"

// Cat models a basic cat with a fur color and a name.
type Cat struct {
	Color string
	Name  string
}

// BlackCat embeds Cat (similar to derivation) and adds a sex field.
type BlackCat struct {
	Cat // embedded "base" type
	sex string
}

// NewCat "constructs the base type" with the given name.
func NewCat(name string) *Cat {
	fmt.Println("===================================")
	return &Cat{
		Name: name,
	}
}

// NewBlackCat "constructs the derived type" with the given fur color.
func NewBlackCat(color string) *BlackCat {
	cat := &BlackCat{}
	// Bug fix: the parameter is a color, so store it in the embedded
	// Cat.Color field. The original assigned it to sex, which left Color
	// empty and made main1 print an empty string.
	cat.Color = color
	return cat
}

func main1() {
	test := NewBlackCat("red")
	fmt.Println(test.Color)
}
package main

import (
	"log"
	"net/http"
)

// main serves three demo routes on localhost:3000. The mux picks the
// longest matching registered pattern, so /go/lalala wins over /go/ and /.
func main() {
	mux := http.NewServeMux()

	mux.HandleFunc("/go/lalala", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ola mundo go lalala\n\n"))
	})

	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ola mundo\n\n"))
	})

	mux.HandleFunc("/go/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ola mundo go\n\n"))
	})

	// Bug fix: the error was silently discarded, hiding failures such as
	// the port already being in use.
	log.Fatal(http.ListenAndServe("localhost:3000", mux))
}
// Package configs holds static configuration constants for the application.
package configs

const (
	// ProjectName is the canonical name of this project.
	ProjectName = "gsp2md"
)
package get import ( "context" "github.com/shiv3/slackube/app/adapter/slacksender" "github.com/shiv3/slackube/app/view/slack/common" "github.com/slack-go/slack/slackevents" "github.com/slack-go/slack" ) func (h GetHandler) GetNs(ctx context.Context, ev *slackevents.AppMentionEvent) error { res, err := h.usecases.GetNameSpace(ctx) if err != nil { return err } var table [][]string for _, re := range res { table = append(table, []string{re.Name, re.Status, re.Age}) } //var list []string //for _, re := range res { // list = append(list, re.Name) //} return slacksender.NewSender(h.slackClient, ev.Channel). PostBlocks([]slack.Block{common.TableBlock([]string{"Name", "Status", "Age"}, table)}) }
package string import ( "fmt" "github.com/project-flogo/core/data" "github.com/project-flogo/core/data/coerce" "github.com/project-flogo/core/data/expression/function" ) func init() { _ = function.Register(&fnEquals{}) } type fnEquals struct { } func (fnEquals) Name() string { return "equals" } func (fnEquals) Sig() (paramTypes []data.Type, isVariadic bool) { return []data.Type{data.TypeString, data.TypeString}, false } func (fnEquals) Eval(params ...interface{}) (interface{}, error) { s1, err := coerce.ToString(params[0]) if err != nil { return nil, fmt.Errorf("string.equals function first parameter [%+v] must be string", params[0]) } s2, err := coerce.ToString(params[1]) if err != nil { return nil, fmt.Errorf("string.quals function second parameter [%+v] must be string", params[1]) } return s1 == s2, nil }
package eventloop import ( "log" "runtime" "sync" ) type InitFunc func(loop *EventLoop) type Worker struct { initFunc InitFunc loop *EventLoop done sync.WaitGroup } func NewWorker(initFunc InitFunc) *Worker { worker := &Worker{ loop: NewEventLoop(), initFunc: initFunc, } worker.done.Add(1) go func() { defer worker.done.Done() worker.worker() }() return worker } func (this *Worker) GetLoop() *EventLoop { return this.loop } func (this *Worker) Join() { this.done.Wait() } func (this *Worker) worker() { defer func() { if err := recover(); err != nil { const size = 64 << 10 buf := make([]byte, size) buf = buf[:runtime.Stack(buf, false)] log.Printf("eventloop: panic worker: %v\n%s", err, buf) } }() if initFunc := this.initFunc; initFunc != nil { initFunc(this.loop) } this.loop.Loop() }
package main import ( "encoding/json" "fmt" "io/ioutil" "log" "os" "github.com/codegangsta/cli" ) const ( // ERROR Codes e1000 = "ERROR[1000]\tunable to load config file " e1010 = "ERROR[1010]\tunable to parse config file " ) type ( // Conf is the configuration for the server's dynamic settings Conf struct { LogFile string `json:"log"` Datastore string `json:"datastore"` } ) // Config is the global variable for storing the config var Config Conf func main() { app := cli.NewApp() app.Name = "Training Server" app.Author = "Pasquale D'Agostino" app.Version = "0.1.0" app.Usage = "CLI for interacting with a Go Backend Training Server" app.Flags = []cli.Flag{ cli.StringFlag{ Name: "config", Usage: "path to the json file that configures the server's dynamic settings", EnvVar: "APP_CONFIG", }, } app.Action = func(c *cli.Context) (err error) { // If no flags or args passed via the cli then show help and close if !c.Args().Present() && c.NumFlags() < 1 { cli.ShowAppHelp(c) } return err } app.Commands = []cli.Command{ { Name: "run", Aliases: []string{"r"}, Usage: "configure and run the server", Action: func(c *cli.Context) error { err := setConf(c.GlobalString("config")) return err }, }, { Name: "migrate", Aliases: []string{"m"}, Usage: "performs the functions to migrate the backend tables", Action: func(c *cli.Context) error { err := setConf(c.GlobalString("config")) return err }, }, } if err := app.Run(os.Args); err != nil { log.Fatal(err) } } func setConf(path string) error { conf, err := ioutil.ReadFile(path) if err != nil { return fmt.Errorf(e1000 + path) } if err = json.Unmarshal(conf, &Config); err != nil { return fmt.Errorf(e1010 + path) } return err }
package server

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
	"log"
	"net"
	"sync"
	"time"
)

// NewKannaTcpConnection wraps an accepted TCP connection, registers it in the
// server's connection table, and returns it. The reader/writer goroutines are
// not started here — call Start.
func NewKannaTcpConnection(server *KannaServer, ID int, conn *net.TCPConn, handler MsgHandler) *KannaTCPConnection {
	c := &KannaTCPConnection{
		sKannaConnection: &sKannaConnection{
			Server:       server,
			ID:           ID,
			isClosed:     false,
			last:         time.Now().UnixNano(),
			msgHandler:   handler,
			ExitBuffChan: make(chan bool, 1),
			msgChan:      make(chan []byte), // unbuffered: SendMsg blocks until the writer takes it
			AllowTelnet:  false,
			Props:        &sync.Map{},
		},
		Conn: conn,
	}
	c.Server.AddConn(c)
	return c
}

// GetID returns the connection's numeric ID.
func (c *KannaTCPConnection) GetID() int {
	return c.ID
}

// SetAllowTelnet toggles raw (telnet) read mode for this connection.
func (c *KannaTCPConnection) SetAllowTelnet(to bool) {
	c.AllowTelnet = to
}

// SendMsg queues data for the writer goroutine, optionally bumping the
// server's send counter.
//
// NOTE(review): isClosed is read here without synchronization while Stop
// mutates it from another goroutine; and the select's default branch can
// still send on msgChan after Stop has signalled ExitBuffChan, which would
// block forever once startWriter has returned — confirm intended lifecycle.
func (c *KannaTCPConnection) SendMsg(data []byte) error {
	if c.isClosed {
		return fmt.Errorf("connection closed")
	}
	///c.Server.SendCount++
	if c.Server.NeedSendCount {
		c.Server.SendCount++
	}
	select {
	case <-c.ExitBuffChan:
		return fmt.Errorf("conn is closed")
	default:
		c.msgChan <- data
	}
	return nil
}

// startWriter loops writing queued messages to the socket until a write
// fails or ExitBuffChan signals shutdown.
func (c *KannaTCPConnection) startWriter() {
	log.Println(c.ID, "Writer running")
	for {
		select {
		case data := <-c.msgChan:
			if _, err := c.Conn.Write(data); err != nil {
				log.Println("Send Data Err:", err)
				return
			}
		case <-c.ExitBuffChan:
			return
		}
	}
}

// PackHeadLen is the size in bytes of the length-prefix frame header.
const PackHeadLen = 4

// startReader loops reading from the socket until the connection closes.
// In telnet mode it reads raw chunks of up to 128 bytes; otherwise it reads
// frames prefixed by a 4-byte big-endian uint32 payload length.
func (c *KannaTCPConnection) startReader() {
	log.Println(c.ID, "Reader running")
	log.Println(c.Conn)
	defer c.Stop()
	for {
		if c.isClosed == true {
			break
		}
		if c.AllowTelnet {
			data := make([]byte, 128)
			size, err := c.Conn.Read(data)
			if err != nil {
				log.Println("read msg error", err)
				break
			}
			if size > 0 {
				// NOTE(review): the full 128-byte buffer is forwarded, not
				// data[:size] — handlers may see trailing zero bytes; confirm.
				req := Request{conn: c, msg: data}
				if c.msgHandler != nil {
					go c.msgHandler(&req)
				}
			}
		} else {
			headData := make([]byte, PackHeadLen)
			if _, err := io.ReadFull(c.Conn, headData); err != nil {
				fmt.Println("read msg head error ", err)
				return
			}
			dataBuff := bytes.NewReader(headData)
			var msgLen uint32
			if err := binary.Read(dataBuff, binary.BigEndian, &msgLen); err != nil {
				fmt.Println("read msg data error - binary read", err)
				return
			}
			if msgLen > 1000 {
				fmt.Println("message too large, auto closing", msgLen)
				c.Stop()
				return
			}
			// NOTE(review): msgLen == 1000 matches neither branch, so the
			// payload is never consumed and the stream desynchronizes —
			// presumably one bound should be inclusive; confirm.
			if msgLen > 0 && msgLen < 1000 {
				data := make([]byte, msgLen)
				if _, err := io.ReadFull(c.Conn, data); err != nil {
					fmt.Println("read msg data error ", err)
					return
				}
				req := Request{conn: c, msg: data}
				if c.msgHandler != nil {
					// Each message is dispatched on its own goroutine.
					go c.msgHandler(&req)
				}
			}
		}
	}
}

// Start launches the writer and reader goroutines for this connection.
func (c *KannaTCPConnection) Start() {
	go c.startWriter()
	go c.startReader()
}

// Stop tears the connection down once: it flags the connection closed,
// signals the writer, closes the socket, fires the server's end-of-connection
// hook, deregisters the connection, and closes ExitBuffChan.
//
// NOTE(review): the isClosed check-then-set is not atomic, so two concurrent
// Stop calls could both proceed and double-close ExitBuffChan — confirm Stop
// is only ever reached from one goroutine at a time.
func (c *KannaTCPConnection) Stop() {
	if c.isClosed == true {
		return
	}
	log.Println(c.ID, " will quit")
	c.isClosed = true

	c.ExitBuffChan <- true
	c.Conn.Close()

	if c.Server.OnConnEnd != nil {
		c.Server.OnConnEnd(c)
	}

	c.Server.RemoveConn(c)
	close(c.ExitBuffChan)
}

// SetMsgHandler replaces the callback invoked for each inbound message.
func (c *KannaTCPConnection) SetMsgHandler(h MsgHandler) {
	c.msgHandler = h
}

// GetProps returns the connection's free-form property map.
func (c *KannaTCPConnection) GetProps() *sync.Map {
	return c.Props
}

// IsClosed reports whether Stop has run for this connection.
func (c *KannaTCPConnection) IsClosed() bool {
	return c.isClosed
}
// Package stringutil re-exports string helpers from the upstream example
// module so local callers do not depend on it directly.
package stringutil

import "github.com/golang/example/stringutil"

// Reverse returns s reversed; it delegates to the upstream stringutil
// implementation.
func Reverse(s string) string {
	return stringutil.Reverse(s)
}
/* * @lc app=leetcode.cn id=769 lang=golang * * [769] 最多能完成排序的块 */ package leetcode // @lc code=start func maxChunksToSorted(arr []int) int { max := func (a int, b int) int { if a > b { return a } else { return b } } res, m := 0, 0 for i, v := range arr { m = max(v, m) if m == i { res++ } } return res } // @lc code=end
package commands

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"os"
	"os/exec"
	"path/filepath"
	"strings"

	build "github.com/Azure/acr-builder/pkg"
	"github.com/Azure/acr-builder/pkg/constants"
	dockerbuild "github.com/docker/cli/cli/command/image/build"
	"github.com/docker/docker/builder/remotecontext/git"
	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/streamformatter"
	"github.com/docker/docker/pkg/urlutil"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

// NewDockerSource creates a new passthrough source for the given build
// context string and dockerfile path.
func NewDockerSource(context, dockerfile string) build.Source {
	return &DockerSource{
		context:    context,
		dockerfile: dockerfile,
	}
}

// dockerSourceType classifies how the build context was obtained.
type dockerSourceType int

const (
	dockerSourceLocal dockerSourceType = iota // local directory (or empty context)
	dockerSourceGit                           // cloned from a git URL
	dockerSourceDockerfile                    // dockerfile-only context read from a stream
	dockerSourceArchive                       // tar archive read from a stream
)

// DockerSource is a source we can pass directly into docker.
type DockerSource struct {
	// context is the literal string representing docker build context. It can be a directory, archive url, git url or "-"
	context    string
	dockerfile string
	tracker    *DirectoryTracker
	gitHeadRev string
}

// Obtain obtains the source: it expands run-context variables, materializes
// the build context (directory / git clone / stream / URL), chdirs into it
// with tracking, and, for git sources, records the HEAD revision SHA.
func (s *DockerSource) Obtain(runner build.Runner) error {
	runContext := runner.GetContext()
	context := runContext.Expand(s.context)
	dockerfile := runContext.Expand(s.dockerfile)
	// stdin can feed either the context or the dockerfile, but not both.
	if context == constants.FromStdin && dockerfile == constants.FromStdin {
		return fmt.Errorf("invalid argument: can't use stdin for both build context and dockerfile")
	}
	sourceType, workingDir, err := s.ensureContext(runner, context, dockerfile)
	if err != nil {
		return err
	}
	if workingDir != "" {
		s.tracker, err = ChdirWithTracking(runner, workingDir)
		if err != nil {
			return err
		}
	}
	if sourceType == dockerSourceGit {
		// Best effort: a failure here only means the output will lack the
		// git head revision SHA.
		sha, queryErr := runner.QueryCmd("git", []string{"rev-parse", "--verify", "HEAD"})
		if queryErr != nil {
			logrus.Errorf("Error querying for git head rev: %s, output will not contain git head revision SHA", queryErr)
		} else {
			s.gitHeadRev = sha
		}
	}
	return nil
}

// Return returns the source, restoring the working directory that
// ChdirWithTracking recorded in Obtain (if any).
func (s *DockerSource) Return(runner build.Runner) error {
	if s.tracker != nil {
		return s.tracker.Return(runner)
	}
	return nil
}

// Export exports the source's environment contribution: the raw docker build
// context string.
func (s *DockerSource) Export() []build.EnvVar {
	return []build.EnvVar{
		{Name: constants.ExportsDockerBuildContext, Value: s.context},
	}
}

// Remark makes a remark to the dependencies: for git sources it attaches the
// HEAD revision captured during Obtain.
func (s *DockerSource) Remark(runner build.Runner, dependencies *build.ImageDependencies) {
	if len(s.gitHeadRev) > 0 {
		dependencies.Git = &build.GitReference{
			GitHeadRev: s.gitHeadRev,
		}
	}
}

// ensureContext resolves the context string to a concrete source type and
// working directory; see docker cli image.runbuild for the equivalent logic.
// Precedence: empty -> local, "-" -> stdin stream, git URL -> clone,
// other URL -> download, otherwise it must be an existing local directory.
func (s *DockerSource) ensureContext(runner build.Runner, context, dockerfile string) (sourceType dockerSourceType, workingDir string, err error) {
	if context == "" {
		sourceType = dockerSourceLocal
		return
	} else if context == constants.FromStdin {
		return s.ensureContextFromReader(runner, runner.GetStdin(), workingDir, dockerfile)
	} else if urlutil.IsGitURL(context) || isVstsGitURL(context) {
		workingDir, err = s.ensureContextFromGitURL(context)
		sourceType = dockerSourceGit
		return
	} else if urlutil.IsURL(context) {
		return s.ensureContextFromURL(runner, os.Stdout, workingDir, context, dockerfile)
	}
	var isDir bool
	isDir, err = runner.GetFileSystem().DoesDirExist(context)
	if err != nil {
		err = errors.Wrapf(err, "Failed to look up context from path %s, note that the context path must be a directory. To use archive as a source, please pipe it in with stdin", context)
		return
	}
	if isDir {
		sourceType = dockerSourceLocal
		workingDir = context
		return
	}
	err = fmt.Errorf("Unable to determine context type for context \"%s\". Dependency scanning will NOT work as expected", context)
	return
}

// archiveHeaderSize is how many bytes must be peeked to recognize a tar
// archive header.
const archiveHeaderSize = 512

// ensureContextFromReader materializes a context streamed from r into a new
// temp directory; see dockerbuild.GetContextFromReader. If the stream is a
// tar archive it is untarred as the full context; otherwise it is treated as
// a lone Dockerfile written into the temp directory.
func (s *DockerSource) ensureContextFromReader(runner build.Runner, r io.Reader, workingDir, dockerfile string) (sourceType dockerSourceType, tempDir string, err error) {
	buf := bufio.NewReader(r)
	var magic []byte
	// Peek (not read) so the bytes stay available for the untar/write below.
	magic, err = buf.Peek(archiveHeaderSize)
	if err != nil && err != io.EOF {
		err = fmt.Errorf("failed to peek context header from STDIN: %v", err)
		return
	}
	var fs = runner.GetFileSystem()
	tempDir, err = fs.CreateTempDir()
	if err != nil {
		return
	}
	if dockerbuild.IsArchive(magic) {
		sourceType = dockerSourceArchive
		err = archive.Untar(buf, tempDir, nil)
		return
	}
	sourceType = dockerSourceDockerfile
	if dockerfile == "" {
		dockerfile = dockerbuild.DefaultDockerfileName
	} else if dockerfile == "-" {
		dockerfile = dockerbuild.DefaultDockerfileName
		// Following the same undesirable behavior from docker cli in the special case "echo $dockerfile | docker build -f - $docker_file_url"
		logrus.Errorf("Warning: Dockerfile from context stream would be overwritten by stdin")
	}
	err = fs.WriteFile(filepath.Join(tempDir, dockerfile), buf)
	return
}

// ensureContextFromGitURL clones gitURL into a temporary directory and
// returns the checkout root; see dockerbuild.GetContextFromGitURL.
func (s *DockerSource) ensureContextFromGitURL(gitURL string) (string, error) {
	if _, err := exec.LookPath("git"); err != nil {
		return "", errors.Wrapf(err, "unable to find 'git'")
	}
	checkoutRoot, err := git.Clone(gitURL)
	if err != nil {
		return "", errors.Wrapf(err, "unable to 'git clone' to temporary context directory")
	}
	return checkoutRoot, err
}

// ensureContextFromURL downloads the remote context (with progress reporting
// on out) and hands the stream to ensureContextFromReader; see
// dockerbuild.GetContextFromURL.
func (s *DockerSource) ensureContextFromURL(runner build.Runner, out io.Writer, workingDir, remoteURL, dockerfile string) (dockerSourceType, string, error) {
	response, err := s.getWithStatusError(remoteURL)
	if err != nil {
		return 0, "", errors.Errorf("unable to download remote context %s: %v", remoteURL, err)
	}
	progressOutput := streamformatter.NewProgressOutput(out)
	progReader := progress.NewProgressReader(response.Body, progressOutput, response.ContentLength, "", fmt.Sprintf("Downloading build context from remote url: %s", remoteURL))
	defer func(response *http.Response) {
		err := response.Body.Close()
		if err != nil {
			logrus.Errorf("Failed to close http response from url: %s, error: %s", remoteURL, err)
		}
	}(response)
	return s.ensureContextFromReader(runner, progReader, workingDir, dockerfile)
}

// getWithStatusError GETs url and converts HTTP error statuses (>= 400) into
// Go errors that include the response body.
func (s *DockerSource) getWithStatusError(url string) (resp *http.Response, err error) {
	if resp, err = http.Get(url); err != nil {
		return nil, err
	}
	// Anything under 400 are considered non-error and we will try to parse the body
	if resp.StatusCode < 400 {
		return resp, nil
	}
	msg := fmt.Sprintf("failed to GET %s with status %s", url, resp.Status)
	body, err := ioutil.ReadAll(resp.Body)
	defer func(resp *http.Response) {
		if err := resp.Body.Close(); err != nil {
			logrus.Errorf("Error closing response body: %s", err)
		}
	}(resp)
	if err != nil {
		return nil, errors.Wrapf(err, msg+": error reading body")
	}
	return nil, fmt.Errorf(msg+": %s", bytes.TrimSpace(body))
}

// isVstsGitURL reports whether s looks like an Azure DevOps (VSTS) git repo
// URL: https, a *.visualstudio.com host, a /_git/ path segment, and no query
// parameters.
func isVstsGitURL(s string) bool {
	url, err := url.Parse(strings.ToLower(s))
	if err != nil {
		return false
	}
	return url.Scheme == "https" &&
		strings.HasSuffix(url.Host, ".visualstudio.com") &&
		strings.Contains(url.Path, "/_git/") &&
		len(url.Query()) == 0
}
/* Copyright 2019 The Ceph-CSI Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cephfs import ( "testing" ) func init() { } func TestKernelSupportsQuota(t *testing.T) { supportsQuota := []string{ "4.17.0", "5.0.0", "4.17.0-rc1", "4.18.0-80.el8", "3.10.0-1062.el7.x86_64", // 1st backport "3.10.0-1062.4.1.el7.x86_64", // updated backport } noQuota := []string{ "2.6.32-754.15.3.el6.x86_64", // too old "3.10.0-123.el7.x86_64", // too old for backport "3.10.0-1062.4.1.el8.x86_64", // nonexisting RHEL-8 kernel "3.11.0-123.el7.x86_64", // nonexisting RHEL-7 kernel } for _, kernel := range supportsQuota { ok := kernelSupportsQuota(kernel) if !ok { t.Errorf("support expected for %s", kernel) } } for _, kernel := range noQuota { ok := kernelSupportsQuota(kernel) if ok { t.Errorf("no support expected for %s", kernel) } } }
package importer import ( "encoding/json" "errors" "github.com/rs/zerolog/log" "io/ioutil" "magicTGArchive/internal/pkg/mongodb" "net/http" "strconv" ) /* RequestAllCards receives a response with type *http.Response from the mtg api containing 100 cards. Returning the response and an error */ func RequestAllCards(page int) (mongodb.MultipleCards, error) { var response mongodb.MultipleCards var resp *http.Response var err error var body []byte //GET request to URL with page param if resp, err = http.Get("https://api.magicthegathering.io/v1/cards?page="+strconv.Itoa(page)); err != nil{ log.Error().Timestamp().Err(err).Msg("Error: problem with http GET request\n") return response, err } log.Info().Timestamp().Msgf("HTTP GET REQUEST TO https://api.magicthegathering.io/v1/cards?page=\n",page) defer func() { if err = resp.Body.Close(); err != nil { log.Fatal().Timestamp().Err(err).Msg("Fatal: couldn't close response body\n") } }() //checks if there is an http status code other than 200 in the response if resp.StatusCode != 200{ err = errors.New("http statuscode != 200") log.Error().Timestamp().Err(err).Msgf("Error: Http status code:\n", resp.StatusCode) return response, err } //reads response body into []byte if body, err = ioutil.ReadAll(resp.Body); err != nil { log.Error().Timestamp().Err(err).Msg("Error: couldn't read from response body\n") return response, err } //parses response body []byte values into response if err = json.Unmarshal(body, &response); err != nil { log.Error().Timestamp().Err(err).Msg("Error: couldn't unmarshal body into MTGDevAPIResponse struct\n") return response, err } return response, err }
package leetcode

import (
	"testing"

	"github.com/go-playground/assert/v2"
)

// TestLongestCommonPrefix runs table-driven cases through
// longestCommonPrefix and compares each result with the recorded value.
//
// NOTE(review): the key "except" is presumably a typo for "expect"; it is
// used consistently for both writes and reads, so behavior is unaffected.
func TestLongestCommonPrefix(t *testing.T) {
	data := []map[string]interface{}{
		{
			"test":   []string{"flower", "flow", "flight"},
			"except": "fl",
		},
		{
			"test":   []string{"summer", "summ", "summmmmmmmmmm"},
			"except": "summ",
		},
	}
	for _, v := range data {
		// Type-assert the input slice out of the interface{} map value.
		s, _ := v["test"].([]string)
		prefix := longestCommonPrefix(s)
		assert.Equal(t, prefix, v["except"])
	}
}
/*
Copyright 2023 Gravitational, Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package web

import (
	"context"
	"net/http"

	"github.com/gravitational/trace"
	"github.com/julienschmidt/httprouter"

	"github.com/gravitational/teleport/api/types"
	"github.com/gravitational/teleport/lib/httplib"
	"github.com/gravitational/teleport/lib/integrations/awsoidc"
	"github.com/gravitational/teleport/lib/reversetunnel"
	"github.com/gravitational/teleport/lib/web/ui"
)

// awsOIDCListDatabases returns a list of databases using the ListDatabases
// action of the AWS OIDC Integration. The request body carries the region,
// engine filters, RDS type and pagination token; the response echoes the next
// pagination token alongside the databases converted to the UI format.
func (h *Handler) awsOIDCListDatabases(w http.ResponseWriter, r *http.Request, p httprouter.Params, sctx *SessionContext, site reversetunnel.RemoteSite) (interface{}, error) {
	ctx := r.Context()

	var req ui.AWSOIDCListDatabasesRequest
	if err := httplib.ReadJSON(r, &req); err != nil {
		return nil, trace.Wrap(err)
	}

	// Build the per-integration AWS client config (token, role ARN, region).
	awsClientReq, err := h.awsOIDCClientRequest(r.Context(), req.Region, p, sctx, site)
	if err != nil {
		return nil, trace.Wrap(err)
	}

	listDBsClient, err := awsoidc.NewListDatabasesClient(ctx, awsClientReq)
	if err != nil {
		return nil, trace.Wrap(err)
	}

	resp, err := awsoidc.ListDatabases(ctx,
		listDBsClient,
		awsoidc.ListDatabasesRequest{
			Region:    req.Region,
			NextToken: req.NextToken,
			Engines:   req.Engines,
			RDSType:   req.RDSType,
		},
	)
	if err != nil {
		return nil, trace.Wrap(err)
	}

	return ui.AWSOIDCListDatabasesResponse{
		NextToken: resp.NextToken,
		Databases: ui.MakeDatabases(resp.Databases, nil, nil),
	}, nil
}

// awsOIDCClientRequest receives a request to execute an action for the AWS
// OIDC integrations: it resolves the integration named in the route params,
// verifies it is of the AWS OIDC subkind, generates an AWS OIDC token for the
// cluster's issuer, and returns the assembled AWS client request.
// (Comment fix: previously misspelled "awsOIDClientRequest".)
func (h *Handler) awsOIDCClientRequest(ctx context.Context, region string, p httprouter.Params, sctx *SessionContext, site reversetunnel.RemoteSite) (*awsoidc.AWSClientRequest, error) {
	integrationName := p.ByName("name")
	if integrationName == "" {
		return nil, trace.BadParameter("an integration name is required")
	}

	clt, err := sctx.GetUserClient(ctx, site)
	if err != nil {
		return nil, trace.Wrap(err)
	}

	integration, err := clt.GetIntegration(ctx, integrationName)
	if err != nil {
		return nil, trace.Wrap(err)
	}

	// Only AWS OIDC integrations can service these requests.
	if integration.GetSubKind() != types.IntegrationSubKindAWSOIDC {
		return nil, trace.BadParameter("integration subkind (%s) mismatch", integration.GetSubKind())
	}

	issuer, err := h.issuerFromPublicAddr()
	if err != nil {
		return nil, trace.Wrap(err)
	}

	token, err := clt.GenerateAWSOIDCToken(ctx, types.GenerateAWSOIDCTokenRequest{
		Issuer: issuer,
	})
	if err != nil {
		return nil, trace.Wrap(err)
	}

	awsoidcSpec := integration.GetAWSOIDCIntegrationSpec()
	if awsoidcSpec == nil {
		return nil, trace.BadParameter("missing spec fields for %q (%q) integration", integration.GetName(), integration.GetSubKind())
	}

	return &awsoidc.AWSClientRequest{
		Token:   token,
		RoleARN: awsoidcSpec.RoleARN,
		Region:  region,
	}, nil
}
package sort import "testing" func TestFindKBigNum(t *testing.T) { var arr []int var knum int arr = []int{1} knum = findKBigNum(arr, len(arr), 1) t.Logf("arr: %v, knum: %d", arr, knum) arr = []int{2, 1} knum = findKBigNum(arr, len(arr), 2) t.Logf("arr: %v, knum: %d", arr, knum) arr = []int{3, 1, 2} knum = findKBigNum(arr, len(arr), 1) t.Logf("arr: %v, knum: %d", arr, knum) arr = []int{3, 2, 1} knum = findKBigNum(arr, len(arr), 2) t.Logf("arr: %v, knum: %d", arr, knum) arr = []int{3, 2, 1, 4, 7, 5, 6} knum = findKBigNum(arr, len(arr), 5) t.Logf("arr: %v, knum: %d", arr, knum) arr = []int{5, 2, 4, 1, 2, 4, 7, 5, 6} knum = findKBigNum(arr, len(arr), 8) t.Logf("arr: %v, knum: %d", arr, knum) }
//go:build !windows
// +build !windows

/*
Copyright 2019 The Skaffold Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package gcp

import (
	"fmt"
	"testing"

	"github.com/docker/cli/cli/config/configfile"

	"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/util"
	"github.com/GoogleContainerTools/skaffold/v2/testutil"
)

// TestAutoConfigureGCRCredentialHelper verifies that the gcloud credential
// helper is registered for the GCR registries only when the helper binary is
// present in PATH, and that pre-existing helper entries are never overwritten.
func TestAutoConfigureGCRCredentialHelper(t *testing.T) {
	tests := []struct {
		description  string
		helperInPath bool
		config       *configfile.ConfigFile
		expected     *configfile.ConfigFile
	}{
		{
			description:  "add to nil map",
			helperInPath: true,
			config:       &configfile.ConfigFile{},
			expected: &configfile.ConfigFile{
				CredentialHelpers: map[string]string{
					"gcr.io":             "gcloud",
					"us.gcr.io":          "gcloud",
					"eu.gcr.io":          "gcloud",
					"asia.gcr.io":        "gcloud",
					"staging-k8s.gcr.io": "gcloud",
					"marketplace.gcr.io": "gcloud",
				},
			},
		},
		{
			description:  "add to empty map",
			helperInPath: true,
			config: &configfile.ConfigFile{
				CredentialHelpers: map[string]string{},
			},
			expected: &configfile.ConfigFile{
				CredentialHelpers: map[string]string{
					"gcr.io":             "gcloud",
					"us.gcr.io":          "gcloud",
					"eu.gcr.io":          "gcloud",
					"asia.gcr.io":        "gcloud",
					"staging-k8s.gcr.io": "gcloud",
					"marketplace.gcr.io": "gcloud",
				},
			},
		},
		{
			// helperInPath defaults to false here: existing entries must be
			// preserved even when gcloud is not installed.
			description: "leave existing helper",
			config: &configfile.ConfigFile{
				CredentialHelpers: map[string]string{
					"gcr.io":             "existing",
					"us.gcr.io":          "existing",
					"eu.gcr.io":          "existing",
					"asia.gcr.io":        "existing",
					"staging-k8s.gcr.io": "existing",
					"marketplace.gcr.io": "existing",
				},
			},
			expected: &configfile.ConfigFile{
				CredentialHelpers: map[string]string{
					"gcr.io":             "existing",
					"us.gcr.io":          "existing",
					"eu.gcr.io":          "existing",
					"asia.gcr.io":        "existing",
					"staging-k8s.gcr.io": "existing",
					"marketplace.gcr.io": "existing",
				},
			},
		},
		{
			description:  "ignore if gcloud is not in PATH",
			helperInPath: false,
			config:       &configfile.ConfigFile{},
			expected:     &configfile.ConfigFile{},
		},
	}
	for _, test := range tests {
		testutil.Run(t, test.description, func(t *testutil.T) {
			// Point PATH at a fresh temp dir so only the fake helper binary
			// written below (if any) is discoverable.
			tmpDir := t.NewTempDir()
			t.Setenv("PATH", tmpDir.Root())
			if test.helperInPath {
				tmpDir.Write("docker-credential-gcloud", "")
			}

			AutoConfigureGCRCredentialHelper(test.config)

			t.CheckDeepEqual(test.expected, test.config)
		})
	}
}

// TestActiveUserCredentials checks that activeUserCredentials parses the
// token from `gcloud auth print-identity-token --format=json` output and
// surfaces errors for failed commands and malformed JSON.
func TestActiveUserCredentials(t *testing.T) {
	output := `{
  "access_token": "access_token_value",
  "id_token": "id_token_value",
  "token_expiry": "2023-03-23T23:10:40Z"
}`
	tests := []struct {
		name        string
		mockCommand util.Command
		shouldErr   bool
		expected    string
	}{
		{
			name: "get credential succeed",
			mockCommand: testutil.CmdRunWithOutput("gcloud auth print-identity-token --format=json", output).
				AndRunWithOutput("gcloud auth print-identity-token --format=json", output),
			expected: "access_token_value",
		},
		{
			name:        "command error, get error",
			mockCommand: testutil.CmdRunErr("gcloud auth print-identity-token --format=json", fmt.Errorf("command exited with non-zero code")),
			shouldErr:   true,
		},
		{
			name:        "invalid json, get error",
			mockCommand: testutil.CmdRunWithOutput("gcloud auth print-identity-token --format=json", "{{{"),
			shouldErr:   true,
		},
	}
	for _, test := range tests {
		testutil.Run(t, test.name, func(t *testutil.T) {
			t.Override(&util.DefaultExecCommand, test.mockCommand)

			out, err := activeUserCredentials()

			t.CheckError(test.shouldErr, err)
			if err == nil {
				token, _ := out.TokenSource.Token()
				t.CheckDeepEqual(test.expected, token.AccessToken)
			}
		})
	}
}
package handle import ( . "base" ) type C10000Up struct { SID string //String, } func f10000Up(c uint16, p *Pack, u *Player) []byte { s := new(C10000Up) s.SID = p.ReadString() res := new(C10000Down) //业务逻辑: AddPlayer(s.SID, u) res.Flag = 1 return res.ToBytes() }
package main import ( filepc "protos" "context" "fmt" "google.golang.org/grpc" "net/http" "os" "strings" ) var ( serverAddr = "127.0.0.1:8082" ) func parseRequest(r http.Request, prefix string) map[string]string { request := r.URL.Path[len(prefix):] tokens := strings.Split(request, "&") result := make(map[string]string) for _, token := range tokens { kv := strings.Split(token, "=") result[kv[0]] = kv[1] } return result } func viewHandler(w http.ResponseWriter, r *http.Request) { headers := parseRequest(*r, "/view/") filerequest := filepc.FileRequest{ Filename: headers["filename"], Extension: headers["extension"], } conn, _ := grpc.Dial(serverAddr) client := filepc.NewExplorerClient(conn) response, _ := client.GetFile(context.Background(), &filerequest) fmt.Fprintf(w, "<div>%s</div>", response.Content) } func main() { http.HandleFunc("/view", viewHandler) http.ListenAndServe(":" + os.Args[1], nil) }
package blade

import (
	"bytes"
	"sort"
)

// defaultMaxCacheSize bounds the LRU cache enabled by RadixTXN.MemOptimized.
const defaultMaxCacheSize = 8192

// radixChild pairs the first byte of a child's prefix with the child node,
// so children can be kept sorted and binary-searched by that byte.
type radixChild struct {
	c    byte
	node *RadixNode
}

// radixValue holds a stored key/value pair; keeping the full key here
// avoids reassembling it from edge prefixes during walks.
type radixValue struct {
	k []byte
	v interface{}
}

// RadixNode is a node in the radix tree.
type RadixNode struct {
	prefix []byte        // compressed edge label leading to this node
	childs []*radixChild // children, sorted by their first byte
	v      *radixValue   // non-nil iff a key terminates at this node
}

// IsLeaf returns true if the node is leaf (i.e. a key terminates here).
func (n *RadixNode) IsLeaf() bool {
	return n.v != nil
}

// Clone returns a deep copy of the current node.
// NOTE(review): the child slice is copied but the *radixChild entries are
// shared with the original; code that later assigns childs[i].node on a
// clone mutates the original tree's entry too — confirm this is intended
// for the copy-on-write transaction model.
func (n *RadixNode) Clone() *RadixNode {
	rn := &RadixNode{v: n.v}
	// copy prefix
	rn.prefix = make([]byte, len(n.prefix))
	copy(rn.prefix, n.prefix)
	// copy childs
	rn.childs = make([]*radixChild, len(n.childs))
	copy(rn.childs, n.childs)
	return rn
}

// Get returns the value associated with the given key
// if the key is exist, otherwise false.
func (n *RadixNode) Get(k []byte) (v interface{}, ok bool) {
	if len(k) == 0 {
		// Exhausted the key: a value exists only if a key ends here.
		if n.IsLeaf() {
			v, ok = n.v.v, true
		}
		return
	}
	child, idx := n.getChild(k[0])
	if idx == -1 {
		return
	}
	if bytes.HasPrefix(k, child.prefix) {
		// Consume the matched edge label and descend.
		prefix := k[len(child.prefix):]
		return child.Get(prefix)
	}
	return
}

// getChild returns the child whose edge starts with c (and its index),
// using binary search over the sorted child slice; (nil, -1) if absent.
func (n *RadixNode) getChild(c byte) (*RadixNode, int) {
	l := len(n.childs)
	i := sort.Search(l, func(i int) bool {
		return n.childs[i].c >= c
	})
	if i < l {
		child := n.childs[i]
		if child.c == c {
			return child.node, i
		}
	}
	return nil, -1
}

// addChild inserts child keeping n.childs sorted by leading byte.
func (n *RadixNode) addChild(child *radixChild) {
	l := len(n.childs)
	i := sort.Search(l, func(i int) bool {
		return n.childs[i].c >= child.c
	})
	n.childs = append(n.childs, child)
	if i < l {
		// Shift the tail right and place the new child at its slot.
		copy(n.childs[i+1:], n.childs[i:l])
		n.childs[i] = child
	}
}

// delChild removes the child whose edge starts with c, if present.
func (n *RadixNode) delChild(c byte) {
	l := len(n.childs)
	i := sort.Search(l, func(i int) bool {
		return n.childs[i].c >= c
	})
	if i < l {
		child := n.childs[i]
		if child.c == c {
			copy(n.childs[i:], n.childs[i+1:])
			n.childs[l-1] = nil // drop the reference for GC
			n.childs = n.childs[:l-1]
		}
	}
}

// scale merges this node with its only child when the node holds no value,
// collapsing one-child chains left behind by deletions.
func (n *RadixNode) scale() {
	if len(n.childs) != 1 || n.IsLeaf() {
		return
	}
	child := n.childs[0].node
	// NOTE(review): append may write into a backing array shared with
	// another node's prefix slice — verify this cannot corrupt clones.
	n.prefix = append(n.prefix, child.prefix...)
	n.v = child.v
	if len(child.childs) > 0 {
		n.childs = make([]*radixChild, len(child.childs))
		copy(n.childs, child.childs)
	} else {
		n.childs = nil
	}
}

// count returns the number of stored key/value pairs in this subtree.
func (n *RadixNode) count() int {
	count := 0
	if n.IsLeaf() {
		count++
	}
	for _, child := range n.childs {
		count += child.node.count()
	}
	return count
}

// visit walks the subtree in child order, invoking fn for every stored
// pair; it returns true as soon as fn returns true (early stop).
func visit(node *RadixNode, fn func(k []byte, v interface{}) bool) bool {
	if node != nil && node.v != nil {
		if fn(node.v.k, node.v.v) {
			return true
		}
	}
	for _, child := range node.childs {
		if visit(child.node, fn) {
			return true
		}
	}
	return false
}

// Walk is used to walk the tree. Stopped when the fn returns true.
func (n *RadixNode) Walk(fn func(k []byte, v interface{}) bool) {
	visit(n, fn)
}

// WalkPrefix is used to walk the tree underlying the prefix.
// Stopped when the fn returns true.
func (n *RadixNode) WalkPrefix(prefix []byte, fn func(k []byte, v interface{}) bool) {
	k := prefix
	for {
		if len(k) == 0 {
			// Prefix fully consumed: everything below matches.
			n.Walk(fn)
			return
		}
		n, _ = n.getChild(k[0])
		if n == nil {
			break
		}
		if bytes.HasPrefix(k, n.prefix) {
			k = k[len(n.prefix):]
		} else if bytes.HasPrefix(n.prefix, k) {
			// The remaining prefix ends inside this edge: the whole
			// subtree still matches.
			n.Walk(fn)
			return
		} else {
			break
		}
	}
}

// WalkPath is used to walk the tree, but only visiting nodes
// from the root down to a given leaf. Stopped when the fn returns true.
func (n *RadixNode) WalkPath(path []byte, fn func(k []byte, v interface{}) bool) {
	k := path
	for {
		if n.IsLeaf() && fn(n.v.k, n.v.v) {
			return
		}
		if len(k) == 0 {
			return
		}
		n, _ = n.getChild(k[0])
		if n == nil {
			break
		}
		if bytes.HasPrefix(k, n.prefix) {
			k = k[len(n.prefix):]
		} else {
			break
		}
	}
}

// RadixTree implements an immutable radix tree: lookups run against a
// fixed root while mutations happen in a RadixTXN and produce a new tree.
type RadixTree struct {
	root *RadixNode
	size int
}

// RadixTXN is a transaction of the RadixDB: it accumulates copy-on-write
// changes against a snapshot root until Commit or Rollback.
type RadixTXN struct {
	tree *RadixTree
	root *RadixNode
	size int

	cache  *LRU // optional clone-reuse cache (see MemOptimized)
	hits   int  // nodes reused from cache
	allocs int  // nodes freshly cloned
}

// NewRadixTree creates a new RadixTree.
func NewRadixTree() *RadixTree { return &RadixTree{ root: &RadixNode{}, } } // OpenTXN returns a new transaction for inserting or deleting // the tree node. func (t *RadixTree) OpenTXN() *RadixTXN { return &RadixTXN{ tree: t, root: t.root, size: t.size, } } // Root returns the root of this radix tree. func (t *RadixTree) Root() *RadixNode { return t.root } // Get returns the value associated with the given key // if the key is exist, otherwise false. func (t *RadixTree) Get(k []byte) (interface{}, bool) { return t.root.Get(k) } // Size returns the size of the tree. func (t *RadixTree) Size() int { return t.size } // Walk calls node.Walk. func (t *RadixTree) Walk(fn func(k []byte, v interface{}) bool) { t.root.Walk(fn) } // WalkPrefix calls node.WalkPrefix. func (t *RadixTree) WalkPrefix(prefix []byte, fn func(k []byte, v interface{}) bool) { t.root.WalkPrefix(prefix, fn) } // WalkPath calls node.WalkPath. func (t *RadixTree) WalkPath(path []byte, fn func(k []byte, v interface{}) bool) { t.root.WalkPath(path, fn) } // MemOptimized will create a cache for reusing the node's clone. func (t *RadixTXN) MemOptimized() { if t.cache == nil { t.cache = NewLRU(defaultMaxCacheSize, nil) } } // Get returns the value associated with the given key // if the key is exist, otherwise false. func (t *RadixTXN) Get(k []byte) (interface{}, bool) { return t.root.Get(k) } // Insert inserts the k/v pair into the tree and returns the old // value associated with the given key if it exists. func (t *RadixTXN) Insert(k []byte, v interface{}) (old interface{}) { rn, rp := t.insert(t.root, k, &radixValue{k, v}) if rn != nil { t.root = rn } if rp != nil { old = rp.v } else { t.size++ } return } // Delete deletes the k/v pair from the tree. func (t *RadixTXN) Delete(k []byte) (old interface{}) { rn, rp := t.delete(t.root, k) if rn != nil { t.root = rn } if rp != nil { old = rp.v t.size-- } return } // DeletePrefix deletes the k/v pair whose key is started // with the given prefix. 
func (t *RadixTXN) DeletePrefix(prefix []byte) int {
	rn, count := t.deletePrefix(t.root, prefix)
	if rn != nil {
		t.root = rn
	}
	if count > 0 {
		t.size -= count
	}
	return count
}

// Commit finalizes the transaction and returns a new tree.
func (t *RadixTXN) Commit() *RadixTree {
	tree := &RadixTree{
		root: t.root,
		size: t.size,
	}
	// clear cache
	t.cache = nil
	return tree
}

// Rollback finalizes the transaction and returns the old tree.
func (t *RadixTXN) Rollback() *RadixTree {
	// clear cache
	t.cache = nil
	return t.tree
}

// insert performs a copy-on-write insert of v under prefix, starting at n.
// It returns the replacement node for n (nil if nothing changed) and the
// previous value when the key already existed.
func (t *RadixTXN) insert(n *RadixNode, prefix []byte, v *radixValue) (rn *RadixNode, old *radixValue) {
	// It indicates the current node is the node
	// looking for. If the k is empty, the node is root.
	if len(prefix) == 0 {
		if n.IsLeaf() {
			old = n.v
		}
		rn = n.Clone()
		rn.v = v
		return
	}

	childNode, idx := n.getChild(prefix[0])
	if idx == -1 {
		// Did not find the specified character,
		// directly add a child node
		rn = t.copyNode(n)
		rn.addChild(&radixChild{
			c: prefix[0],
			node: &RadixNode{
				prefix: prefix,
				v:      v,
			},
		})
		return
	}

	lcp := longestCommonPrefix(prefix, childNode.prefix)
	if lcp == len(childNode.prefix) {
		// It indicates the current node is
		// the node looking for, insert recursively.
		rn, old = t.insert(childNode, prefix[lcp:], v)
		if rn != nil {
			// It is necessary to copy the current node
			// because the childNode has already changed.
			tmp := t.copyNode(n)
			tmp.childs[idx].node = rn
			rn = tmp
		}
		return
	}

	// The current child node and 'prefix' have the common prefix,
	// so we need to create a new child node with the common prefix
	// ('prefix[:lcp]') to replace the current child node.
	rn = t.copyNode(n)
	newChildNode := &RadixNode{
		prefix: prefix[:lcp],
	}
	rn.childs[idx] = &radixChild{
		c:    prefix[0],
		node: newChildNode,
	}

	// split the old child node
	oldChild := t.copyNode(childNode)
	newChildNode.addChild(&radixChild{
		c:    oldChild.prefix[lcp],
		node: oldChild,
	})
	oldChild.prefix = oldChild.prefix[lcp:]

	if lcp == len(prefix) {
		// The new key ends exactly at the split point.
		newChildNode.v = v
	} else {
		newChildNode.addChild(&radixChild{
			c: prefix[lcp],
			node: &RadixNode{
				prefix: prefix[lcp:],
				v:      v,
			},
		})
	}
	return
}

// delete performs a copy-on-write removal of the key ending at prefix.
// It returns the replacement node for n (nil if the key was absent) and
// the removed value.
func (t *RadixTXN) delete(n *RadixNode, prefix []byte) (rn *RadixNode, old *radixValue) {
	if len(prefix) == 0 {
		if !n.IsLeaf() {
			return
		}
		old = n.v
		// must before copy node because of cache
		rn = t.copyNode(n)
		rn.v = nil // delete
		// Merge single-child chains, but never collapse the root.
		if n != t.root {
			rn.scale()
		}
		return
	}

	child, idx := n.getChild(prefix[0])
	if idx == -1 || !bytes.HasPrefix(prefix, child.prefix) {
		return
	}

	rn, old = t.delete(child, prefix[len(child.prefix):])
	if rn != nil {
		newChild := rn
		rn = t.copyNode(n)
		if newChild.v == nil && len(newChild.childs) == 0 {
			// Child became empty: unlink it entirely.
			rn.delChild(prefix[0])
			if n != t.root {
				rn.scale()
			}
		} else {
			rn.childs[idx].node = newChild
		}
	}
	return
}

// deletePrefix removes every key under prefix, returning the replacement
// node and the number of removed pairs.
func (t *RadixTXN) deletePrefix(n *RadixNode, prefix []byte) (rn *RadixNode, count int) {
	if len(prefix) == 0 {
		// Entire subtree is discarded.
		rn = t.copyNode(n)
		rn.v = nil
		rn.childs = nil
		count = n.count()
		return
	}

	child, idx := n.getChild(prefix[0])
	if idx == -1 {
		return
	}

	if bytes.HasPrefix(prefix, child.prefix) {
		newChild, c := t.deletePrefix(child, prefix[len(child.prefix):])
		if newChild != nil {
			rn = t.copyNode(n)
			// NOTE(review): this tests rn (the copy of n, which still has
			// children), whereas delete() tests newChild — as written the
			// delChild branch looks unreachable; confirm whether the
			// condition should be on newChild.
			if rn.v == nil && len(rn.childs) == 0 {
				rn.delChild(prefix[0])
				if n != t.root {
					rn.scale()
				}
			} else {
				rn.childs[idx].node = newChild
			}
			count += c
		}
		return
	}

	if bytes.HasPrefix(child.prefix, prefix) {
		// The prefix ends inside this edge: drop the whole child subtree.
		newChild, c := t.deletePrefix(child, []byte(""))
		if newChild != nil {
			rn = t.copyNode(n)
			// NOTE(review): same rn-vs-newChild question as above.
			if rn.v == nil && len(rn.childs) == 0 {
				rn.delChild(prefix[0])
				if n != t.root {
					rn.scale()
				}
			} else {
				rn.childs[idx].node = newChild
			}
			count += c
		}
		return
	}
	return
}

// copyNode clones n unless the transaction's cache proves n was already
// cloned within this transaction (then it is private and safe to mutate
// in place). hits/allocs track cache effectiveness.
func (t *RadixTXN) copyNode(n *RadixNode) (rn *RadixNode) {
	// clone the current node
	if t.cache != nil {
		if t.cache.Exist(n) {
			rn = n
			t.hits++
		} else {
			rn = n.Clone()
			t.allocs++
			t.cache.Add(rn, struct{}{})
		}
	} else {
		rn = n.Clone()
		t.allocs++
	}
	return
}

// longestCommonPrefix returns the length of the longest common prefix
// of a and b.
func longestCommonPrefix(a, b []byte) int {
	max := len(a)
	if l := len(b); l < max {
		max = l
	}
	var i int
	for i = 0; i < max; i++ {
		if a[i] != b[i] {
			break
		}
	}
	return i
}
package Monotone_Increasing_Digits import "testing" func Test_monotoneIncreasingDigits(t *testing.T) { type args struct { N int } tests := []struct { name string args args want int }{ // TODO: Add test cases. { "case", args{ 114191537, }, 113999999, }, { "case", args{ 4321, }, 3999, }, { "case", args{ 13332, }, 12999, }, { "case", args{ 4523, }, 4499, }, { "case", args{ 10, }, 9, }, { "case", args{ 4586, }, 4579, }, { "case", args{ 1234, }, 1234, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := monotoneIncreasingDigits(tt.args.N); got != tt.want { t.Errorf("monotoneIncreasingDigits() = %v, want %v", got, tt.want) } }) } }
package Controller import ( "1/Model" "github.com/gin-gonic/gin" ) //点赞 //func Like(c *gin.Context) { // vid := c.Query("vid") // // uid, err := c.Request.Cookie("uid");if err!=nil{ // c.JSON(400,err) // return // } // // if !Model.Like(uid.Value,vid){ // c.JSON(400,"已点赞") // } //} //投币 func Coin(c *gin.Context) { vid := c.Query("vid") to_uid := c.Query("uid") uid, err := c.Request.Cookie("uid");if err!=nil{ c.JSON(400,err) return } Model.Coin(uid.Value,vid,to_uid) } //点赞 //func Collection(c *gin.Context) { // vid := c.Query("vid") // // uid, err := c.Request.Cookie("uid");if err!=nil{ // c.JSON(400,err) // return // } // // if !Model.Collection(uid.Value,vid){ // c.JSON(400,"已收藏") // } //}
package iteration import "strings" func Repeat(character string, count int) string { return strings.Repeat(character, count) }
// SPDX-License-Identifier: ISC // Copyright (c) 2014-2020 Bitmark Inc. // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. package storage import ( "encoding/binary" "fmt" "reflect" "sync" "github.com/syndtr/goleveldb/leveldb" ldb_opt "github.com/syndtr/goleveldb/leveldb/opt" "github.com/bitmark-inc/bitmarkd/fault" "github.com/bitmark-inc/logger" ) // exported storage pools // // note all must be exported (i.e. initial capital) or initialisation will panic type pools struct { Blocks Handle `prefix:"B" pool:"PoolHandle"` BlockHeaderHash Handle `prefix:"2" pool:"PoolHandle"` BlockOwnerPayment Handle `prefix:"H" pool:"PoolHandle"` BlockOwnerTxIndex Handle `prefix:"I" pool:"PoolHandle"` Assets Handle `prefix:"A" pool:"PoolNB"` Transactions Handle `prefix:"T" pool:"PoolNB"` OwnerNextCount Handle `prefix:"N" pool:"PoolHandle"` OwnerList Handle `prefix:"L" pool:"PoolHandle"` OwnerTxIndex Handle `prefix:"D" pool:"PoolHandle"` OwnerData Handle `prefix:"O" pool:"PoolHandle"` Shares Handle `prefix:"F" pool:"PoolHandle"` ShareQuantity Handle `prefix:"Q" pool:"PoolHandle"` TestData Handle `prefix:"Z" pool:"PoolHandle"` } // Pool - the set of exported pools var Pool pools // for database version var ( versionKey = []byte{0x00, 'V', 'E', 'R', 'S', 'I', 'O', 'N'} needMigration = false ) const ( currentBitmarksDBVersion = 0x1 bitmarksDBName = "bitmarks" ) // holds the database handle var poolData struct { sync.RWMutex bitmarksDB *leveldb.DB trx Transaction bitmarksBatch *leveldb.Batch cache Cache } var PaymentStorage struct { Btc P2PStorage Ltc P2PStorage } // pool access modes const ( ReadOnly = true ReadWrite = false ) // Initialise - open up the database connection // // this must be called before any pool is accessed func Initialise(dbPrefix string, readOnly bool) error { poolData.Lock() defer poolData.Unlock() ok := false if nil != poolData.bitmarksDB { return fault.AlreadyInitialised } defer func() { if !ok { dbClose() } }() 
bitmarksDBVersion, err := openBitmarkdDB(dbPrefix, readOnly) if err != nil { return err } err = validateBitmarksDBVersion(bitmarksDBVersion, readOnly) if err != nil { return err } err = setupBitmarksDB() if err != nil { return err } // payment dbPrefix btcDatabase := dbPrefix + "-btc.leveldb" ltcDatabase := dbPrefix + "-ltc.leveldb" db, _, err := getDB(btcDatabase, readOnly) if nil != err { return err } PaymentStorage.Btc = NewLevelDBPaymentStore(db) db, _, err = getDB(ltcDatabase, readOnly) if nil != err { return err } PaymentStorage.Ltc = NewLevelDBPaymentStore(db) ok = true // prevent db close return nil } func setupBitmarksDB() error { bitmarksDBAccess := setupBitmarksDBTransaction() err := setupPools(bitmarksDBAccess) if err != nil { return err } return nil } func setupBitmarksDBTransaction() Access { poolData.bitmarksBatch = new(leveldb.Batch) poolData.cache = newCache() bitmarksDBAccess := newDA(poolData.bitmarksDB, poolData.bitmarksBatch, poolData.cache) poolData.trx = newTransaction([]Access{bitmarksDBAccess}) return bitmarksDBAccess } func setupPools(bitmarksDBAccess Access) error { // this will be a struct type poolType := reflect.TypeOf(Pool) // get write access by using pointer + Elem() poolValue := reflect.ValueOf(&Pool).Elem() // scan each field for i := 0; i < poolType.NumField(); i += 1 { fieldInfo := poolType.Field(i) prefixTag := fieldInfo.Tag.Get("prefix") poolTag := fieldInfo.Tag.Get("pool") if 1 != len(prefixTag) || 0 == len(poolTag) { return fmt.Errorf("pool: %v has invalid prefix: %q, poolTag: %s", fieldInfo, prefixTag, poolTag) } prefix := prefixTag[0] limit := []byte(nil) if prefix < 255 { limit = []byte{prefix + 1} } p := &PoolHandle{ prefix: prefix, limit: limit, dataAccess: bitmarksDBAccess, } if poolTag == "PoolNB" { pNB := &PoolNB{ pool: p, } newNB := reflect.ValueOf(pNB) poolValue.Field(i).Set(newNB) } else { newPool := reflect.ValueOf(p) poolValue.Field(i).Set(newPool) } } return nil } func openBitmarkdDB(dbPrefix string, readOnly 
bool) (int, error) { name := fmt.Sprintf("%s-%s.leveldb", dbPrefix, bitmarksDBName) db, version, err := getDB(name, readOnly) if nil != err { return 0, err } poolData.bitmarksDB = db return version, err } func validateBitmarksDBVersion(bitmarksDBVersion int, readOnly bool) error { // ensure no database downgrade if bitmarksDBVersion > currentBitmarksDBVersion { msg := fmt.Sprintf("bitmarksDB database version: %d > current version: %d", bitmarksDBVersion, currentBitmarksDBVersion) logger.Critical(msg) return nil } // prevent readOnly from modifying the database if readOnly && bitmarksDBVersion != currentBitmarksDBVersion { msg := fmt.Sprintf("database inconsistent: bitmarksDB: %d current: %d ", bitmarksDBVersion, currentBitmarksDBVersion) logger.Critical(msg) return nil } if 0 < bitmarksDBVersion && bitmarksDBVersion < currentBitmarksDBVersion { needMigration = true } else if 0 == bitmarksDBVersion { // database was empty so tag as current version err := putVersion(poolData.bitmarksDB, currentBitmarksDBVersion) if err != nil { return nil } } return nil } func dbClose() { if nil != poolData.bitmarksDB { if err := poolData.bitmarksDB.Close(); nil != err { logger.Criticalf("close bitmarkd db with error: %s", err) } poolData.bitmarksDB = nil } if nil != PaymentStorage.Btc { if err := PaymentStorage.Btc.Close(); nil != err { logger.Criticalf("close btc db with error: %s", err) } PaymentStorage.Btc = nil } if nil != PaymentStorage.Ltc { if err := PaymentStorage.Ltc.Close(); nil != err { logger.Criticalf("close btc db with error: %s", err) } PaymentStorage.Ltc = nil } } // Finalise - close the database connection func Finalise() { poolData.Lock() dbClose() poolData.Unlock() } // return: // database handle // version number func getDB(name string, readOnly bool) (*leveldb.DB, int, error) { opt := &ldb_opt.Options{ ErrorIfExist: false, ErrorIfMissing: readOnly, ReadOnly: readOnly, } db, err := leveldb.OpenFile(name, opt) if nil != err { return nil, 0, err } versionValue, err 
:= db.Get(versionKey, nil) if leveldb.ErrNotFound == err { return db, 0, nil } else if nil != err { e := db.Close() if nil != e { logger.Criticalf("close %s database with error: %s", name, e) } return nil, 0, err } if 4 != len(versionValue) { e := db.Close() if nil != e { logger.Criticalf("close %s database with error: %s", name, e) } return nil, 0, fmt.Errorf("incompatible database version length: expected: %d actual: %d", 4, len(versionValue)) } version := int(binary.BigEndian.Uint32(versionValue)) return db, version, nil } func putVersion(db *leveldb.DB, version int) error { currentVersion := make([]byte, 4) binary.BigEndian.PutUint32(currentVersion, uint32(version)) return db.Put(versionKey, currentVersion, nil) } // IsMigrationNeed - check if bitmarks database needs migration func IsMigrationNeed() bool { return needMigration } func NewDBTransaction() (Transaction, error) { err := poolData.trx.Begin() if nil != err { return nil, err } return poolData.trx, nil }
package entry

import (
	"shared/utility/coordinate"
	"testing"
)

// NOTE(review): these are exploratory tests — they only t.Log results and
// never assert, so they cannot fail on wrong values; consider adding
// expectations.

// Demonstrates that Position values compare equal by value.
func TestYggdrasilEntry_Reload(t *testing.T) {
	position1 := *coordinate.NewPosition(1, 1)
	position2 := *coordinate.NewPosition(1, 1)
	t.Log(position1 == position2)
}

func TestYggdrasilEntry_GetPosAreaId(t *testing.T) {
	areaId, err := CSV.Yggdrasil.GetPosAreaId(*coordinate.NewPosition(-8, -24))
	t.Log(err)
	t.Log(areaId)
	areaId, err = CSV.Yggdrasil.GetPosAreaId(*coordinate.NewPosition(-113, 58))
	t.Log(err)
	t.Log(areaId)
}

func TestYggdrasilEntry_GetPosInitHeightType(t *testing.T) {
	H, T := CSV.Yggdrasil.GetPosInitHeightAndType(*coordinate.NewPosition(-8, -24))
	t.Log(H)
	t.Log(T)
}

func TestYggdrasilEntry_Check(t *testing.T) {
	_, posType := CSV.Yggdrasil.GetPosInitHeightType()
	t.Log(posType[*coordinate.NewPosition(-7, -26)])
}

func TestYggdrasilEntry_GetMostCost(t *testing.T) {
	cost, err := CSV.Yggdrasil.GetMostCost(*coordinate.NewPosition(12, 0))
	t.Log(cost)
	t.Log(err)
}

// Exercises area set operations (intersection and difference).
func TestYggdrasilEntry_Area(t *testing.T) {
	area1, _ := CSV.Yggdrasil.GetArea(1)
	t.Log(area1.Area)
	t.Log(area1.Area.Count())
	area2, _ := CSV.Yggdrasil.GetArea(2)
	cut := area1.Area.CutArea(area2.Area)
	minus := area1.Area.MinusArea(area2.Area)
	t.Log(cut.Count())
	t.Log(minus.Count())
}

func TestYggdrasilEntry_GetYggBagAllCount(t *testing.T) {
	count := CSV.Yggdrasil.GetYggBagAllCount(59)
	t.Log(count)
	teamCount := CSV.Yggdrasil.GetYggEditTeamCount(59)
	t.Log(teamCount)
}

// Checks that getters return copies: mutating the returned values must not
// change what a second call returns.
func TestYggdrasilEntry_GetYggInitPos(t *testing.T) {
	pos := CSV.Yggdrasil.GetYggInitPos()
	i := &pos
	i.X = 23
	t.Log(CSV.Yggdrasil.GetYggInitPos())
	count := CSV.Yggdrasil.GetYggMailMaxCount()
	count = 1
	t.Log(count)
	t.Log(CSV.Yggdrasil.GetYggMailMaxCount())
}

func TestYggdrasilEntry_GetExploreProcessIndex(t *testing.T) {
	index, err := CSV.Yggdrasil.GetExploreProcessIndex(1, 38)
	t.Log(index)
	t.Log(err)
	area, err := CSV.Yggdrasil.GetArea(1)
	dropId := area.ExploredProgressDrop[index]
	t.Log(dropId)
}

func TestYggdrasilEntry_IsCityEntrance(t *testing.T) {
	cityId := CSV.Yggdrasil.IsCityEntrance(coordinate.NewPosition(-35, -6))
	t.Log(cityId)
}

func TestYggdrasilEntry_GetArea(t *testing.T) {
	id, err := CSV.Yggdrasil.GetPosAreaId(*coordinate.NewPosition(11, -26))
	t.Log(id)
	t.Log(err)
}

func TestYggdrasilEntry_GetClosestSafePos(t *testing.T) {
	p, err := CSV.Yggdrasil.GetClosestSafePos(*coordinate.NewPosition(11, -26))
	t.Log(p)
	t.Log(err)
}
package cmd

import (
	"fmt"
	"net/url"
	"regexp"

	"github.com/manifoldco/promptui"
	"github.com/redhat-openshift-ecosystem/openshift-preflight/certification/errors"
	log "github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
)

// Query-parameter names/values for the Red Hat support ticket URL.
const (
	baseURL              = "https://connect.redhat.com/support/technology-partner/#/case/new?"
	typeParam            = "type"
	typeValue            = "CERT"
	sourceParam          = "source"
	sourceValue          = "preflight"
	certProjectTypeParam = "cert_project_type"
	certProjectIDParam   = "cert_project_id"
	pullRequestURLParam  = "pull_request_url"
)

// supportCmd interactively collects the certification project type, the
// project ID, and a pull-request URL, then prints a prefilled support
// ticket URL for the user to open in a browser.
var supportCmd = &cobra.Command{
	Use:   "support",
	Short: "Submits a support request",
	Long: `This interactive command will generate a URL; based on user input which can then be used to create a Red Hat Support Ticket.
This command can be used when you'd like assistance from Red Hat Support when attempting to pass your certification checks.
`,
	PreRun: preRunConfig,
	RunE: func(cmd *cobra.Command, args []string) error {
		certProjectTypeLabel := promptui.Select{
			Label: "Select a Certification Project Type",
			Items: []string{"Container Image", "Operator Bundle Image"},
		}

		_, certProjectTypeValue, err := certProjectTypeLabel.Run()
		if err != nil {
			return errors.ErrSupportCmdPromptFailed
		}
		log.Debugf("certification project type: %s", certProjectTypeValue)

		certProjectIDLabel := promptui.Prompt{
			Label: "Please Enter Connect Certification Project ID",
			// validate makes sure that the project id is not blank, does not contain special characters,
			// and is in the proper format
			// NOTE(review): promptui calls Validate on every keystroke;
			// the MustCompile below recompiles each time — consider
			// hoisting the regexp to package scope.
			Validate: func(s string) error {
				if s == "" {
					return errors.ErrEmptyProjectID
				}

				// reject legacy "p..." style identifiers
				isLegacy, _ := regexp.MatchString(`^p.*`, s)
				if isLegacy {
					return errors.ErrRemovePFromProjectID
				}

				// reject "ospid-..." style identifiers
				isOSPID, _ := regexp.MatchString(`^ospid-.*`, s)
				if isOSPID {
					return errors.ErrRemoveOSPIDFromProjectID
				}

				isAlphaNumeric := regexp.MustCompile(`^[a-zA-Z0-9]*$`).MatchString(s)
				if !isAlphaNumeric {
					return errors.ErrRemoveSpecialCharFromProjectID
				}

				return nil
			},
		}

		certProjectIDValue, err := certProjectIDLabel.Run()
		if err != nil {
			return errors.ErrSupportCmdPromptFailed
		}
		log.Debugf("certification project id: %s", certProjectIDValue)

		pullRequestURLLabel := promptui.Prompt{
			Label: "Please Enter Your Pull Request URL",
			// validate makes sure that the url entered has a valid scheme, host and path to the pull request
			Validate: func(s string) error {
				_, err := url.ParseRequestURI(s)
				if err != nil {
					return errors.ErrPullRequestURL
				}

				// NOTE(review): this local `url` shadows the net/url
				// package within the rest of this function.
				url, err := url.Parse(s)
				if err != nil || url.Scheme == "" || url.Host == "" || url.Path == "" {
					return errors.ErrPullRequestURL
				}

				return nil
			},
		}

		pullRequestURLValue, err := pullRequestURLLabel.Run()
		if err != nil {
			return errors.ErrSupportCmdPromptFailed
		}
		log.Debugf("pull request url: %s", pullRequestURLValue)

		// building and encoding query params
		queryParams := url.Values{}
		queryParams.Add(typeParam, typeValue)
		queryParams.Add(sourceParam, sourceValue)
		queryParams.Add(certProjectTypeParam, certProjectTypeValue)
		queryParams.Add(certProjectIDParam, certProjectIDValue)
		queryParams.Add(pullRequestURLParam, pullRequestURLValue)

		fmt.Printf("Create a support ticket by: \n"+
			"\t1. Copying URL: %s\n"+
			"\t2. Paste above URL in a browser\n"+
			"\t3. Login with Red Hat SSO\n"+
			"\t4. Enter an issue summary and description\n"+
			"\t5. Preview and Submit your ticket\n", baseURL+queryParams.Encode())

		return nil
	},
}

func init() {
	rootCmd.AddCommand(supportCmd)
}
// Package hash implements various hashing algorithms used in Bitcoin.
package hash
package main

import (
	"fmt"
	"log"
	"math"
	"net"

	"github.com/ev3go/ev3dev"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/reflection"

	pb "penbot/shared"
)

// Side selectors for the two-bar linkage solver.
const (
	Left  = 1
	Right = -1
)

// Config holds the plotter geometry (values in millimetres, see main).
type Config struct {
	A float64 // half-distance between the two motor axes
	L float64 // long linkage length
	R float64 // short (motor) linkage length
}

// find_angle solves the two-bar linkage for the motor angle (radians)
// that places the pen at p, for the side selected by leftOrRight
// (Left or Right).
func find_angle(p *pb.Point, l float64, r float64, leftOrRight float64) float64 {
	v := p.X*p.X + p.Y*p.Y
	u := l*l - v + r*r
	t := p.X * math.Sqrt(4*l*l*r*r-u*u)
	y := (leftOrRight*t + p.Y*(v+r*r-l*l)) / (2 * v)
	return math.Asin(y / r)
}

// left_angle computes the left motor angle by shifting the target into the
// left motor's coordinate frame.
func left_angle(p *pb.Point, config *Config) float64 {
	return find_angle(&pb.Point{X: p.X + config.A, Y: p.Y}, config.L, config.R, Left)
}

// right_angle computes the right motor angle in the right motor's frame.
func right_angle(p *pb.Point, config *Config) float64 {
	return find_angle(&pb.Point{X: p.X - config.A, Y: p.Y}, config.L, config.R, Right)
}

// to_motor_position converts an arm angle in radians to a clamped,
// gear-compensated motor position in degrees.
func to_motor_position(angle float64) int {
	// Go from radians to degrees.
	scaled := (angle / math.Pi) * 180.0

	// TODO: maybe change this to be an initialization step
	// Rotate by -90 degrees.
	shifted := (scaled - 90.0)

	// Compensate for the 3:1 gear ratio.
	gearScaled := shifted * 3.0

	// Enforce -195 lower limit, 40 upper limit.
	if gearScaled < -195.0 {
		log.Printf("Position %f was below lower limit -195", gearScaled)
		return -195
	}
	if gearScaled > 40.0 {
		log.Printf("Position %f was above upper limit 40", gearScaled)
		return 40
	}
	// Round half up (kept as Floor(x+.5); math.Round differs on
	// negative halves).
	return int(math.Floor(gearScaled + .5))
}

// Server implements the PenBot gRPC service; incoming points are queued on
// Commands and consumed by Run.
type Server struct {
	Config   *Config
	Left     *Motor
	Right    *Motor
	Commands chan *pb.Point
}

// EnqueuePosition queues a target point for the drawing loop.
func (server *Server) EnqueuePosition(
	ctx context.Context,
	request *pb.EnqueuePositionRequest) (*pb.EnqueuePositionResponse, error) {
	server.Commands <- request.P
	return &pb.EnqueuePositionResponse{}, nil
}

// Motor wraps an ev3dev tacho motor.
type Motor struct {
	Ev3Motor *ev3dev.TachoMotor
}

// InitMotor opens the large motor on the given output port and configures
// its stop action and speed; any setup failure is fatal.
func InitMotor(output string) *Motor {
	motor, err := ev3dev.TachoMotorFor(output, "lego-ev3-l-motor")
	if err != nil {
		// fixed typo: was "count not get motor"
		log.Fatalf("could not get motor %s: %v", output, err)
	}

	err = motor.SetStopAction("brake").Err()
	if err != nil {
		log.Fatalf("could not set stop action: %v", err)
	}

	// Check the error here too, consistent with SetStopAction above
	// (previously silently ignored).
	err = motor.SetSpeedSetpoint(100).Err()
	if err != nil {
		log.Fatalf("could not set speed setpoint: %v", err)
	}

	return &Motor{Ev3Motor: motor}
}

// SetPosition stores the absolute position setpoint (degrees).
func (motor *Motor) SetPosition(position int) {
	motor.Ev3Motor.SetPositionSetpoint(position)
}

// Go starts the move to the previously set absolute position.
func (motor *Motor) Go() {
	motor.Ev3Motor.Command("run-to-abs-pos")
}

// Run consumes queued points, converts each to motor positions and drives
// both motors. It exits when Commands is closed.
func (server *Server) Run() {
	for point := range server.Commands {
		a1 := left_angle(point, server.Config)
		a2 := right_angle(point, server.Config)

		leftMotorPosition := to_motor_position(a1)
		rightMotorPosition := to_motor_position(a2)

		// Debug output; newlines added so the lines no longer run together.
		fmt.Printf("Position: (%f, %f)\n", point.X, point.Y)
		fmt.Printf("Left angle: %f\n", a1)
		fmt.Printf("Right angle: %f\n", a2)
		fmt.Printf("Left motor position: %d\n", leftMotorPosition)
		fmt.Printf("Right motor position: %d\n", rightMotorPosition)

		server.Left.SetPosition(leftMotorPosition)
		server.Right.SetPosition(rightMotorPosition)
		server.Left.Go()
		server.Right.Go()
	}
}

// x: -70mm min, 70mm max, 140mm total
// y: 70mm min, 120mm max, 50mm total
func main() {
	config := Config{
		A: 32, // 32mm
		L: 98, // 98mm
		R: 80, // 80mm
	}

	//TODO: garbage collection?
	leftMotor := InitMotor("outA")
	rightMotor := InitMotor("outB")

	robotServer := Server{
		Config:   &config,
		Left:     leftMotor,
		Right:    rightMotor,
		Commands: make(chan *pb.Point),
	}
	go robotServer.Run()

	lis, err := net.Listen("tcp", ":4321")
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	s := grpc.NewServer()
	pb.RegisterPenBotServer(s, &robotServer)
	reflection.Register(s)
	fmt.Println("serving")
	if err := s.Serve(lis); err != nil {
		log.Fatalf("failed to serve: %v", err)
	}
}
package config type WeWork struct { CorpID string `yaml:"corp_id"` Agents []*Agent `yaml:"agents"` } func (w *WeWork) GetAgent(id int) *Agent { for _, a := range w.Agents { if a.ID == id { return a } } return nil } type Agent struct { ID int `yaml:"id"` Name string `yaml:"name"` Secret string `yaml:"secret"` }
package telepathy import ( "context" "fmt" "net/http" "strings" "testing" "github.com/stretchr/testify/assert" ) func TestRegisterWebhook(t *testing.T) { assert := assert.New(t) server, err := newWebServer("http://localhost", "8080") assert.NoError(err) handler := func(http.ResponseWriter, *http.Request) {} url, err := server.registerWebhook("test-hook", handler) assert.NoError(err) assert.Equal(fmt.Sprintf("http://localhost%stest-hook", webhookRoot), url.String()) } func TestRegisterWebhookIllegal(t *testing.T) { assert := assert.New(t) server, err := newWebServer("http://localhost", "8080") assert.NoError(err) handler := func(http.ResponseWriter, *http.Request) {} _, err = server.registerWebhook("@abc", handler) assert.Error(err) _, err = server.registerWebhook("-abc", handler) assert.Error(err) _, err = server.registerWebhook("a-b-c-d-e", handler) assert.Error(err) } func TestWebhookTrigger(t *testing.T) { assert := assert.New(t) server, err := newWebServer("http://localhost", "80") assert.NoError(err) called := false handler := func(w http.ResponseWriter, r *http.Request) { called = true assert.Equal("POST", r.Method) w.WriteHeader(200) } url, err := server.registerWebhook("test-hook", handler) assert.NoError(err) server.finalize() go server.ListenAndServe() resp, err := http.Post(url.String(), "text/plain", strings.NewReader("test")) assert.NoError(err) assert.Equal(200, resp.StatusCode) assert.True(called) assert.NoError(server.Shutdown(context.Background())) }
package cine import "container/list" type MessageQueue struct { queue *list.List limit int In chan *ActorCall Out chan *ActorCall Stop chan bool } func NewMessageQueue(limit int) *MessageQueue { q := new(MessageQueue) q.queue = list.New() q.limit = limit q.In = make(chan *ActorCall) q.Out = make(chan *ActorCall) q.Stop = make(chan bool) go q.Run() return q } func (q *MessageQueue) processIn(msg *ActorCall) bool { if msg.Function.IsNil() { return false } q.queue.PushBack(msg) return true } func (q *MessageQueue) doIn() bool { select { case msg := <-q.In: return q.processIn(msg) case <-q.Stop: return false } } func (q *MessageQueue) doInOut() bool { select { case msg := <-q.In: return q.processIn(msg) case q.Out <- q.queue.Front().Value.(*ActorCall): q.queue.Remove(q.queue.Front()) case <-q.Stop: return false } return true } func (q *MessageQueue) doOut() bool { select { case q.Out <- q.queue.Front().Value.(*ActorCall): q.queue.Remove(q.queue.Front()) case <-q.Stop: return false } return true } func (q *MessageQueue) Run() { defer func() { q.drain() close(q.In) close(q.Out) }() for { if q.queue.Len() == 0 { if !q.doIn() { break } } else if q.queue.Len() < q.limit { if !q.doInOut() { break } } else { if !q.doOut() { break } } } } func (q *MessageQueue) drain() { for { select { case r := <-q.In: close(r.Done) continue default: return } } }
// +build !linux package cgroup import ( "github.com/lavaorg/telex" ) func (g *CGroup) Gather(acc telex.Accumulator) error { return nil }
package ipspeicher

import (
	"errors"
	"testing"

	"github.com/DATA-DOG/go-sqlmock"
)

/*
Reference implementation (currently commented out):

func (ips *Speicher) getNeuste(name string) (Eintrag, error) {
	row := ips.Db.QueryRow("SELECT * FROM ips WHERE name = ? ORDER BY seit DESC LIMIT 1", name)
	return SqlEintrag(row)
}

func (ips *Speicher) istGespeichert(e Eintrag) (bool, error) {
	neuste, err := ips.getNeuste(e.Name)
	if err != nil {
		return false, err
	}
	return neuste.Ip == e.Ip, nil
}

func (ips *Speicher) speichern(e Eintrag) error {
	_, err := ips.Db.Exec("INSERT INTO ips VALUES (?, ?, strftime('%s', 'now'))", e.Name, e.Ip)
	return err
}

func (ips *Speicher) anzahl(query string, args ...interface{}) (int, error) {
	row := ips.Db.QueryRow(query, args...)
	var count int
	if err := row.Scan(&count); err != nil {
		return 0, err
	}
	return count, nil
}

func (ips *Speicher) liste(count int, query string, args ...interface{}) ([]Eintrag, error) {
	rows, err := ips.Db.Query(query, args...)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	einträge := make([]Eintrag, count)
	for i := 0; rows.Next(); i++ {
		einträge[i], err = SqlEintrag(rows)
		if err != nil {
			return einträge, err
		}
	}
	if err := rows.Err(); err != nil {
		return einträge, err
	}
	return einträge, nil
}

func (ips *Speicher) Verlauf(e Eintrag) ([]Eintrag, error) {
	count, err := ips.anzahl("SELECT COUNT(*) FROM ips WHERE name = ?", e.Name)
	if err != nil {
		return nil, err
	}
	return ips.liste(
		count,
		"SELECT * FROM ips WHERE name = ? ORDER BY seit DESC",
		e.Name)
}

func (ips *Speicher) Namen() ([]Eintrag, error) {
	count, err := ips.anzahl("SELECT DISTINCT COUNT(name) FROM ips")
	if err != nil {
		return nil, err
	}
	// NOTE(review): alias typo fixed below ("ip2.Date" -> "ips2.Date"); also
	// the schema above uses column "seit", not "Date" — verify both before
	// reviving this code.
	return ips.liste(
		count,
		"SELECT ips1.* FROM ips AS ips1 LEFT JOIN ips as ips2 ON ips1.name = ips2.name AND ips1.Date < ips2.Date WHERE ips2.Date IS NULL")
}

func (ips *Speicher) Sichern(e Eintrag) (bool, error) {
	result, err := ips.istGespeichert(e)
	if err != nil {
		return false, err
	}
	if result {
		return true, nil
	}
	if err = ips.speichern(e); err != nil {
		return false, err
	}
	return false, nil
}
*/

// TestSichern is a placeholder: it currently only verifies that a mock DB
// can be created. TODO: exercise Speicher.Sichern against the mock.
func TestSichern(t *testing.T) {
	sql, err := sqlmock.New()
	if err != nil {
		t.Error(err)
	}
	// Fix: the original left sql unused, which is a compile error in Go.
	_ = sql // silence unused-variable error until the TODO above is done
}

// TestSchließen checks that closing the store does not panic.
func TestSchließen(t *testing.T) {
	sql, err := sqlmock.New()
	if err != nil {
		t.Error(err)
	}
	ips := Speicher{Db: sql}
	ips.Schließen()
}

// TestNewSpeicher checks that construction issues the CREATE TABLE statement.
func TestNewSpeicher(t *testing.T) {
	sqlmock.ExpectExec("CREATE TABLE IF NOT EXISTS ips \\(name TEXT, ip TEXT, seit INT\\)").
		WillReturnResult(sqlmock.NewResult(0, 0))
	_, err := NewSpeicher(sqlmock.New())
	if err != nil {
		t.Error(err)
	}
}

// TestNewSpeicherFehlerDurchreichen checks that a constructor error is
// passed through to the caller.
func TestNewSpeicherFehlerDurchreichen(t *testing.T) {
	exp := errors.New("test")
	_, is := NewSpeicher(nil, exp)
	if is == nil {
		t.Errorf("Got %s, want %s", is, exp)
	}
}
package qcloud

import (
	"fmt"

	api "yunion.io/x/onecloud/pkg/apis/compute"
	"yunion.io/x/onecloud/pkg/multicloud"
)

// SElasticcacheParameters mirrors the response of Qcloud's parameter
// listing API for an elasticcache instance.
type SElasticcacheParameters struct {
	multicloud.SElasticcacheParameterBase
	multicloud.QcloudTags
	cacheDB *SElasticcache

	InstanceEnumParam    []SElasticcacheParameter `json:"InstanceEnumParam"`
	InstanceIntegerParam []SElasticcacheParameter `json:"InstanceIntegerParam"`
	InstanceTextParam    []SElasticcacheParameter `json:"InstanceTextParam"`
	RequestID            string                   `json:"RequestId"`
	TotalCount           int64                    `json:"TotalCount"`
}

// SElasticcacheParameter is a single tunable parameter of an elasticcache
// instance.
type SElasticcacheParameter struct {
	multicloud.SElasticcacheParameterBase
	multicloud.QcloudTags
	cacheDB *SElasticcache

	CurrentValue string   `json:"CurrentValue"`
	DefaultValue string   `json:"DefaultValue"`
	EnumValue    []string `json:"EnumValue,omitempty"`
	NeedRestart  string   `json:"NeedRestart"`
	ParamName    string   `json:"ParamName"`
	Tips         string   `json:"Tips"`
	ValueType    string   `json:"ValueType"`
	Max          *string  `json:"Max,omitempty"`
	Min          *string  `json:"Min,omitempty"`
	TextValue    []string `json:"TextValue,omitempty"`
}

// Receivers renamed from the non-idiomatic "self" to "p" per Go code
// review conventions; behavior is unchanged.

// GetId returns "<instance-id>/<param-name>", unique per instance.
func (p *SElasticcacheParameter) GetId() string {
	return fmt.Sprintf("%s/%s", p.cacheDB.InstanceID, p.ParamName)
}

// GetName returns the parameter's name.
func (p *SElasticcacheParameter) GetName() string {
	return p.ParamName
}

// GetGlobalId returns the globally unique ID (same as GetId).
func (p *SElasticcacheParameter) GetGlobalId() string {
	return p.GetId()
}

// GetStatus always reports the parameter as available.
func (p *SElasticcacheParameter) GetStatus() string {
	return api.ELASTIC_CACHE_PARAMETER_STATUS_AVAILABLE
}

// GetParameterKey returns the parameter's key (its name).
func (p *SElasticcacheParameter) GetParameterKey() string {
	return p.ParamName
}

// GetParameterValue returns the parameter's current value.
func (p *SElasticcacheParameter) GetParameterValue() string {
	return p.CurrentValue
}

// GetParameterValueRange renders the allowed enum values, e.g. "[a b c]".
func (p *SElasticcacheParameter) GetParameterValueRange() string {
	return fmt.Sprintf("%s", p.EnumValue)
}

// GetDescription returns the provider-supplied hint text.
func (p *SElasticcacheParameter) GetDescription() string {
	return p.Tips
}

// GetModifiable reports whether the parameter can be changed; Qcloud
// parameters are always treated as modifiable here.
func (p *SElasticcacheParameter) GetModifiable() bool {
	return true
}

// GetForceRestart reports whether changing the parameter requires an
// instance restart (the API encodes this as the string "true").
func (p *SElasticcacheParameter) GetForceRestart() bool {
	return p.NeedRestart == "true"
}
package main

import "fmt"

// Crawl uses fetcher to recursively crawl
// pages starting with url, to a maximum of depth.
func Crawl(url string, depth int, fetcher Fetcher) {
	totalurl := []string{url} // all URLs seen in completed rounds
	cururl := []string{url}   // work list for the current round
	// insertUrl schedules iurl for the next round unless it was already
	// crawled in a previous round.
	// NOTE(review): only totalurl is checked, so a URL discovered twice
	// within the same round is appended (and fetched) twice.
	insertUrl := func(iurl string) {
		for t, turl := range totalurl {
			if turl == iurl {
				fmt.Println("already found: ", iurl, " at ", t)
				return
			}
		}
		cururl = append(cururl, iurl)
	}
	for i := 0; i < depth; i++ {
		urln := len(cururl)
		urls := make([]chan []string, urln)
		// Fetch this round's URLs concurrently; each goroutine reports its
		// discovered links on its own channel.
		for j, curl := range cururl {
			urls[j] = make(chan []string)
			go c2(curl, fetcher, urls[j])
		}
		cururl = make([]string, 0)
		for j := 0; j < urln; j++ {
			// ok is false when c2 closed the channel without sending
			// (i.e. the fetch failed).
			newurl, ok := <-urls[j]
			if ok {
				for _, nurl := range newurl {
					insertUrl(nurl)
				}
			}
		}
		totalurl = append(totalurl, cururl...)
	}
	fmt.Println(totalurl)
}

// c2 fetches a single URL and sends the discovered links on newurl; the
// deferred close makes the receiver's "ok" reflect success/failure.
func c2(url string, fetcher Fetcher, newurl chan []string) {
	defer close(newurl)
	// TODO: Fetch URLs in parallel.
	// TODO: Don't fetch the same URL twice.
	// This implementation doesn't do either:
	_, urls, err := fetcher.Fetch(url)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("found: %s\n", url)
	newurl <- urls
	return
}

func main() {
	Crawl("http://example.com/", 2, fetcher)
}

// NOTE(review): SimpleFetcher is not defined in this file; it is presumably
// supplied elsewhere (e.g. a Go-tour-style fake fetcher) — confirm.
var fetcher SimpleFetcher
package client

import "fmt"

// ResponseError pairs an underlying error with the HTTP status code of
// the response that produced it.
type ResponseError struct {
	Err        error
	StatusCode int
}

// Error implements the error interface, rendering both the status code
// and the wrapped error.
func (r *ResponseError) Error() string {
	return fmt.Sprintf("status %d: err %v", r.StatusCode, r.Err)
}

// Unwrap returns the wrapped error so callers can use errors.Is and
// errors.As on a ResponseError (the original type hid the cause).
func (r *ResponseError) Unwrap() error {
	return r.Err
}
package telegram

import (
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	"log"
	"strconv"
	"time"

	"github.com/c0re100/RadioBot/config"
	"github.com/c0re100/RadioBot/fb2k"
	"github.com/c0re100/RadioBot/utils"
	"github.com/c0re100/go-tdlib"
)

// groupStatus tracks the vote-to-skip state for the configured group chat.
type groupStatus struct {
	chatID       int64
	msgID        int64 // message ID of the active vote message
	vcID         int32
	duartion     int32 // [sic] remaining vote time in seconds
	Ptcps        []string
	voteSkip     []int32 // user IDs that have voted to skip
	isVoting     bool
	isLoadPtcps  bool
	lastVoteTime int64 // unix time of the last vote start (rate limiting)
}

var (
	grpStatus = &groupStatus{chatID: config.GetChatID()}
)

// GetQueue get queue song list
func GetQueue() []int {
	return config.GetStatus().GetQueue()
}

// GetRecent get recent song list
func GetRecent() []int {
	return config.GetStatus().GetRecent()
}

// getUserIDHash returns the hex-encoded SHA-1 of the decimal user ID.
func getUserIDHash(uID int32) string {
	h := sha1.New()
	h.Write([]byte(strconv.FormatInt(int64(uID), 10)))
	bs := h.Sum(nil)
	return hex.EncodeToString(bs)
}

// startVote begins a skip-vote in the configured chat after a series of
// eligibility checks (voting enabled, voice chat active, rate limit,
// participant-only restriction), then posts the vote keyboard and
// schedules periodic updates.
func startVote(chatID, msgID int64, userID int32) {
	if chatID != config.GetChatID() {
		return
	}
	if !config.IsVoteEnabled() {
		msgText := tdlib.NewInputMessageText(tdlib.NewFormattedText("This group is not allowed to vote.", nil), true, true)
		bot.SendMessage(chatID, 0, msgID, nil, nil, msgText)
		return
	}
	if !config.IsWebEnabled() {
		c, err := userBot.GetChat(chatID)
		if err != nil {
			log.Println(err)
			return
		}
		if c.VoiceChatGroupCallId == 0 {
			msgText := tdlib.NewInputMessageText(tdlib.NewFormattedText("This group do not have a voice chat.", nil), true, true)
			bot.SendMessage(chatID, 0, msgID, nil, nil, msgText)
			return
		}
		// Preload all users
		_, _ = userBot.LoadGroupCallParticipants(c.VoiceChatGroupCallId, 5000)
	}
	hashedID := getUserIDHash(userID)
	if config.IsPtcpsOnly() {
		if !utils.ContainsString(grpStatus.Ptcps, hashedID) {
			msgText := tdlib.NewInputMessageText(tdlib.NewFormattedText("Only users which are in a voice chat can vote!", nil), true, true)
			bot.SendMessage(chatID, 0, msgID, nil, nil, msgText)
			return
		}
	}
	if grpStatus.isVoting {
		msgText := tdlib.NewInputMessageText(tdlib.NewFormattedText("Vote in progress...", nil), true, true)
		bot.SendMessage(chatID, 0, msgID, nil, nil, msgText)
		return
	}
	// Rate limit: refuse a new vote until the release period has elapsed.
	if time.Now().Unix() < grpStatus.lastVoteTime+config.GetReleaseTime() {
		msgText := tdlib.NewInputMessageText(tdlib.NewFormattedText("Skip a song was voted too recently...", nil), true, true)
		bot.SendMessage(chatID, 0, msgID, nil, nil, msgText)
		return
	}
	voteKb := tdlib.NewReplyMarkupInlineKeyboard([][]tdlib.InlineKeyboardButton{
		{
			*tdlib.NewInlineKeyboardButton("Yes - 1", tdlib.NewInlineKeyboardButtonTypeCallback([]byte("vote_skip"))),
		},
	})
	msgText := tdlib.NewInputMessageText(tdlib.NewFormattedText("Skip a song?", nil), true, true)
	m, err := bot.SendMessage(chatID, 0, msgID, nil, voteKb, msgText)
	if err != nil {
		log.Println("Can't send message.")
		return
	}
	grpStatus.isVoting = true
	grpStatus.duartion = config.GetVoteTime()
	grpStatus.msgID = m.Id
	grpStatus.lastVoteTime = time.Now().Unix()
	updateTime := config.GetUpdateTime()
	// The initiator's vote counts immediately.
	if !utils.ContainsInt32(grpStatus.voteSkip, userID) {
		grpStatus.voteSkip = append(grpStatus.voteSkip, userID)
	}
	updateVote(chatID, m.Id, false)
	// Wait N seconds
	time.Sleep(time.Duration(updateTime) * time.Second)
	addVoteJob(chatID, m.Id, updateTime)
	if !sch.IsRunning() {
		log.Println("Starting scheduler...")
		startScheduler()
	}
}

// updateVote refreshes the vote message with the current count and time
// left; when invoked by the scheduler (isAuto) it also decrements the
// remaining time and ends the vote at zero.
func updateVote(chatID, msgID int64, isAuto bool) {
	if isAuto {
		grpStatus.duartion -= config.GetUpdateTime()
	}
	if grpStatus.duartion <= 0 {
		endVote(chatID, msgID)
		return
	}
	voteKb := tdlib.NewReplyMarkupInlineKeyboard([][]tdlib.InlineKeyboardButton{
		{
			*tdlib.NewInlineKeyboardButton(fmt.Sprintf("Yes - %v", len(grpStatus.voteSkip)), tdlib.NewInlineKeyboardButtonTypeCallback([]byte("vote_skip"))),
		},
	})
	msgText := tdlib.NewInputMessageText(tdlib.NewFormattedText(fmt.Sprintf("Skip a song?\n"+
		"Vote count: %v\n"+
		"Vote timeleft: %v second(s)", len(grpStatus.voteSkip), grpStatus.duartion), nil), true, true)
	bot.EditMessageText(chatID, msgID, voteKb, msgText)
}

// resetVote clears all per-vote state and cancels the countdown job.
func resetVote() {
	sch.RemoveByTag("timeleft")
	grpStatus.isLoadPtcps = false
	grpStatus.isVoting = false
	grpStatus.duartion = 0
	grpStatus.voteSkip = []int32{}
}

// finalizeVote compares the yes-vote percentage against the configured
// success rate and, on success, skips to the next (or next queued) song.
func finalizeVote(chatID, msgID int64, ptcpCount int32) {
	percentage := float64(len(grpStatus.voteSkip)) / float64(ptcpCount) * 100
	status := "Failed"
	if percentage >= config.GetSuccessRate() {
		status = "Succeed"
	}
	msgText := tdlib.NewInputMessageText(tdlib.NewFormattedText(fmt.Sprintf("Skip a song?\n"+
		"Vote count: %v\n"+
		"Vote Ended!\n\n"+
		"Status: %v", len(grpStatus.voteSkip), status), nil), true, true)
	bot.EditMessageText(chatID, msgID, nil, msgText)
	resetVote()
	if status == "Succeed" {
		fb2k.SetKillSwitch()
		if len(GetQueue()) == 0 {
			fb2k.PlayNext()
		} else {
			fb2k.PlaySelected(GetQueue()[0])
		}
	}
}

// endVote determines the participant count (from the live voice chat, or
// from the cached participant list in web mode) and hands off to
// finalizeVote.
func endVote(chatID, msgID int64) {
	vs := grpStatus
	msgText := tdlib.NewInputMessageText(tdlib.NewFormattedText(fmt.Sprintf("Skip a song?\n"+
		"Vote count: %v\n"+
		"Vote Ended!\n\n"+
		"Status: Generating vote results...", len(vs.voteSkip)), nil), true, true)
	bot.EditMessageText(chatID, vs.msgID, nil, msgText)
	if !config.IsWebEnabled() {
		c, err := userBot.GetChat(chatID)
		if err != nil {
			resetVote()
			log.Println(err)
			return
		}
		if c.VoiceChatGroupCallId == 0 {
			resetVote()
			log.Println("No group call currently.")
			return
		}
		vc, err := userBot.GetGroupCall(c.VoiceChatGroupCallId)
		if err != nil {
			resetVote()
			log.Println(err)
			return
		}
		finalizeVote(chatID, msgID, vc.ParticipantCount)
	} else {
		finalizeVote(chatID, msgID, int32(len(grpStatus.Ptcps)))
	}
}

// setUserVote handles a "vote_skip" callback query: it enforces the
// group-membership and participant-only rules, records the vote and
// refreshes the vote message.
func setUserVote(chatID, msgID int64, userID int32, queryID tdlib.JSONInt64) {
	if config.IsJoinNeeded() {
		cm, err := bot.GetChatMember(config.GetChatID(), userID)
		if err != nil {
			bot.AnswerCallbackQuery(queryID, "Failed to fetch chat info! Please try again later~", true, "", 10)
			return
		}
		if cm.Status.GetChatMemberStatusEnum() == "chatMemberStatusLeft" {
			bot.AnswerCallbackQuery(queryID, "Only users which are in the group can vote!", true, "", 10)
			return
		}
	}
	if utils.ContainsInt32(grpStatus.voteSkip, userID) {
		bot.AnswerCallbackQuery(queryID, "You're already vote!", false, "", 45)
		return
	}
	// NOTE(review): this branch rejects every callback vote whenever
	// participants-only mode is on, without checking membership in
	// grpStatus.Ptcps — confirm this is intentional.
	if config.IsPtcpsOnly() {
		bot.AnswerCallbackQuery(queryID, "Only users which are in a voice chat can vote!", false, "", 5)
		return
	}
	AddVote(userID)
	updateVote(chatID, msgID, false)
}

// AddVote add user to vote list
func AddVote(userID int32) {
	if !utils.ContainsInt32(grpStatus.voteSkip, userID) {
		grpStatus.voteSkip = append(grpStatus.voteSkip, userID)
	}
}
package main

import "fmt"

// Demonstrates Go array value semantics: assigning one array to another
// copies every element, so mutating the copy leaves the original intact.
func main() {
	fmt.Println("")

	prev := [...]string{"The Wolf Hall", "Circee", "The Body Mass"}

	books := prev // arrays are values: full element-wise copy
	for i, title := range prev {
		books[i] = title + " 2nd Ed."
	}

	fmt.Printf("prevBooks: %#v\n", prev)
	fmt.Printf("books: %#v\n", books)
}
package main

import (
	"bufio"
	"fmt"
	"os"
)

var writer = bufio.NewWriter(os.Stdout)
var reader = bufio.NewReader(os.Stdin)

// printf writes formatted output through the buffered writer.
func printf(f string, a ...interface{}) {
	fmt.Fprintf(writer, f, a...)
}

// scanf reads formatted input through the buffered reader.
func scanf(f string, a ...interface{}) {
	fmt.Fscanf(reader, f, a...)
}

// endOfWorldNums[k] holds the k-th positive integer (1-based) whose
// decimal digits contain "666" as a substring.
var endOfWorldNums = make([]int, 10004)

// checkThreepleSix reports whether n's decimal representation contains
// three consecutive sixes: it slides a three-digit window over n by
// repeated division and checks each window for 666.
func checkThreepleSix(n int) bool {
	for rest := n; rest > 0; rest /= 10 {
		if rest%1000 == 666 {
			return true
		}
	}
	return false
}

// fillNums precomputes the first 10000 "apocalyptic" numbers, starting
// the scan at 666 (the smallest possible match).
func fillNums() {
	found := 0
	for candidate := 666; found < 10000; candidate++ {
		if checkThreepleSix(candidate) {
			found++
			endOfWorldNums[found] = candidate
		}
	}
}

func main() {
	defer writer.Flush()
	fillNums()
	var n int
	scanf("%d\n", &n)
	printf("%d\n", endOfWorldNums[n])
}
package gomo

import "testing"

// TestImplicitCredentials verifies that implicit credentials report the
// implicit OAuth grant type.
func TestImplicitCredentials(t *testing.T) {
	var c implicitCredentials
	if got := c.grantType(); got != implicitGrantType {
		t.Error("Incorrect grant type")
	}
}
package models import ( "../utils" ) type Link struct { Id uint `json:"id"` Url string `json:"url"` Desc string `json:"desc"` Title string `json:"title"` Avatar string `json:"avatar"` } func GetFriendLinks() ([]Link) { data := []Link{} // 从数据库中取回数据 rows, err := db.Query("SELECT `id`, `url`, `desc`, `title`, `avatar` FROM `link`") defer rows.Close() utils.CheckErr(err) var ( id uint url string desc string title string avatar string ) for rows.Next() { err = rows.Scan(&id, &url, &desc, &title, &avatar) utils.CheckErr(err) data = append(data, Link{id, url, desc, title, avatar}) } return data } func AddFriend(url string, desc string, title string, avatar string) bool { if url == "" || desc == "" || title == "" || avatar == "" { return false } var sql string = "INSERT INTO `link` (`url`, `desc`, `title`, `avatar`) VALUES (?, ?, ?, ?)" _, err := db.Exec(sql, url, desc, title, avatar) utils.CheckErr(err) return true } func EditFriend(id int, url string, desc string, title string, avatar string) bool { if id == 0 || url == "" || desc == "" || title == "" || avatar == "" { return false } var sql string ="UPDATE `link` SET `url` = ?, `desc` = ?, `title` = ?, `avatar` = ? WHERE `id` = ?" _, err := db.Exec(sql, url, desc, title, avatar, id) utils.CheckErr(err) return true } func DelFriend(id int) bool { if id == 0 { return false } var sql string = "DELETE FROM `link` WHERE `id` = ?" _, err := db.Exec(sql, id) utils.CheckErr(err) return true }
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package app

import (
	"github.com/golang/glog"
	"github.com/kubernetes-incubator/metrics-server/metrics/options"
	metricsink "github.com/kubernetes-incubator/metrics-server/metrics/sinks/metric"
	nodemetricsstorage "github.com/kubernetes-incubator/metrics-server/metrics/storage/nodemetrics"
	podmetricsstorage "github.com/kubernetes-incubator/metrics-server/metrics/storage/podmetrics"
	"k8s.io/apimachinery/pkg/apimachinery/announced"
	"k8s.io/apimachinery/pkg/apimachinery/registered"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	"k8s.io/apiserver/pkg/registry/rest"
	genericapiserver "k8s.io/apiserver/pkg/server"
	v1listers "k8s.io/client-go/listers/core/v1"
	"k8s.io/metrics/pkg/apis/metrics"
	"k8s.io/metrics/pkg/apis/metrics/install"
	"k8s.io/metrics/pkg/apis/metrics/v1beta1"
)

// Shared scheme/codec machinery for the metrics API group registration.
var (
	groupFactoryRegistry = make(announced.APIGroupFactoryRegistry)
	registry             = registered.NewOrDie("")
	Scheme               = runtime.NewScheme()
	Codecs               = serializer.NewCodecFactory(Scheme)
)

// installMetricsAPIs registers the metrics API group (node and pod
// metrics, served from metricSink and the given listers) on the generic
// API server g.
func installMetricsAPIs(s *options.HeapsterRunOptions, g *genericapiserver.GenericAPIServer,
	metricSink *metricsink.MetricSink,
	nodeLister v1listers.NodeLister, podLister v1listers.PodLister) {

	install.Install(groupFactoryRegistry, registry, Scheme)

	// we need to add the options to empty v1
	metav1.AddToGroupVersion(Scheme, schema.GroupVersion{Group: "", Version: "v1"})

	// Register the unversioned meta types so status/discovery responses encode.
	unversioned := schema.GroupVersion{Group: "", Version: "v1"}
	Scheme.AddUnversionedTypes(unversioned,
		&metav1.Status{},
		&metav1.APIVersions{},
		&metav1.APIGroupList{},
		&metav1.APIGroup{},
		&metav1.APIResourceList{},
	)

	apiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(metrics.GroupName, registry, Scheme, metav1.ParameterCodec, Codecs)
	apiGroupInfo.GroupMeta.GroupVersion = v1beta1.SchemeGroupVersion

	// REST storage backends for the two resources of the group.
	nodemetricsStorage := nodemetricsstorage.NewStorage(metrics.Resource("nodemetrics"), metricSink, nodeLister)
	podmetricsStorage := podmetricsstorage.NewStorage(metrics.Resource("podmetrics"), metricSink, podLister)
	heapsterResources := map[string]rest.Storage{
		"nodes": nodemetricsStorage,
		"pods":  podmetricsStorage,
	}
	apiGroupInfo.VersionedResourcesStorageMap[v1beta1.SchemeGroupVersion.Version] = heapsterResources

	if err := g.InstallAPIGroup(&apiGroupInfo); err != nil {
		glog.Fatalf("Error in registering group versions: %v", err)
	}
}
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package policy

import (
	"context"
	"time"

	"chromiumos/tast/common/fixture"
	"chromiumos/tast/common/pci"
	"chromiumos/tast/common/policy"
	"chromiumos/tast/common/policy/fakedms"
	"chromiumos/tast/common/servo"
	"chromiumos/tast/ctxutil"
	"chromiumos/tast/errors"
	"chromiumos/tast/local/chrome"
	"chromiumos/tast/local/policyutil"
	"chromiumos/tast/local/power"
	"chromiumos/tast/local/power/charge"
	"chromiumos/tast/testing"
	"chromiumos/tast/testing/hwdep"
)

// init registers the DeviceBatteryChargeMode test with the Tast framework.
func init() {
	testing.AddTest(&testing.Test{
		Func:         DeviceBatteryChargeMode,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Tests the DeviceBatteryCharge policies that extend battery life",
		Contacts: []string{
			"chromeos-oem-services@google.com", // Use team email for tickets.
			"bkersting@google.com",
			"lamzin@google.com",
		},
		SoftwareDeps: []string{"wilco", "chrome"},
		Timeout:      25 * time.Minute,
		// Disabled due to <1% pass rate over 30 days. See b/241942929
		//Attr: []string{"group:wilco_bve"},
		HardwareDeps: hwdep.D(hwdep.Battery()),
		Vars:         []string{"servo"},
		Fixture:      fixture.ChromeEnrolledLoggedIn,
		SearchFlags: []*testing.StringPair{
			pci.SearchFlag(&policy.DeviceBatteryChargeMode{}, pci.VerifiedFunctionalityOS),
			pci.SearchFlag(&policy.DeviceBatteryChargeCustomStopCharging{}, pci.VerifiedValue),
			pci.SearchFlag(&policy.DeviceBatteryChargeCustomStartCharging{}, pci.VerifiedValue),
		},
	})
}

// DeviceBatteryChargeMode verifies DeviceBatteryCharge policies, a group of power management policies, dynamically controls
// battery charging state to minimize stress and wear-out due to the exposure of rapid charging/discharging cycles and extend
// the battery life. If the policy is set then battery charge mode will be applied on the DUT. Leaving the policy unset applies
// the standard battery charge mode.
//
// The Policy takes either one of the five values ranging from 1 to 5:
// 1 = Fully charge battery at a standard rate.
// 2 = Charge battery using fast charging technology.
// 3 = Charge battery for devices that are primarily connected to an external power source.
// 4 = Adaptive charge battery based on battery usage pattern.
// 5 = Charge battery while it is within a fixed range.
// If Custom battery charge mode (5) is selected, then DeviceBatteryChargeCustomStartCharging and
// DeviceBatteryChargeCustomStopCharging values need to be specified alongside.
func DeviceBatteryChargeMode(ctx context.Context, s *testing.State) {
	const (
		// Minimum battery percentage requires in DUT for successful sub testing.
		// Subtest "custom_charge_outside_range" doesn't charge DUT if the battery is above 80%.
		minLevel = 81
		// DUT generally has three power state [Charging, Full & Discharging] and we are interested in checking the
		// Discharging state while connecting it to a constant power supply. That's why it is logical to keep
		// a reasonable buffer from the Full state (100%) to have a proper distinction during sub testing.
		maxLevel = 95
	)

	cr := s.FixtValue().(chrome.HasChrome).Chrome()
	fdms := s.FixtValue().(fakedms.HasFakeDMS).FakeDMS()

	// Shorten deadline to leave time for cleanup.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 5*time.Second)
	defer cancel()

	srvo, err := servo.NewDirect(ctx, s.RequiredVar("servo"))
	if err != nil {
		s.Fatal("Failed to connect to servo: ", err)
	}
	defer srvo.Close(cleanupCtx)

	// Putting battery within testable range.
	if err := charge.EnsureBatteryWithinRange(ctx, cr, srvo, minLevel, maxLevel); err != nil {
		s.Fatalf("Failed to ensure battery percentage within %d%% to %d%%: %v", minLevel, maxLevel, err)
	}

	// Connect DUT with power supply.
	if err := srvo.SetPDRole(ctx, servo.PDRoleSrc); err != nil {
		s.Fatal("Failed to switch servo_pd_role to src: ", err)
	}

	// Table-driven subtests: each case serves a policy configuration and
	// states the expected AC-line and discharging status.
	for _, tc := range []struct {
		name            string
		policies        []policy.Policy
		wantOnAc        bool
		wantDischarging bool
	}{
		{
			name:            "unset",
			policies:        []policy.Policy{},
			wantOnAc:        true,
			wantDischarging: false,
		},
		{
			name: "standard_charge",
			policies: []policy.Policy{&policy.DeviceBatteryChargeMode{
				Val: 1,
			}},
			wantOnAc:        true,
			wantDischarging: false,
		},
		{
			name: "fast_charge",
			policies: []policy.Policy{&policy.DeviceBatteryChargeMode{
				Val: 2,
			}},
			wantOnAc:        true,
			wantDischarging: false,
		},
		{
			name: "primarily_ac",
			policies: []policy.Policy{&policy.DeviceBatteryChargeMode{
				Val: 3,
			}},
			wantOnAc:        true,
			wantDischarging: false,
		},
		{
			name: "adaptive_charge",
			policies: []policy.Policy{&policy.DeviceBatteryChargeMode{
				Val: 4,
			}},
			wantOnAc:        true,
			wantDischarging: false,
		},
		{
			// Battery is above the stop threshold (80%), so the DUT should
			// discharge even though AC is connected.
			name: "custom_charge_outside_range",
			policies: []policy.Policy{&policy.DeviceBatteryChargeMode{
				Val: 5,
			},
				&policy.DeviceBatteryChargeCustomStartCharging{Val: 40},
				&policy.DeviceBatteryChargeCustomStopCharging{Val: 80},
			},
			wantOnAc:        true,
			wantDischarging: true,
		},
		{
			name: "custom_charge_within_range",
			policies: []policy.Policy{&policy.DeviceBatteryChargeMode{
				Val: 5,
			},
				&policy.DeviceBatteryChargeCustomStartCharging{Val: 40},
				&policy.DeviceBatteryChargeCustomStopCharging{Val: 100},
			},
			wantOnAc:        true,
			wantDischarging: false,
		},
	} {
		s.Run(ctx, tc.name, func(ctx context.Context, s *testing.State) {
			// Perform cleanup.
			if err := policyutil.ResetChrome(ctx, fdms, cr); err != nil {
				s.Fatal("Failed to clean up: ", err)
			}

			// Update policies.
			if err := policyutil.ServeAndRefresh(ctx, fdms, cr, tc.policies); err != nil {
				s.Fatal("Failed to update policies: ", err)
			}

			if err := testing.Poll(ctx, func(ctx context.Context) error {
				// Checking current battery status and power state.
				status, err := power.GetStatus(ctx)
				if err != nil {
					return testing.PollBreak(errors.Wrap(err, "failed to get battery status"))
				}
				if status.LinePowerConnected != tc.wantOnAc {
					return errors.Errorf("unexpected AC supply: want %v; got %v", tc.wantOnAc, status.LinePowerConnected)
				}
				if status.BatteryDischarging != tc.wantDischarging {
					return errors.Errorf("unexpected discharging state: want %v; got %v", tc.wantDischarging, status.BatteryDischarging)
				}
				return nil
			}, &testing.PollOptions{
				Timeout: 30 * time.Second,
			}); err != nil {
				s.Error("Failed to wait for expected battery state: ", err)
			}
		})
	}
}
package commander // The following are ACTION functions, chose one if you like it. type ( Action func(c Context) _Result // default internal function ActionResult func() _Result ActionNormal func(c Context) error ActionSimple func(c Context) ActionNative func() ActionNativeSimple func() error ActionNativeDocopt func(m map[string]interface{}) error ) // parseAction handle function to Action type func parseAction(arg interface{}) (a Action) { switch action := arg.(type) { case func(c Context) _Result: // Action a = action case func() _Result: // ActionResult a = func(c Context) _Result { return action() } case func(c Context) error: // ActionNormal a = func(c Context) _Result { if err := action(c); err != nil { return newResultError(err) } return resultPass() } case func(c Context): // ActionSimple a = func(c Context) _Result { action(c) return resultPass() } case func(): // ActionNative a = func(c Context) _Result { action() return resultPass() } case func() error: // ActionNativeSimple a = func(c Context) _Result { if err := action(); err != nil { return newResultError(err) } return resultPass() } case func(m map[string]interface{}) error: // ActionNativeDocopt a = func(c Context) _Result { if err := action(c.Map()); err != nil { return newResultError(err) } return resultPass() } default: a = nil } return } // emptyAction if action is empty func emptyAction(a Action) bool { return a == nil }
// Copyright (C) 2017 Michał Matczuk
// Use of this source code is governed by an AGPL-style
// license that can be found in the LICENSE file.

package server

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net"
	"net/http"
	"strconv"
	"strings"
	"time"

	tunnel "github.com/NodeFactoryIo/vedran/pkg/http-tunnel"
	"github.com/NodeFactoryIo/vedran/pkg/http-tunnel/proto"
	"github.com/inconshreveable/go-vhost"
	log "github.com/sirupsen/logrus"
	"golang.org/x/net/http2"
)

// Server is responsible for proxying public connections to the client over a
// tunnel connection.
type Server struct {
	*registry
	config      *serverData
	listener    net.Listener // control-connection listener
	connPool    *connPool    // pool of HTTP/2 client connections
	httpClient  *http.Client // used for handshakes / proxying over the pool
	logger      *log.Entry
	vhostMuxer  *vhost.TLSMuxer
	PortPool    Pooler
	authHandler func(string) bool // validates client auth tokens
}

// ServerConfig defines all data needed for running the Server.
type ServerConfig struct {
	// Address is TCP address to listen for client connections. If empty ":0" is used.
	Address string

	// PortPool assigns and release ports
	PortPool Pooler

	// AuthHandler is function validates provided auth token
	AuthHandler func(string) bool

	// Logger is optional logger. If nil logging is disabled.
	Logger *log.Entry
}

// serverData is the validated, internal form of ServerConfig.
type serverData struct {
	addr        string
	listener    net.Listener
	logger      *log.Entry
	authHandler func(string) bool
}

// NewServer creates a new Server based on configuration.
// Caller must invoke Start() on returned instance in order to start server func NewServer(config *ServerConfig) (*Server, error) { serverData := &serverData{} if config.Address == "" { return nil, errors.New("provided empty address") } serverData.addr = config.Address logger := config.Logger if logger == nil { l := log.New() l.SetLevel(log.ErrorLevel) logger = log.NewEntry(l) } serverData.logger = logger if config.AuthHandler == nil { return nil, errors.New("provided auth handler is nil") } serverData.authHandler = config.AuthHandler return newServer(serverData, config.PortPool) } func newServer(serverData *serverData, pPool Pooler) (*Server, error) { listener, err := listener(serverData) if err != nil { return nil, fmt.Errorf("listener failed: %s", err) } s := &Server{ registry: newRegistry(serverData.logger), PortPool: pPool, config: serverData, listener: listener, logger: serverData.logger, } s.authHandler = serverData.authHandler t := &http2.Transport{} pool := newConnPool(t, s.disconnected) t.ConnPool = pool s.connPool = pool s.httpClient = &http.Client{ Transport: t, CheckRedirect: func(req *http.Request, via []*http.Request) error { return http.ErrUseLastResponse }, } return s, nil } func listener(config *serverData) (net.Listener, error) { if config.listener != nil { return config.listener, nil } if config.addr == "" { return nil, errors.New("missing addr") } return net.Listen("tcp", config.addr) } // disconnected clears resources used by client, it's invoked by connection pool // when client goes away. 
func (s *Server) disconnected(identifier string) {
	ilogger := s.logger.WithFields(log.Fields{"identifier": identifier})
	ilogger.Debug("disconnected")
	i := s.registry.clear(identifier)
	if i == nil {
		ilogger.Errorf("ERROR ON DISCONNECT (registry not found)")
		return
	}
	iclogger := ilogger.WithFields(log.Fields{"client-name": i.ClientName})
	iclogger.Debug("DISCONNECT")
	// Close every listener opened for the client and release its ports.
	for _, l := range i.Listeners {
		iclogger.Debugf("close listener for %v", l.Addr())
		_ = l.Close()
		_ = s.PortPool.Release(i.ClientName)
	}
}

// Start starts accepting connections form clients. For accepting http traffic
// from end users server must be run as handler on http server.
func (s *Server) Start() {
	addr := s.listener.Addr().String()
	alogger := s.logger.WithFields(log.Fields{"address": addr})
	for {
		conn, err := s.listener.Accept()
		if err != nil {
			// A closed listener is the normal shutdown path; anything else
			// is logged and accepting continues.
			if strings.Contains(err.Error(), "use of closed network connection") {
				alogger.Debug("control connection listener closed")
				return
			}
			alogger.Error("accept of control connection failed", err)
			continue
		}
		if err := tunnel.KeepAlive(conn); err != nil {
			alogger.Error("TCP keepalive for control connection failed", err)
		}
		go s.handleClient(conn)
	}
}

// TunnelExt is the handshake payload a client sends: its name plus the
// tunnels it wants opened.
type TunnelExt struct {
	IdName  string
	Tunnels map[string]*proto.Tunnel
}

// handleClient performs the control-connection handshake: it adds the
// connection to the pool, issues a CONNECT request back over it, checks
// the auth token, decodes the requested tunnels and registers them.
// Any failure jumps to the reject path, which cleans up the connection.
func (s *Server) handleClient(conn net.Conn) {
	alogger := s.logger.WithFields(log.Fields{"address": conn.RemoteAddr()})
	alogger.Debug("try connect")

	var (
		conid      string
		req        *http.Request
		resp       *http.Response
		tunnels    TunnelExt
		err        error
		inConnPool bool
		token      string
	)

	conid = conn.RemoteAddr().String()
	s.PreSubscribe(conid)
	if err = conn.SetDeadline(time.Time{}); err != nil {
		alogger.Error("setting infinite deadline failed", err)
		goto reject
	}
	if err := s.connPool.AddConn(conn, conid); err != nil {
		alogger.Error("adding connection failed", err)
		goto reject
	}
	inConnPool = true
	req, err = http.NewRequest(http.MethodConnect, s.connPool.URL(conid), nil)
	if err != nil {
		alogger.Error("handshake request creation failed", err)
		goto reject
	}
	{
		ctx, cancel := context.WithTimeout(context.Background(), tunnel.DefaultTimeout)
		defer cancel()
		req = req.WithContext(ctx)
	}
	resp, err = s.httpClient.Do(req)
	if err != nil {
		alogger.Error("handshake failed 1 ", err)
		goto reject
	}
	if resp.StatusCode != http.StatusOK {
		err = fmt.Errorf("Status %s", resp.Status)
		alogger.Error("handshake failed 2 ", err)
		goto reject
	}
	// needs additional auth
	if s.authHandler != nil {
		token = resp.Header.Get("X-Auth-Header")
		if token == "" {
			err = errors.New("Auth header missing")
			alogger.Error("handshake failed ", err)
			goto reject
		}
		authorized := s.authHandler(token)
		if !authorized {
			err = errors.New("Unauthorized request")
			alogger.Error("handshake failed", err)
			goto reject
		}
	}
	if resp.ContentLength == 0 {
		err = fmt.Errorf("tunnels Content-Legth: 0")
		alogger.Error("handshake failed 3", err)
		goto reject
	}
	// Cap the handshake body at 124 KiB to bound memory use.
	if err = json.NewDecoder(&io.LimitedReader{R: resp.Body, N: 126976}).Decode(&tunnels); err != nil {
		alogger.Error("handshake failed 4", err)
		goto reject
	}
	alogger.Debugf("client name has been set to %s and id %s", tunnels.IdName, conid)
	s.Subscribe(tunnels.IdName, conid)
	if len(tunnels.Tunnels) == 0 {
		err = fmt.Errorf("no tunnels")
		alogger.Error("handshake failed 5", err)
		goto reject
	}
	if err = s.addTunnels(tunnels.IdName, tunnels.Tunnels); err != nil {
		alogger.Error("handshake failed 6", err)
		goto reject
	}
	alogger.Debugf("%s connected", tunnels.IdName)
	return

reject:
	alogger.Debug("rejected")
	if inConnPool {
		s.notifyError(err, conid)
		// NOTE(review): the connection was added to the pool under conid, but
		// is deleted here under tunnels.IdName (empty when the handshake
		// failed before decoding) — confirm which key DeleteConn expects.
		s.connPool.DeleteConn(tunnels.IdName)
	}
	conn.Close()
}

// notifyError tries to send error to client.
func (s *Server) notifyError(serverError error, conid string) { if serverError == nil { return } req, err := http.NewRequest(http.MethodConnect, s.connPool.URL(conid), nil) if err != nil { s.logger.Errorf("client error notification failed for %s with %v", conid, err) return } req.Header.Set(proto.HeaderError, serverError.Error()) ctx, cancel := context.WithTimeout(context.Background(), tunnel.DefaultTimeout) defer cancel() _, _ = s.httpClient.Do(req.WithContext(ctx)) } func (s *Server) adrListenRegister(in string, cid string, portname string) (string, error) { inarr := strings.Split(in, ":") host := inarr[0] port := inarr[1] if port == "AUTO" { port, err := s.PortPool.Acquire(cid, portname) if err != nil { return "", fmt.Errorf("Error on acquire port from port pool:%s", err) } addr := host + ":" + strconv.Itoa(port) s.logger.WithFields(log.Fields{ "client-id": cid, "portname": portname, "address": addr, }).Debug("address auto assign") return addr, nil } return in, nil } // addTunnels invokes addHost or addListener based on data from proto.Tunnel. If // a tunnel cannot be added whole batch is reverted. 
func (s *Server) addTunnels(cname string, tunnels map[string]*proto.Tunnel) error { i := &RegistryItem{ Hosts: []*HostAuth{}, Listeners: []net.Listener{}, ClientName: cname, } var err error var portnames []string for name, t := range tunnels { portnames = append(portnames, name) cplogger := s.logger.WithFields(log.Fields{"client-id": cname, "port-name": name}) switch t.Protocol { case proto.HTTP: i.Hosts = append(i.Hosts, &HostAuth{t.Host, NewAuth(t.Auth)}) case proto.TCP, proto.TCP4, proto.TCP6, proto.UNIX: var l net.Listener addr, err := s.adrListenRegister(t.Addr, cname, name) if err != nil { goto rollback } l, err = net.Listen(t.Protocol, addr) if err != nil { goto rollback } cplogger.Debugf("open listener for address %v", l.Addr()) i.Listeners = append(i.Listeners, l) case proto.SNI: if s.vhostMuxer == nil { err = fmt.Errorf("unable to configure SNI for tunnel %s: %s", name, t.Protocol) goto rollback } var l net.Listener l, err = s.vhostMuxer.Listen(t.Host) if err != nil { goto rollback } cplogger.Debugf("add SNI vhost for host %s", t.Host) i.Listeners = append(i.Listeners, l) default: err = fmt.Errorf("unsupported protocol for tunnel %s: %s", name, t.Protocol) goto rollback } } i.ListenerNames = portnames err = s.set(i, cname) if err != nil { goto rollback } for k, l := range i.Listeners { go s.listen(l, i.ClientName, i.ListenerNames[k]) } return nil rollback: for _, l := range i.Listeners { l.Close() } return err } // Unsubscribe removes client from registry, disconnects client if already // connected and returns it's RegistryItem. func (s *Server) Unsubscribe(identifier string, idname string) *RegistryItem { s.connPool.DeleteConn(identifier) return s.registry.Unsubscribe(identifier, idname) } // Ping measures the RTT response time. 
func (s *Server) Ping(identifier string) (time.Duration, error) {
	return s.connPool.Ping(identifier)
}

// listen accepts connections on l and proxies each one to client cname over
// the control connection, tagging traffic with port name pname. It returns
// only when the listener is closed.
func (s *Server) listen(l net.Listener, cname string, pname string) {
	addr := l.Addr().String()
	cplogger := s.logger.WithFields(log.Fields{"client-name": cname, "port-name": pname})
	for {
		conn, err := l.Accept()
		if err != nil {
			// Listener shutdown is only observable through the error text;
			// these two messages are treated as a normal stop condition.
			if strings.Contains(err.Error(), "use of closed network connection") || strings.Contains(err.Error(), "listener closed") {
				cplogger.Errorf("listener closed for address %s", addr)
				return
			}
			// Any other accept error is assumed transient: log and retry.
			cplogger.Errorf("listener closed for address %s with error %v", addr, err)
			continue
		}
		msg := &proto.ControlMessage{
			Action:         proto.ActionProxy,
			ForwardedProto: l.Addr().Network(),
		}
		msg.ForwardedId = pname
		msg.ForwardedHost = l.Addr().String()
		// Keepalive setup is best effort: a failure is logged, not fatal.
		err = tunnel.KeepAlive(conn)
		cpclogger := cplogger.WithFields(log.Fields{"ctrl-msg": msg})
		if err != nil {
			cpclogger.Error("TCP keepalive for tunneled connection failed", err)
		}
		// Each accepted connection is proxied on its own goroutine so the
		// accept loop keeps running.
		go func() {
			if err := s.proxyConn(cname, conn, msg); err != nil {
				cpclogger.Error("proxy error", err)
			}
		}()
	}
}

// ServeHTTP proxies http connection to the client.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	resp, err := s.RoundTrip(r)
	if err == errUnauthorised {
		w.Header().Set("WWW-Authenticate", "Basic realm=\"User Visible Realm\"")
		http.Error(w, err.Error(), http.StatusUnauthorized)
		return
	}
	if err != nil {
		s.logger.WithFields(log.Fields{
			"addr": r.RemoteAddr,
			"host": r.Host,
			"url":  r.URL,
		}).Error("round trip failed", err)
		http.Error(w, err.Error(), http.StatusBadGateway)
		return
	}
	defer resp.Body.Close()
	copyHeader(w.Header(), resp.Header)
	w.WriteHeader(resp.StatusCode)
	transfer(w, resp.Body, s.logger.WithFields(log.Fields{
		"dir": "client to user",
		"dst": r.RemoteAddr,
		"src": r.Host,
	}))
}

// RoundTrip is http.RoundTripper implementation.
func (s *Server) RoundTrip(r *http.Request) (*http.Response, error) {
	// Look up the client subscribed for this host (vhost routing).
	identifier, auth, ok := s.Subscriber(r.Host)
	if !ok {
		return nil, errClientNotSubscribed
	}
	outr := r.WithContext(r.Context())
	if r.ContentLength == 0 {
		outr.Body = nil // Issue 16036: nil Body for http.Transport retries
	}
	outr.Header = cloneHeader(r.Header)
	// When the vhost is protected, require a matching token and strip it
	// before forwarding so the client never sees it.
	if auth != nil {
		token := r.Header.Get("X-Auth-Header")
		if auth.Token != token {
			return nil, errUnauthorised
		}
		outr.Header.Del("X-Auth-Header")
	}
	setXForwardedFor(outr.Header, r.RemoteAddr)
	scheme := r.URL.Scheme
	if scheme == "" {
		if r.TLS != nil {
			scheme = proto.HTTPS
		} else {
			scheme = proto.HTTP
		}
	}
	// Only set forwarding headers when no upstream proxy set them already.
	if r.Header.Get("X-Forwarded-Host") == "" {
		outr.Header.Set("X-Forwarded-Host", r.Host)
		outr.Header.Set("X-Forwarded-Proto", scheme)
	}
	msg := &proto.ControlMessage{
		Action:         proto.ActionProxy,
		ForwardedHost:  r.Host,
		ForwardedProto: scheme,
	}
	return s.proxyHTTP(identifier, outr, msg)
}

// proxyConn tunnels a raw (TCP/unix) connection to the client identified by
// identifier: user->client bytes flow through an io.Pipe into the control
// request body, client->user bytes come back in the response body.
func (s *Server) proxyConn(identifier string, conn net.Conn, msg *proto.ControlMessage) error {
	s.logger.WithFields(log.Fields{
		"identifier": identifier,
		"ctrlMsg":    msg,
	}).Debug("proxy connection")
	defer conn.Close()
	pr, pw := io.Pipe()
	defer pr.Close()
	defer pw.Close()
	req, err := s.connectRequest(identifier, msg, pr)
	if err != nil {
		return err
	}
	ctx, cancel := context.WithCancel(context.Background())
	req = req.WithContext(ctx)
	done := make(chan struct{})
	// Upstream copy runs concurrently; when the user side finishes, cancel
	// the request context to unblock the client side.
	go func() {
		transfer(pw, conn, log.WithContext(s.logger.Context).WithFields(log.Fields{
			"dir": "user to client",
			"dst": identifier,
			"src": conn.RemoteAddr(),
		}))
		cancel()
		close(done)
	}()
	resp, err := s.httpClient.Do(req)
	if err != nil {
		return fmt.Errorf("io error: %s", err)
	}
	defer resp.Body.Close()
	// Downstream copy happens on this goroutine until the client closes.
	transfer(conn, resp.Body, log.WithContext(s.logger.Context).WithFields(log.Fields{
		"dir": "client to user",
		"dst": conn.RemoteAddr(),
		"src": identifier,
	}))
	// Give the upstream goroutine a bounded window to finish.
	select {
	case <-done:
	case <-time.After(tunnel.DefaultTimeout):
	}
	s.logger.WithFields(log.Fields{
		"identifier": identifier,
		"ctrlMsg":    msg,
	}).Debug("proxy connection done")
	return nil
}

// proxyHTTP forwards a single HTTP request to the client over the control
// connection and returns the client's response.
func (s *Server) proxyHTTP(identifier string, r *http.Request, msg *proto.ControlMessage) (*http.Response, error) {
	s.logger.WithFields(log.Fields{
		"identifier": identifier,
		"ctrlMsg":    msg,
	}).Debug("proxy HTTP request")
	pr, pw := io.Pipe()
	defer pr.Close()
	defer pw.Close()
	req, err := s.connectRequest(identifier, msg, pr)
	if err != nil {
		return nil, fmt.Errorf("proxy request error: %s", err)
	}
	// The original request is serialized into the pipe on a separate
	// goroutine; countWriter tracks how many bytes were sent.
	go func() {
		cw := &countWriter{pw, 0}
		err := r.Write(cw)
		if err != nil {
			s.logger.WithFields(log.Fields{
				"identifier": identifier,
				"ctrlMsg":    msg,
			}).Error("proxy error", err)
		}
		s.logger.WithFields(log.Fields{
			"identifier": identifier,
			"bytes":      cw.count,
			"dir":        "user to client",
			"dst":        r.Host,
			"src":        r.RemoteAddr,
		}).Debug("transferred")
		if r.Body != nil {
			r.Body.Close()
		}
	}()
	resp, err := s.httpClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("io error: %s", err)
	}
	s.logger.WithFields(log.Fields{
		"identifier":  identifier,
		"ctrlMsg":     msg,
		"status code": resp.StatusCode,
	}).Debug("proxy HTTP done")
	return resp, nil
}

// connectRequest creates HTTP request to client with a given identifier having
// control message and data input stream, output data stream results from
// response the created request.
func (s *Server) connectRequest(cname string, msg *proto.ControlMessage, r io.Reader) (*http.Request, error) {
	conid := s.registry.GetID(cname)
	if conid == "" {
		return nil, errors.New("could not create request: ID not found")
	}
	req, err := http.NewRequest(http.MethodPut, s.connPool.URL(conid), r)
	if err != nil {
		return nil, fmt.Errorf("could not create request: %s", err)
	}
	msg.WriteToHeader(req.Header)
	return req, nil
}

// Addr returns network address clients connect to.
func (s *Server) Addr() string {
	if s.listener == nil {
		return ""
	}
	return s.listener.Addr().String()
}

// Stop closes the server.
func (s *Server) Stop() {
	s.logger.Debug("stop http-tunnel server")
	if s.listener != nil {
		s.listener.Close()
	}
}
package lib

import (
	"testing"

	"github.com/muesli/termenv"
	"github.com/stretchr/testify/require"
)

// TestPad checks that RPad pads based on the visible width of a styled
// string rather than its raw (ANSI-escaped) byte length.
func TestPad(t *testing.T) {
	s := termenv.String("gh").Foreground(profile.Convert(termenv.ANSIYellow)).String()
	require.Equal(t, s+" ", RPad(s, 4))
}

// TestOverlayDraw exercises drawing across segment boundaries, including a
// styled segment split mid-draw and advancing past leftover characters.
func TestOverlayDraw(t *testing.T) {
	var o Overlay
	// Empty overlay draws nothing.
	require.Equal(t, "", o.Drawer().Draw(2))
	o.Add("abc\ndef", nil)
	o.Add("ghi", termenv.ANSIYellow)
	o.Add("jkl\nmno\np", nil)
	d := o.Drawer()
	require.Equal(t, "abc", d.Draw(4))
	// Draw crosses from unstyled "def" into the styled "gh" prefix.
	require.Equal(t, "def"+termenv.String("gh").Foreground(profile.Convert(termenv.ANSIYellow)).String(), d.Draw(5))
	require.Equal(
		t,
		termenv.String("i").Foreground(profile.Convert(termenv.ANSIYellow)).String()+"jkl",
		d.Draw(5),
	)
	d.Advance()
	require.Equal(t, "mn", d.Draw(2))
	d.Advance() // skip remaining "o" on this line
	require.Equal(t, "p", d.Draw(2))
}

// s is a small fixture type; copy returns a shallow copy (the xs slice
// header is duplicated but its backing array is shared).
// NOTE(review): appears unused by the tests in this file — presumably used
// elsewhere in the package; verify before removing.
type s struct{ xs []int }

func (in *s) copy() *s {
	tmp := *in
	return &tmp
}

// TestOverlayMultiDraw verifies that each Drawer holds independent position
// state while a single Drawer consumes content as it draws.
func TestOverlayMultiDraw(t *testing.T) {
	var o Overlay
	o.Add(`ok`, nil)
	// Fresh drawers always start at the beginning.
	require.Equal(t, "ok", o.Drawer().Draw(2))
	require.Equal(t, "ok", o.Drawer().Draw(2))
	d := o.Drawer()
	require.Equal(t, "ok", d.Draw(2))
	// The same drawer has exhausted its content.
	require.Equal(t, "", d.Draw(2))
}
// isAnagram reports whether t is a rearrangement of the characters of s.
// It assumes both strings contain only lowercase ASCII letters 'a'..'z';
// any other character would index outside the 26-slot count table.
func isAnagram(s string, t string) bool {
	// Strings of different lengths can never be anagrams.
	if len(s) != len(t) {
		return false
	}
	// counts[i] = occurrences of byte 'a'+i seen in s minus those in t.
	// A fixed-size array avoids the heap allocation of make([]int, 26).
	var counts [26]int
	// Ranging over a string yields runes directly; the original's []rune(s)
	// conversions allocated a full copy of each string for no benefit.
	for _, ch := range s {
		counts[ch-'a']++
	}
	for _, ch := range t {
		if counts[ch-'a'] == 0 {
			// t uses this character more times than s does.
			return false
		}
		counts[ch-'a']--
	}
	// Any leftover count means s had characters t lacked.
	for _, c := range counts {
		if c != 0 {
			return false
		}
	}
	return true
}
package verification

import (
	"bytes"
	"errors"
	"io/ioutil"
	"net"
	"net/http"
	"reflect"
	"testing"
	"time"

	"github.com/fabric8-services/fabric8-common/log"
	"github.com/fabric8-services/fabric8-webhook/util"
	"github.com/goadesign/goa"
	goalogrus "github.com/goadesign/goa/logging/logrus"
)

// gs is the shared goa service used by every test in this file.
var gs *goa.Service

func init() {
	gs = goa.New("fabric8-webhook-test")
	// record HTTP request metrics in prometheus
	gs.WithLogger(goalogrus.New(log.Logger()))
}

// TestNew verifies that New fetches GitHub hook CIDRs through the (mocked)
// HTTP client and builds a service, and that a failed fetch surfaces an error.
func TestNew(t *testing.T) {
	type args struct {
		duration time.Duration
	}
	type fields struct {
		clientTransport util.RoundTripFunc
	}
	tests := []struct {
		name        string
		fields      fields
		args        args
		want        Service
		wantHookIPs []string
		wantErr     bool
	}{
		{
			name: "verification.New Test Positive",
			fields: fields{
				clientTransport: util.RoundTripFunc(func(req *http.Request) (*http.Response, error) {
					// Test request parameters
					return &http.Response{
						StatusCode: 200,
						// Send response to be tested
						Body: ioutil.NopCloser(bytes.NewBufferString(`{ "hooks": [ "192.30.252.0/22", "185.199.108.0/22", "140.82.112.0/20" ] }`)),
					}, nil
				}),
			},
			args: args{15 * time.Minute},
			want: Service(&service{
				hookIPs: nil,
				Service: gs,
				ticker:  time.NewTicker(15 * time.Minute),
			}),
			wantHookIPs: []string{(&net.IPNet{IP: net.IPv4(192, 30, 252, 0), Mask: net.IPv4Mask(255, 255, 252, 0)}).String(), (&net.IPNet{IP: net.IPv4(185, 199, 108, 0), Mask: net.IPv4Mask(255, 255, 252, 0)}).String(), (&net.IPNet{IP: net.IPv4(140, 82, 112, 0), Mask: net.IPv4Mask(255, 255, 240, 0)}).String()},
			wantErr:     false,
		},
		{
			name: "verification.New Test Negative",
			fields: fields{
				clientTransport: util.RoundTripFunc(func(req *http.Request) (*http.Response, error) {
					// Test request parameters
					return &http.Response{
						StatusCode: 400,
						// Send response to be tested
						Body: nil,
					}, errors.New("Mock Error Response")
				}),
			},
			args:        args{1 * time.Nanosecond},
			want:        nil,
			wantHookIPs: nil,
			wantErr:     true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			util.SetMockNetClient(tt.fields.clientTransport)
			got, err := New(gs, tt.args.duration)
			if (err != nil) != tt.wantErr {
				t.Errorf("New() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			var hookIPs []string
			if err == nil {
				for _, ipnet := range got.(*service).hookIPs {
					hookIPs = append(hookIPs, ipnet.String())
				}
				// Changing s.HookIPs and ticker as reflect.DeepEqual doesn't
				// work for IPNet; the CIDRs are compared via their strings.
				got.(*service).hookIPs = tt.want.(*service).hookIPs
				got.(*service).ticker = tt.want.(*service).ticker
				got.(*service).ticker.Stop()
			}
			if !reflect.DeepEqual(got, tt.want) || !reflect.DeepEqual(hookIPs, tt.wantHookIPs) {
				t.Errorf("New() = %v, want %v, HookIPs %v, want %v", got, tt.want, hookIPs, tt.wantHookIPs)
			}
		})
	}
}

// Test_service_Verify checks request-source verification both via the
// X-Forwarded-For header and via RemoteAddr.
func Test_service_Verify(t *testing.T) {
	type fields struct {
		hooks []*net.IPNet
	}
	type args struct {
		req *http.Request
	}
	tests := []struct {
		name    string
		fields  fields
		args    args
		want    bool
		wantErr bool
	}{
		{
			name: "Verify Source Positive 1",
			fields: fields{
				hooks: []*net.IPNet{{IP: net.IPv4(192, 30, 252, 0), Mask: net.IPv4Mask(255, 255, 252, 0)}, {IP: net.IPv4(185, 199, 108, 0), Mask: net.IPv4Mask(255, 255, 252, 0)}, {IP: net.IPv4(140, 82, 112, 0), Mask: net.IPv4Mask(255, 255, 240, 0)}},
			},
			args: args{
				&http.Request{
					Header: func() http.Header {
						h := http.Header{}
						h.Add("X-Forwarded-For", "192.30.252.1,92.30.252.3")
						return h
					}(),
				},
			},
			want: true,
		},
		{
			name: "Verify Source Positive 2",
			fields: fields{
				hooks: []*net.IPNet{{IP: net.IPv4(192, 30, 252, 0), Mask: net.IPv4Mask(255, 255, 252, 0)}, {IP: net.IPv4(185, 199, 108, 0), Mask: net.IPv4Mask(255, 255, 252, 0)}, {IP: net.IPv4(140, 82, 112, 0), Mask: net.IPv4Mask(255, 255, 240, 0)}},
			},
			args: args{
				&http.Request{
					RemoteAddr: "192.30.252.1:8080",
				},
			},
			want: true,
		},
		{
			name: "Verify Source Negative 1",
			fields: fields{
				hooks: []*net.IPNet{{IP: net.IPv4(192, 30, 252, 0), Mask: net.IPv4Mask(255, 255, 252, 0)}, {IP: net.IPv4(185, 199, 108, 0), Mask: net.IPv4Mask(255, 255, 252, 0)}, {IP: net.IPv4(140, 82, 112, 0), Mask: net.IPv4Mask(255, 255, 240, 0)}},
			},
			args: args{
				&http.Request{
					RemoteAddr: "92.30.252.0:8080",
				},
			},
			want: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			s := &service{
				hookIPs: tt.fields.hooks,
				Service: gs,
			}
			got, err := s.Verify(tt.args.req)
			if got != tt.want && !tt.wantErr {
				t.Errorf("service.Verify() = %v, want %v", got, tt.want)
			}
			if tt.wantErr && err == nil {
				t.Error("service.Verify() = wantErr")
			}
		})
	}
}

// Test_service_setHooks covers parsing of the GitHub meta endpoint payload:
// a valid response, an HTTP error, a body read error, a JSON type mismatch,
// and an unparseable CIDR.
func Test_service_setHooks(t *testing.T) {
	type fields struct {
		clientTransport util.RoundTripFunc
	}
	tests := []struct {
		name    string
		fields  fields
		want    []string
		wantErr bool
	}{
		{
			name: "setHookIPs Verification Test Positive",
			fields: fields{
				clientTransport: util.RoundTripFunc(func(req *http.Request) (*http.Response, error) {
					// Test request parameters
					return &http.Response{
						StatusCode: 200,
						// Send response to be tested; only "hooks" should be used.
						Body: ioutil.NopCloser(bytes.NewBufferString(`{ "verifiable_password_authentication": true, "github_services_sha": "2f2313161ed4f940a57ae3f0936eb8e9695bb8a8", "hooks": [ "192.30.252.0/22", "185.199.108.0/22", "140.82.112.0/20" ], "git": [ "192.30.252.0/22", "185.199.108.0/22", "140.82.112.0/20", "13.229.188.59/32", "13.250.177.223/32", "18.194.104.89/32", "18.195.85.27/32", "35.159.8.160/32", "52.74.223.119/32" ], "pages": [ "192.30.252.153/32", "192.30.252.154/32", "185.199.108.153/32", "185.199.109.153/32", "185.199.110.153/32", "185.199.111.153/32" ], "importer": [ "54.87.5.173", "54.166.52.62", "23.20.92.3" ] }`)),
					}, nil
				}),
			},
			want:    []string{(&net.IPNet{IP: net.IPv4(192, 30, 252, 0), Mask: net.IPv4Mask(255, 255, 252, 0)}).String(), (&net.IPNet{IP: net.IPv4(185, 199, 108, 0), Mask: net.IPv4Mask(255, 255, 252, 0)}).String(), (&net.IPNet{IP: net.IPv4(140, 82, 112, 0), Mask: net.IPv4Mask(255, 255, 240, 0)}).String()},
			wantErr: false,
		},
		{
			name: "setHookIPs Verification Test Negative - 1 Response Err",
			fields: fields{
				clientTransport: util.RoundTripFunc(func(req *http.Request) (*http.Response, error) {
					// Test request parameters
					return &http.Response{
						StatusCode: 400,
						// Send response to be tested
						Body: nil,
					}, errors.New("Mock Error Response")
				}),
			},
			want:    nil,
			wantErr: true,
		},
		{
			name: "setHookIPs Verification Test Negative - 2 Body Error",
			fields: fields{
				clientTransport: util.RoundTripFunc(func(req *http.Request) (*http.Response, error) {
					// Test request parameters
					return &http.Response{
						StatusCode: 200,
						// Send response to be tested
						Body: util.ErrReader("Mock Error"),
					}, nil
				}),
			},
			want:    nil,
			wantErr: true,
		},
		{
			name: "setHookIPs Verification Test Negative - 3 Unmarshal Error",
			fields: fields{
				clientTransport: util.RoundTripFunc(func(req *http.Request) (*http.Response, error) {
					// Test request parameters
					return &http.Response{
						StatusCode: 200,
						// Send response to be tested: "hooks" is a string, not an array.
						Body: ioutil.NopCloser(bytes.NewBufferString(`{ "hooks": "192.30.252.0/22" }`)),
					}, nil
				}),
			},
			want:    nil,
			wantErr: true,
		},
		{
			name: "setHookIPs Verification Test Negative - 4 IPNet parsing",
			fields: fields{
				clientTransport: util.RoundTripFunc(func(req *http.Request) (*http.Response, error) {
					// Test request parameters
					return &http.Response{
						StatusCode: 200,
						// Send response to be tested: the middle CIDR is invalid.
						Body: ioutil.NopCloser(bytes.NewBufferString(`{ "hooks": [ "192.30.252.0/22", "185.1979.108.777/722", "140.82.112.0/20" ] }`)),
					}, nil
				}),
			},
			want:    nil,
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			s := &service{
				Service: gs,
			}
			util.SetMockNetClient(tt.fields.clientTransport)
			err := s.setHookIPs()
			if (err != nil) != tt.wantErr {
				t.Errorf("service.setHooks() error = %v, wantErr %v", err, tt.wantErr)
			}
			var hookIPs []string
			if err == nil {
				for _, ipnet := range s.hookIPs {
					hookIPs = append(hookIPs, ipnet.String())
				}
			}
			if !reflect.DeepEqual(hookIPs, tt.want) {
				t.Errorf("New() = %v, want %v,", hookIPs, tt.want)
			}
		})
	}
}

// Test_service_isGithubIP checks CIDR membership of a single IP string.
func Test_service_isGithubIP(t *testing.T) {
	type fields struct {
		hooks   []*net.IPNet
		Service *goa.Service
	}
	type args struct {
		i string
	}
	tests := []struct {
		name   string
		fields fields
		args   args
		want   bool
	}{
		{
			name: "isGithubIP test positive",
			fields: fields{
				hooks: []*net.IPNet{{IP: net.IPv4(135, 104, 0, 0), Mask: net.IPv4Mask(255, 255, 255, 255)}, {IP: net.IPv4(185, 199, 108, 0), Mask: net.IPv4Mask(255, 255, 252, 0)}, {IP: net.IPv4(140, 82, 112, 0), Mask: net.IPv4Mask(255, 255, 240, 0)}},
			},
			args: args{i: "185.199.108.17"},
			want: true,
		},
		{
			name: "isGithubIP test negative",
			fields: fields{
				hooks: []*net.IPNet{{IP: net.IPv4(135, 104, 0, 0), Mask: net.IPv4Mask(255, 255, 255, 255)}, {IP: net.IPv4(185, 199, 108, 0), Mask: net.IPv4Mask(255, 255, 252, 0)}, {IP: net.IPv4(140, 82, 112, 0), Mask: net.IPv4Mask(255, 255, 240, 0)}},
			},
			args: args{i: "25.199.108.17"},
			want: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			s := &service{
				hookIPs: tt.fields.hooks,
				Service: tt.fields.Service,
			}
			if got := s.isGithubIP(tt.args.i); got != tt.want {
				t.Errorf("service.isGithubIP() = %v, want %v", got, tt.want)
			}
		})
	}
}
package http

import (
	"marketplace/transactions/domain"
	"marketplace/transactions/internal/request"
	"marketplace/transactions/internal/usecase"
	"net/http"

	"github.com/gin-gonic/gin"
	"github.com/go-pg/pg/v10"
	"github.com/sirupsen/logrus"
)

// CreateTransactionHandler returns a gin handler that creates a transaction
// from the request's JSON body on behalf of the authenticated account stored
// under the "acc" context key. Responses: 201 with the created transaction,
// 400 for malformed input or self-offers, 404 for a missing ad, 401 when the
// account lacks funds, 500 otherwise.
func CreateTransactionHandler(db *pg.DB, cmd usecase.CreateTransactionCmd) gin.HandlerFunc {
	return func(c *gin.Context) {
		createTransaction := &request.CreateTransactionRequest{}
		err := c.BindJSON(createTransaction)
		// Panics if middleware did not set "acc"; relies on the auth
		// middleware always running before this handler.
		user := c.MustGet("acc").(domain.Account)
		if err != nil {
			logrus.WithError(err).Error("Bad request. Data are not well formated.")
			c.Status(http.StatusBadRequest)
			return
		}
		transac, err := cmd(db, c, createTransaction, user)
		if err != nil {
			// NOTE(review): matching on error strings is fragile; prefer
			// sentinel errors in the usecase package compared with
			// errors.Is once they are available.
			switch err.Error() {
			case "Not found":
				logrus.WithError(err).Error("The ads is not found.")
				c.Status(http.StatusNotFound)
			case "You can not make an offer for an add that you created.":
				logrus.WithError(err).Error("You can not make an offer for an add that you created.")
				c.Status(http.StatusBadRequest)
			case "too expensive":
				logrus.WithError(err).Error("You can not make this transaction because you don't have enough money on your account")
				c.Status(http.StatusUnauthorized)
			default:
				// BUG FIX: this branch returns 500 but was logged with the
				// misleading "Bad request" message; log it as an internal
				// failure instead.
				logrus.WithError(err).Error("Internal error while creating the transaction.")
				c.Status(http.StatusInternalServerError)
			}
			return
		}
		c.JSON(http.StatusCreated, request.ConvertToResponse([]domain.Transaction{transac}))
	}
}
// +build linux,!legacy_appindicator //go:build linux && !legacy_appindicator package systray /* #cgo linux pkg-config: ayatana-appindicator3-0.1 #include "systray.h" */ import "C"
package main

import "strconv"

// multiplesLabel returns the message printed for i: whether it is a multiple
// of both 7 and 11, of 7 only, of 11 only, or (otherwise) the number itself.
// Extracted from main so the classification is unit-testable.
func multiplesLabel(i int) string {
	switch {
	case i%7 == 0 && i%11 == 0:
		return "Múltiplo de 7 e 11"
	case i%7 == 0:
		return "Múltiplo de 7"
	case i%11 == 0:
		return "Múltiplo de 11"
	default:
		return strconv.Itoa(i)
	}
}

// main prints the label for every integer from 0 through 150 inclusive.
func main() {
	for i := 0; i <= 150; i++ {
		println(multiplesLabel(i))
	}
}
package api

import (
	"crypto/tls"
	"encoding/json"
	"errors"
	"fmt"
	"net/http"
	"sort"
	"time"

	"github.com/gorilla/mux"
	"github.com/hashicorp/go-multierror"
	"github.com/mlowicki/rhythm/api/auth"
	"github.com/mlowicki/rhythm/api/auth/gitlab"
	"github.com/mlowicki/rhythm/api/auth/ldap"
	"github.com/mlowicki/rhythm/conf"
	"github.com/mlowicki/rhythm/model"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	log "github.com/sirupsen/logrus"
	"github.com/xeipuuv/gojsonschema"
)

// Sentinel errors returned by the HTTP handlers below.
var (
	errForbidden        = errors.New("Forbidden")
	errUnauthorized     = errors.New("Unauthorized")
	errJobAlreadyExists = errors.New("Job already exists")
	errJobNotFound      = errors.New("Job not found")
)

// authorizer resolves the caller's access level for a group/project.
type authorizer interface {
	GetProjectAccessLevel(r *http.Request, group string, project string) (auth.AccessLevel, error)
}

// encoder returns a JSON encoder writing indented output to w and sets the
// response Content-Type header.
func encoder(w http.ResponseWriter) *json.Encoder {
	enc := json.NewEncoder(w)
	enc.SetIndent("", " ")
	w.Header().Set("Content-Type", "application/json")
	return enc
}

// storage abstracts the persistence layer used by the API handlers.
type storage interface {
	GetJobs() ([]*model.Job, error)
	GetGroupJobs(group string) ([]*model.Job, error)
	GetProjectJobs(group, project string) ([]*model.Job, error)
	GetJob(group, project, id string) (*model.Job, error)
	SaveJob(j *model.Job) error
	DeleteJob(group, project, id string) error
	GetTasks(group, project, id string) ([]*model.Task, error)
	GetJobConf(group, project, id string) (*model.JobConf, error)
	SaveJobConf(state *model.JobConf) error
	QueueJob(group, project, id string) error
}

// handler adapts a (authorizer, storage, w, r) -> error function to
// http.Handler, turning a returned error into a JSON error payload.
type handler struct {
	a authorizer
	s storage
	h func(auth authorizer, s storage, w http.ResponseWriter, r *http.Request) error
}

func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	err := h.h(h.a, h.s, w, r)
	if err != nil {
		log.Errorf("API handler error: %s", err)
		errs := make([]string, 0, 1)
		// Flatten multierror into individual messages.
		if merr, ok := err.(*multierror.Error); ok {
			for _, err := range merr.Errors {
				errs = append(errs, err.Error())
			}
		} else {
			errs = append(errs, err.Error())
		}
		encoder(w).Encode(struct{ Errors []string }{errs})
	}
}

// filterReadableJobs returns the subset of jobs the caller may read, caching
// the access level per group/project pair to avoid repeated lookups.
func filterReadableJobs(a authorizer, r *http.Request, jobs []*model.Job) ([]*model.Job, error) {
	readable := make([]*model.Job, 0, len(jobs))
	lvls := make(map[string]auth.AccessLevel)
	var err error
	for _, job := range jobs {
		key := fmt.Sprintf("%s/%s", job.Group, job.Project)
		lvl, found := lvls[key]
		if !found {
			lvl, err = a.GetProjectAccessLevel(r, job.Group, job.Project)
			if err != nil {
				return nil, err
			}
			lvls[key] = lvl
		}
		if lvl != auth.NoAccess {
			readable = append(readable, job)
		}
	}
	return readable, nil
}

// getJobs lists all jobs visible to the caller.
func getJobs(a authorizer, s storage, w http.ResponseWriter, r *http.Request) error {
	jobs, err := s.GetJobs()
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return err
	}
	readable, err := filterReadableJobs(a, r, jobs)
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return err
	}
	encoder(w).Encode(readable)
	return nil
}

// getTasks lists a job's tasks sorted by end time (requires read access).
func getTasks(a authorizer, s storage, w http.ResponseWriter, r *http.Request) error {
	vars := mux.Vars(r)
	group := vars["group"]
	project := vars["project"]
	lvl, err := a.GetProjectAccessLevel(r, group, project)
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return err
	}
	if lvl == auth.NoAccess {
		w.WriteHeader(http.StatusForbidden)
		return errForbidden
	}
	tasks, err := s.GetTasks(group, project, vars["id"])
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return err
	}
	sort.Slice(tasks, func(i, j int) bool {
		return tasks[i].End.Before(tasks[j].End)
	})
	encoder(w).Encode(tasks)
	return nil
}

// getGroupJobs lists a group's jobs, filtered by the caller's access.
func getGroupJobs(a authorizer, s storage, w http.ResponseWriter, r *http.Request) error {
	jobs, err := s.GetGroupJobs(mux.Vars(r)["group"])
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return err
	}
	readable, err := filterReadableJobs(a, r, jobs)
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return err
	}
	encoder(w).Encode(readable)
	return nil
}

// getProjectJobs lists a project's jobs (requires read access).
func getProjectJobs(a authorizer, s storage, w http.ResponseWriter, r *http.Request) error {
	vars := mux.Vars(r)
	group := vars["group"]
	project := vars["project"]
	lvl, err := a.GetProjectAccessLevel(r, group, project)
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return err
	}
	if lvl == auth.NoAccess {
		w.WriteHeader(http.StatusForbidden)
		return errForbidden
	}
	jobs, err := s.GetProjectJobs(group, project)
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return err
	}
	encoder(w).Encode(jobs)
	return nil
}

// getJob returns a single job, 404 when missing (requires read access).
func getJob(a authorizer, s storage, w http.ResponseWriter, r *http.Request) error {
	vars := mux.Vars(r)
	group := vars["group"]
	project := vars["project"]
	lvl, err := a.GetProjectAccessLevel(r, group, project)
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return err
	}
	if lvl == auth.NoAccess {
		w.WriteHeader(http.StatusForbidden)
		return errForbidden
	}
	job, err := s.GetJob(group, project, vars["id"])
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return err
	}
	if job == nil {
		w.WriteHeader(http.StatusNotFound)
	} else {
		encoder(w).Encode(job)
	}
	return nil
}

// deleteJob removes a job (requires read-write access).
func deleteJob(a authorizer, s storage, w http.ResponseWriter, r *http.Request) error {
	vars := mux.Vars(r)
	group := vars["group"]
	project := vars["project"]
	lvl, err := a.GetProjectAccessLevel(r, group, project)
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return err
	}
	if lvl != auth.ReadWrite {
		w.WriteHeader(http.StatusForbidden)
		return errForbidden
	}
	err = s.DeleteJob(group, project, vars["id"])
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return err
	}
	w.WriteHeader(http.StatusNoContent)
	return nil
}

// runJob queues a job for immediate execution (requires read-write access).
func runJob(a authorizer, s storage, w http.ResponseWriter, r *http.Request) error {
	vars := mux.Vars(r)
	group := vars["group"]
	project := vars["project"]
	lvl, err := a.GetProjectAccessLevel(r, group, project)
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return err
	}
	if lvl != auth.ReadWrite {
		w.WriteHeader(http.StatusForbidden)
		return errForbidden
	}
	err = s.QueueJob(group, project, vars["id"])
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return err
	}
	w.WriteHeader(http.StatusNoContent)
	return nil
}

// validateSchema validates payload against schema, collecting every
// validation failure into a single multierror.
func validateSchema(payload gojsonschema.JSONLoader, schema gojsonschema.JSONLoader) error {
	res, err := gojsonschema.Validate(schema, payload)
	if err != nil {
		return err
	}
	if !res.Valid() {
		var errs *multierror.Error
		for _, err := range res.Errors() {
			errs = multierror.Append(errs, errors.New(err.String()))
		}
		return errs
	}
	return nil
}

// createJob decodes, validates and persists a new job. Fails with 400 if the
// job already exists; requires read-write access to the target project.
func createJob(a authorizer, s storage, w http.ResponseWriter, r *http.Request) error {
	var payload newJobPayload
	decoder := json.NewDecoder(r.Body)
	err := decoder.Decode(&payload)
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return fmt.Errorf("JSON decoding failed: %s", err)
	}
	schemaLoader := gojsonschema.NewGoLoader(newJobSchema)
	payloadLoader := gojsonschema.NewGoLoader(payload)
	err = validateSchema(payloadLoader, schemaLoader)
	if err != nil {
		return err
	}
	// Normalize optional collections to empty (non-nil) values.
	if payload.Env == nil {
		payload.Env = make(map[string]string)
	}
	if payload.Secrets == nil {
		payload.Secrets = make(map[string]string)
	}
	if payload.Arguments == nil {
		payload.Arguments = make([]string, 0)
	}
	if payload.Labels == nil {
		payload.Labels = make(map[string]string)
	}
	lvl, err := a.GetProjectAccessLevel(r, payload.Group, payload.Project)
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return err
	}
	if lvl != auth.ReadWrite {
		w.WriteHeader(http.StatusForbidden)
		return errForbidden
	}
	jobConf := &model.JobConf{
		JobID: model.JobID{
			Group:   payload.Group,
			Project: payload.Project,
			ID:      payload.ID,
		},
		Schedule: model.JobSchedule{
			Type: model.Cron,
			Cron: payload.Schedule.Cron,
		},
		Env:        payload.Env,
		Secrets:    payload.Secrets,
		Container:  model.JobContainer{},
		CPUs:       payload.CPUs,
		Mem:        payload.Mem,
		Disk:       payload.Disk,
		Cmd:        payload.Cmd,
		User:       payload.User,
		Arguments:  payload.Arguments,
		Labels:     payload.Labels,
		MaxRetries: payload.MaxRetries,
	}
	jobRuntime := &model.JobRuntime{}
	job := &model.Job{JobConf: *jobConf, JobRuntime: *jobRuntime}
	// A Docker image selects the Docker containerizer; otherwise Mesos.
	if payload.Container.Docker.Image != "" {
		job.Container.Type = model.Docker
		job.Container.Docker = &model.JobDocker{
			Image:          payload.Container.Docker.Image,
			ForcePullImage: payload.Container.Docker.ForcePullImage,
		}
	} else {
		job.Container.Type = model.Mesos
		job.Container.Mesos = &model.JobMesos{
			Image: payload.Container.Mesos.Image,
		}
	}
	// Shell defaults to true when absent from the payload.
	if payload.Shell == nil {
		job.Shell = true
	} else {
		job.Shell = *payload.Shell
	}
	storedJob, err := s.GetJob(payload.Group, payload.Project, payload.ID)
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return err
	}
	if storedJob != nil {
		w.WriteHeader(http.StatusBadRequest)
		return errJobAlreadyExists
	}
	job.State = model.IDLE
	err = s.SaveJob(job)
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return err
	}
	w.WriteHeader(http.StatusNoContent)
	return nil
}

// updateJob applies a partial update to an existing job's configuration.
// Only fields present (non-nil) in the payload are changed.
func updateJob(a authorizer, s storage, w http.ResponseWriter, r *http.Request) error {
	vars := mux.Vars(r)
	var payload updateJobPayload
	group := vars["group"]
	project := vars["project"]
	decoder := json.NewDecoder(r.Body)
	err := decoder.Decode(&payload)
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return fmt.Errorf("JSON decoding failed: %s", err)
	}
	schemaLoader := gojsonschema.NewGoLoader(updateJobSchema)
	payloadLoader := gojsonschema.NewGoLoader(payload)
	err = validateSchema(payloadLoader, schemaLoader)
	if err != nil {
		return err
	}
	lvl, err := a.GetProjectAccessLevel(r, group, project)
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return err
	}
	if lvl != auth.ReadWrite {
		w.WriteHeader(http.StatusForbidden)
		return errForbidden
	}
	job, err := s.GetJobConf(group, project, vars["id"])
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return err
	}
	if job == nil {
		w.WriteHeader(http.StatusNotFound)
		return errJobNotFound
	}
	if payload.Schedule != nil {
		schedule := job.Schedule
		if payload.Schedule.Cron != nil {
			schedule.Type = model.Cron
			schedule.Cron = *payload.Schedule.Cron
		}
		job.Schedule = schedule
	}
	if payload.Env != nil {
		job.Env = *payload.Env
	}
	if payload.Secrets != nil {
		job.Secrets = *payload.Secrets
	}
	if payload.Container != nil {
		container := job.Container
		// Switching containerizer clears the other containerizer's config.
		if payload.Container.Docker != nil {
			container.Type = model.Docker
			container.Mesos = nil
			if container.Docker == nil {
				container.Docker = &model.JobDocker{}
			}
			if payload.Container.Docker.Image != nil {
				container.Docker.Image = *payload.Container.Docker.Image
			}
			if payload.Container.Docker.ForcePullImage != nil {
				container.Docker.ForcePullImage = *payload.Container.Docker.ForcePullImage
			}
			if container.Docker.Image == "" {
				w.WriteHeader(http.StatusBadRequest)
				return errors.New("container.docker.image is required")
			}
		} else if payload.Container.Mesos != nil {
			container.Type = model.Mesos
			container.Docker = nil
			if container.Mesos == nil {
				container.Mesos = &model.JobMesos{}
			}
			container.Mesos.Image = *payload.Container.Mesos.Image
		}
		job.Container = container
	}
	if payload.CPUs != nil {
		job.CPUs = *payload.CPUs
	}
	if payload.Mem != nil {
		job.Mem = *payload.Mem
	}
	if payload.Disk != nil {
		job.Disk = *payload.Disk
	}
	if payload.Cmd != nil {
		job.Cmd = *payload.Cmd
	}
	if payload.User != nil {
		job.User = *payload.User
	}
	if payload.Shell != nil {
		job.Shell = *payload.Shell
	}
	if payload.Arguments != nil {
		job.Arguments = *payload.Arguments
	}
	if payload.Labels != nil {
		job.Labels = *payload.Labels
	}
	if payload.MaxRetries != nil {
		job.MaxRetries = *payload.MaxRetries
	}
	err = s.SaveJobConf(job)
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return err
	}
	w.WriteHeader(http.StatusNoContent)
	return nil
}

// State describes server info.
type State struct {
	IsLeader func() bool
	Version  string
}

// New creates instance of API server and runs it in separate goroutine.
// New wires up the /api/v1 router, selects the authorization backend from
// configuration, and starts the HTTP(S) server in a background goroutine.
// Any listen failure is fatal for the process.
func New(c *conf.API, s storage, state State) {
	r := mux.NewRouter()
	// StrictSlash redirects "/x/" to "/x" under the /api/v1 prefix.
	v1 := r.PathPrefix("/api/v1").Subrouter().StrictSlash(true)
	var (
		a   authorizer
		err error
	)
	switch c.Auth.Backend {
	case conf.APIAuthBackendGitLab:
		a, err = gitlab.New(&c.Auth.GitLab)
	case conf.APIAuthBackendNone:
		a = &auth.NoneAuthorizer{}
	case conf.APIAuthBackendLDAP:
		ldap.SetTimeout(c.Auth.LDAP.Timeout)
		a, err = ldap.New(&c.Auth.LDAP)
	default:
		log.Fatalf("Unknown authorization backend: %s", c.Auth.Backend)
	}
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("Authorization backend: %s", c.Auth.Backend)
	// Unauthenticated health endpoint: server time, version, leadership flag.
	v1.Handle("/health", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		encoder(w).Encode(struct {
			ServerTime string
			Version    string
			Leader     bool
		}{
			time.Now().Format(time.UnixDate),
			state.Version,
			state.IsLeader(),
		})
	}))
	v1.Handle("/jobs", &handler{a, s, getJobs}).Methods("GET")
	v1.Handle("/jobs", &handler{a, s, createJob}).Methods("POST")
	v1.Handle("/jobs/{group}", &handler{a, s, getGroupJobs}).Methods("GET")
	v1.Handle("/jobs/{group}/{project}", &handler{a, s, getProjectJobs}).Methods("GET")
	v1.Handle("/jobs/{group}/{project}/{id}", &handler{a, s, getJob}).Methods("GET")
	v1.Handle("/jobs/{group}/{project}/{id}", &handler{a, s, deleteJob}).Methods("DELETE")
	v1.Handle("/jobs/{group}/{project}/{id}", &handler{a, s, updateJob}).Methods("PUT")
	v1.Handle("/jobs/{group}/{project}/{id}/tasks", &handler{a, s, getTasks}).Methods("GET")
	v1.Handle("/jobs/{group}/{project}/{id}/run", &handler{a, s, runJob}).Methods("POST")
	v1.Handle("/metrics", promhttp.Handler())
	// TLS hardening: TLS 1.2 minimum, pinned curves and cipher suites.
	// NOTE(review): the two CBC suites below are considered weak by current
	// guidance, and PreferServerCipherSuites is deprecated (ignored since
	// Go 1.18) — worth revisiting.
	tlsConf := &tls.Config{
		MinVersion:               tls.VersionTLS12,
		PreferServerCipherSuites: true,
		CurvePreferences:         []tls.CurveID{tls.CurveP521, tls.CurveP384, tls.CurveP256},
		CipherSuites: []uint16{
			tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
			tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
			tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
			tls.TLS_RSA_WITH_AES_256_CBC_SHA,
		},
	}
	srv := &http.Server{
		Handler:      r,
		Addr:         c.Addr,
		ReadTimeout:  10 * time.Second,
		WriteTimeout: 10 * time.Second,
		IdleTimeout:  60 * time.Second,
		TLSConfig:    tlsConf,
		// A non-nil empty TLSNextProto map disables HTTP/2.
		TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler), 0),
	}
	// Serve in the background; TLS is used only when a cert or key is configured.
	go func() {
		if c.CertFile != "" || c.KeyFile != "" {
			log.Fatal(srv.ListenAndServeTLS(c.CertFile, c.KeyFile))
		} else {
			log.Fatal(srv.ListenAndServe())
		}
	}()
}
package pork import ( "testing" "github.com/mspaulding06/nap" ) func TestGetRepositoryReadme(t *testing.T) { token := "49117eb33240d82724587351e54434122667b3f9" GitHubAPI().SetAuth(nap.NewAuthToken(token)) if err := GetRepositoryReadme("mspaulding06/testrepo"); err != nil { t.Fail() } }
package structs import ( "fmt" "go/types" "sort" "golang.org/x/tools/go/analysis" "golang.org/x/tools/go/analysis/passes/buildssa" ssaPkg "golang.org/x/tools/go/ssa" ) var Analyzer = &analysis.Analyzer{ Name: "structs", Doc: Doc, Run: run, Requires: []*analysis.Analyzer{ buildssa.Analyzer, }, } const Doc = "structs finds all structs in a package" func isStruct(typ types.Type) bool { for { switch t := typ.(type) { case *types.Struct: return true case *types.Named: typ = t.Underlying() case *types.Pointer: typ = t.Elem() default: return false } } } func run(pass *analysis.Pass) (interface{}, error) { ssa := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA) structs := make([]string, 0) for _, m := range ssa.Pkg.Members { if typ, ok := m.(*ssaPkg.Type); ok { if isStruct(typ.Type()) { structs = append(structs, m.String()) } } } sort.Strings(structs) for _, s := range structs { fmt.Println(s) } return nil, nil }
package version var ( Name = "motionctrl" Number = "0.0.13" )
package cursor import ( "testing" ) func TestCursor(t *testing.T) { var b = []byte("select * from stu") // spawn a new Cursor var c = NewCursor(b,0) if c.Index != 0 { t.Errorf("Next() skipped Index 0") } c.Next() if c.Index != 1 { t.Errorf("Next() failed to Increment") } c.SetByte(byte('!')) if c.GetByte() != byte('!') { t.Errorf("SetByte() failed to set the provided byte") } // these will create out of range errors if they dont perform correctly var c2 = NewCursor(b,0) for c2.Next() { } for c2.Prev() { } }
package main import ( "fmt" "runtime" "sync" "time" ) /** go 线程调度 */ var gw sync.WaitGroup func a() { for i := 0;i<10;i++ { fmt.Println("A:",i) gw.Done() time.Sleep(1000*time.Millisecond) } } func b() { for i := 0;i<10;i++ { fmt.Println("B:",i) gw.Done() time.Sleep(1000*time.Millisecond) } } func main() { //获取当前时间戳 startTime:=time.Now().Second() //控制物理CPU runtime.GOMAXPROCS(2) gw.Add(20) go a() go b() gw.Wait() endTime:=time.Now().Second() fmt.Println("startTime:",startTime) fmt.Println("endTime:",endTime) fmt.Println(endTime-startTime) }
package dynamic import ( "github.com/jinzhu/now" "logic" "time" ) const ( WeekCycleSeconds = 60 * 60 * 24 * 7 MonthCycleSeconds = 60 * 60 * 24 * 30 ) func (r *Record) SetExpire(a *logic.Activity, nowTime time.Time) { switch r.Type { case FreqType_FreqPerDay: r.Expire = int32(now.New(nowTime).EndOfDay().Unix()) case FreqType_FreqPerWeek: r.Expire = r.ExpireEnd(int32(a.CreativeStart().Unix()), int32(nowTime.Unix()), WeekCycleSeconds) case FreqType_FreqPerMonth: r.Expire = r.ExpireEnd(int32(a.CreativeStart().Unix()), int32(nowTime.Unix()), MonthCycleSeconds) case FreqType_FreqCustom: //should not run to here } } //TODO: the algrithm to be reviewed func (r *Record) ExpireEnd(start, now, cycle int32) int32 { return (((now-start)/cycle)+1)*cycle + start }
package model

import (
	"context"
	"time"

	"guild/manager"
	"shared/common"
	"shared/csv/static"
	"shared/global"
	"shared/protobuf/pb"
	"shared/utility/errors"
	"shared/utility/glog"
	"shared/utility/mysql"
)

const (
	GuildJoinModelAuto   = 0 // guild can be joined freely
	GuildJoinModelHandle = 1 // joining the guild requires approval

	GuildChatJoin = 1 // chat event: member joined
	GuildChatQuit = 2 // chat event: member left

	GuildNormalTaskNum   = 2 // guild tasks drawn from the normal pool per refresh
	GuildSeparateTaskNum = 1 // guild tasks drawn from the special pool per refresh
	GuildTaskTotalNum    = 8 // total number of guild tasks
)

// Guild is the persistent state of one guild.
type Guild struct {
	ID                     int64                         `db:"id" major:"true"`              // guild ID
	Name                   string                        `db:"name"`                         // name
	Chairman               int64                         `db:"chairman"`                     // chairman user ID
	ViceChairmen           []int64                       `db:"vice_chairmen"`                // vice chairmen user IDs
	Icon                   *common.GuildIcon             `db:"icon"`                         // icon
	Title                  string                        `db:"title"`                        // description
	Exp                    int32                         `db:"exp"`                          // experience
	Level                  int32                         `db:"level"`                        // level
	JoinModel              int32                         `db:"join_model"`                   // join mode: free join or manual approval
	Members                []*common.GuildMember         `db:"members"`                      // members
	Chats                  []common.GuildChat            `db:"chats"`                        // chat log
	AppliedList            []common.GuildApply           `db:"applied_list"`                 // pending join applications
	DissolveTime           int64                         `db:"dissolve_time"`                // dissolve timestamp; can be reverted within 24h
	LastTimeCancelDissolve int64                         `db:"last_time_cancel_dissolve"`    // last time a dissolve was cancelled
	HelpList               []int64                       `db:"help_list"`                    // help list
	CreateTime             int64                         `db:"ctime"`                        // creation time
	ClearFlag              int32                         `db:"clear_flag"`                   // data-clear marker set after dissolution
	ChairmanLastLoginTime  int64                         `db:"chairman_last_login_time"`     // chairman's last login; chairmanship auto-transfers after long absence
	MemberLastLoginTime    int64                         `db:"member_last_login_time"`       // latest member login; guild auto-dissolves if nobody logs in
	Tasks                  []*common.GuildTask           `db:"tasks"`                        // tasks
	TaskLastRefreshTime    int64                         `db:"task_last_refresh_time"`       // last task refresh time
	LastSendGroupMailTime  int64                         `db:"last_send_group_mail_time"`    // last group mail time
	// Area int32 `db:"area"` // region
	// BossDamage *number.CalNumber `db:"boss_damage"` // boss damage
	HelpRequests   *common.GraveyardHelpRequests `db:"help_requests"`   // guild mutual-help requests
	YggCoBuilds    map[int32]*YggCoBuild         `db:"-"`               // world-exploration co-op buildings
	GuildGreetings *GuildGreetings               `db:"guild_greetings"` // cross-world greetings
	*mysql.EmbedModule `db:"-"`
}

// NewGuild returns an empty guild with the given ID and all collections
// initialised (CreateTime set to now).
func NewGuild(id int64) *Guild {
	return &Guild{
		ID:             id,
		Name:           "",
		Chairman:       0,
		ViceChairmen:   []int64{},
		Icon:           &common.GuildIcon{},
		Title:          "",
		Exp:            0,
		Level:          0,
		JoinModel:      0,
		Members:        []*common.GuildMember{},
		Chats:          []common.GuildChat{},
		AppliedList:    []common.GuildApply{},
		DissolveTime:   0,
		HelpList:       []int64{},
		CreateTime:     time.Now().Unix(),
		ClearFlag:      0,
		HelpRequests:   common.NewGraveyardRequests(),
		EmbedModule:    &mysql.EmbedModule{},
		GuildGreetings: NewGreetings(),
	}
}

// InitNewGuild installs userID as the founding chairman and sets level 1.
func (g *Guild) InitNewGuild(userID int64) bool {
	g.Chairman = userID
	member := common.NewGuildMember(userID)
	// The founder is online right now.
	member.Status = common.UserOnline
	member.LastLoginTime = time.Now().Unix()
	member.Position = common.GuildPositionChairman
	g.Members = append(g.Members, member)
	g.Level = 1
	return false
}

// CheckInDissolving fails when a dissolve request is pending.
func (g *Guild) CheckInDissolving() error {
	if g.DissolveTime > 0 {
		return common.ErrGuildInDissolving
	}
	return nil
}

// CheckDissolved fails when the guild has been dissolved for good.
func (g *Guild) CheckDissolved() error {
	if g.IsDissolved() {
		return common.ErrGuildDissolved
	}
	return nil
}

// CheckDissolvedCD reports whether the dissolve cooldown (after a cancelled
// dissolve) is still running; true means dissolving is not yet allowed again.
func (g *Guild) CheckDissolvedCD() bool {
	return g.LastTimeCancelDissolve > 0 && time.Unix(g.LastTimeCancelDissolve, 0).Add(time.Duration(manager.CSV.GlobalEntry.GuildDissolveCD)*time.Second).After(time.Now())
}

// IsCleared reports whether the clear flag is unset.
// NOTE(review): the name reads inverted — ClearFlag==0 means "not yet
// cleared"; confirm against callers before renaming.
func (g *Guild) IsCleared() bool {
	return g.ClearFlag == 0
}

// Clear marks the guild data for clearing after dissolution.
func (g *Guild) Clear() {
	g.ClearFlag = 1
}

// IsDissolved reports whether the dissolve grace period has fully elapsed.
func (g *Guild) IsDissolved() bool {
	return g.DissolveTime > 0 && time.Now().Add(-time.Duration(manager.CSV.GlobalEntry.GuildDissolveDuration)*time.Second).After(time.Unix(g.DissolveTime, 0))
}

// CheckIsMember fails when userID is not a member of this guild.
func (g *Guild) CheckIsMember(userID int64) error {
	var isInGuild bool
	for _, member := range g.Members {
		if member.UserID == userID {
			isInGuild = true
		}
	}
	if !isInGuild {
		return errors.Swrapf(common.ErrGuildNotMember, g.ID, userID)
	}
	return nil
}

// CheckPrivilegeChairMan requires chairman rank.
func (g *Guild) CheckPrivilegeChairMan(userID int64) error {
	return g.CheckPrivilege(userID, common.GuildPositionChairman)
}

// CheckPrivilegeViceChairMan requires vice-chairman rank or above.
func (g *Guild) CheckPrivilegeViceChairMan(userID int64) error {
	return g.CheckPrivilege(userID, common.GuildPositionViceChairman)
}

// CheckSendGroupMailCD enforces the 24h cooldown between group mails.
func (g *Guild) CheckSendGroupMailCD() error {
	// TODO:read config
	if !time.Now().Add(-24 * time.Hour).After(time.Unix(g.LastSendGroupMailTime, 0)) {
		return common.ErrGuildSendGroupMailInCD
	}
	return nil
}

// IsChairMan reports whether userID is the chairman.
func (g *Guild) IsChairMan(userID int64) bool {
	return g.Chairman == userID
}

// IsViceChairMan reports whether userID is a vice chairman.
func (g *Guild) IsViceChairMan(userID int64) bool {
	for _, v := range g.ViceChairmen {
		if v == userID {
			return true
		}
	}
	return false
}

// IsElite reports whether userID holds the elite position.
func (g *Guild) IsElite(userID int64) bool {
	for _, v := range g.Members {
		if v.UserID == userID {
			return v.Position == common.GuildPositionElite
		}
	}
	return false
}

// IsCommon reports whether userID holds the common (rank-and-file) position.
func (g *Guild) IsCommon(userID int64) bool {
	for _, v := range g.Members {
		if v.UserID == userID {
			return v.Position == common.GuildPositionCommon
		}
	}
	return false
}

// EliteNum counts members holding the elite position.
func (g *Guild) EliteNum() int32 {
	var result int32
	for _, v := range g.Members {
		if v.Position == common.GuildPositionElite {
			result++
		}
	}
	return result
}

// CheckPrivilege fails unless userID's rank is at least `position`
// (chairman > vice chairman > elite > common).
func (g *Guild) CheckPrivilege(userID int64, position int32) error {
	switch position {
	case common.GuildPositionChairman:
		if g.IsChairMan(userID) {
			return nil
		}
		return common.ErrGuildNoPrivilege
	case common.GuildPositionViceChairman:
		if g.IsChairMan(userID) {
			return nil
		}
		if g.IsViceChairMan(userID) {
			return nil
		}
		return common.ErrGuildNoPrivilege
	case common.GuildPositionElite:
		if g.IsChairMan(userID) {
			return nil
		}
		if g.IsViceChairMan(userID) {
			return nil
		}
		if g.IsElite(userID) {
			return nil
		}
		return common.ErrGuildNoPrivilege
	case common.GuildPositionCommon:
		return nil
	}
	return nil
}

// IsAutoHandle reports whether applications are accepted automatically.
func (g *Guild) IsAutoHandle() bool {
	return g.JoinModel == GuildJoinModelAuto
}

// Apply appends a join application for userID.
func (g *Guild) Apply(userID int64) {
	g.AppliedList = append(g.AppliedList, *common.NewGuildApply(userID))
}

// CancelApply removes userID's pending application, if any.
func (g *Guild) CancelApply(userID int64) {
	for i, v := range g.AppliedList {
		if v.UserID == userID {
			g.AppliedList = append(g.AppliedList[:i], g.AppliedList[i+1:]...)
			break
		}
	}
}

// Quit removes userID from the member and vice-chairman lists, posts a
// "left guild" chat event, and flushes any pending greetings by mail.
func (g *Guild) Quit(ctx context.Context, userID int64) {
	for i, v := range g.Members {
		if v.UserID == userID {
			g.Members = append(g.Members[:i], g.Members[i+1:]...)
			break
		}
	}
	for i, v := range g.ViceChairmen {
		if v == userID {
			g.ViceChairmen = append(g.ViceChairmen[:i], g.ViceChairmen[i+1:]...)
			break
		}
	}
	userCaches, err := manager.Global.GetUserCaches(ctx, []int64{userID})
	if err != nil {
		// Fixed format string: the error was previously passed without a verb.
		glog.Errorf("GetUserCaches(%v) error: %v", userID, err)
	}
	if userCache, ok := userCaches[userID]; ok {
		g.ChatEvent(userID, userCache.Name, static.GuildActionGuildChatQuit)
	}
	g.SendGreetingsByMails(ctx, []int64{userID})
}

// CheckMemberCount fails when adding approveCount members would reach the
// configured cap.
func (g *Guild) CheckMemberCount(approveCount int) error {
	if len(g.Members)+approveCount >= int(manager.CSV.GlobalEntry.GuildMemberMaxNum) {
		return common.ErrGuildIsFull
	}
	return nil
}

// CheckAppliedListCount fails when the application list is at capacity.
func (g *Guild) CheckAppliedListCount(maxCount int) error {
	if len(g.AppliedList) >= maxCount {
		return common.ErrGuildIsFull
	}
	return nil
}

// CheckInAppliedList fails unless every given user has a pending application.
func (g *Guild) CheckInAppliedList(userIDs []int64) error {
	appliedMap := map[int64]bool{}
	for _, v := range g.AppliedList {
		appliedMap[v.UserID] = true
	}
	for _, userID := range userIDs {
		if !appliedMap[userID] {
			return common.ErrGuildNotInAppliedList
		}
	}
	return nil
}

// Approve adds userID as a member, seeding online status from the user cache,
// and posts a "joined guild" chat event.
func (g *Guild) Approve(ctx context.Context, userID int64) {
	member := common.NewGuildMember(userID)
	caches, err := manager.Global.GetUserCachesExtension(ctx, []int64{userID}, global.UserCacheWithOnline)
	if err != nil {
		// Fixed format string: the error was previously passed without a verb.
		glog.Errorf("GetUserCachesExtension(%v) error: %v", userID, err)
	}
	// Only read cache fields when the lookup actually hit, matching the
	// guarded pattern used in Quit (the old code read cache.Name even on a
	// miss).
	var name string
	if cache, ok := caches[userID]; ok {
		member.LastLoginTime = cache.LastLoginTime
		member.Status = int8(cache.OnlineStatus)
		name = cache.Name
	}
	g.Members = append(g.Members, member)
	g.ChatEvent(userID, name, static.GuildActionGuildChatJoin)
}

// RefreshLoginTime records a member's latest login time and online status.
func (g *Guild) RefreshLoginTime(userID, lastLoginTime int64, status int8) {
	if g.IsChairMan(userID) {
		g.ChairmanLastLoginTime = lastLoginTime
	}
	for i, member := range g.Members {
		if member.UserID == userID {
			g.Members[i].LastLoginTime = lastLoginTime
			g.Members[i].Status = status
		}
	}
	g.MemberLastLoginTime = lastLoginTime
}

// RefreshOnlineStatus re-checks members that still look online but have not
// synced for over five minutes (covers missed offline notifications, e.g.
// after a server crash).
func (g *Guild) RefreshOnlineStatus() {
	for i, member := range g.Members {
		if member.Status == common.UserOnline {
			now := time.Now()
			if (now.Add(-5 * time.Minute)).After(time.Unix(member.LastLoginTime, 0)) {
				lastLoginTime, err := manager.Global.UserLastLoginTime(context.Background(), member.UserID)
				if err != nil {
					glog.Errorf("UserLastLoginTime(%d) error: %v", member.UserID, err)
				} else {
					// Zero means the user is no longer online; mark offline.
					if lastLoginTime == 0 {
						g.Members[i].LastLoginTime = now.Unix()
						g.Members[i].Status = common.UserOffline
					}
				}
			}
		}
	}
}

// MemberPosition returns userID's position, or 0 when not a member.
func (g *Guild) MemberPosition(userID int64) int32 {
	for _, v := range g.Members {
		if v.UserID == userID {
			return v.Position
		}
	}
	return 0
}

// Chat appends a chat message, evicting the oldest entry past maxChatLen.
func (c *Guild) Chat(maxChatLen int, userID int64, userName, content string, avatar, frame int32) {
	c.Chats = append(c.Chats, *common.NewGuildChat(0, userID, userName, c.MemberPosition(userID), content, avatar, frame))
	if len(c.Chats) > maxChatLen {
		c.Chats = c.Chats[1:]
	}
}

// ChatEvent appends a system chat event (join/quit/transfer), bounded by the
// configured chat limit.
func (g *Guild) ChatEvent(userId int64, userName string, eventType int8) {
	g.Chats = append(g.Chats, *common.NewGuildChat(eventType, userId, userName, 0, "", 0, 0))
	if len(g.Chats) > int(manager.CSV.GlobalEntry.GuildChatLimit) {
		g.Chats = g.Chats[1:]
	}
}

// Promotion raises a member to `position` without permission checks.
func (g *Guild) Promotion(userID int64, position int32) {
	for i, member := range g.Members {
		if member.UserID == userID {
			g.Members[i].Position = position
		}
	}
	// A promotion to vice chairman also joins the vice-chairman list.
	if position == common.GuildPositionViceChairman {
		g.ViceChairmen = append(g.ViceChairmen, userID)
	}
}

// Demotion lowers a member to `position` without permission checks.
func (g *Guild) Demotion(userID int64, position int32) {
	for i, member := range g.Members {
		if member.UserID == userID {
			g.Members[i].Position = position
		}
	}
	// Demotion from vice chairman leaves the vice-chairman list.
	if position == common.GuildPositionElite {
		for i, member := range g.ViceChairmen {
			if member == userID {
				g.ViceChairmen = append(g.ViceChairmen[:i], g.ViceChairmen[i+1:]...)
			}
		}
	}
}

// Transfer hands the chairmanship to userID; the old chairman becomes a vice
// chairman, and both changes are announced as chat events.
func (g *Guild) Transfer(ctx context.Context, userID int64) {
	oldChairman := g.Chairman
	g.Chairman = userID
	// The new chairman leaves the vice-chairman list.
	for i, member := range g.ViceChairmen {
		if member == userID {
			g.ViceChairmen = append(g.ViceChairmen[:i], g.ViceChairmen[i+1:]...)
		}
	}
	// The old chairman becomes a vice chairman.
	g.ViceChairmen = append(g.ViceChairmen, oldChairman)
	for i, member := range g.Members {
		if member.UserID == userID {
			g.Members[i].Position = common.GuildPositionChairman
		} else if member.UserID == oldChairman {
			g.Members[i].Position = common.GuildPositionViceChairman
		}
	}
	userCaches, err := manager.Global.GetUserCaches(ctx, []int64{oldChairman, g.Chairman})
	if err != nil {
		// Fixed format string: the error was previously passed without a verb.
		glog.Errorf("GetUserCaches(%v) error: %v", userID, err)
	}
	if userCache, ok := userCaches[oldChairman]; ok {
		g.ChatEvent(userID, userCache.Name, static.GuildActionGuildChatUnchairman)
	}
	if userCache, ok := userCaches[g.Chairman]; ok {
		g.ChatEvent(userID, userCache.Name, static.GuildActionGuildChatChairman)
	}
}

// HandleApplied removes the applications of the given users.
func (g *Guild) HandleApplied(users []int64) {
	userM := map[int64]bool{}
	for _, userID := range users {
		userM[userID] = true
	}
	// Filter in place. The previous implementation deleted elements while
	// ranging over the same slice, which skips the element that slides into a
	// removed slot and could leave some of the listed applications behind.
	kept := g.AppliedList[:0]
	for _, v := range g.AppliedList {
		if !userM[v.UserID] {
			kept = append(kept, v)
		}
	}
	g.AppliedList = kept
}

// VOGuildInfo builds the full guild view (members enriched with cached user
// data, chat log, schedule fields) for the given viewer.
func (g *Guild) VOGuildInfo(ctx context.Context, userId int64) (*pb.VOGuildInfo, error) {
	intimacyMap, err := manager.Global.GetGuildIntimacyMap(ctx, g.ID, userId)
	if err != nil {
		return nil, err
	}
	uids := make([]int64, 0, len(g.Members))
	voMembers := make([]*pb.VOGuildMember, 0, len(g.Members))
	for i, member := range g.Members {
		voMembers = append(voMembers, g.Members[i].VOGuildMember(intimacyMap[member.UserID]))
		uids = append(uids, member.UserID)
	}
	// Enrich members with name/avatar/frame/power from the user cache.
	userCaches, err := manager.Global.GetUserCaches(ctx, uids)
	if err != nil {
		return nil, err
	}
	for i, member := range voMembers {
		if userCache, ok := userCaches[member.UserID]; ok {
			voMembers[i].UserName = userCache.Name
			voMembers[i].Avatar = userCache.Avatar
			voMembers[i].Frame = userCache.Frame
			voMembers[i].Power = userCache.Power
		}
	}
	voChats := make([]*pb.VOGuildChat, 0, len(g.Chats))
	for i := range g.Chats {
		voChats = append(voChats, g.Chats[i].VOGuildChat())
	}
	return &pb.VOGuildInfo{
		GuildID:                g.ID,
		Name:                   g.Name,
		Chairman:               g.Chairman,
		ViceChairmen:           g.ViceChairmen,
		Icon:                   g.Icon.VOGuildIcon(),
		Title:                  g.Title,
		Exp:                    g.Exp,
		Level:                  g.Level,
		JoinModel:              g.JoinModel,
		CreateTime:             g.CreateTime,
		Members:                voMembers,
		Chats:                  voChats,
		DissolveTime:           g.DissolveTime,
		LastCancelDissolveTime: g.LastTimeCancelDissolve,
	}, nil
}

// VOGuildShowInfo builds the compact guild-list view: only chairman and vice
// chairmen are included as member details.
func (g *Guild) VOGuildShowInfo(ctx context.Context) (*pb.VOGuildShowInfo, error) {
	voChairmen := make([]*pb.VOGuildMember, 0, 1+manager.CSV.GlobalEntry.GuildViceChairmenNum)
	uids := make([]int64, 0, 1+manager.CSV.GlobalEntry.GuildViceChairmenNum)
	for i, member := range g.Members {
		if member.Position >= common.GuildPositionViceChairman {
			voChairmen = append(voChairmen, g.Members[i].VOGuildMember(0))
			uids = append(uids, member.UserID)
		}
	}
	// Enrich leadership entries with cached user data.
	userCaches, err := manager.Global.GetUserCaches(ctx, uids)
	if err != nil {
		return nil, err
	}
	for i, member := range voChairmen {
		if userCache, ok := userCaches[member.UserID]; ok {
			voChairmen[i].UserName = userCache.Name
			voChairmen[i].Avatar = userCache.Avatar
			voChairmen[i].Frame = userCache.Frame
			voChairmen[i].Power = userCache.Power
		}
	}
	icon := &pb.VOGuildIcon{}
	if g.Icon != nil {
		icon = g.Icon.VOGuildIcon()
	}
	return &pb.VOGuildShowInfo{
		GuildID:      g.ID,
		Name:         g.Name,
		Chairman:     g.Chairman,
		ViceChairmen: g.ViceChairmen,
		Icon:         icon,
		Title:        g.Title,
		Exp:          g.Exp,
		Level:        g.Level,
		JoinModel:    g.JoinModel,
		CreateTime:   g.CreateTime,
		ChairmenInfo: voChairmen,
	}, nil
}

// AddGuildExp adds experience and recomputes the level from the CSV exp
// table; at max level the guild is pinned to max level with capped exp.
func (g *Guild) AddGuildExp(addExp int32) {
	if addExp <= 0 {
		return
	}
	expArr := manager.CSV.Guild.GetExpArr()
	nowLevel := g.Level
	maxLevel := int32(len(expArr))
	if nowLevel >= maxLevel {
		return
	}
	nowExp := g.Exp + addExp
	for i := nowLevel - 1; i < maxLevel; i++ {
		exp := expArr[i]
		if exp == nowExp {
			nowLevel = i + 1
			break
		} else if exp > nowExp {
			break
		}
		nowLevel = i + 1
	}
	if nowLevel >= maxLevel {
		// Once max level is reached, exp stops accumulating.
		g.Exp = expArr[maxLevel-1]
		g.Level = maxLevel
	} else {
		g.Exp = nowExp
		g.Level = nowLevel
	}
}

// AddGuildMemberActivation converts a member contribution into guild exp (via
// the configured ratio) plus personal contribution points.
func (g *Guild) AddGuildMemberActivation(member *common.GuildMember, addExp int32) {
	g.AddGuildExp(addExp * manager.CSV.Guild.GetGuildContributionExpRatio())
	member.Activation.Plus(addExp)
}

// GetMemberUserIDs returns the user IDs of all members.
func (g *Guild) GetMemberUserIDs() []int64 {
	userIDs := make([]int64, 0, len(g.Members))
	for _, v := range g.Members {
		userIDs = append(userIDs, v.UserID)
	}
	return userIDs
}
package mongo import ( "gopkg.in/mgo.v2" ) var session *mgo.Session func Connect(server string) (err error) { session, err = mgo.Dial(server) return } func NewDb(databaseName string) *mgo.Database { newSession := session.Copy() db := newSession.DB(databaseName) return db }
package service

import (
	"errors"
	"github.com/astaxie/beego"
	"github.com/astaxie/beego/validation"
	"net/http"
	"strconv"
	"tripod/convert"
	"webserver/common"
	"webserver/controllers"
	"webserver/models"
	"webserver/models/maccount"
	"webserver/models/mservice"
)

// OrderListController serves the paginated order-list endpoint for the
// authenticated user.
type OrderListController struct {
	controllers.BaseController
	orderType   int // 1: orders I published, 2: orders I accepted (see OrderListHandler)
	orderStatus int // status filter; OrderListHandler treats 1: in progress, 2: completed, 3: cancelled, 4: arbitrated
	limit       int // pagination upper bound: offset + page size
	offset      int // pagination lower bound: (page-1) * page size
	orderCount  int
	data        []map[string]interface{} // response rows
	Count       int                      // running count of matching orders
}

// Post handles the request: parse body, validate parameters, build the list.
func (c *OrderListController) Post() {
	defer c.Recover()
	if err := c.ParseJsonBody(); err != nil {
		c.WriteCommonResponse(http.StatusBadRequest, 0, err)
		return
	}
	if err := c.validParam(); err != nil {
		c.WriteCommonResponse(http.StatusOK, 0, err)
		return
	}
	if respBody, err := c.getOrderList(); err != nil {
		c.WriteCommonResponse(http.StatusOK, 0, err)
	} else {
		c.WriteBodyResponse(respBody)
	}
}

// validParam reads the filter and pagination parameters.
// limit ends up as offset + page size, so matches are kept while
// offset < Count <= limit (see GetOrderInfo).
func (c *OrderListController) validParam() error {
	valid := validation.Validation{}
	c.orderType = convert.Atoi(c.GetParam("order_type"))     // 1: offer, 2: have
	c.orderStatus = convert.Atoi(c.GetParam("order_status")) // see field comment above
	page := convert.Atoi(c.GetParam("page"))
	c.limit = convert.Atoi(c.GetParam("page_num"))
	c.offset = (page - 1) * c.limit
	c.limit += c.offset
	//beego.BeeLogger.Debug("validParam %d %d %d %d", c.orderType, c.orderStatus, c.limit, c.offset)
	if valid.HasErrors() {
		var reason string
		for _, err := range valid.Errors {
			reason += err.Key + err.Message
		}
		return errors.New(reason)
	}
	return nil
}

// getOrderList runs the handler and wraps the rows in the common response.
func (c *OrderListController) getOrderList() (interface{}, error) {
	if err := c.OrderListHandler(); err != nil {
		return nil, err
	}
	var resp common.GetResponse
	resp.Code = 1
	resp.Data = c.data
	return resp, nil
}

// OrderListHandler walks all of the user's orders, applies the status and
// role filters, and collects the page window into c.data.
func (c *OrderListController) OrderListHandler() error {
	c.data = make([]map[string]interface{}, 0, 10)
	orders, err := maccount.FindOrderByUserId(c.User.Id)
	controllers.CheckError("FindOrderByUserId", err)
	for _, order := range orders {
		if c.orderStatus == 1 { // in progress
			if !convert.CheckIntInclude(order.Status, models.Order_Processing) {
				continue
			}
		} else { // finished states
			// Skip orders this user has soft-deleted from their own list.
			if order.PublishUserId == c.User.Id {
				if order.IsShow&models.PUBLISH_DELETED == models.PUBLISH_DELETED {
					continue
				}
			} else {
				if order.IsShow&models.ACCEPT_DELETED == models.ACCEPT_DELETED {
					continue
				}
			}
			if c.orderStatus == 2 { // completed
				if order.Status != models.STATUS_COMPLISHED {
					continue
				}
			} else if c.orderStatus == 3 { // cancelled
				if order.Status != models.STATUS_PUBLISH_CANCEL && order.Status != models.STATUS_ACCEPT_CANCEL {
					continue
				}
			} else if c.orderStatus == 4 { // arbitrated (complaint flow)
				if !convert.CheckIntInclude(order.Status, models.Order_Complain) {
					continue
				}
			} else {
				continue
			}
		}
		// Role filter: pick which counterpart's user info to attach depending
		// on whether the current user published or accepted the order.
		if c.orderType == 1 {
			if convert.CheckIntInclude(order.OrderType, common.PublishToEarn) {
				if c.User.Id == order.PublishUserId {
					c.GetOrderInfo(&order, order.AcceptUserId)
				}
			} else {
				if c.User.Id == order.AcceptUserId {
					c.GetOrderInfo(&order, order.PublishUserId)
				}
			}
		} else {
			if convert.CheckIntInclude(order.OrderType, common.PublishToEarn) {
				if c.User.Id == order.AcceptUserId {
					c.GetOrderInfo(&order, order.PublishUserId)
				}
			} else {
				if c.User.Id == order.PublishUserId {
					c.GetOrderInfo(&order, order.AcceptUserId)
				}
			}
		}
		// Stop once the requested page window has been filled.
		if c.Count > c.limit {
			break
		}
	}
	return nil
}

// GetOrderInfo resolves the service row behind the order, attaches the
// counterpart's user info and order extras, and appends the row when it falls
// inside the page window (offset < Count <= limit).
func (c *OrderListController) GetOrderInfo(order *maccount.Order, userId int) error {
	if item, err := GetServiceList(order.ServiceType, order.ServiceId); err == nil {
		if err := controllers.AddUserInfo(item, userId); err == nil {
			AddOrderExtra(item, order)
			c.SetOrderOther(item, order)
			c.Count++
			if c.Count > c.offset && c.Count <= c.limit {
				c.data = append(c.data, item)
			}
		}
	}
	return nil
}

// SetOrderOther fills the per-viewer fields: deposit, cancel/complaint status
// code (order_quit_status), whether the user has commented, and images.
func (c *OrderListController) SetOrderOther(item map[string]interface{}, order *maccount.Order) {
	var status int
	if c.User.Id == order.PublishUserId {
		item["order_deposit"] = convert.GetFormatMoney(order.PublishPunishment)
		if order.Status == models.STATUS_PUBLISH_CANCEL {
			status = 1
		} else if order.Status == models.STATUS_ACCEPT_CANCEL {
			status = 2
		} else if order.Status == models.STATUS_PUBLISH_COMPLAIN {
			status = 3
		} else if order.Status == models.STATUS_ACCEPT_COMPLAIN {
			status = GetComplainStatus(order.OrderId)
		} else if order.Status == models.STATUS_PUBLISH_WIN {
			status = 5
		} else if order.Status == models.STATUS_ACCEPT_WIN {
			status = 6
		} else {
			status = 0
		}
	} else {
		// Viewer is the accepting side: the status codes mirror the above.
		item["order_deposit"] = convert.GetFormatMoney(order.AcceptPunishment)
		if order.Status == models.STATUS_PUBLISH_CANCEL {
			status = 2
		} else if order.Status == models.STATUS_ACCEPT_CANCEL {
			status = 1
		} else if order.Status == models.STATUS_PUBLISH_COMPLAIN {
			status = GetComplainStatus(order.OrderId)
		} else if order.Status == models.STATUS_ACCEPT_COMPLAIN {
			status = 3
		} else if order.Status == models.STATUS_PUBLISH_WIN {
			status = 6
		} else if order.Status == models.STATUS_ACCEPT_WIN {
			status = 5
		} else {
			status = 0
		}
	}
	item["order_quit_status"] = strconv.Itoa(status)
	if _, err := maccount.GetOrderComment(c.User.Id, order.OrderId); err == nil {
		item["order_comment"] = "1"
	} else {
		item["order_comment"] = "0"
	}
	if order.ServiceType == common.SERVICE_PET_CARE || order.ServiceType == common.SERVICE_LEND_OUT {
		item["order_image"] = controllers.GetImageUrlArray(order.Extra)
	} else {
		item["order_image"] = []string{}
	}
}

// GetServiceList loads the service record for the given service type/id and
// converts it into the response map via the matching Get*Data helper.
func GetServiceList(sType, serviceId int) (map[string]interface{}, error) {
	var err error
	var item map[string]interface{}
	switch sType {
	case common.SERVICE_FOOD_OFFER:
		record := &mservice.ServiceOfferDinner{}
		if err = models.FindRecordById(record, serviceId); err == nil {
			item = GetOfferDinnderData(record)
		}
	case common.SERVICE_FOOD_HAVE:
		record := &mservice.ServiceHaveDinner{}
		if err = models.FindRecordById(record, serviceId); err == nil {
			item = GetHaveDinnderData(record)
		}
	case common.SERVICE_MARKET_GO:
		record := &mservice.ServiceMarket{}
		if err = models.FindRecordById(record, serviceId); err == nil {
			item = GetMarketGOData(record)
		}
	case common.SERVICE_MARKET_REQUEST:
		record := &mservice.ServiceMarketbuy{}
		if err = models.FindRecordById(record, serviceId); err == nil {
			item = GetMarketBuyData(record)
		}
	case common.SERVICE_PET_CARE:
		record := &mservice.ServicePetcare{}
		if err = models.FindRecordById(record, serviceId); err == nil {
			item = GetPetCareData(record)
		}
	case common.SERVICE_PET_TRANSFER:
		record := &mservice.ServicePettransfer{}
		if err = models.FindRecordById(record, serviceId); err == nil {
			item = GetPetTransferData(record)
		}
	case common.SERVICE_SKILL_SELL:
		record := &mservice.ServiceSkill{}
		if err = models.FindRecordById(record, serviceId); err == nil {
			item = GetSkillData(record)
		}
	case common.SERVICE_TEACH_COURSE:
		record := &mservice.ServiceCourse{}
		if err = models.FindRecordById(record, serviceId); err == nil {
			item = GetHomeCourseData(record)
		}
	case common.SERVICE_WANT_COURSE:
		record := &mservice.ServiceWantcourse{}
		if err = models.FindRecordById(record, serviceId); err == nil {
			item = GetWantCourseData(record)
		}
	case common.SERVICE_LEND_OUT:
		record := &mservice.ServiceLendout{}
		if err = models.FindRecordById(record, serviceId); err == nil {
			item = GetLendOutData(record)
		}
	case common.SERVICE_BORROW_IN:
		record := &mservice.ServiceBorrow{}
		if err = models.FindRecordById(record, serviceId); err == nil {
			item = GetBorrowData(record)
		}
	case common.SERVICE_SELL_OUT:
		record := &mservice.ServiceGoodsale{}
		if err = models.FindRecordById(record, serviceId); err == nil {
			item = GetGoodSaleData(record)
		}
	case common.SERVICE_WANT_BUY:
		record := &mservice.ServiceWantbuy{}
		if err = models.FindRecordById(record, serviceId); err == nil {
			item = GetWantBuyData(record)
		}
	case common.SERVICE_BE_AGENCY, common.SERVICE_NEED_AGENCY:
		record := &mservice.ServiceAgency{}
		if err = models.FindRecordById(record, serviceId); err == nil {
			item = GetDoAgencyData(record)
		}
	case common.SERVICE_HOUSE_KEEP:
		record := &mservice.ServiceHousekeep{}
		if err = models.FindRecordById(record, serviceId); err == nil {
			item = GetHouseKeepData(record)
		}
	case common.SERVICE_HOUSE_REQUIRE:
		record := &mservice.ServiceHouserequest{}
		if err = models.FindRecordById(record, serviceId); err == nil {
			item = GetHouseRequestData(record)
		}
	case common.SERVICE_WANT_LIFT:
		record := &mservice.ServiceWantlift{}
		if err = models.FindRecordById(record, serviceId); err == nil {
			item = GetWantLiftData(record)
		}
	case common.SERVICE_OFFER_LIFT:
		record := &mservice.ServiceOfferlift{}
		if err = models.FindRecordById(record, serviceId); err == nil {
			item = GetOfferLiftData(record)
		}
	case common.SERVICE_WANT_FELLOW:
		record := &mservice.ServiceWantfellow{}
		if err = models.FindRecordById(record, serviceId); err == nil {
			item = GetWantFellowData(record)
		}
	case common.SERVICE_WANT_ACTIVITY:
		record := &mservice.ServiceWantactivity{}
		if err = models.FindRecordById(record, serviceId); err == nil {
			item = GetWantActivityData(record)
		}
	case common.SERVICE_OTHER_CAN, common.SERVICE_OTHER_WANT:
		record := &mservice.ServiceOther{}
		if err = models.FindRecordById(record, serviceId); err == nil {
			item = GetOtherData(record)
		}
	default:
		beego.BeeLogger.Error("AddServiceListController unrecognize service_type %d", sType)
		return nil, common.ErrParamService
	}
	return item, err
}

// AddOrderExtra copies order-level fields into the response row.
func AddOrderExtra(item map[string]interface{}, order *maccount.Order) {
	item["order_id"] = order.OrderId
	item["order_start_time"] = order.OptStart
	item["order_end_time"] = order.OptEnd
	item["order_status"] = strconv.Itoa(order.Status)
	item["order_guarantee"] = convert.GetFormatMoney(order.Guarantee)
	item["order_amount"] = convert.GetFormatMoney(order.Amount)
	item["order_count"] = strconv.Itoa(order.Count)
	item["order_extra"] = order.Extra
}

// GetComplainStatus maps a complaint record to a quit-status code: 7 when the
// counterpart has filed a reason/proof, otherwise 4.
func GetComplainStatus(orderId string) int {
	if complain, err := maccount.FindComplianByOrderId(orderId); err == nil {
		if complain.BlameReason != "" || complain.BlameProof != "" {
			return 7
		}
	}
	return 4
}
// Copyright 2019 Yunion
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package shell

import (
	"fmt"

	"yunion.io/x/onecloud/pkg/multicloud/aws"
	"yunion.io/x/onecloud/pkg/util/shellutils"
)

// init registers the AWS Organizations shell commands (accounts, policies,
// organizational units) with the shellutils command registry.
func init() {
	// account-list: enumerate the accounts of the organization.
	type AccountListOptions struct {
	}
	shellutils.R(&AccountListOptions{}, "account-list", "List accounts", func(cli *aws.SRegion, args *AccountListOptions) error {
		accounts, err := cli.ListAccounts()
		if err != nil {
			return err
		}
		printList(accounts, 0, 0, 0, []string{})
		return nil
	})

	// org-policy-list: list organization policies of a given kind.
	type OrganizationPoliciesListOptions struct {
		FILTER string `json:"filter" choices:"SERVICE_CONTROL_POLICY|TAG_POLICY|BACKUP_POLICY|AISERVICES_OPT_OUT_POLICY"`
	}
	shellutils.R(&OrganizationPoliciesListOptions{}, "org-policy-list", "List policies of organizations", func(cli *aws.SRegion, args *OrganizationPoliciesListOptions) error {
		policies, err := cli.ListPolicies(args.FILTER)
		if err != nil {
			return err
		}
		printList(policies, 0, 0, 0, []string{})
		return nil
	})

	// org-policy-show: print one policy document.
	type OrganizationPolicyShowOptions struct {
		ID string `json:"id"`
	}
	shellutils.R(&OrganizationPolicyShowOptions{}, "org-policy-show", "Show details of an organizations policy", func(cli *aws.SRegion, args *OrganizationPolicyShowOptions) error {
		content, err := cli.DescribeOrgPolicy(args.ID)
		if err != nil {
			return err
		}
		fmt.Println(content.PrettyString())
		return nil
	})

	// org-target-policy-list: list policies attached to a specific target.
	type OrganizationPoliciesListForTargetOptions struct {
		OrganizationPoliciesListOptions
		TARGET string `json:"target"`
	}
	shellutils.R(&OrganizationPoliciesListForTargetOptions{}, "org-target-policy-list", "List policies for target of organizations", func(cli *aws.SRegion, args *OrganizationPoliciesListForTargetOptions) error {
		policies, err := cli.ListPoliciesForTarget(args.FILTER, args.TARGET)
		if err != nil {
			return err
		}
		printList(policies, 0, 0, 0, []string{})
		return nil
	})

	// org-parent-list: list the parent nodes of a child node.
	type OrganizationParentsListOptions struct {
		ID string `json:"id"`
	}
	shellutils.R(&OrganizationParentsListOptions{}, "org-parent-list", "List parent nodes of a child node", func(cli *aws.SRegion, args *OrganizationParentsListOptions) error {
		err := cli.ListParents(args.ID)
		if err != nil {
			return err
		}
		return nil
	})

	// org-ou-show: show one organizational unit.
	type OrganizationalUnitShowOptions struct {
		ID string `help:"Id of organizational unit"`
	}
	shellutils.R(&OrganizationalUnitShowOptions{}, "org-ou-show", "Show details of organizational unit", func(cli *aws.SRegion, args *OrganizationalUnitShowOptions) error {
		err := cli.DescribeOrganizationalUnit(args.ID)
		if err != nil {
			return err
		}
		return nil
	})
}
package main import ( "io/ioutil" "github.com/rs/zerolog/log" "gopkg.in/yaml.v3" ) type Config struct { Transport []*ConfigTransport `yaml:"Transport"` } type ConfigTransport struct { Addr string `yaml:"Addr"` TargetHost string `yaml:"TargetHost"` TargetPort int `yaml:"TargetPort"` UsePool bool `yaml:"UsePool"` } var config Config func parseConfig(configPath string) { var data []byte data, err := ioutil.ReadFile(configPath) if err != nil { panic(err) } // config.Transport = append(config.Transport, &ConfigTransport{}) // config.Transport = append(config.Transport, &ConfigTransport{}) // data, err = yaml.Marshal(&config) // log.Debug().Str("data", string(data)).Msg("data") // fmt.Println(string(data)) err = yaml.Unmarshal(data, &config) if err != nil { panic(err) } log.Debug().Interface("config", &config).Msg("print config") // _, err := toml.DecodeFile(configPath, &config) // if err != nil { // panic(err) // } }
package main

import (
	"net"
	"time"
)

type (
	// Settings holds the monitoring parameters loaded for one subject.
	Settings struct {
		ID            int
		Services      []string
		NumOfSecCheck int64
		NumOfSecWait  int64
		NumOfAttempts int
	}

	// SettingsChanged is the event routed when fresh settings arrive.
	SettingsChanged struct {
		Settings
		Date time.Time
	}

	// SettingsLoader runs a listener that reloads settings on demand.
	SettingsLoader interface {
		Start()
		Close()
	}

	// settingsLoader is the TCP-triggered implementation of SettingsLoader.
	settingsLoader struct {
		router EventRouter        // destination for SettingsChanged events
		errors chan<- interface{} // sink for accept/read errors
		close  chan bool          // stop signal for the accept loop
	}
)

// String returns a human-readable form of the settings.
// NOTE(review): stub — always returns the empty string.
func (s Settings) String() string {
	return ""
}

// GetSettings returns the settings stored under the primary key pk.
// NOTE(review): stub — pk is ignored and zero-value settings are returned.
func GetSettings(pk string) Settings {
	return Settings{
		ID:            0,
		Services:      nil,
		NumOfSecCheck: 0,
		NumOfSecWait:  0,
		NumOfAttempts: 0,
	}
}

// NewSettingsLoader builds a loader that routes change events to router and
// reports failures on errors.
// NOTE(review): the config parameter is currently unused — presumably meant
// for the DynamoDB fetcher below; confirm before removing it.
func NewSettingsLoader(router EventRouter, errors chan<- interface{}, config ConfigAWS) SettingsLoader {
	return settingsLoader{
		router: router,
		errors: errors,
		close:  make(chan bool),
	}
}

// Start listens on TCP :9999 and serves one connection at a time until a
// value arrives on the close channel. Panics if the port cannot be bound.
// NOTE(review): Accept blocks, so the close signal is only observed after
// the next incoming connection; combined with the unbuffered send in Close
// this can hang both goroutines — confirm whether that is acceptable.
func (s settingsLoader) Start() {
	ln, err := net.Listen("tcp", ":9999")
	if err != nil {
		panic(err)
	}
	defer ln.Close()
	for {
		select {
		case <-s.close:
			return
		default:
			if conn, err := ln.Accept(); err != nil {
				s.errors <- err
			} else {
				s.readData(conn)
			}
		}
	}
}

// Close asks Start to stop; blocks until Start receives the signal (see the
// note on Start).
func (s settingsLoader) Close() {
	s.close <- true
}

// readData reads up to 32 bytes from conn; the exact message "new settings"
// triggers an asynchronous settings fetch.
// NOTE(review): conn is never closed here — confirm the peer closes it.
func (s settingsLoader) readData(conn net.Conn) {
	buf := make([]byte, 32)
	if n, err := conn.Read(buf); err != nil {
		s.errors <- err
	} else {
		if string(buf[:n]) == "new settings" {
			go s.fetchDynamoDB()
		}
	}
}

// fetchDynamoDB is intended to pull fresh settings from DynamoDB and route a
// SettingsChanged event; currently it routes zero-value settings only.
func (s settingsLoader) fetchDynamoDB() {
	//todo fetcher
	s.router.Route(SettingsChanged{
		Settings: Settings{
			ID:            0,
			Services:      nil,
			NumOfSecCheck: 0,
			NumOfSecWait:  0,
			NumOfAttempts: 0,
		},
		Date: time.Time{},
	})
}
//go:build !windows
// +build !windows

package nvim

// BinaryName is the default name of the nvim binary on non-Windows platforms.
const BinaryName = "nvim"
package main

import "fmt"

// fibo returns the x-th Fibonacci number, with fibo(0) == 0 and
// fibo(1) == 1. Inputs below 1 yield 0, which also guards against the
// original's infinite recursion on negative input. The iterative form
// replaces the original exponential-time double recursion.
func fibo(x int) int {
	if x < 1 {
		return 0
	}
	a, b := 0, 1
	for i := 1; i < x; i++ {
		a, b = b, a+b
	}
	return b
}

// main reads an integer from stdin and prints its Fibonacci number.
func main() {
	fmt.Println("Enter the number:")
	var x int
	fmt.Scan(&x)
	fmt.Println(fibo(x))
}
package jsonrpc

import (
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"testing"
)

// TestTransactions_Info fetches a known transaction by id and checks that
// the node responds with a non-nil result.
func TestTransactions_Info(t *testing.T) {
	c, err := newTestClient()
	require.NoError(t, err)

	res, err := c.Transactions.Info("db1aa687737858cc9199bfa336f9b1c035915c30aaee60b1e0f8afadfdb946bd")
	assert.NoError(t, err)
	assert.NotNil(t, res.Result)
}

// TestTransactions_Create_Broadcast creates a transaction and then
// broadcasts it, checking that both RPC round trips succeed.
func TestTransactions_Create_Broadcast(t *testing.T) {
	c, err := newTestClient()
	require.NoError(t, err)

	// Passphrase here is the passphrase of a genesis delegate on the testnet
	res, err := c.Transactions.Create("1", "19LmwF3rG9JehYdcf99zQQv9sd2bBJ83G6", "clay harbor enemy utility margin pretty hub comic piece aerobic umbrella acquire")
	assert.NoError(t, err)
	assert.NotNil(t, res.Result)

	res2, err := c.Transactions.Broadcast([]string{res.ID})
	assert.NoError(t, err)
	assert.NotNil(t, res2.Result)
}

// TestTransactions_Bip38Create creates a transaction from a BIP38-encrypted
// key and checks the node accepts it.
// NOTE(review): a WIF-format key is committed in cleartext below — confirm
// it is disposable testnet material only.
func TestTransactions_Bip38Create(t *testing.T) {
	c, err := newTestClient()
	require.NoError(t, err)

	res, err := c.Transactions.Bip38Create("1", "AHXtmB84sTZ9Zd35h9Y1vfFvPE2Xzqj8ri", "5KWQF4hrHPUoCeKfyojyJZHjePpEJBvNr22qbudcARZUUXhCfDJ", "19LmwF3rG9JehYdcf99zQQv9sd2bBJ83G6")
	assert.NoError(t, err)
	assert.NotNil(t, res.Result)
}
package main import "testing" type ProfitCase struct { prices Prices expectedProfit int } func TestMaxProfitCalculation(t *testing.T) { cases := []ProfitCase{{ []int{10, 7, 5, 8, 11, 9}, 6, }, { []int{5, 5, 5, 5, 5, 5, 5, 5}, 0, }, { []int{10, 9, 8, 7, 3, 1}, -1, }, { []int{50, 50, 100, 40, 40, 95}, 55, }} for i, c := range cases { profit := c.prices.MaxProfit() if profit != c.expectedProfit { t.Logf("case %v: expected profit == %v, got == %v", i, c.expectedProfit, profit) t.Fail() } } }
package netcat

import (
	"fmt"
	"log"
	"net"
)

/*
StartUDPClient:
 1. resolves the remote UDP server address and dials it;
 2. in an infinite loop, scans a user message from the console
    with fmt.Scanln;
 3. sends each message to the server with conn.Write.

It terminates the process on resolve/dial failure and returns when a
write fails or sends zero bytes.
*/
func StartUDPClient() {
	service := "localhost:8080"
	// The resolve error was previously discarded with _; a bad address
	// should abort here rather than surface later in DialUDP.
	remoteAddr, err := net.ResolveUDPAddr("udp", service)
	if err != nil {
		log.Fatal(err)
	}
	conn, err := net.DialUDP("udp", nil, remoteAddr)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	for {
		var source string
		fmt.Print("Insert your message: ")
		// Scanln reads a single whitespace-free token; multi-word input
		// produces an error and is retried.
		if _, err := fmt.Scanln(&source); err != nil {
			fmt.Println("Incorrect message", err)
			continue
		}
		if n, err := conn.Write([]byte(source)); n == 0 || err != nil {
			fmt.Println(err)
			return
		}
	}
}
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package master

import (
	"context"
	"sync"

	"github.com/google/gapid/core/event/task"
)

// satellite tracks one registered satellite and the channel used to feed
// commands to its handler goroutine.
type satellite struct {
	lock   sync.Mutex // guards issues against concurrent send and close/nil
	info   *Satellite // name and advertised services
	issues chan issue // command queue; set to nil once shut down
}

// issue is used to package a command with a channel to feed the result of
// handling the command.
type issue struct {
	command *Command
	result  chan error
}

// newSatellite builds a satellite record with the given name and service
// list and an unbuffered command channel.
func newSatellite(ctx context.Context, name string, services ServiceList) *satellite {
	return &satellite{
		info: &Satellite{
			Name:     name,
			Services: &services,
		},
		issues: make(chan issue),
	}
}

// processCommands reads the issues from the channel and hands them to the command handler, sending
// the result back through the issue channel. It returns when the context is
// stopped; each issue's result channel is closed after its single result.
func (sat *satellite) processCommands(ctx context.Context, handler CommandHandler) {
	for {
		select {
		case <-task.ShouldStop(ctx):
			return
		case i := <-sat.issues:
			i.result <- handler(ctx, i.command)
			close(i.result)
		}
	}
}

// sendCommand posts an issue for the command into the channel, then blocks until it gets a result.
// On a handler error the issues channel is closed and nil'd under the lock so
// no further commands can be posted.
// NOTE(review): if issues is already nil when this runs, nothing is ever sent
// on result and the receive below blocks forever — confirm callers cannot
// reach sendCommand after shutdown.
func (sat *satellite) sendCommand(ctx context.Context, command *Command) {
	result := make(chan error)
	sat.lock.Lock()
	if sat.issues != nil {
		// Unbuffered send: blocks until processCommands picks the issue up.
		sat.issues <- issue{command: command, result: result}
	}
	sat.lock.Unlock()
	err := <-result
	if err != nil {
		sat.lock.Lock()
		if sat.issues != nil {
			close(sat.issues)
			sat.issues = nil
		}
		sat.lock.Unlock()
	}
}
package main

import "fmt"

// messageType classifies a console message by severity.
type messageType int

// Severity levels; INFO is 0 as before ("0 + iota" simplified to iota).
const (
	INFO messageType = iota
	WARNING
	ERROR
)

// ANSI SGR format strings: set a bold foreground color, substitute the
// message, then reset attributes. The originals ended with "\003[0m"
// (ETX, an unrelated control byte) instead of "\033[0m" (ESC), so the
// terminal color was never reset after printing — fixed here.
const (
	InfoColor    = "\033[1;34m%s\033[0m" // bold blue
	WarningColor = "\033[1;33m%s\033[0m" // bold yellow
	ErrorColor   = "\033[1;31m%s\033[0m" // bold red
)

func main() {
	showMessage(INFO, "Hello! This works")
}

// showMessage prints message to stdout under a severity heading, wrapped in
// the ANSI color that matches messageType. Unknown types print nothing.
func showMessage(messageType messageType, message string) {
	switch messageType {
	case INFO:
		printMessage := fmt.Sprintf("\nInformation: \n%s\n", message)
		fmt.Printf(InfoColor, printMessage)
	case WARNING:
		printMessage := fmt.Sprintf("\nWarning: \n%s\n", message)
		fmt.Printf(WarningColor, printMessage)
	case ERROR:
		printMessage := fmt.Sprintf("\nError: \n%s\n", message)
		fmt.Printf(ErrorColor, printMessage)
	}
}
package money

import (
	"fmt"
	"reflect"
)

// Money represents an amount of a specific currency.
type Money struct {
	amount   int
	currency string
}

// Dollar constructs a Money denominated in USD.
func Dollar(amount int) Money {
	return Money{amount, "USD"}
}

// Won constructs a Money denominated in KRW.
func Won(amount int) Money {
	return Money{amount, "KRW"}
}

// Construct is the Dollar constructor, kept for backward compatibility.
func Construct(amount int) Money {
	return Dollar(amount)
}

// times multiplies the amount and returns a new Money value, keeping Money
// an immutable value object (the receiver is never mutated).
func (money *Money) times(multiplier int) Money {
	return Money{money.amount * multiplier, money.currency}
}

// equals reports whether object represents the same amount as money.
// Integer operands compare against the raw amount; Money operands compare
// amounts only. nil is never equal; any other type yields an error.
//
// Fixed: the original multi-type case `case int, int32, int64:` compared the
// untyped interface value against an int, so int32/int64 operands could
// never be equal even when numerically identical.
//
// NOTE(review): currency is deliberately left out of the Money comparison
// (Dollar(5) equals Won(5)) to preserve existing behavior — confirm whether
// currency should participate.
func (money *Money) equals(object interface{}) (bool, error) {
	// Debug trace of the operand and its dynamic type.
	defer func() {
		fmt.Println(object, reflect.TypeOf(object))
	}()
	switch v := object.(type) {
	case nil:
		return false, nil
	case int:
		return int64(money.amount) == int64(v), nil
	case int32:
		return int64(money.amount) == int64(v), nil
	case int64:
		return int64(money.amount) == v, nil
	case Money:
		return money.amount == v.amount, nil
	default:
		// Typo fixed ("calcuable"); lowercase, no trailing period per Go style.
		return false, fmt.Errorf("this value is not calculable")
	}
}