text
stringlengths
11
4.05M
// Package examples contains small demonstration snippets.
package examples

import (
	"fmt"
	"strconv"
)

// convert demonstrates round-tripping an int through its string form:
// strconv.Itoa for int -> string and strconv.Atoi for string -> int.
func convert() {
	s := strconv.Itoa(34)
	fmt.Print(s)

	// FIX: the original printed the parsed value when err != nil, i.e.
	// only on failure — where the value is just the zero int. Print the
	// round-tripped value on success instead.
	if n, err := strconv.Atoi(s); err == nil {
		fmt.Print(n)
	}
}
package printer

import (
	"fmt"
	"io"
)

// DebugPrinter wraps a Printer with debug messages: most delegated
// calls are announced on stdout as a /* ... */ marker (with their
// arguments) before forwarding to the wrapped Printer P. Low-level
// pass-throughs (Reset, Print, Chop, ...) carry no marker.
type DebugPrinter struct {
	P Printer
}

// Reset delegates without a debug marker.
func (d *DebugPrinter) Reset() {
	d.P.Reset()
}

func (d *DebugPrinter) PushContext(c ContextType) {
	d.P.PushContext(c)
}

func (d *DebugPrinter) PopContext() {
	d.P.PopContext()
}

func (d *DebugPrinter) SetWriter(w io.Writer) {
	d.P.SetWriter(w)
}

func (d *DebugPrinter) UpdateLevel(delta int) {
	d.P.UpdateLevel(delta)
}

// SameLine uses fmt.Print (no newline) so the marker stays inline
// with the generated output.
func (d *DebugPrinter) SameLine() {
	fmt.Print("/* SameLine */")
	d.P.SameLine()
}

func (d *DebugPrinter) IsSameLine() bool {
	fmt.Print("/* IsSameLine */")
	return d.P.IsSameLine()
}

func (d *DebugPrinter) Chop(line string) string {
	return d.P.Chop(line)
}

func (d *DebugPrinter) Print(values ...string) {
	d.P.Print(values...)
}

func (d *DebugPrinter) PrintLevel(term string, values ...string) {
	d.P.PrintLevel(term, values...)
}

func (d *DebugPrinter) PrintBlockStart(b BlockType, empty bool) {
	fmt.Print("/* PrintBlockStart", b, "*/")
	d.P.PrintBlockStart(b, empty)
}

// PrintBlockEnd emits its marker after delegating, so the start/end
// markers bracket the generated block.
func (d *DebugPrinter) PrintBlockEnd(b BlockType) {
	d.P.PrintBlockEnd(b)
	fmt.Print("/* PrintBlockEnd", "*/")
}

func (d *DebugPrinter) PrintPackage(name string) {
	fmt.Println("/* PrintPackage", name, "*/")
	d.P.PrintPackage(name)
}

func (d *DebugPrinter) PrintImport(name, path string) {
	fmt.Println("/* PrintImport", name, path, "*/")
	d.P.PrintImport(name, path)
}

func (d *DebugPrinter) PrintType(name, typedef string) {
	fmt.Println("/* PrintType", name, typedef, "*/")
	d.P.PrintType(name, typedef)
}

func (d *DebugPrinter) PrintValue(vtype, typedef, names, values string, ntuple, vtuple bool) {
	fmt.Println("/* PrintValue", vtype, typedef, names, values, ntuple, vtuple, "*/")
	d.P.PrintValue(vtype, typedef, names, values, ntuple, vtuple)
}

func (d *DebugPrinter) PrintStmt(stmt, expr string) {
	fmt.Println("/* PrintStmt", stmt, expr, "*/")
	d.P.PrintStmt(stmt, expr)
}

func (d *DebugPrinter) PrintReturn(expr string, tuple bool) {
	fmt.Println("/* PrintReturn", expr, tuple, "*/")
	d.P.PrintReturn(expr, tuple)
}

func (d *DebugPrinter) PrintFunc(receiver, name, params, results string) {
	fmt.Println("/* PrintFunc", receiver, name, params, results, "*/")
	d.P.PrintFunc(receiver, name, params, results)
}

func (d *DebugPrinter) PrintFor(init, cond, post string) {
	fmt.Println("/* PrintFor", init, cond, post, "*/")
	d.P.PrintFor(init, cond, post)
}

func (d *DebugPrinter) PrintRange(key, value, expr string) {
	fmt.Println("/* PrintRange", key, value, expr, "*/")
	d.P.PrintRange(key, value, expr)
}

func (d *DebugPrinter) PrintSwitch(init, expr string) {
	fmt.Println("/* PrintSwitch", init, expr, "*/")
	d.P.PrintSwitch(init, expr)
}

func (d *DebugPrinter) PrintCase(expr string) {
	fmt.Println("/* PrintCase", expr, "*/")
	d.P.PrintCase(expr)
}

func (d *DebugPrinter) PrintEndCase() {
	fmt.Println("/* PrintEndCase", "*/")
	d.P.PrintEndCase()
}

func (d *DebugPrinter) PrintIf(init, cond string) {
	fmt.Println("/* PrintIf", init, cond, "*/")
	d.P.PrintIf(init, cond)
}

func (d *DebugPrinter) PrintElse() {
	fmt.Println("/* PrintElse", "*/")
	d.P.PrintElse()
}

func (d *DebugPrinter) PrintEmpty() {
	fmt.Println("/* PrintEmpty", "*/")
	d.P.PrintEmpty()
}

func (d *DebugPrinter) PrintAssignment(lhs, op, rhs string, ltuple, rtuple bool) {
	fmt.Println("/* PrintAssignment", lhs, op, rhs, ltuple, rtuple, "*/")
	d.P.PrintAssignment(lhs, op, rhs, ltuple, rtuple)
}

func (d *DebugPrinter) PrintSend(ch, value string) {
	fmt.Println("/* PrintSend", ch, value, "*/")
	d.P.PrintSend(ch, value)
}

func (d *DebugPrinter) FormatIdent(id, itype string) string {
	fmt.Println("/* FormatIdent", id, itype, "*/")
	return d.P.FormatIdent(id, itype)
}

func (d *DebugPrinter) FormatLiteral(lit string) string {
	fmt.Println("/* FormatLiteral", lit, "*/")
	return d.P.FormatLiteral(lit)
}

func (d *DebugPrinter) FormatCompositeLit(typedef, elt string) string {
	fmt.Println("/* FormatCompositeLit", typedef, elt, "*/")
	return d.P.FormatCompositeLit(typedef, elt)
}

func (d *DebugPrinter) FormatEllipsis(expr string) string {
	fmt.Println("/* FormatEllipsis", expr, "*/")
	return d.P.FormatEllipsis(expr)
}

func (d *DebugPrinter) FormatStar(expr string) string {
	fmt.Println("/* FormatStar", expr, "*/")
	return d.P.FormatStar(expr)
}

func (d *DebugPrinter) FormatParen(expr string) string {
	fmt.Println("/* FormatParen", expr, "*/")
	return d.P.FormatParen(expr)
}

func (d *DebugPrinter) FormatUnary(op, operand string) string {
	fmt.Println("/* FormatUnary", op, operand, "*/")
	return d.P.FormatUnary(op, operand)
}

func (d *DebugPrinter) FormatBinary(lhs, op, rhs string) string {
	fmt.Println("/* FormatBinary", lhs, op, rhs, "*/")
	return d.P.FormatBinary(lhs, op, rhs)
}

func (d *DebugPrinter) FormatPair(v Pair, t FieldType) string {
	fmt.Println("/* FormatPair", v, t, "*/")
	return d.P.FormatPair(v, t)
}

func (d *DebugPrinter) FormatArray(len, elt string) string {
	fmt.Println("/* FormatArray", len, elt, "*/")
	return d.P.FormatArray(len, elt)
}

func (d *DebugPrinter) FormatArrayIndex(array, index, ctype string) string {
	fmt.Println("/* FormatArrayIndex", array, index, ctype, "*/")
	return d.P.FormatArrayIndex(array, index, ctype)
}

func (d *DebugPrinter) FormatMapIndex(array, index, ctype string, check bool) string {
	fmt.Println("/* FormatMapIndex", array, index, ctype, check, "*/")
	return d.P.FormatMapIndex(array, index, ctype, check)
}

// FormatSlice logs every argument it forwards.
// FIX: the debug output previously omitted the slice argument.
func (d *DebugPrinter) FormatSlice(slice, low, high, max string) string {
	fmt.Println("/* FormatSlice", slice, low, high, max, "*/")
	return d.P.FormatSlice(slice, low, high, max)
}

func (d *DebugPrinter) FormatMap(key, elt string) string {
	fmt.Println("/* FormatMap", key, elt, "*/")
	return d.P.FormatMap(key, elt)
}

func (d *DebugPrinter) FormatKeyValue(key, value string, isMap bool) string {
	fmt.Println("/* FormatKeyValue", key, value, isMap, "*/")
	return d.P.FormatKeyValue(key, value, isMap)
}

// FormatStruct logs every argument it forwards.
// FIX: the debug output previously omitted the name argument.
func (d *DebugPrinter) FormatStruct(name, fields string) string {
	fmt.Println("/* FormatStruct", name, fields, "*/")
	return d.P.FormatStruct(name, fields)
}

// FormatInterface logs every argument it forwards.
// FIX: the debug output previously omitted the name argument.
func (d *DebugPrinter) FormatInterface(name, methods string) string {
	fmt.Println("/* FormatInterface", name, methods, "*/")
	return d.P.FormatInterface(name, methods)
}

func (d *DebugPrinter) FormatChan(chdir, mtype string) string {
	fmt.Println("/* FormatChan", chdir, mtype, "*/")
	return d.P.FormatChan(chdir, mtype)
}

func (d *DebugPrinter) FormatCall(fun, args string, isFuncLit bool) string {
	fmt.Println("/* FormatCall", fun, args, isFuncLit, "*/")
	return d.P.FormatCall(fun, args, isFuncLit)
}

func (d *DebugPrinter) FormatFuncType(params, results string, withFunc bool) string {
	fmt.Println("/* FormatFuncType", params, results, withFunc, "*/")
	return d.P.FormatFuncType(params, results, withFunc)
}

func (d *DebugPrinter) FormatFuncLit(ftype, body string) string {
	fmt.Println("/* FormatFuncLit", ftype, body, "*/")
	return d.P.FormatFuncLit(ftype, body)
}

func (d *DebugPrinter) FormatSelector(pname, sel string, isObject bool) string {
	fmt.Println("/* FormatSelector", pname, sel, isObject, "*/")
	return d.P.FormatSelector(pname, sel, isObject)
}

func (d *DebugPrinter) FormatTypeAssert(orig, assert string) string {
	fmt.Println("/* FormatTypeAssert", orig, assert, "*/")
	return d.P.FormatTypeAssert(orig, assert)
}
package robot

import (
	"github.com/gin-gonic/gin"
	"github.com/grearter/rpa-agent/util"
	"github.com/sirupsen/logrus"
	"net/http"
)

// Delete stops/deletes the robot identified by the "robotId" path
// parameter and responds 200 with an empty data payload.
//
// NOTE(review): no actual stop action is performed before logging
// "success" — presumably the real stop logic is still TODO; confirm.
func Delete(c *gin.Context) {
	robotID := c.Param("robotId")
	logrus.Infof("stop robot '%s' success", robotID)
	c.JSON(http.StatusOK, util.NewRespWithData(nil))
	// FIX: removed the redundant bare `return` at the end of the function.
}
package walletrpcclient

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"sort"

	"github.com/decred/dcrd/chaincfg/chainhash"
	"github.com/decred/dcrd/dcrutil"
	"github.com/decred/dcrd/wire"
	pb "github.com/decred/dcrwallet/rpc/walletrpc"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"

	"github.com/raedahgroup/dcrcli/walletrpcclient/walletcore"
)

// Client is a thin wrapper over the dcrwallet gRPC wallet service.
type Client struct {
	walletServiceClient pb.WalletServiceClient
}

// New dials the wallet gRPC server at address and returns a ready
// Client. cert is the TLS certificate file, ignored when noTLS is set.
func New(address, cert string, noTLS bool) (*Client, error) {
	c := &Client{}
	conn, err := c.connect(address, cert, noTLS)
	if err != nil {
		return nil, err
	}

	// register clients
	c.walletServiceClient = pb.NewWalletServiceClient(conn)

	return c, nil
}

// connect establishes the gRPC connection, loading client TLS
// credentials from cert unless noTLS is set.
// FIX: restructured to avoid shadowing err inside the TLS branch.
func (c *Client) connect(address, cert string, noTLS bool) (*grpc.ClientConn, error) {
	if noTLS {
		return grpc.Dial(address, grpc.WithInsecure())
	}

	creds, err := credentials.NewClientTLSFromFile(cert, "")
	if err != nil {
		return nil, err
	}
	return grpc.Dial(address, grpc.WithTransportCredentials(creds))
}

// SendFromAccount sends amountInDCR from sourceAccount to
// destinationAddress, unlocking the wallet with passphrase.
func (c *Client) SendFromAccount(amountInDCR float64, sourceAccount uint32, destinationAddress, passphrase string) (*SendResult, error) {
	// convert amount from float64 DCR to int64 Atom as required by
	// the dcrwallet ConstructTransaction implementation
	amountInAtom, err := dcrutil.NewAmount(amountInDCR)
	if err != nil {
		return nil, err
	}
	// type of amountInAtom is `dcrutil.Amount` which is an int64 alias
	amount := int64(amountInAtom)

	// construct transaction
	pkScript, err := walletcore.GetPKScript(destinationAddress)
	if err != nil {
		return nil, err
	}
	constructRequest := &pb.ConstructTransactionRequest{
		SourceAccount: sourceAccount,
		NonChangeOutputs: []*pb.ConstructTransactionRequest_Output{{
			Destination: &pb.ConstructTransactionRequest_OutputDestination{
				Script:        pkScript,
				ScriptVersion: 0,
			},
			Amount: amount,
		}},
	}
	constructResponse, err := c.walletServiceClient.ConstructTransaction(context.Background(), constructRequest)
	if err != nil {
		return nil, fmt.Errorf("error constructing transaction: %s", err.Error())
	}

	return c.signAndPublishTransaction(constructResponse.UnsignedTransaction, passphrase)
}

// SendFromUTXOs builds a transaction spending exactly the unspent
// outputs named in utxoKeys (formatted "txhash:index"), sending
// amountInDCR to destinationAddress and the change to a fresh address
// in sourceAccount.
func (c *Client) SendFromUTXOs(utxoKeys []string, amountInDCR float64, sourceAccount uint32, destinationAddress, passphrase string) (*SendResult, error) {
	// convert amount to atoms
	amountInAtom, err := dcrutil.NewAmount(amountInDCR)
	if err != nil {
		// FIX: this error was previously ignored, letting an invalid
		// amount flow into the unsigned transaction.
		return nil, err
	}
	amount := int64(amountInAtom)

	// fetch all utxos to extract details for the utxos selected by user
	req := &pb.UnspentOutputsRequest{
		Account:                  sourceAccount,
		TargetAmount:             0,
		RequiredConfirmations:    0,
		IncludeImmatureCoinbases: true,
	}
	stream, err := c.walletServiceClient.UnspentOutputs(context.Background(), req)
	if err != nil {
		return nil, err
	}

	// loop through utxo stream to find user selected utxos
	inputs := make([]*wire.TxIn, 0, len(utxoKeys))
	for {
		item, err := stream.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			return nil, err
		}

		transactionHash, err := chainhash.NewHash(item.TransactionHash)
		if err != nil {
			return nil, fmt.Errorf("invalid transaction hash: %s", err.Error())
		}

		outputKey := fmt.Sprintf("%s:%v", transactionHash.String(), item.OutputIndex)
		useUtxo := false
		for _, key := range utxoKeys {
			if outputKey == key {
				useUtxo = true
				break
			}
		}
		if !useUtxo {
			continue
		}

		outpoint := wire.NewOutPoint(transactionHash, item.OutputIndex, int8(item.Tree))
		input := wire.NewTxIn(outpoint, item.Amount, nil)
		inputs = append(inputs, input)

		if len(inputs) == len(utxoKeys) {
			break
		}
	}

	// generate address from sourceAccount to receive change
	receiveResult, err := c.Receive(sourceAccount)
	if err != nil {
		return nil, err
	}
	changeAddress := receiveResult.Address

	unsignedTx, err := walletcore.NewUnsignedTx(inputs, amount, destinationAddress, changeAddress)
	if err != nil {
		return nil, err
	}

	// serialize unsigned tx
	var txBuf bytes.Buffer
	txBuf.Grow(unsignedTx.SerializeSize())
	err = unsignedTx.Serialize(&txBuf)
	if err != nil {
		return nil, fmt.Errorf("error serializing transaction: %s", err.Error())
	}

	return c.signAndPublishTransaction(txBuf.Bytes(), passphrase)
}

// signAndPublishTransaction signs serializedTx with passphrase and
// broadcasts it, returning the published transaction hash.
func (c *Client) signAndPublishTransaction(serializedTx []byte, passphrase string) (*SendResult, error) {
	ctx := context.Background()

	// sign transaction
	signRequest := &pb.SignTransactionRequest{
		Passphrase:            []byte(passphrase),
		SerializedTransaction: serializedTx,
	}
	signResponse, err := c.walletServiceClient.SignTransaction(ctx, signRequest)
	if err != nil {
		return nil, fmt.Errorf("error signing transaction: %s", err.Error())
	}

	// publish transaction
	publishRequest := &pb.PublishTransactionRequest{
		SignedTransaction: signResponse.Transaction,
	}
	publishResponse, err := c.walletServiceClient.PublishTransaction(ctx, publishRequest)
	if err != nil {
		return nil, fmt.Errorf("error publishing transaction: %s", err.Error())
	}

	// FIX: the NewHash error was previously discarded, which would
	// nil-dereference on a malformed hash from the server.
	transactionHash, err := chainhash.NewHash(publishResponse.TransactionHash)
	if err != nil {
		return nil, fmt.Errorf("invalid transaction hash: %s", err.Error())
	}

	response := &SendResult{
		TransactionHash: transactionHash.String(),
	}
	return response, nil
}

// Balance returns the balance of every account, skipping the special
// "imported" account when it is empty.
func (c *Client) Balance() ([]*AccountBalanceResult, error) {
	ctx := context.Background()
	accounts, err := c.walletServiceClient.Accounts(ctx, &pb.AccountsRequest{})
	if err != nil {
		return nil, fmt.Errorf("error fetching accounts: %s", err.Error())
	}

	balanceResult := make([]*AccountBalanceResult, 0, len(accounts.Accounts))
	for _, v := range accounts.Accounts {
		accountBalance, err := c.SingleAccountBalance(v.AccountNumber, ctx)
		if err != nil {
			return nil, err
		}
		if v.AccountName == "imported" && accountBalance.Total == 0 {
			continue
		}
		accountBalance.AccountName = v.AccountName
		balanceResult = append(balanceResult, accountBalance)
	}

	return balanceResult, nil
}

// SingleAccountBalance fetches the balance of one account with zero
// required confirmations. A nil ctx falls back to context.Background().
func (c *Client) SingleAccountBalance(accountNumber uint32, ctx context.Context) (*AccountBalanceResult, error) {
	if ctx == nil {
		ctx = context.Background()
	}
	req := &pb.BalanceRequest{
		AccountNumber:         accountNumber,
		RequiredConfirmations: 0,
	}
	res, err := c.walletServiceClient.Balance(ctx, req)
	if err != nil {
		return nil, fmt.Errorf("error fetching balance for account: %d :%s", accountNumber, err.Error())
	}

	return &AccountBalanceResult{
		AccountNumber:   accountNumber,
		Total:           dcrutil.Amount(res.Total),
		Spendable:       dcrutil.Amount(res.Spendable),
		LockedByTickets: dcrutil.Amount(res.LockedByTickets),
		VotingAuthority: dcrutil.Amount(res.VotingAuthority),
		Unconfirmed:     dcrutil.Amount(res.Unconfirmed),
	}, nil
}

// Receive generates the next external BIP0044 receive address for the
// given account.
func (c *Client) Receive(accountNumber uint32) (*ReceiveResult, error) {
	ctx := context.Background()
	req := &pb.NextAddressRequest{
		Account:   accountNumber,
		GapPolicy: pb.NextAddressRequest_GAP_POLICY_WRAP,
		Kind:      pb.NextAddressRequest_BIP0044_EXTERNAL,
	}
	r, err := c.walletServiceClient.NextAddress(ctx, req)
	if err != nil {
		return nil, fmt.Errorf("error generating receive address: %s", err.Error())
	}

	res := &ReceiveResult{
		Address: r.Address,
	}
	return res, nil
}

// IsAddressValid reports whether address is valid for this wallet's
// network.
func (c *Client) IsAddressValid(address string) (bool, error) {
	r, err := c.ValidateAddress(address)
	if err != nil {
		return false, err
	}
	return r.IsValid, nil
}

// ValidateAddress asks the wallet service to validate address.
func (c *Client) ValidateAddress(address string) (*pb.ValidateAddressResponse, error) {
	req := &pb.ValidateAddressRequest{
		Address: address,
	}
	return c.walletServiceClient.ValidateAddress(context.Background(), req)
}

// NextAccount creates a new account named accountName, unlocking the
// wallet with passphrase, and returns its account number.
func (c *Client) NextAccount(accountName string, passphrase string) (uint32, error) {
	req := &pb.NextAccountRequest{
		AccountName: accountName,
		Passphrase:  []byte(passphrase),
	}
	r, err := c.walletServiceClient.NextAccount(context.Background(), req)
	if err != nil {
		return 0, err
	}
	return r.AccountNumber, nil
}

// UnspentOutputs lists the unspent outputs of account, including
// immature coinbases, with zero required confirmations.
func (c *Client) UnspentOutputs(account uint32, targetAmount int64) ([]*UnspentOutputsResult, error) {
	req := &pb.UnspentOutputsRequest{
		Account:                  account,
		TargetAmount:             targetAmount,
		RequiredConfirmations:    0,
		IncludeImmatureCoinbases: true,
	}
	stream, err := c.walletServiceClient.UnspentOutputs(context.Background(), req)
	if err != nil {
		return nil, err
	}

	outputs := []*UnspentOutputsResult{}
	for {
		item, err := stream.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			return nil, err
		}

		// FIX: previously the NewHash error was discarded and the nil
		// hash dereferenced below.
		transactionHash, err := chainhash.NewHash(item.TransactionHash)
		if err != nil {
			return nil, fmt.Errorf("invalid transaction hash: %s", err.Error())
		}

		outputItem := &UnspentOutputsResult{
			OutputKey:       fmt.Sprintf("%s:%v", transactionHash.String(), item.OutputIndex),
			TransactionHash: transactionHash.String(),
			OutputIndex:     item.OutputIndex,
			Amount:          item.Amount,
			AmountString:    dcrutil.Amount(item.Amount).String(),
			PkScript:        item.PkScript,
			AmountSum:       dcrutil.Amount(item.AmountSum).String(),
			ReceiveTime:     item.ReceiveTime,
			Tree:            item.Tree,
			FromCoinbase:    item.FromCoinbase,
		}
		outputs = append(outputs, outputItem)
	}

	return outputs, nil
}

// GetTransactions streams all mined and unmined wallet transactions
// and returns them sorted newest-first by timestamp.
func (c *Client) GetTransactions() ([]*Transaction, error) {
	req := &pb.GetTransactionsRequest{}
	stream, err := c.walletServiceClient.GetTransactions(context.Background(), req)
	if err != nil {
		return nil, err
	}

	var transactions []*Transaction
	for {
		in, err := stream.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			return nil, err
		}

		var transactionDetails []*pb.TransactionDetails
		if in.MinedTransactions != nil {
			transactionDetails = append(transactionDetails, in.MinedTransactions.Transactions...)
		}
		if in.UnminedTransactions != nil {
			transactionDetails = append(transactionDetails, in.UnminedTransactions...)
		}

		txs, err := c.processTransactions(transactionDetails)
		if err != nil {
			return nil, err
		}
		transactions = append(transactions, txs...)
	}

	// sort transactions by date (list newer first)
	sort.SliceStable(transactions, func(i1, i2 int) bool {
		return transactions[i1].Timestamp > transactions[i2].Timestamp
	})

	return transactions, nil
}
// Package goroutines demonstrates launching concurrent goroutines.
package goroutines

import (
	"fmt"
	"time"
)

// run prints the loop counter five times.
func run() {
	for i := 0; i < 5; i++ {
		fmt.Println("i is ", i)
	}
}

// greeting prints a fixed greeting.
func greeting() {
	fmt.Println("Hello")
}

// Init launches run and greeting as concurrent goroutines and then
// sleeps one second so the program does not exit before they finish.
//
// NOTE(review): the original comments claimed the goroutines are
// "pushed to a stack" and popped in LIFO order (greeting first, run
// last); the Go scheduler gives no ordering guarantee between
// goroutines, so their output may interleave in any order.
// NOTE(review): time.Sleep is a crude synchronization mechanism —
// sync.WaitGroup would be the reliable way to wait for completion.
func Init() {
	go run()
	go greeting()
	time.Sleep(time.Second)
}
package fakes

import (
	"errors"

	"github.com/cloudfoundry-incubator/notifications/web/services"
)

// FakePreferencesFinder is a test double for a preferences finder.
// It records the user GUID it was asked about and returns a canned
// PreferencesBuilder, optionally with an error when FindErrors is set.
type FakePreferencesFinder struct {
	ReturnValue services.PreferencesBuilder
	FindErrors  bool
	UserGUID    string
}

// NewFakePreferencesFinder builds a fake that will always return
// returnValue from Find.
func NewFakePreferencesFinder(returnValue services.PreferencesBuilder) *FakePreferencesFinder {
	fake := &FakePreferencesFinder{}
	fake.ReturnValue = returnValue
	return fake
}

// Find records userGUID and returns the canned value, with an error
// when FindErrors is set.
func (fake *FakePreferencesFinder) Find(userGUID string) (services.PreferencesBuilder, error) {
	fake.UserGUID = userGUID

	var err error
	if fake.FindErrors {
		err = errors.New("Meltdown")
	}
	return fake.ReturnValue, err
}
package main

// dfs performs a depth-first traversal of the graph described by the
// adjacency list nodes, starting at node, invoking fn on each vertex
// the first time it is visited.
func dfs(node int, nodes map[int][]int, fn func(int)) {
	dfsRecur(node, nodes, map[int]bool{}, fn)
}

// dfsRecur visits node, then recursively visits each not-yet-visited
// neighbor. visited marks processed vertices so cycles terminate.
//
// FIX: the original helper referenced the adjacency list `nodes`
// without receiving it as a parameter, which does not compile; it is
// now threaded through the recursion.
func dfsRecur(node int, nodes map[int][]int, visited map[int]bool, fn func(int)) {
	visited[node] = true
	fn(node)
	for _, n := range nodes[node] {
		if !visited[n] {
			dfsRecur(n, nodes, visited, fn)
		}
	}
}
package main

import (
	"math"
)

// main demonstrates Go's untyped constants: the quotient 3e20/n stays
// an arbitrary-precision untyped constant, printable as a float64 and
// convertible to int64, and math.Sin takes radians (180 here is a
// large angle in radians, not degrees).
func main() {
	const (
		n = 500000000
		d = 3e20 / n
	)

	println(d)
	println(int64(d))
	println(math.Sin(180))
}
package server

import (
	"time"
)

// TimeInfo tracks the server loop's frame counter and the timestamps
// of its recurring activities.
type TimeInfo struct {
	FrameCount     int
	RunTime        time.Duration // total running time
	StartTime      time.Time     // time the server started running
	DeltaTime      time.Duration // interval between updates
	LastUpdateTime time.Time     // time of the most recent update
	LastBeatTime   time.Time     // time of the most recent heartbeat
	LastScanTime   time.Time     // time of the most recent scan
	LastFreshTime  time.Time     // time of the most recent refresh
}
// Copyright (c) 2017-2018 Zededa, Inc.
// SPDX-License-Identifier: Apache-2.0

package zedagent

// cipher specific parser/utility routines

import (
	"bytes"
	"crypto/sha256"
	"errors"
	"fmt"
	"io/ioutil"

	zconfig "github.com/lf-edge/eve/api/go/config"
	"github.com/lf-edge/eve/pkg/pillar/types"
	log "github.com/sirupsen/logrus"
)

// XXX:TBD controller certificate change should trigger reprocessing
// of cipherContexts/cipherBlocks

// handleControllerCertModify currently only logs the change; no
// reprocessing of dependent cipher state happens yet (see XXX above).
func handleControllerCertModify(ctxArg interface{}, key string,
	configArg interface{}) {
	log.Infof("handleControllerCertModify(%s)\n", key)
	config := configArg.(types.ControllerCertificate)
	log.Infof("handleControllerCertModify(%s) done %v\n", key, config)
}

// handleControllerCertDelete only logs the deletion.
func handleControllerCertDelete(ctxArg interface{}, key string,
	configArg interface{}) {
	log.Infof("handleControllerCertDelete(%s)\n", key)
}

// XXX:TBD cipherContext change should trigger reprocessing
// of cipherBlocks

// handleCipherContextModify currently only logs the change.
func handleCipherContextModify(ctxArg interface{}, key string,
	configArg interface{}) {
	log.Infof("handleCipherContextModify(%s)\n", key)
	config := configArg.(types.CipherContext)
	log.Infof("handleCipherContextModify(%s) done %v\n", key, config)
}

// handleCipherContextDelete only logs the deletion.
func handleCipherContextDelete(ctxArg interface{}, key string,
	configArg interface{}) {
	log.Infof("handleCipherContextDelete(%s)\n", key)
}

// parseCipherBlock : will collate all the relevant information
// ciphercontext will be used to get the certs and encryption schemes.
// Returns a zero-value CipherBlock for nil input; IsCipher is set only
// when the block carries cipher data, and IsValidCipher only when the
// context lookup and both cert lookups succeed.
func parseCipherBlock(ctx *getconfigContext,
	cfgCipherBlock *zconfig.CipherBlock) types.CipherBlock {
	cipherBlock := types.CipherBlock{}
	if cfgCipherBlock == nil {
		return cipherBlock
	}
	cipherBlock.ID = cfgCipherBlock.GetCipherContextId()
	cipherBlock.InitialValue = cfgCipherBlock.GetInitialValue()
	cipherBlock.CipherData = cfgCipherBlock.GetCipherData()
	cipherBlock.ClearTextHash = cfgCipherBlock.GetClearTextSha256()

	// should contain valid cipher data
	if len(cipherBlock.CipherData) == 0 ||
		len(cipherBlock.ID) == 0 {
		log.Infof("%s, cipher block does not contain valid data\n",
			cipherBlock.ID)
		return cipherBlock
	}
	cipherBlock.IsCipher = true

	// get the cipher context
	cipherContext := getCipherContextConfig(ctx, cipherBlock.ID)
	if cipherContext == nil {
		// partially-filled block is returned: IsCipher set, not valid
		return cipherBlock
	}

	// copy the relevant attributes, from cipher context to cipher block
	cipherBlock.KeyExchangeScheme = cipherContext.KeyExchangeScheme
	cipherBlock.EncryptionScheme = cipherContext.EncryptionScheme

	// get the relevant controller cert and device cert
	ccert, dcert, err := getCipherContextCerts(ctx, cipherContext)
	if err != nil {
		return cipherBlock
	}
	cipherBlock.ControllerCert = ccert
	cipherBlock.DeviceCert = dcert

	// finally, mark the cipher block as valid
	cipherBlock.IsValidCipher = true
	return cipherBlock
}

// cipher context config parsing and publish
// cipherContextConfigHash caches the sha of the last applied list so
// unchanged configs are skipped.
var cipherContextConfigHash []byte

// parseCipherContextConfig diffs the incoming cipher-context list
// against the published set: unchanged input is skipped via the hash,
// removed contexts are unpublished, and present ones are (re)published.
func parseCipherContextConfig(getconfigCtx *getconfigContext,
	config *zconfig.EdgeDevConfig) {
	cfgCipherContextList := config.GetCipherContexts()
	h := sha256.New()
	for _, cfgCipherContext := range cfgCipherContextList {
		computeConfigElementSha(h, cfgCipherContext)
	}
	newConfigHash := h.Sum(nil)
	same := bytes.Equal(newConfigHash, cipherContextConfigHash)
	if same {
		return
	}
	log.Infof("parseCipherContextConfig: Applying updated config\n"+
		"Last Sha: % x\n"+
		"New Sha: % x\n"+
		"cfgCipherContextList: %v\n",
		cipherContextConfigHash, newConfigHash, cfgCipherContextList)
	cipherContextConfigHash = newConfigHash

	// First look for deleted ones
	items := getconfigCtx.pubCipherContextConfig.GetAll()
	for idStr := range items {
		found := false
		for _, cfgCipherContext := range cfgCipherContextList {
			if cfgCipherContext.GetContextId() == idStr {
				found = true
				break
			}
		}
		// cipherContext not found, delete
		if !found {
			log.Infof("parseCipherContextConfig: deleting %s\n", idStr)
			unpublishCipherContextConfig(getconfigCtx, idStr)
		}
	}

	for _, cfgCipherContext := range cfgCipherContextList {
		if cfgCipherContext.GetContextId() == "" {
			log.Debugf("parseCipherContextConfig ignoring empty\n")
			continue
		}
		cipherContext := new(types.CipherContext)
		cipherContext.ID = cfgCipherContext.GetContextId()
		cipherContext.HashScheme = cfgCipherContext.GetHashScheme()
		cipherContext.KeyExchangeScheme = cfgCipherContext.GetKeyExchangeScheme()
		cipherContext.EncryptionScheme = cfgCipherContext.GetEncryptionScheme()
		cipherContext.DeviceCertHash = cfgCipherContext.GetDeviceCertHash()
		cipherContext.ControllerCertHash = cfgCipherContext.GetControllerCertHash()
		log.Debugf("parseCipherContextConfig publishing %v\n",
			cipherContext)
		publishCipherContextConfig(getconfigCtx, cipherContext)
	}
}

// publishCipherContextConfig publishes config under its own key.
func publishCipherContextConfig(getconfigCtx *getconfigContext,
	config *types.CipherContext) {
	key := config.Key()
	log.Debugf("publishCipherContext %s\n", key)
	pub := getconfigCtx.pubCipherContextConfig
	pub.Publish(key, *config)
}

// unpublishCipherContextConfig removes the published entry for key,
// logging (but not failing) when it does not exist.
func unpublishCipherContextConfig(getconfigCtx *getconfigContext, key string) {
	log.Debugf("unpublishCipherContextConfig(%s)\n", key)
	pub := getconfigCtx.pubCipherContextConfig
	c, _ := pub.Get(key)
	if c == nil {
		log.Errorf("unpublishCipherContext(%s) not found\n", key)
		return
	}
	pub.Unpublish(key)
}

// getCipherContextConfig looks up a published cipher context by key,
// returning nil when not found.
func getCipherContextConfig(getconfigCtx *getconfigContext,
	key string) *types.CipherContext {
	log.Debugf("getCipherContextConfig(%s)\n", key)
	pub := getconfigCtx.pubCipherContextConfig
	c, _ := pub.Get(key)
	if c == nil {
		log.Errorf("CipherContextConfig(%s) not found\n", key)
		return nil
	}
	config := c.(types.CipherContext)
	return &config
}

// getCipherContextCerts resolves the controller cert and the device
// cert referenced (by hash) from cipherContext, returning an error if
// either cannot be found.
func getCipherContextCerts(ctx *getconfigContext,
	cipherContext *types.CipherContext) ([]byte, []byte, error) {
	log.Debugf("getCipherContextCerts(%s)\n", cipherContext.Key())
	// get controller cert
	ccert := getCipherContextControllerCert(ctx,
		cipherContext.ControllerCertHash, cipherContext.HashScheme)
	if len(ccert) == 0 {
		errStr := fmt.Sprintf("%s, Failed to collect controller cert information",
			cipherContext.ID)
		log.Errorln(errStr)
		return ccert, []byte{}, errors.New(errStr)
	}
	// try to get device cert
	dcert := getCipherContextDeviceCert(ctx,
		cipherContext.DeviceCertHash, cipherContext.HashScheme)
	if len(dcert) == 0 {
		errStr := fmt.Sprintf("%s, Failed to collect device cert information",
			cipherContext.ID)
		log.Errorln(errStr)
		return ccert, dcert, errors.New(errStr)
	}
	return ccert, dcert, nil
}

// getCipherContextControllerCert scans published controller certs for
// one whose stored hash equals suppliedHash, returning its cert bytes
// or an empty slice.
// NOTE(review): hashScheme is unused here — the stored CertHash is
// compared directly, unlike the device-cert path which recomputes the
// hash; confirm the controller publishes hashes in the same scheme.
func getCipherContextControllerCert(ctx *getconfigContext,
	suppliedHash []byte, hashScheme zconfig.CipherHashAlgorithm) []byte {
	items := ctx.pubControllerCertConfig.GetAll()
	for _, item := range items {
		certConfig := item.(types.ControllerCertificate)
		match := bytes.Equal(certConfig.CertHash, suppliedHash)
		if match {
			return certConfig.Cert
		}
	}
	return []byte{}
}

// getCipherContextDeviceCert reads the on-disk device certificate and
// returns it when its hash (computed per hashScheme) matches
// suppliedHash; empty slice otherwise.
func getCipherContextDeviceCert(ctx *getconfigContext,
	suppliedHash []byte, hashScheme zconfig.CipherHashAlgorithm) []byte {
	// TBD:XXX as of now, only one
	certBytes, err := ioutil.ReadFile(types.DeviceCertName)
	if err == nil {
		match := computeAndMatchHash(certBytes, suppliedHash, hashScheme)
		if match {
			return certBytes
		}
	}
	return []byte{}
}

// computeAndMatchHash hashes cert per hashScheme and compares against
// suppliedHash. Only the truncated (16-byte) SHA-256 scheme is
// supported; everything else, including HASH_NONE, fails the match.
func computeAndMatchHash(cert []byte, suppliedHash []byte,
	hashScheme zconfig.CipherHashAlgorithm) bool {
	switch hashScheme {
	case zconfig.CipherHashAlgorithm_HASH_NONE:
		return false
	case zconfig.CipherHashAlgorithm_HASH_SHA256_16bytes:
		h := sha256.New()
		h.Write(cert)
		computedHash := h.Sum(nil)
		return bytes.Equal(suppliedHash, computedHash[:16])
	}
	return false
}

// for controller certificates, publish utilities

// publishControllerCertConfig publishes config under its own key.
func publishControllerCertConfig(getconfigCtx *getconfigContext,
	config *types.ControllerCertificate) {
	key := config.Key()
	log.Debugf("publishControllerCertificate %s\n", key)
	pub := getconfigCtx.pubControllerCertConfig
	pub.Publish(key, *config)
}

// unpublishControllerCertConfig removes the published entry for key,
// logging (but not failing) when it does not exist.
func unpublishControllerCertConfig(getconfigCtx *getconfigContext, key string) {
	log.Debugf("unpublishControllerCertConfig %s\n", key)
	pub := getconfigCtx.pubControllerCertConfig
	c, _ := pub.Get(key)
	if c == nil {
		log.Errorf("unpublishCertObjConfig(%s) not found\n", key)
		return
	}
	pub.Unpublish(key)
}
// flipAndInvertImage reverses each row of the binary matrix A and
// inverts every bit (0 <-> 1), in place, returning A.
//
// The two operations are fused: cell j swaps with its mirror cell
// width-j-1, and both are inverted during the swap. For odd-width rows
// the middle cell is its own mirror and simply ends up inverted once.
func flipAndInvertImage(A [][]int) [][]int {
	if len(A) == 0 {
		return A
	}
	width := len(A[0])
	for _, row := range A {
		for j := 0; 2*j < width; j++ {
			k := width - j - 1
			row[j], row[k] = 1-row[k], 1-row[j]
		}
	}
	return A
}
package timewheel type Chunk struct { id int32 used int32 prev int32 next int32 data TimeWheelTaskData } type TimeWheelAllocator struct { capacity int32 size int32 freeHead int32 Chunks []Chunk stat TimeWheelAllocatorStat } type TimeWheelAllocatorStat struct { Alloc uint64 AllocOk uint64 Free uint64 FreeOk uint64 FreeAll uint64 } func NewTimeWheelAllocator(capacity int32) *TimeWheelAllocator { allocator := &TimeWheelAllocator{capacity: capacity} allocator.Chunks = make([]Chunk, capacity) allocator.init() return allocator } func (this *TimeWheelAllocator) init() { this.Chunks[0].id = 0 this.Chunks[0].used = 0 this.Chunks[0].prev = this.capacity - 1 this.Chunks[0].next = 1 for i := int32(1); i < this.capacity; i++ { this.Chunks[i].id = i this.Chunks[i].prev = i - 1 this.Chunks[i].next = i + 1 } this.Chunks[this.capacity-1].next = 0 } func (this *TimeWheelAllocator) Alloc() int32 { chunk := this.AllocEx() if chunk == nil { return -1 } return chunk.id } func (this *TimeWheelAllocator) AllocEx() *Chunk { this.stat.Alloc++ if this.size >= this.capacity { return nil } freeNum := this.capacity - this.size chunk := &this.Chunks[this.freeHead] if freeNum == 1 { this.freeHead = -1 } else { this.Chunks[chunk.prev].next = chunk.next this.Chunks[chunk.next].prev = chunk.prev this.freeHead = chunk.next } chunk.used = 1 this.size++ this.stat.AllocOk++ return chunk } func (this *TimeWheelAllocator) Free(id int32) { this.stat.Free++ if id < 0 || int(id) > len(this.Chunks) { return } chunk := &this.Chunks[id] if chunk.used == 0 { return } // push_back to free list if this.freeHead == -1 { chunk.next = id chunk.prev = id this.freeHead = id } else { head := &this.Chunks[this.freeHead] tail := &this.Chunks[head.prev] chunk.next = this.freeHead chunk.prev = tail.id tail.next = id head.prev = id } chunk.used = 0 this.size-- this.stat.FreeOk++ } func (this *TimeWheelAllocator) FreeAll() { this.stat.FreeAll++ this.init() }
// Package parser is used to parse input-programs written in monkey // and convert them to an abstract-syntax tree. package parser import ( "fmt" "strconv" "strings" "github.com/kasworld/nonkey/enum/precedence" "github.com/kasworld/nonkey/enum/tokentype" "github.com/kasworld/nonkey/interpreter/ast" "github.com/kasworld/nonkey/interpreter/asti" "github.com/kasworld/nonkey/interpreter/lexer" "github.com/kasworld/nonkey/interpreter/token" ) // prefix Parse function // infix parse function // postfix parse function type ( prefixParseFn func() asti.ExpressionI infixParseFn func(asti.ExpressionI) asti.ExpressionI postfixParseFn func() asti.ExpressionI ) // Parser object type Parser struct { // l is our lexer l *lexer.Lexer // prevToken holds the previous token from our lexer. // (used for "++" + "--") prevToken token.Token // curToken holds the current token from our lexer. curToken token.Token // peekToken holds the next token which will come from the lexer. peekToken token.Token // errors holds parsing-errors. errors []Error // prefixParseFns holds a map of parsing methods for // prefix-based syntax. prefixParseFns [tokentype.TokenType_Count]prefixParseFn // infixParseFns holds a map of parsing methods for // infix-based syntax. infixParseFns [tokentype.TokenType_Count]infixParseFn // postfixParseFns holds a map of parsing methods for // postfix-based syntax. postfixParseFns [tokentype.TokenType_Count]postfixParseFn // are we inside a ternary expression? // // Nested ternary expressions are illegal :) tern bool } // New returns our new parser-object. 
func New(l *lexer.Lexer) *Parser {

	// Create the parser, and prime the pump so that curToken and
	// peekToken are both populated.
	p := &Parser{l: l, errors: []Error{}}
	p.nextToken()
	p.nextToken()

	// Register prefix-functions
	p.prefixParseFns = [tokentype.TokenType_Count]prefixParseFn{
		tokentype.BACKTICK:        p.parseBacktickLiteral,
		tokentype.BANG:            p.parsePrefixExpression,
		tokentype.DEFINE_FUNCTION: p.parseFunctionDefinition,
		tokentype.EOF:             p.parsingBroken,
		tokentype.FALSE:           p.parseBoolean,
		tokentype.FLOAT:           p.parseFloatLiteral,
		tokentype.FOR:             p.parseForLoopExpression,
		tokentype.FOREACH:         p.parseForEach,
		tokentype.FUNCTION:        p.parseFunctionLiteral,
		tokentype.IDENT:           p.parseIdentifier,
		tokentype.IF:              p.parseIfExpression,
		tokentype.ILLEGAL:         p.parsingBroken,
		tokentype.INT:             p.parseIntegerLiteral,
		tokentype.LBRACE:          p.parseHashLiteral,
		tokentype.LBRACKET:        p.parseArrayLiteral,
		tokentype.LPAREN:          p.parseGroupedExpression,
		tokentype.MINUS:           p.parsePrefixExpression,
		tokentype.REGEXP:          p.parseRegexpLiteral,
		tokentype.STRING:          p.parseStringLiteral,
		tokentype.SWITCH:          p.parseSwitchStatement,
		tokentype.TRUE:            p.parseBoolean,
	}

	// Register infix functions
	p.infixParseFns = [tokentype.TokenType_Count]infixParseFn{
		tokentype.AND:             p.parseInfixExpression,
		tokentype.ASSIGN:          p.parseAssignExpression,
		tokentype.ASTERISK:        p.parseInfixExpression,
		tokentype.ASTERISK_EQUALS: p.parseAssignExpression,
		tokentype.CONTAINS:        p.parseInfixExpression,
		tokentype.DOTDOT:          p.parseInfixExpression,
		tokentype.EQ:              p.parseInfixExpression,
		tokentype.GT:              p.parseInfixExpression,
		tokentype.GT_EQUALS:       p.parseInfixExpression,
		tokentype.LBRACKET:        p.parseIndexExpression,
		tokentype.LPAREN:          p.parseCallExpression,
		tokentype.LT:              p.parseInfixExpression,
		tokentype.LT_EQUALS:       p.parseInfixExpression,
		tokentype.MINUS:           p.parseInfixExpression,
		tokentype.MINUS_EQUALS:    p.parseAssignExpression,
		tokentype.MOD:             p.parseInfixExpression,
		tokentype.NOT_CONTAINS:    p.parseInfixExpression,
		tokentype.NOT_EQ:          p.parseInfixExpression,
		tokentype.OR:              p.parseInfixExpression,
		tokentype.PERIOD:          p.parseMethodCallExpression,
		tokentype.PLUS:            p.parseInfixExpression,
		tokentype.PLUS_EQUALS:     p.parseAssignExpression,
		tokentype.POW:             p.parseInfixExpression,
		tokentype.QUESTION:        p.parseTernaryExpression,
		tokentype.SLASH:           p.parseInfixExpression,
		tokentype.SLASH_EQUALS:    p.parseAssignExpression,
	}

	// Register postfix functions.
	p.postfixParseFns = [tokentype.TokenType_Count]postfixParseFn{
		tokentype.MINUS_MINUS: p.parsePostfixExpression,
		tokentype.PLUS_PLUS:   p.parsePostfixExpression,
	}

	// All done
	return p
}

// nextToken moves to our next token from the lexer.
func (p *Parser) nextToken() {
	p.prevToken = p.curToken
	p.curToken = p.peekToken
	p.peekToken = p.l.NextToken()
}

// ParseProgram used to parse the whole program
func (p *Parser) ParseProgram() *ast.Program {
	program := &ast.Program{}
	program.Statements = []asti.StatementI{}
	for p.curToken.Type != tokentype.EOF {
		stmt := p.parseStatement()
		if stmt != nil {
			program.Statements = append(program.Statements, stmt)
		}
		p.nextToken()
	}
	return program
}

// parseStatement parses a single statement.
func (p *Parser) parseStatement() asti.StatementI {
	switch p.curToken.Type {
	case tokentype.LET:
		return p.parseLetStatement()
	case tokentype.CONST:
		return p.parseConstStatement()
	case tokentype.RETURN:
		return p.parseReturnStatement()
	default:
		return p.parseExpressionStatement()
	}
}

// parseLetStatement parses a let-statement: `let NAME = EXPR;`.
func (p *Parser) parseLetStatement() *ast.LetStatement {
	stmt := &ast.LetStatement{Token: p.curToken}
	if !p.expectPeek(tokentype.IDENT) {
		return nil
	}
	stmt.Name = &ast.Identifier{Token: p.curToken, Value: p.curToken.Literal}
	if !p.expectPeek(tokentype.ASSIGN) {
		return nil
	}
	p.nextToken()
	stmt.Value = p.parseExpression(precedence.LOWEST)
	// consume trailing tokens up to the terminating semicolon
	for !p.curTokenIs(tokentype.SEMICOLON) {
		if p.curTokenIs(tokentype.EOF) {
			p.AddError("unterminated let statement")
			return nil
		}
		p.nextToken()
	}
	return stmt
}

// parseConstStatement parses a constant declaration.
func (p *Parser) parseConstStatement() *ast.ConstStatement {
	stmt := &ast.ConstStatement{Token: p.curToken}
	if !p.expectPeek(tokentype.IDENT) {
		return nil
	}
	stmt.Name = &ast.Identifier{Token: p.curToken, Value: p.curToken.Literal}
	if !p.expectPeek(tokentype.ASSIGN) {
		return nil
	}
	p.nextToken()
	stmt.Value = p.parseExpression(precedence.LOWEST)
	for !p.curTokenIs(tokentype.SEMICOLON) {
		if p.curTokenIs(tokentype.EOF) {
			p.AddError("unterminated const statement")
			return nil
		}
		p.nextToken()
	}
	return stmt
}

// parseReturnStatement parses a return-statement.
func (p *Parser) parseReturnStatement() *ast.ReturnStatement {
	stmt := &ast.ReturnStatement{Token: p.curToken}
	p.nextToken()
	stmt.ReturnValue = p.parseExpression(precedence.LOWEST)
	for !p.curTokenIs(tokentype.SEMICOLON) {
		if p.curTokenIs(tokentype.EOF) {
			p.AddError("unterminated return statement")
			return nil
		}
		p.nextToken()
	}
	return stmt
}

// no prefix parse function error
func (p *Parser) noPrefixParseFnError(t tokentype.TokenType) {
	p.AddError("no prefix parse function for %s", t.Literal())
}

// parse Expression Statement
func (p *Parser) parseExpressionStatement() *ast.ExpressionStatement {
	stmt := &ast.ExpressionStatement{Token: p.curToken}
	stmt.Expression = p.parseExpression(precedence.LOWEST)
	for p.peekTokenIs(tokentype.SEMICOLON) {
		p.nextToken()
	}
	return stmt
}

// parseExpression is the core Pratt-parser loop: parse a prefix (or
// postfix) expression for the current token, then keep folding in infix
// operators while the next operator binds tighter than precedence1.
func (p *Parser) parseExpression(precedence1 precedence.Precedence) asti.ExpressionI {
	postfix := p.postfixParseFns[p.curToken.Type]
	if postfix != nil {
		return (postfix())
	}
	prefix := p.prefixParseFns[p.curToken.Type]
	if prefix == nil {
		p.noPrefixParseFnError(p.curToken.Type)
		return nil
	}
	leftExp := prefix()
	for !p.peekTokenIs(tokentype.SEMICOLON) && precedence1 < p.peekPrecedence() {
		infix := p.infixParseFns[p.peekToken.Type]
		if infix == nil {
			return leftExp
		}
		p.nextToken()
		leftExp = infix(leftExp)
	}
	return leftExp
}

// parsingBroken is hit if we see an EOF in our input-stream
// this means we're screwed
func (p *Parser) parsingBroken() asti.ExpressionI {
	return nil
}

// parseIdentifier parses an identifier.
func (p *Parser) parseIdentifier() asti.ExpressionI {
	return &ast.Identifier{Token: p.curToken, Value: p.curToken.Literal}
}

// parseIntegerLiteral parses an integer literal.
// Supports binary ("0b..."), hex ("0x...") and decimal forms.
func (p *Parser) parseIntegerLiteral() asti.ExpressionI {
	lit := &ast.IntegerLiteral{Token: p.curToken}
	var value int64
	var err error
	if strings.HasPrefix(p.curToken.Literal, "0b") {
		value, err = strconv.ParseInt(p.curToken.Literal[2:], 2, 64)
	} else if strings.HasPrefix(p.curToken.Literal, "0x") {
		value, err = strconv.ParseInt(p.curToken.Literal[2:], 16, 64)
	} else {
		value, err = strconv.ParseInt(p.curToken.Literal, 10, 64)
	}
	if err != nil {
		p.AddError("could not parse %q as integer", p.curToken.Literal)
		return nil
	}
	lit.Value = value
	return lit
}

// parseFloatLiteral parses a float-literal
func (p *Parser) parseFloatLiteral() asti.ExpressionI {
	flo := &ast.FloatLiteral{Token: p.curToken}
	value, err := strconv.ParseFloat(p.curToken.Literal, 64)
	if err != nil {
		p.AddError("could not parse %q as float", p.curToken.Literal)
		return nil
	}
	flo.Value = value
	return flo
}

// parseSwitchStatement handles a switch statement
func (p *Parser) parseSwitchStatement() asti.ExpressionI {
	// switch
	expression := &ast.SwitchExpression{Token: p.curToken}
	// look for (xx)
	if !p.expectPeek(tokentype.LPAREN) {
		return nil
	}
	p.nextToken()
	expression.Value = p.parseExpression(precedence.LOWEST)
	if expression.Value == nil {
		return nil
	}
	if !p.expectPeek(tokentype.RPAREN) {
		return nil
	}
	// Now we have a block containing blocks.
	if !p.expectPeek(tokentype.LBRACE) {
		return nil
	}
	p.nextToken()
	// Process the block which we think will contain
	// various case-statements
	for !p.curTokenIs(tokentype.RBRACE) {
		if p.curTokenIs(tokentype.EOF) {
			p.AddError("unterminated switch statement")
			return nil
		}
		tmp := &ast.CaseExpression{Token: p.curToken}
		// Default will be handled specially
		if p.curTokenIs(tokentype.DEFAULT) {
			// We have a default-case here.
			tmp.Default = true
		} else if p.curTokenIs(tokentype.CASE) {
			// skip "case"
			p.nextToken()
			// Here we allow "case default" even though
			// most people would prefer to write "default".
			if p.curTokenIs(tokentype.DEFAULT) {
				tmp.Default = true
			} else {
				// parse the match-expression.
				tmp.Expr = append(tmp.Expr, p.parseExpression(precedence.LOWEST))
				for p.peekTokenIs(tokentype.COMMA) {
					// skip the comma
					p.nextToken()
					// setup the expression.
					p.nextToken()
					tmp.Expr = append(tmp.Expr, p.parseExpression(precedence.LOWEST))
				}
			}
		}
		if !p.expectPeek(tokentype.LBRACE) {
			p.AddError("expected token to be '{', got %s instead", p.curToken.Type)
			// NOTE(review): looks like leftover debug output — consider removing.
			fmt.Printf("error\n")
			return nil
		}
		// parse the block
		tmp.Block = p.parseBlockStatement()
		if !p.curTokenIs(tokentype.RBRACE) {
			p.AddError("Syntax Error: expected token to be '}', got %s instead", p.curToken.Type)
			// NOTE(review): looks like leftover debug output — consider removing.
			fmt.Printf("error\n")
			return nil
		}
		p.nextToken()
		// save the choice away
		expression.Choices = append(expression.Choices, tmp)
	}
	// ensure we're at the the closing "}"
	if !p.curTokenIs(tokentype.RBRACE) {
		return nil
	}
	// More than one default is a bug
	count := 0
	for _, c := range expression.Choices {
		if c.Default {
			count++
		}
	}
	if count > 1 {
		p.AddError("A switch-statement should only have one default block")
		return nil
	}
	return expression
}

// parseBoolean parses a boolean token.
func (p *Parser) parseBoolean() asti.ExpressionI {
	return &ast.Boolean{Token: p.curToken, Value: p.curTokenIs(tokentype.TRUE)}
}

// parsePrefixExpression parses a prefix-based expression.
func (p *Parser) parsePrefixExpression() asti.ExpressionI {
	expression := &ast.PrefixExpression{
		Token:    p.curToken,
		Operator: p.curToken.Type,
	}
	p.nextToken()
	expression.Right = p.parseExpression(precedence.PREFIX)
	return expression
}

// parsePostfixExpression parses a postfix-based expression.
func (p *Parser) parsePostfixExpression() asti.ExpressionI {
	expression := &ast.PostfixExpression{
		// prevToken is the operand (e.g. the "x" in "x++"); the current
		// token is the postfix operator itself.
		Token:    p.prevToken,
		Operator: p.curToken.Type,
	}
	return expression
}

// parseInfixExpression parses an infix-based expression.
func (p *Parser) parseInfixExpression(left asti.ExpressionI) asti.ExpressionI {
	expression := &ast.InfixExpression{
		Token:    p.curToken,
		Operator: p.curToken.Type,
		Left:     left,
	}
	// capture the operator's precedence before advancing, so the right
	// side is parsed with the correct binding power
	curPrecedence := p.curPrecedence()
	p.nextToken()
	expression.Right = p.parseExpression(curPrecedence)
	return expression
}

// parseTernaryExpression parses a ternary expression
func (p *Parser) parseTernaryExpression(condition asti.ExpressionI) asti.ExpressionI {
	if p.tern {
		p.AddError("nested ternary expressions are illegal")
		return nil
	}
	p.tern = true
	defer func() { p.tern = false }()
	expression := &ast.TernaryExpression{
		Token:     p.curToken,
		Condition: condition,
	}
	p.nextToken() //skip the '?'
	curPrecedence := p.curPrecedence()
	expression.IfTrue = p.parseExpression(curPrecedence)
	if !p.expectPeek(tokentype.COLON) { //skip the ":"
		return nil
	}
	// Get to next token, then parse the else part
	p.nextToken()
	expression.IfFalse = p.parseExpression(curPrecedence)
	// NOTE(review): redundant — the deferred func above already resets it.
	p.tern = false
	return expression
}

// parseGroupedExpression parses a grouped-expression.
func (p *Parser) parseGroupedExpression() asti.ExpressionI {
	p.nextToken()
	exp := p.parseExpression(precedence.LOWEST)
	if !p.expectPeek(tokentype.RPAREN) {
		return nil
	}
	return exp
}

// parseIfCondition parses an if-expression.
func (p *Parser) parseIfExpression() asti.ExpressionI {
	expression := &ast.IfExpression{Token: p.curToken}
	if !p.expectPeek(tokentype.LPAREN) {
		return nil
	}
	p.nextToken()
	expression.Condition = p.parseExpression(precedence.LOWEST)
	if !p.expectPeek(tokentype.RPAREN) {
		return nil
	}
	if !p.expectPeek(tokentype.LBRACE) {
		return nil
	}
	expression.Consequence = p.parseBlockStatement()
	// optional else-branch
	if p.peekTokenIs(tokentype.ELSE) {
		p.nextToken()
		if !p.expectPeek(tokentype.LBRACE) {
			return nil
		}
		expression.Alternative = p.parseBlockStatement()
	}
	return expression
}

// parseForLoopExpression parses a for-loop.
func (p *Parser) parseForLoopExpression() asti.ExpressionI {
	expression := &ast.ForLoopExpression{Token: p.curToken}
	if !p.expectPeek(tokentype.LPAREN) {
		return nil
	}
	p.nextToken()
	expression.Condition = p.parseExpression(precedence.LOWEST)
	if !p.expectPeek(tokentype.RPAREN) {
		return nil
	}
	if !p.expectPeek(tokentype.LBRACE) {
		return nil
	}
	expression.Consequence = p.parseBlockStatement()
	return expression
}

// parseForEach parses 'foreach x X { .. block .. }`
func (p *Parser) parseForEach() asti.ExpressionI {
	expression := &ast.ForeachStatement{Token: p.curToken}
	// get the id
	p.nextToken()
	expression.Ident = p.curToken.Literal
	// If we find a "," we then get a second identifier too.
	if p.peekTokenIs(tokentype.COMMA) {
		//
		// Generally we have:
		//
		//    foreach IDENT in THING { .. }
		//
		// If we have two arguments the first becomes
		// the index, and the second becomes the IDENT.
		//
		// skip the comma
		p.nextToken()
		if !p.peekTokenIs(tokentype.IDENT) {
			p.AddError(fmt.Sprintf("second argument to foreach must be ident, got %v", p.peekToken))
			return nil
		}
		p.nextToken()
		//
		// Record the updated values.
		//
		expression.Index = expression.Ident
		expression.Ident = p.curToken.Literal
	}
	// The next token, after the ident(s), should be `in`.
	if !p.expectPeek(tokentype.IN) {
		return nil
	}
	p.nextToken()
	// get the thing we're going to iterate over.
	expression.Value = p.parseExpression(precedence.LOWEST)
	if expression.Value == nil {
		return nil
	}
	// parse the block
	p.nextToken()
	expression.Body = p.parseBlockStatement()
	return expression
}

// parseBlockStatement parses a block.
func (p *Parser) parseBlockStatement() *ast.BlockStatement {
	block := &ast.BlockStatement{Token: p.curToken}
	block.Statements = []asti.StatementI{}
	p.nextToken()
	for !p.curTokenIs(tokentype.RBRACE) {
		// Don't loop forever
		if p.curTokenIs(tokentype.EOF) {
			p.AddError(
				"unterminated block statement")
			return nil
		}
		stmt := p.parseStatement()
		if stmt != nil {
			block.Statements = append(block.Statements, stmt)
		}
		p.nextToken()
	}
	return block
}

// parseFunctionLiteral parses a function-literal.
func (p *Parser) parseFunctionLiteral() asti.ExpressionI {
	lit := &ast.FunctionLiteral{Token: p.curToken}
	if !p.expectPeek(tokentype.LPAREN) {
		return nil
	}
	lit.Defaults, lit.Parameters = p.parseFunctionParameters()
	if !p.expectPeek(tokentype.LBRACE) {
		return nil
	}
	lit.Body = p.parseBlockStatement()
	return lit
}

// parseFunctionDefinition parses the definition of a function.
func (p *Parser) parseFunctionDefinition() asti.ExpressionI {
	p.nextToken()
	lit := &ast.FunctionDefineLiteral{Token: p.curToken}
	if !p.expectPeek(tokentype.LPAREN) {
		return nil
	}
	lit.Defaults, lit.Parameters = p.parseFunctionParameters()
	if !p.expectPeek(tokentype.LBRACE) {
		return nil
	}
	lit.Body = p.parseBlockStatement()
	return lit
}

// parseFunctionParameters parses the parameters used for a function.
// It returns the map of default-values plus the ordered parameter names.
func (p *Parser) parseFunctionParameters() (map[string]asti.ExpressionI, []*ast.Identifier) {
	// Any default parameters.
	m := make(map[string]asti.ExpressionI)
	// The argument-definitions.
	identifiers := make([]*ast.Identifier, 0)
	// Is the next parameter ")" ? If so we're done. No args.
	if p.peekTokenIs(tokentype.RPAREN) {
		p.nextToken()
		return m, identifiers
	}
	p.nextToken()
	// Keep going until we find a ")"
	for !p.curTokenIs(tokentype.RPAREN) {
		if p.curTokenIs(tokentype.EOF) {
			p.AddError("unterminated function parameters")
			return nil, nil
		}
		// Get the identifier.
		ident := &ast.Identifier{Token: p.curToken, Value: p.curToken.Literal}
		identifiers = append(identifiers, ident)
		p.nextToken()
		// If there is "=xx" after the name then that's
		// the default parameter.
		if p.curTokenIs(tokentype.ASSIGN) {
			p.nextToken()
			// Save the default value.
			m[ident.Value] = p.parseExpressionStatement().Expression
			p.nextToken()
		}
		// Skip any comma.
		if p.curTokenIs(tokentype.COMMA) {
			p.nextToken()
		}
	}
	return m, identifiers
}

// parseStringLiteral parses a string-literal.
func (p *Parser) parseStringLiteral() asti.ExpressionI {
	return &ast.StringLiteral{Token: p.curToken, Value: p.curToken.Literal}
}

// parseRegexpLiteral parses a regular-expression.
// A leading "(?flags)" group is stripped off into the Flags field.
func (p *Parser) parseRegexpLiteral() asti.ExpressionI {
	flags := ""
	val := p.curToken.Literal
	if strings.HasPrefix(val, "(?") {
		val = strings.TrimPrefix(val, "(?")
		i := 0
		for i < len(val) {
			if val[i] == ')' {
				val = val[i+1:]
				break
			} else {
				flags += string(val[i])
			}
			i++
		}
	}
	return &ast.RegexpLiteral{Token: p.curToken, Value: val, Flags: flags}
}

// parseBacktickLiteral parses a backtick-expression.
func (p *Parser) parseBacktickLiteral() asti.ExpressionI {
	return &ast.BacktickLiteral{Token: p.curToken, Value: p.curToken.Literal}
}

// parseArrayLiteral parses an array literal.
func (p *Parser) parseArrayLiteral() asti.ExpressionI { array := &ast.ArrayLiteral{Token: p.curToken} array.Elements = p.parseExpressionList(tokentype.RBRACKET) return array } // parsearray elements literal func (p *Parser) parseExpressionList(end tokentype.TokenType) []asti.ExpressionI { list := make([]asti.ExpressionI, 0) if p.peekTokenIs(end) { p.nextToken() return list } p.nextToken() list = append(list, p.parseExpression(precedence.LOWEST)) for p.peekTokenIs(tokentype.COMMA) { p.nextToken() p.nextToken() list = append(list, p.parseExpression(precedence.LOWEST)) } if !p.expectPeek(end) { return nil } return list } // parseInfixExpression parsea an array index expression. func (p *Parser) parseIndexExpression(left asti.ExpressionI) asti.ExpressionI { exp := &ast.IndexExpression{Token: p.curToken, Left: left} p.nextToken() exp.Index = p.parseExpression(precedence.LOWEST) if !p.expectPeek(tokentype.RBRACKET) { return nil } return exp } // parseAssignExpression parses a bare assignment, without a `let`. func (p *Parser) parseAssignExpression(name asti.ExpressionI) asti.ExpressionI { stmt := &ast.AssignStatement{Token: p.curToken} if n, ok := name.(*ast.Identifier); ok { stmt.Name = n } else { p.AddError("expected assign token to be IDENT, got %s instead", name.GetToken().Literal) } oper := p.curToken p.nextToken() stmt.Operator = oper.Type stmt.Value = p.parseExpression(precedence.LOWEST) return stmt } // parseCallExpression parses a function-call expression. func (p *Parser) parseCallExpression(function asti.ExpressionI) asti.ExpressionI { exp := &ast.CallExpression{Token: p.curToken, Function: function} exp.Arguments = p.parseExpressionList(tokentype.RPAREN) return exp } // parseHashLiteral parses a hash literal. 
func (p *Parser) parseHashLiteral() asti.ExpressionI { hash := &ast.HashLiteral{Token: p.curToken} hash.Pairs = make(map[asti.ExpressionI]asti.ExpressionI) for !p.peekTokenIs(tokentype.RBRACE) { p.nextToken() key := p.parseExpression(precedence.LOWEST) if !p.expectPeek(tokentype.COLON) { return nil } p.nextToken() value := p.parseExpression(precedence.LOWEST) hash.Pairs[key] = value if !p.peekTokenIs(tokentype.RBRACE) && !p.expectPeek(tokentype.COMMA) { return nil } } if !p.expectPeek(tokentype.RBRACE) { return nil } return hash } // parseMethodCallExpression parses an object-based method-call. func (p *Parser) parseMethodCallExpression(obj asti.ExpressionI) asti.ExpressionI { methodCall := &ast.ObjectCallExpression{Token: p.curToken, Object: obj} p.nextToken() name := p.parseIdentifier() p.nextToken() methodCall.Call = p.parseCallExpression(name) return methodCall } // curTokenIs tests if the current token has the given type. func (p *Parser) curTokenIs(t tokentype.TokenType) bool { return p.curToken.Type == t } // peekTokenIs tests if the next token has the given type. func (p *Parser) peekTokenIs(t tokentype.TokenType) bool { return p.peekToken.Type == t } // expectPeek validates the next token is of the given type, // and advances if so. If it is not an error is stored. func (p *Parser) expectPeek(t tokentype.TokenType) bool { if p.peekTokenIs(t) { p.nextToken() return true } p.AddError("expected next token to be %s, got %v", t.Literal(), p.curToken.Literal) return false } // peekPrecedence looks up the next token precedence. func (p *Parser) peekPrecedence() precedence.Precedence { return tokentype.Token2Precedences[p.peekToken.Type] } // curPrecedence looks up the current token precedence. func (p *Parser) curPrecedence() precedence.Precedence { return tokentype.Token2Precedences[p.curToken.Type] }
package main

import (
	"encoding/json"
	"flag"
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"

	"github.com/OpenDataHH/BetBerichtCreateJSON/pdb"
	gobeteiligungsbericht "github.com/OpenDataHH/GoBeteiligungsbericht"
)

func main() {
	xmlFile := flag.String("file", "", "XML Datei Beteiligungsbericht HH")
	outFolder := flag.String("out", "out", "Ordner in welchen die Dateien geschrieben werden")
	flag.Parse()
	if *xmlFile == "" {
		log.Println("Bitte Flag -file setzen. Lauf wird abgebrochen.")
		return
	}
	writeJSONFiles(*xmlFile, *outFolder)
}

// writeJSONFiles decodes the XML report and writes one JSON file per
// person and per company below outFolder.
func writeJSONFiles(inFile string, outFolder string) {
	conf := LoadConf()
	infile, err := ioutil.ReadFile(inFile)
	if err != nil {
		log.Println(err)
		return
	}
	bericht, err := gobeteiligungsbericht.Decode(infile)
	if err != nil {
		log.Println("Decode Error:", err)
		return
	}
	personen := pdb.NewPersonenDB()
	// BUGFIX: the previous range-value loop passed &firma, a pointer to
	// the loop's own copy of each element (and, before Go 1.22, to a
	// single reused variable) — the stored pointers never referenced the
	// slice's actual entries.  Index into the slice instead.
	for i := range bericht.Firmen.Firmen {
		personen.AddFirma(&bericht.Firmen.Firmen[i])
	}
	writePersonen(personen, filepath.Join(outFolder, conf.OutputPersonen))
	writeFirmen(bericht.Firmen.Firmen, filepath.Join(outFolder, conf.OutputFirmen))
}

// writePersonen writes one JSON file per person into outFolder.
func writePersonen(personen *pdb.PersonenDB, outFolder string) {
	for _, p := range personen.Personen {
		// persons linked to several companies are echoed for inspection
		if len(p.FirmenVerbindungen) > 1 {
			fmt.Println(p.ID)
		}
		filename := filepath.Join(
			outFolder,
			fmt.Sprintf("%s.json", p.ID),
		)
		writeJSON(p, filename)
	}
}

// writeFirmen writes one JSON file per company into outFolder.
func writeFirmen(firmen []gobeteiligungsbericht.Firma, outFolder string) {
	for _, f := range firmen {
		filename := filepath.Join(
			outFolder,
			fmt.Sprintf("%s.json", f.ID),
		)
		writeJSON(f, filename)
	}
}

// writeJSON marshals v with indentation and writes it to filename,
// creating parent directories as needed.
func writeJSON(v interface{}, filename string) error {
	// BUGFIX: the MkdirAll error was silently ignored.
	if err := os.MkdirAll(filepath.Dir(filename), 0777); err != nil {
		log.Println("Fehler writeJSON: ", err)
		return err
	}
	jsonBytes, err := json.MarshalIndent(v, "", " ")
	if err != nil {
		return err
	}
	err = ioutil.WriteFile(filename, jsonBytes, 0777)
	if err == nil {
		log.Printf("Write %s\n", filename)
	} else {
		log.Println("Fehler writeJSON: ", err)
	}
	return err
}
package encryptor

import (
	"crypto/aes"
	"encoding/base64"
	"errors"

	"github.com/mukesh0513/RxSecure/internal/utils"
	"github.com/sirupsen/logrus"
)

// EcbEncrypt PKCS5-pads message, encrypts it with AES in ECB mode under
// key, and returns the ciphertext base64-encoded.
func EcbEncrypt(key []byte, message string) (string, error) {
	padded := utils.PKCS5Padding([]byte(message), aes.BlockSize)

	// Padding should guarantee this, but keep the defensive check.
	if len(padded)%aes.BlockSize != 0 {
		logrus.Info(map[string]interface{}{
			"component": "EcbEncrypt",
			"message":   "plaintext is not a multiple of the block size",
		})
		return "", errors.New("plaintext is not a multiple of the block size")
	}

	block, err := aes.NewCipher(key)
	if err != nil {
		logrus.Info(map[string]interface{}{
			"component": "EcbEncrypt",
			"message":   "Error creating new cipher",
		})
		return "", errors.New("Error creating new cipher")
	}

	out := make([]byte, len(padded))
	NewECBEncrypter(block).CryptBlocks(out, padded)

	return base64.StdEncoding.EncodeToString(out), nil
}
package docker_registry

import (
	"context"
	"fmt"
	"net/url"

	"github.com/google/go-containerregistry/pkg/name"
	v1 "github.com/google/go-containerregistry/pkg/v1"

	"github.com/werf/werf/pkg/docker"
	"github.com/werf/werf/pkg/image"
)

// genericApi wraps the common registry api and, for Docker Hub
// references, consults any configured registry mirrors first.
type genericApi struct {
	commonApi *api
	mirrors   []string
}

// newGenericApi builds a genericApi, picking up registry mirrors from
// the docker daemon configuration when a docker cli context is present.
func newGenericApi(ctx context.Context, options apiOptions) (*genericApi, error) {
	d := &genericApi{}
	d.commonApi = newAPI(options)

	// init registry mirrors if docker cli initialized in context
	if docker.IsContext(ctx) {
		info, err := docker.Info(ctx)
		if err != nil {
			return nil, fmt.Errorf("unable to get docker system info: %s", err)
		}

		if info.RegistryConfig != nil {
			d.mirrors = info.RegistryConfig.Mirrors
		}
	}

	return d, nil
}

// MutateAndPushImage delegates directly to the common api.
func (api *genericApi) MutateAndPushImage(ctx context.Context, sourceReference, destinationReference string, mutateConfigFunc func(cfg v1.Config) (v1.Config, error)) error {
	return api.commonApi.MutateAndPushImage(ctx, sourceReference, destinationReference, mutateConfigFunc)
}

// GetRepoImageConfigFile resolves the image config, trying each mirror
// first and falling back to the canonical registry.
func (api *genericApi) GetRepoImageConfigFile(ctx context.Context, reference string) (*v1.ConfigFile, error) {
	mirrorReferenceList, err := api.mirrorReferenceList(reference)
	if err != nil {
		return nil, fmt.Errorf("unable to prepare mirror reference list: %s", err)
	}

	for _, mirrorReference := range mirrorReferenceList {
		config, err := api.getRepoImageConfigFile(ctx, mirrorReference)
		if err != nil {
			// a mirror that simply lacks the image is skipped; any other
			// error aborts
			if IsBlobUnknownError(err) || IsManifestUnknownError(err) || IsNameUnknownError(err) {
				continue
			}
			return nil, err
		}
		return config, nil
	}

	return api.getRepoImageConfigFile(ctx, reference)
}

func (api *genericApi) getRepoImageConfigFile(_ context.Context, reference string) (*v1.ConfigFile, error) {
	imageInfo, _, err := api.commonApi.image(reference)
	if err != nil {
		return nil, err
	}

	return imageInfo.ConfigFile()
}

// GetRepoImage resolves image info, trying each mirror first and
// falling back to the canonical registry.
func (api *genericApi) GetRepoImage(ctx context.Context, reference string) (*image.Info, error) {
	mirrorReferenceList, err := api.mirrorReferenceList(reference)
	if err != nil {
		return nil, fmt.Errorf("unable to prepare mirror reference list: %s", err)
	}

	for _, mirrorReference := range mirrorReferenceList {
		info, err := api.commonApi.TryGetRepoImage(ctx, mirrorReference)
		if err != nil {
			return nil, fmt.Errorf("unable to try getting mirror repo image %q: %s", mirrorReference, err)
		}

		if info != nil {
			return info, nil
		}
	}

	return api.commonApi.GetRepoImage(ctx, reference)
}

// mirrorReferenceList rewrites a Docker Hub reference once per
// configured mirror; non-Hub references yield an empty list.
func (api *genericApi) mirrorReferenceList(reference string) ([]string, error) {
	var referenceList []string

	referenceParts, err := api.commonApi.ParseReferenceParts(reference)
	if err != nil {
		return nil, err
	}

	// nothing if container registry is not Docker Hub
	if referenceParts.registry != name.DefaultRegistry {
		return nil, nil
	}

	for _, mirrorRegistry := range api.mirrors {
		mirrorRegistryUrl, err := url.Parse(mirrorRegistry)
		if err != nil {
			return nil, fmt.Errorf("unable to parse mirror registry url %q: %s", mirrorRegistry, err)
		}

		// NOTE(review): only the host part of the mirror URL is kept —
		// a mirror with a path component would lose it; confirm intended.
		mirrorReference := mirrorRegistryUrl.Host
		mirrorReference += "/" + referenceParts.repository
		mirrorReference += ":" + referenceParts.tag

		if referenceParts.digest != "" {
			mirrorReference += "@" + referenceParts.digest
		}

		referenceList = append(referenceList, mirrorReference)
	}

	return referenceList, nil
}
package main

import (
	"flag"
	"fmt"
	"os"
	"os/exec"
	"sync"
)

var parallel bool
var count int

func init() {
	flag.IntVar(&count, "n", 10, "启动的容器数量")
	flag.BoolVar(&parallel, "p", false, "并发启动")
}

// runContainer launches ./core.py and waits for it to finish.
func runContainer() {
	cmd := exec.Command("python3", "./core.py")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	// BUGFIX: cmd.Start() returned immediately without waiting, so the
	// -p flag made no difference and main could exit before any child
	// finished.  Run() starts the command and waits for completion.
	if err := cmd.Run(); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}

func main() {
	flag.Parse()
	fmt.Printf("count=%d, parallel=%v\n", count, parallel)
	if parallel {
		// run all containers concurrently and wait for them all
		var wg sync.WaitGroup
		wg.Add(count)
		for i := 0; i < count; i++ {
			go func() {
				defer wg.Done()
				runContainer()
			}()
		}
		wg.Wait()
	} else {
		// run containers one after another
		for i := 0; i < count; i++ {
			runContainer()
		}
	}
}
package main

import (
	"net/http"

	"github.com/gin-gonic/gin"
)

// webServer configures the routes under /v2 and blocks serving on :8888.
func webServer() {
	r := gin.Default()
	v2 := r.Group("v2")
	v2.GET("/home", func(c *gin.Context) {
		c.JSON(http.StatusOK, gin.H{
			"message": "Home",
		})
	})
	// /login
	v2.POST("/login", func(c *gin.Context) {
		username := c.PostForm("username")
		password := c.PostForm("password")
		if username == "xiaogebin" && password == "123456" {
			c.JSON(http.StatusOK, gin.H{
				"message": "登录成功",
			})
			// BUGFIX: without this return the handler fell through and
			// wrote a second (401) body on successful logins.
			return
		}
		c.JSON(http.StatusUnauthorized, gin.H{
			"message": "参数错误",
		})
	})
	// /home
	v2.POST("/home", func(c *gin.Context) {
		c.JSON(http.StatusOK, gin.H{
			"message": "OK",
		})
	})
	// /list
	v2.POST("list", func(c *gin.Context) {
		c.String(http.StatusOK, "list")
	})
	// /logout
	v2.POST("/logout", func(c *gin.Context) {
		// login
		c.String(http.StatusOK, "logout")
	})
	// listen on port 8888
	r.Run(":8888")
}

func main() {
	l := NewLogger("INFO", "test.log")
	l.Info("TEST")
	l.Error("ERROR TEST")
}
package concatenate func Concatenate(x, y string) (string, string) { return "", x + y } func Calladd(x, y int) int { return add(x, y) }
package auth

import (
	"errors"
	"log"
	"net/http"
	"os"
	"strconv"
	"time"

	"github.com/SermoDigital/jose/crypto"
	"github.com/SermoDigital/jose/jws"
	"github.com/SermoDigital/jose/jwt"
)

// CreateJWT returns a JWT given a valid userid+password
func CreateJWT(username string) ([]byte, error) {
	signingKey := os.Getenv("SIGNING_PRIVATE_KEY")
	if signingKey == "" {
		// NOTE(review): log.Fatalln exits the whole process from library
		// code; consider returning an error instead.
		log.Fatalln("Unable to load SIGNING_PRIVATE_KEY")
	}
	bytes := []byte(signingKey)
	rsaPrivateKey, err := crypto.ParseRSAPrivateKeyFromPEM(bytes)
	if err != nil {
		return nil, err
	}
	jwtExpiresSeconds, err := strconv.ParseInt(
		os.Getenv("JWT_EXPIRES_SECONDS"), 10, 32)
	if err != nil {
		return nil, err
	}
	claims := jws.Claims{}
	claims.Set("role", "user")
	claims.SetSubject(username)
	claims.SetIssuer("CART")
	claims.SetIssuedAt(time.Now())
	claims.SetExpiration(time.Now().Add(time.Second * time.Duration(jwtExpiresSeconds)))
	return jws.NewJWT(claims, jws.GetSigningMethod("RS256")).
		Serialize(rsaPrivateKey)
}

// ValidateJWT validates jwt
func ValidateJWT(j jwt.JWT) error {
	signingKey := os.Getenv("SIGNING_PUB_KEY")
	if signingKey == "" {
		log.Fatalln("Unable to load SIGNING_PUB_KEY")
	}
	bytes := []byte(signingKey)
	rsaPublicKey, err := crypto.ParseRSAPublicKeyFromPEM(bytes)
	if err != nil {
		log.Printf("err is %v", err.Error())
		return err
	}
	err = j.Validate(rsaPublicKey, jws.GetSigningMethod("RS256"))
	if err != nil {
		return err
	}
	return nil
}

// GetLoggedInUsername extract username from request
func GetLoggedInUsername(r *http.Request) (string, error) {
	j, err := jws.ParseFromHeader(r, jws.Compact)
	if err != nil {
		return "", err
	}
	// BUGFIX: the payload and its "sub" claim were extracted with
	// unchecked type assertions, which panicked on malformed tokens.
	// Use comma-ok assertions and report an error instead.
	authPayload, ok := j.Payload().(map[string]interface{})
	if !ok {
		return "", errors.New("Could not find Email Address in Token")
	}
	loggedInUserEmail, ok := authPayload["sub"].(string)
	if !ok || loggedInUserEmail == "" {
		return "", errors.New("Could not find Email Address in Token")
	}
	return loggedInUserEmail, nil
}
package main

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// Test_isInChans checks the emoji membership-marking helper.
func Test_isInChans(t *testing.T) {
	type testCase struct {
		all    []string
		sparse []string
		want   []string
	}
	cases := map[string]testCase{
		"should return a slice with true or false emojis": {
			all:    []string{"a", "b", "c", "d"},
			sparse: []string{"a", "c"},
			want:   []string{"✅", "❌", "✅", "❌"},
		},
		"if all slice is not a superset of sparse slice, should work anyway": {
			all:    []string{"a", "b"},
			sparse: []string{"a", "c"},
			want:   []string{"✅", "❌"},
		},
	}
	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			got := isInChans(tc.all, tc.sparse, "✅", "❌")
			assert.Equal(t, tc.want, got)
		})
	}
}
package controller import ( "github.com/kataras/iris/context" "go-mysql/customlogger" "go-mysql/model" "go-mysql/router" ) func init () { c := router.CreateNewControllerInstance("clear", "/health") c.Get("", checkHealth) } func checkHealth (ctx context.Context) { logger := customlogger.GetInstance() logger.Println("Check health API's running ...") hasil := model.CheckHealth() ctx.Text(hasil) }
package washoe

// Street is a street address broken into its component parts.
type Street struct {
	Full     string
	Number   int
	Fraction string
	Prefix   string
	Street   string
	Type     string
	Suffix   string
}

// String implements fmt.Stringer by returning the full, unparsed form.
func (s *Street) String() string {
	return s.Full
}
package credential import "github.com/appootb/substratum/credential" func Init() { if credential.ClientImplementor() == nil { credential.RegisterClientImplementor(&ClientSeed{}) } if credential.ServerImplementor() == nil { credential.RegisterServerImplementor(&ServerSeed{}) } }
package echo

import (
	"net/http"
	"testing"

	"github.com/GoAdminGroup/go-admin/tests/common"
	"github.com/gavv/httpexpect"
)

// TestEcho runs the shared go-admin test-suite against the echo adapter.
func TestEcho(t *testing.T) {
	handler := internalHandler()
	cfg := httpexpect.Config{
		Client: &http.Client{
			Transport: httpexpect.NewBinder(handler),
			Jar:       httpexpect.NewJar(),
		},
		Reporter: httpexpect.NewAssertReporter(t),
	}
	common.ExtraTest(httpexpect.WithConfig(cfg))
}
package main

import (
	"flag"
	"fmt"
	"os"

	"github.com/joho/godotenv"

	"goapigen/genjson"
	"goapigen/genstruct"
)

// Program to reverse engineer your mysql database into gorm models
func main() {
	godotenv.Load(os.Getenv("PWD") + "/.env")

	// database connection settings come from the environment / .env file
	user := os.Getenv("DB_USERNAME")
	pass := os.Getenv("DB_PASSWORD")
	host := os.Getenv("DB_HOSTNAME")
	port := os.Getenv("DB_PORT")
	database := os.Getenv("DB_DATABASE")

	var (
		mode         = flag.String("mode", "code", "Generate Mode")
		templatePath = flag.String("template", os.Getenv("PWD")+"/template", "Template Path")
		outputPath   = flag.String("project", os.Getenv("PWD")+"/src", "Project Path")
		relationMode = flag.Bool("relation", false, "Relation Mode")
	)
	flag.Parse()

	fmt.Println("Generate Mode: " + *mode)
	fmt.Println("Template Path: " + *templatePath)
	fmt.Println("Project Path: " + *outputPath)

	genJSON := *mode == "json" || *mode == "all"
	genCode := *mode == "code" || *mode == "all"

	if genJSON {
		fmt.Println("Generate:", "goapigen.json")
		fmt.Println("hostdb:", host)
		fmt.Println("Connecting to mysql server " + host + ":" + port)
		genjson.GenJson(user, pass, host, port, database, *relationMode)
	}
	if genCode {
		genstruct.GenStruct(*templatePath, *outputPath)
	}
}
// Package vericomp builds arithmetic-constraint systems (Spec, PWS, and QAP
// matrix files) for verifiable-computation back ends.
package vericomp

import (
	"bytes"
	"fmt"
	"io"
	"log"
	"math/big"
	"os"
	"strconv"
	"unicode"

	"vericomp/util"
)

// FIELD_BITS is the default bit width assumed for a variable whose width
// was never registered via NextVarBits.
const FIELD_BITS = 128

// term is one coefficient*variable entry of a constraint polynomial.
// coef holds either an int64 or a *big.Int; an empty variable name denotes
// the constant-one wire.
type term struct {
	coef     interface{}
	variable string
}

// Convenience constructors for terms and rows of terms.
func t(variable string) term  { return ti(1, variable) }  // +1 * variable
func tn(variable string) term { return ti(-1, variable) } // -1 * variable
func ti(coeff int64, variable string) term    { return term{coeff, variable} }
func tb(coeff *big.Int, variable string) term { return term{coeff, variable} }
func r(terms ...term) []term                  { return terms }

// zeroCoef reports whether a term coefficient is zero, handling both of the
// representations term.coef may carry (int64 and *big.Int).
// NOTE: the original code compared `t.coef == 0`, which boxes the untyped 0
// as `int` and therefore never matched an int64 or *big.Int coefficient, so
// zero terms were written to the matrices and counted in numNonZero.
func zeroCoef(c interface{}) bool {
	switch v := c.(type) {
	case int64:
		return v == 0
	case int:
		return v == 0
	case *big.Int:
		return v.Sign() == 0
	}
	return false
}

// ConstraintBuilder accumulates constraints and mirrors them into some
// combination of a Spec file, a PWS file, and three QAP matrix files.
type ConstraintBuilder struct {
	nextSubcript, nextInputOutputSub, inputNum int // variable / IO counters

	buildSpec, buildPWS, buildQAP, outputTmpl bool // which outputs are active

	specFile, pwsFile, f1IndexFile, cmdsFile *os.File
	matFiles                                 [3]*os.File // QAP A, B, C matrices

	matNumCons int    // number of constraints emitted so far (1-based in output)
	numNonZero [3]int // nonzero entry counts per matrix

	varNumBits map[string]int // declared bit widths, keyed by variable name

	extVars       []string       // externally named variables, in creation order
	extVarIndices map[string]int // name -> 1-based position in extVars

	Zero, One string // variable names pinned to the constants 0 and 1
}

// NewConstraintBuilder configures a builder for whichever output files are
// non-nil. Spec output is mutually exclusive with PWS/QAP output, and
// template mode requires both PWS and QAP.
func NewConstraintBuilder(specFile, pwsFile, qapAFile, qapBFile, qapCFile, f1IndexFile *os.File, outputTmpl bool) *ConstraintBuilder {
	cb := new(ConstraintBuilder)
	cb.buildSpec = specFile != nil
	cb.buildPWS = pwsFile != nil
	cb.buildQAP = qapAFile != nil && qapBFile != nil && qapCFile != nil
	cb.outputTmpl = outputTmpl

	if cb.buildSpec && (cb.buildPWS || cb.buildQAP) {
		log.Fatal("You can only output Spec or PWS and QAP, not both")
	}
	if cb.outputTmpl {
		if cb.buildSpec {
			log.Fatal("You can't build a Spec file as a template")
		}
		if !cb.buildPWS || !cb.buildQAP {
			log.Fatal("You must output PWS and QAP if you're building a template")
		}
	}

	// Commands are staged in a temp file and copied into the final output
	// by WriteFiles, after all counts are known.
	cb.cmdsFile = util.OpenTempFile("cmds")
	if cb.buildSpec {
		cb.specFile = specFile
	}
	if cb.buildPWS {
		cb.pwsFile = pwsFile
	}
	if cb.buildQAP {
		cb.matFiles[0] = qapAFile
		cb.matFiles[1] = qapBFile
		cb.matFiles[2] = qapCFile
	}
	cb.f1IndexFile = f1IndexFile

	cb.varNumBits = make(map[string]int)
	cb.extVarIndices = make(map[string]int)

	cb.Zero = cb.Constant(0)
	cb.One = cb.Constant(1)
	return cb
}

// pv renders a variable name for output; in template mode non-numeric names
// become ${name} placeholders.
func (b *ConstraintBuilder) pv(varName string) string {
	if b.outputTmpl && !unicode.IsDigit(rune(varName[0])) {
		return "${" + varName + "}"
	}
	return varName
}

// addPolyCmd emits "resultVar = poly" in the grammar of the active output
// (Spec constraint form vs. PWS "P ... E" form).
func (b *ConstraintBuilder) addPolyCmd(resultVar, poly string) {
	if b.buildSpec {
		fmt.Fprintf(b.cmdsFile, "( ) * ( ) + ( %v - %v )\n", poly, b.pv(resultVar))
	} else {
		fmt.Fprintf(b.cmdsFile, "P %v = %v E\n", b.pv(resultVar), poly)
	}
}

// getIndex maps a variable name to its wire index in the QAP matrices.
// Index 0 is the constant-one wire; everything else is shifted up by one.
// Layout: internal V* vars, then I*/O* vars, then external vars.
func (b *ConstraintBuilder) getIndex(variable string) int {
	if variable == "" {
		return 0 // constant-one wire
	}
	index, exists := b.extVarIndices[variable]
	if exists {
		// BUG FIX: the original wrote `index =+ ...`, which parses as plain
		// assignment to a unary-plus expression, discarding the looked-up
		// index and mapping every external variable to the same wire.
		index += b.nextSubcript + b.nextInputOutputSub
	} else if variable[0] == 'V' {
		index, _ = strconv.Atoi(variable[1:])
	} else if variable[0] == 'I' {
		index, _ = strconv.Atoi(variable[1:])
		index += b.nextSubcript
	} else if variable[0] == 'O' {
		index, _ = strconv.Atoi(variable[1:])
		index += b.nextSubcript
	} else {
		log.Fatal("getIndex: variable " + variable + " not found!")
	}
	return index + 1
}

// addToMat appends the nonzero terms of one constraint row to QAP matrix
// `mat` (0=A, 1=B, 2=C), keeping numNonZero in sync.
func (b *ConstraintBuilder) addToMat(mat int, terms []term) {
	for _, tm := range terms {
		if zeroCoef(tm.coef) || tm.variable == b.Zero {
			continue
		}
		var v, c string
		if b.outputTmpl {
			if tm.variable == "" {
				v = "0"
			} else {
				v = b.pv(tm.variable)
			}
			c = "${CONS_" + strconv.Itoa(b.matNumCons) + "}"
		} else {
			v = strconv.Itoa(b.getIndex(tm.variable))
			c = strconv.Itoa(b.matNumCons)
		}
		fmt.Fprintf(b.matFiles[mat], "%v %v %v\n", v, c, tm.coef)
		b.numNonZero[mat]++
	}
}

// addCons records one constraint A*B + C = 0; a nil slice leaves that
// matrix row empty.
func (b *ConstraintBuilder) addCons(aTerms []term, bTerms []term, cTerms []term) {
	b.matNumCons++
	terms := [][]term{aTerms, bTerms, cTerms}
	for i, tr := range terms {
		if tr != nil {
			b.addToMat(i, tr)
		}
	}
}

// DBGet reads outValue from the database at index.
func (b *ConstraintBuilder) DBGet(index, outValue string) {
	// We don't add anything to the QAP because the value from the DB is exogenous
	fmt.Fprintf(b.cmdsFile, "DB_GET %v %v\n", b.pv(index), b.pv(outValue))
}

// DBGetBits reads a bit vector from the database at index.
func (b *ConstraintBuilder) DBGetBits(index string, outValues []string) {
	// We don't add anything to the QAP because the value from the DB is exogenous
	fmt.Fprintf(b.cmdsFile, "DB_GET_BITS %v %v ", b.pv(index), len(outValues))
	for _, v := range outValues {
		fmt.Fprintf(b.cmdsFile, "%v ", b.pv(v))
	}
	fmt.Fprintln(b.cmdsFile)
}

// DBGetConst is DBGet with a compile-time-constant index.
func (b *ConstraintBuilder) DBGetConst(index int, outValue string) {
	b.DBGet(strconv.Itoa(index), outValue)
}

// DBPut writes value to the database at index.
func (b *ConstraintBuilder) DBPut(index, value string) {
	// We don't add anything to the QAP because putting a value into the DB is exogenous
	fmt.Fprintf(b.cmdsFile, "DB_PUT %v %v\n", b.pv(index), b.pv(value))
}

// DBPutBits writes a bit vector to the database at index.
func (b *ConstraintBuilder) DBPutBits(index string, values []string) {
	// We don't add anything to the QAP because the value from the DB is exogenous
	fmt.Fprintf(b.cmdsFile, "DB_PUT_BITS %v %v ", b.pv(index), len(values))
	for _, v := range values {
		fmt.Fprintf(b.cmdsFile, "%v ", b.pv(v))
	}
	fmt.Fprintln(b.cmdsFile)
}

// DBPutConst is DBPut with a compile-time-constant index.
func (b *ConstraintBuilder) DBPutConst(index int, value string) {
	b.DBPut(strconv.Itoa(index), value)
}

// SiblingHash allocates numHashBits fresh bit variables and emits the
// command that fills them with the Merkle sibling hash at (index, level).
func (b *ConstraintBuilder) SiblingHash(index string, level, numHashBits int) []string {
	hashBits := make([]string, numHashBits)
	for i := range hashBits {
		hashBits[i] = b.NextVarBits(1)
	}
	// We don't add anything to the QAP because the sibling hash is exogenous
	fmt.Fprintf(b.cmdsFile, "DB_GET_SIBLING_HASH %v %v %v\n", b.pv(index), level, b.pv(hashBits[0]))
	return hashBits
}

// blockByHash emits the GET/PUT_BLOCK_BY_HASH command shared by the two
// public wrappers below.
func (b *ConstraintBuilder) blockByHash(get bool, hashBits, values []string) {
	// We don't add anything to the QAP because the value from the DB is exogenous
	op := "GET_BLOCK_BY_HASH"
	numXy := "NUM_Y"
	xy := "Y"
	if !get {
		op = "PUT_BLOCK_BY_HASH"
		numXy = "NUM_X"
		xy = "X"
	}
	fmt.Fprintf(b.cmdsFile, "%v ", op)
	for _, v := range hashBits {
		fmt.Fprintf(b.cmdsFile, "%v ", b.pv(v))
	}
	fmt.Fprintf(b.cmdsFile, "%v %v %v ", numXy, len(values), xy)
	for _, v := range values {
		fmt.Fprintf(b.cmdsFile, "%v ", b.pv(v))
	}
	fmt.Fprintln(b.cmdsFile)
}

// GetBlockByHash reads a block addressed by its hash bits.
func (b *ConstraintBuilder) GetBlockByHash(hashBits, outValues []string) {
	b.blockByHash(true, hashBits, outValues)
}

// PutBlockByHash stores a block addressed by its hash bits.
func (b *ConstraintBuilder) PutBlockByHash(hashBits, values []string) {
	b.blockByHash(false, hashBits, values)
}

// FreeBlockByHash releases a block addressed by its hash bits.
func (b *ConstraintBuilder) FreeBlockByHash(hashBits []string) {
	// We don't add anything to the QAP because the value from the DB is exogenous
	fmt.Fprintf(b.cmdsFile, "FREE_BLOCK_BY_HASH ")
	for _, v := range hashBits {
		fmt.Fprintf(b.cmdsFile, "%v ", b.pv(v))
	}
	fmt.Fprintln(b.cmdsFile)
}

// AssertBit constrains x to be 0 or 1, including in Spec output.
func (b *ConstraintBuilder) AssertBit(x string) {
	// Similar to ensureBit, below, but works when outputting to Spec.
	// This command doesn't seem to get written to the PWS file
	if b.buildSpec {
		v := b.pv(x)
		fmt.Fprintf(b.cmdsFile, "ASSERT_POLY_ZERO ( %v ) * ( %v ) + ( -1 * %v )\n", v, v, v)
	}
	b.ensureBit(x)
}

// ensureBit adds the QAP constraint x*x - x = 0.
func (b *ConstraintBuilder) ensureBit(x string) {
	if b.buildQAP {
		// x * x - x = 0
		b.addCons(r(t(x)), r(t(x)), r(tn(x)))
	}
	// We don't add to the PWS or Spec, because this has to be handled by a PWS or Spec builtin
}

// sumOfPowers constrains x to equal the big-endian base-2 combination of xi,
// optionally also emitting the polynomial command.
func (b *ConstraintBuilder) sumOfPowers(xi []string, x string, printPoly bool) {
	numBits := len(xi)
	Ci := make([]term, numBits+1)
	// (x_i * 2^i) + ... + (x_0 * 2^0) - x = 0
	for i := range xi {
		coeff := new(big.Int).Exp(big.NewInt(2), big.NewInt(int64(numBits-1-i)), nil)
		Ci[i] = tb(coeff, xi[i])
	}
	Ci[numBits] = tn(x)
	if b.buildQAP {
		b.addCons(nil, nil, Ci)
	}
	if printPoly {
		poly := new(bytes.Buffer)
		for i := range xi {
			fmt.Fprintf(poly, "( %v * %v )", Ci[i].coef, b.pv(xi[i]))
			if i < len(xi)-1 {
				fmt.Fprintf(poly, " + ")
			}
		}
		b.addPolyCmd(x, poly.String())
	}
}

// Split decomposes x into numBits fresh bit variables, big endian.
func (b *ConstraintBuilder) Split(x string, numBits int) []string {
	xi := make([]string, numBits)
	for i := range xi {
		xi[i] = b.NextVarBits(1)
		b.ensureBit(xi[i])
	}
	fmt.Fprintf(b.cmdsFile, "SI %v into %v bits at %v\n", b.pv(x), numBits, b.pv(xi[0]))
	b.sumOfPowers(xi, x, false)
	return xi
}

// TODO: I'm not sure why the constraints involving the coefficients were commeneted out.
/* big endian */
func (b *ConstraintBuilder) splitBase(x string, l int, base int64) []string {
	xi := make([]string, l)
	for i := 0; i < l; i++ {
		xi[i] = b.NextVar()
		b.ensureBit(xi[i])
		// b.addCons(0, []term{term{big.NewInt(1), xi[i]}})
		// b.addCons(1, []term{term{big.NewInt(1), xi[i]}})
		// b.addCons(2, []term{term{big.NewInt(1), xi[i]}})
		//b.cmds = append(b.cmds, fmt.Sprintf("P %v = (%v) * (%v) E\n", xi[i], xi[i], xi[i]))
	}
	fmt.Fprintf(b.cmdsFile, "P %v = (", x) // AJF - uncomment
	Ai := make([]term, 0)
	for i := 0; i < l; i++ {
		coef := new(big.Int).Exp(big.NewInt(base), big.NewInt(int64(l-i-1)), nil)
		Ai = append(Ai, term{coef, xi[i]})
		fmt.Fprintf(b.cmdsFile, "%v * %v + ", coef.String(), xi[i]) // AJF - uncomment
	}
	//Ai = append(Ai, term{new(big.Int).Neg(new(big.Int).Exp(big.NewInt(2), big.NewInt(31), nil)), ""})
	// b.addCons(0, Ai)
	// b.addCons(1, []term{term{big.NewInt(1), ""}})
	//b.c = append(b.c, []term{term{big.NewInt(1), x},term{new(big.Int).Exp(big.NewInt(2), big.NewInt(31), nil), ""}})
	// b.addCons(2, []term{term{big.NewInt(1), x}})
	// AJF - b.cmds = append(b.cmds, fmt.Sprintf("SI %v into %v bits at %v", x, l, xi[0]))
	fmt.Fprintf(b.cmdsFile, "0) * (1) E") // AJF - uncomment
	fmt.Fprintln(b.cmdsFile)
	return xi
}

// Combine recomposes bit variables xi into one fresh variable, big endian.
func (b *ConstraintBuilder) Combine(xi []string) string {
	x := b.NextVar()
	b.sumOfPowers(xi, x, true)
	return x
}

/* big endian */
func (b *ConstraintBuilder) combineBase(xi []string, base int64) string {
	x := b.NextVar()
	l := len(xi)
	fmt.Fprintf(b.cmdsFile, "P %v = ( ", x)
	Ai := make([]term, 0)
	for i := 0; i < l; i++ {
		coef := new(big.Int).Exp(big.NewInt(base), big.NewInt(int64(l-i-1)), nil)
		Ai = append(Ai, term{coef, xi[i]})
		fmt.Fprintf(b.cmdsFile, "%v * %v + ", coef.String(), xi[i])
	}
	//Ai = append(Ai, term{new(big.Int).Neg(new(big.Int).Exp(big.NewInt(2), big.NewInt(31), nil)), ""})
	// b.addCons(0, Ai)
	// b.addCons(1, []term{term{big.NewInt(1), ""}})
	//b.c = append(b.c, []term{term{big.NewInt(1), x},term{new(big.Int).Exp(big.NewInt(2), big.NewInt(31), nil), ""}})
	// b.addCons(2, []term{term{big.NewInt(1), x}})
	fmt.Fprintf(b.cmdsFile, " 0 ) E")
	fmt.Fprintln(b.cmdsFile)
	return x
}

// Base257ToBase256 re-encodes a little-endian base-257 digit vector as
// base-256 digits, returning the carry element.
func (b *ConstraintBuilder) Base257ToBase256(input []string, output []string) string {
	size := len(input)
	// Reverse the input because it arrives as little endian but combineBase() is big endian
	in := make([]string, size)
	for i, val := range input {
		in[size-i-1] = val
	}
	// TODO: There is likely a problem here. How do we know that the variables in out are in the right range?
	// out[0] is supposed to be a bit, and the rest are supposed to be < 256
	// This could be a problem for all uses of Split().
	sum := b.combineBase(in, 257)
	out := b.splitBase(sum, size+1, 256)
	// Reverse the output because the caller expects the result to be little endian
	for i := 0; i < size; i++ {
		output[i] = out[size-i]
	}
	// The first, and largest, element of out is the "carry" bit
	return out[0]
}

// Xor computes the bitwise xor of two equal-length bit vectors.
func (b *ConstraintBuilder) Xor(xi, yi []string) []string {
	l := len(xi)
	zi := make([]string, l)
	for i := 0; i < l; i++ {
		zi[i] = b.NextVarBits(1)
	}
	// -2xy + x + y - z = 0
	for i := 0; i < l; i++ {
		if b.buildQAP {
			b.addCons(r(ti(-2, xi[i])), r(t(yi[i])), r(t(xi[i]), t(yi[i]), tn(zi[i])))
		}
		// Special case it when we're making a spec file because the compiler backend barfs
		if b.buildSpec {
			fmt.Fprintf(b.cmdsFile, "( -2 * %v ) * ( %v ) + ( %v + %v - %v )\n", b.pv(xi[i]), b.pv(yi[i]), b.pv(xi[i]), b.pv(yi[i]), b.pv(zi[i]))
		} else {
			b.addPolyCmd(zi[i], fmt.Sprintf("( -2 * %v ) * ( %v ) + %v + %v", b.pv(xi[i]), b.pv(yi[i]), b.pv(xi[i]), b.pv(yi[i])))
		}
	}
	return zi
}

// And computes the bitwise and of two equal-length bit vectors.
func (b *ConstraintBuilder) And(xi, yi []string) []string {
	l := len(xi)
	zi := make([]string, l)
	for i := 0; i < l; i++ {
		zi[i] = b.NextVarBits(1)
	}
	// xy - z = 0
	for i := 0; i < l; i++ {
		if b.buildQAP {
			b.addCons(r(t(xi[i])), r(t(yi[i])), r(tn(zi[i])))
		}
		b.addPolyCmd(zi[i], fmt.Sprintf("%v * %v", b.pv(xi[i]), b.pv(yi[i])))
	}
	return zi
}

// Or computes the bitwise or of two equal-length bit vectors.
func (b *ConstraintBuilder) Or(xi, yi []string) []string {
	l := len(xi)
	zi := make([]string, l)
	for i := 0; i < l; i++ {
		zi[i] = b.NextVarBits(1)
	}
	// z = x or y <=> z = 1 - (1-x)(1-y) <=> -xy + x + y - z = 0
	for i := 0; i < l; i++ {
		if b.buildQAP {
			b.addCons(r(tn(xi[i])), r(t(yi[i])), r(t(xi[i]), t(yi[i]), tn(zi[i])))
		}
		b.addPolyCmd(zi[i], fmt.Sprintf("( -1 * %v ) * ( %v ) + %v + %v", b.pv(xi[i]), b.pv(yi[i]), b.pv(xi[i]), b.pv(yi[i])))
	}
	return zi
}

// Not computes the bitwise complement of a bit vector.
func (b *ConstraintBuilder) Not(xi []string) []string {
	l := len(xi)
	yi := make([]string, l)
	for i := 0; i < l; i++ {
		yi[i] = b.NextVarBits(1)
	}
	// 1 - x - y = 0
	for i := 0; i < l; i++ {
		if b.buildQAP {
			b.addCons(nil, nil, r(t(""), tn(xi[i]), tn(yi[i])))
		}
		b.addPolyCmd(yi[i], fmt.Sprintf("1 - %v", b.pv(xi[i])))
	}
	return yi
}

// Leftrotate rotates a bit vector left by n positions. It is a pure
// renaming; no constraints are added.
func (b *ConstraintBuilder) Leftrotate(xi []string, n int) []string {
	l := len(xi)
	yi := make([]string, l)
	//for i := 0; i < l; i++ {
	//	yi[i] = b.NextVar()
	//}
	// TODO need to think about
	for i := 0; i < l; i++ {
		/*b.a = append(b.a, []term{term{big.NewInt(1), xi[(i + n) % l]}})
		b.b = append(b.b, []term{term{big.NewInt(1), ""}})
		b.c = append(b.c, []term{term{big.NewInt(1), yi[i]}})*/
		yi[i] = xi[(i+n)%l]
		//fmt.Printf("P %v = ( %v ) E", yi[i], xi[(i + n) % l])
		//fmt.Printf("P %v = %v E\n", yi[(i + n) % l], xi[i])
	}
	return yi
}

// Add returns a fresh variable constrained to the sum of values.
func (b *ConstraintBuilder) Add(values ...string) string {
	return b.Sum(values)
}

// AddConst returns a fresh variable constrained to v + c.
func (b *ConstraintBuilder) AddConst(v string, c int64) string {
	y := b.NextVar()
	if b.buildQAP {
		b.addCons(nil, nil, r(t(v), ti(c, ""), tn(y)))
	}
	b.addPolyCmd(y, fmt.Sprintf("%v + %v", b.pv(v), c))
	return y
}

// Sum returns a fresh variable constrained to the sum of values.
func (b *ConstraintBuilder) Sum(values []string) string {
	return b.SumToVar(b.NextVar(), values)
}

// SumToVar constrains an existing variable `result` to the sum of values.
func (b *ConstraintBuilder) SumToVar(result string, values []string) string {
	z := result
	Ci := make([]term, len(values)+1)
	poly := new(bytes.Buffer)
	for i, v := range values {
		Ci[i] = t(v)
		fmt.Fprintf(poly, "%v", b.pv(v))
		if i < len(values)-1 {
			fmt.Fprintf(poly, " + ")
		}
	}
	Ci[len(Ci)-1] = tn(z)
	if b.buildQAP {
		b.addCons(nil, nil, Ci)
	}
	b.addPolyCmd(z, poly.String())
	return z
}

// Mult returns a fresh variable constrained to x * y.
func (b *ConstraintBuilder) Mult(x, y string) string {
	z := b.NextVar()
	if b.buildQAP {
		b.addCons(r(t(x)), r(t(y)), r(tn(z)))
	}
	if b.buildSpec {
		fmt.Fprintf(b.cmdsFile, "( %v ) * ( %v ) + ( - %v )\n", b.pv(x), b.pv(y), b.pv(z))
	} else {
		b.addPolyCmd(z, fmt.Sprintf("%v * %v", b.pv(x), b.pv(y)))
	}
	return z
}

// MatrixVectorMul constrains z = matrix * vector, where matrix is given
// row-major with num_of_columns entries per row.
func (b *ConstraintBuilder) MatrixVectorMul(matrix []int64, vector []string, num_of_rows int, num_of_columns int) []string {
	z := make([]string, num_of_rows)
	row := matrix
	for i := 0; i < num_of_rows; i++ {
		z[i] = b.NextVar()
		var Ci []term
		constPart := big.NewInt(0)
		poly := new(bytes.Buffer)
		// (inner index renamed from the original's shadowing `i` for clarity)
		for j := range vector {
			/*println(j)*/
			if vector[j] == b.Zero || row[j] == 0 {
				// ignore zero terms
			} else if vector[j] == b.One {
				constPart.Add(constPart, big.NewInt(row[j]))
			} else {
				Ci = append(Ci, ti(row[j], vector[j]))
				fmt.Fprintf(poly, "( %v * %v ) + ", b.pv(vector[j]), row[j])
			}
		}
		Ci = append(Ci, tb(constPart, ""))
		fmt.Fprintf(poly, "( %v * %v )", b.pv(b.One), constPart)
		Ci = append(Ci, tn(z[i]))
		if b.buildQAP {
			b.addCons(nil, nil, Ci)
		}
		/*b.addPolyCmd(z, poly.String())*/
		if b.buildSpec {
			fmt.Fprintf(b.cmdsFile, "( ) * ( ) + ( %v - %v )\n", poly.String(), b.pv(z[i]))
			/*} else {*/
			/*fmt.Fprintf(b.cmdsFile, "P %v = %v E\n", b.pv(z[i]), poly.String())*/
		}
		row = row[num_of_columns:]
	}
	fmt.Fprintf(b.cmdsFile, "MATRIX_VEC_MUL NUM_ROWS %v NUM_COLUMNS %v ACTUAL_NUM_COLUMNS %v", num_of_rows, num_of_columns, len(vector))
	fmt.Fprintf(b.cmdsFile, " IN_VEC")
	for _, v := range vector {
		fmt.Fprintf(b.cmdsFile, " %v", b.pv(v))
	}
	fmt.Fprintf(b.cmdsFile, " OUT_VEC")
	for _, v := range z {
		fmt.Fprintf(b.cmdsFile, " %v", b.pv(v))
	}
	fmt.Fprintf(b.cmdsFile, "\n")
	return z
}

// SumOfProducts returns a fresh variable constrained to dot(v1, v2), folding
// constant-one entries into a single constant term.
func (b *ConstraintBuilder) SumOfProducts(v1 []string, v2 []int64) string {
	z := b.NextVar()
	var Ci []term
	constPart := big.NewInt(0)
	poly := new(bytes.Buffer)
	for i := range v1 {
		if v1[i] == b.Zero || v2[i] == 0 {
			// ignore zero terms
		} else if v1[i] == b.One {
			constPart.Add(constPart, big.NewInt(v2[i]))
		} else {
			Ci = append(Ci, ti(v2[i], v1[i]))
			fmt.Fprintf(poly, "( %v * %v ) + ", b.pv(v1[i]), v2[i])
		}
	}
	Ci = append(Ci, tb(constPart, ""))
	fmt.Fprintf(poly, "( %v * %v )", b.pv(b.One), constPart)
	Ci = append(Ci, tn(z))
	if b.buildQAP {
		b.addCons(nil, nil, Ci)
	}
	b.addPolyCmd(z, poly.String())
	return z
}

// Sub returns a fresh variable constrained to x - y.
func (b *ConstraintBuilder) Sub(x, y string) string {
	z := b.NextVar()
	if b.buildQAP {
		b.addCons(nil, nil, r(t(x), tn(y), tn(z)))
	}
	b.addPolyCmd(z, fmt.Sprintf("%v - %v", b.pv(x), b.pv(y)))
	return z
}

// Mod reduces x modulo 2^l, treating x as an n-bit value.
func (b *ConstraintBuilder) Mod(x string, n, l int) string {
	return b.Combine(b.ModBits(x, n, l))
}

// TODO: How do we support mod that isn't a power of 2?
func (b *ConstraintBuilder) ModBits(x string, n, l int) []string {
	xi := b.Split(x, n)
	return xi[n-l:]
}

// TODO: How are signed constants supposed to be handled?
func (b *ConstraintBuilder) SignedConstant(n int) string {
	return b.Constant(uint(n))
}

// Constant returns a fresh variable pinned to the constant n.
func (b *ConstraintBuilder) Constant(n uint) string {
	x := b.NextVar()
	if b.buildQAP {
		b.addCons(nil, nil, r(ti(int64(n), ""), tn(x)))
	}
	b.addPolyCmd(x, strconv.Itoa(int(n)))
	return x
}

// Assignment constrains y = x.
func (b *ConstraintBuilder) Assignment(y, x string) {
	// x * 1 - y = 0
	if b.buildQAP {
		b.addCons(nil, nil, r(t(x), tn(y)))
	}
	b.addPolyCmd(y, b.pv(x))
}

// TODO: Did I get this right? I tried to implement what's on p.22 in the extended Ginger paper
func (b *ConstraintBuilder) IsEqualToZero(x string) string {
	y := b.NextVar()
	oneMinusY := b.Sub(b.One, y)
	m := b.NextVar()
	c1 := b.Sub(b.Mult(x, m), oneMinusY)
	b.Assignment(c1, b.Zero)
	c2 := b.Mult(oneMinusY, x)
	b.Assignment(c2, b.Zero)
	return y
}

// EqualsBit returns a variable that is 1 when x == y and 0 otherwise.
func (b *ConstraintBuilder) EqualsBit(x, y string) string {
	return b.IsEqualToZero(b.Sub(x, y))
}

// ArrayGet returns a variable equal to array[index], built as a sum of
// equality-masked terms.
func (b *ConstraintBuilder) ArrayGet(array []string, index string) string {
	// BUG FIX: the original seeded the accumulator with "", which the QAP
	// side treats as the constant-one wire (silently adding 1 to the result)
	// while the PWS side printed it as nothing. Seed with the zero constant.
	result := b.Zero
	for i, a := range array {
		m := b.EqualsBit(index, b.Constant(uint(i)))
		result = b.Add(result, b.Mult(m, a))
	}
	return result
}

// IfThenElseArray applies IfThenElse element-wise over two equal-length vectors.
func (b *ConstraintBuilder) IfThenElseArray(choiceBit string, cond1, cond2 []string) []string {
	result := make([]string, len(cond1))
	for i := range result {
		result[i] = b.IfThenElse(choiceBit, cond1[i], cond2[i])
	}
	return result
}

// IfThenElse returns cond1 when choiceBit is 1, else cond2.
func (b *ConstraintBuilder) IfThenElse(choiceBit, cond1, cond2 string) string {
	result := b.NextVar()
	// choiceBit * (cond1 - cond2) + cond2 - result = 0
	if b.buildQAP {
		b.addCons(r(t(choiceBit)), r(t(cond1), tn(cond2)), r(t(cond2), tn(result)))
	}
	// Special case it when we're making a spec file because the compiler backend barfs
	if b.buildSpec {
		fmt.Fprintf(b.cmdsFile, "( %v ) * ( %v - %v ) + ( %v - %v )\n", b.pv(choiceBit), b.pv(cond1), b.pv(cond2), b.pv(cond2), b.pv(result))
	} else {
		b.addPolyCmd(result, fmt.Sprintf("( %v ) * ( %v ) + ( - %v ) * ( %v ) + %v", b.pv(choiceBit), b.pv(cond1), b.pv(choiceBit), b.pv(cond2), b.pv(cond2)))
	}
	return result
}

// NextVar allocates the next internal variable name (V0, V1, ...).
func (b *ConstraintBuilder) NextVar() string {
	nextVariable := "V" + strconv.Itoa(b.nextSubcript)
	b.nextSubcript++
	return nextVariable
}

// NextVarBits allocates an internal variable with a declared bit width.
func (b *ConstraintBuilder) NextVarBits(numBits int) string {
	nextVar := b.NextVar()
	b.varNumBits[nextVar] = numBits
	return nextVar
}

// NextInput allocates the next input variable name (I0, I1, ...).
func (b *ConstraintBuilder) NextInput() string {
	nextVariable := "I" + strconv.Itoa(b.nextInputOutputSub)
	b.nextInputOutputSub++
	b.inputNum++
	return nextVariable
}

// NextOutput allocates the next output variable name (O...), sharing the
// I/O subscript counter with NextInput.
func (b *ConstraintBuilder) NextOutput() string {
	nextVariable := "O" + strconv.Itoa(b.nextInputOutputSub)
	b.nextInputOutputSub++
	return nextVariable
}

// NewExternalVar registers a caller-named variable; duplicate names are fatal.
func (b *ConstraintBuilder) NewExternalVar(name string) string {
	_, exists := b.extVarIndices[name]
	if exists {
		log.Fatal("External variable " + name + " already exists!")
	} else {
		b.extVars = append(b.extVars, name)
		b.extVarIndices[name] = len(b.extVars)
	}
	return name
}

// getVarNumBits returns a variable's declared width, defaulting to FIELD_BITS.
func (b *ConstraintBuilder) getVarNumBits(varName string) int {
	numBits := b.varNumBits[varName]
	if numBits == 0 {
		numBits = FIELD_BITS
	}
	return numBits
}

/*
func (b *ConstraintBuilder) printTerm(t term) string {
	ret := t.coef.String()
	if t.variable != "" {
		if ret == "1" {
			ret = t.variable
			// } else if "-1" {
			//	ret = "-" + t.variable
		} else {
			ret += " * " + t.variable
		}
	}
	return ret
}

func (b *ConstraintBuilder) printPoly(poly []term) string {
	ret := b.printTerm(poly[0])
	for i := 1; i < len(poly); i++ {
		t := b.printTerm(poly[i])
		if t[0] == '-' {
			ret += " - " + t[1:]
		} else {
			ret += " + " + t
		}
	}
	return ret
}

func (b *ConstraintBuilder) printPosPoly(poly []term) string {
	ret := ""
	for i := 0; i < len(poly); i++ {
		t := b.printTerm(poly[i])
		if t[0] != '-' {
			ret += " + " + t
		}
	}
	if len(ret) > 0 {
		return ret[3:]
	}
	return ret
}

func (b *ConstraintBuilder) printNegPoly(poly []term) string {
	ret := ""
	for i := 0; i < len(poly); i++ {
		t := b.printTerm(poly[i])
		if t[0] == '-' {
			ret += " + " + t[1:]
		}
	}
	return ret
}*/

// printTmplHeader writes the template preamble (external vars and counts).
func (b *ConstraintBuilder) printTmplHeader(w io.Writer) {
	if !b.outputTmpl {
		log.Fatal("We shouldn't be printing a template header if outputTmpl is false")
	}
	fmt.Fprintf(w, "#*\n\n")
	fmt.Fprintf(w, "EXTERNAL VARS:\n")
	for _, v := range b.extVars {
		fmt.Fprintf(w, "%v\n", v)
	}
	fmt.Fprintf(w, "\nNUM INTERNAL VARS: %v\n", b.nextSubcript)
	fmt.Fprintf(w, "NUM CONSTRAINTS: %v\n", b.matNumCons)
	fmt.Fprintf(w, "Aij: %v\n", b.GetAij())
	fmt.Fprintf(w, "Bij: %v\n", b.GetBij())
	fmt.Fprintf(w, "Cij: %v\n", b.GetCij())
	fmt.Fprintf(w, "\n*#\n")
}

// WriteFiles flushes all configured outputs and closes their files.
func (b *ConstraintBuilder) WriteFiles() {
	if b.buildSpec {
		b.outputSpec(b.specFile)
		b.specFile.Close()
	}
	if b.buildPWS {
		b.outputPWS(b.pwsFile)
		b.pwsFile.Close()
	}
	if b.buildQAP {
		for _, m := range b.matFiles {
			m.Close()
		}
	}
	if b.f1IndexFile != nil {
		b.outputFIndex(b.f1IndexFile)
		b.f1IndexFile.Close()
	}
}

// outputPWS copies the staged commands (plus template header, if any) to w.
func (b *ConstraintBuilder) outputPWS(w io.Writer) {
	if b.buildSpec {
		log.Fatal("Can't output PWS: buildSpec is true")
	}
	if b.outputTmpl {
		b.printTmplHeader(w)
	}
	util.CopyOrDie(w, b.cmdsFile)
}

// printVar writes one variable declaration line in Spec syntax.
func (b *ConstraintBuilder) printVar(w io.Writer, varName string) {
	fmt.Fprintf(w, "%v //__merkle_%v uint bits %v\n", varName, varName, b.getVarNumBits(varName))
}

// outputSpec writes the variable declarations and staged constraints to w.
func (b *ConstraintBuilder) outputSpec(w io.Writer) {
	if !b.buildSpec {
		log.Fatal("Can't output spec: buildSpec is false")
	}
	// When creating something that can be spliced into an existing spec file, we don't need inputs
	// and outputs
	//	fmt.Fprintf(w, "START_INPUT\n")
	//	for i := 0; i < b.inputNum; i++ {
	//		b.printVar(w, fmt.Sprintf("I%v", i))
	//	}
	//	fmt.Fprintf(w, "END_INPUT\n\n")
	//
	//	fmt.Fprintf(w, "START_OUTPUT\n")
	//	for i := b.inputNum; i < b.nextInputOutputSub; i++ {
	//		b.printVar(w, fmt.Sprintf("O%v", i))
	//	}
	//	fmt.Fprintf(w, "END_OUTPUT\n\n")
	fmt.Fprintf(w, "START_VARIABLES\n")
	for _, v := range b.extVars {
		b.printVar(w, v)
	}
	for i := 0; i < b.nextSubcript; i++ {
		b.printVar(w, fmt.Sprintf("V%v", i))
	}
	fmt.Fprintf(w, "END_VARIABLES\n\n")
	fmt.Fprintf(w, "START_CONSTRAINTS\n")
	util.CopyOrDie(w, b.cmdsFile)
	fmt.Fprintf(w, "END_CONSTRAINTS\n")
}

// outputFIndex writes the identity index list for the internal variables.
func (b *ConstraintBuilder) outputFIndex(w io.Writer) {
	for i := 0; i < b.nextSubcript; i++ {
		fmt.Fprintf(w, "%v ", i)
	}
}

// GetAij returns the number of nonzero entries in QAP matrix A.
func (b *ConstraintBuilder) GetAij() int { return b.numNonZero[0] }

// GetBij returns the number of nonzero entries in QAP matrix B.
func (b *ConstraintBuilder) GetBij() int { return b.numNonZero[1] }

// GetCij returns the number of nonzero entries in QAP matrix C.
func (b *ConstraintBuilder) GetCij() int { return b.numNonZero[2] }

// GetConstraintNum returns the constraint count rounded up to the next
// power of two, minus one.
func (b *ConstraintBuilder) GetConstraintNum() int {
	i := 1
	for i < (b.matNumCons + 1) {
		i *= 2
	}
	return i - 1
}

// GetVarsNum returns the number of internal variables allocated so far.
func (b *ConstraintBuilder) GetVarsNum() int { return b.nextSubcript }

// GetInputNum returns the number of input variables allocated so far.
func (b *ConstraintBuilder) GetInputNum() int { return b.inputNum }

// GetOutputNum returns the number of output variables allocated so far.
func (b *ConstraintBuilder) GetOutputNum() int { return b.nextInputOutputSub - b.inputNum }

// PrintParams writes the size parameters in config-file syntax.
func (b *ConstraintBuilder) PrintParams(w io.Writer) {
	fmt.Fprintf(w, `num_cons = %v;
num_inputs = %v;
num_outputs = %v;
num_vars = %v;
num_aij = %v;
num_bij = %v;
num_cij = %v;
`, b.GetConstraintNum(), b.GetInputNum(), b.GetOutputNum(), b.GetVarsNum(), b.GetAij(), b.GetBij(), b.GetCij())
}

// DiffVars returns how many internal variables were allocated since a
// previously sampled GetVarsNum value.
func (b *ConstraintBuilder) DiffVars(prevVars int) int {
	return b.GetVarsNum() - prevVars
}
package models

import (
	"time"

	"github.com/nsqio/go-nsq"
)

// IngestState stores information about the state of ingest operations
// for a single bag being ingested into APTrust. The ingest process involves
// a number of steps and worker processes. This state object is passed from
// one worker to the next, and accompanies the bag through every step of
// the process. If ingest fails, this object contains enough information to
// tell us why the ingest failed, where it failed, at which step it should be
// resumed, and whether there's anything (like partial files) that need to be
// cleaned up.
type IngestState struct {
	NSQMessage     *nsq.Message `json:"-"`
	WorkItem       *WorkItem
	WorkItemState  *WorkItemState
	IngestManifest *IngestManifest
}

// TouchNSQ tells NSQ we're still working on this item.
// It is a no-op when no NSQ message is attached.
func (ingestState *IngestState) TouchNSQ() {
	if ingestState.NSQMessage == nil {
		return
	}
	ingestState.NSQMessage.Touch()
}

// FinishNSQ tells NSQ we're done with this message.
// It is a no-op when no NSQ message is attached.
func (ingestState *IngestState) FinishNSQ() {
	if ingestState.NSQMessage == nil {
		return
	}
	ingestState.NSQMessage.Finish()
}

// RequeueNSQ tells NSQ to give this item to another worker (or perhaps the
// same worker) after a delay of at least the specified number of
// milliseconds. It is a no-op when no NSQ message is attached.
func (ingestState *IngestState) RequeueNSQ(milliseconds int) {
	if ingestState.NSQMessage == nil {
		return
	}
	ingestState.NSQMessage.Requeue(time.Duration(milliseconds) * time.Millisecond)
}
// Package provider resolves and invokes summon provider executables.
package provider

import (
	"bytes"
	"errors"
	"fmt"
	"io/ioutil"
	"os"
	"os/exec"
	"path"
	"strings"
)

// DefaultPath is the directory searched for a provider when none is named
// explicitly.
var DefaultPath = "/usr/local/lib/summon"

// Resolve resolves a filepath to a provider
// Checks the CLI arg, environment and then default path
func Resolve(providerArg string) (string, error) {
	provider := providerArg
	if provider == "" {
		provider = os.Getenv("SUMMON_PROVIDER")
	}
	if provider == "" {
		// Best effort: an unreadable DefaultPath simply yields no candidates.
		providers, _ := ioutil.ReadDir(DefaultPath)
		if len(providers) == 1 {
			provider = providers[0].Name()
		} else if len(providers) > 1 {
			return "", fmt.Errorf("More than one provider found in %s, please specify one\n", DefaultPath)
		}
	}
	provider = expandPath(provider)
	if provider == "" {
		return "", errors.New("Could not resolve a provider!")
	}
	info, err := os.Stat(provider)
	if err != nil {
		return "", err
	}
	// Require at least one execute bit (owner, group, or other).
	if (info.Mode() & 0111) == 0 {
		return "", fmt.Errorf("%s is not executable", provider)
	}
	return provider, nil
}

// Call shells out to a provider and return its output
// If call succeeds, stdout is returned with no error
// If call fails, "" is return with error containing stderr
func Call(provider, specPath string) (string, error) {
	var (
		stdOut bytes.Buffer
		stdErr bytes.Buffer
	)
	cmd := exec.Command(provider, specPath)
	cmd.Stdout = &stdOut
	cmd.Stderr = &stdErr
	err := cmd.Run()
	if err != nil {
		// BUG FIX: stderr was previously passed to fmt.Errorf as the format
		// string, so any '%' in the provider's output corrupted the message
		// (and go vet flags the non-constant format string).
		return "", errors.New(stdErr.String())
	}
	return strings.TrimSpace(stdOut.String()), nil
}

// Given a naked filename, returns a path to executable prefixed with DefaultPath
// This is so that "./provider" will work as expected.
func expandPath(provider string) string {
	// Base returns just the last path segment.
	// If it's different, that means it's a (rel or abs) path
	if path.Base(provider) != provider {
		return provider
	}
	return path.Join(DefaultPath, provider)
}
// Package lexer turns source text into a stream of tokens.
package lexer

import (
	"unicode"
	"unicode/utf8"

	"theduke/token"
)

// Lexer walks an input string rune by rune and produces tokens.
type Lexer struct {
	input        string
	position     int  // current position in input (start of current rune)
	readPosition int  // current reading position in input (next rune to read)
	ch           rune // current char under examination
}

// New builds a Lexer over input, primed with its first rune.
func New(input string) *Lexer {
	lx := &Lexer{input: input}
	lx.readChar()
	return lx
}

// readChar advances to the next rune; past the end of input, ch becomes 0.
func (l *Lexer) readChar() {
	if l.readPosition >= len(l.input) {
		l.ch = 0
		l.position = l.readPosition
		return
	}
	r, width := utf8.DecodeRuneInString(l.input[l.readPosition:])
	l.ch = r
	l.position = l.readPosition
	l.readPosition += width
}

// peekChar returns the upcoming rune without consuming it (0 at EOF).
func (l *Lexer) peekChar() rune {
	if l.readPosition >= len(l.input) {
		return 0
	}
	r, _ := utf8.DecodeRuneInString(l.input[l.readPosition:])
	return r
}

// NextToken skips whitespace and returns the next token in the input,
// classified via the token package's character predicates.
func (l *Lexer) NextToken() (tok token.Token) {
	l.skipWhiteSpace()

	switch {
	case l.ch == 0:
		tok = token.Token{Type: token.EOF, Literal: ""}
	case token.IsOperator(l.ch):
		tok.Literal = l.readOperator()
		tok.Type = token.LookupOperator(tok.Literal)
	case token.IsIdentifier(l.ch):
		tok.Literal = l.readIdentifier()
		tok.Type = token.LookupIdent(tok.Literal)
	case token.IsNumber(l.ch):
		tok.Type = token.INT
		tok.Literal = l.readNumber()
	case token.IsDelimiter(l.ch):
		tok = token.New(token.LookupDelimiter(l.ch), l.ch)
		l.readChar() // advance past delimiter
	default:
		tok = token.New(token.ILLEGAL, l.ch)
		l.readChar() // skip past illegal token
	}
	return
}

// readOperator consumes a maximal run of operator runes.
func (l *Lexer) readOperator() string {
	start := l.position
	for token.IsOperator(l.ch) {
		l.readChar()
	}
	return l.input[start:l.position]
}

// skipWhiteSpace consumes any Unicode whitespace.
func (l *Lexer) skipWhiteSpace() {
	for unicode.IsSpace(l.ch) {
		l.readChar()
	}
}

// readNumber consumes a maximal run of numeric runes.
func (l *Lexer) readNumber() string {
	start := l.position
	for token.IsNumber(l.ch) {
		l.readChar()
	}
	return l.input[start:l.position]
}

// readIdentifier consumes a maximal run of identifier runes.
func (l *Lexer) readIdentifier() string {
	start := l.position
	for token.IsIdentifier(l.ch) {
		l.readChar()
	}
	return l.input[start:l.position]
}
package libserver import ( "bytes" "context" "crypto/tls" "encoding/json" "fmt" "io/ioutil" "net/http" "os" "os/signal" "strconv" "strings" "time" "github.com/helloferdie/stdgo/libserver/claim" "github.com/helloferdie/stdgo/libresponse" "github.com/helloferdie/stdgo/libslice" "github.com/helloferdie/stdgo/libstring" "github.com/helloferdie/stdgo/libvalidator" "github.com/helloferdie/stdgo/logger" "github.com/labstack/echo/v4" "github.com/labstack/echo/v4/middleware" ) // Host - type Host struct { Echo *echo.Echo } // Initialize - initialize standard config func Initialize(e *echo.Echo) { e.HTTPErrorHandler = ErrorHandler e.Use(middleware.Recover()) e.Use(logger.EchoLogger) e.GET("/ping", Ping) } // StartHTTP - Start server in HTTP func StartHTTP(svr *echo.Echo) { // Start server go func() { if err := svr.Start(":" + os.Getenv("port")); err != http.ErrServerClosed { logger.MakeLogEntry(nil, false).Error(err) logger.MakeLogEntry(nil, false).Error("Fail start HTTP server") logger.MakeLogEntry(nil, false).Error("Shutting down the server") os.Exit(1) } }() // Wait for interrupt signal to gracefully shutdown the server with a timeout of 10 seconds. 
quit := make(chan os.Signal, 1) signal.Notify(quit, os.Interrupt) <-quit ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() if err := svr.Shutdown(ctx); err != nil { logger.MakeLogEntry(nil, false).Error("Fail shutting down server") os.Exit(1) } else { logger.MakeLogEntry(nil, false).Info("Shutdown HTTP server - done") } } // StartHTTPS - Start server in HTTPS func StartHTTPS(svr *echo.Echo, svrInternal *echo.Echo) { sslCertificate := os.Getenv("ssl_certificate") sslKey := os.Getenv("ssl_key") var err error var cert []byte if cert, err = ioutil.ReadFile(sslCertificate); err != nil { logger.MakeLogEntry(nil, false).Error("Fail to read certificate file") os.Exit(1) } var key []byte if key, err = ioutil.ReadFile(sslKey); err != nil { logger.MakeLogEntry(nil, false).Error("Fail to read key file") os.Exit(1) } s := svr.TLSServer s.TLSConfig = new(tls.Config) s.TLSConfig.MinVersion = tls.VersionTLS12 s.TLSConfig.CurvePreferences = []tls.CurveID{tls.CurveP521, tls.CurveP384, tls.CurveP256} s.TLSConfig.PreferServerCipherSuites = true s.TLSConfig.CipherSuites = []uint16{ tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, tls.TLS_RSA_WITH_AES_256_GCM_SHA384, tls.TLS_RSA_WITH_AES_256_CBC_SHA, } s.TLSConfig.Certificates = make([]tls.Certificate, 1) if s.TLSConfig.Certificates[0], err = tls.X509KeyPair(cert, key); err != nil { logger.MakeLogEntry(nil, false).Error("Fail to match certificate with key file") os.Exit(1) } s.Addr = ":" + os.Getenv("ssl_port") // Start redirect server svrRedir := echo.New() svrRedir.Pre(middleware.HTTPSRedirect()) go func() { if err := svrRedir.Start(":" + os.Getenv("port")); err != nil { logger.MakeLogEntry(nil, false).Error(err) logger.MakeLogEntry(nil, false).Error("Fail start HTTPS redirect server") logger.MakeLogEntry(nil, false).Error("Shutting down server") os.Exit(1) } }() // Start HTTPS server go func() { //svr if err := svr.StartServer(s); err != http.ErrServerClosed { 
logger.MakeLogEntry(nil, false).Error(err) logger.MakeLogEntry(nil, false).Error("Shutting down server") os.Exit(1) } }() // Start internal http server if svrInternal != nil { go func() { if err := svrInternal.Start(":" + os.Getenv("ssl_port_internal")); err != http.ErrServerClosed { logger.MakeLogEntry(nil, false).Error(err) logger.MakeLogEntry(nil, false).Error("Fail start internal server") logger.MakeLogEntry(nil, false).Error("Shutting down server") os.Exit(1) } }() } // Wait for interrupt signal to gracefully shutdown the server with a timeout of 10 seconds. quit := make(chan os.Signal, 1) signal.Notify(quit, os.Interrupt) <-quit ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() if err := svrRedir.Shutdown(ctx); err != nil { logger.MakeLogEntry(nil, false).Error("Fail shutting down HTTPS redirect server") os.Exit(1) } else { logger.MakeLogEntry(nil, false).Info("Shutdown HTTPS redirect server - done") } if err := svrRedir.Shutdown(ctx); err != nil { logger.MakeLogEntry(nil, false).Error("Fail shutting down HTTPS server") os.Exit(1) } else { logger.MakeLogEntry(nil, false).Info("Shutdown HTTPS server - done") } if svrInternal != nil { if err := svrInternal.Shutdown(ctx); err != nil { logger.MakeLogEntry(nil, false).Error("Fail shutting down internal server") os.Exit(1) } else { logger.MakeLogEntry(nil, false).Info("Shutdown internal server - done") } } /* For Sub Domain sslDomain := os.Getenv("ssl_domain") + ":" + os.Getenv("port") hosts := map[string]*Host{} hosts[sslDomain] = &Host{svr} e1 := echo.New() e1.Any("/*", func(c echo.Context) (err error) { req := c.Request() res := c.Response() host := hosts[req.Host] if len(hosts) == 1 { host = hosts[sslDomain] } if host == nil { res := libresponse.GetDefault() res.Success = false res.Code = 404 return Response(c, res) } host.Echo.ServeHTTP(res, req) return }) // Start SSL server go func() { if err := e1.StartServer(s); err != nil { logger.MakeLogEntry(nil, 
false).Error("Shutting down the server") os.Exit(1) } }() // Wait for interrupt signal to gracefully shutdown the server with a timeout of 10 seconds. quit := make(chan os.Signal, 1) signal.Notify(quit, os.Interrupt) <-quit ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() if err := e1.Shutdown(ctx); err != nil { logger.MakeLogEntry(nil, false).Error("Fail start SSL server") logger.MakeLogEntry(nil, false).Error("Fail shutting down the server") os.Exit(1) }*/ } // Response - func Response(c echo.Context, res *libresponse.Default) (err error) { locale := os.Getenv("locale") if locale == "1" { res = ResponseLocale(res, c.Request().Header.Get("Accept-Language")) // Remove locale variable databytes, _ := json.Marshal(res) m := map[string]interface{}{} json.Unmarshal(databytes, &m) delete(m, "message_var") delete(m, "error_var") return c.JSON(int(res.Code), m) } return c.JSON(int(res.Code), res) } // ResponseProxy - func ResponseProxy(res *http.Response) error { // Bypass process proxy response (response) if res.Header.Get("X-Proxy-Locale") == "0" { return nil } // Process proxy response (request) if res.Request.Header.Get("X-Locale") != "0" { resJSON := libresponse.GetDefault() resBody, errRead := ioutil.ReadAll(res.Body) if errRead == nil { errJSON := json.Unmarshal(resBody, &resJSON) if errJSON != nil { resJSON = libresponse.GetDefault() resJSON.Code = 502 resJSON.Message = "general.error_gateway" resJSON.Error = "general.error_response_not_valid" } } else { resJSON.Code = 502 resJSON.Message = "general.error_gateway" resJSON.Error = "general.error_response_read" } resJSON = ResponseLocale(resJSON, res.Request.Header.Get("Accept-Language")) databytes, _ := json.Marshal(resJSON) m := map[string]interface{}{} json.Unmarshal(databytes, &m) delete(m, "message_var") delete(m, "error_var") resByte, _ := json.Marshal(m) resLen := int64(len(string(resByte))) resLenStr := strconv.FormatInt(resLen, 10) res.Body = 
ioutil.NopCloser(bytes.NewBuffer(resByte)) res.ContentLength = resLen res.Header.Set("Content-Length", resLenStr) res.Header.Set("Content-Type", "application/json;") } return nil } // ResponseLocale - func ResponseLocale(data *libresponse.Default, lang string) *libresponse.Default { if data.Message != "" { if data.Message[0:1] == "!" { data.MessageLocale = libstring.Ucfirst(data.Message[1:]) } else { data.MessageLocale = libstring.Ucfirst(LoadLocale(data.Message, lang, data.MessageVar)) } } if data.Error != "" { if data.Error[0:1] == "!" { data.ErrorLocale = libstring.Ucfirst(data.Error[1:]) } else { data.ErrorLocale = libstring.Ucfirst(LoadLocale(data.Error, lang, data.ErrorVar)) } } if data.Code == 422 { jsonString, _ := json.Marshal(data.Data) listData := map[string]libvalidator.VarValidationError{} err := json.Unmarshal(jsonString, &listData) if err == nil && len(listData) > 0 { listNewData := map[string]interface{}{} for k, i := range listData { listNewData[k] = map[string]string{ "error": i.Error, "error_locale": libstring.Ucfirst(LoadLocale(i.Error, lang, i.ErrorVar)), } } data.Data = listNewData } } return data } // LoadLocale - func LoadLocale(syntax string, lang string, params []interface{}) string { dirLocale := os.Getenv("dir_locale") defaultLocale := os.Getenv("default_locale") if defaultLocale == "" { defaultLocale = "en" } split := strings.Split(syntax, ".") if len(split) < 2 { return LoadLocale("general.error_localization_syntax_not_valid", lang, []interface{}{syntax}) } filename := "/" + split[0] + ".json" jsonFile, err := os.Open(dirLocale + "/" + lang + filename) if err != nil { jsonFile, err = os.Open(dirLocale + "/" + defaultLocale + filename) if err != nil { return LoadLocale("general.error_localization_file_not_found", lang, []interface{}{syntax}) } } defer jsonFile.Close() byteValue, _ := ioutil.ReadAll(jsonFile) var result map[string]interface{} json.Unmarshal([]byte(byteValue), &result) val, exist := result[split[1]] if exist { 
localeParams := []interface{}{} escapeSyntax := []string{ "general.error_localization_syntax_not_valid", "general.error_localization_syntax_not_found", "general.error_localization_file_not_found", } for _, v := range params { t, ok := v.(string) if !ok { localeParams = append(localeParams, v) } else { if t != "" && t[0:1] == "!" { localeParams = append(localeParams, t[1:]) } else { _, inSlice := libslice.Contains(syntax, escapeSyntax) if inSlice { localeParams = append(localeParams, t) } else { localeParams = append(localeParams, LoadLocale(t, lang, nil)) } } } } return fmt.Sprintf(val.(string), localeParams...) } if lang != defaultLocale && syntax == "general.error_localization_syntax_not_found" { lang = defaultLocale } return LoadLocale("general.error_localization_syntax_not_found", lang, []interface{}{syntax}) } // RequestLog - func RequestLog(c echo.Context, stdout bool) error { // Read the Body content var bodyBytes []byte if c.Request().Body != nil { bodyBytes, _ = ioutil.ReadAll(c.Request().Body) } err := ioutil.WriteFile(os.Getenv("log_dir")+"/log_response.log", bodyBytes, 0777) if err == nil { // Print out if stdout { fmt.Println(string(bodyBytes)) } // Restore the io.ReadCloser to its original state c.Request().Body = ioutil.NopCloser(bytes.NewBuffer(bodyBytes)) } return err } // FormatOutputDefault - func FormatOutputDefault(c echo.Context) map[string]interface{} { m := map[string]interface{}{ "tz": "UTC", "header": nil, } loc, err := time.LoadLocation(c.Request().Header.Get("Accept-TZ")) if err == nil { m["tz"] = loc.String() } tmp := new(libresponse.Header) tmp.Authorization = c.Request().Header.Get("Authorization") tmp.AcceptLanguage = c.Request().Header.Get("Accept-Language") tmp.AcceptTimezone = c.Request().Header.Get("Accept-TZ") tmp.RealIP = GetRealIP(c) tmp.RequestURL = c.Request().URL.String() claims := claim.GetJWTClaims(c) if claims != nil { tmp.Claims = claims v, ok := claims["access"].(string) if ok { tmp.Access = v tmp.IsLogin = true 
tmp.UserID = int64(claims["user_id"].(float64)) } } m["header"] = tmp return m } // GetRealIP - func GetRealIP(c echo.Context) string { if c != nil { ip := c.Request().Header.Get("X-Real-Ip") if ip == "" { ip = c.Request().RemoteAddr } return ip } return "" } // SkipperRoute - func SkipperRoute(c echo.Context, root string, list []string) bool { arrURI := strings.Split(strings.Replace(c.Request().RequestURI, root, "", 1), "/") if len(arrURI) >= 2 { _, inSlice := libslice.Contains(arrURI[1], list) if inSlice { return true } } return false }
package _114_Flatten_Binary_Tree_to_Linked_List type TreeNode struct { Val int Left *TreeNode Right *TreeNode } func flatten(root *TreeNode) { //flattenRecursion(root) flattenIteration(root) } func flattenIteration(root *TreeNode) { cur := root for cur != nil { lp := cur.Left if cur.Left != nil { for lp.Right != nil { lp = lp.Right } lp.Right = cur.Right cur.Right = cur.Left cur.Left = nil } cur = cur.Right } } func flattenRecursion(root *TreeNode) { if root == nil { return } if root.Left != nil { flattenRecursion(root.Left) } if root.Right != nil { flattenRecursion(root.Right) } tmp := root.Right root.Right = root.Left root.Left = nil x := root for x.Right != nil { x = x.Right } x.Right = tmp }
/* Copyright 2021 The KubeVela Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package query import ( "context" "encoding/json" "fmt" "os" "sort" "strconv" "time" appsv1 "k8s.io/api/apps/v1" v12 "k8s.io/api/core/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" types2 "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/duration" "k8s.io/klog/v2" "k8s.io/kubectl/pkg/util/podutils" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/yaml" velatypes "github.com/oam-dev/kubevela/apis/types" "github.com/oam-dev/kubevela/pkg/multicluster" "github.com/oam-dev/kubevela/pkg/oam" "github.com/oam-dev/kubevela/pkg/velaql/providers/query/types" helmreleaseapi "github.com/fluxcd/helm-controller/api/v2beta1" helmrepoapi "github.com/fluxcd/source-controller/api/v1beta2" ) const ( // DefaultMaxDepth is the default max depth for query iterator // check maxDepth function to get the customized val for max depth DefaultMaxDepth = 5 ) // relationshipKey is the configmap key of relationShip rule var relationshipKey = "rules" // RuleList the rule list type RuleList []ChildrenResourcesRule // GetRule get the rule by the resource type func (rl *RuleList) GetRule(grt GroupResourceType) (*ChildrenResourcesRule, bool) { for i, r := range *rl { if r.GroupResourceType == grt { return &(*rl)[i], true } } return nil, 
false } // globalRule define the whole relationShip rule var globalRule RuleList func init() { globalRule = append(globalRule, ChildrenResourcesRule{ GroupResourceType: GroupResourceType{Group: "apps", Kind: "Deployment"}, SubResources: buildSubResources([]*SubResourceSelector{ { ResourceType: ResourceType{APIVersion: "apps/v1", Kind: "ReplicaSet"}, listOptions: defaultWorkloadLabelListOption, }, }), }, ChildrenResourcesRule{ SubResources: buildSubResources([]*SubResourceSelector{ { ResourceType: ResourceType{APIVersion: "v1", Kind: "Pod"}, listOptions: defaultWorkloadLabelListOption, }, }), GroupResourceType: GroupResourceType{Group: "apps", Kind: "ReplicaSet"}, }, ChildrenResourcesRule{ SubResources: buildSubResources([]*SubResourceSelector{ { ResourceType: ResourceType{APIVersion: "v1", Kind: "Pod"}, listOptions: defaultWorkloadLabelListOption, }, }), GroupResourceType: GroupResourceType{Group: "apps", Kind: "StatefulSet"}, }, ChildrenResourcesRule{ SubResources: buildSubResources([]*SubResourceSelector{ { ResourceType: ResourceType{APIVersion: "v1", Kind: "Pod"}, listOptions: defaultWorkloadLabelListOption, }, }), GroupResourceType: GroupResourceType{Group: "apps", Kind: "DaemonSet"}, }, ChildrenResourcesRule{ GroupResourceType: GroupResourceType{Group: "batch", Kind: "Job"}, SubResources: buildSubResources([]*SubResourceSelector{ { ResourceType: ResourceType{APIVersion: "v1", Kind: "Pod"}, listOptions: defaultWorkloadLabelListOption, }, }), }, ChildrenResourcesRule{ GroupResourceType: GroupResourceType{Group: "", Kind: "Service"}, SubResources: buildSubResources([]*SubResourceSelector{ { ResourceType: ResourceType{APIVersion: "discovery.k8s.io/v1beta1", Kind: "EndpointSlice"}, }, { ResourceType: ResourceType{APIVersion: "discovery.k8s.io/v1", Kind: "EndpointSlice"}, }, { ResourceType: ResourceType{APIVersion: "v1", Kind: "Endpoints"}, listOptions: service2EndpointListOption, }, }), }, ChildrenResourcesRule{ GroupResourceType: GroupResourceType{Group: 
"helm.toolkit.fluxcd.io", Kind: "HelmRelease"}, SubResources: buildSubResources([]*SubResourceSelector{ { ResourceType: ResourceType{APIVersion: "apps/v1", Kind: "Deployment"}, }, { ResourceType: ResourceType{APIVersion: "apps/v1", Kind: "StatefulSet"}, }, { ResourceType: ResourceType{APIVersion: "v1", Kind: "ConfigMap"}, }, { ResourceType: ResourceType{APIVersion: "v1", Kind: "Secret"}, }, { ResourceType: ResourceType{APIVersion: "v1", Kind: "Service"}, }, { ResourceType: ResourceType{APIVersion: "v1", Kind: "PersistentVolumeClaim"}, }, { ResourceType: ResourceType{APIVersion: "networking.k8s.io/v1", Kind: "Ingress"}, }, { ResourceType: ResourceType{APIVersion: "gateway.networking.k8s.io/v1beta1", Kind: "HTTPRoute"}, }, { ResourceType: ResourceType{APIVersion: "gateway.networking.k8s.io/v1beta1", Kind: "Gateway"}, }, { ResourceType: ResourceType{APIVersion: "v1", Kind: "ServiceAccount"}, }, { ResourceType: ResourceType{APIVersion: "rbac.authorization.k8s.io/v1", Kind: "Role"}, }, { ResourceType: ResourceType{APIVersion: "rbac.authorization.k8s.io/v1", Kind: "RoleBinding"}, }, }), DefaultGenListOptionFunc: helmRelease2AnyListOption, DisableFilterByOwnerReference: true, }, ChildrenResourcesRule{ GroupResourceType: GroupResourceType{Group: "kustomize.toolkit.fluxcd.io", Kind: "Kustomization"}, SubResources: buildSubResources([]*SubResourceSelector{ { ResourceType: ResourceType{APIVersion: "apps/v1", Kind: "Deployment"}, }, { ResourceType: ResourceType{APIVersion: "apps/v1", Kind: "StatefulSet"}, }, { ResourceType: ResourceType{APIVersion: "v1", Kind: "ConfigMap"}, }, { ResourceType: ResourceType{APIVersion: "v1", Kind: "Secret"}, }, { ResourceType: ResourceType{APIVersion: "v1", Kind: "Service"}, }, { ResourceType: ResourceType{APIVersion: "v1", Kind: "PersistentVolumeClaim"}, }, { ResourceType: ResourceType{APIVersion: "networking.k8s.io/v1", Kind: "Ingress"}, }, { ResourceType: ResourceType{APIVersion: "gateway.networking.k8s.io/v1beta1", Kind: "HTTPRoute"}, }, { 
ResourceType: ResourceType{APIVersion: "gateway.networking.k8s.io/v1beta1", Kind: "Gateway"}, }, { ResourceType: ResourceType{APIVersion: "v1", Kind: "ServiceAccount"}, }, { ResourceType: ResourceType{APIVersion: "rbac.authorization.k8s.io/v1", Kind: "Role"}, }, { ResourceType: ResourceType{APIVersion: "rbac.authorization.k8s.io/v1", Kind: "RoleBinding"}, }, }), DefaultGenListOptionFunc: kustomization2AnyListOption, DisableFilterByOwnerReference: true, }, ChildrenResourcesRule{ SubResources: buildSubResources([]*SubResourceSelector{ { ResourceType: ResourceType{APIVersion: "batch/v1", Kind: "Job"}, listOptions: cronJobLabelListOption, }, }), GroupResourceType: GroupResourceType{Group: "batch", Kind: "CronJob"}, }, ) } // GroupResourceType define the parent resource type type GroupResourceType struct { Group string `json:"group"` Kind string `json:"kind"` } // ResourceType define the children resource type type ResourceType struct { APIVersion string `json:"apiVersion,omitempty"` Kind string `json:"kind,omitempty"` } // customRule define the customize rule created by user type customRule struct { ParentResourceType *GroupResourceType `json:"parentResourceType,omitempty"` ChildrenResourceType []CustomSelector `json:"childrenResourceType,omitempty"` } // CustomSelector the custom resource selector configuration in configmap. support set the default label selector policy type CustomSelector struct { ResourceType `json:",inline"` // defaultLabelSelector means read the label selector condition from the spec.selector. DefaultLabelSelector bool `json:"defaultLabelSelector"` } // ChildrenResourcesRule define the relationShip between parentObject and children resource type ChildrenResourcesRule struct { // GroupResourceType the root resource type GroupResourceType GroupResourceType // every subResourceType can have a specified genListOptionFunc. SubResources *SubResources // if specified genListOptionFunc is nil will use use default genListOptionFunc to generate listOption. 
DefaultGenListOptionFunc genListOptionFunc // DisableFilterByOwnerReference means don't use parent resource's UID filter the result. DisableFilterByOwnerReference bool } func buildSubResources(crs []*SubResourceSelector) *SubResources { var cr SubResources = crs return &cr } func buildSubResourceSelector(cus CustomSelector) *SubResourceSelector { cr := SubResourceSelector{ ResourceType: cus.ResourceType, } if cus.DefaultLabelSelector { cr.listOptions = defaultWorkloadLabelListOption } return &cr } // SubResources the sub resource definitions type SubResources []*SubResourceSelector // Get get the sub resource by the resource type func (c *SubResources) Get(rt ResourceType) *SubResourceSelector { for _, r := range *c { if r.ResourceType == rt { return r } } return nil } // Put add a sub resource to the list func (c *SubResources) Put(cr *SubResourceSelector) { *c = append(*c, cr) } // SubResourceSelector the sub resource selector configuration type SubResourceSelector struct { ResourceType listOptions genListOptionFunc } type genListOptionFunc func(unstructured.Unstructured) (client.ListOptions, error) // WorkloadUnstructured the workload unstructured, such as Deployment、Job、StatefulSet、ReplicaSet and DaemonSet type WorkloadUnstructured struct { unstructured.Unstructured } // GetSelector get the selector from the field path func (w *WorkloadUnstructured) GetSelector(fields ...string) (labels.Selector, error) { value, exist, err := unstructured.NestedFieldNoCopy(w.Object, fields...) 
if err != nil { return nil, err } if !exist { return labels.Everything(), nil } if v, ok := value.(map[string]interface{}); ok { var selector v1.LabelSelector if err := runtime.DefaultUnstructuredConverter.FromUnstructured(v, &selector); err != nil { return nil, err } return v1.LabelSelectorAsSelector(&selector) } return labels.Everything(), nil } func (w *WorkloadUnstructured) convertLabel2Selector(fields ...string) (labels.Selector, error) { value, exist, err := unstructured.NestedFieldNoCopy(w.Object, fields...) if err != nil { return nil, err } if !exist { return labels.Everything(), nil } if v, ok := value.(map[string]interface{}); ok { var selector v1.LabelSelector if err := runtime.DefaultUnstructuredConverter.FromUnstructured(v, &selector.MatchLabels); err != nil { return nil, err } return v1.LabelSelectorAsSelector(&selector) } return labels.Everything(), nil } var defaultWorkloadLabelListOption genListOptionFunc = func(obj unstructured.Unstructured) (client.ListOptions, error) { workload := WorkloadUnstructured{obj} deploySelector, err := workload.GetSelector("spec", "selector") if err != nil { return client.ListOptions{}, err } return client.ListOptions{Namespace: obj.GetNamespace(), LabelSelector: deploySelector}, nil } var service2EndpointListOption = func(obj unstructured.Unstructured) (client.ListOptions, error) { svc := v12.Service{} err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &svc) if err != nil { return client.ListOptions{}, err } stsSelector, err := v1.LabelSelectorAsSelector(&v1.LabelSelector{MatchLabels: svc.Labels}) if err != nil { return client.ListOptions{}, err } return client.ListOptions{Namespace: svc.Namespace, LabelSelector: stsSelector}, nil } var cronJobLabelListOption = func(obj unstructured.Unstructured) (client.ListOptions, error) { workload := WorkloadUnstructured{obj} cronJobSelector, err := workload.convertLabel2Selector("spec", "jobTemplate", "metadata", "labels") if err != nil { return 
client.ListOptions{}, err } return client.ListOptions{Namespace: obj.GetNamespace(), LabelSelector: cronJobSelector}, nil } var helmRelease2AnyListOption = func(obj unstructured.Unstructured) (client.ListOptions, error) { hrSelector, err := v1.LabelSelectorAsSelector(&v1.LabelSelector{MatchLabels: map[string]string{ "helm.toolkit.fluxcd.io/name": obj.GetName(), "helm.toolkit.fluxcd.io/namespace": obj.GetNamespace(), }}) if err != nil { return client.ListOptions{}, err } return client.ListOptions{LabelSelector: hrSelector}, nil } var kustomization2AnyListOption = func(obj unstructured.Unstructured) (client.ListOptions, error) { kusSelector, err := v1.LabelSelectorAsSelector(&v1.LabelSelector{MatchLabels: map[string]string{ "kustomize.toolkit.fluxcd.io/name": obj.GetName(), "kustomize.toolkit.fluxcd.io/namespace": obj.GetNamespace(), }}) if err != nil { return client.ListOptions{}, err } return client.ListOptions{LabelSelector: kusSelector}, nil } type healthyCheckFunc func(obj unstructured.Unstructured) (*types.HealthStatus, error) var checkPodStatus = func(obj unstructured.Unstructured) (*types.HealthStatus, error) { var pod v12.Pod err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &pod) if err != nil { return nil, fmt.Errorf("failed to convert unstructured Pod to typed: %w", err) } getFailMessage := func(ctr *v12.ContainerStatus) string { if ctr.State.Terminated != nil { if ctr.State.Terminated.Message != "" { return ctr.State.Terminated.Message } if ctr.State.Terminated.Reason == "OOMKilled" { return ctr.State.Terminated.Reason } if ctr.State.Terminated.ExitCode != 0 { return fmt.Sprintf("container %q failed with exit code %d", ctr.Name, ctr.State.Terminated.ExitCode) } } return "" } switch pod.Status.Phase { case v12.PodSucceeded: return &types.HealthStatus{ Status: types.HealthStatusHealthy, Reason: pod.Status.Reason, Message: pod.Status.Message, }, nil case v12.PodRunning: switch pod.Spec.RestartPolicy { case v12.RestartPolicyAlways: // 
if pod is ready, it is automatically healthy if podutils.IsPodReady(&pod) { return &types.HealthStatus{ Status: types.HealthStatusHealthy, Reason: "all containers are ready", }, nil } // if it's not ready, check to see if any container terminated, if so, it's unhealthy for _, ctrStatus := range pod.Status.ContainerStatuses { if ctrStatus.LastTerminationState.Terminated != nil { return &types.HealthStatus{ Status: types.HealthStatusUnHealthy, Reason: pod.Status.Reason, Message: pod.Status.Message, }, nil } } // otherwise we are progressing towards a ready state return &types.HealthStatus{ Status: types.HealthStatusProgressing, Reason: pod.Status.Reason, Message: pod.Status.Message, }, nil case v12.RestartPolicyOnFailure, v12.RestartPolicyNever: // pods set with a restart policy of OnFailure or Never, have a finite life. // These pods are typically resource hooks. Thus, we consider these as Progressing // instead of healthy. return &types.HealthStatus{ Status: types.HealthStatusProgressing, Reason: pod.Status.Reason, Message: pod.Status.Message, }, nil } case v12.PodPending: return &types.HealthStatus{ Status: types.HealthStatusProgressing, Message: pod.Status.Message, }, nil case v12.PodFailed: if pod.Status.Message != "" { // Pod has a nice error message. Use that. return &types.HealthStatus{Status: types.HealthStatusUnHealthy, Message: pod.Status.Message}, nil } for _, ctr := range append(pod.Status.InitContainerStatuses, pod.Status.ContainerStatuses...) 
{ if msg := getFailMessage(ctr.DeepCopy()); msg != "" { return &types.HealthStatus{Status: types.HealthStatusUnHealthy, Message: msg}, nil } } return &types.HealthStatus{Status: types.HealthStatusUnHealthy, Message: ""}, nil default: } return &types.HealthStatus{ Status: types.HealthStatusUnKnown, Reason: string(pod.Status.Phase), Message: pod.Status.Message, }, nil } var checkHelmReleaseStatus = func(obj unstructured.Unstructured) (*types.HealthStatus, error) { helmRelease := &helmreleaseapi.HelmRelease{} err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &helmRelease) if err != nil { return nil, fmt.Errorf("failed to convert unstructured helmRelease to typed: %w", err) } if len(helmRelease.Status.Conditions) != 0 { for _, condition := range helmRelease.Status.Conditions { if condition.Type == "Ready" { if condition.Status == v1.ConditionTrue { return &types.HealthStatus{ Status: types.HealthStatusHealthy, }, nil } return &types.HealthStatus{ Status: types.HealthStatusUnHealthy, Message: condition.Message, }, nil } } } return &types.HealthStatus{ Status: types.HealthStatusUnKnown, }, nil } var checkHelmRepoStatus = func(obj unstructured.Unstructured) (*types.HealthStatus, error) { helmRepo := helmrepoapi.HelmRepository{} err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &helmRepo) if err != nil { return nil, fmt.Errorf("failed to convert unstructured helmRelease to typed: %w", err) } if len(helmRepo.Status.Conditions) != 0 { for _, condition := range helmRepo.Status.Conditions { if condition.Type == "Ready" { if condition.Status == v1.ConditionTrue { return &types.HealthStatus{ Status: types.HealthStatusHealthy, Message: condition.Message, }, nil } return &types.HealthStatus{ Status: types.HealthStatusUnHealthy, Message: condition.Message, }, nil } } } return &types.HealthStatus{ Status: types.HealthStatusUnKnown, }, nil } var checkReplicaSetStatus = func(obj unstructured.Unstructured) (*types.HealthStatus, error) { 
replicaSet := appsv1.ReplicaSet{} err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &replicaSet) if err != nil { return nil, fmt.Errorf("failed to convert unstructured ReplicaSet to typed: %w", err) } if replicaSet.Generation <= replicaSet.Status.ObservedGeneration { cond := getAppsv1ReplicaSetCondition(replicaSet.Status, appsv1.ReplicaSetReplicaFailure) if cond != nil && cond.Status == v12.ConditionTrue { return &types.HealthStatus{ Status: types.HealthStatusUnHealthy, Reason: cond.Reason, Message: cond.Message, }, nil } else if replicaSet.Spec.Replicas != nil && replicaSet.Status.AvailableReplicas < *replicaSet.Spec.Replicas { return &types.HealthStatus{ Status: types.HealthStatusProgressing, Message: fmt.Sprintf("Waiting for rollout to finish: %d out of %d new replicas are available...", replicaSet.Status.AvailableReplicas, *replicaSet.Spec.Replicas), }, nil } } else { return &types.HealthStatus{ Status: types.HealthStatusProgressing, Message: "Waiting for rollout to finish: observed replica set generation less then desired generation", }, nil } return &types.HealthStatus{ Status: types.HealthStatusHealthy, }, nil } func getAppsv1ReplicaSetCondition(status appsv1.ReplicaSetStatus, condType appsv1.ReplicaSetConditionType) *appsv1.ReplicaSetCondition { for i := range status.Conditions { c := status.Conditions[i] if c.Type == condType { return &c } } return nil } var checkPVCHealthStatus = func(obj unstructured.Unstructured) (*types.HealthStatus, error) { pvc := v12.PersistentVolumeClaim{} err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &pvc) if err != nil { return nil, fmt.Errorf("failed to convert unstructured PVC to typed: %w", err) } var status types.HealthStatusCode switch pvc.Status.Phase { case v12.ClaimLost: status = types.HealthStatusUnHealthy case v12.ClaimPending: status = types.HealthStatusProgressing case v12.ClaimBound: status = types.HealthStatusHealthy default: status = types.HealthStatusUnKnown } 
return &types.HealthStatus{Status: status}, nil } var checkServiceStatus = func(obj unstructured.Unstructured) (*types.HealthStatus, error) { svc := v12.Service{} err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &svc) if err != nil { return nil, fmt.Errorf("failed to convert unstructured service to typed: %w", err) } health := types.HealthStatus{Status: types.HealthStatusHealthy} if svc.Spec.Type == v12.ServiceTypeLoadBalancer { if len(svc.Status.LoadBalancer.Ingress) > 0 { health.Status = types.HealthStatusHealthy } else { health.Status = types.HealthStatusProgressing } } return &health, nil } // CheckResourceStatus return object status data func CheckResourceStatus(obj unstructured.Unstructured) (*types.HealthStatus, error) { group := obj.GroupVersionKind().Group kind := obj.GroupVersionKind().Kind var checkFunc healthyCheckFunc switch group { case "": switch kind { case "Pod": checkFunc = checkPodStatus case "Service": checkFunc = checkServiceStatus case "PersistentVolumeClaim": checkFunc = checkPVCHealthStatus } case "apps": switch kind { case "ReplicaSet": checkFunc = checkReplicaSetStatus default: } case "helm.toolkit.fluxcd.io": switch kind { case "HelmRelease": checkFunc = checkHelmReleaseStatus default: } case "source.toolkit.fluxcd.io": switch kind { case "HelmRepository": checkFunc = checkHelmRepoStatus default: } default: } if checkFunc != nil { return checkFunc(obj) } return &types.HealthStatus{Status: types.HealthStatusHealthy}, nil } type additionalInfoFunc func(obj unstructured.Unstructured) (map[string]interface{}, error) func additionalInfo(obj unstructured.Unstructured) (map[string]interface{}, error) { group := obj.GroupVersionKind().Group kind := obj.GroupVersionKind().Kind var infoFunc additionalInfoFunc switch group { case "": switch kind { case "Pod": infoFunc = podAdditionalInfo case "Service": infoFunc = svcAdditionalInfo } case "apps": switch kind { case "Deployment": infoFunc = deploymentAdditionalInfo case 
"StatefulSet": infoFunc = statefulSetAdditionalInfo default: } default: } if infoFunc != nil { return infoFunc(obj) } return nil, nil } func svcAdditionalInfo(obj unstructured.Unstructured) (map[string]interface{}, error) { svc := v12.Service{} err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &svc) if err != nil { return nil, fmt.Errorf("failed to convert unstructured svc to typed: %w", err) } if svc.Spec.Type == v12.ServiceTypeLoadBalancer { var eip string for _, ingress := range svc.Status.LoadBalancer.Ingress { if len(ingress.IP) != 0 { eip = ingress.IP } } if len(eip) == 0 { eip = "pending" } return map[string]interface{}{ "EIP": eip, }, nil } return nil, nil } // the logic of this func totaly copy from the source-code of kubernetes tableConvertor // https://github.com/kubernetes/kubernetes/blob/ea0764452222146c47ec826977f49d7001b0ea8c/pkg/printers/internalversion/printers.go#L740 // The result is same with the output of kubectl. // nolint func podAdditionalInfo(obj unstructured.Unstructured) (map[string]interface{}, error) { pod := v12.Pod{} err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &pod) if err != nil { return nil, fmt.Errorf("failed to convert unstructured Pod to typed: %w", err) } hasPodReadyCondition := func(conditions []v12.PodCondition) bool { for _, condition := range conditions { if condition.Type == v12.PodReady && condition.Status == v12.ConditionTrue { return true } } return false } restarts := 0 totalContainers := len(pod.Spec.Containers) readyContainers := 0 reason := string(pod.Status.Phase) if pod.Status.Reason != "" { reason = pod.Status.Reason } initializing := false for i := range pod.Status.InitContainerStatuses { container := pod.Status.InitContainerStatuses[i] restarts += int(container.RestartCount) switch { case container.State.Terminated != nil && container.State.Terminated.ExitCode == 0: continue case container.State.Terminated != nil: // initialization is failed if 
len(container.State.Terminated.Reason) == 0 { if container.State.Terminated.Signal != 0 { reason = fmt.Sprintf("Init:Signal:%d", container.State.Terminated.Signal) } else { reason = fmt.Sprintf("Init:ExitCode:%d", container.State.Terminated.ExitCode) } } else { reason = "Init:" + container.State.Terminated.Reason } initializing = true case container.State.Waiting != nil && len(container.State.Waiting.Reason) > 0 && container.State.Waiting.Reason != "PodInitializing": reason = "Init:" + container.State.Waiting.Reason initializing = true default: reason = fmt.Sprintf("Init:%d/%d", i, len(pod.Spec.InitContainers)) initializing = true } break } if !initializing { restarts = 0 hasRunning := false for i := len(pod.Status.ContainerStatuses) - 1; i >= 0; i-- { container := pod.Status.ContainerStatuses[i] restarts += int(container.RestartCount) if container.State.Waiting != nil && container.State.Waiting.Reason != "" { reason = container.State.Waiting.Reason } else if container.State.Terminated != nil && container.State.Terminated.Reason != "" { reason = container.State.Terminated.Reason } else if container.State.Terminated != nil && container.State.Terminated.Reason == "" { if container.State.Terminated.Signal != 0 { reason = fmt.Sprintf("Signal:%d", container.State.Terminated.Signal) } else { reason = fmt.Sprintf("ExitCode:%d", container.State.Terminated.ExitCode) } } else if container.Ready && container.State.Running != nil { hasRunning = true readyContainers++ } } // change pod status back to "Running" if there is at least one container still reporting as "Running" status if reason == "Completed" && hasRunning { if hasPodReadyCondition(pod.Status.Conditions) { reason = "Running" } else { reason = "NotReady" } } } if pod.DeletionTimestamp != nil && pod.Status.Reason == "NodeLost" { reason = "Unknown" } else if pod.DeletionTimestamp != nil { reason = "Terminating" } return map[string]interface{}{ "Ready": fmt.Sprintf("%d/%d", readyContainers, totalContainers), "Status": 
reason, "Restarts": restarts, "Age": translateTimestampSince(pod.CreationTimestamp), }, nil } func deploymentAdditionalInfo(obj unstructured.Unstructured) (map[string]interface{}, error) { deployment := appsv1.Deployment{} err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &deployment) if err != nil { return nil, fmt.Errorf("failed to convert unstructured Deployment to typed: %w", err) } readyReplicas := deployment.Status.ReadyReplicas desiredReplicas := deployment.Spec.Replicas updatedReplicas := deployment.Status.UpdatedReplicas availableReplicas := deployment.Status.AvailableReplicas return map[string]interface{}{ "Ready": fmt.Sprintf("%d/%d", readyReplicas, *desiredReplicas), "Update": updatedReplicas, "Available": availableReplicas, "Age": translateTimestampSince(deployment.CreationTimestamp), }, nil } func statefulSetAdditionalInfo(obj unstructured.Unstructured) (map[string]interface{}, error) { statefulSet := appsv1.StatefulSet{} err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &statefulSet) if err != nil { return nil, fmt.Errorf("failed to convert unstructured StatefulSet to typed: %w", err) } readyReplicas := statefulSet.Status.ReadyReplicas desiredReplicas := statefulSet.Spec.Replicas return map[string]interface{}{ "Ready": fmt.Sprintf("%d/%d", readyReplicas, *desiredReplicas), "Age": translateTimestampSince(statefulSet.CreationTimestamp), }, nil } func fetchObjectWithResourceTreeNode(ctx context.Context, cluster string, k8sClient client.Client, resource types.ResourceTreeNode) (*unstructured.Unstructured, error) { o := unstructured.Unstructured{} o.SetAPIVersion(resource.APIVersion) o.SetKind(resource.Kind) o.SetNamespace(resource.Namespace) o.SetName(resource.Name) err := k8sClient.Get(multicluster.ContextWithClusterName(ctx, cluster), types2.NamespacedName{Namespace: resource.Namespace, Name: resource.Name}, &o) if err != nil { return nil, err } return &o, nil } func listItemByRule(clusterCTX 
context.Context, k8sClient client.Client, resource ResourceType, parentObject unstructured.Unstructured, specifiedFunc genListOptionFunc, defaultFunc genListOptionFunc, disableFilterByOwner bool) ([]unstructured.Unstructured, error) { itemList := unstructured.UnstructuredList{} itemList.SetAPIVersion(resource.APIVersion) itemList.SetKind(fmt.Sprintf("%sList", resource.Kind)) var err error if specifiedFunc == nil && defaultFunc == nil { // if the relationShip between parent and child hasn't defined by any genListOption, list all subResource and filter by ownerReference UID err = k8sClient.List(clusterCTX, &itemList) if err != nil { return nil, err } var res []unstructured.Unstructured for _, item := range itemList.Items { for _, reference := range item.GetOwnerReferences() { if reference.UID == parentObject.GetUID() { res = append(res, item) } } } sort.Slice(res, func(i, j int) bool { return res[i].GetName() < res[j].GetName() }) return res, nil } var listOptions client.ListOptions if specifiedFunc != nil { // specified func will override the default func listOptions, err = specifiedFunc(parentObject) if err != nil { return nil, err } } else { listOptions, err = defaultFunc(parentObject) if err != nil { return nil, err } } err = k8sClient.List(clusterCTX, &itemList, &listOptions) if err != nil { return nil, err } if !disableFilterByOwner { var res []unstructured.Unstructured for _, item := range itemList.Items { if len(item.GetOwnerReferences()) == 0 { res = append(res, item) } for _, reference := range item.GetOwnerReferences() { if reference.UID == parentObject.GetUID() { res = append(res, item) } } } return res, nil } sort.Slice(itemList.Items, func(i, j int) bool { return itemList.Items[i].GetName() < itemList.Items[j].GetName() }) return itemList.Items, nil } func iterateListSubResources(ctx context.Context, cluster string, k8sClient client.Client, parentResource types.ResourceTreeNode, depth int, filter func(node types.ResourceTreeNode) bool) 
([]*types.ResourceTreeNode, error) { if depth > maxDepth() { klog.Warningf("listing application resource tree has reached the max-depth %d parentObject is %v", depth, parentResource) return nil, nil } parentObject, err := fetchObjectWithResourceTreeNode(ctx, cluster, k8sClient, parentResource) if err != nil { return nil, err } group := parentObject.GetObjectKind().GroupVersionKind().Group kind := parentObject.GetObjectKind().GroupVersionKind().Kind if rule, ok := globalRule.GetRule(GroupResourceType{Group: group, Kind: kind}); ok { var resList []*types.ResourceTreeNode for i := range *rule.SubResources { resource := (*rule.SubResources)[i].ResourceType specifiedFunc := (*rule.SubResources)[i].listOptions clusterCTX := multicluster.ContextWithClusterName(ctx, cluster) items, err := listItemByRule(clusterCTX, k8sClient, resource, *parentObject, specifiedFunc, rule.DefaultGenListOptionFunc, rule.DisableFilterByOwnerReference) if err != nil { if meta.IsNoMatchError(err) || runtime.IsNotRegisteredError(err) || kerrors.IsNotFound(err) { klog.Warningf("ignore list resources: %s as %v", resource.Kind, err) continue } return nil, err } for i, item := range items { rtn := types.ResourceTreeNode{ APIVersion: item.GetAPIVersion(), Kind: item.GroupVersionKind().Kind, Namespace: item.GetNamespace(), Name: item.GetName(), UID: item.GetUID(), Cluster: cluster, Object: items[i], } if _, ok := globalRule.GetRule(GroupResourceType{Group: item.GetObjectKind().GroupVersionKind().Group, Kind: item.GetObjectKind().GroupVersionKind().Kind}); ok { childrenRes, err := iterateListSubResources(ctx, cluster, k8sClient, rtn, depth+1, filter) if err != nil { return nil, err } rtn.LeafNodes = childrenRes } if !filter(rtn) && len(rtn.LeafNodes) == 0 { continue } healthStatus, err := CheckResourceStatus(item) if err != nil { return nil, err } rtn.HealthStatus = *healthStatus addInfo, err := additionalInfo(item) if err != nil { return nil, err } rtn.CreationTimestamp = 
item.GetCreationTimestamp().Time if !item.GetDeletionTimestamp().IsZero() { rtn.DeletionTimestamp = item.GetDeletionTimestamp().Time } rtn.AdditionalInfo = addInfo resList = append(resList, &rtn) } } return resList, nil } return nil, nil } // mergeCustomRules merge user defined resource topology rules with the system ones func mergeCustomRules(ctx context.Context, k8sClient client.Client) error { rulesList := v12.ConfigMapList{} if err := k8sClient.List(ctx, &rulesList, client.InNamespace(velatypes.DefaultKubeVelaNS), client.HasLabels{oam.LabelResourceRules}); err != nil { return client.IgnoreNotFound(err) } for _, item := range rulesList.Items { ruleStr := item.Data[relationshipKey] var ( customRules []*customRule format string err error ) if item.Labels != nil { format = item.Labels[oam.LabelResourceRuleFormat] } switch format { case oam.ResourceTopologyFormatJSON: err = json.Unmarshal([]byte(ruleStr), &customRules) case oam.ResourceTopologyFormatYAML, "": err = yaml.Unmarshal([]byte(ruleStr), &customRules) } if err != nil { // don't let one miss-config configmap brake whole process klog.Errorf("relationship rule configmap %s miss config %v", item.Name, err) continue } for _, rule := range customRules { if cResource, ok := globalRule.GetRule(*rule.ParentResourceType); ok { for i, resourceType := range rule.ChildrenResourceType { if cResource.SubResources.Get(resourceType.ResourceType) == nil { cResource.SubResources.Put(buildSubResourceSelector(rule.ChildrenResourceType[i])) } } } else { var subResources []*SubResourceSelector for i := range rule.ChildrenResourceType { subResources = append(subResources, buildSubResourceSelector(rule.ChildrenResourceType[i])) } globalRule = append(globalRule, ChildrenResourcesRule{ GroupResourceType: *rule.ParentResourceType, DefaultGenListOptionFunc: nil, SubResources: buildSubResources(subResources)}) } } } return nil } func translateTimestampSince(timestamp v1.Time) string { if timestamp.IsZero() { return "<unknown>" } return 
duration.HumanDuration(time.Since(timestamp.Time)) } // check if max depth is provided or return the default max depth func maxDepth() int { maxDepth, err := strconv.Atoi(os.Getenv("KUBEVELA_QUERYTREE_MAXDEPTH")) if err != nil || maxDepth <= 0 { return DefaultMaxDepth } return maxDepth }
package client

import (
	"context"
	"fmt"
	"io"
	"strings"

	"github.com/wish/ctl/pkg/client/logsync"
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/rest"
)

// LogPodOverContexts retrieves logs of a single pod (uses first found if multiple).
// It returns a rest.Request that the caller can stream; any lookup or
// context-resolution failure is returned as an error (previously this panicked).
func (c *Client) LogPodOverContexts(contexts []string, namespace, name, container string, options LogOptions) (*rest.Request, error) {
	pod, container, err := c.FindPodWithContainer(contexts, namespace, name, container, ListOptions{options.LabelMatch, options.StatusMatch, nil})
	if err != nil {
		return nil, err
	}
	cl, err := c.getContextInterface(pod.Context)
	if err != nil {
		// BUG fix: this used to panic(err.Error()); the function already
		// returns an error, so surface it to the caller instead.
		return nil, err
	}
	req := cl.CoreV1().Pods(pod.Namespace).GetLogs(name, &v1.PodLogOptions{Container: container, Follow: options.Follow})
	return req, nil
}

// closeStreams closes every reader that also implements io.Closer.
// Used to avoid leaking already-opened log streams when opening a later
// stream fails partway through LogPodsOverContexts.
func closeStreams(readers []io.Reader) {
	for _, r := range readers {
		if cl, ok := r.(io.Closer); ok {
			cl.Close()
		}
	}
}

// LogPodsOverContexts retrieves logs of multiple pods (uses first found if multiple).
// Logs from all matched pods are merged into a single reader via logsync.Sync.
// Timestamps are requested from the API so the streams can be interleaved;
// they are stripped from the output unless options.Timestamps is set.
func (c *Client) LogPodsOverContexts(contexts []string, namespace, container string, options LogOptions) (io.Reader, error) {
	pods, err := c.ListPodsOverContexts(contexts, namespace, ListOptions{options.LabelMatch, options.StatusMatch, options.Search})
	if err != nil {
		return nil, err
	}

	fmt.Printf("Found %d pods\n", len(pods))

	readers := make([]io.Reader, len(pods))

	// Processing of log string: keep or strip the leading timestamp token.
	var processor func(string) string
	if options.Timestamps {
		processor = func(s string) string { return s + "\n" }
	} else {
		processor = func(s string) string { return s[strings.Index(s, " ")+1:] + "\n" }
	}

	// Open one log stream per pod; choose the container per pod.
	for i, pod := range pods {
		cl, err := c.getContextInterface(pod.Context)
		if err != nil {
			// BUG fix: this error used to be discarded with `_`.
			closeStreams(readers[:i])
			return nil, err
		}

		var req *rest.Request
		// detect container: default to the pod's first container
		if container == "" {
			req = cl.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &v1.PodLogOptions{Container: pod.Spec.Containers[0].Name, Follow: options.Follow, Timestamps: true})
		} else {
			req = cl.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &v1.PodLogOptions{Container: container, Follow: options.Follow, Timestamps: true})
		}

		readCloser, err := req.Stream(context.TODO())
		if err != nil {
			// Close streams opened so far instead of leaking them.
			closeStreams(readers[:i])
			return nil, err
		}
		readers[i] = readCloser
	}

	fmt.Printf("Opened %d connections to pods\n", len(readers))

	return logsync.Sync(readers, processor), nil
}

// LogPod retrieves logs from a container of a pod.
// Operates on the first container if none specified.
// TODO: The usage of this function is odd (support all containers???)
func (c *Client) LogPod(context, namespace, name, container string, options LogOptions) (*rest.Request, error) {
	cl, err := c.getContextInterface(context)
	if err != nil {
		// BUG fix: this used to panic(err.Error()); return the error instead.
		return nil, err
	}

	// Find first container / resolve namespace when either is unspecified.
	if container == "" || namespace == "" {
		pod, err := c.findPod([]string{context}, namespace, name, ListOptions{options.LabelMatch, options.StatusMatch, nil})
		if err != nil {
			return nil, err
		}
		if container == "" {
			container = pod.Spec.Containers[0].Name
		}
		namespace = pod.Namespace
	}

	req := cl.CoreV1().Pods(namespace).GetLogs(name, &v1.PodLogOptions{Container: container, Follow: options.Follow})
	return req, nil
}
package main

import "fmt"

// MobileAlertState is the behavior every concrete alert mode implements
// (State pattern: the context delegates to whichever state is current).
type MobileAlertState interface {
	alert()
}

// AlertStateContext holds the currently active alert state and forwards
// Alert() calls to it.
type AlertStateContext struct {
	currentState MobileAlertState
}

// NewAlertStateContext builds a context that starts in the Vibration state.
func NewAlertStateContext() *AlertStateContext {
	c := &AlertStateContext{}
	c.currentState = &Vibration{}
	return c
}

// SetState switches the context to a different alert state.
func (c *AlertStateContext) SetState(s MobileAlertState) {
	c.currentState = s
}

// Alert triggers the behavior of whichever state is currently active.
func (c *AlertStateContext) Alert() {
	c.currentState.alert()
}

// Vibration is the alert state that vibrates the device.
type Vibration struct{}

func (v *Vibration) alert() {
	fmt.Println("Vibrating...")
}

// Silence is the alert state that stays quiet.
type Silence struct{}

func (s *Silence) alert() {
	fmt.Println("Silent...")
}

func main() {
	stateContext := NewAlertStateContext()

	// Three alerts in the default (Vibration) state.
	for i := 0; i < 3; i++ {
		stateContext.Alert()
	}

	// Switch to Silence and alert three more times.
	stateContext.SetState(&Silence{})
	for i := 0; i < 3; i++ {
		stateContext.Alert()
	}
}
// This file was generated for SObject UserAppMenuItem, API Version v43.0 at 2018-07-30 03:47:34.623499103 -0400 EDT m=+20.966892275 package sobjects import ( "fmt" "strings" ) type UserAppMenuItem struct { BaseSObject AppMenuItemId string `force:",omitempty"` ApplicationId string `force:",omitempty"` Description string `force:",omitempty"` IconUrl string `force:",omitempty"` Id string `force:",omitempty"` InfoUrl string `force:",omitempty"` IsUsingAdminAuthorization bool `force:",omitempty"` IsVisible bool `force:",omitempty"` Label string `force:",omitempty"` LogoUrl string `force:",omitempty"` MobileStartUrl string `force:",omitempty"` Name string `force:",omitempty"` SortOrder int `force:",omitempty"` StartUrl string `force:",omitempty"` Type string `force:",omitempty"` UserSortOrder int `force:",omitempty"` } func (t *UserAppMenuItem) ApiName() string { return "UserAppMenuItem" } func (t *UserAppMenuItem) String() string { builder := strings.Builder{} builder.WriteString(fmt.Sprintf("UserAppMenuItem #%s - %s\n", t.Id, t.Name)) builder.WriteString(fmt.Sprintf("\tAppMenuItemId: %v\n", t.AppMenuItemId)) builder.WriteString(fmt.Sprintf("\tApplicationId: %v\n", t.ApplicationId)) builder.WriteString(fmt.Sprintf("\tDescription: %v\n", t.Description)) builder.WriteString(fmt.Sprintf("\tIconUrl: %v\n", t.IconUrl)) builder.WriteString(fmt.Sprintf("\tId: %v\n", t.Id)) builder.WriteString(fmt.Sprintf("\tInfoUrl: %v\n", t.InfoUrl)) builder.WriteString(fmt.Sprintf("\tIsUsingAdminAuthorization: %v\n", t.IsUsingAdminAuthorization)) builder.WriteString(fmt.Sprintf("\tIsVisible: %v\n", t.IsVisible)) builder.WriteString(fmt.Sprintf("\tLabel: %v\n", t.Label)) builder.WriteString(fmt.Sprintf("\tLogoUrl: %v\n", t.LogoUrl)) builder.WriteString(fmt.Sprintf("\tMobileStartUrl: %v\n", t.MobileStartUrl)) builder.WriteString(fmt.Sprintf("\tName: %v\n", t.Name)) builder.WriteString(fmt.Sprintf("\tSortOrder: %v\n", t.SortOrder)) builder.WriteString(fmt.Sprintf("\tStartUrl: %v\n", 
t.StartUrl)) builder.WriteString(fmt.Sprintf("\tType: %v\n", t.Type)) builder.WriteString(fmt.Sprintf("\tUserSortOrder: %v\n", t.UserSortOrder)) return builder.String() } type UserAppMenuItemQueryResponse struct { BaseQuery Records []UserAppMenuItem `json:"Records" force:"records"` }
package oss // CreateObject is func (t *Oss) CreateObject() { } // DeleteObject is func (t *Oss) DeleteObject() { } // UpdateObject is func (t *Oss) UpdateObject() { } // GetObject is func (t *Oss) GetObject() { }
package metadata import ( "incognito-chain/common" ) type PDEContributionResponse struct { MetadataBase ContributionStatus string RequestedTxID common.Hash TokenIDStr string SharedRandom []byte } func NewPDEContributionResponse( contributionStatus string, requestedTxID common.Hash, tokenIDStr string, metaType int, ) *PDEContributionResponse { metadataBase := MetadataBase{ Type: metaType, } return &PDEContributionResponse{ ContributionStatus: contributionStatus, RequestedTxID: requestedTxID, TokenIDStr: tokenIDStr, MetadataBase: metadataBase, } } func (iRes PDEContributionResponse) Hash() *common.Hash { record := iRes.RequestedTxID.String() record += iRes.TokenIDStr record += iRes.ContributionStatus record += iRes.MetadataBase.Hash().String() if iRes.SharedRandom != nil && len(iRes.SharedRandom) > 0 { record += string(iRes.SharedRandom) } // final hash hash := common.HashH([]byte(record)) return &hash } func (iRes *PDEContributionResponse) SetSharedRandom(r []byte) { iRes.SharedRandom = r }
package server

import (
	"bloom-clock/operations"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"log"
	"net"
	"strconv"

	"github.com/spencerkimball/cbfilter"
)

// Message is the JSON wire format exchanged between nodes over UDP.
type Message struct {
	From       int    // sender's port / node ID
	To         int    // receiver's port / node ID
	Type       string // request kind, e.g. "Sending Element", "Send Bloom Clock"
	Element    string // element payload (or, for some types, a port encoded as text)
	BloomClock []byte // serialized bloom clock, when relevant
	Broadcast  bool   // whether the receiver should gossip to its neighbors
}

// neighborNodes holds the ports of this node's neighbors, parsed in Server.
var neighborNodes []int

// Client sends one request to the node on `port`. For every message type
// except "Send Bloom Clock" it forwards the payload via clientSendHas.
// For "Send Bloom Clock" it fetches the clocks of `port` and of the port
// encoded in `element`, compares them, and prints the result.
func Client(from, port int, element, messageType string, broadcast bool, bloomClock []byte) error {
	if messageType != "Send Bloom Clock" {
		return clientSendHas(from, port, element, messageType, broadcast, bloomClock)
	}

	// "Send Bloom Clock": we have to compare, so issue two UDP requests.
	bc1, err := clientGetBloomClock(from, port, "Send Bloom Clock")
	if err != nil {
		return err
	}
	port2, err := strconv.Atoi(element)
	if err != nil {
		return err
	}
	bc2, err := clientGetBloomClock(from, port2, "Send Bloom Clock")
	if err != nil {
		return err
	}
	comparable, n, m := operations.Compare(bc1, bc2)
	fmt.Printf("The bloom clocks of %d and %d are comparable: %v \n", port, port2, comparable)
	fmt.Printf("First is different from second in %d positions \n", n)
	fmt.Printf("Second is different from first in %d positions \n", m)
	return nil
}

// Server runs the UDP request loop for this node on `port`, dispatching on
// Message.Type. `neigh` lists neighbor ports used for gossip broadcast.
// Startup failures are fatal; per-datagram failures are logged and skipped.
func Server(port int, fNode *cbfilter.Filter, neigh []string) {
	for _, node := range neigh {
		nodeID, err := strconv.Atoi(node)
		if err != nil {
			log.Fatal(err)
		}
		neighborNodes = append(neighborNodes, nodeID)
	}

	addr := &net.UDPAddr{IP: []byte{127, 0, 0, 1}, Port: port, Zone: ""}
	ServerConn, err := net.ListenUDP("udp", addr)
	if err != nil {
		log.Fatal(err)
	}
	defer ServerConn.Close()

	buffer := make([]byte, 1024)
	// continue instead of failing: a bad datagram should not kill the server
	// (the original code log.Fatal'ed here despite this stated intent).
	for {
		n, remoteAddress, err := ServerConn.ReadFromUDP(buffer)
		if err != nil {
			log.Println(err)
			continue
		}
		var m Message
		if err = json.Unmarshal(buffer[0:n], &m); err != nil {
			log.Println(err)
			continue
		}

		if m.Type == "Sending Element" {
			// Either add a single element or merge a whole clock.
			if m.Element != "" {
				fNode.AddKey(m.Element)
			} else {
				fNode.Data = operations.MergerBloomClock(fNode.Data, m.BloomClock)
			}
			fmt.Println(fNode.Data)
			if m.BloomClock != nil {
				comparable, _, _ := operations.Compare(fNode.Data, m.BloomClock)
				fmt.Printf("The bloom clocks of %d and %d are comparable: %v \n", addr.Port, m.From, comparable)
			}
			err = operations.WriteToCSV(port, m.Element, fmt.Sprint(fNode.Data))
			if err != nil {
				log.Fatal(err)
			}

			// Gossip to a random subset of neighbors when requested.
			broadCastedTo := ""
			if m.Broadcast {
				if len(neighborNodes) > 2 {
					operations.Shuffle(neighborNodes)
					for _, neighbor := range neighborNodes[:len(neighborNodes)-2] {
						err = Client(port, neighbor, "", m.Type, false, fNode.Data)
						if err != nil {
							log.Fatal(err)
						}
						broadCastedTo += ", " + strconv.Itoa(neighbor)
					}
					broadCastedTo = broadCastedTo[2:]
				}
			}
			go sendResponse(ServerConn, remoteAddress, "I got the element, and broadcasted to: "+broadCastedTo)
		} else if m.Type == "Does it have" {
			has := fNode.HasKey(m.Element)
			go sendResponse(ServerConn, remoteAddress, strconv.FormatBool(has))
		} else if m.Type == "Send Bloom Clock" {
			go sendBloomClock(ServerConn, remoteAddress, fNode.Data)
		} else if m.Type == "Send CSV" {
			// Element carries the destination port as text here.
			to, err := strconv.Atoi(m.Element)
			if err != nil {
				log.Fatal(err)
			}
			sendCSV(port, to, "Receive CSV")
			go sendResponse(ServerConn, remoteAddress, "I send the csv file")
		} else if m.Type == "Receive CSV" {
			err = ioutil.WriteFile("new"+strconv.Itoa(port)+".csv", m.BloomClock, 0644)
			if err != nil {
				log.Fatal(err)
			}
		}
	}
}

// sendResponse writes a text reply back to the requesting address.
func sendResponse(serr *net.UDPConn, addr *net.UDPAddr, mssg string) {
	_, err := serr.WriteToUDP([]byte(mssg), addr)
	if err != nil {
		log.Fatal(err)
	}
}

// sendBloomClock writes the raw bloom-clock bytes back to the requester.
func sendBloomClock(serr *net.UDPConn, addr *net.UDPAddr, bc []byte) {
	_, err := serr.WriteToUDP(bc, addr)
	if err != nil {
		log.Fatal(err)
	}
}

// clientSendHas sends a request and prints the server's text reply.
func clientSendHas(from, port int, element, messageType string, broadcast bool, bloomClock []byte) error {
	ClientConn, err := net.DialUDP("udp", nil, &net.UDPAddr{IP: []byte{0, 0, 0, 0}, Port: port, Zone: ""})
	if err != nil {
		// BUG fix: the original returned nil here, silently swallowing
		// the dial error.
		return err
	}
	defer ClientConn.Close()

	m := Message{From: from, To: port, Element: element, Type: messageType, Broadcast: broadcast, BloomClock: bloomClock}
	b, err := json.Marshal(&m)
	if err != nil {
		return err
	}
	if _, err = ClientConn.Write(b); err != nil {
		return err
	}

	buffer := make([]byte, 1024)
	n, _, err := ClientConn.ReadFromUDP(buffer)
	if err != nil {
		return err
	}
	fmt.Println(string(buffer[0:n]))
	return nil
}

// clientGetBloomClock requests and returns the raw bloom clock of a node.
func clientGetBloomClock(from, port int, messageType string) ([]byte, error) {
	ClientConn, err := net.DialUDP("udp", nil, &net.UDPAddr{IP: []byte{0, 0, 0, 0}, Port: port, Zone: ""})
	if err != nil {
		// BUG fix: the original returned a nil error here, hiding the
		// dial failure from the caller.
		return []byte{}, err
	}
	defer ClientConn.Close()

	m := Message{To: port, Type: messageType, From: from}
	b, err := json.Marshal(&m)
	if err != nil {
		return []byte{}, err
	}
	if _, err = ClientConn.Write(b); err != nil {
		return []byte{}, err
	}

	buffer := make([]byte, 1024)
	n, _, err := ClientConn.ReadFromUDP(buffer)
	if err != nil {
		return []byte{}, err
	}
	return buffer[0:n], nil
}

// sendCSV reads this node's CSV log and ships it to node `to`.
func sendCSV(from, to int, messageType string) {
	ClientConn, err := net.DialUDP("udp", nil, &net.UDPAddr{IP: []byte{0, 0, 0, 0}, Port: to, Zone: ""})
	if err != nil {
		log.Fatal(err)
	}
	defer ClientConn.Close()

	data, err := ioutil.ReadFile(strconv.Itoa(from) + ".csv")
	if err != nil {
		log.Fatal(err)
	}
	m := Message{Type: messageType, BloomClock: data}
	b, err := json.Marshal(&m)
	if err != nil {
		log.Fatal(err)
	}
	if _, err = ClientConn.Write(b); err != nil {
		log.Fatal(err)
	}
}
package 数组

// BUG fix: the file used math.Sqrt without importing "math".
import "math"

// diagonalsOwningSameHash groups diagonals by a hash of (midpoint, length);
// two diagonals with equal midpoint and length span the same rectangle.
var diagonalsOwningSameHash map[float64][]*Diagonal

// INF is a sentinel larger than any achievable rectangle area.
const INF = 100000000000000.0

// minAreaFreeRect returns the minimum area of any rectangle (not necessarily
// axis-aligned) formed by the given points, or 0 if none exists.
// LeetCode 963. Strategy: every rectangle is uniquely identified by its pair
// of diagonals, which share a midpoint and a length.
func minAreaFreeRect(points [][]int) float64 {
	diagonalsOwningSameHash = make(map[float64][]*Diagonal)
	coordinates := make([]*Coordinate, 0)
	diagonals := make([]*Diagonal, 0)
	minArea := INF

	for _, point := range points {
		coordinates = append(coordinates, NewCoordinate(float64(point[0]), float64(point[1])))
	}

	// Enumerate every unordered pair of points as a candidate diagonal.
	for i := 0; i < len(coordinates); i++ {
		for t := i + 1; t < len(coordinates); t++ {
			ci, ct := coordinates[i], coordinates[t]
			diagonals = append(diagonals, NewDiagonal(ci, ct))
		}
	}

	// Diagonals with the same hash (midpoint + length) form rectangles.
	for _, diagonal := range diagonals {
		sameHashDiagonals := diagonalsOwningSameHash[getHash(diagonal)]
		diagonalsOwningSameHash[getHash(diagonal)] = append(diagonalsOwningSameHash[getHash(diagonal)], diagonal)
		for _, another := range sameHashDiagonals {
			minArea = min(minArea, getMatrixSquare(diagonal, another))
		}
	}

	if minArea == INF {
		return 0
	}
	return minArea
}

// getHash folds a diagonal's midpoint and length into one float key.
// Relies on coordinates being bounded by maxX/maxY so the three terms
// occupy disjoint ranges.
func getHash(d *Diagonal) float64 {
	const maxX = 40000
	const maxY = 40000
	mc := getMiddleCoordinate(d.Coordinate1, d.Coordinate2)
	dis := getDistance(d.Coordinate1, d.Coordinate2)
	hash := mc.X + mc.Y*(maxX+1) + dis*(maxY*maxX+1)
	return hash
}

// getMiddleCoordinate returns the midpoint of the segment c1-c2.
func getMiddleCoordinate(c1, c2 *Coordinate) *Coordinate {
	return NewCoordinate((c1.X+c2.X)/2, (c1.Y+c2.Y)/2)
}

// getMatrixSquare computes the rectangle area spanned by two diagonals
// that share a midpoint and length: the two adjacent side lengths are the
// distances from one diagonal endpoint to both endpoints of the other.
func getMatrixSquare(d1, d2 *Diagonal) float64 {
	return getDistance(d1.Coordinate1, d2.Coordinate1) * getDistance(d1.Coordinate1, d2.Coordinate2)
}

// getDistance returns the Euclidean distance between c1 and c2.
func getDistance(c1, c2 *Coordinate) float64 {
	return math.Sqrt((c1.X-c2.X)*(c1.X-c2.X) + (c1.Y-c2.Y)*(c1.Y-c2.Y))
}

// ----------- Diagonal ----------

// Diagonal is a segment between two candidate rectangle corners.
type Diagonal struct {
	Coordinate1 *Coordinate
	Coordinate2 *Coordinate
}

// NewDiagonal builds a Diagonal from its two endpoints.
func NewDiagonal(c1, c2 *Coordinate) *Diagonal {
	return &Diagonal{c1, c2}
}

// ----------- Coordinate ----------

// Coordinate is a 2D point.
type Coordinate struct {
	X float64
	Y float64
}

// NewCoordinate builds a Coordinate from x and y.
func NewCoordinate(x, y float64) *Coordinate {
	return &Coordinate{x, y}
}

// ----------- Utils ----------

// min returns the smallest of its arguments (at least one required).
// Rewritten iteratively; the original recursed per element.
func min(arr ...float64) float64 {
	m := arr[0]
	for _, v := range arr[1:] {
		if v < m {
			m = v
		}
	}
	return m
}

/*
Problem link:
https://leetcode-cn.com/problems/minimum-area-rectangle-ii/
Summary:
1. A rectangle is uniquely identified by its pair of diagonals: find diagonals
   belonging to the same rectangle (same midpoint and length) and the area
   follows directly.
*/
package calendar

import (
	"strconv"
	"strings"
	"time"

	"github.com/kudrykv/latex-yearly-planner/app/components/hyper"
)

// Calendar renders a month grid as LaTeX table fragments.
type Calendar struct {
	wd    time.Weekday // first weekday of the displayed week (week start)
	weeks Weeklies     // rows of the month grid; one entry per week
	month time.Month   // the month this calendar represents
}

// WeekLayout returns the LaTeX column spec for a week row: seven "c"
// columns, with a leading "c|" week-number column when weekNum is set.
func (c Calendar) WeekLayout(weekNum bool) string {
	line := strings.Repeat("c", 7)
	if !weekNum {
		return line
	}
	return "c|" + line
}

// WeekHeader returns the "&"-joined single-letter day headers, starting at
// c.wd, optionally prefixed by a "W" week-number column header.
func (c Calendar) WeekHeader(weekNum bool) string {
	names := append(make([]string, 0, 8), "W")
	for i := time.Sunday; i < 7; i++ {
		// Rotate so that c.wd is the first column; keep only the initial.
		names = append(names, ((c.wd + i) % 7).String()[:1])
	}
	if !weekNum {
		names = names[1:]
	}
	return strings.Join(names, " & ")
}

// WeekHeaderFull is like WeekHeader but with full day names padded by
// \hfil{}; weekNum adds an empty leading cell instead of a "W" header.
func (c Calendar) WeekHeaderFull(weekNum bool) string {
	names := make([]string, 0, 7)
	for i := time.Sunday; i < 7; i++ {
		names = append(names, "\\hfil{}"+((c.wd+i)%7).String())
	}
	out := strings.Join(names, " & ")
	if weekNum {
		out = "& " + out
	}
	return out
}

// WeekHeaderLen returns the number of header columns (8 with week numbers).
func (c Calendar) WeekHeaderLen(weekNum bool) int {
	if weekNum {
		return 8
	}
	return 7
}

// MonthName returns the month this calendar represents.
func (c Calendar) MonthName() time.Month { return c.month }

// Matrix returns the week rows backing this calendar.
func (c Calendar) Matrix() Weeklies { return c.weeks }

// MatrixTexed renders the whole month grid as LaTeX table rows.
// withWeeks prepends a week-number cell per row; weeksLong spells out
// "Week N" (rotated); squareDays uses boxed day cells with \hline rules;
// today, when a DayTime, marks the selected day cell.
func (c Calendar) MatrixTexed(withWeeks, weeksLong, squareDays bool, today interface{}) string {
	mx := c.Matrix()
	out := make([]string, 0, len(mx))
	weeks := c.weeksInMonth(mx)
	for i, weekly := range mx {
		row := make([]string, 0, 8)
		if withWeeks {
			row = append(row, c.week(weeks, weeksLong, i))
		}
		row = append(row, c.daysRow(weekly, squareDays, today)...)
		out = append(out, strings.Join(row, " & "))
	}
	if squareDays {
		// Boxed cells need a horizontal rule after every row.
		return strings.Join(out, "\\\\ \\hline\n") + "\\\\ \\hline"
	}
	return strings.Join(out, " \\\\\n")
}

// daysRow renders one week's seven day cells. Zero days (outside the
// month) become empty cells; the cell matching `today` (when it is a
// DayTime) is rendered via SelectedCell.
func (c Calendar) daysRow(weekly Weekly, squareDays bool, today interface{}) []string {
	row := make([]string, 0, 7)
	for _, dayTime := range weekly {
		switch td, ok := today.(DayTime); {
		case dayTime.IsZero():
			row = append(row, "")
		case squareDays:
			row = append(row, dayTime.SquareLink())
		case ok && dayTime.Equal(td.Time):
			row = append(row, td.SelectedCell())
		default:
			row = append(row, dayTime.Link())
		}
	}
	return row
}

// week renders the week-number cell for row i as a hyperlink.
// A ">50" week shown in January gets an "fw" (presumably "former-year
// week") link prefix so it targets the previous year's page — TODO confirm.
func (c Calendar) week(weeks []int, weeksLong bool, i int) string {
	prefix := ""
	text := ""
	if c.MonthName() == time.January && weeks[i] > 50 {
		prefix = "fw"
	}
	if weeksLong {
		text = "Week "
	}
	text += strconv.Itoa(weeks[i])
	if weeksLong {
		// Rotate the long label vertically to fit the cell height.
		text = `\rotatebox[origin=tr]{90}{\makebox[\myLenMonthlyCellHeight][c]{` + text + `}}`
	}
	return hyper.Link(prefix+"Week "+strconv.Itoa(weeks[i]), text)
}

// weeksInMonth returns one week number per row of mx. When two adjacent
// rows report the same number (an ISO week-numbering edge case at year
// boundaries — NOTE(review): verify against WeekNumber's semantics), the
// second is bumped by one to keep row numbers distinct.
func (c Calendar) weeksInMonth(mx Weeklies) []int {
	weeks := make([]int, 0, len(mx))
	weeks = append(weeks, mx[0].WeekNumber())
	for i := 1; i < len(mx); i++ {
		wn := mx[i].WeekNumber()
		if wn == weeks[i-1] {
			wn++
		}
		weeks = append(weeks, wn)
	}
	return weeks
}
package payment type CreditAccount struct { accountNumber string accountOwner string } func (c CreditAccount) AccountNumber() string { return c.accountNumber } func (c CreditAccount) AccountOwner() string { return c.accountOwner } func (c CreditAccount) AvailableCredit() float32 { return 1000 }
// Copyright 2016-2019 Authors of Cilium // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package api import ( "fmt" "reflect" "strings" ) // Rules is a collection of api.Rule. // // All rules must be evaluated in order to come to a conclusion. While // it is sufficient to have a single fromEndpoints rule match, none of // the fromRequires may be violated at the same time. type Rules []*Rule func (rs Rules) String() string { strRules := make([]string, 0, len(rs)) for _, r := range rs { strRules = append(strRules, fmt.Sprintf("%+v", r)) } return "[" + strings.Join(strRules, ",\n") + "]" } // DeepEquals returns true if the specified rules are deeply the same. func (rs Rules) DeepEquals(rs2 Rules) bool { return reflect.DeepEqual(rs, rs2) }
// +build integration

package main

import (
	"context"
	"crypto/sha256"
	"fmt"
	"log"
	"os"
	"sync"

	"github.com/Azure/go-autorest/autorest/azure/auth"
	"github.com/osbuild/osbuild-composer/internal/boot/azuretest"
	"github.com/osbuild/osbuild-composer/internal/cloud/gcp"
	"github.com/osbuild/osbuild-composer/internal/test"
)

// cleanupGCP best-effort deletes GCP resources (VM instance, image-import
// cache objects, uploaded storage objects and the imported image) that an
// integration test identified by testID may have left behind. Errors are
// logged and otherwise ignored; many deletions are expected to fail when the
// test cleaned up after itself.
func cleanupGCP(testID string, wg *sync.WaitGroup) {
	defer wg.Done()
	log.Println("[GCP] Running clean up")
	GCPRegion, ok := os.LookupEnv("GCP_REGION")
	if !ok {
		log.Println("[GCP] Error: 'GCP_REGION' is not set in the environment.")
		return
	}
	GCPBucket, ok := os.LookupEnv("GCP_BUCKET")
	if !ok {
		log.Println("[GCP] Error: 'GCP_BUCKET' is not set in the environment.")
		return
	}
	// max 62 characters
	// Must be a match of regex '[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}'
	// use sha224sum to get predictable testID without invalid characters
	testIDhash := fmt.Sprintf("%x", sha256.Sum224([]byte(testID)))
	// Resource names to clean up
	GCPInstance := fmt.Sprintf("vm-%s", testIDhash)
	GCPImage := fmt.Sprintf("image-%s", testIDhash)
	// It does not matter if there was any error. If the credentials file was
	// read successfully then 'creds' should be non-nil, otherwise it will be
	// nil. Both values are acceptable for creating a new "GCP" instance.
	// If 'creds' is nil, then GCP library will try to authenticate using
	// the instance permissions.
	creds, err := gcp.GetCredentialsFromEnv()
	if err != nil {
		log.Printf("[GCP] Error: %v. This may not be an issue.", err)
	}
	// If this fails, there is no point in continuing
	g, err := gcp.New(creds)
	if err != nil {
		log.Printf("[GCP] Error: %v", err)
		return
	}
	ctx := context.Background()
	// Try to delete potentially running instance
	// api.sh chooses a random GCP Zone from the set Region. Since we
	// don't know which one it is, iterate over all Zones in the Region
	// and try to delete the instance. Unless the instance has set
	// "VmDnsSetting:ZonalOnly", which we don't do, this is safe and the
	// instance name must be unique for the whole GCP project.
	GCPZones, err := g.ComputeZonesInRegion(ctx, GCPRegion)
	if err != nil {
		log.Printf("[GCP] Error: Failed to get available Zones for the '%s' Region: %v", GCPRegion, err)
		return
	}
	for _, GCPZone := range GCPZones {
		log.Printf("[GCP] 🧹 Deleting VM instance %s in %s. "+
			"This should fail if the test succeeded.", GCPInstance, GCPZone)
		err = g.ComputeInstanceDelete(ctx, GCPZone, GCPInstance)
		if err == nil {
			// If an instance with the given name was successfully deleted in one of the Zones, we are done.
			break
		} else {
			log.Printf("[GCP] Error: %v", err)
		}
	}
	// Try to clean up storage of cache objects after image import job
	log.Println("[GCP] 🧹 Cleaning up cache objects from storage after image " +
		"import. This should fail if the test succeeded.")
	cacheObjects, errs := g.StorageImageImportCleanup(ctx, GCPImage)
	// Note: err is deliberately reused here; each cleanup error is only logged.
	for _, err = range errs {
		log.Printf("[GCP] Error: %v", err)
	}
	for _, cacheObject := range cacheObjects {
		log.Printf("[GCP] 🧹 Deleted image import job file %s", cacheObject)
	}
	// Try to find the potentially uploaded Storage objects using custom metadata
	objects, err := g.StorageListObjectsByMetadata(ctx, GCPBucket, map[string]string{gcp.MetadataKeyImageName: GCPImage})
	if err != nil {
		log.Printf("[GCP] Error: %v", err)
	}
	for _, obj := range objects {
		if err = g.StorageObjectDelete(ctx, obj.Bucket, obj.Name); err != nil {
			log.Printf("[GCP] Error: %v", err)
		}
		log.Printf("[GCP] 🧹 Deleted object %s/%s related to build of image %s", obj.Bucket, obj.Name, GCPImage)
	}
	// Try to delete the imported image
	log.Printf("[GCP] 🧹 Deleting image %s. This should fail if the test succeeded.", GCPImage)
	err = g.ComputeImageDelete(ctx, GCPImage)
	if err != nil {
		log.Printf("[GCP] Error: %v", err)
	}
}

// cleanupAzure best-effort deletes the Azure VHD image and the deployment
// resources created by the integration test identified by testID. Errors are
// logged and otherwise ignored.
func cleanupAzure(testID string, wg *sync.WaitGroup) {
	defer wg.Done()
	log.Println("[Azure] Running clean up")
	// Load Azure credentials
	creds, err := azuretest.GetAzureCredentialsFromEnv()
	if err != nil {
		log.Printf("[Azure] Error: %v", err)
		return
	}
	if creds == nil {
		log.Println("[Azure] Error: empty credentials")
		return
	}
	// Delete the vhd image
	imageName := "image-" + testID + ".vhd"
	log.Println("[Azure] Deleting image. This should fail if the test succeeded.")
	err = azuretest.DeleteImageFromAzure(creds, imageName)
	if err != nil {
		log.Printf("[Azure] Error: %v", err)
	}
	// Delete all remaining resources (see the full list in the CleanUpBootedVM function)
	log.Println("[Azure] Cleaning up booted VM. This should fail if the test succeeded.")
	parameters := azuretest.NewDeploymentParameters(creds, imageName, testID, "")
	clientCredentialsConfig := auth.NewClientCredentialsConfig(creds.ClientID, creds.ClientSecret, creds.TenantID)
	authorizer, err := clientCredentialsConfig.Authorizer()
	if err != nil {
		log.Printf("[Azure] Error: %v", err)
		return
	}
	err = azuretest.CleanUpBootedVM(creds, parameters, authorizer, testID)
	if err != nil {
		log.Printf("[Azure] Error: %v", err)
	}
}

// main derives the shared test ID and runs the Azure and GCP cleanups
// concurrently, waiting for both to finish.
func main() {
	log.Println("Running a cloud cleanup")
	// Get test ID
	testID, err := test.GenerateCIArtifactName("")
	if err != nil {
		log.Fatalf("Failed to get testID: %v", err)
	}
	log.Printf("TEST_ID=%s", testID)
	var wg sync.WaitGroup
	wg.Add(2)
	go cleanupAzure(testID, &wg)
	go cleanupGCP(testID, &wg)
	wg.Wait()
}
package rotationfile

import (
	// "fmt"
	"os"
	"strings"
	"sync"
	"time"
)

// Rotator is an io.Writer-style log sink that writes to a time-stamped file
// and switches to a new file when the configured rotation period rolls over.
// A symlink named after baseFileName is kept pointing at the current file.
type Rotator struct {
	baseFileName     string     // base path; also the symlink name
	currentFileName  string     // path of the file currently written to
	internalFile     *os.File   // open handle for the current file
	rotationByTime   int        // one of the *Rotation constants below
	nextRotationTime int64      // unix seconds at which the next switch is due
	fileLock         sync.Mutex // guards the rotation (file switch) in Write
}

// GetCurrentFileName returns the path of the file currently being written.
func (this *Rotator) GetCurrentFileName() string {
	return this.currentFileName
}

// Rotation periods.
const (
	NoRotation = iota
	MinutelyRotation /* just for test */
	HourlyRotation
	DailyRotation
)

// GetTimeFormat maps a rotation period to the time layout used as the
// rotated file's suffix; NoRotation (and unknown values) yield "".
func GetTimeFormat(rotationByTime int) string {
	switch rotationByTime {
	default:
		fallthrough
	case NoRotation:
		return ""
	case MinutelyRotation:
		return "20060102-1504"
	case HourlyRotation:
		return "20060102-15"
	case DailyRotation:
		return "20060102"
	}
}

// createSymLink points the symlink baseFileName at currentName, replacing an
// existing symlink. Errors from Remove/Symlink are intentionally ignored
// (best-effort); the function always returns nil.
func (this *Rotator) createSymLink(currentName string) error {
	linkName := this.baseFileName
	if info, err := os.Lstat(linkName); !os.IsNotExist(err) {
		if info != nil && (info.Mode()&os.ModeSymlink == os.ModeSymlink) {
			// remove old link file
			os.Remove(linkName)
		} else {
			// link exist but not symlink, use some alter name as linkname
			// NOTE(review): the ".alt" name is computed but the function
			// returns before creating any link — confirm this is intended.
			linkName += ".alt"
			return nil
		}
	}
	if info, err := os.Lstat(currentName); err == nil {
		if info.Mode().IsRegular() {
			os.Symlink(currentName, linkName)
		}
	}
	return nil
}

// switchFile opens the next time-stamped log file for the given instant,
// updates nextRotationTime to the start of the next period, and re-points
// the symlink. With NoRotation it does nothing and returns nil.
// NOTE(review): with NoRotation, internalFile is never opened, so a
// NoRotation Rotator will nil-panic on Write — confirm NoRotation is unused.
func (this *Rotator) switchFile(now time.Time) error {
	if this.rotationByTime != NoRotation {
		logFileName := this.baseFileName
		logFileName += "." + now.Format(GetTimeFormat(this.rotationByTime))
		// fmt.Println("next log-name will be", logFileName, ".")
		switch this.rotationByTime {
		default:
			break
		case MinutelyRotation:
			// Round up to the start of the next minute.
			this.nextRotationTime = now.Add(time.Minute).Add(-time.Duration(now.Second()) * time.Second).Unix()
		case HourlyRotation:
			// Round up to the start of the next hour.
			this.nextRotationTime = now.Add(time.Hour).Add(-time.Duration(now.Second()+now.Minute()*60) * time.Second).Unix()
		case DailyRotation:
			// Round up to the start of the next day (local time).
			this.nextRotationTime = now.Add(24 * time.Hour).Add(-time.Duration(now.Hour()*3600+now.Minute()*60+now.Second()) * time.Second).Unix()
		}
		// fmt.Println("next rotation time-point will be", this.nextRotationTime, " vs now ", now.Unix(), ".")
		logFile, err := os.OpenFile(logFileName, os.O_CREATE|os.O_APPEND|os.O_RDWR, 0666)
		if err == nil {
			this.currentFileName = logFileName
			this.internalFile = logFile
			this.createSymLink(logFileName)
			// fmt.Println("file swapped.")
		}
		return err
	}
	return nil
}

// Create initializes the rotator: derives the base file name (appending
// "default.log" when name is a directory path), creates parent directories,
// and opens the first log file.
func (this *Rotator) Create(name string, rotationByTime int) {
	if name[len(name)-1] == '\\' || name[len(name)-1] == '/' {
		this.baseFileName = name + "default.log"
	} else {
		this.baseFileName = name
	}
	if strings.LastIndexAny(name, "\\/") != -1 {
		os.MkdirAll(name[0:strings.LastIndexAny(name, "\\/")], 0766)
	}
	// fmt.Println("name:", name)
	this.rotationByTime = rotationByTime
	now := time.Now()
	this.switchFile(now)
}

// Write implements io.Writer. When the rotation deadline has passed it takes
// fileLock and re-checks before switching files (double-checked locking).
// NOTE(review): the first nextRotationTime read happens outside the lock and
// internalFile is swapped under the lock while other writers may be writing —
// confirm callers serialize Write, otherwise this races.
func (this *Rotator) Write(p []byte) (n int, err error) {
	now := time.Now()
	if now.Unix() >= this.nextRotationTime {
		this.fileLock.Lock()
		defer this.fileLock.Unlock()
		if now.Unix() >= this.nextRotationTime {
			if err := this.switchFile(now); err != nil {
				return 0, err
			}
		}
	}
	return this.internalFile.Write(p)
}

// Close closes the currently open log file.
func (this *Rotator) Close() {
	this.internalFile.Close()
}
// Copyright 2021 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server

import (
	"context"

	"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
	computepb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/compute/compute_go_proto"
	emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
	"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/compute"
)

// Server implements the gRPC interface for NetworkEndpointGroup.
type NetworkEndpointGroupServer struct{}

// ProtoToComputeNetworkEndpointGroupNetworkEndpointTypeEnum converts a
// NetworkEndpointGroupNetworkEndpointTypeEnum enum from its proto
// representation. Returns nil for the zero (unspecified) enum value; the
// proto enum name has the "ComputeNetworkEndpointGroupNetworkEndpointTypeEnum"
// prefix stripped.
func ProtoToComputeNetworkEndpointGroupNetworkEndpointTypeEnum(e computepb.ComputeNetworkEndpointGroupNetworkEndpointTypeEnum) *compute.NetworkEndpointGroupNetworkEndpointTypeEnum {
	if e == 0 {
		return nil
	}
	if n, ok := computepb.ComputeNetworkEndpointGroupNetworkEndpointTypeEnum_name[int32(e)]; ok {
		e := compute.NetworkEndpointGroupNetworkEndpointTypeEnum(n[len("ComputeNetworkEndpointGroupNetworkEndpointTypeEnum"):])
		return &e
	}
	return nil
}

// ProtoToComputeNetworkEndpointGroupCloudRun converts a
// NetworkEndpointGroupCloudRun resource from its proto representation.
func ProtoToComputeNetworkEndpointGroupCloudRun(p *computepb.ComputeNetworkEndpointGroupCloudRun) *compute.NetworkEndpointGroupCloudRun {
	if p == nil {
		return nil
	}
	obj := &compute.NetworkEndpointGroupCloudRun{
		Service: dcl.StringOrNil(p.Service),
		Tag:     dcl.StringOrNil(p.Tag),
		UrlMask: dcl.StringOrNil(p.UrlMask),
	}
	return obj
}

// ProtoToComputeNetworkEndpointGroupAppEngine converts a
// NetworkEndpointGroupAppEngine resource from its proto representation.
func ProtoToComputeNetworkEndpointGroupAppEngine(p *computepb.ComputeNetworkEndpointGroupAppEngine) *compute.NetworkEndpointGroupAppEngine {
	if p == nil {
		return nil
	}
	obj := &compute.NetworkEndpointGroupAppEngine{
		Service: dcl.StringOrNil(p.Service),
		Version: dcl.StringOrNil(p.Version),
		UrlMask: dcl.StringOrNil(p.UrlMask),
	}
	return obj
}

// ProtoToComputeNetworkEndpointGroupCloudFunction converts a
// NetworkEndpointGroupCloudFunction resource from its proto representation.
func ProtoToComputeNetworkEndpointGroupCloudFunction(p *computepb.ComputeNetworkEndpointGroupCloudFunction) *compute.NetworkEndpointGroupCloudFunction {
	if p == nil {
		return nil
	}
	obj := &compute.NetworkEndpointGroupCloudFunction{
		Function: dcl.StringOrNil(p.Function),
		UrlMask:  dcl.StringOrNil(p.UrlMask),
	}
	return obj
}

// ProtoToNetworkEndpointGroup converts a NetworkEndpointGroup resource from
// its proto representation.
func ProtoToNetworkEndpointGroup(p *computepb.ComputeNetworkEndpointGroup) *compute.NetworkEndpointGroup {
	obj := &compute.NetworkEndpointGroup{
		Id:                  dcl.Int64OrNil(p.Id),
		SelfLink:            dcl.StringOrNil(p.SelfLink),
		SelfLinkWithId:      dcl.StringOrNil(p.SelfLinkWithId),
		Name:                dcl.StringOrNil(p.Name),
		Description:         dcl.StringOrNil(p.Description),
		NetworkEndpointType: ProtoToComputeNetworkEndpointGroupNetworkEndpointTypeEnum(p.GetNetworkEndpointType()),
		Size:                dcl.Int64OrNil(p.Size),
		Location:            dcl.StringOrNil(p.Location),
		Network:             dcl.StringOrNil(p.Network),
		Subnetwork:          dcl.StringOrNil(p.Subnetwork),
		DefaultPort:         dcl.Int64OrNil(p.DefaultPort),
		CloudRun:            ProtoToComputeNetworkEndpointGroupCloudRun(p.GetCloudRun()),
		AppEngine:           ProtoToComputeNetworkEndpointGroupAppEngine(p.GetAppEngine()),
		CloudFunction:       ProtoToComputeNetworkEndpointGroupCloudFunction(p.GetCloudFunction()),
		Project:             dcl.StringOrNil(p.Project),
	}
	return obj
}

// ComputeNetworkEndpointGroupNetworkEndpointTypeEnumToProto converts a
// NetworkEndpointGroupNetworkEndpointTypeEnum enum to its proto
// representation. nil maps to the zero (unspecified) proto enum value.
func ComputeNetworkEndpointGroupNetworkEndpointTypeEnumToProto(e *compute.NetworkEndpointGroupNetworkEndpointTypeEnum) computepb.ComputeNetworkEndpointGroupNetworkEndpointTypeEnum {
	if e == nil {
		return computepb.ComputeNetworkEndpointGroupNetworkEndpointTypeEnum(0)
	}
	if v, ok := computepb.ComputeNetworkEndpointGroupNetworkEndpointTypeEnum_value["NetworkEndpointGroupNetworkEndpointTypeEnum"+string(*e)]; ok {
		return computepb.ComputeNetworkEndpointGroupNetworkEndpointTypeEnum(v)
	}
	return computepb.ComputeNetworkEndpointGroupNetworkEndpointTypeEnum(0)
}

// ComputeNetworkEndpointGroupCloudRunToProto converts a
// NetworkEndpointGroupCloudRun resource to its proto representation.
func ComputeNetworkEndpointGroupCloudRunToProto(o *compute.NetworkEndpointGroupCloudRun) *computepb.ComputeNetworkEndpointGroupCloudRun {
	if o == nil {
		return nil
	}
	p := &computepb.ComputeNetworkEndpointGroupCloudRun{
		Service: dcl.ValueOrEmptyString(o.Service),
		Tag:     dcl.ValueOrEmptyString(o.Tag),
		UrlMask: dcl.ValueOrEmptyString(o.UrlMask),
	}
	return p
}

// ComputeNetworkEndpointGroupAppEngineToProto converts a
// NetworkEndpointGroupAppEngine resource to its proto representation.
func ComputeNetworkEndpointGroupAppEngineToProto(o *compute.NetworkEndpointGroupAppEngine) *computepb.ComputeNetworkEndpointGroupAppEngine {
	if o == nil {
		return nil
	}
	p := &computepb.ComputeNetworkEndpointGroupAppEngine{
		Service: dcl.ValueOrEmptyString(o.Service),
		Version: dcl.ValueOrEmptyString(o.Version),
		UrlMask: dcl.ValueOrEmptyString(o.UrlMask),
	}
	return p
}

// ComputeNetworkEndpointGroupCloudFunctionToProto converts a
// NetworkEndpointGroupCloudFunction resource to its proto representation.
func ComputeNetworkEndpointGroupCloudFunctionToProto(o *compute.NetworkEndpointGroupCloudFunction) *computepb.ComputeNetworkEndpointGroupCloudFunction {
	if o == nil {
		return nil
	}
	p := &computepb.ComputeNetworkEndpointGroupCloudFunction{
		Function: dcl.ValueOrEmptyString(o.Function),
		UrlMask:  dcl.ValueOrEmptyString(o.UrlMask),
	}
	return p
}

// NetworkEndpointGroupToProto converts a NetworkEndpointGroup resource to
// its proto representation.
func NetworkEndpointGroupToProto(resource *compute.NetworkEndpointGroup) *computepb.ComputeNetworkEndpointGroup {
	p := &computepb.ComputeNetworkEndpointGroup{
		Id:                  dcl.ValueOrEmptyInt64(resource.Id),
		SelfLink:            dcl.ValueOrEmptyString(resource.SelfLink),
		SelfLinkWithId:      dcl.ValueOrEmptyString(resource.SelfLinkWithId),
		Name:                dcl.ValueOrEmptyString(resource.Name),
		Description:         dcl.ValueOrEmptyString(resource.Description),
		NetworkEndpointType: ComputeNetworkEndpointGroupNetworkEndpointTypeEnumToProto(resource.NetworkEndpointType),
		Size:                dcl.ValueOrEmptyInt64(resource.Size),
		Location:            dcl.ValueOrEmptyString(resource.Location),
		Network:             dcl.ValueOrEmptyString(resource.Network),
		Subnetwork:          dcl.ValueOrEmptyString(resource.Subnetwork),
		DefaultPort:         dcl.ValueOrEmptyInt64(resource.DefaultPort),
		CloudRun:            ComputeNetworkEndpointGroupCloudRunToProto(resource.CloudRun),
		AppEngine:           ComputeNetworkEndpointGroupAppEngineToProto(resource.AppEngine),
		CloudFunction:       ComputeNetworkEndpointGroupCloudFunctionToProto(resource.CloudFunction),
		Project:             dcl.ValueOrEmptyString(resource.Project),
	}
	return p
}

// applyNetworkEndpointGroup handles the gRPC request by passing it to the
// underlying NetworkEndpointGroup Apply() method and converting the result
// back to proto.
func (s *NetworkEndpointGroupServer) applyNetworkEndpointGroup(ctx context.Context, c *compute.Client, request *computepb.ApplyComputeNetworkEndpointGroupRequest) (*computepb.ComputeNetworkEndpointGroup, error) {
	p := ProtoToNetworkEndpointGroup(request.GetResource())
	res, err := c.ApplyNetworkEndpointGroup(ctx, p)
	if err != nil {
		return nil, err
	}
	r := NetworkEndpointGroupToProto(res)
	return r, nil
}

// ApplyComputeNetworkEndpointGroup handles the gRPC request by passing it to
// the underlying NetworkEndpointGroup Apply() method.
func (s *NetworkEndpointGroupServer) ApplyComputeNetworkEndpointGroup(ctx context.Context, request *computepb.ApplyComputeNetworkEndpointGroupRequest) (*computepb.ComputeNetworkEndpointGroup, error) {
	cl, err := createConfigNetworkEndpointGroup(ctx, request.ServiceAccountFile)
	if err != nil {
		return nil, err
	}
	return s.applyNetworkEndpointGroup(ctx, cl, request)
}

// DeleteComputeNetworkEndpointGroup handles the gRPC request by passing it to
// the underlying NetworkEndpointGroup Delete() method.
func (s *NetworkEndpointGroupServer) DeleteComputeNetworkEndpointGroup(ctx context.Context, request *computepb.DeleteComputeNetworkEndpointGroupRequest) (*emptypb.Empty, error) {
	cl, err := createConfigNetworkEndpointGroup(ctx, request.ServiceAccountFile)
	if err != nil {
		return nil, err
	}
	return &emptypb.Empty{}, cl.DeleteNetworkEndpointGroup(ctx, ProtoToNetworkEndpointGroup(request.GetResource()))
}

// ListComputeNetworkEndpointGroup handles the gRPC request by passing it to
// the underlying NetworkEndpointGroupList() method.
func (s *NetworkEndpointGroupServer) ListComputeNetworkEndpointGroup(ctx context.Context, request *computepb.ListComputeNetworkEndpointGroupRequest) (*computepb.ListComputeNetworkEndpointGroupResponse, error) {
	cl, err := createConfigNetworkEndpointGroup(ctx, request.ServiceAccountFile)
	if err != nil {
		return nil, err
	}
	resources, err := cl.ListNetworkEndpointGroup(ctx, request.Project, request.Location)
	if err != nil {
		return nil, err
	}
	var protos []*computepb.ComputeNetworkEndpointGroup
	for _, r := range resources.Items {
		rp := NetworkEndpointGroupToProto(r)
		protos = append(protos, rp)
	}
	return &computepb.ListComputeNetworkEndpointGroupResponse{Items: protos}, nil
}

// createConfigNetworkEndpointGroup builds a compute client authenticated with
// the given service account file.
func createConfigNetworkEndpointGroup(ctx context.Context, service_account_file string) (*compute.Client, error) {
	conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(service_account_file))
	return compute.NewClient(conf), nil
}
package services

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"goChat/Server/db"
	"goChat/Server/models"
	"goChat/Server/utils"
	"log"
	"net/http"
	"strings"
	"time"

	"github.com/dgrijalva/jwt-go"
)

// signingKey is the HMAC secret used to sign and verify JWTs.
// SECURITY(review): a hard-coded signing key must not ship to production;
// load it from configuration or the environment instead.
const signingKey = "APIKeyWHICHISNotTOOSECRET"

// keyGetter supplies the HMAC secret to the JWT library during verification.
var keyGetter = func(t *jwt.Token) (interface{}, error) {
	return []byte(signingKey), nil
}

// TokenClaims - TokenClaims
type TokenClaims struct {
	UserName string `json:"userName"`
	jwt.StandardClaims
}

// AuthService - AuthService
type AuthService struct {
	userRepository db.IUserRepository
}

// Token - Token
type Token struct {
	AccessToken string `json:"access_token"`
}

// LoginResponse - LoginResponse
type LoginResponse struct {
	UserID      string `json:"userID"`
	AccessToken string `json:"accessToken"`
	FirstName   string `json:"firstName"`
	LastName    string `json:"lastName"`
	NickName    string `json:"nickName"`
}

// NewAuthService - create new instance of auth service
func NewAuthService(userRepo db.IUserRepository) *AuthService {
	authService := AuthService{
		userRepository: userRepo,
	}
	return &authService
}

// AuthenticateHandler - handler function for authentication.
// Verifies the posted credentials against the user repository and, on
// success, responds with a LoginResponse carrying a signed JWT.
func (auth *AuthService) AuthenticateHandler(w http.ResponseWriter, r *http.Request) {
	var userCredential models.UserCredential
	_ = json.NewDecoder(r.Body).Decode(&userCredential)

	user, err := auth.userRepository.GetUserByEmail(userCredential.UserName)
	if err != nil {
		msg := fmt.Sprintf("The provided user name: %s cannot be found", userCredential.UserName)
		utils.JSONUnAuthorizedResponse(w, msg)
		// Fix: pass msg as a value, not as a format string (go vet: printf).
		log.Print(msg)
		return
	}

	pwdMatch := utils.CheckPasswordHash(userCredential.Password, user.PasswordHashed)
	if !pwdMatch {
		log.Printf("The provided password does not match")
		utils.JSONUnAuthorizedResponse(w, "Invalid credentials")
		return
	}

	token, err := generateToken(user)
	if err != nil {
		utils.JSONInternalServerErrorResponse(w, fmt.Sprintf("Error while creating token: %s", err))
		log.Printf("Error while creating token: %s", err)
		return
	}

	// Fix: removed a dead second `if err != nil` check that could never
	// trigger (err was already handled above).
	loginResponse := LoginResponse{
		AccessToken: token,
		FirstName:   user.FirstName,
		LastName:    user.LastName,
		NickName:    user.NickName,
		UserID:      user.ID,
	}
	utils.JSONSuccessResponse(w, loginResponse)
}

// AuthenticationMiddleware - middleware function that authenticates the
// request using JWT and injects the resolved user into the request context.
func (auth *AuthService) AuthenticationMiddleware(next http.HandlerFunc) http.HandlerFunc {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		token, err := auth.validateRequest(r)
		if err != nil {
			http.Error(w, "Invalid auth token", http.StatusUnauthorized)
			return
		}
		user, err := auth.parseUserFromRequest(token)
		if err != nil {
			http.Error(w, "Unable to get user information", http.StatusInternalServerError)
			return
		}
		rWithContext := r.WithContext(context.WithValue(r.Context(), utils.RequestContextKeyUser, user))
		*r = *rWithContext
		next(w, r)
	})
}

// validateRequest extracts the bearer token from the request and parses it.
// Fix: use ParseWithClaims with *TokenClaims — plain jwt.Parse decodes into
// jwt.MapClaims, which made the *TokenClaims assertion in
// parseUserFromRequest fail for every valid token.
func (auth *AuthService) validateRequest(r *http.Request) (*jwt.Token, error) {
	tokenString, err := extractTokenFromHeader(r)
	if err != nil {
		return &jwt.Token{}, fmt.Errorf("error while extracting auth token: %s", err)
	}
	if tokenString == "" {
		return &jwt.Token{}, fmt.Errorf("no authentication token found")
	}
	token, err := jwt.ParseWithClaims(tokenString, &TokenClaims{}, keyGetter)
	if err != nil {
		return &jwt.Token{}, fmt.Errorf("error while validating auth token")
	}
	if !token.Valid {
		return &jwt.Token{}, fmt.Errorf("invalid token")
	}
	return token, nil
}

// parseUserFromRequest resolves the user named in the token's claims.
func (auth *AuthService) parseUserFromRequest(token *jwt.Token) (models.User, error) {
	user := models.User{}
	claims, ok := token.Claims.(*TokenClaims)
	if !ok {
		return user, fmt.Errorf("unable to process claims")
	}
	user, err := auth.userRepository.GetUserByEmail(claims.UserName)
	if err != nil {
		return user, fmt.Errorf("unable to find user given in token. Error: %s", err)
	}
	return user, nil
}

// extractTokenFromHeader returns the bearer token from the Authorization
// header, "" if the header is absent, or an error for a malformed header.
// Fix: the format check now uses || — the previous && accepted headers that
// were the wrong length OR had the wrong scheme (e.g. "Token abc").
func extractTokenFromHeader(r *http.Request) (string, error) {
	authHeader := r.Header.Get("Authorization")
	if authHeader == "" {
		return "", nil
	}
	headerParts := strings.Split(authHeader, " ")
	if len(headerParts) != 2 || strings.ToUpper(headerParts[0]) != "BEARER" {
		return "", errors.New("authorization header not in correct format")
	}
	return headerParts[1], nil
}

// generateToken signs a two-hour HS256 JWT carrying the user's email.
func generateToken(user models.User) (string, error) {
	claims := TokenClaims{
		user.Email,
		jwt.StandardClaims{
			ExpiresAt: time.Now().Add(time.Hour * time.Duration(2)).Unix(),
			IssuedAt:  time.Now().Unix(),
		},
	}
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
	return token.SignedString([]byte(signingKey))
}
/* FOR EXPERIMENTATION ONLY the os pkg (and even os/exec) are better suited for low-level stuff, in comparison to Lua's os library */ package main import ( "os/exec" "io" //"os" //"encoding/json" //"strings" "fmt" ) func PathCheck(bin ...string) bool { valid := false for _, k := range bin { if dir, err := exec.LookPath(k) ; err == nil { valid = true fmt.Println(dir) } } return valid } // output of first cmd is input to last cmd func PipeStatusBar(outof *exec.Cmd, into *exec.Cmd) { if output, err := outof.Output() ; err == nil { outof.Start() if input, er := into.StdinPipe() ; er == nil { go func() { defer input.Close() io.WriteString(input, string(output)) }() into.Run() into.Wait() } } } func main() { if PathCheck("lemonbar", "slstatus") { fmt.Println("no error reported") } sl := exec.Command("slstatus", "-s") lemon := exec.Command("lemonbar") PipeStatusBar(sl, lemon) } /*func ConfigValues(fpath string) [][]string { f, _ := os.Open(fpath) defer f.Close() jdec := json.NewDecoder(strings.NewReader(f)) }*/
package router

import (
	"net/http"
	"strconv"

	"github.com/mrap/combo/functions/api/models"

	"github.com/gin-gonic/gin"
	eztemplate "github.com/michelloworld/ez-gin-template"
)

// NewRouter builds the gin engine: HTML template rendering, static assets,
// and the two routes served by the app.
func NewRouter() *gin.Engine {
	engine := gin.New()

	templates := eztemplate.New()
	templates.TemplatesDir = "templates/"
	templates.Ext = ".tmpl"
	engine.HTMLRender = templates.Init()

	engine.Static("/public", "build/client/assets")
	engine.GET("/", getIndex)
	engine.GET("/combos", getCombos)

	return engine
}

// getIndex renders the home page template.
func getIndex(c *gin.Context) {
	c.HTML(http.StatusOK, "app/index", gin.H{"title": "Home"})
}

// CombosRes is the JSON payload returned by the /combos endpoint.
type CombosRes struct {
	Combos []string `json:"combos"`
}

// getCombos generates character combinations from the request's query
// parameters and returns them as JSON.
func getCombos(c *gin.Context) {
	// TODO: handle bad params
	chars := c.Query("chars")
	count, _ := strconv.Atoi(c.Query("count"))
	shortest, _ := strconv.Atoi(c.Query("min_len"))
	longest, _ := strconv.Atoi(c.Query("max_len"))

	combos := models.GenerateCombos(chars, count, shortest, longest)
	c.JSON(http.StatusOK, CombosRes{combos})
}
package car

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestCar verifies that New produces a non-nil car for an empty config.
func TestCar(t *testing.T) {
	cfg := &Config{}
	created := New(cfg)
	assert.NotNil(t, created)
}
package roce

import (
	"errors"
	"fmt"
	"math"
	"runtime/debug"
	"strings"
	"sync"
	"time"

	"github.com/Huawei/eSDK_K8S_Plugin/src/connector"
	"github.com/Huawei/eSDK_K8S_Plugin/src/utils"
	"github.com/Huawei/eSDK_K8S_Plugin/src/utils/log"
)

// connectorInfo holds the per-connection target information extracted from
// the connection-properties map.
type connectorInfo struct {
	tgtPortals  []string // target portal addresses, one per path
	tgtLunGuids []string // LUN GUIDs, parallel to tgtPortals
}

// shareData is the state shared between the per-portal connect goroutines
// and the coordinating tryConnectVolume.
// NOTE(review): these fields are mutated from several goroutines without any
// synchronization (see tryConnectVolume) — this looks like a data race;
// confirm whether it is tolerated by design.
type shareData struct {
	stopConnecting bool              // set by the coordinator to stop retries
	numLogin       int64             // number of successful portal logins
	failedLogin    int64             // number of failed portal logins
	stoppedThreads int64             // number of finished connect goroutines
	foundDevices   []string          // devices discovered so far
	findDeviceMap  map[string]string // set of discovered devices (dedup)
}

// getNVMeInfo validates and extracts tgtPortals/tgtLunGuids from the
// connection-properties map. Both lists must exist and have equal length.
func getNVMeInfo(connectionProperties map[string]interface{}) (*connectorInfo, error) {
	var con connectorInfo
	tgtPortals, portalExist := connectionProperties["tgtPortals"].([]string)
	if !portalExist {
		msg := "there are no target portals in the connection info"
		log.Errorln(msg)
		return nil, errors.New(msg)
	}
	tgtLunGuids, lunGuidExist := connectionProperties["tgtLunGuids"].([]string)
	if !lunGuidExist {
		msg := "there are no target lun guid in the connection info"
		log.Errorln(msg)
		return nil, errors.New(msg)
	}
	if tgtLunGuids == nil || len(tgtPortals) != len(tgtLunGuids) {
		msg := "the num of tgtPortals and num of tgtLunGuids is not equal"
		log.Errorln(msg)
		return nil, errors.New(msg)
	}
	con.tgtPortals = tgtPortals
	con.tgtLunGuids = tgtLunGuids
	return &con, nil
}

// buildNVMeSession discovers the subsystem NQN for tgtPortal via
// `nvme discover` and logs in with `nvme connect` unless allSessions shows
// the portal is already connected. Returns the target NQN.
func buildNVMeSession(allSessions, tgtPortal string) (string, error) {
	output, err := utils.ExecShellCmd("nvme discover -t rdma -a %s", tgtPortal)
	if err != nil {
		log.Errorf("Cannot discover nvme target %s, reason: %v", tgtPortal, output)
		return "", err
	}
	// Parse the "subnqn: ..." line out of the discovery output.
	var tgtNqn string
	lines := strings.Split(output, "\n")
	for _, line := range lines {
		if strings.Contains(line, "subnqn") {
			splits := strings.SplitN(line, ":", 2)
			if len(splits) == 2 && splits[0] == "subnqn" {
				tgtNqn = strings.Trim(splits[1], " ")
				break
			}
		}
	}
	if strings.Contains(allSessions, tgtPortal) {
		log.Infof("RoCE target %s has already login, no need login again", tgtPortal)
		return tgtNqn, nil
	} else {
		output, err = utils.ExecShellCmd("nvme connect -t rdma -a %s -n %s", tgtPortal, tgtNqn)
		if err != nil {
			log.Errorf("Cannot login nvme target %s, reason: %v", tgtPortal, output)
			return "", err
		}
	}
	return tgtNqn, nil
}

// singleConnectVolume logs in to one portal, then polls (with exponential
// backoff, up to 3 attempts) for the device matching tgtLunGuid, recording
// results into nvmeShareData.
func singleConnectVolume(allSessions, tgtPortal, tgtLunGuid string, nvmeShareData *shareData) {
	var device string

	tgtNqn, err := buildNVMeSession(allSessions, tgtPortal)
	if err != nil {
		log.Errorf("build nvme session %s error, reason: %v", tgtPortal, err)
		nvmeShareData.failedLogin += 1
	} else {
		nvmeShareData.numLogin += 1
		// NOTE(review): "protocol": "iscsi" looks inconsistent with the
		// NVMe-over-RoCE path — confirm connector.ScanNVMe expects this value.
		connectInfo := map[string]interface{}{
			"protocol":  "iscsi",
			"targetNqn": tgtNqn,
		}
		for i := 1; i < 4; i++ {
			connector.ScanNVMe(connectInfo)
			device, err = connector.GetDevice(nvmeShareData.findDeviceMap, tgtLunGuid)
			if err != nil {
				log.Errorf("Get device of guid %s error: %v", tgtLunGuid, err)
				break
			}
			if device != "" {
				break
			}
			if !nvmeShareData.stopConnecting {
				// Exponential backoff: 2s, 4s, 8s between rescans.
				time.Sleep(time.Second * time.Duration(math.Pow(2, float64(i))))
			} else {
				break
			}
		}
		if device != "" {
			nvmeShareData.foundDevices = append(nvmeShareData.foundDevices, device)
			if nvmeShareData.findDeviceMap == nil {
				nvmeShareData.findDeviceMap = map[string]string{
					device: device,
				}
			} else {
				nvmeShareData.findDeviceMap[device] = device
			}
		}
	}
	nvmeShareData.stoppedThreads += 1
	return
}

// findMultiPath greps `multipath -l` output for tgtLunWWN and returns the
// dm-* device name, or "" when none is present.
func findMultiPath(tgtLunWWN string) (string, error) {
	output, err := utils.ExecShellCmd("multipath -l | grep %s", tgtLunWWN)
	if err != nil {
		if strings.Contains(output, "command not found") {
			msg := fmt.Sprintf("run cmd multipath -l error, error: %s", output)
			log.Errorln(msg)
			return "", errors.New(msg)
		}
		return "", err
	}
	var mPath string
	if output != "" {
		multiLines := strings.Split(output, " ")
		for _, line := range multiLines {
			if strings.HasPrefix(line, "dm") {
				mPath = line
				break
			}
		}
	}
	return mPath, nil
}

// findTgtMultiPath polls for a dm multipath device while the connect
// goroutines are running, giving dm 15 extra seconds to appear after all
// threads finish. Returns the dm name or "" on timeout/no devices.
// NOTE(review): `mPath, err := findMultiPath(...)` inside the loop shadows
// the outer mPath, so the loop-exit condition never observes a found path;
// the function only leaves via the early `return mPath` or the
// thread-counting conditions — confirm this is intended.
func findTgtMultiPath(lenIndex int, nvmeShareData *shareData, conn *connectorInfo) string {
	var mPath string
	var lastTryOn int64
	for {
		if (int64(lenIndex) == nvmeShareData.stoppedThreads && nvmeShareData.foundDevices == nil) ||
			(mPath != "" && int64(lenIndex) == nvmeShareData.numLogin+nvmeShareData.failedLogin) {
			break
		}
		mPath, err := findMultiPath(conn.tgtLunGuids[0])
		if err != nil {
			log.Warningf("Can not find dm path, error: %s", err)
		}
		if mPath != "" {
			return mPath
		}
		if lastTryOn == 0 && nvmeShareData.foundDevices != nil && int64(lenIndex) == nvmeShareData.stoppedThreads {
			log.Infoln("All connection threads finished, giving 15 seconds for dm to appear.")
			lastTryOn = time.Now().Unix() + 15
		} else if lastTryOn != 0 && lastTryOn < time.Now().Unix() {
			break
		}
		time.Sleep(1 * time.Second)
	}
	return ""
}

// tryConnectVolume connects to every portal in parallel, prefers the dm
// multipath device when several paths exist, and falls back to the first
// single-path device found. Returns the /dev path or an error when nothing
// was discovered.
func tryConnectVolume(connMap map[string]interface{}) (string, error) {
	conn, err := getNVMeInfo(connMap)
	if err != nil {
		return "", err
	}

	allSessions, err := utils.ExecShellCmd("nvme list-subsys")
	if err != nil {
		return "", err
	}

	var mPath string
	var wait sync.WaitGroup
	var nvmeShareData = new(shareData)
	lenIndex := len(conn.tgtPortals)
	for index := 0; index < lenIndex; index++ {
		// Copy loop variables before capturing them in the goroutine closure.
		tgtPortal := conn.tgtPortals[index]
		tgtLunGuid := conn.tgtLunGuids[index]

		wait.Add(1)

		go func() {
			defer func() {
				wait.Done()
				if r := recover(); r != nil {
					log.Errorf("Runtime error caught in loop routine: %v", r)
					log.Errorf("%s", debug.Stack())
				}
				log.Flush()
			}()

			singleConnectVolume(allSessions, tgtPortal, tgtLunGuid, nvmeShareData)
		}()
	}

	// Only look for a dm device when there is more than one path.
	if lenIndex > 1 && mPath == "" {
		mPath = findTgtMultiPath(lenIndex, nvmeShareData, conn)
	}

	nvmeShareData.stopConnecting = true
	wait.Wait()

	if mPath != "" {
		mPath = fmt.Sprintf("/dev/%s", mPath)
		log.Infof("Found the dm path %s", mPath)
		return mPath, nil
	} else {
		log.Infoln("no dm was created, connection to volume is probably bad and will perform poorly")
	}

	if nvmeShareData.foundDevices != nil {
		dev := fmt.Sprintf("/dev/%s", nvmeShareData.foundDevices[0])
		log.Infof("find the dev %s", nvmeShareData.foundDevices[0])
		return dev, nil
	}

	msg := fmt.Sprintf("volume device not found, lun is %s", conn.tgtLunGuids[0])
	log.Errorln(msg)
	return "", errors.New(msg)
}
package posnode

import (
	"github.com/Fantom-foundation/go-lachesis/src/metrics"
)

var (
	// countNodePeersTop is a package-level counter registered under the name
	// "count_node_peers_top" with nil options — presumably the registry's
	// defaults; confirm against the metrics package.
	countNodePeersTop = metrics.RegisterCounter("count_node_peers_top", nil)
)
package stitchApi // List of types (eg s3, snowflake) GET /v4/destination-types // Get type details (eg Redshift) GET /v4/destination-types/{destination_type}
package message

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"path/filepath"
)

// Config contains the unmarshalled config.json
type Config struct {
	Username string `json:"username"`
	URL      string `json:"url"`
	Secrets  struct {
		ClientID     string `json:"client_id"`
		ClientSecret string `json:"client_secret"`
		VToken       string `json:"verification_token"`
		APIToken     string `json:"api_token"`
	} `json:"secrets"`
}

// LoadConfig unmarshals ./config.json (resolved relative to the current
// working directory) into a Config. On any failure the error is printed and
// the zero-value Config is returned, preserving the original best-effort
// contract.
//
// Fixes: the local variable no longer shadows the path/filepath package;
// the errors from filepath.Abs and json.Unmarshal are no longer silently
// discarded; and a failed read no longer falls through into Unmarshal.
func LoadConfig() Config {
	var c Config

	path, err := filepath.Abs("./config.json")
	if err != nil {
		fmt.Println(err)
		return c
	}

	raw, err := ioutil.ReadFile(path)
	if err != nil {
		fmt.Println(err)
		return c
	}

	if err := json.Unmarshal(raw, &c); err != nil {
		fmt.Println(err)
	}
	return c
}
package main

import (
	"flag"
	"fmt"
	"os"
	"path/filepath"
)

const (
	majorVersion = 3
	minorVersion = 0
	patchVersion = 1

	CONFIG_XML   = "config.xml"
	CONFIG_JSON  = "config.json"
	CONFIG_CACHE = "__cache__"
)

var flagConfigFile string
var flagOutputFile string
var flagLegacy bool

func init() {
	// Fixed: use the CONFIG_XML constant instead of repeating the literal.
	flag.StringVar(&flagConfigFile, "f", filepath.Join(home, CONFIG_XML), "config file")
	flag.StringVar(&flagOutputFile, "o", "", "output file")
	flag.BoolVar(&flagLegacy, "legacy", false, "use legacy (2.0) mode")
}

// usage prints the version banner and command summary to stderr, then exits
// with status 1.
func usage() {
	version(os.Stderr)
	fmt.Fprintf(os.Stderr, "usage: passwdgen [CMD] site\n")
	fmt.Fprintf(os.Stderr, " or: passwdgen -legacy site\n\n")
	flag.PrintDefaults()
	fmt.Fprintln(os.Stderr, `
CMDs:
  make:   generate password
  init:   initialize home
  add:    add site to working cache
  remove: delete site from working cache
  save:   marshal cache to json file
  load:   unmarshal json file to cache
  list:   list all sites
`)
	os.Exit(1)
}

// version writes the semantic version line to f.
func version(f *os.File) {
	fmt.Fprintf(f, "passwdgen version %d.%d.%d\n", majorVersion, minorVersion, patchVersion)
}

func main() {
	flag.Usage = usage
	flag.Parse()

	if flag.NArg() == 0 {
		usage()
		return
	}

	var globalPasswd string

	if flagLegacy {
		// Legacy (2.0) mode: read the XML config, prompt for the master
		// password, and generate every site password.
		conf, err := NewPwdConfigLegacy(flagConfigFile)
		if err != nil {
			fmt.Println(err)
			return
		}
		if globalPasswd, err = DefaultPassReader.ReadPassword(); err != nil || len(globalPasswd) == 0 {
			usage()
		}
		fmt.Println()
		conf.GenAllPassword(globalPasswd)
		if flag.NArg() > 0 {
			conf.SavePassword(flagOutputFile, flag.Arg(0))
		} else {
			fmt.Println(conf)
		}
		return
	}

	switch flag.Arg(0) {
	case "version":
		version(os.Stdout)
		return
	case "init":
		mkConfigDir() //create CONFIG_JSON
		return
	case "conv", "convert": // not yet implemented
	case "add": // not yet implemented
	case "remove", "delete": // not yet implemented
	case "list":
		conf, err := NewPwdConfigLegacy(flagConfigFile)
		if err != nil {
			fmt.Println(err)
			return
		}
		sites := conf.Convert().Cache
		// Fixed: `for i, _ := range` → idiomatic `for i := range`.
		for i := range sites {
			fmt.Println(sites[i].Name, sites[i].URL)
		}
	case "make", "gen", "generate":
		conf, err := NewPwdConfigLegacy(flagConfigFile)
		if err != nil {
			fmt.Println(err)
			return
		}
		if globalPasswd, err = DefaultPassReader.ReadPassword(); err != nil {
			fmt.Println(err)
			return
		}
		fmt.Println()
		// flag.Arg(1) is the site name to generate a password for.
		err = conf.Convert().Make(globalPasswd, flag.Arg(1))
		if err != nil {
			fmt.Println(err)
		}
	default:
		fmt.Printf("Unrecognised command: %s\n", flag.Arg(0))
		return
	}
}
package api

import (
	"GOLANG/entities"
	"GOLANG/models"
	"encoding/json"
	"math/rand"
	"net/http"
	"strconv"
)

// HashUrl shortens the URL given in the "url" query parameter, salting the
// hash with the "username" parameter and a random number, and responds with
// the shortened URL as JSON.
func HashUrl(response http.ResponseWriter, request *http.Request) {
	urls, ok1 := request.URL.Query()["url"]
	userNames, ok2 := request.URL.Query()["username"]
	if !ok1 || !ok2 || len(urls) < 1 || len(userNames) < 1 {
		responseWithError(response, http.StatusBadRequest, "Url Param id is missing")
		return
	}
	userNameHash := models.HashString(userNames[0])
	// BUG FIX: string(rand.Intn(1000000)) converted the int to a single
	// Unicode code point, not its decimal representation. strconv.Itoa
	// produces the intended decimal-digit salt.
	urlHash := models.HashString(urls[0] + userNameHash + strconv.Itoa(rand.Intn(1000000)))
	shortenUrl := models.EncodeString(urlHash)
	shorten := "http://misa/" + shortenUrl[:6]
	responseWithJSON(response, http.StatusOK, shorten)
}

// FindUser looks up one user by the "id" query parameter.
func FindUser(response http.ResponseWriter, request *http.Request) {
	ids, ok := request.URL.Query()["id"]
	if !ok || len(ids) < 1 {
		responseWithError(response, http.StatusBadRequest, "Url Param id is missing")
		return
	}
	user, err := models.FindUser(ids[0])
	if err != nil {
		responseWithError(response, http.StatusBadRequest, err.Error())
		return
	}
	responseWithJSON(response, http.StatusOK, user)
}

// GetAll responds with every user in the store.
func GetAll(response http.ResponseWriter, request *http.Request) {
	users := models.GetAllUser()
	responseWithJSON(response, http.StatusOK, users)
}

// CreateUser decodes a User from the JSON request body and persists it.
func CreateUser(response http.ResponseWriter, request *http.Request) {
	var user entities.User
	// Decode reads the JSON body straight into user (passed by pointer);
	// Encoder would be the writing counterpart.
	if err := json.NewDecoder(request.Body).Decode(&user); err != nil {
		responseWithError(response, http.StatusBadRequest, err.Error())
		return
	}
	if !models.CreateUser(&user) {
		responseWithError(response, http.StatusBadRequest, "Could not create user")
		return
	}
	responseWithJSON(response, http.StatusOK, user)
}

// UpdateUser decodes a User from the JSON request body and updates the
// stored record.
func UpdateUser(response http.ResponseWriter, request *http.Request) {
	var user entities.User
	if err := json.NewDecoder(request.Body).Decode(&user); err != nil {
		responseWithError(response, http.StatusBadRequest, err.Error())
		return
	}
	if !models.UpdateUser(&user) {
		responseWithError(response, http.StatusBadRequest, "Could not update user")
		return
	}
	responseWithJSON(response, http.StatusOK, "Update user successfully")
}

// Delete removes the user named by the "id" query parameter.
func Delete(response http.ResponseWriter, request *http.Request) {
	ids, ok := request.URL.Query()["id"]
	if !ok || len(ids) < 1 {
		responseWithError(response, http.StatusBadRequest, "Url Param id is missing")
		return
	}
	if !models.DeleteUser(ids[0]) {
		responseWithError(response, http.StatusBadRequest, "Could not delete user")
		return
	}
	responseWithJSON(response, http.StatusOK, "Delete user successfully")
}

// responseWithError writes a JSON body of the form {"error": msg} with the
// given HTTP status code.
func responseWithError(response http.ResponseWriter, statusCode int, msg string) {
	responseWithJSON(response, statusCode, map[string]string{
		"error": msg,
	})
}

// responseWithJSON marshals data and writes it with the given status code.
// The Marshal error is deliberately ignored: every payload passed in this
// package (strings, maps, plain structs) is marshalable.
func responseWithJSON(response http.ResponseWriter, statusCode int, data interface{}) {
	result, _ := json.Marshal(data)
	response.Header().Set("Content-Type", "application/json")
	response.WriteHeader(statusCode)
	response.Write(result)
}
// Copyright 2017 Jeff Foley. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.

// +build windows

package main

import (
	"os"
	"os/signal"
	"syscall"

	"github.com/OWASP/Amass/amass"
)

// If the user interrupts the program, print the summary information
//
// signalHandler blocks until the process receives SIGINT (os.Interrupt) or
// SIGTERM, signals the enumeration to stop by closing e.Done, waits on the
// package-level `finished` channel (declared elsewhere in this package —
// presumably signaled once final output is flushed; confirm there), then
// exits with status 1.
func signalHandler(e *amass.Enumeration) {
	quit := make(chan os.Signal, 1)
	signal.Notify(quit, os.Interrupt, syscall.SIGTERM)

	<-quit
	// Start final output operations
	close(e.Done)
	<-finished
	os.Exit(1)
}
/*
Copyright 2020 The Knative Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package common

import (
	"context"
	"errors"
	"testing"

	mf "github.com/manifestival/manifestival"
	"go.uber.org/zap"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	util "knative.dev/operator/pkg/reconciler/common/testing"
	kubeclient "knative.dev/pkg/client/injection/kube/client"
	_ "knative.dev/pkg/client/injection/kube/client/fake"
	"knative.dev/pkg/injection"
	"knative.dev/pkg/logging"
	"knative.dev/pkg/logging/logkey"
)

var (
	// platform accumulates platform functions that succeed; platformErr
	// accumulates ones that fail. Both are mutated by TestTransformers below,
	// so the sub-assertions are order-dependent.
	platform    Platforms
	platformErr Platforms
)

// TestTransformers checks Platforms.Transformers in three states:
// an empty Platforms (no transformers, no error), one successful platform
// (one transformer), and one failing platform (error surfaced, empty result).
func TestTransformers(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	cfg := &rest.Config{}
	ctx, _ = injection.Fake.SetupInformers(ctx, cfg)

	logger := logging.FromContext(ctx).
		Named("test-controller").
		With(zap.String(logkey.ControllerType, "test-controller"))

	// Empty Platforms: no transformers and no error.
	results, err := platform.Transformers(kubeclient.Get(ctx), logger)
	util.AssertEqual(t, err, nil)
	util.AssertEqual(t, len(results), 0)

	// One succeeding platform yields exactly one transformer.
	platform = append(platform, fakePlatform)
	results, err = platform.Transformers(kubeclient.Get(ctx), logger)
	util.AssertEqual(t, err, nil)
	util.AssertEqual(t, len(results), 1)

	// A failing platform propagates its error and yields no transformers.
	platformErr = append(platformErr, fakePlatformErr)
	results, err = platformErr.Transformers(kubeclient.Get(ctx), logger)
	util.AssertEqual(t, err.Error(), "Test Error")
	util.AssertEqual(t, len(results), 0)
}

// fakePlatformErr always fails with "Test Error".
func fakePlatformErr(kubeClient kubernetes.Interface, logger *zap.SugaredLogger) (mf.Transformer, error) {
	return fakeTransformer(), errors.New("Test Error")
}

// fakePlatform always succeeds with a no-op transformer.
func fakePlatform(kubeClient kubernetes.Interface, logger *zap.SugaredLogger) (mf.Transformer, error) {
	return fakeTransformer(), nil
}

// fakeTransformer returns a transformer that leaves resources untouched.
func fakeTransformer() mf.Transformer {
	return func(u *unstructured.Unstructured) error {
		return nil
	}
}
package aoc2015

import (
	"testing"

	aoc "github.com/janreggie/aoc/internal"
	"github.com/stretchr/testify/assert"
)

// Test_newPassword checks the newPassword constructor (defined elsewhere in
// this package) against fixed input/output pairs. From the cases it appears
// to normalize arbitrary strings into valid 8-char passwords — confirm
// against the implementation.
func Test_newPassword(t *testing.T) {
	assert := assert.New(t)
	testCases := []struct {
		input string
		want  string
	}{
		{"heqaabcc", "heqaabcc"},
		{"heqa", "aaaaheqa"},
		{"abcdefgH", "abcdefga"},
		{"abcde~Gh", "abcdezah"},
		{"posdfidpoi", "posdfidp"},
	}
	for _, tt := range testCases {
		assert.Equal(tt.want, newPassword(tt.input).string(), tt.input)
	}
}

// Test_increment checks password.increment, including carry propagation
// ("...azz" -> "...baa") and full wrap-around ("zzzzzzzz" -> "aaaaaaaa").
func Test_increment(t *testing.T) {
	assert := assert.New(t)
	testCases := []struct {
		input string
		want  string
	}{
		{"kayatazy", "kayatazz"},
		{"kayatazz", "kayatbaa"},
		{"zzzzzzzz", "aaaaaaaa"},
	}
	for _, tt := range testCases {
		ps := newPassword(tt.input)
		ps.increment()
		assert.Equal(tt.want, ps.string(), tt.input)
	}
}

// TestDay11 runs the AoC 2015 Day 11 solver against the published examples
// and the author's personal input (day11myInput, defined elsewhere).
func TestDay11(t *testing.T) {
	assert := assert.New(t)
	testCases := []aoc.TestCase{
		{Input: "abcdefgh", Result1: "abcdffaa"},
		{Input: "ghijklmn", Result1: "ghjaabcc"},
		{Details: "Y2015D11 my input", Input: day11myInput, Result1: "hepxxyzz", Result2: "heqaabcc"},
	}
	for _, tt := range testCases {
		tt.Test(Day11, assert)
	}
}

// BenchmarkDay11 benchmarks the solver on the author's input.
func BenchmarkDay11(b *testing.B) {
	aoc.Benchmark(Day11, b, day11myInput)
}
package main

import (
	"bufio"
	"context"
	"fmt"
	"os"
	"strings"

	pb "github.com/AndreaEsposit/bachelors-thesis/echo_server/proto"
	"google.golang.org/grpc"
)

// main connects to the local echo gRPC server and forwards lines typed on
// stdin, printing each echoed reply, until the user types "exit"/"Exit".
func main() {
	conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
	check(err)
	client := pb.NewEchoClient(conn)

	fmt.Println("'Exit'/'exit' to exit this program")

	// BUG FIX: the reader must wrap os.Stdin, not os.Stdout, to read user
	// input. It is also created once, outside the loop: a fresh bufio.Reader
	// per iteration could silently discard input already buffered.
	reader := bufio.NewReader(os.Stdin)
	for {
		fmt.Print("Message to send: ")
		text, _ := reader.ReadString('\n')
		text = strings.Replace(text, "\n", "", -1)
		if text == "exit" || text == "Exit" {
			break
		} else if text == "" {
			continue
		}
		message := &pb.EchoMessage{Content: text}
		returnMessage, err := client.Send(context.Background(), message)
		check(err)
		fmt.Printf("Received this from server: '%v'\n", string(returnMessage.Content))
	}
}

// check panics on any non-nil error (acceptable for this demo client).
func check(err error) {
	if err != nil {
		panic(err)
	}
}
package utils

import (
	"bytes"
	"fmt"
	"os/exec"
)

// NginxReload asks the given nginx binary to reload its configuration by
// running `<nginx> -s reload`. On failure it returns an error that includes
// both the exec error and whatever the process wrote to stderr.
func NginxReload(nginx string) error {
	var errOut bytes.Buffer

	cmd := exec.Command(nginx, "-s", "reload")
	cmd.Stderr = &errOut

	if err := cmd.Run(); err != nil {
		return fmt.Errorf("NginxReload error: '%s' - '%s'", err, errOut.String())
	}
	return nil
}
/* Description 在嵌入式系统开发中,Modbus协议是工业控制系统中广泛应用的一种协议。本题用来简单模拟Modbus协议,只需根据条件生成符合该协议的数据帧,并解析所获取的数据。 假设设备使用的协议发送数据格式如下: <SlaveAddress, 1 Byte> <Function, 1 Byte> <Start Address, 2 Bytes> <NumberofBytes, 2 Bytes> <Checksum, 2 Bytes> 其中前四项将在输入条件中给出,最后一项为CRC校验和,需根据前四项的数据,按照CRC算法进行计算。注意数据的长度,多于1byte的高位在前,低位在后。该CRC校验算法的描述如下: 1)将CRC赋值0xFFFF。 2)取初始信息的第一个字节(8位)与CRC进行异或运算,将结果赋给CRC。 3)将CRC数据右移一位,最前位(左边)补0。 4)如果右移前,CRC最低位(最右端)为1,则将右移后的CRC与0xA001进行异或运算,且将结果赋给CRC。否则,跳过此步。 5)重复3,4步8次(即右边8位)。 6)对初始信息的下一个字节,同样执行2,3,4,5步,直到信息中所有字节都执行了同样的步骤。 7)将此时得到的CRC值的高8位和低8位交换,即得到CRC校验和。 对应的接收格式如下: <SlaveAddress,1Byte> <Function,1Byte> <NumberofBytes,1Byte> <DataIEEE32,xByte> <Checksum,2Bytes> 其中DataIEEE32为一个或多个按IEEE754标准定义的32位浮点数,具体的数据长度由NumberofBytes项来决定(比如NumberofBytes为4,则DataIEEE32项为4 bytes,正好表示一个浮点数;如为8,则DataIEEE32项为8 bytes,可表示两个浮点数)。本题要求编程实现从IEEE32数据(如“420B999A”)到浮点数(如34.9)的转换,从而解析出浮点数值。 提示:你可以根据IEEE754标准自行设计转换算法;或者直接利用C语言float类型的实现特性:x86 linux下,gcc编译器将C语言代码“float f = 34.9;”编译成汇编代码“movl $0x420b999a, -4(%ebp)” (AT&T x86汇编格式),也就是说,单精度浮点数34.9在内存中就是由整数0x420b999a来表示的,你可以利用这一特性来完成转换。 Input 输入包含多组数据,以EOF结束 每组数据共两行。 第一行共四个十进制整数,分别为协议格式要求的:<SlaveAddress, 1 Byte>,<Function, 1 Byte>,<Start Address, 2 Bytes>,<NumberofBytes, 2 Bytes>,以逗号“,”分开。 如:1,4,40,2 其中:1为SlaveAddress;4为Function;40为Start Address;2为NumberofBytes。 第二行为符合接收格式的数据帧(16进制表示),需从其中解析所接收的数据,其长度小于64个字符,浮点数数据最多为4个(即DataIEEE32数据项最多为32bytes)。 如: 010404420B999A7405 其中:01为SlaveAddress;04为Function;04为NumberofBytes; 420B999A 为DataIEEE32;7405为Checksum。 Output 每组数据输出共两行。 第一行:根据输入结果的第一行,输出完整的符合该协议发送格式的数据帧,数据用16进制大写表示,每部分的长度都要求符合协议格式,比如Start Address项如果不到2 bytes,则需要在左边补零。 如:010400280002F1C3 其中:01为SlaveAddress;04 为Function;0028为Start Address;0002为NumberofBytes;F1C3为Checksum。 第二行:根据输入结果的第二行,依次解析IEEE32数据,将其转换成浮点数并打印结果(小数点后保留一位)。解析之前需检查CRC校验和,如校验失败则直接打印CRC_ERROR。如有多个数据,用逗号分隔。 如:34.9 该浮点值为420B999A所对应的值。 Sample Input 1,4,40,2 010404420B999A7405 1,4,40,2 010404420B999A7404 2,4,383,4 02040841CC0000477F2100DF85 Sample Output 010400280002F1C3 34.9 010400280002F1C3 
CRC_ERROR 0204017F0004C1DE 25.5,65313.0 Source */ package main import ( "encoding/binary" "fmt" "math" ) func main() { fmt.Println(encode(1, 4, 40, []byte{0x42, 0x0b, 0x99, 0x9a})) fmt.Println(encode(2, 4, 383, []byte{0x41, 0xCC, 0x00, 0x00, 0x47, 0x7F, 0x21, 0x00})) } func encode(slave, function uint8, addr uint16, data []byte) (hdr string, vals []float32, err error) { datalen := len(data) if datalen/2 > math.MaxUint16 || datalen%4 != 0 { err = fmt.Errorf("invalid data size") return } buf := make([]byte, 8) buf[0] = slave buf[1] = function binary.BigEndian.PutUint16(buf[2:], addr) binary.BigEndian.PutUint16(buf[4:], uint16(datalen/2)) binary.BigEndian.PutUint16(buf[6:], crc16(buf[:len(buf)-2])) for i := 0; i < datalen; i += 4 { bits := binary.BigEndian.Uint32(data[i:]) vals = append(vals, math.Float32frombits(bits)) } hdr = fmt.Sprintf("%02X", buf) return } func crc16(buf []byte) uint16 { crc := uint16(0xFFFF) for _, val := range buf { crc ^= uint16(val) for i := 8; i > 0; i-- { if (crc & 0x0001) != 0 { crc >>= 1 crc ^= 0xA001 } else { crc >>= 1 } } } return crc>>8 | crc<<8 }
//Q8. Merge list and sort //Write a function that merges two sorted lists into a new sorted list, //e.g. merge([1,4,6], [2,3,5]) = [1,2,3,4,5,6]. //Code addapted from the following links //http://austingwalters.com/merge-sort-in-go-golang/ //https://gist.github.com/LordZamy/2adcb6d879fcef557d3d //https://stackoverflow.com/questions/25510958/go-lang-print-inputted-array //https://tour.golang.org/moretypes/11 //https://github.com/ZachOrr/golang-algorithms/blob/master/sorting/merge-sort.go package main //imports paths from package main import "fmt" func main(){ arrayList1 := []int {1,4,6} //creates a new array list with values {1, 4, 6} arrayList2 := []int {2,3,5} //creates a new array list with values {2, 3, 5} fmt.Printf("\n%v", mergeSort(arrayList1, arrayList2)) //calls mergeSort function }//main // Runs MergeSort algorithm on a sortedList single func mergeSort(l1, l2 []int) []int{ arrayLen := len(l1)+len(l2) //initializes arrayLen to the total size of the 2 arrays i, j := 0, 0 //initializes i and j to 0 sortedList := make([]int, arrayLen) //it gives the new list the size of the arrayLen for k := 0; k < arrayLen; k++{ //as long as k is smaller than the size of the array keep the loop going if i > len(l1)-1 && j <= len(l2)-1 { //every time i is bigger than the size of 1st array list-1 and j smaller than the size of 2nd array list-1 //(-1 is used because i starts is 0-5 while len of the array is from 1-6) sortedList[k] = l2[j] //make the element at k position from the sorted list equal to the element at j position from l2 j++ //increment j because 1 element from it was used } else if j > len(l2)-1 && i <= len(l1)-1 { //every time j is bigger than the size of 2nd array list-1 and i smaller than the size of 1st array list-1 sortedList[k] = l1[i] //make the element at k position from the sorted list equal to the element at i position from l1 i++ //increment i because 1 element from it was used } else if l1[i] < l2[j] { //every time an element of l1 at position i is 
smaller than an element of l2 at position j sortedList[k] = l1[i] //make the element at k position from the sorted list equal to the element at i position from l1 i++ //increment i because 1 element from it was used } else { //every time an element of l1 at position i is bigger than an element of l2 at position j - only statement that can be true given the others are false sortedList[k] = l2[j] //make the element at k position from the sorted list equal to the element at j position from l2 j++ //increment j because 1 element from it was used } //if/else..if }//for return sortedList; //returns the value of sortedList }//mergeSort
package main

import (
	"log"
	"net/http"

	"testwork1/src"
)

// main wires ppp.MainHandler to "/" and serves HTTP on :8080.
func main() {
	http.HandleFunc("/", ppp.MainHandler)
	log.Print("Listen on 8080")
	// BUG FIX: the original called close(ppp.EmailChannel) after log.Fatal.
	// log.Fatal never returns (it calls os.Exit), so that close was
	// unreachable dead code; the channel is torn down by process exit.
	log.Fatal(http.ListenAndServe(":8080", nil))
}
// Copyright 2021 Google Inc. // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // // This executable builds the Docker images based off the WASM executables in the // gcr.io/skia-public/skia-wasm-release image. It then issues a PubSub notification to have those apps // tagged and deployed by docker_pushes_watcher. // See //docker_pushes_watcher/README.md in the infra repo for more. package main import ( "context" "flag" "fmt" "io/ioutil" "cloud.google.com/go/pubsub" "google.golang.org/api/option" "go.skia.org/infra/go/auth" docker_pubsub "go.skia.org/infra/go/docker/build/pubsub" "go.skia.org/infra/go/util" "go.skia.org/infra/task_driver/go/lib/auth_steps" "go.skia.org/infra/task_driver/go/lib/checkout" "go.skia.org/infra/task_driver/go/lib/docker" "go.skia.org/infra/task_driver/go/lib/golang" "go.skia.org/infra/task_driver/go/lib/os_steps" "go.skia.org/infra/task_driver/go/td" ) var ( // Required properties for this task. projectId = flag.String("project_id", "", "ID of the Google Cloud project.") taskId = flag.String("task_id", "", "ID of this task.") taskName = flag.String("task_name", "", "Name of the task.") workdir = flag.String("workdir", ".", "Working directory") checkoutFlags = checkout.SetupFlags(nil) // Optional flags. local = flag.Bool("local", false, "True if running locally (as opposed to on the bots)") output = flag.String("o", "", "If provided, dump a JSON blob of step data to the given file. 
Prints to stdout if '-' is given.") ) const ( debuggerImageName = "debugger-app" particlesImageName = "particles" shaderImageName = "shaders" skottieImageName = "skottie" ) var ( infraCommonEnv = []string{ "SKIP_BUILD=1", "ROOT=/WORKSPACE", } ) func buildPushSkottieImage(ctx context.Context, tag, repo, wasmProductsDir, configDir string, topic *pubsub.Topic) error { tempDir, err := os_steps.TempDir(ctx, "", "") if err != nil { return err } image := fmt.Sprintf("gcr.io/skia-public/%s", skottieImageName) cmd := []string{"/bin/sh", "-c", "cd /home/skia/golib/src/go.skia.org/infra/skottie && make release_ci"} volumes := []string{ fmt.Sprintf("%s:/OUT", wasmProductsDir), fmt.Sprintf("%s:/WORKSPACE", tempDir), } return docker.BuildPushImageFromInfraImage(ctx, "Skottie", image, tag, repo, configDir, tempDir, "prod", topic, cmd, volumes, infraCommonEnv, nil) } func buildPushParticlesImage(ctx context.Context, tag, repo, wasmProductsDir, configDir string, topic *pubsub.Topic) error { tempDir, err := os_steps.TempDir(ctx, "", "") if err != nil { return err } image := fmt.Sprintf("gcr.io/skia-public/%s", particlesImageName) cmd := []string{"/bin/sh", "-c", "cd /home/skia/golib/src/go.skia.org/infra/particles && make release_ci"} volumes := []string{ fmt.Sprintf("%s:/OUT", wasmProductsDir), fmt.Sprintf("%s:/WORKSPACE", tempDir), } return docker.BuildPushImageFromInfraImage(ctx, "Particles", image, tag, repo, configDir, tempDir, "prod", topic, cmd, volumes, infraCommonEnv, nil) } func buildPushDebuggerImage(ctx context.Context, tag, repo, wasmProductsDir, configDir string, topic *pubsub.Topic) error { tempDir, err := os_steps.TempDir(ctx, "", "") if err != nil { return err } image := fmt.Sprintf("gcr.io/skia-public/%s", debuggerImageName) cmd := []string{"/bin/sh", "-c", "cd /home/skia/golib/src/go.skia.org/infra/debugger-app && make release_ci"} volumes := []string{ fmt.Sprintf("%s:/OUT", wasmProductsDir), fmt.Sprintf("%s:/WORKSPACE", tempDir), } return 
docker.BuildPushImageFromInfraImage(ctx, "Debugger-App", image, tag, repo, configDir, tempDir, "prod", topic, cmd, volumes, infraCommonEnv, nil) } func buildPushShadersImage(ctx context.Context, tag, repo, wasmProductsDir, configDir string, topic *pubsub.Topic) error { tempDir, err := os_steps.TempDir(ctx, "", "") if err != nil { return err } image := fmt.Sprintf("gcr.io/skia-public/%s", shaderImageName) cmd := []string{"/bin/sh", "-c", "cd /home/skia/golib/src/go.skia.org/infra/shaders && make release_ci"} volumes := []string{ fmt.Sprintf("%s:/OUT", wasmProductsDir), fmt.Sprintf("%s:/WORKSPACE", tempDir), } return docker.BuildPushImageFromInfraImage(ctx, "Shaders", image, tag, repo, configDir, tempDir, "prod", topic, cmd, volumes, infraCommonEnv, nil) } func main() { // Setup. ctx := td.StartRun(projectId, taskId, taskName, output, local) defer td.EndRun(ctx) rs, err := checkout.GetRepoState(checkoutFlags) if err != nil { td.Fatal(ctx, err) } wd, err := os_steps.Abs(ctx, *workdir) if err != nil { td.Fatal(ctx, err) } // Setup go. ctx = golang.WithEnv(ctx, wd) // Create token source with scope for cloud registry (storage) and pubsub. ts, err := auth_steps.Init(ctx, *local, auth.ScopeUserinfoEmail, auth.ScopeFullControl, pubsub.ScopePubSub) if err != nil { td.Fatal(ctx, err) } // Create pubsub client. client, err := pubsub.NewClient(ctx, docker_pubsub.TOPIC_PROJECT_ID, option.WithTokenSource(ts)) if err != nil { td.Fatal(ctx, err) } topic := client.Topic(docker_pubsub.TOPIC) // Figure out which tag to use for docker build and push. tag := rs.Revision if rs.Issue != "" && rs.Patchset != "" { tag = fmt.Sprintf("%s_%s", rs.Issue, rs.Patchset) } // Create a temporary config dir for Docker. configDir, err := ioutil.TempDir("", "") if err != nil { td.Fatal(ctx, err) } defer util.RemoveAll(configDir) // Login to docker (required to push to docker). 
token, err := ts.Token() if err != nil { td.Fatal(ctx, err) } if err := docker.Login(ctx, token.AccessToken, "gcr.io/skia-public/", configDir); err != nil { td.Fatal(ctx, err) } // Run skia-wasm-release image and extract wasm products out of it. wasmProductsDir, err := os_steps.TempDir(ctx, "", "") if err != nil { td.Fatal(ctx, err) } // Run Doxygen pointing to the location of the checkout and the out dir. volumes := []string{ fmt.Sprintf("%s:/OUT", wasmProductsDir), } wasmCopyCmd := []string{"/bin/sh", "-c", "cp -r /tmp/* /OUT"} releaseImg := fmt.Sprintf("gcr.io/skia-public/skia-wasm-release:%s", tag) if err := docker.Run(ctx, releaseImg, configDir, wasmCopyCmd, volumes, nil); err != nil { td.Fatal(ctx, err) } // Build and push all apps of interest below. if err := buildPushSkottieImage(ctx, tag, rs.Repo, wasmProductsDir, configDir, topic); err != nil { td.Fatal(ctx, err) } if err := buildPushParticlesImage(ctx, tag, rs.Repo, wasmProductsDir, configDir, topic); err != nil { td.Fatal(ctx, err) } if err := buildPushDebuggerImage(ctx, tag, rs.Repo, wasmProductsDir, configDir, topic); err != nil { td.Fatal(ctx, err) } if err := buildPushShadersImage(ctx, tag, rs.Repo, wasmProductsDir, configDir, topic); err != nil { td.Fatal(ctx, err) } // Remove all temporary files from the host machine. Swarming gets upset if there are root-owned // files it cannot clean up. cleanupCmd := []string{"/bin/sh", "-c", "rm -rf /OUT/*"} if err := docker.Run(ctx, releaseImg, configDir, cleanupCmd, volumes, nil); err != nil { td.Fatal(ctx, err) } }
package model

// Subscription is a plain data record for a subscription.
// NOTE(review): field semantics below are inferred from the names only —
// confirm against the code that populates these records.
type Subscription struct {
	ID               int    // unique identifier of the subscription
	CreditCardNumber string // card number associated with the subscription
	IsFraud          bool   // presumably: flagged as fraudulent
	IsIncomplete     bool   // presumably: record is missing required data
}
package base

import (
	"errors"
	"gengine/context"
	"reflect"
)

// Arg is a sum-type-like holder: exactly one of the fields below is expected
// to be populated, identifying what kind of argument this is.
type Arg struct {
	Constant     *Constant
	Variable     string
	FunctionCall *FunctionCall
	MethodCall   *MethodCall
	MapVar       *MapVar
	Expression   *Expression
}

// Evaluate resolves the argument to a concrete value, dispatching on
// whichever field is set. The fields are probed in a fixed order (Variable
// first), so if more than one is populated the earlier one wins. An error is
// returned when no field is set.
func (a *Arg) Evaluate(dc *context.DataContext, Vars map[string]reflect.Value) (reflect.Value, error) {
	switch {
	case len(a.Variable) > 0:
		return dc.GetValue(Vars, a.Variable)
	case a.Constant != nil:
		return a.Constant.Evaluate(dc, Vars)
	case a.FunctionCall != nil:
		return a.FunctionCall.Evaluate(dc, Vars)
	case a.MethodCall != nil:
		return a.MethodCall.Evaluate(dc, Vars)
	case a.MapVar != nil:
		return a.MapVar.Evaluate(dc, Vars)
	case a.Expression != nil:
		return a.Expression.Evaluate(dc, Vars)
	default:
		return reflect.ValueOf(nil), errors.New("argHolder holder has more values than want!")
	}
}
package bot

import (
	. "github.com/smartystreets/goconvey/convey"
	"testing"
)

const (
	// serverName is the bare hostname used as the expected value in both
	// scenarios below.
	serverName = "irc.server.com"
)

// TestGetServerName checks that getServerName (defined elsewhere in this
// package, reading the package-level config) returns the host without a
// port, whether or not a ":port" suffix was configured.
func TestGetServerName(t *testing.T) {
	Convey("Given a config message", t, func() {
		// Fresh package-level config for each top-level run.
		config = &Config{}

		Convey("When there is no port specified", func() {
			config.Server = serverName

			So(getServerName(), ShouldEqual, serverName)
		})

		Convey("When there is a port specified", func() {
			config.Server = serverName + ":6667"

			So(getServerName(), ShouldEqual, serverName)
		})
	})
}
package geeRPC

import (
	"bufio"
	"encoding/gob"
	"io"
)

// GobCodec encodes and decodes RPC messages over a connection using the
// standard library's gob format. Reads come straight from conn; writes go
// through buf and are flushed in Write.
type GobCodec struct {
	conn io.ReadWriteCloser // underlying transport, closed by Close
	buf  *bufio.Writer      // buffers outgoing bytes to conn
	dec  *gob.Decoder       // decodes directly from conn
	enc  *gob.Encoder       // encodes into buf
}

// Compile-time interface check, kept from the original (its syntax was
// malformed; the working form would be):
// var _ Codec = (*GobCodec)(nil)

// NewGobCode returns a gob-based Codec wrapping conn.
// (Name kept as-is — presumably a typo for NewGobCodec, but renaming would
// break callers.)
func NewGobCode(conn io.ReadWriteCloser) Codec {
	buf := bufio.NewWriter(conn)
	return &GobCodec{conn: conn, buf: buf, dec: gob.NewDecoder(conn), enc: gob.NewEncoder(buf)}
}

// ReadHeader decodes the next message header from the connection.
func (c *GobCodec) ReadHeader(h *Header) error {
	return c.dec.Decode(h)
}

// ReadBody decodes the next message body into body.
func (c *GobCodec) ReadBody(body interface{}) error {
	return c.dec.Decode(body)
}

// Write encodes header then body into the buffered writer. The deferred
// function always flushes, and closes the codec if either encode failed.
func (c *GobCodec) Write(h *Header, body interface{}) (err error) {
	defer func() {
		_ = c.buf.Flush()
		if err != nil {
			_ = c.Close()
		}
	}()
	if err = c.enc.Encode(h); err != nil {
		return
	}
	if err = c.enc.Encode(body); err != nil {
		return
	}
	return
}

// Close closes the underlying connection.
func (c *GobCodec) Close() error {
	return c.conn.Close()
}
package ismerror

import "fmt"

// IsmError carries a numeric code alongside a human-readable message.
type IsmError struct {
	Code    int
	Message string
}

// Error renders the error as "[<code>] <message>", satisfying the built-in
// error interface.
func (e *IsmError) Error() string {
	return fmt.Sprintf("[%d] %s", e.Code, e.Message)
}
package command

import (
	"fmt"
	"net/http"
	"testing"

	"github.com/mitchellh/cli"
	"github.com/stretchr/testify/assert"

	"github.com/romantomjak/b2/b2"
	"github.com/romantomjak/b2/testutil"
)

// TestListCommand_CanListBuckets stubs the b2_list_buckets API with three
// buckets and checks that each bucket name is printed with a trailing slash.
func TestListCommand_CanListBuckets(t *testing.T) {
	server, mux := testutil.NewServer()
	defer server.Close()

	// Canned API response; the odd spacing inside the JSON is harmless to
	// the parser.
	mux.HandleFunc("/b2api/v2/b2_list_buckets", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, `{
	"buckets": [
	{
		"accountId": "30f20426f0b1",
		"bucketId": "4a48fe8875c6214145260818",
		"bucketInfo": {},
		"bucketName" : "Kitten-Videos",
		"bucketType": "allPrivate",
		"lifecycleRules": []
	},
	{
		"accountId": "30f20426f0b1",
		"bucketId" : "5b232e8875c6214145260818",
		"bucketInfo": {},
		"bucketName": "Puppy-Videos",
		"bucketType": "allPublic",
		"lifecycleRules": []
	},
	{
		"accountId": "30f20426f0b1",
		"bucketId": "87ba238875c6214145260818",
		"bucketInfo": {},
		"bucketName": "Vacation-Pictures",
		"bucketType" : "allPrivate",
		"lifecycleRules": []
	} ]
}`)
	})

	cache, _ := b2.NewInMemoryCache()
	client, _ := b2.NewClient("key-id", "key-secret", b2.SetBaseURL(server.URL), b2.SetCache(cache))

	ui := cli.NewMockUi()
	cmd := &ListCommand{
		baseCommand: &baseCommand{ui: ui, client: client},
	}

	// No args: list all buckets; exit code 0 means success.
	code := cmd.Run([]string{})
	assert.Equal(t, 0, code)

	out := ui.OutputWriter.String()
	assert.Contains(t, out, "Kitten-Videos/")
	assert.Contains(t, out, "Puppy-Videos/")
	assert.Contains(t, out, "Vacation-Pictures/")
}

// TestListCommand_LookupBucketByName stubs an empty bucket list and checks
// that any path argument whose first segment is an unknown bucket fails with
// exit code 1 and a helpful message.
func TestListCommand_LookupBucketByName(t *testing.T) {
	server, mux := testutil.NewServer()
	defer server.Close()

	mux.HandleFunc("/b2api/v2/b2_list_buckets", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, `{"buckets": []}`)
	})

	cache, _ := b2.NewInMemoryCache()
	client, _ := b2.NewClient("key-id", "key-secret", b2.SetBaseURL(server.URL), b2.SetCache(cache))

	ui := cli.NewMockUi()
	cmd := &ListCommand{
		baseCommand: &baseCommand{ui: ui, client: client},
	}

	// Only the first path segment ("bucket-name") is used for the lookup.
	tc := []struct {
		bucketName string
	}{
		{"bucket-name"},
		{"bucket-name/help"},
		{"bucket-name/help/myfile"},
	}
	for _, tt := range tc {
		t.Run(tt.bucketName, func(t *testing.T) {
			code := cmd.Run([]string{tt.bucketName})
			assert.Equal(t, 1, code)

			out := ui.ErrorWriter.String()
			assert.Contains(t, out, `bucket with name "bucket-name" was not found`)

			// Reset shared stderr buffer between subtests.
			ui.ErrorWriter.Reset()
		})
	}
}
package upload

import (
	"bytes"
	"fmt"
	"log"
	"os/exec"
	"testing"

	"os"
)

const (
	// TEST_FILE_DATA is the payload uploaded to S3 in the upload tests.
	TEST_FILE_DATA = "Test Data?"
)

// TestGetMime checks Get_Mime (defined elsewhere in this package) for the
// common image extensions.
func TestGetMime(t *testing.T) {
	mime_t, err := Get_Mime("/local/testpic.png")
	if mime_t != "image/png" {
		if err == nil {
			t.Error("Error was not thrown for PNG")
		}
		t.Errorf("PNG mime type is not %s", mime_t)
	}
	mime_t, err = Get_Mime("testpic2.jpg")
	if mime_t != "image/jpeg" {
		if err == nil {
			t.Error("Error was not thrown for JPG")
		}
		t.Errorf("JPG mime type is not %s", mime_t)
	}
}

// TestGetMimeFail checks that Get_Mime rejects paths without an extension or
// with an unknown extension.
func TestGetMimeFail(t *testing.T) {
	_, err := Get_Mime("folder/testfile")
	if err == nil {
		t.Error("Should've thrown an error for no extension in Get_Mime")
	}
	_, err = Get_Mime("folder/testfile.fakeextensionfortesting")
	if err == nil {
		t.Error("Should've thrown an error for not MIME type in Get_Mime")
	}
}

// TestGetNewName checks Get_New_Name: keep the extension, swap the base name,
// and fail when there is no extension.
func TestGetNewName(t *testing.T) {
	name, err := Get_New_Name("folder/testfile.jpg", "othertest")
	if name != "othertest.jpg" {
		t.Errorf("New name was not correct, it was %s instead of othertest.jpg", name)
	}
	name, err = Get_New_Name("folder/testfile", "othertest")
	if err == nil {
		t.Error("Get_New_Name should've failed")
	}
}

// TestAWSInit verifies the S3 bucket handle is set up; skipped unless AWS
// credentials are present in the environment.
func TestAWSInit(t *testing.T) {
	if !CheckAWSEnabled() {
		t.Skip("AWS access credentials not found, so skipping")
	}
	AWSInit("", "")
	if PicsBucket == nil {
		t.Errorf("Failed to open AWS bucket %s", BUCKET_NAME)
	}
}

// TestUploadFile uploads a small file and then fetches it back from the
// bucket to confirm it exists.
func TestUploadFile(t *testing.T) {
	filename := UploadTestFile(t)
	_, err := PicsBucket.Get(filename)
	if err != nil {
		t.Error(err)
	}
}

// UploadTestFile uploads TEST_FILE_DATA under a uuid-based name and returns
// that name; skips the calling test when AWS is not configured.
func UploadTestFile(t *testing.T) string {
	if !CheckAWSEnabled() {
		t.Skip("AWS access credentials not found, so skipping")
	}
	AWSInit("", "")
	data := bytes.NewReader([]byte(TEST_FILE_DATA))
	filename := fmt.Sprintf("testing_%s.jpg", GetUUID(t))
	err := Upload_S3(data, filename)
	if err != nil {
		t.Error(err)
	}
	return filename
}

// GetUUID shells out to uuidgen for a unique name.
// NOTE(review): the t parameter is unused and failure aborts the whole test
// binary via log.Fatal rather than failing the test.
func GetUUID(t *testing.T) []byte {
	uuid, err := exec.Command("uuidgen").Output()
	if err != nil {
		log.Fatal(err)
	}
	return uuid
}

// CheckAWSEnabled reports whether both AWS credential env vars are set.
func CheckAWSEnabled() bool {
	return os.Getenv("AWS_SECRET_ACCESS_KEY") != "" && os.Getenv("AWS_ACCESS_KEY_ID") != ""
}
package main

import (
	"context"
	"flag"
	"os"
	"os/signal"
	"time"

	"logstream/pkg/client"
	pb "logstream/pkg/proto"
)

// main starts a log-stream client that connects to -raddr, identifies itself
// with -id, reads at the -freq interval, and shuts down cleanly on Ctrl-C.
func main() {
	raddr := flag.String("raddr", ":8500", "remote address of upstream server")
	id := flag.String("id", "1", "unique ID of client")
	readFx := flag.Duration("freq", 1*time.Second, "frequency of reads")
	flag.Parse()

	ctx, cancel := context.WithCancel(context.Background())

	// Cancel the context on SIGINT so client.Run can exit gracefully.
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	defer func() {
		signal.Stop(c)
		cancel()
	}()
	go func() {
		select {
		case <-c:
			cancel()
		case <-ctx.Done():
		}
	}()

	// Fixed: the original declared a local variable named "client", shadowing
	// the imported "client" package from the statement after the assignment on.
	cl := client.NewClient(*raddr, *readFx, nullWriter{})
	cl.Run(ctx, *id)
}

// nullWriter discards every response; used when output is not needed.
type nullWriter struct{}

func (nullWriter) Write(*pb.Response) {}
package main /* func main() { sum := 1 for sum < 1000 { //초기화 구문과 사후 구문은 필수는 아님 sum += sum } fmt.Println(sum) } */ /*func main() { sum := 1 for sum < 1000 { //;을 생략할 수 있다는 점에서 C의 while == Go의 for sum += sum } fmt.Println(sum) }*/ /*func main() { for { } }*/ /* func sqrt(x float64) string { if x < 0 { return sqrt(-x) + "i" } return fmt.Sprint(math.Sqrt(x)) } func main() { fmt.Println(sqrt(2), sqrt(-4)) }*/ /* func pow(x, n, lim float64) float64 { if v := math.Pow(x, n); v < lim { //이 라인에서 선언된 변수들은 if문의 끝까지만 존재 return v } else { fmt.Printf("%g >= %g\n", v, lim) } //여기서 죽음 return lim } func main() { fmt.Println( pow(3, 2, 10), pow(3, 3, 20), ) } */ /* func Sqrt(x float64) float64 { z := float64(1) for { z -= z - (z*z-x)/(2*z) } } func main() { fmt.Println(Sqrt(2)) } */ /* func main() { var sum = 0 var num int Loop1: for i := 0; i < 10; i++ { Loop2: for j := 0; j < 10; j++ { fmt.Print("정수 입력:") fmt.Scanln(&num) if num < 0 { break Loop1 } else { if num > 100 { break Loop2 } } sum += num fmt.Println("j:", j) } fmt.Println("i:", i) } fmt.Println("합계:", sum) } */ //Switch : case /* func main() { fmt.Printf("GO runs on ") switch os := runtime.GOOS; os { case "darwin": fmt.Println("OS X.") case "linux": fmt.Println("Linux.") default: fmt.Printf("%s. \n", os) //freebsd, openbsdd //plan9, windows.. 
} } */ /*func main() { fmt.Println("When's Saturday?") today := time.Now().Weekday() switch time.Saturday { case today + 0: fmt.Println("Today.") case today + 1: fmt.Println("Tomorrow.") case today + 2: fmt.Println("In two days.") default: fmt.Println("Too far away") } } */ /* func main() { t := time.Now() switch { case t.Hour() < 12: fmt.Println("Good morning!") case t.Hour() < 17: fmt.Println("Good Afternoon.") default: fmt.Println("Good evening.") } } */ /* func main() { var score int fmt.Print("점수 입력:") fmt.Scanln(&score) if (score < 0) || (score > 100) { fmt.Println("잘못 입력하셨네요.") return } switch score / 10 { case 10: case 9: fmt.Println("A") case 8: fmt.Println("B") case 7: fmt.Println("C") case 6: fmt.Println("D") default: fmt.Println("F") }*/ /* var x, y int fmt.Print("두 개의 정수 입력:") fmt.Scanln(&x, &y) switch { case x > y: fmt.Println("큰 값:", x) case x < y: fmt.Println("큰 값:", y) default: fmt.Println("두 수는 서로 같습니다.") } }*/ //fallthrough : 특정 case문 실행 후 다음 case문 실행하고 싶을 때 사용. c의 switch에서 break 생략한것과 동일. 
*맨 마지막 case에는 사용 불가 /*func main() { i := 4 switch i { case 4: fmt.Println("4 이상") fallthrough case 3: fmt.Println("3 이상") fallthrough case 2: fmt.Println("2 이상") fallthrough case 1: fmt.Println("1 이상") fallthrough case 0: fmt.Println("0 이상") } } */ //case 여러개 함께 처리 /* func main() { i := 3 switch i { case 2, 4, 6: //i가 2,4,6일 때 fmt.Println("짝수") case 1, 3, 5: fmt.Println("홀수") } } */ /* func main() { fmt.Println("counting") for i := 0; i < 10; i++ { defer fmt.Println(i) } fmt.Println("done!") } */ /* func main() { const ( JAN = iota + 1 FEB MAR APR MAY JUN JUL AUG SEP OCT NOV DEC ) //월 fmt.Println("JAN:", JAN) fmt.Println("JUL:", JUL) fmt.Println("DEC:", DEC) const ( c0 = iota * 10 c1 = iota * 10 c2 = iota * 10 c3 = iota * 10 c4 = iota * 10 ) //10씩 증가하는 상수 fmt.Println("c1 : ", c1) fmt.Println("c1 : ", c2) fmt.Println("c1 : ", c3) fmt.Println("c1 : ", c4) } */ /* func main() { const max_elem int = 5 var i int = 0 fmt.Println("i:", i) i = (i + 1) % max_elem fmt.Println("i:", i) i = (i + 1) % max_elem fmt.Println("i:", i) i = (i + 1) % max_elem fmt.Println("i:", i) i = (i + 1) % max_elem fmt.Println("i:", i) i = (i + 1) % max_elem fmt.Println("i:", i) i = (i + 1) % max_elem fmt.Println("i:", i) } */ /* func main() { fmt.Printf("2<3=%b\n", 2 < 3) //bool fmt.Printf("23은 이진수로 %b\n", 23) //이진수 fmt.Printf("family name %c\n", '홍') //문자 fmt.Printf("10진수 출력:%d\n", 23) //10진수 fmt.Printf("8진수 출력:%o\n", 23) //8진수 fmt.Printf("16진수 출력:%x\n", 23) //16진수,a~f fmt.Printf("16진수 출력:%X\n", 23) //16진수,A~F fmt.Printf("고정소수점:%f\n", 123.4567) //고정소수점 fmt.Printf("고정소수점:%F\n", 123.4567) //고정소수점 fmt.Printf("지수 표현:%e\n", 123.4567) //지수 표현, e fmt.Printf("지수 표현:%E\n", 123.4567) //지수 표현, E fmt.Printf("간단한 실수 표현:%g\n", 123.4567) //간단한 실수 표현 fmt.Printf("간단한 실수 표현:%g\n", 123.4567) //간단한 실수 표현 fmt.Printf("문자열:%s", "홍길동\n") //문자열 var a int = 32 fmt.Printf("메모리주소:%p\n", a) //포인터 fmt.Printf("유니코드 %U\n", '\ud55c') //유니코드 fmt.Printf("%T\n", 23) //타입 fmt.Printf("모든 형식:%v , %v\n", 23, 'a') //모든 형식 
fmt.Printf("형식도 함께:%d %#o , %#x\n", 23, 23, 23) //%#v 형식을 구분할 수 있게 fmt.Printf("%4d %04d\n", 23, 23) //출력 폭 지정, 빈 곳 0출력 fmt.Printf("%-4d %-4d\n", 23, 23) //왼쪽 정렬 fmt.Printf("%9.2f\n", 123.4567) //소수점 이하 자리 출력 지 } */ /* func main() { var name string fmt.Print("이름:") fmt.Scanln(&name) var num int fmt.Print("번호:") fmt.Scanln(&num) var addr string fmt.Print("주소:") fmt.Scanln(&addr) fmt.Println("이름은 ", name, "번호는 ", num) fmt.Println("주소는 ", addr) var name, addr string var num int fmt.Println("이름 번호 주소") var re int re, _ = fmt.Scanln(&name, &num, &addr) fmt.Println("이름은 ", name, " 번호는 ", num) fmt.Println("주소는 ", addr) fmt.Println(re) */ /* var n1, n2, n3, n4 int fmt.Println("IPv4 주소") fmt.Scanf("%d.%d.%d.%d", &n1, &n2, &n3, &n4) fmt.Printf("입력한 IPv4주소는 %d.%d.%d.%d\n", n1, n2, n3, n4) }*/ /* func main() { var scores [3]int = [3]int{1, 2} fmt.Println("=== scores ===") for i := 0; i < len(scores); i++ { fmt.Println(scores[i]) } var scores2 = [3]int{1, 2} fmt.Println("=== scores2 ===") for i := 0; i < len(scores2); i++ { fmt.Println(scores2[i]) } var scores3 = [3]int{1, 2} fmt.Println("=== scores3 ===") for i := 0; i < len(scores3); i++ { fmt.Println(scores3[i]) } scores4 := [...]int{1, 2} fmt.Println("=== scores4 ===") for i := 0; i < len(scores4); i++ { fmt.Println(scores4[i]) } } */ /* func main() { arr := [5]int{12, 34, 23, 56, 34} //var length int //length = len(arr) //for i := 0; i < length; i++ { // fmt.Println(i, ":", arr[i]) //} for _, i := range arr { //fmt.Println(i, ":", value) fmt.Println(i) } } */ /* func main() { var n int fmt.Print("학생수:") fmt.Scanln(&n) var scores []int = make([]int, n) var i int for i = 0; i < n; i++ { fmt.Printf("%d번 성적 : ", i+1) fmt.Scanln(&scores[i]) } fmt.Println("==== 학생 성적 출력 ====") var sum int for i = 0; i < n; i++ { sum += scores[i] fmt.Printf("%d번 성적: %d\n", i+1, sum) } fmt.Printf("총점:%d\n", sum) } */ /* func main() { var arr [5]int = [5]int{90, 88, 76, 80, 99} var scores []int = make([]int, 5, 10) var i int for i = 0; i < 5; i++ { 
scores[i] = arr[i] } fmt.Printf("저장소 크기:%d 보관한 자료 개수:%d\n", cap(scores), len(scores)) var score int for i = 5; i < 10; i++ { fmt.Printf("%d 번 성적 : ", i+1) fmt.Scanln(&score) scores = append(scores, score) fmt.Printf("저장소 크기:%d 보관한 자료 개수:%d\n", cap(scores), len(scores)) } fmt.Println("==== 학생 성적 출력 ====") var sum int for i = 0; i < 10; i++ { sum += scores[i] fmt.Printf("%d번 성적:%d\n", i+1, sum) } fmt.Printf("총점:%d\n", sum) } */ /* func main() { var datas []int fmt.Println(datas) datas = append(datas, 9, 8, 7, 6, 5, 4) fmt.Println(datas) } */ /* func main() { var s []int fmt.Printf("용량:%d 원소 개수:%d\n", cap(s), len(s)) var i int for i = 0; i < 10; i++ { s = append(s, i+1) fmt.Printf("용량:%d 원소 개수:%d\n", cap(s), len(s)) } } */ /* func main() { var scores []int = []int{90, 88, 76, 80, 99} var i int fmt.Printf("저장소 크기:%d 보관한 자료 개수:%d\n", cap(scores), len(scores)) var score int for i = 5; i < 10; i++ { fmt.Printf("%d번 성적:", i+1) fmt.Scanln(&score) scores = append(scores, score) fmt.Printf("저장소 크기:%d 보관한 자료 개수:%d\n", cap(scores), len(scores)) } fmt.Println("==== 학생 성적 출력 ====") var sum int for i = 0; i < 10; i++ { sum += scores[i] fmt.Printf("%d번 성적:%d\n", i+1, sum) } fmt.Printf("총점:%d\n", sum) } */ /* func main() { //array var arr1 [3]int = [3]int{1, 2, 3} var arr2 [3]int fmt.Print("before arr1 : ") fmt.Println(arr1) fmt.Print("before arr2 : ") fmt.Println(arr2) arr2 = arr1 arr2[0] = 8 fmt.Print("after arr1 : ") fmt.Println(arr1) fmt.Print("after arr2 : ") fmt.Println(arr2) //slice var s1 []int = []int{1, 2, 3} var s2 []int fmt.Print("before s1 : ") fmt.Println(s1) fmt.Print("before s2 : ") fmt.Println(s2) s2 = s1 s2[0] = 8 fmt.Print("after s1 : ") fmt.Println(s1) fmt.Print("after s2 : ") fmt.Println(s2) } */ /* func main() { var s1 []int = []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} var s2 []int = make([]int, 3, 5) s2[0] = 8 s2[1] = 9 fmt.Printf("s1 용량 %d 원소 개수:%d\n", cap(s1), len(s1)) fmt.Println(s1) fmt.Printf("s2 용량 %d 원소 개수:%d\n", cap(s2), len(s2)) fmt.Println(s2) copy(s2, s1) 
fmt.Println("===copy(s2,s1) 수행 후===") fmt.Printf("s1 용량 %d 원소 개수:%d\n", cap(s1), len(s1)) fmt.Println(s1) fmt.Printf("s2 용량 %d 원소 개수:%d\n", cap(s2), len(s2)) fmt.Println(s2) } */ /* func main() { var origin_s []int = []int{10, 23, 34, 47, 62, 7, 89, 91, 102} var start int var last int fmt.Print("원본 슬라이스:") fmt.Println(origin_s) fmt.Println("원본 슬라이스에서 부분 슬라이스 만들기") fmt.Printf("시작 인덱스:") fmt.Scanln(&start) fmt.Printf("끝 인덱스:") fmt.Scanln(&last) var sub_s []int = origin_s[start : last+1] fmt.Print("부분 슬라이스:") fmt.Println(sub_s) } */ /* func main() { i, j := 42, 2701 p := &i //i를 가리키는 pointer fmt.Println(*p) //pointer를 통해 i의 값 읽기 *p = 21 //pointer를 통해 i값 설정 fmt.Println(i) p = &j //j를 가리키는 pointer *p = *p / 37 //pointer를 통해 j를 나눔 fmt.Println(j) } */ /* type Vertex struct { X int Y int } //구조체 인스턴스 언언 방법 var ( //1. 일반적인 방식. X=1, Y=2로 초기화 v1 = Vertex{1, 2} //2. X값만 지정, Y는 int에 zero value로 설정 v2 = Vertex{X: 1} //3. X,Y 모두 int에 zero value로 설정 v3 = Vertex{} ) func main() { fmt.Println("v1.X값:", v1.X) v1.X = 4 fmt.Println("v1.X = 4로 바꾼 v1.X의 값 : ", v1.X) //4. 구조체 pointer로도 구조체의 값을 바꿀 수 있다. var p = &v1 p.X = 10 fmt.Println("pointer로 바꾼 v1.X값 : ", v1.X) } */ /* func main() { names := [4]string{ "John", "Paul", "George", "Ringo", } fmt.Println("배열 names:", names) fmt.Println("1. 슬라이스 선언") // 슬라이스 선언방법 // 1. 일반적인 선언방법 : 변수 선언과 비슷합니다. 슬라이스타입은 []type입니다. var s1 []string = names[0:3] // 2. 슬라이스도 var키워드와 타입 명시를 생략할 수 있습니다. s2 := names[0:2] fmt.Println("names[0:3]:", s1) fmt.Println("names[0:2]:", s2) //s1에서 값을 바꾸면 names, s1에서도 바뀐 값을 볼 수 있습니다. fmt.Println("2. 슬라이스로 값 변경") fmt.Println("s1[0]", s1[0]) s1[0] = "XXX" fmt.Println("s1[0] = XXX 실행 후 s1:", s1) fmt.Println("s1[0] = XXX 실행 후 s2:", s2) fmt.Println("s1[0] = XXX 실행 후 names:", names) s2 = s1[0:2] fmt.Println("s2 = s1[0:2] 실행 후 s2:", s2) } */ /* func main() { fmt.Println("1. 슬라이스 리터럴 선언") //1. 기본형 슬라이스 리터럴 q := []int{2, 3, 5, 7, 11, 13} fmt.Println("기본형 슬라이스 리터럴 : ", q) //2. 
구조체 슬라이스 리터럴 s := []struct { i int b bool }{ {2, true}, {3, false}, {5, true}, {7, true}, {11, false}, {13, true}, } fmt.Println("구조체 슬라이스 리터럴:", s) fmt.Println("2. 슬라이스를 슬라이스") q = q[:2] fmt.Println("q[:2]:", q) q = q[1:] fmt.Println("q[1:]:", q) } */ /* func main() { //make로 가변 길이 만들기 a := make([]int, 5) fmt.Printf("a := make([]int, 5)의 \t") printSlice(a) b := make([]int, 0, 5) fmt.Printf("b := make([]int, 0, 5)의 \t") printSlice(b) c := b[:2] fmt.Printf("c := b[:2]의 \t") printSlice(c) d := c[2:5] fmt.Printf("d := c[2:5]의 \t") printSlice(d) //한번에 여러개 원소 추가 가능 d = append(d, 1, 2, 3) fmt.Printf("d = append(d, 1,2,3)후\t") printSlice(d) } func printSlice(s []int) { fmt.Printf("len=%d cap=%d %v\n", len(s), cap(s), s) } */ /* type Vertex struct { Lat, Long float64 } func main() { //1. map 사용 //map[string] type 변수 선언 var mymap map[string]Vertex //make()로 맵 생성 mymap = make(map[string]Vertex) mymap["Bell Labs"] = Vertex{ 40.68433, -74.39967, } fmt.Println("1. mymap[\"Bell Labs\"]: ", mymap["Bell Labs"]) //2. map literal 사용 var mymap_literal = map[string]Vertex{ "Bell Labs": Vertex{ 40.68433, -74.39967, }, "Google": Vertex{ 37.42202, -122.08408, }, } fmt.Println("2. mymap_literal[\"Google\"]", mymap_literal["Google"]) } */ /* func main() { m := make(map[string]int) //1. key-value 지정 m["Answer"] = 42 fmt.Println("m[\"Answer\"]값은:", m["Answer"]) //2. key-value 삭제 delete(m, "Answer") fmt.Println("m[\"Answer\"]값은:", m["Answer"]) //3. key 존재 확인 v, ok := m["Answer"] fmt.Println("m[\"Answer\"]값은", v, "존재하나요?", ok) } */ /* func main() { solarSystem := make(map[string]float32) //key : string, value : float32인 map 생성, 공간 할다으 solarSystem["Mercury"] = 87.969 // 맵[키] = 값 solarSystem["Venus"] = 224.70069 solarSystem["Earth"] = 365.25641 solarSystem["Mars"] = 686.9600 solarSystem["Jupiter"] = 4333.2867 solarSystem["Saturn"] = 10756.1995 solarSystem["Uranus"] = 30707.4896 solarSystem["Neptune"] = 60223.3528 value, ok := solarSystem["Pluto"] // map에 key가 있는지 검사할 때는 return 값을 두 개 사용. 
if ok == false { fmt.Println("Not exist") } else { fmt.Println(value, ok) } if value, ok := solarSystem["Saturn"]; ok { fmt.Println(value) } for key, value := range solarSystem { fmt.Println(key, ":", value) } for _, value := range solarSystem { fmt.Println(value) } } */ //func 함수명(매개변수명 자료형) (리턴값_변수명 자료형){} /* func sum(a int, b int) (r int) { //return 값 변수 이름을 r로 지정 r = a + b return //return 값 변수 사용시 return 뒤에 변수 지정 x } func main() { r := sum(1, 2) fmt.Println(r) } */ //가변인자 사용하기 //func 함수명(매개변수명 ...자료형) 리턴값 _자료형 /* func sum(n ...int) int { //int형 가변인자를 받는 함수 정의 //여기서 가변인자 함수는 INT형 값만 여러 개 받도록 되어있고, SLICE 자체는 받을 수 없다. //따라서 매개변수에 슬라이스만 넣지 않고 뒤에 ...을 붙인다. //...을 붙이면 슬라이스에 들어있는 요소를 각각 넘겨준다. total := 0 for _, value := range n { //range로 가변인자의 모든 값을 꺼냄 total += value //꺼낸 값 모두 더하기 } return total } func main() { //r := sum(1, 2, 3, 4, 5) //fmt.Println(r) n := []int{1, 2, 3, 4, 5} //slice 선언 r := sum(n...) // ...를 사용해 가변인자에 슬라이스를 바로 넘겨줌 fmt.Println(r) } */ //재귀호출 사용하기 /* func factorial(n uint64) uint64 { if n == 0 { return 1 } return n * factorial(n-1) } func main() { fmt.Println(factorial(5)) } */ //함수를 변수에 저장하기 //var 변수명 func(매개변수명 자료형) 리턴값_자료형 = 함수명 /* func sum(a int, b int) int { return a + b } func main() { var hello func(a int, b int) int = sum //함수를 저장하는 변수 선언하고 함수 대입 world := sum //변수 선언과 동시에 함수를 바로 대입 fmt.Println(hello(1, 2)) fmt.Println(world(1, 2)) } */ //슬라이스 = []func(매개변수명 자료형) 리턴값_자료형{함수명1, 함수명2} /* func sum(a int, b int) int { return a + b } func diff(a int, b int) int { return a - b } func main() { f := []func(int, int) int{sum, diff} //함수를 저장할 수 있는 slice 생성한 뒤 함수로 초기화 fmt.Println(f[0](1, 2)) fmt.Println(f[1](1, 2)) } */ //맵 := map[키_자료형]func(매개변수명 자료형) 리턴값_자료형{"키":함수명} /* func sum(a int, b int) int { return a + b } func diff(a int, b int) int { return a - b } func main() { f := map[string]func(int, int) int{ //함수를 저장할 수 있는 맵을 생성한 뒤 함수로 초기화 "sum": sum, "diff": diff, } fmt.Println(f["sum"](1, 2)) //3 : map에 sum 키를 지정해 함수 호출 fmt.Println(f["diff"](1, 2)) 
//-1 : map에 diff 키를 지정해 함수 호출 } */ //익명 함수 사용하기 //func(매개변수명 자료형) 리턴값_자료형 {}() /* func main() { func() { //함수에 이름이 없음 fmt.Println("Hello world!") }() func(s string) { //익명 함수를 정의한 뒤 fmt.Println(s) }("Hello World!") //바로 호출 r := func(a int, b int) int { //익명 함수를 정의한 뒤 return a + b }(1, 2) //바로 호출해 return 값을 변수 r에 저장 fmt.Println(r) //3 } */ //Closure : 함수 안에서 함수를 선언 및 정의할 수 있고, 바깥쪽 함수에 선언된 변수에도 접근할 수 있는 함수. //변수 := func(매개변수명 자료형) 리턴값_자료형 {} /*func main() { //함수 안에서 sum := func(a, b int) int { //익명 함수를 선언 및 정의 return a + b } r := sum(1, 2) //익명함수 사용 fmt.Println(r)//3 a, b := 3, 5 f := func(x int) int { return a*x + b //함수 바깥의 변수 a,b 사용 } y := f(5) fmt.Println(y) //20 } */ //클로저를 사용하는 이유 /* func calc() func(x int) int { a, b := 3, 5 return func(x int) int { return a*x + b //클로저이므로 함수를 호출 할 때 마다 변수 a와 b의 값을 사용할 수 있음 } //익명 함수를 리턴 } func main() { f := calc() // calc 함수를 실행해 리턴값으로 나온 클로저를 변수에 저장. 원래 calc()의 지역변수 a,b는 f에 저장과 동시에 소멸된다. i := 1 for i <= 5 { fmt.Println(f(i)) //클로저를 사용하면 지역변수가 소멸되지 않고, 함수를 호출할 때마다 계속 사용할 수 있다. 클로저는 함수가 선언될 때의 환경을 계속 유지한다. i++ } } */ //defer 함수명() --> 지연호출 //defer 함수명(매개변수) /* func helloworld() { defer func() { //helloworld()가 끝나기 직전에 호출 fmt.Println("world") }() func() { fmt.Println("Hello") }() } func ReadHello() { file, err := os.Open("hello.txt") defer file.Close() //지연 호ㅗ출한 file.Close가 맨 마지막에 호출됨 if err != nil { fmt.Println(err) return //file.Close()호출 } buf := make([]byte, 100) if _, err = file.Read(buf); err != nil { fmt.Println(err) return //file.Close() 호출 } fmt.Println(string(buf)) //file.Close()호츌 } func main() { helloworld() //defer는 stack과 동일하다(LIFO) --> 맨 나중에 지연 호출한 함수가 먼저 실행됨. for i := 0; i < 5; i++ { defer fmt.Printf("%d ", i) } ReadHello() } */ //panic and recover /*func main() { a := [...]int{1, 2} for i := 0; i < 3; i++ { fmt.Println(a[i]) //index out of range } } */ //panic(에러_메시지) 사용시 사용자가 에러 발생 시킬 수 있음 /*func main() { panic("Error !!") fmt.Println("Hello, world!") //실행되지 않음. 
} */ //변수 := recover() //recover()은 panic에서 설정한 error message를 받아올 수 있음. /* func f() { defer func() { //recover()은 defer로 사용해야 함. 그렇지 않으면 프로그램이 바로 종료됨. s := recover() //패닉이 발생해도 프로그램을 종료하지 않음. fmt.Println(s) }() a := [...]int{1, 2} for i := 0; i < 5; i++ { fmt.Println(a[i]) } } func main() { f() fmt.Println("Hello World!") // 패닉이 발생했지만 계속 실행됨 } */ /* func compute(fn func(float64, float64) float64) float64 { return fn(3, 4) } func main() { //1. 함수를 변수에 할당해, 변수를 함수처럼 씁니다. hypot := func(x, y float64) float64 { return math.Sqrt(x*x + y*y) } fmt.Println("1. 변수를 통해 함수 호출", hypot(5, 12)) //2. 함수를 compute 함수에 인자로 전달한다. fmt.Println("2. 함수를 함수에 인자로 전달") fmt.Println("comput(hypot):\t\t", compute(hypot)) fmt.Println("comput(math.Pow):\t", compute(math.Pow)) } */ /* func adder() func(int) int { sum := 0 return func(x int) int { sum += x return sum } } func main() { //adder는 클로져를 리턴, 클로져 pos, neg는 서로 다른 변수 sum을 갖음 pos, neg := adder(), adder() for i := 0; i < 10; i++ { fmt.Println( i, ":", pos(i), neg(-2*i), ) } } */ /* type Vertex struct { X, Y float64 } //1. abs method는 리시버 인자로 v Vertex를 받는다. func (v Vertex) Abs() float64 { return math.Sqrt(v.X*v.X + v.Y*v.Y) } //2. 기본형 타입(여기는 float64)도 메소드를 만들 수 있습니다. type MyFloat float64 func (f MyFloat) Abs() float64 { if f < 0 { return float64(-f) } return float64(f) } //3. MyFloat이 포인터가 아닌 리시버 인자이다. func (f MyFloat) power10() { f = f * MyFloat(10) } //4. MyFloat이 포인터 리시버 인자이다. func (f *MyFloat) power100() { *f = *f * MyFloat(100) } func main() { v := Vertex{3, 4} fmt.Println("1. 점을 씩어 메소드에 접근한다.") fmt.Println("v.Abs():", v.Abs()) f := MyFloat(-math.Sqrt2) fmt.Println("2. numeric type도 메소드 정의가 가능하다.") fmt.Println("f.Abs():", f.Abs()) fmt.Println("3. pointer 리시버를 쓰면 메소드 내부에서 값을 바꿀 수 있다.") fmt.Println("기존의 f\t\t\t\t", f) f.power10() fmt.Println("일반 리시버를 써서 10을 곱한 경우\t", f) f.power100() fmt.Println("포인터 리시버를 써서 100을 곱한 경우\t", f) } */ /* type Vertex struct { X, Y float64 } //1. Vertex pointer 리시버가 있는 메소드이다. Vertex or Vertex pointer로 접근 가능하다. 
func (v *Vertex) Scale(f float64) { v.X = v.X * f v.Y = v.Y * f } //2. Vertex pointer 인자가 있는 함수다. //Vertex pointer만 인자로 들어올 수 있다. func ScaleFunc(v *Vertex, f float64) { v.X = v.X * f v.Y = v.Y * f } func main() { v := Vertex{3, 4} v.Scale(2) ScaleFunc(&v, 10) fmt.Println("1. Vertex{3,4}로 접근했을 떄:", v) p := &Vertex{3, 4} p.Scale(2) ScaleFunc(p, 10) fmt.Println("2. &Vertex{3,4}로 접근했을 때:", p) } */ /* type I interface { M() } type T struct { S string } //1. 별도의 키워드를 쓰지 않아도 T가 인터페이스 I를 구현하게 된다. func (t *T) M() { fmt.Println(t.S) } type F float64 //2. 별도의 키워드를 쓰지 않아도 F가 인터페이스 I를 구현하게 된다. func (f F) M() { fmt.Println(f) } func main() { var i I fmt.Println("1. i = &T{\"Hello\"}에 대해") i = &T{"Hello"} describe(i) i.M() fmt.Println("2. i = F(math.Pi)") i = F(math.Pi) describe(i) i.M() } func describe(i I) { fmt.Printf("인터페이스의 (값, 타입) : (%v, %T)\n", i, i) } */ /* func main() { fmt.Println("1. empty interface에 대해") var i interface{} describe(i) fmt.Println("1. i = 42에 대해") i = 42 describe(i) fmt.Println("1. i = \"hello\"에 대해") i = "hello" describe(i) } func describe(i interface{}) { fmt.Printf("인터페이스 i의 (값, 타입) : (%v, %T)\n", i, i) } */ /* func say(s string) { for i := 0; i < 5; i++ { time.Sleep(100 * time.Millisecond) fmt.Println(s) } } func main() { go say("2. 다른 루틴") say("1. 이 루틴") } */ //-------------------------------------------------------------- //json to map /* func main() { doc := ` { "u_id" : "byeoungwoolee", "u_pw": "1234" } ` var data map[string]interface{} //JSON 문서의 데이터를 저장할 공간을 map으로 선언. json.Unmarshal([]byte(doc), &data) //doc을 바이트 슬라이스로 변환하여 넣고, //data의 포인터를 넣어줌. fmt.Println(data["u_id"], data["u_pw"]) //byeoungwoolee 1234 : map에 key를 지정해 값을 가져옴 } */ //map to json /* func main() { data := make(map[string]interface{}) //문자열을 key로 하고, 모든 자료형을 저장할 수 있는 map 생성 data["u_id"] = "byeoungwoolee" data["u_pw"] = "1234" //doc, _ := json.Marshal(data) //map을 JSON 문서로 변환 --> json.Marshal()은 []byte형식으로 리턴됨. 
doc, _ := json.MarshalIndent(data, "", " ") //JSON 문서로 변환 시 한 줄로 붙어서 나오면 읽기 힘들기 때문에 //json.MarshalIndent()를 사용. //json.MarshalIndent(JSON 문서로 만들 데이터, JSON 문서의 첫 칸에 표시할 문자열.(보통 빈 문자 지정), 들여쓰기 할 문자(공백문자나 탭 문자)) fmt.Println(string(doc)) // {"u_id":"byeoungwoolee", "u_pw":"1234"} : []byte형식을 string으로 변환해서 출력 } */ //json struct //json to data /* type Author struct { Name string Email string } type Comment struct { ID uint64 Author Author //Author 구조체 Content string } type Article struct { //JSON 은 필드 안에 다시 Object나 배열이 들어가는 방식이므로 이에 맞추어 다른 구조체를 넣어준다. //하위 객체면 구조체를 그대로 넣고, 배열이라면 구조체를 배열로 만든다. ID uint64 Title string Author Author //Author 구조체 Content string Recommends []string //문자열 배열 Comments []Comment //Comment 구조체 배열 } func main() { doc := ` [{ "ID": 1, "Title": "Hello, World", "Author":{ "Name": "Maria", "Email": "maria@example.com" }, "Content": "Hello~", "Recommends": [ "John", "Andrew" ], "Comments": [{ "id":1, "Author":{ "Name": "Andrew", "Email": "andrew@hello.com" }, "Content": "Hello Maria" }] }] ` var data []Article //JSON 문서의 DATA를 저장할 구조체 slice 선언 json.Unmarshal([]byte(doc), &data) //doc의 내용을 변환하여 data에 저장 fmt.Println(data) //[{1 Hello, world! {Maria maria@example.com....}}] } */ //data to json //*구조체 필드가 대문자로 시작하면 --> json 문서 안의 키도 대문자로 시작 //JSON 문서 안의 키를 소문자로 시작하고 싶다면 --> 구조체 필드에 태그 지정. /* type Author struct { Name string `json:"name"` Email string `json:"email"` } type Comment struct { Id uint64 `json:"id"` Author Author `json:"author"` //Author 구조체 Content string `json:"content"` } type Article struct { Id uint64 `json:"id"` Title string `json:"title"` Author Author `json:"author"` //Author 구조체 Content string `json:"content"` Recommends []string `json:"recommends"` //문자열 배열 Comments []Comment `json:"comments"` //comment 구조체 배열 } func main() { data := make([]Article, 1) data[0].Id = 1 data[0].Title = "Hello, World!" 
data[0].Author.Name = "Maria" data[0].Author.Email = "maria@example.com" data[0].Content = "Hello~" data[0].Recommends = []string{"John", "Andrew"} data[0].Comments = make([]Comment, 1) //게시물 데이터를 저장할 슬라이스 선언 후, make함수로 공간 할당. //data 채워넣기 data[0].Comments[0].Id = 1 data[0].Comments[0].Author.Name = "Andrew" data[0].Comments[0].Author.Email = "andrew@hello.com" data[0].Comments[0].Content = "Hello Maria" doc, _ := json.MarshalIndent(data, "", " ") //data를 JSON 문서로 변환 fmt.Println(string(doc)) //[{"Id":1,"Title":"Hello, World!",...}] } */ //json file 저장하기 /* type Author struct { Name string `json:"name"` Email string `json:"email"` } type Comment struct { Id uint64 `json:"id"` Author Author `json:"author"` //Author 구조체 Content string `json:"content"` } type Article struct { Id uint64 `json:"id"` Title string `json:"title"` Author Author `json:"author"` //Author 구조체 Content string `json:"content"` Recommends []string `json:"recommends"` //문자열 배열 Comments []Comment `json:"comments"` //comment 구조체 배열 } func main() { data := make([]Article, 1) //값을 저장할 구조체 슬라이스 생성 data[0].Id = 1 data[0].Title = "Hello, World!" data[0].Author.Name = "Maria" data[0].Author.Email = "maria@example.com" data[0].Content = "Hello~" data[0].Recommends = []string{"John", "Andrew"} data[0].Comments = make([]Comment, 1) //게시물 데이터를 저장할 슬라이스 선언 후, make함수로 공간 할당. 
//data 채워넣기 data[0].Comments[0].Id = 1 data[0].Comments[0].Author.Name = "Andrew" data[0].Comments[0].Author.Email = "andrew@hello.com" data[0].Comments[0].Content = "Hello Maria" doc, _ := json.MarshalIndent(data, "", " ") //data를 JSON 문서로 변환 //ioutile.WriteFile(파일 명, json문서, unix/linux type의 file permission(os.FileMode)) err := ioutil.WriteFile("./articles.json", doc, os.FileMode(0644)) //articles.json file에 JSON 문서 저장 if err != nil { fmt.Println(err) return } } */ //저장된 json file 읽어오기 /* type Author struct { Name string `json:"name"` Email string `json:"email"` } type Comment struct { Id uint64 `json:"id"` Author Author `json:"author"` //Author 구조체 Content string `json:"content"` } type Article struct { Id uint64 `json:"id"` Title string `json:"title"` Author Author `json:"author"` //Author 구조체 Content string `json:"content"` Recommends []string `json:"recommends"` //문자열 배열 Comments []Comment `json:"comments"` //comment 구조체 배열 } func main() { b, err := ioutil.ReadFile("./articles.json") //articles.json file의 내용르 읽어서 byte slice에 저장 if err != nil { fmt.Println(err) return } var data []Article //JSON 문서의 데이터를 저장할 구조체 슬라이스 선언 json.Unmarshal(b, &data) //JSON 문서의 내용을 변환해 data에 저장. fmt.Println(data) //[{1 Hello, world! {Maria maria@exa..}}] } */ //암호화 사용하기 //Hash algorithm --> MD5, SHA1, SHA256, SHA512등 DATA에서 고유한 HASH값 추출. pw저장시 사용 //func New() hash.Hash : SHA 512 해시 인스턴스 생성 //func Sum512(data []byte) [Size]byte : SHA512 해시를 계산해 리턴 //func (d *digest) Write(p []byte) (nn int, err error) : 해시 인스턴스에 데이터 추가 //func (d0 *digest) Sum(in []byte) []byte : 해시 인스턴스에 저장된 데이터의 SHA512 해시 값 추출 //SHA512 Algorithm을 사용해 data에서 hash value 추출 /* func main() { s := "Hello World!" 
h1 := sha512.Sum512([]byte(s)) fmt.Printf("%x\n", h1) sha := sha512.New() //SHA512 HASH INSTANCE 생성 sha.Write([]byte("Hello, ")) //HASH INSTANCE에 DATA 추가 sha.Write([]byte("world!")) //HASH INSTANCE에 DATA 추가 h2 := sha.Sum(nil) //HASH INSTANCE에 저장된 DATA의 SHA512 HASH 값 추출 fmt.Printf("%x\n", h2) } */ //AES 대칭키 알고리즘 사용하기 crypto/aes //AES는 BLOCK 암호화 알고리즘이므로 KEY와 DATA의 크기가 일정해햐 한다. //길이가 긴 데이터는 16byte씩 잘라서 암호화함. --> ECB(Electronic Codebook) /* func main() { key := "Hello, key 12345" //16byte s := "Hello, world! 12" //16byte block, err := aes.NewCipher([]byte(key)) //AES 대칭키 암호화 블럭 생성 if err != nil { fmt.Println(err) return //암호화 블록(cipher.Block)이 return. } ciphertext := make([]byte, len(s)) block.Encrypt(ciphertext, []byte(s)) //평문을 AES 알고리즘으로 암호화. Encrypt(암호화된_데이터를_저장할_슬라이스, data) fmt.Printf("%x\n", ciphertext) plaintext := make([]byte, len(s)) block.Decrypt(plaintext, ciphertext) //AES 알고리즘으로 암호화된 데이터를 평문으로 복호화 fmt.Println(string(plaintext)) } */ //CBC(Cipher Block Chaining) : 긴 데이터를 안전하게 암호화하기 위해 사용. //--------------------------------------------------------------------- /* func Listen(net, laddr string) (Listener, error): 프로토콜, IP 주소, 포트 번호를 설정하여 네트워크 연결을 대기합니다. func (l *TCPListener) Accept() (Conn, error): 클라이언트가 연결되면 TCP 연결(커넥션)을 리턴 func (l *TCPListener) Close() error: TCP 연결 대기를 닫음 func (c *TCPConn) Read(b []byte) (int, error): 받은 데이터를 읽음 func (c *TCPConn) Write(b []byte) (int, error): 데이터를 보냄 func (c *TCPConn) Close() error: TCP 연결을 닫음 */ /* func requestHandler(c net.Conn) { //client에서 받은 패킷을 처리하고, client로 패킷을 보내는 함수. data := make([]byte, 4096) //4096 크기의 byte slice 생성 for { //무한 loop 을 돌면서 client에서 보낸 data를 읽어서 다시 client로 보내는 구조. n, err := c.Read(data) //TCP 연결 'c'에서 Read()함수로 client에서 보낸 data 읽음 if err != nil { fmt.Println(err) return } fmt.Println(string(data[:n])) //data 출력 _, err = c.Write(data[:n]) //TCP 연결 'c'에서 Write()함수를 사용해 client로 data를 본냄. 
if err != nil { fmt.Println(err) return } } } func main() { ln, err := net.Listen("tcp", ":8000") //net.Listen()함수에 tcp와 포트번호설정해 TCP 프로토콜에 8000 포트로 연결 받음. if err != nil { fmt.Println(err) return } defer ln.Close() //tcp 연결 대기 'ln'은 서버가 끝나면 닫음. for { // 무한 loop을 돌면서 client의 연결을 기다림. conn, err := ln.Accept() //client가 연결되면 TCP 연결(conn) 리턴 if err != nil { fmt.Println(err) continue } defer conn.Close() //main함수가 끝나기 직전에 TCP 연결을 닫음. go requestHandler(conn) // 그 뒤 패킷을 처리할 requestHandler()함수를 고루틴으로 ㅗ실행. } } */ /* func hello(n *int) { *n = 2 } func main() { var n int = 1 hello(&n) fmt.Println(n) } */ /*type Rectangle struct { width, height int } func main() { var rect Rectangle var rect1 *Rectangle //구조체 pointer 선선 rect1 = new(Rectangle) //구조체 pointer에 메모리 할당 rect2 := new(Rectangle) //구조체 pointer 선언과 동시에 memory 할당 var rect1 Rectangle = Rectangle{10, 20} //구조체 인스턴스 생성하면서 값 초기화 rect2 := Rectangle{45, 62} //var keyword 생략. 구조체 인스턴스 생성하면서 값 초기화 rect3 := Rectangle{width: 30, height: 15} //구조체 필드를 지정해 값 초기화 var rect1 Rectangle //구조체 인스턴스 생성 var rect2 *Rectangle = new(Rectangle) //구조체 포인터 선언 후 메모리 할당 rect1.height = 20 rect2.height = 62 fmt.Println(rect1) // {0 20} fmt.Println(rect2) //&{0 62} --> 구조체 포인터이므로 앞에 & 붙음 } */ /* type Rectangle struct { width, height int } func NewRectangle(width, height int) *Rectangle { return &Rectangle{width, height} //구조체 인스턴스 생성한 뒤 pointer 리턴 } func main() { //rect := NewRectangle(10, 20) rect := &Rectangle{20, 10} fmt.Println(rect) } */ /* type Rectangle struct { width, height int } func rectangleArea(rect *Rectangle) int { return rect.width * rect.height } func rectangleScaleA(rect *Rectangle, factor int) { //매개변수로 구조체 포인터를 받음 rect.width = rect.width * factor //pointer이므로 원래의 값이 변경됨 rect.height = rect.height * factor //pointer이므로 원래의 값이 변경됨 } func rectangleScaleB(rect Rectangle, factor int) { //매개변수로 구조체 인스턴스를 받음 rect.width = rect.width * factor //값이 복사되었으므로 원래의 값에는 영향을 미치지 않음 rect.height = rect.height * factor //값이 복사되었으므로 원래의 값에는 영향을 미치지 않음 } 
func main() { rect1 := Rectangle{30, 30} //area := rectangleArea(&rect) //구조체의 pointer를 넘김 //fmt.Println(area) rectangleScaleA(&rect1, 10) fmt.Println(rect1) rect2 := Rectangle{30, 30} rectangleScaleB(rect2, 10) fmt.Println(rect2) } */ //goroutine /* func hello(n int) { r := rand.Intn(100) //랜덤한 숫자 생성 time.Sleep(time.Duration(r)) //랜덤한 시간동안 대기ㅣ fmt.Println(n) //n출력 } func main() { for i := 0; i < 100; i++ { //100번 반복해 go hello(i) //함수를 고루틴으로 실행(고루틴 100개 생) } fmt.Scanln() //main 함수가 종료되지 않도록 대기 } */ //multicore 사용하기 /* func main() { runtime.GOMAXPROCS(runtime.NumCPU()) //runtime.NumCPU()함수로 CPU 개수를 구한 뒤 runtime.GOMAXPROCS함수에 사용할 최대 CPU 개수 생성 //runtime.GOMAXPROCS 함수는 CPU 코어 개수를 구하지 않고, 특정 값을 설정해도 됨 //runtime.GOMAXPROCS 함수에 0을 넣으면 설정 값은 바꾸지 않으며 현재 설정 값만 리턴 fmt.Println(runtime.GOMAXPROCS(0)) s := "Hello, world!" for i := 0; i < 100; i++ { go func(n int) { fmt.Println(s, n) }(i) } fmt.Scanln() } */ //closure /* func main() { runtime.GOMAXPROCS(1) //CPU 하나만 사용 s := "Hello, world!" for i := 0; i < 100; i++ { go func(n int) { //익명함수를 고루틴으로 실행(클로저) fmt.Println(s, n) //s와 매개변수로 받은 n값 출력 }(i) go func() { fmt.Println(s, i) }() } fmt.Scanln() } */ //Channel --> make(chan 자료형) /* func sum(a int, b int, c chan int) { c <- a + b //channel에 a+b를 보냄 } func main() { // c := make(chan int) //int형 channel 생성. channel을 사용하기 전에는 반드시 make함수로 공간을 할당해야 함. ==> synchronous channel 생성됨 var c chan int //chan int형 변수 선언 c = make(chan int) go sum(1, 2, c) //sum을 고루틴으로 실행한 뒤 채널을 매개변수로 넘겨줌 n := <-c //channel에서 값을 꺼낸 뒤 n에 대입 //<-c는 채널에서 값이 들어올 떄까지 대기. 이 후 채널에 값이 들어오면 대기를 끝내고 다음 코드 실행 ==> 값을 주고 받는 동시에 동기화 역할까지 수행 fmt.Println(n) } */ //동기 채널 /* func main() { done := make(chan bool) //동기 채널 생성 count := 5 //반복할 회시수 go func() { for i := 0; i < count; i++ { done <- true //goroutine에 true 보냄. 값을 꺼낼 떄까지 대기. 따라서 반복문도 실행되지 않으므로 '고루틴:숫자'가 계속해서 출력되지 않음. fmt.Println("고루틴 : ", i) //반복문의 변수 출력 time.Sleep(1 * time.Second) //1초 대기 } }() for i := 0; i < count; i++ { <-done //channel에 값이 들어올 떄까지 대기, 값을 꺼냄. 
fmt.Println("메인 함수 : ", i) //반복문의 변수 출력 } //고루틴 -> 메인함수 -> 고루틴 -> 메인함수 순으로 실행됨 } */ //채널 버퍼링 //채널에 버퍼를 1개 이상 설정하면 비동기ㅣ 채널이 생성됨. //비동기 채널은 보내는 쪽에서 버퍼가 가득차면 실행을 멈추고 //대기하며 받는 쪽에서는 버퍼에 값이 없으면 대기한다. //고루틴 생성후 반복문을 실행할 때마다 채널 done에 true값을 보냄 /* func main() { runtime.GOMAXPROCS(1) done := make(chan bool, 2) //buffer가 2개인 비동기 채널 생성 count := 4 //반복할 횟수 go func() { for i := 0; i < count; i++ { done <- true //channel에 true 보냄. buffer가 가득차면 대기 //비동기 채널이므로 버퍼가 가득찰떄까지 값을 계속 보냄. //여기서는 채널의 버퍼가 2이므로 done에 true를 2번 보낸 뒤 다음 lop에서 대기. fmt.Println("고루틴 : ", i) } }() for i := 0; i < count; i++ { <-done //buffer에 값이 없으면 대기, 값을 꺼냄. //비동기 채널에 버퍼가 2개. 이미 값이 2개 들어있ㅇ음. --> loop을 2번 반복하며 <-done에서 값을 꺼냄. //이후 채널이 비었으므로 실행을 멈추고 대기. //이후 다시 고루틴쪽에서 값을 두번 보내고, 메인에서 2번 꺼냄. // 고루틴 -> 고루틴 -> 메인 -> 메인 순으로 실행 fmt.Println("메인 함수 : ", i) //반복문의 변수 출력 } } */ //range, close //이미 닫힌 채널에 값을 보내면 패닉 발생 //채널을 닫으면 range loop 종료 //채널이 열려있고, 값이 들어오지 않는다면 range는 실행되지 않고 계속 대기. //만약 다른 곳에서 채널에 값을 보냈다면(채널에 값이 들어오면) 그때부터 range 계속 반복됨 // range는 채널에 값이 몇개나 들어올지 모르기 때문에 값이 들어올 때마다 계속 꺼내기 위해 사용. /* func main() { c := make(chan int) //int 형 채널 생성 go func() { for i := 0; i < 5; i++ { c <- i //채널에 값을 보냄 } close(c) }() for i := range c { //range 이용해 채널이 닫힐 때까지 반복해서 값을 꺼냄 fmt.Println(i) //꺼낸 값 출력 } } */ //mutex //RWMutext: 읽기/쓰기 mutex. 읽기와 쓰기 동작을 나누어 lock을 걸 수 있다. //Cond : 조건 변수(condition variable)입니다. 대기하고 있는 하나의 객체를 깨울 수도 있고 여러 개를 동시에 깨울 수도 있습니다. //Once: 특정 함수를 딱 한 번만 실행할 때 사용합니다. //Pool: 멀티 스레드(고루틴)에서 사용할 수 있는 객체 풀입니다. 자주 사용하는 객체를 풀에 보관했다가 다시 사용합니다. //WaitGroup: 고루틴이 모두 끝날 때까지 기다리는 기능입니다. //Atomic: 원자적 연산이라고도 하며 더 이상 쪼갤 수 없는 연산이라는 뜻입니다. 멀티 스레드(고루틴), 멀티코어 환경에서 안전하게 값을 연산하는 기능입니다. 
//sync.Mutex //func(m *Mutex) Lock() : mutex 잠금 //func(m *Mutex) Unlock() : mutex 잠금 해제 /* func main() { runtime.GOMAXPROCS(runtime.NumCPU()) //모든 CPU사용 var data = []int{} //int형 slice 생성 var mutext = new(sync.Mutex) go func() { //goroutine에서 for i := 0; i < 1000; i++ { //1000번 반복하며 mutext.Lock() //mutex 잠금, data slice 보호 시작 data = append(data, 1) //dataslice에 1 추가 mutext.Unlock() //mutex 잠금 해제. data slice 보호 종료 runtime.Gosched() // 다른 goroutine이 cpu를 사용할 수 있도록 양보 } }() go func() { for i := 0; i < 1000; i++ { mutext.Lock() data = append(data, 1) runtime.Gosched() mutext.Unlock() } }() time.Sleep(2 * time.Second) fmt.Println(len(data)) } */ //RWMutex /* func main() { runtime.GOMAXPROCS(runtime.NumCPU()) var data int = 0 var rwMutex = new(sync.RWMutex) //읽기 쓰기 뮤텍스 생성 go func() { //값을 쓰는 고루틴 for i := 0; i < 3; i++ { rwMutex.Lock() //쓰기 뮤텍스 잠금, 쓰기 보호 시작 data += 1 // data에 값 쓰기 fmt.Println("write : ", data) //data값 출력 time.Sleep(10 * time.Millisecond) rwMutex.Unlock() //쓰기 뮤텍스 잠금 해제, 쓰기 보호 종료 } }() go func() { //값을 읽는 고루틴 for i := 0; i < 3; i++ { rwMutex.RLock() //읽기 뮤텍스 잠금, 읽기 보호 시작 fmt.Println("Read1 : ", data) //data 값을 출력(읽기) time.Sleep(1 * time.Second) rwMutex.RUnlock() //읽기 뮤텍스 잠금 해제, 읽기 보호 종료 } }() go func() { //값을 읽는 고루틴 for i := 0; i < 3; i++ { rwMutex.RLock() //읽기 뮤텍스 잠금, 읽기 보호 시작 fmt.Println("Read2 : ", data) //data 값 출력(읽기) time.Sleep(2 * time.Second) rwMutex.RUnlock() //읽기 뮤텍스 잠금, 읽기 보호 시작 } }() time.Sleep(10 * time.Second) //10초동안 프로그램 실행 } */ //조건변수 ==> 대기하고 있는 객체 하나만 깨우너가 여러 개를 동시에 깨울 때 사용 //sync.Cond //func NewCond(l Locker) *Cond: 조건 변수 생성 //func (c *Cond) Wait(): 고루틴 실행을 멈추고 대기 //func (c *Cond) Signal(): 대기하고 있는 고루틴 하나만 깨움 //func (c *Cond) Broadcast(): 대기하고 있는 모든 고루틴을 깨움 /* func main() { runtime.GOMAXPROCS(runtime.NumCPU()) // 모든 CPU 사용 var mutex = new(sync.Mutex) // 뮤텍스 생성 var cond = sync.NewCond(mutex) // 뮤텍스를 이용하여 조건 변수 생성 c := make(chan bool, 3) // 비동기 채널 생성 for i := 0; i < 3; i++ { go func(n int) { // 고루틴 3개 생성 mutex.Lock() // 뮤텍스 잠금, cond.Wait() 
보호 시작 c <- true // 채널 c에 true를 보냄 fmt.Println("wait begin : ", n) cond.Wait() // 조건 변수 대기 fmt.Println("wait end : ", n) mutex.Unlock() // 뮤텍스 잠금 해제, cond.Wait() 보호 종료 }(i) } for i := 0; i < 3; i++ { <-c // 채널에서 값을 꺼냄, 고루틴 3개가 모두 실행될 때까지 기다림 } for i := 0; i < 3; i++ { mutex.Lock() // 뮤텍스 잠금, cond.Signal() 보호 시작 fmt.Println("signal : ", i) cond.Signal() // 대기하고 있는 고루틴을 하나씩 깨움 mutex.Unlock() // 뮤텍스 잠금 해제, cond.Signal() 보고 종료 } fmt.Scanln() } */ /* func main() { runtime.GOMAXPROCS(runtime.NumCPU()) // 모든 CPU 사용 var mutex = new(sync.Mutex) // 뮤텍스 생성 var cond = sync.NewCond(mutex) // 뮤텍스를 이용하여 조건 변수 생성 c := make(chan bool, 3) // 비동기 채널 생성 for i := 0; i < 3; i++ { go func(n int) { // 고루틴 3개 생성 mutex.Lock() // 뮤텍스 잠금, cond.Wait() 보호 시작 c <- true // 채널 c에 true를 보냄 fmt.Println("wait begin : ", n) cond.Wait() // 조건 변수 대기 fmt.Println("wait end : ", n) mutex.Unlock() // 뮤텍스 잠금 해제, cond.Wait() 보호 종료 }(i) } for i := 0; i < 3; i++ { <-c // 채널에서 값을 꺼냄, 고루틴 3개가 모두 실행될 때까지 기다림 } mutex.Lock() // 뮤텍스 잠금, cond.Broadcast() 보호 시작 fmt.Println("broadcast") cond.Broadcast() // 대기하고 있는 모든 고루틴을 깨움 mutex.Unlock() // 뮤텍스 잠금 해제, cond.Signal() 보고 종료 fmt.Scanln() } */ /* import ( "crypto/aes" "crypto/cipher" "crypto/rand" "encoding/hex" "fmt" "io" ) func main() { // Must Kept Secret No Hardcoding , This is for Demo purpose. key := "myverystrongpasswordo32bitlength" // IN CBC Must be Block Size of AES (Multiple of 16) // Other WIse Paddign needs to be perfomed plainText := "1234567890123456" if len(plainText)%aes.BlockSize != 0 { panic("plaintext is not a multiple of the block size") } fmt.Printf("Original Text: %s\n", plainText) fmt.Println("====CBC Encryption/ Decryption====") // IV Length Must be equal to Block Size. 
iv := make([]byte, aes.BlockSize) if _, err := io.ReadFull(rand.Reader, iv); err != nil { panic(err.Error()) } ciphertext := CBCEncrypter(key, plainText, iv, nil) fmt.Printf("CBC Encrypted Text: %s\n", ciphertext) ret := CBCDecrypter(key, ciphertext, iv, nil) fmt.Printf("CBC Decrypted Text: %s\n", ret) } func CBCEncrypter(key string, plaintext string, iv []byte, additionalData []byte) string { block, err := aes.NewCipher([]byte(key)) if err != nil { panic(err) } // include it at the beginning of the ciphertext. ciphertext := make([]byte, aes.BlockSize+len(plaintext)) mode := cipher.NewCBCEncrypter(block, iv) mode.CryptBlocks(ciphertext[aes.BlockSize:], []byte(plaintext)) return hex.EncodeToString(ciphertext) } func CBCDecrypter(key string, ct string, iv []byte, additionalData []byte) string { ciphertext, _ := hex.DecodeString(ct) block, err := aes.NewCipher([]byte(key)) if err != nil { panic(err) } ciphertext = ciphertext[aes.BlockSize:] // CBC mode always works in whole blocks. if len(ciphertext)%aes.BlockSize != 0 { panic("ciphertext is not a multiple of the block size") } mode := cipher.NewCBCDecrypter(block, iv) // CryptBlocks can work in-place if the two arguments are the same. 
mode.CryptBlocks(ciphertext, ciphertext) s := string(ciphertext[:]) return s } */ /* import ( "fmt" "time" ) func main() { ticker := time.NewTicker(3000 * time.Millisecond) done := make(chan bool) go func() { for { select { case <-done: return case t := <-ticker.C: fmt.Println("Tick at", t) } } }() time.Sleep(160000 * time.Millisecond) ticker.Stop() done <- true fmt.Println("Ticker Stopped") } */ /* import ( "fmt" "time" ) func main() { ticker := time.NewTicker(time.Minute) fmt.Println(time.Now().Unix()) defer ticker.Stop() done := make(chan bool) go func() { time.Sleep(10 * time.Minute) done <- true //채널에 'true' 보냄 }() for { select { case <-done: // fmt.Println("Done!") return case t := <-ticker.C: //채널로부터 값을 받으면 t에 입력 fmt.Println("Current time : ", t) } } } */ import ( "fmt" "sync" ) /* func main() { tables := []string{"policy_list", "traffic_log", "detect_log", "id_manage", "server_status_log"} for _, e := range tables { os.Setenv("table", e) } for _, e := range os.Environ() { pair := strings.Split(e, "=") fmt.Println(pair[0], "=>", pair[1]) } }*/ /* type Data struct { tag string //pool tag buffer []int //data store용 slice } func main() { runtime.GOMAXPROCS(runtime.NumCPU()) //모든 cpu 사용 p := sync.Pool{ //pool 할당 New: func() interface{} { //get 함수 사용시 호출될 함수 정의 단 pool에 객체가 없을 때만 호출되므로 객체를 생성하고, 메모리를 할당하는 code를 작성. pool에 객체가 들어있다면 New field의 함수는 호출되지 않고, 보관된 객체가 return 됨. data := new(Data) //새 memory 할당 data.tag = "new" //tag 설정 data.buffer = make([]int, 10) //slice 공간 할당 return data //할당한 memory(object) return }, } for i := 0; i < 10; i++ { go func() { //goroutine 10개 생성 data := p.Get().(*Data) //pool에서 *Data type으로 data 가져옴. 
(Type assertion) for index := range data.buffer { data.buffer[index] = rand.Intn(100) //slice에 random 값 저장 } fmt.Println(data) //data 내용 출력 data.tag = "used" //객체가 사용되었다는 tag 설정 p.Put(data) //pool에 객체 보관 }() } for i := 0; i < 10; i++ { go func() { //goroutine 10개 생성 data := p.Get().(*Data) //pool에서 *Data type으로 data 가져옴 n := 0 for index := range data.buffer { data.buffer[index] = n //slice에 짝수 저장 n += 2 } fmt.Println(data) //data 내용 출력 data.tag = "used" //객체가 사용되었다는 태그 설정 p.Put(data) //pool에 객체 보관 }() } fmt.Scanln() } */ type DB struct { bConnected bool } func (c *DB) connect() { c.bConnected = true fmt.Println("[connection completed]") } func (c *DB) query() { if c.bConnected { fmt.Println(" [query completed]") } else { fmt.Println(" [could not query]") } } func main() { pool := sync.Pool{ New: func() interface{} { r := new(DB) r.bConnected = false return r }, } for i := 0; i < 10; i++ { go func() { c := pool.Get().(*DB) if !c.bConnected { c.connect() } c.query() pool.Put(c) }() } fmt.Scanln() } /* func main() { p := sync.Pool{ New: func() interface{} { return make([]int, 0, 10) }, } for n := 0; n < 10; n++ { // slice := make([]int, 0, 10) slice := p.Get().([]int) //pull에서 slice를 받아옴. pull은 interface를 반환하기 때문에 type assertion 필요/ slice = append(slice, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10) fmt.Println(slice) // 사용을 마친 슬라이스는 초기화 해 pool에 반환 p.Put(slice[:0]) } } */ /* func main() { wg := sync.WaitGroup{} wg.Add(10) /*slice := make([]int, 0, 10) for n := 0; n < 10; n++ { go func() { slice = append(slice, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10) fmt.Println(slice) slice = slice[:0] wg.Done() }() } wg.Wait() p := sync.Pool{ // "A Pool is safe for use by multiple goroutines simultaneously." New: func() interface{} { return make([]int, 0, 10) }, } for n := 0; n < 10; n++ { go func() { slice := p.Get().([]int) slice = append(slice, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10) fmt.Println(slice) p.Put(slice[:0]) wg.Done() }() } wg.Wait() } */
package utils import ( "time" "github.com/pquerna/otp" "github.com/pquerna/otp/totp" ) // 生成TOTP秘钥 func GenerateTOTPSecret(accountName string) (string, error) { key, err := totp.Generate(totp.GenerateOpts{ Issuer: "tpay", AccountName: accountName, Period: 30, Algorithm: otp.AlgorithmSHA512, }) if err != nil { return "", err } // key.Url格式: // otpauth://totp/Example.com:alice@example.com?algorithm=SHA512&digits=6&issuer=Example.com&period=30&secret=XA4HKCWD5AQOQR5HK6INZ3NM36LIGA6C // key.Secret格式: XA4HKCWD5AQOQR5HK6INZ3NM36LIGA6C return key.Secret(), nil } // 验证TOTP密码code func VerifyTOTPPasscode(passcode, secret string) bool { // return totp.Validate(passcode, secret) result, _ := totp.ValidateCustom( passcode, secret, time.Now().UTC(), totp.ValidateOpts{ Period: 30, Skew: 0, Digits: otp.DigitsSix, Algorithm: otp.AlgorithmSHA1, }, ) return result }
package hello import ( "fmt" "net/http" "github.com/gin-gonic/gin" "github.com/wkrzyzanowski/todox-go/server" ) const HELLO_BASE_URL = server.BASE_API_URL + "/hello" type HelloController struct { Endpoints []server.ApiEndpoint } func NewHelloController() *HelloController { return &HelloController{ Endpoints: enpoints, } } func (controller *HelloController) GetEndpoints() []server.ApiEndpoint { return controller.Endpoints } var enpoints = []server.ApiEndpoint{ { HttpMethod: http.MethodGet, RelativePath: HELLO_BASE_URL, HandlerFunc: []gin.HandlerFunc{ GetHello(), }, }, { HttpMethod: http.MethodPost, RelativePath: HELLO_BASE_URL, HandlerFunc: []gin.HandlerFunc{ PostHello(), }, }, } func GetHello() gin.HandlerFunc { return func(ctx *gin.Context) { ctx.JSON(200, gin.H{ "message": "Hello world!", }) } } func PostHello() gin.HandlerFunc { return func(ctx *gin.Context) { jsonData, err := ctx.GetRawData() if err == nil { message := fmt.Sprintf("Hello world! Your message is: %v", string(jsonData)) ctx.JSON(200, gin.H{ "message": message, }) } else { ctx.JSON(404, gin.H{ "message": "Bad Request", }) } } }
package store

import (
	"fmt"
	req "github.com/LapinDmitry/ExampleService/internal/store/sqlRequests"
	gen "github.com/LapinDmitry/ExampleService/third_party/grpcGenerated"
	"strconv"
	"time"
)

// CommonLayout is the timestamp layout shared by this package.
// NOTE(review): "2006-1-2" uses unpadded month/day — confirm this matches
// the format actually produced/consumed elsewhere (RFC3339 would be
// "2006-01-02T15:04:05Z").
const CommonLayout = "2006-1-2T15:04:05Z"

// CreateUser creates a user together with its set of items in one
// transaction. The new user's id is stamped onto each item request before
// the items are inserted.
func (s *Store) CreateUser(createUser *gen.CreateUserRequest) (*gen.User, error) {
	tm := time.Now()
	t := openTransaction(s.db)
	user, err := t.CreateUser(createUser, tm)
	if err != nil {
		// NOTE(review): the transaction is neither committed nor rolled back
		// on error paths in this file — confirm openTransaction cleans up
		// abandoned transactions, otherwise an explicit Rollback is needed.
		return nil, fmt.Errorf("error creating user record! Err(%v)", err)
	}
	createItems := createUser.Items
	for _, createItem := range createItems {
		// Link every item to the freshly created user.
		createItem.UserId = strconv.Itoa(*user.Id)
	}
	items, err := t.CreateItems(createItems, tm)
	if err != nil {
		return nil, fmt.Errorf("error creating items records! Err(%v)", err)
	}
	t.Commit() // NOTE(review): Commit result is ignored here — verify it cannot fail.
	return reqUserReqItemsToGenUser(user, items), nil
}

// UpdateUser updates a user and its set of items in one transaction.
func (s *Store) UpdateUser(updateUser *gen.UpdateUserRequest) (*gen.User, error) {
	tm := time.Now()
	t := openTransaction(s.db)
	user, err := t.UpdateUser(updateUser, tm)
	if err != nil {
		return nil, fmt.Errorf("error updating user record! Err(%v)", err)
	}
	updateItems := updateUser.Items
	items, err := t.UpdateItems(updateItems, tm)
	if err != nil {
		return nil, fmt.Errorf("error updating items records! Err(%v)", err)
	}
	t.Commit()
	return reqUserReqItemsToGenUser(user, items), nil
}

// reqUserReqItemsToGenUser converts *req.User + []*req.Item into a single
// *gen.User with its Items slice populated.
func reqUserReqItemsToGenUser(user *req.User, items []*req.Item) *gen.User {
	genUser := userReqToGen(user)
	genUser.Items = make([]*gen.Item, len(items))
	for i, item := range items {
		genUser.Items[i] = itemReqToGen(item)
	}
	return genUser
}

// DeleteUser deletes a user and its associated items.
func (s *Store) DeleteUser(deleteUser *gen.DeleteUserRequest) error {
	t := openTransaction(s.db)
	err := t.DeleteUser(deleteUser)
	if err != nil {
		return fmt.Errorf("error deleting records! Err(%v)", err)
	}
	t.Commit()
	return nil
}

// CreateItem creates a single item linked to a user, reusing the batch
// CreateItems path with a one-element slice.
func (s *Store) CreateItem(createItem *gen.CreateItemRequest) (*gen.Item, error) {
	tm := time.Now()
	createItems := []*gen.CreateItemRequest{createItem}
	t := openTransaction(s.db)
	items, err := t.CreateItems(createItems, tm)
	if err != nil {
		return nil, fmt.Errorf("error creating item records! Err(%v)", err)
	}
	t.Commit()
	return itemReqToGen(items[0]), nil
}

// UpdateItem updates a single item, reusing the batch UpdateItems path.
func (s *Store) UpdateItem(updateItem *gen.UpdateItemRequest) (*gen.Item, error) {
	tm := time.Now()
	updateItems := []*gen.UpdateItemRequest{updateItem}
	t := openTransaction(s.db)
	items, err := t.UpdateItems(updateItems, tm)
	if err != nil {
		return nil, fmt.Errorf("error updating item records! Err(%v)", err)
	}
	t.Commit()
	return itemReqToGen(items[0]), nil
}
/*
Return the number of even ints in the given array.
Note: the % "mod" operator computes the remainder, e.g. 5 % 2 is 1.
*/
package main

import (
	"fmt"
)

// countEvens returns how many values in a are even.
// A nil or empty slice yields 0.
// (Renamed from count_evens: Go uses mixedCaps, not snake_case.)
func countEvens(a []int) int {
	count := 0
	for _, v := range a {
		if v%2 == 0 {
			count++
		}
	}
	return count
}

// main runs three self-checks and prints OK when all of them pass.
func main() {
	status := 0
	if countEvens([]int{1, 3, 4}) == 1 {
		status++
	}
	if countEvens([]int{0}) == 1 {
		status++
	}
	if countEvens([]int{1, 7}) == 0 {
		status++
	}
	if status == 3 {
		fmt.Println("OK")
	} else {
		fmt.Println("NOT OK")
	}
}
package main

import (
	"fmt"
	"os"
)

// Demonstrates closing a channel from the producer goroutine.
//
// The producer sends 0..5 on an unbuffered channel, then closes it and
// immediately terminates the whole process with exit status 1.
//
// NOTE(review): os.Exit races the consumer's final fmt.Println, so the
// last value(s) may or may not be printed before the process dies; and
// without the Exit, the loop would continue to i == 6 and the next send
// on the closed channel would panic. Confirm this behavior is the point
// of the demo before "fixing" it.
func main() {
	channel := make(chan int)
	go func() {
		for i := 0; i < 10; i++ {
			channel <- i // unbuffered: blocks until main receives
			if i == 5 {
				close(channel) // would end main's range loop...
				os.Exit(1)     // ...but the process exits first (status 1)
			}
		}
	}()
	// Receive until the channel is closed (or the process exits).
	for i := range channel {
		fmt.Println(i)
	}
}
package profile import ( "fmt" "sort" "sync" "time" ) type ProfileEntry struct { Calls int TotalTime time.Duration } type ProfileToken struct { Name string start time.Time } func (this *ProfileToken) Exit() { g_profiler.mutex.Lock() defer g_profiler.mutex.Unlock() var entry *ProfileEntry var ok bool entry, ok = g_profiler.entries[this.Name] if !ok { entry = &ProfileEntry{} g_profiler.entries[this.Name] = entry } entry.Calls++ entry.TotalTime += time.Since(this.start) } type ProfileResult struct { Name string entry *ProfileEntry } func (this *ProfileResult) Avg() time.Duration { return this.entry.TotalTime / time.Duration(this.entry.Calls) } func (this ProfileResult) String() string { return fmt.Sprintf("%64s %6d, %20s, %20s", this.Name, this.entry.Calls, this.entry.TotalTime, this.Avg()) } type Profiler struct { mutex sync.Mutex entries map[string]*ProfileEntry } var g_profiler = Profiler{entries: make(map[string]*ProfileEntry)} func (this *Profiler) Enter(name string) *ProfileToken { return &ProfileToken{name, time.Now()} } func (this *Profiler) Results() (ret []*ProfileResult) { this.mutex.Lock() defer this.mutex.Unlock() for key, entry := range this.entries { ret = append(ret, &ProfileResult{key, entry}) } return ret } func (this *Profiler) SortByName(isAcsending bool) []*ProfileResult { ret := this.Results() if isAcsending { sort.Slice(ret, func(i, j int) bool { return ret[i].Name < ret[j].Name }) } else { sort.Slice(ret, func(i, j int) bool { return ret[i].Name >= ret[j].Name }) } return ret } func (this *Profiler) SortByTotalTime(isAcsending bool) []*ProfileResult { ret := this.Results() if isAcsending { sort.Slice(ret, func(i, j int) bool { return ret[i].entry.TotalTime < ret[j].entry.TotalTime }) } else { sort.Slice(ret, func(i, j int) bool { return ret[i].entry.TotalTime >= ret[j].entry.TotalTime }) } return ret } func (this *Profiler) SortByAvgTime(isAcsending bool) []*ProfileResult { ret := this.Results() if isAcsending { sort.Slice(ret, func(i, j int) 
bool { return ret[i].Avg() < ret[j].Avg() }) } else { sort.Slice(ret, func(i, j int) bool { return ret[i].Avg() >= ret[j].Avg() }) } return ret } func (this *Profiler) String() (ret string) { results := this.SortByAvgTime(false) ret = fmt.Sprintf("%64s %6s, %20s, %20s\n", "Name", "Calls", "Total Time", "Average") for _, v := range results { ret += fmt.Sprintf("%s\n", v) } return ret } func Enter(name string) *ProfileToken { return g_profiler.Enter(name) } func Results(name string) []*ProfileResult { return g_profiler.Results() } func SortByName(isAcsending bool) []*ProfileResult { return g_profiler.SortByName(isAcsending) } func SortByTotalTime(isAcsending bool) []*ProfileResult { return g_profiler.SortByTotalTime(isAcsending) } func SortByAvgTime(isAcsending bool) []*ProfileResult { return g_profiler.SortByAvgTime(isAcsending) }
package controllers import ( "net/http" "strconv" "github.com/martinyonathann/bookstore_items-api/domain/items" "github.com/martinyonathann/bookstore_items-api/logger" "github.com/martinyonathann/bookstore_items-api/services" "github.com/martinyonathann/bookstore_items-api/utils/errors" "go.uber.org/zap" "github.com/gin-gonic/gin" ) func getBookId(userIdParam string) (int64, *errors.RestErr) { userID, userErr := strconv.ParseInt(userIdParam, 10, 64) if userErr != nil { return 0, errors.NewBadRequestError("invalid email address") } return userID, nil } func GetAllBook(c *gin.Context) { flagActive := c.Query("flagActive") logger.RequestLog("ReqGetAllBook", zap.Any("flagActive", flagActive)) items, err := services.ItemsService.GetAll(flagActive) if err != nil { c.JSON(err.Status, err) return } logger.ResponseLog("RespGetAllBook", zap.Any("DataResponse", items)) c.JSON(http.StatusOK, items) } func GetBookById(c *gin.Context) { bookID, idErr := getBookId(c.Param("items_id")) if idErr != nil { c.JSON(idErr.Status, idErr) return } result, getErr := services.ItemsService.GetItemByID(bookID) if getErr != nil { c.JSON(getErr.Status, getErr) } c.JSON(http.StatusOK, result) } func CreateBook(c *gin.Context) { var book items.Item if err := c.ShouldBindJSON(&book); err != nil { restErr := errors.NewBadRequestError("invalid json body") c.JSON(restErr.Status, restErr) return } result, saveErr := services.ItemsService.CreateBook(book) if saveErr != nil { c.JSON(saveErr.Status, saveErr) return } c.JSON(http.StatusCreated, result) }
package main

import "fmt"

// Demonstrates map creation, population and iteration.
func main() {
	// Composite-literal construction with initial entries.
	colors := map[string]string{
		"red":   "#ff0000",
		"green": "#4bf745",
		"white": "#ffffff",
	}

	// make always produces a usable, non-nil (empty) map.
	m := make(map[string]int)
	switch {
	case m == nil:
		fmt.Println("m is nil")
	default:
		fmt.Println("m is not nil")
	}
	m["Udir"] = 10
	m["Udit2"] = 20

	printM(m)
	printMap(colors)
}

// printM dumps every key/value pair of an int-valued map (map order is random).
func printM(d map[string]int) {
	for k, v := range d {
		fmt.Println("key is ", k, " value is ", v)
	}
}

// printMap dumps every color/hex pair (map order is random).
func printMap(c map[string]string) {
	for name, code := range c {
		fmt.Println("hex code for", name, "is", code)
	}
}
package renderer import ( "bytes" "io/ioutil" "reflect" "strings" "text/template" "github.com/Altemista/render/files" "github.com/Altemista/render/renderer/configuration" "github.com/Masterminds/sprig" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) const ( // MissingKeyInvalidOption is the renderer option to continue execution on missing key and print "<no value>" MissingKeyInvalidOption = "missingkey=invalid" // MissingKeyErrorOption is the renderer option to stops execution immediately with an error on missing key MissingKeyErrorOption = "missingkey=error" // MissingKeyZeroOption is the renderer option tries to add default values for missing keys // NOTE: This emits <no value> for types it does not know, which is migitated in the rendered output using strict replacement MissingKeyZeroOption = "missingkey=zero" // LeftDelim is the default left template delimiter LeftDelim = "{{" // RightDelim is the default right template delimiter RightDelim = "}}" ) // Renderer structure holds configuration and options type Renderer struct { configuration *configuration.Configuration options []string tpl *template.Template } // New creates a new renderer with the specified configuration and zero or more options func New(configuration *configuration.Configuration, opts ...string) *Renderer { return &Renderer{ configuration: configuration, options: opts, tpl: template.New("gotpl").Delims(LeftDelim, RightDelim), } } // Partials adds helper templates, functions and variables from the specfied file without rendering it func (r *Renderer) Partials(path string) (*Renderer, error) { input, err := files.ReadInput(path) if err != nil { return r, err } r.tpl, err = r.tpl.New(path).Funcs(r.ExtraFunctions()).Parse(string(input)) return r, err } // SimpleRender is a simple rendering function, also used as a custom template function // to allow in-template recursive rendering, see also Render, RenderWith func (r *Renderer) SimpleRender(rawTemplate string) (string, error) { return 
r.Render(rawTemplate) } // RenderFile takes a file path as input, sets the ParseName for error messages and // then calls Render() with the file contents func (r *Renderer) RenderFile(path string) (string, error) { bytes, err := ioutil.ReadFile(path) if err != nil { return "", errors.Wrapf(err, "unable to read file at %q", path) } if r.tpl != nil && r.tpl.Tree != nil { r.tpl.ParseName = path } return r.Render(string(bytes)) } // Render is the main rendering function, see also SimpleRender, Configuration and ExtraFunctions func (r *Renderer) Render(rawTemplate string) (string, error) { err := r.Validate() if err != nil { logrus.Errorf("Invalid state; %v", err) return "", err } t, err := r.Parse(rawTemplate, r.ExtraFunctions()) if err != nil { logrus.Errorf("Can't parse the template; %v", err) return "", err } out, err := r.Execute(t) if err != nil { logrus.Errorf("Can't execute the template; %v", err) return "", err } return out, nil } // Validate checks the internal state and returns error if necessary func (r *Renderer) Validate() error { if r.configuration != nil { err := r.configuration.Validate() if err != nil { return err } } else { return errors.New("unexpected 'nil' configuration") } for _, o := range r.options { switch o { case MissingKeyErrorOption: case MissingKeyInvalidOption: case MissingKeyZeroOption: default: return errors.Errorf("unexpected option: '%s', option must be in: '%s'", o, strings.Join([]string{MissingKeyInvalidOption, MissingKeyErrorOption, MissingKeyZeroOption}, ", ")) } } return nil } // Parse is a basic template parsing function func (r *Renderer) Parse(rawTemplate string, extraFunctions template.FuncMap) (*template.Template, error) { return r.tpl. Funcs(extraFunctions). Option(r.options...). 
Parse(rawTemplate) } // Execute is a basic template execution function func (r *Renderer) Execute(t *template.Template) (string, error) { var buffer bytes.Buffer err := t.Execute(&buffer, r.configuration) if err != nil { retErr := err logrus.Debugf("(%v): %v", reflect.TypeOf(err), err) if e, ok := err.(template.ExecError); ok { retErr = errors.Wrapf(err, "Error evaluating the template named: '%s'", e.Name) } return "", retErr } return strings.Replace(buffer.String(), "<no value>", "", -1), nil } /* ExtraFunctions provides additional template functions to the standard (text/template) ones, it adds sprig functions and custom functions: - render - calls the render from inside of the template, making the renderer recursive - readFile - reads a file from a given path, relative paths are translated to absolute paths, based on root function - root - the root path for rendering, used relative to absolute path translation in any file based operations - toYaml - provides a configuration data structure fragment as a YAML format - gzip - use gzip compression inside the templates, for best results use with b64enc - ungzip - use gzip extraction inside the templates, for best results use with b64dec */ func (r *Renderer) ExtraFunctions() template.FuncMap { extraFunctions := sprig.TxtFuncMap() // Remove dangerous functions delete(extraFunctions, "env") delete(extraFunctions, "expandenv") extraFunctions["include"] = func(name string, data interface{}) (string, error) { buf := bytes.NewBuffer(nil) if err := r.tpl.ExecuteTemplate(buf, name, data); err != nil { return "", err } return buf.String(), nil } extraFunctions["render"] = r.SimpleRender extraFunctions["readFile"] = r.ReadFile extraFunctions["toYaml"] = ToYaml extraFunctions["ungzip"] = Ungzip extraFunctions["gzip"] = Gzip return extraFunctions }
package server import ( "log" "net/http" "github.com/sirupsen/logrus" ) //Server is a struct representing HTTP server type Server struct { mux *http.ServeMux logger *logrus.Logger } //NewServer returns a server with no routes func NewServer() *Server { return &Server{ mux: http.NewServeMux(), logger: logrus.New(), } } //Start ... func (s *Server) Start(config *Config) error { s.configureRoutes() s.configureLogger(config.LogLevel) return http.ListenAndServe(config.BindAddr, s.mux) } func (s *Server) configureRoutes() { //This handler is for static part of the website: homepage and blog. staticHandler := http.FileServer(http.Dir("./web/static/")) s.mux.Handle("/", s.logRequest(staticHandler)) } func (s *Server) configureLogger(lvl string) { level, err := logrus.ParseLevel(lvl) if err != nil { log.Fatal(err) } s.logger.SetLevel(level) }
package oidc

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestIsSigningAlgLess exercises the ordering predicate used to rank JOSE
// signing algorithms. The cases assert that the relation is irreflexive,
// that HMAC-family algorithms rank below "none" and below the RSA,
// RSA-PSS and ECDSA variants, and that unknown algorithm strings
// ("JS121", "TS512") rank after all known algorithms and are mutually
// unordered.
// NOTE(review): the precise ranking is defined by isSigningAlgLess
// elsewhere in this package; the summary above restates what these
// assertions pin down.
func TestIsSigningAlgLess(t *testing.T) {
	assert.False(t, isSigningAlgLess(SigningAlgRSAUsingSHA256, SigningAlgRSAUsingSHA256)) // irreflexive
	assert.False(t, isSigningAlgLess(SigningAlgRSAUsingSHA256, SigningAlgHMACUsingSHA256))
	assert.True(t, isSigningAlgLess(SigningAlgHMACUsingSHA256, SigningAlgNone))
	assert.True(t, isSigningAlgLess(SigningAlgHMACUsingSHA256, SigningAlgRSAUsingSHA512))
	assert.True(t, isSigningAlgLess(SigningAlgHMACUsingSHA256, SigningAlgRSAPSSUsingSHA256))
	assert.True(t, isSigningAlgLess(SigningAlgHMACUsingSHA256, SigningAlgECDSAUsingP521AndSHA512))
	assert.True(t, isSigningAlgLess(SigningAlgRSAUsingSHA256, SigningAlgECDSAUsingP521AndSHA512))
	assert.True(t, isSigningAlgLess(SigningAlgECDSAUsingP521AndSHA512, "JS121")) // unknown algs sort last
	assert.False(t, isSigningAlgLess("JS121", SigningAlgECDSAUsingP521AndSHA512))
	assert.False(t, isSigningAlgLess("JS121", "TS512")) // two unknowns are unordered
}
package main import ( "flag" "log" "net/http" "os" "github.com/evkuzin/consoleChatWs/server" "github.com/sirupsen/logrus" ) var addr = flag.String("addr", ":8080", "http service address") var logger = &logrus.Logger{ ReportCaller: true, Level: logrus.InfoLevel, Formatter: new(logrus.TextFormatter), Out: os.Stderr, } func serveHome(w http.ResponseWriter, r *http.Request) { log.Println(r.URL) if r.URL.Path != "/" { http.Error(w, "Not found", http.StatusNotFound) return } if r.Method != "GET" { http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) return } http.ServeFile(w, r, "home.html") } func main() { flag.Parse() hub := server.NewHub(logger) go hub.Run() http.HandleFunc("/", serveHome) http.HandleFunc("/ws", func(w http.ResponseWriter, r *http.Request) { server.ServeWs(hub, w, r) }) err := http.ListenAndServe(*addr, nil) if err != nil { log.Fatal("ListenAndServe: ", err) } }
package main

import "fmt"

// reverseInt returns n with its decimal digits reversed
// (e.g. 123 -> 321, 100 -> 1). Assumes n >= 0.
// (Renamed from reverse_int: Go uses mixedCaps, not snake_case.)
func reverseInt(n int) int {
	reversed := 0
	for n > 0 {
		reversed = reversed*10 + n%10
		n /= 10
	}
	return reversed
}

// isPalindrome reports whether n reads the same forwards and backwards.
func isPalindrome(n int) bool {
	return n == reverseInt(n)
}

// largestPalindromeProduct returns the largest palindromic product i*j
// with lo <= i <= j <= hi, or 0 if none exists.
// Generalized from the hard-coded 100..999 range; starting j at i skips
// duplicate (i, j)/(j, i) pairs without changing the result.
func largestPalindromeProduct(lo, hi int) int {
	best := 0
	for i := lo; i <= hi; i++ {
		for j := i; j <= hi; j++ {
			p := i * j
			// Cheap comparison first; only reverse digits when p could win.
			if p > best && isPalindrome(p) {
				best = p
			}
		}
	}
	return best
}

// main prints the largest palindrome made from the product of two
// three-digit numbers (Project Euler problem 4).
func main() {
	fmt.Println(largestPalindromeProduct(100, 999))
}
package tsmt

import (
	"encoding/xml"

	"github.com/thought-machine/finance-messaging/iso20022"
)

// Document05000101 is the XML document wrapper for the ISO 20022
// tsmt.050.001.01 message (RoleAndBaselineRejection).
type Document05000101 struct {
	XMLName xml.Name                     `xml:"urn:iso:std:iso:20022:tech:xsd:tsmt.050.001.01 Document"`
	Message *RoleAndBaselineRejectionV01 `xml:"RoleAndBaselnRjctn"`
}

// AddMessage allocates the message payload and returns it so the caller
// can populate it.
func (d *Document05000101) AddMessage() *RoleAndBaselineRejectionV01 {
	d.Message = new(RoleAndBaselineRejectionV01)
	return d.Message
}

// Scope
// The RoleAndBaselineRejection message is sent by a secondary bank to the matching application if it rejects to join the transaction based on the baseline and the role that it is expected to play.
// Usage
// The RoleAndBaselineRejection message is sent in response to a message that is a direct request to join a transaction.
type RoleAndBaselineRejectionV01 struct {

	// Identifies the rejection message.
	RejectionIdentification *iso20022.MessageIdentification1 `xml:"RjctnId"`

	// Reference to the message that contained the baseline and is rejected.
	RelatedMessageReference *iso20022.MessageIdentification1 `xml:"RltdMsgRef"`

	// Unique identification assigned by the matching application to the transaction.
	// This identification is to be used in any communication between the parties.
	TransactionIdentification *iso20022.SimpleIdentificationInformation `xml:"TxId"`

	// Reason why the user cannot accept the request.
	RejectionReason *iso20022.Reason2 `xml:"RjctnRsn,omitempty"`
}

// AddRejectionIdentification allocates and returns the RjctnId element.
func (r *RoleAndBaselineRejectionV01) AddRejectionIdentification() *iso20022.MessageIdentification1 {
	r.RejectionIdentification = new(iso20022.MessageIdentification1)
	return r.RejectionIdentification
}

// AddRelatedMessageReference allocates and returns the RltdMsgRef element.
func (r *RoleAndBaselineRejectionV01) AddRelatedMessageReference() *iso20022.MessageIdentification1 {
	r.RelatedMessageReference = new(iso20022.MessageIdentification1)
	return r.RelatedMessageReference
}

// AddTransactionIdentification allocates and returns the TxId element.
func (r *RoleAndBaselineRejectionV01) AddTransactionIdentification() *iso20022.SimpleIdentificationInformation {
	r.TransactionIdentification = new(iso20022.SimpleIdentificationInformation)
	return r.TransactionIdentification
}

// AddRejectionReason allocates and returns the optional RjctnRsn element.
func (r *RoleAndBaselineRejectionV01) AddRejectionReason() *iso20022.Reason2 {
	r.RejectionReason = new(iso20022.Reason2)
	return r.RejectionReason
}
package env import ( "context" "net" "net/http" "os" "strings" "github.com/Azure/go-autorest/autorest" "github.com/sirupsen/logrus" "github.com/jim-minter/rp/pkg/env/dev" "github.com/jim-minter/rp/pkg/env/prod" ) type Interface interface { CosmosDB(ctx context.Context) (string, string, error) DNS(ctx context.Context) (string, error) FirstPartyAuthorizer(ctx context.Context) (autorest.Authorizer, error) IsReady() bool ListenTLS(ctx context.Context) (net.Listener, error) Authenticated(h http.Handler) http.Handler } func NewEnv(ctx context.Context, log *logrus.Entry, subscriptionId, resourceGroup string) (Interface, error) { if strings.ToLower(os.Getenv("RP_MODE")) == "development" { log.Warn("running in development mode") return dev.New(ctx, log, subscriptionId, resourceGroup) } return prod.New(ctx, log, subscriptionId, resourceGroup) }
package gominin import ( "errors" "io" "sort" ) type SearchIndex interface { Add(in io.Reader) (Document, error) Search(query string) ([]DocID, error) } type searchIndex struct { store DocumentStore tokenizer Tokenizer termTable TermTable term2positions InvertedIndex } type termIDPosition struct { id TermID pos LocalPosition size int } type termIDPositions []*termIDPosition func NewSearchIndex() SearchIndex { return newSearchIndex() } func newSearchIndex() (si *searchIndex) { si = new(searchIndex) si.store = NewMemoryDocumentStore() si.tokenizer = NewCharTokenizer() si.termTable = NewTermTable() si.term2positions = NewMemoryInvertedIndex() return } func newTermIDPosition(id TermID, pos LocalPosition, size int) (tp *termIDPosition) { tp = new(termIDPosition) tp.id = id tp.pos = pos tp.size = size return } func (si *searchIndex) Add(in io.Reader) (Document, error) { doc := si.store.AddDoc(in, emptyAttrs()) parsed, err := si.parse(doc.GetBytes(), true) if err != nil || len(parsed) == 0 { return nil, err } sort.Sort(parsed) for _, parsedPos := range parsed { si.term2positions.AppendPosition(parsedPos.id, doc.GetGlobalPosition(parsedPos.pos)) } return doc, nil } func (si *searchIndex) Search(query string) ([]DocID, error) { var positions GlobalPositions parsed, err := si.parse([]byte(query), false) if err != nil { return nil, err } for _, termIDPos := range parsed { positions = si.searchPositions(termIDPos, positions) } if len(positions) == 0 { // si.termTable.dump(os.Stderr) // si.term2positions.dump(os.Stderr) return nil, nil } return si.decodeDoc(positions), nil } // termIDPos is query terms. // e.g. [0, 1, 2, 5, ...] // candPositions are candidates from term2positions. // The positions are first searched term location. // Then we check relative positions for each terms. 
func (si *searchIndex) searchPositions(termIDPos *termIDPosition, candPositions GlobalPositions) GlobalPositions {
	positions := si.term2positions.FetchPositions(termIDPos.id)
	nextPositions := make(GlobalPositions, 0)
	if candPositions == nil {
		// First query term: every occurrence, shifted back by the
		// term's offset within the query, is a candidate match start.
		candPositions = make(GlobalPositions, len(positions))
		copy(candPositions, positions)
		for i := range candPositions {
			candPositions[i] -= GlobalPosition(termIDPos.pos)
		}
	}
	// Keep only candidates for which this term occurs at the expected
	// relative offset (binary search over the term's position list).
	for _, candPos := range candPositions {
		foundIndex := BinarySearch(positions, func(index int) int {
			return int(positions[index] - (candPos + GlobalPosition(termIDPos.pos)))
		})
		if foundIndex > -1 {
			nextPositions = append(nextPositions, candPos)
		}
	}
	return nextPositions
}

// parse tokenizes textBytes. With modify=true unknown terms are added
// to the term table; otherwise an unknown term aborts with a
// "NotFound" error.
func (si *searchIndex) parse(textBytes []byte, modify bool) (parsed termIDPositions, err error) {
	parsed = make(termIDPositions, 0)
	si.tokenizer.Init(textBytes)
	token, err := si.tokenizer.Next()
	for err == nil {
		id := si.termTable.GetID(token.Text(), modify)
		if id != NotFound {
			tp := newTermIDPosition(id, LocalPosition(token.Offset()), len(token.Text()))
			parsed = append(parsed, tp)
		} else {
			return nil, errors.New("NotFound")
		}
		token, err = si.tokenizer.Next()
	}
	return parsed, nil
}

// decodeDoc maps matched global positions back to document IDs,
// collapsing consecutive runs of the same document into one entry.
func (si *searchIndex) decodeDoc(positions GlobalPositions) []DocID {
	var curID, prevID DocID
	var docIDs []DocID
	docIDs = make([]DocID, 0)
	prevID = InvalidDocID
	// fmt.Println("decodeDoc positions", positions)
	for _, globalPos := range positions {
		curID = si.store.DecodeDocID(globalPos)
		// fmt.Println("decodeDoc prevID, curID are ", prevID, curID)
		if prevID != curID {
			if prevID != InvalidDocID {
				docIDs = append(docIDs, prevID)
			}
			prevID = curID
		}
	}
	if prevID != InvalidDocID {
		docIDs = append(docIDs, prevID)
	}
	return docIDs
}

// Sort Interface: order by term ID, then by position within the document.
func (tps termIDPositions) Len() int { return len(tps) }

func (tps termIDPositions) Less(i, j int) bool {
	if tps[i].id == tps[j].id {
		return tps[i].pos < tps[j].pos
	}
	return tps[i].id < tps[j].id
}

func (tps termIDPositions) Swap(i, j int) { tps[i], tps[j] = tps[j], tps[i] }

// End of Sort Interface

// For BinarySearchList
func (list GlobalPositions) Len() int { return len(list) }
/** * 公共配置类,用于加载数据库配置等 * Author: tesion * Date: 20th March 2019 * Note: * redis客户端配置受redis服务器配置影响 */ package config import ( "fmt" "github.com/go-ini/ini" ) const ( DEFAULT_DB_PORT = 3306 DEFAULT_DB_MAX_CONN = 10 DEFAULT_REDIS_PORT = 6379 DEFAULT_REDIS_TIMEOUT = 10 DEFAULT_REDIS_DB = 0 DEFAULT_REDIS_MAX_IDLE = 100 DEFAULT_REDIS_IDLE_TIMEOUT = 0 DEFAULT_REDIS_MAX_ACTIVE = 500 ) type DBConfig struct { Host string Port int User string Password string DB string MaxConn int Driver string } type RedisConfig struct { Host string Port int Password string Timeout int DB int MaxIdle int IdleTimeout int MaxActive int } func NewDBConfig() *DBConfig { return new(DBConfig) } func (cfg *DBConfig) LoadConfig(section, configPath string) error { if cfg == nil { return fmt.Errorf("config obj is null") } config, err := ini.Load(configPath) if err != nil { return err } sec := config.Section(section) if sec == nil { return fmt.Errorf("section(%s) not exist", section) } cfg.Host = sec.Key("host").String() cfg.Port = sec.Key("port").MustInt(DEFAULT_DB_PORT) cfg.User = sec.Key("user").String() cfg.Password = sec.Key("password").String() cfg.DB = sec.Key("db").String() cfg.MaxConn = sec.Key("max_conn").MustInt(DEFAULT_DB_MAX_CONN) cfg.Driver = sec.Key("driver").String() return nil } func (cfg *DBConfig) GetDSN() string { str := "" if cfg != nil { str = fmt.Sprintf("%s:%s@tcp(%s:%d)/%s", cfg.User, cfg.Password, cfg.Host, cfg.Port, cfg.DB) } return str } func (cfg *DBConfig) GetDriver() string { if cfg != nil { return cfg.Driver } return "" } func NewRedisConfig() *RedisConfig { return new(RedisConfig) } func (cfg *RedisConfig) LoadConfig(section, path string) error { config, err := ini.Load(path) if err != nil { return fmt.Errorf("load config(%s) error: %s", path, err) } sec := config.Section(section) if sec == nil { return fmt.Errorf("section(%s) not exist", section) } cfg.Host = sec.Key("host").String() cfg.Port = sec.Key("port").MustInt(DEFAULT_REDIS_PORT) cfg.Password = 
sec.Key("password").String() cfg.Timeout = DEFAULT_REDIS_TIMEOUT key := sec.Key("timeout") if key != nil { cfg.Timeout = key.MustInt(DEFAULT_REDIS_TIMEOUT) } cfg.DB = DEFAULT_REDIS_DB key = sec.Key("db") if key != nil { cfg.DB = key.MustInt(DEFAULT_REDIS_DB) } cfg.MaxIdle = DEFAULT_REDIS_MAX_IDLE key = sec.Key("max_idle") if key != nil { cfg.MaxIdle = key.MustInt(DEFAULT_REDIS_MAX_IDLE) } cfg.IdleTimeout = DEFAULT_REDIS_IDLE_TIMEOUT key = sec.Key("idle_timeout") if key != nil { cfg.IdleTimeout = key.MustInt(DEFAULT_REDIS_IDLE_TIMEOUT) } cfg.MaxActive = DEFAULT_REDIS_MAX_ACTIVE key = sec.Key("max_active") if key != nil { cfg.MaxActive = key.MustInt(DEFAULT_REDIS_MAX_ACTIVE) } return nil }
package main

import "fmt"

// There is a pile of peaches. Every day the monkey eats half of the
// remaining peaches plus one more. After eating for 10 days, exactly
// one peach is left on day 11. How many peaches were there initially?
func main() {
	fmt.Println(test(1))
}

// test returns the number of peaches remaining at the start of the
// given day (1-based). Day 11 is the base case with one peach left;
// stepping one day back always yields (remaining+1)*2. Days outside
// [1, 11] return 0.
//
// Fix: the original used day 10 as the base case, which models only
// 9 days of eating; the problem statement says the single peach
// remains on day 11, after 10 days of eating.
func test(day int) int {
	if day > 11 || day < 1 {
		return 0
	}
	if day == 11 {
		return 1
	}
	return (test(day+1) + 1) * 2
}
package ltops

import "io"

// LoadTestOptions defines the possible options when starting a Mattermost load test.
type LoadTestOptions struct {
	ForceBulkLoad bool      // force a bulk load even if data was previously loaded
	ResultsWriter io.Writer // destination the load-test results are written to
}
package acciones

import (
	"FileSystem-LWH/disco/ebr"
	"FileSystem-LWH/disco/mbr"
	"FileSystem-LWH/disco/particion"
	"FileSystem-LWH/util"
	"bytes"
	"encoding/binary"
	"fmt"
	"os"
	"strings"
	"unsafe"
)

// Global in-memory copy of the disk's Master Boot Record; refreshed by
// LeerMBR and written back by actualizarMBR.
var masterBootR mbr.MBR

// CrearDisco creates the binary disk file at path+name with the given
// size (unit "k" = KiB, anything else = MiB), then writes a fresh MBR
// at offset 0.
func CrearDisco(size int64, path string, name string, unit string) {
	crearDirectorio(path)
	// Create the file.
	file, err := os.Create(path + name)
	defer func() { file.Close() }()
	if err != nil {
		panic(err)
	}
	// Scale size by the requested unit.
	switch strings.ToLower(unit) {
	case "k":
		size *= 1024
	case "m":
		fallthrough
	default:
		size *= (1024 * 1024)
	}
	// Initial content: write a 2-byte marker at the end so the file
	// occupies the full requested size.
	var finalCharacter [2]byte
	copy(finalCharacter[:], "\\0")
	// Seek to the final byte position.
	file.Seek(size, 0)
	var binaryCharacter bytes.Buffer
	binary.Write(&binaryCharacter, binary.BigEndian, &finalCharacter)
	escribirBytes(file, binaryCharacter.Bytes())
	var masterBoot mbr.MBR
	// Write the Master Boot Record at the start of the file.
	file.Seek(0, 0)
	masterBoot.Inicializar(size)
	var binaryMBR bytes.Buffer
	binary.Write(&binaryMBR, binary.BigEndian, &masterBoot)
	escribirBytes(file, binaryMBR.Bytes())
}

// escribirBytes writes raw bytes at the file's current offset,
// panicking on any write error.
func escribirBytes(file *os.File, bytes []byte) {
	_, err := file.Write(bytes)
	if err != nil {
		panic(err)
	}
}

// crearDirectorio creates the directory tree for path if needed.
func crearDirectorio(path string) {
	err := os.MkdirAll(path, os.ModePerm) // os.ModePerm
	if err != nil {
		panic(err)
	}
}

// LeerMBR reads the MBR from the start of the disk file into the
// package-level masterBootR.
func LeerMBR(path string) {
	file, err := os.OpenFile(path, os.O_RDWR, 0777)
	defer func() { file.Close() }()
	if err != nil {
		panic(">> 'ERROR, NO SE PUDO ENCONTRAR EL ARCHIVO DEL DISCO'")
	}
	sizeMBR := int(unsafe.Sizeof(masterBootR))
	data := leerBytes(file, sizeMBR)
	buffer := bytes.NewBuffer(data)
	err = binary.Read(buffer, binary.BigEndian, &masterBootR)
	if err != nil {
		panic(err)
	}
}

// leerEBR reads and returns the EBR located at byte offset start.
func leerEBR(path string, start int64) ebr.EBR {
	file, err := os.OpenFile(path, os.O_RDWR, 0777)
	defer func() { file.Close() }()
	if err != nil {
		panic(">> 'ERROR, NO SE PUDO ENCONTRAR EL ARCHIVO DEL DISCO'\n")
	}
	var ebrR ebr.EBR
	file.Seek(0, 0)
	file.Seek(start, 0)
	sizeEBR := int(unsafe.Sizeof(ebrR))
	data := leerBytes(file, sizeEBR)
	buffer := bytes.NewBuffer(data)
	err = binary.Read(buffer, binary.BigEndian, &ebrR)
	if err != nil {
		panic(err)
	}
	return ebrR
}

// leerBytes reads exactly `number` bytes from the current offset.
func leerBytes(file *os.File, number int) []byte {
	bytes := make([]byte, number)
	_, err := file.Read(bytes)
	if err != nil {
		panic(err)
	}
	return bytes
}

// EliminarDisco removes the .dsk file.
func EliminarDisco(path string) {
	err := os.Remove(path)
	if err != nil {
		panic(err)
	}
}

// CrearParticion builds a partition struct and registers it: primary
// and extended partitions go into the MBR's table; logical partitions
// are appended to the EBR chain inside the extended partition.
// unit: b/k/m (default k); fit: bf/ff/wf (default wf); typeS: p/e/l
// (default p).
func CrearParticion(size int64, path string, name string, unit string, typeS string, fit string) {
	LeerMBR(path)
	buscarParticion(name)
	// Scale size by the requested unit.
	switch strings.ToLower(unit) {
	case "b":
		size *= 1
	case "m":
		size *= (1024 * 1024)
	case "k":
		fallthrough
	default:
		size *= 1024
	}
	// Normalize fit to its single-letter code.
	switch strings.ToLower(fit) {
	case "bf":
		fit = "B"
	case "ff":
		fit = "F"
	case "wf":
		fallthrough
	default:
		fit = "W"
	}
	// Normalize the partition type.
	switch strings.ToLower(typeS) {
	case "e":
		typeS = "E"
	case "l":
		typeS = "L"
	case "p":
		fallthrough
	default:
		typeS = "P"
	}
	if (masterBootR.GetTamanio() - int64(unsafe.Sizeof(masterBootR))) >= size {
		if typeS == "P" || typeS == "E" {
			espaciosLibres := buscarEspacioLibre()
			if len(espaciosLibres) == 0 {
				panic(">> NO HAY ESPACIO DISPONIBLE EN EL DISCO")
			}
			/* switch fit { case "B": // Ordenamiento de la lista de particiones libres de menor a mayor sort.SliceStable(particionesLibres, func(i, j int) bool { return particionesLibres[i].Tamanio > particionesLibres[j].Tamanio }) case "W": // Ordenamiento de la lista de particiones libres de mayor a menor sort.SliceStable(particionesLibres, func(i, j int) bool { return particionesLibres[i].Tamanio < particionesLibres[j].Tamanio }) } */
			// Only one extended partition is allowed per disk.
			if typeS == "E" {
				for _, partition := range masterBootR.GetParticiones() {
					if string(partition.GetNombre()) != "" && string(partition.GetTipo()) == "E" {
						panic(">> YA EXISTE UNA PARTICION EXTENDIDA")
					}
				}
			}
			if masterBootR.Particiones[3].Estado == byte(1) {
				panic(">> YA EXISTEN 4 PARTICIONES EN EL DISCO")
			}
			/* for i, particion := range espaciosLibres { fmt.Println("--------------", i) fmt.Println("INCIO", particion.GetInicio()) fmt.Println("TAMAÑO", particion.GetTamanio()) util.LecturaTeclado() } */
			// Place the partition in the first inactive slot, in the
			// first free gap large enough to hold it.
			for i, partition := range masterBootR.Particiones {
				if partition.GetEstado() == byte(0) {
					for _, part := range espaciosLibres {
						if part.GetTamanio() >= size {
							var aux particion.Particion
							aux.Inicializar(1, byte(typeS[0]), byte(fit[0]), part.Inicio, size, name)
							masterBootR.Particiones[i] = aux
							ordernarParticiones()
							actualizarMBR(path)
							return
						}
					}
				}
			}
			panic(">> LA PARTICION ES MUY GRANDE")
		} else if typeS == "L" {
			// Logical partition: walk the EBR chain inside the
			// extended partition looking for a gap.
			for _, partition := range masterBootR.GetParticiones() {
				if string(partition.GetNombre()) != "" && partition.GetTipo() == byte("E"[0]) {
					ebrR := leerEBR(path, partition.GetInicio())
					buscarParticionL(name, path, ebrR)
					uSizeEBR := int64(unsafe.Sizeof(ebrR))
					if uSizeEBR+size <= partition.Tamanio {
						espacioLibre := int64(0)
						if ebrR.GetNombre() == "" {
							// Empty head EBR: try to place right here.
							if ebrR.GetSiguiente() == 0 {
								espacioLibre = partition.GetInicio() + partition.GetTamanio()
							} else {
								espacioLibre = ebrR.GetSiguiente() - partition.GetInicio()
							}
							if uSizeEBR+size <= espacioLibre {
								ebrR.Inicializar(byte(fit[0]), partition.GetInicio()+uSizeEBR+1, size, ebrR.GetSiguiente(), name)
								actualizarEBR(path, partition.GetInicio(), ebrR)
								return
							}
							ebrR = leerEBR(path, ebrR.Siguiente)
						}
						// Look for a gap between consecutive EBRs.
						for ebrR.Siguiente != 0 {
							espacioLibre = ebrR.GetSiguiente() - 1 - (ebrR.GetInicio() + ebrR.GetTamanio())
							if uSizeEBR+size <= espacioLibre {
								var nuevoEbr ebr.EBR
								nuevoEbr.Siguiente = ebrR.GetSiguiente()
								ebrR.Siguiente = ebrR.GetInicio() + ebrR.GetTamanio() + 1
								actualizarEBR(path, ebrR.GetInicio()-uSizeEBR-1, ebrR)
								nuevoEbr.Inicializar(byte(fit[0]), ebrR.GetSiguiente()+uSizeEBR+1, size, nuevoEbr.GetSiguiente(), name)
								actualizarEBR(path, nuevoEbr.GetInicio()-uSizeEBR-1, nuevoEbr)
								return
							}
							ebrR = leerEBR(path, ebrR.Siguiente)
						}
						// Last resort: append after the final EBR if it
						// still fits inside the extended partition.
						espacioLibre = ebrR.GetInicio() + ebrR.GetTamanio() + uSizeEBR + size
						espacioParticion := partition.GetInicio() + partition.GetTamanio()
						if espacioLibre <= espacioParticion {
							ebrR.Siguiente = ebrR.GetInicio() + ebrR.GetTamanio() + 1
							actualizarEBR(path, ebrR.GetInicio()-uSizeEBR-1, ebrR)
							ebrR.Inicializar(byte(fit[0]), ebrR.GetSiguiente()+uSizeEBR+1, size, 0, name)
							actualizarEBR(path, ebrR.GetInicio()-uSizeEBR-1, ebrR)
							return
						}
						panic(">> TAMAÑO DE PARTICION LOGICA MUY GRANDE")
					} else {
						panic(">> TAMAÑO DE PARTICION LOGICA EXCEDE AL TAMAÑO DE LA PARTICION EXTENDIDA")
					}
				}
			}
			panic(">> NO SE ENCONTRO UNA PARTICION EXTENDIDA")
		}
	} else {
		panic(">> LA PARTICION ES MAS GRANDE QUE EL DISCO")
	}
}

// ordernarParticiones sorts the MBR's partition slots by start offset
// (ascending) with a bubble sort; inactive slots are pushed to the end
// by assigning them the disk size as a sentinel start.
func ordernarParticiones() {
	// Order by start offset, smallest first.
	/* sort.SliceStable(masterBootR.Particiones, func(i, j int) bool { return masterBootR.Particiones[i].Inicio > masterBootR.Particiones[j].Inicio }) */
	for i, particion := range masterBootR.Particiones {
		if particion.GetEstado() == byte(0) {
			masterBootR.Particiones[i].Inicio = masterBootR.Tamanio
		}
	}
	n := 4
	swapped := true
	for swapped {
		swapped = false
		for i := 1; i < n; i++ {
			if masterBootR.Particiones[i-1].Inicio > masterBootR.Particiones[i].Inicio {
				masterBootR.Particiones[i], masterBootR.Particiones[i-1] = masterBootR.Particiones[i-1], masterBootR.Particiones[i]
				swapped = true
			}
		}
	}
}

// buscarEspacioLibre scans the (sorted) MBR partition table and
// returns the free gaps as pseudo-partitions (Inicio = gap start,
// Tamanio = gap size).
func buscarEspacioLibre() []particion.Particion {
	inicioParticiones := int64(unsafe.Sizeof(masterBootR)) + 1
	var espaciosLibres []particion.Particion
	var particionAux particion.Particion
	ordernarParticiones()
	for i, particion := range masterBootR.Particiones {
		if i == 0 {
			// First slot: gap between the MBR and the first partition,
			// or the whole disk when the table is empty.
			if particion.GetEstado() == byte(1) {
				particionAux.Inicio = inicioParticiones
				particionAux.Tamanio = (particion.GetInicio() - 1) - particionAux.GetInicio()
				espaciosLibres = append(espaciosLibres, particionAux)
			} else if particion.GetEstado() == byte(0) {
				particionAux.Inicio = inicioParticiones
				particionAux.Tamanio = masterBootR.GetTamanio() - particionAux.GetInicio()
				espaciosLibres = append(espaciosLibres, particionAux)
				break
			}
		} else if i == 3 {
			// Last slot: gap between its end and the end of the disk.
			if particion.GetEstado() == byte(1) {
				particionAux.Inicio = particion.GetInicio() + particion.GetTamanio() + 1
				particionAux.Tamanio = masterBootR.GetTamanio() - particionAux.GetInicio()
				espaciosLibres = append(espaciosLibres, particionAux)
			}
		} else if i > 0 && i < 3 {
			// Middle slots: gaps relative to the nearest active
			// neighbours on either side.
			anterior := particionActivaAnterior(i - 1)
			siguiente := particionActivaSiguiente(i + 1)
			if particion.GetEstado() == byte(1) {
				if anterior == -1 {
					particionAux.Inicio = inicioParticiones
					particionAux.Tamanio = particion.GetInicio() - 1 - particionAux.GetInicio()
					espaciosLibres = append(espaciosLibres, particionAux)
				} else if anterior != -1 {
					particionAux.Inicio = masterBootR.GetParticion(anterior).Inicio + masterBootR.GetParticion(anterior).GetTamanio() + 1
					particionAux.Tamanio = particion.GetInicio() - 1 - particionAux.GetInicio()
					espaciosLibres = append(espaciosLibres, particionAux)
				}
				if siguiente == -1 {
					particionAux.Inicio = particion.GetInicio() + particion.GetTamanio() + 1
					particionAux.Tamanio = masterBootR.GetTamanio() - particionAux.GetInicio()
					espaciosLibres = append(espaciosLibres, particionAux)
				} else if siguiente != -1 {
					particionAux.Inicio = particion.GetInicio() + particion.GetTamanio() + 1
					particionAux.Tamanio = masterBootR.GetParticion(siguiente).GetInicio() - 1 - particionAux.GetInicio()
					espaciosLibres = append(espaciosLibres, particionAux)
				}
			} else if particion.GetEstado() == byte(0) {
				if anterior == -1 && siguiente == -1 {
					particionAux.Inicio = inicioParticiones
					particionAux.Tamanio = masterBootR.GetTamanio() - particionAux.Inicio
					espaciosLibres = append(espaciosLibres, particionAux)
					break
				} else if anterior == -1 && siguiente != -1 {
					particionAux.Inicio = inicioParticiones
					particionAux.Tamanio = masterBootR.GetParticion(siguiente).GetInicio() - 1 - particionAux.Inicio
					espaciosLibres = append(espaciosLibres, particionAux)
				} else if anterior != -1 && siguiente == -1 {
					particionAux.Inicio = masterBootR.GetParticion(anterior).GetInicio() + masterBootR.GetParticion(anterior).Tamanio + 1
					particionAux.Tamanio = masterBootR.GetTamanio() - masterBootR.GetParticion(anterior).GetInicio() - masterBootR.GetParticion(anterior).GetTamanio() + 1
					espaciosLibres = append(espaciosLibres, particionAux)
				} else if anterior != -1 && siguiente != -1 {
					particionAux.Inicio = masterBootR.GetParticion(anterior).GetInicio() + masterBootR.GetParticion(anterior).GetTamanio() + 1
					particionAux.Tamanio = masterBootR.GetParticion(siguiente).GetInicio() - 1
					espaciosLibres = append(espaciosLibres, particionAux)
				}
			}
		}
	}
	return espaciosLibres
}

// buscarParticion panics if a primary/extended partition with the
// given name already exists.
func buscarParticion(nombre string) {
	for _, partition := range masterBootR.GetParticiones() {
		if string(partition.GetNombre()) != "" && string(partition.GetNombre()) == nombre {
			panic(">> YA EXISTE UNA PARTICION CON ESE NOMBRE")
		}
	}
}

// buscarParticionL panics if a logical partition with the given name
// already exists anywhere in the EBR chain.
func buscarParticionL(name string, path string, ebrR ebr.EBR) {
	for ebrR.GetSiguiente() != 0 {
		if ebrR.GetNombre() == name {
			panic(">> YA EXISTE UNA PARTICION LOGICA CON ESE NOMBRE")
		}
		ebrR = leerEBR(path, ebrR.GetSiguiente())
	}
	if ebrR.GetNombre() == name {
		panic(">> YA EXISTE UNA PARTICION LOGICA CON ESE NOMBRE")
	}
}

// BuscarParticionCreada panics unless a partition (primary, extended
// or logical) with the given name exists on the disk.
func BuscarParticionCreada(nombre string, path string) {
	for _, partition := range masterBootR.GetParticiones() {
		if string(partition.GetNombre()) != "" && string(partition.GetNombre()) == nombre {
			return
		} else if partition.GetTipo() == byte("E"[0]) {
			// Also search the logical partitions inside the extended one.
			ebrR := leerEBR(path, partition.GetInicio())
			if ebrR.GetNombre() == nombre {
				return
			}
			for ebrR.GetSiguiente() != 0 {
				ebrR = leerEBR(path, ebrR.GetSiguiente())
				if ebrR.GetNombre() == nombre {
					return
				}
			}
		}
	}
	panic(">> PARTICION NO ECONTRADA")
}

// actualizarMBR writes the in-memory masterBootR back to the start of
// the disk file.
func actualizarMBR(path string) {
	file, err := os.OpenFile(path, os.O_RDWR, 0777)
	defer func() { file.Close() }()
	if err != nil {
		panic(">> 'Error al montar disco'\n")
	}
	// Write the Master Boot Record at byte 0.
	file.Seek(0, 0)
	var binaryMBR bytes.Buffer
	binary.Write(&binaryMBR, binary.BigEndian, &masterBootR)
	escribirBytes(file, binaryMBR.Bytes())
}

// actualizarEBR serializes ebrR at byte offset pos.
func actualizarEBR(path string, pos int64, ebrR ebr.EBR) {
	file, err := os.OpenFile(path, os.O_RDWR, 0777)
	defer func() { file.Close() }()
	if err != nil {
		panic(">> 'Error al montar disco'\n")
	}
	file.Seek(pos, 0) // seek to the EBR's position
	var binaryEBR bytes.Buffer
	binary.Write(&binaryEBR, binary.BigEndian, &ebrR)
	escribirBytes(file, binaryEBR.Bytes())
}

// particionActivaAnterior returns the index of the nearest active
// partition at or before posicion, or -1.
func particionActivaAnterior(posicion int) int {
	for i := posicion; i >= 0; i-- {
		if masterBootR.GetParticion(i).GetEstado() == byte(1) {
			return i
		}
	}
	return -1
}

// particionActivaSiguiente returns the index of the nearest active
// partition at or after posicion, or -1.
func particionActivaSiguiente(posicion int) int {
	for i := posicion; i < 4; i++ {
		if masterBootR.GetParticion(i).GetEstado() == byte(1) {
			return i
		}
	}
	return -1
}

// EliminarParticion deletes the named partition (primary/extended from
// the MBR table, logical from the EBR chain); deleteP == "FULL" also
// zero-fills the partition's bytes.
func EliminarParticion(path string, name string, deleteP string) {
	LeerMBR(path)
	for i, partition := range masterBootR.Particiones {
		if strings.EqualFold(partition.GetNombre(), name) {
			masterBootR.Particiones[i].Inicializar(byte(0), byte(0), byte(0), partition.GetInicio(), partition.GetTamanio(), "")
			if strings.EqualFold(deleteP, "FULL") {
				escribirCeros(path, partition.GetInicio(), partition.GetTamanio())
			}
			inicio := int64(0)
			fin := int64(0)
			masterBootR.Particiones[i].Inicio = inicio
			masterBootR.Particiones[i].Tamanio = fin
			ordernarParticiones()
			actualizarMBR(path)
			fmt.Println(">> PARTICION ELIMINADA CORRECTAMENTE")
			util.LecturaTeclado()
			return
		} else if partition.GetTipo() == byte("E"[0]) {
			ebrR := leerEBR(path, partition.GetInicio())
			if ebrR.GetNombre() != "" || ebrR.GetSiguiente() != 0 {
				uSizeEBR := int64(unsafe.Sizeof(ebrR))
				// Head of the chain: clear it in place.
				if ebrR.GetNombre() == name {
					ebrR.Inicializar(0, ebrR.GetInicio(), 0, ebrR.GetSiguiente(), "")
					if strings.EqualFold(deleteP, "FULL") {
						escribirCeros(path, ebrR.GetInicio(), ebrR.GetTamanio())
					}
					ebrR.Inicio = 0
					actualizarEBR(path, partition.GetInicio(), ebrR)
					fmt.Println(">> PARTICION LOGICA INICIAL ELIMINADA CORRECTAMENTE")
					util.LecturaTeclado()
					return
				}
				// Otherwise unlink the matching EBR from the chain.
				ebrAux := leerEBR(path, ebrR.GetSiguiente())
				for ebrAux.GetSiguiente() != 0 {
					if ebrAux.GetNombre() == name {
						break
					}
					ebrR = ebrAux
					ebrAux = leerEBR(path, ebrAux.GetSiguiente())
				}
				if ebrAux.GetNombre() == name {
					ebrR.Siguiente = ebrAux.GetSiguiente()
					actualizarEBR(path, ebrR.GetInicio()-uSizeEBR-1, ebrR)
					if strings.EqualFold(deleteP, "FULL") {
						escribirCeros(path, ebrAux.GetInicio()-uSizeEBR-1, ebrAux.GetTamanio())
					}
					fmt.Println(">> PARTICION LOGICA ELIMINADA CORRECTAMENTE")
					util.LecturaTeclado()
					return
				}
			}
		}
	}
	panic(">> LA PARTICION NO FUE ENCONTRADA")
}

// escribirCeros overwrites `fin` bytes starting at `inicio` with the
// 2-byte zero marker, one position at a time.
func escribirCeros(path string, inicio int64, fin int64) {
	file, err := os.OpenFile(path, os.O_RDWR, 0777)
	defer func() { file.Close() }()
	if err != nil {
		panic(">> 'Error al montar disco'\n")
	}
	for i := inicio; i <= (fin + inicio); i++ {
		var finalCharacter [2]byte
		copy(finalCharacter[:], "")
		file.Seek(i, 0)
		var binaryCharacter bytes.Buffer
		binary.Write(&binaryCharacter, binary.BigEndian, &finalCharacter)
		escribirBytes(file, binaryCharacter.Bytes())
	}
}

// CambiarTamanio grows (addT > 0) or shrinks (addT < 0) the named
// partition, validating against neighbouring partitions / the disk
// size; unit: b/k/m (default k).
func CambiarTamanio(addT int64, path string, name string, unit string) {
	LeerMBR(path)
	switch strings.ToLower(unit) {
	case "b":
		addT *= 1
	case "m":
		addT *= (1024 * 1024)
	case "k":
		fallthrough
	default:
		addT *= 1024
	}
	for i, partition := range masterBootR.Particiones {
		if strings.EqualFold(partition.GetNombre(), name) {
			if addT > 0 {
				posSig := particionActivaSiguiente(i + 1)
				if posSig == -1 {
					// No partition after this one: only the disk size limits growth.
					if partition.GetTamanio() < masterBootR.GetTamanio() && addT <= masterBootR.GetTamanio() {
						masterBootR.Particiones[i].Tamanio += addT
					} else {
						panic(">> EL AUMENTO DE TAMAÑO DE LA PARTICION NO PUEDE SER MAYOR AL TAMAÑO ACTUAL DEL DISCO")
					}
				} else {
					// Growth must not run into the next active partition.
					part := partition
					if (part.GetInicio()+part.GetTamanio()+1) < (masterBootR.GetParticion(posSig).GetInicio()-1) && addT <= (masterBootR.GetParticion(posSig).GetInicio()-1) {
						masterBootR.Particiones[i].Tamanio += addT
					} else {
						panic(">> ERROR, NO SE PUEDE AUMENTAR EL TAMAÑO DE LA PARTICION")
					}
				}
			} else {
				// Shrink: cannot remove more than the current size.
				if partition.GetTamanio() >= (addT * -1) {
					masterBootR.Particiones[i].Tamanio += addT
				} else {
					panic(">> LA REDUCCION DE LA PARTICION NO PUEDE SER MAYOR AL TAMAÑO ACTUAL")
				}
			}
			ordernarParticiones()
			actualizarMBR(path)
			return
		} else if partition.GetTipo() == byte("E"[0]) {
			// Logical partition: walk the EBR chain.
			ebrR := leerEBR(path, partition.GetInicio())
			uSizeEBR := int64(unsafe.Sizeof(ebrR))
			for ebrR.GetSiguiente() != 0 {
				if ebrR.GetNombre() == name {
					if addT > 0 {
						espacioLibre := int64(0)
						if ebrR.GetSiguiente() == 0 {
							espacioLibre = partition.GetInicio() + partition.GetTamanio()
							espacioLibre -= (ebrR.GetInicio() + ebrR.GetTamanio()) - 1
						} else {
							espacioLibre = ebrR.GetSiguiente() - (ebrR.GetInicio() + ebrR.GetTamanio()) - 1
						}
						if addT <= espacioLibre {
							ebrR.Tamanio += addT
							actualizarEBR(path, ebrR.GetInicio()-uSizeEBR-1, ebrR)
							return
						}
						panic(">> ERROR, NO SE PUEDE AUMENTAR EL TAMAÑO DE LA PARTICION")
					} else {
						if ebrR.GetTamanio() >= (addT * -1) {
							ebrR.Tamanio += addT
							return
						}
						panic(">> LA REDUCCION DE LA PARTICION NO PUEDE SER MAYOR AL TAMAÑO ACTUAL")
					}
				}
				ebrR = leerEBR(path, ebrR.GetSiguiente())
			}
			// Same handling for the last EBR in the chain.
			if ebrR.GetNombre() == name {
				if addT > 0 {
					espacioLibre := int64(0)
					if ebrR.GetSiguiente() == 0 {
						espacioLibre = partition.GetInicio() + partition.GetTamanio()
						espacioLibre -= (ebrR.GetInicio() + ebrR.GetTamanio()) - 1
					} else {
						espacioLibre = ebrR.GetSiguiente() - (ebrR.GetInicio() + ebrR.GetTamanio()) - 1
					}
					if addT <= espacioLibre {
						ebrR.Tamanio += addT
						actualizarEBR(path, ebrR.GetInicio()-uSizeEBR-1, ebrR)
						return
					}
					panic(">> ERROR, NO SE PUEDE AUMENTAR EL TAMAÑO DE LA PARTICION")
				} else {
					if ebrR.GetTamanio() >= (addT * -1) {
						ebrR.Tamanio += addT
						return
					}
					panic(">> LA REDUCCION DE LA PARTICION NO PUEDE SER MAYOR AL TAMAÑO ACTUAL")
				}
			}
		}
	}
	panic(">> LA PARTICION NO FUE ENCONTRADA")
}
package api

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"time"

	"github.com/makkes/gitlab-cli/config"
)

// ErrNotLoggedIn is returned by API calls made before Login has stored
// a URL and token in the configuration.
var ErrNotLoggedIn = errors.New("you are not logged in")

// Client is the GitLab API surface used by the CLI commands.
type Client interface {
	Get(path string) ([]byte, int, error)
	Post(path string, body interface{}) ([]byte, int, error)
	Delete(path string) (int, error)
	FindProject(nameOrID string) (*Project, error)
	FindProjectDetails(nameOrID string) ([]byte, error)
	Login(token, url string) (string, error)
	GetPipelineDetails(projectID, pipelineID string) ([]byte, error)
	GetAccessTokens(projectID string) ([]ProjectAccessToken, error)
	CreateAccessToken(projectID int, name string, expires time.Time, scopes []string) (ProjectAccessToken, error)
}

// Compile-time check that HTTPClient satisfies Client.
var _ Client = &HTTPClient{}

// HTTPClient is the HTTP-backed Client implementation; base URL and
// token come from the stored configuration.
type HTTPClient struct {
	basePath string
	config   config.Config
	client   http.Client
}

// NewAPIClient returns an HTTPClient targeting the /api/v4 endpoint.
func NewAPIClient(cfg config.Config) *HTTPClient {
	client := http.Client{}
	return &HTTPClient{
		basePath: "/api/v4",
		config:   cfg,
		client:   client,
	}
}

// parse expands the ${user} placeholder in a request path with the
// logged-in username from the configuration.
func (c HTTPClient) parse(input string) string {
	return strings.ReplaceAll(input, "${user}", c.config.Get(config.User))
}

// CreateAccessToken creates a project access token and returns the
// decoded result (including the secret token value).
func (c HTTPClient) CreateAccessToken(pid int, name string, exp time.Time, scopes []string) (ProjectAccessToken, error) {
	pat := ProjectAccessToken{
		Name:      name,
		ExpiresAt: exp,
		Scopes:    scopes,
	}
	res, _, err := c.Post(fmt.Sprintf("/projects/%s/access_tokens", url.PathEscape(strconv.Itoa(pid))), pat)
	if err != nil {
		return ProjectAccessToken{}, fmt.Errorf("API request failed: %w", err)
	}
	var dec map[string]interface{}
	if err := json.Unmarshal(res, &dec); err != nil {
		return ProjectAccessToken{}, fmt.Errorf("failed unmarshalling response: %w", err)
	}
	pat, err = decodePAT(dec)
	if err != nil {
		return ProjectAccessToken{}, fmt.Errorf("failed decoding response: %w", err)
	}
	return pat, nil
}

// GetAccessTokens lists the access tokens of the given project.
func (c HTTPClient) GetAccessTokens(pid string) ([]ProjectAccessToken, error) {
	resp, _, err := c.Get(fmt.Sprintf("/projects/%s/access_tokens", url.PathEscape(pid)))
	if err != nil {
		return nil, err
	}
	var decObj []map[string]interface{}
	err = json.Unmarshal(resp, &decObj)
	if err != nil {
		return nil, fmt.Errorf("failed unmarshalling response: %w", err)
	}
	atl := make([]ProjectAccessToken, len(decObj))
	for idx, obj := range decObj {
		pat, err := decodePAT(obj)
		if err != nil {
			return nil, fmt.Errorf("failed decoding token: %w", err)
		}
		atl[idx] = pat
	}
	return atl, nil
}

// decodePAT converts a generic JSON object into a ProjectAccessToken.
// The "token" field is optional (GitLab only returns it on creation).
func decodePAT(obj map[string]interface{}) (ProjectAccessToken, error) {
	name, ok := obj["name"].(string)
	if !ok {
		return ProjectAccessToken{}, fmt.Errorf("failed decoding 'name' field: %v", obj["name"])
	}
	id, ok := obj["id"].(float64)
	if !ok {
		return ProjectAccessToken{}, fmt.Errorf("failed decoding 'id' field: %v", obj["id"])
	}
	expires, ok := obj["expires_at"].(string)
	if !ok {
		// NOTE(review): the value read is obj["expires_at"], but the
		// message prints obj["expires"], which is always nil here.
		return ProjectAccessToken{}, fmt.Errorf("failed decoding 'expires' field: %v", obj["expires"])
	}
	et, err := time.Parse("2006-01-02", expires)
	if err != nil {
		return ProjectAccessToken{}, fmt.Errorf("failed parsing 'expires' field: %w", err)
	}
	scopesIf, ok := obj["scopes"].([]interface{})
	if !ok {
		return ProjectAccessToken{}, fmt.Errorf("failed decoding 'scopes' field: %v", obj["scopes"])
	}
	scopes := make([]string, len(scopesIf))
	for idx, scopeIf := range scopesIf {
		scope, ok := scopeIf.(string)
		if !ok {
			return ProjectAccessToken{}, fmt.Errorf("failed decoding scope: %v", scopeIf)
		}
		scopes[idx] = scope
	}
	var token string
	if obj["token"] != nil {
		var ok bool
		token, ok = obj["token"].(string)
		if !ok {
			return ProjectAccessToken{}, fmt.Errorf("failed decoding 'token' field: %v", obj["token"])
		}
	}
	return ProjectAccessToken{
		ID:        int(id),
		Name:      name,
		ExpiresAt: et,
		Scopes:    scopes,
		Token:     token,
	}, nil
}

// Login stores the token and URL, verifies them by fetching /user, and
// on success records the username and clears the cache. Returns the
// logged-in username. Pointer receiver: it mutates the stored config.
func (c *HTTPClient) Login(token, url string) (string, error) {
	c.config.Set(config.Token, token)
	c.config.Set(config.URL, url)
	res, _, err := c.Get("/user")
	if err != nil {
		return "", err
	}
	var user struct {
		Username string
	}
	err = json.Unmarshal(res, &user)
	if err != nil {
		return "", err
	}
	c.config.Set(config.User, user.Username)
	c.config.Cache().Flush()
	return user.Username, nil
}

// GetPipelineDetails returns the raw JSON of a single pipeline.
func (c HTTPClient) GetPipelineDetails(projectID, pipelineID string) ([]byte, error) {
	resp, _, err := c.Get(fmt.Sprintf("/projects/%s/pipelines/%s", url.PathEscape(projectID), url.PathEscape(pipelineID)))
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// FindProjectDetails searches for a project by its ID or its name,
// with the ID having precedence over the name and returns the
// raw JSON object as byte array.
func (c HTTPClient) FindProjectDetails(nameOrID string) ([]byte, error) {
	// first try to get the project by its cached ID
	if cachedID := c.config.Cache().Get("projects", nameOrID); cachedID != "" {
		resp, _, err := c.Get("/projects/" + url.PathEscape(cachedID))
		if err == nil {
			return resp, nil
		}
	}
	// then try to find the project by its ID
	resp, _, err := c.Get("/projects/" + url.PathEscape(nameOrID))
	if err == nil {
		return resp, nil
	}
	// now try to find the project by name as a last resort
	resp, _, err = c.Get("/users/${user}/projects/?search=" + url.QueryEscape(nameOrID))
	if err != nil {
		return nil, err
	}
	projects := make([]map[string]interface{}, 0)
	err = json.Unmarshal(resp, &projects)
	if err != nil {
		return nil, err
	}
	if len(projects) == 0 {
		return nil, fmt.Errorf("Project '%s' not found", nameOrID)
	}
	// Cache the resolved ID for future lookups; the Write error is
	// deliberately ignored (best-effort caching).
	c.config.Cache().Put("projects", nameOrID, strconv.Itoa(int((projects[0]["id"].(float64)))))
	c.config.Write()
	res, err := json.Marshal(projects[0])
	if err != nil {
		return nil, err
	}
	return res, nil
}

// FindProject searches for a project by its ID or its name,
// with the ID having precedence over the name.
func (c HTTPClient) FindProject(nameOrID string) (*Project, error) { projectBytes, err := c.FindProjectDetails(nameOrID) if err != nil { return nil, err } var project Project err = json.Unmarshal(projectBytes, &project) if err != nil { return nil, err } return &project, nil } func (c HTTPClient) isLoggedIn() bool { return c.config != nil && c.config.Get(config.URL) != "" && c.config.Get(config.Token) != "" } func (c HTTPClient) Get(path string) ([]byte, int, error) { if !c.isLoggedIn() { return nil, 0, ErrNotLoggedIn } req, err := http.NewRequest("GET", c.config.Get(config.URL)+c.basePath+c.parse(path), nil) if err != nil { return nil, 0, err } req.Header.Add("Private-Token", c.config.Get(config.Token)) resp, err := c.client.Do(req) if err != nil { return nil, 0, err } if resp.StatusCode != http.StatusOK { return nil, resp.StatusCode, fmt.Errorf("%s", resp.Status) } body, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, resp.StatusCode, err } return body, 0, nil } func (c HTTPClient) Post(path string, reqBody interface{}) ([]byte, int, error) { if !c.isLoggedIn() { return nil, 0, ErrNotLoggedIn } var bodyBuf bytes.Buffer if err := json.NewEncoder(&bodyBuf).Encode(reqBody); err != nil { return nil, 0, fmt.Errorf("could not encode JSON body: %w", err) } req, err := http.NewRequest("POST", c.config.Get(config.URL)+c.basePath+c.parse(path), &bodyBuf) if err != nil { return nil, 0, err } req.Header.Set("Private-Token", c.config.Get(config.Token)) req.Header.Set("Content-Type", "application/json") resp, err := c.client.Do(req) if err != nil { return nil, 0, err } if resp.StatusCode < 200 || resp.StatusCode > 299 { return nil, resp.StatusCode, fmt.Errorf("%s", resp.Status) } body, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, 0, err } return body, 0, nil } func (c HTTPClient) Delete(path string) (int, error) { if !c.isLoggedIn() { return 0, ErrNotLoggedIn } req, err := http.NewRequest("DELETE", c.config.Get(config.URL)+c.basePath+c.parse(path), 
nil) if err != nil { return 0, err } req.Header.Add("Private-Token", c.config.Get(config.Token)) resp, err := c.client.Do(req) if err != nil { return 0, err } if resp.StatusCode < 200 || resp.StatusCode > 299 { return resp.StatusCode, fmt.Errorf("%s", resp.Status) } return resp.StatusCode, nil }
package main import ( "html/template" "path/filepath" "net/http" ) var templates map[string]*template.Template func InitTemplates() { if templates == nil { templates = make(map[string]*template.Template) } layouts, err := filepath.Glob("templates/layout/*.tmpl") if err != nil { Log("%v", err) } views, err := filepath.Glob("templates/views/*.tmpl") if err != nil { Log("%v", err) } // Generate our templates map from our layouts/ and includes/ directories for _, view := range views { files := append(layouts, view) templates[filepath.Base(view)] = template.Must(template.ParseFiles(files...)) } } func RenderTemplate(w http.ResponseWriter, name string, data map[string]interface{}) { name += ".tmpl" // Ensure the template exists in the map. tmpl, ok := templates[name] if !ok { Log("The template %s does not exist.", name) return } w.Header().Set("Content-Type", "text/html; charset=utf-8") err := tmpl.ExecuteTemplate(w, "layout", data) if err != nil { Log("%v", err) return } return }
package main

import "fmt"

/**
 * author: will fan
 * created: 2019/6/30 14:22
 * description: demonstrates array declaration, literal, index-keyed
 * initialization, and range iteration.
 */
func main() {
	// Zero-valued array filled element by element.
	var x [5]int
	x[0], x[4] = 1, 25
	fmt.Println("X:", x)
	x[1], x[2], x[3] = 10, 23, 13
	fmt.Println("X: ", x)

	// Array literal with an explicit length.
	y := [5]int{1, 2, 3, 4, 5}
	fmt.Println("Y: ", y)

	// Length inferred from the literal via `...`.
	z := [...]int{6, 7, 8, 9, 10}
	fmt.Println("Z: ", z)
	fmt.Println("Length of z: ", len(z))

	// Index-keyed initialization; gaps stay at the zero value.
	langs := [4]string{0: "Go", 3: "Python"}
	fmt.Println("Value of langs: ", langs)
	langs[1], langs[2] = "Scala", "Java"
	for _, lang := range langs {
		fmt.Println(lang)
	}
}
package my_package func Square(x float64) float64 { return x * x } func Add64(x float64) float64 { return x + 64 } type Result struct { number float64 } func HundredDividedBy(x float64) *Result { if x == 0. {return nil} return &Result {100. / x} }
package backend_dao

import (
	"2021/yunsongcailu/yunsong_server/dial"
	"2021/yunsongcailu/yunsong_server/web/web_model"
)

// BackendLinkDao defines the data-access operations for link records
// (web_model.LinksModel), backed by the shared dial.DB handle.
// (Original comments were in Chinese; translated to English.)
type BackendLinkDao interface {
	// Fetch all links.
	FindLinkAll() (linkList []web_model.LinksModel, err error)
	// Update a link's icon, URL, sort order, and title by its ID.
	UpdateLinkById(link web_model.LinksModel) (err error)
	// Update only a link's icon path by its ID.
	UpdateLinkIcon(id int64, path string) (err error)
	// Delete a link by its ID.
	DeleteLinkById(id int64) (err error)
	// Insert a new link.
	InsertLink(link web_model.LinksModel) (err error)
}

// backendLinkDao is the dial.DB-backed implementation of BackendLinkDao.
// NOTE(review): dial.DB appears to be an xorm-style engine (Where/Cols/
// Update chaining) — confirm against the dial package.
type backendLinkDao struct{}

// NewBackendLinkDao returns a BackendLinkDao backed by dial.DB.
func NewBackendLinkDao() BackendLinkDao {
	return &backendLinkDao{}
}

// FindLinkAll loads every link row into linkList.
func (bld *backendLinkDao) FindLinkAll() (linkList []web_model.LinksModel, err error) {
	err = dial.DB.Find(&linkList)
	return
}

// UpdateLinkById updates the icon, URL, sort, and title columns of the
// row whose id matches link.Id; other columns are left untouched.
func (bld *backendLinkDao) UpdateLinkById(link web_model.LinksModel) (err error) {
	_, err = dial.DB.Where("id = ?", link.Id).Cols("link_icon", "link_url", "sort", "link_title").Update(&link)
	return
}

// UpdateLinkIcon sets only the link_icon column of the row with the
// given id to path.
func (bld *backendLinkDao) UpdateLinkIcon(id int64, path string) (err error) {
	newLink := new(web_model.LinksModel)
	newLink.LinkIcon = path
	_, err = dial.DB.Where("id = ?", id).Cols("link_icon").Update(newLink)
	return
}

// DeleteLinkById removes the link row with the given id.
func (bld *backendLinkDao) DeleteLinkById(id int64) (err error) {
	// The empty model only tells the ORM which table to delete from.
	newLink := new(web_model.LinksModel)
	_, err = dial.DB.Where("id = ?", id).Delete(newLink)
	return
}

// InsertLink inserts link as a new row.
func (bld *backendLinkDao) InsertLink(link web_model.LinksModel) (err error) {
	_, err = dial.DB.InsertOne(&link)
	return
}
package main

import (
	"testing"
)

// TestLog exercises logger.warn with a fixed "hostname" prefix.
// There are no assertions: as written this is only a smoke test that
// the call does not panic. NOTE(review): the commented-out check below
// suggests an output assertion was intended but never finished —
// consider capturing the log output and asserting on it.
func TestLog(t *testing.T) {
	logger := &logger{Prefix: "hostname"}
	logger.warn("asd")
	// if debug != "debug log" {
	// 	t.Error("lalalal")
	// }
}