text stringlengths 11 4.05M |
|---|
package main
import (
"net/http"
"github.com/alecaivazis/maestro/api/mutations"
"github.com/alecaivazis/maestro/api/objectTypes"
"github.com/graphql-go/graphql"
"github.com/nautilus/events"
"github.com/nautilus/services/graphql"
)
// MaestroAPI is the service that acts as the interface between the
// various clients and the backend services.
type MaestroAPI struct {
	// The embedded events.EventBroker's methods are promoted onto MaestroAPI.
	events.EventBroker
}
// Schema is the graphql schema for the api provided by this service.
// It is populated by init below; it is nil only before init has run.
var Schema *graphql.Schema

// the schema definition compiled into Schema by init
var schemaConfig = graphql.SchemaConfig{
	Query:    objectTypes.Query,
	Mutation: mutations.Mutations,
}
// Schema returns the graphql schema associated with this service.
// It exposes the package-level Schema compiled at init time.
func (s *MaestroAPI) Schema() *graphql.Schema {
	return Schema
}
// Router builds the http.Handler serving this service's routes.
func (s *MaestroAPI) Router() http.Handler {
	// create an empty mux we can play with
	mux := http.NewServeMux()
	// add the graphql routes to the service mux
	// NOTE(review): GraphqlService is not defined in this file — presumably
	// exported by the imported nautilus graphql services package; confirm.
	GraphqlService.AddRoutes(s, mux)
	// return the router we just made
	return mux
}
// init compiles the package-level schemaConfig into the graphql schema
// exposed through the Schema variable. A schema that fails to compile is a
// programmer error, so the process is aborted.
func init() {
	compiled, err := graphql.NewSchema(schemaConfig)
	if err != nil {
		panic(err)
	}
	Schema = &compiled
}
|
package main
import "fmt"
// main demonstrates a minimal function literal: define a greeter closure
// and invoke it once.
func main() {
	greet := func(message string) {
		fmt.Println(message)
	}
	greet("Good Morning!")
}
package main
import (
"bufio"
"fmt"
"os"
)
// manualDnsResponder satisfies DNS challenges manually: it prompts the
// operator on stderr/stdin rather than talking to a DNS provider API.
type manualDnsResponder struct {
	// records tracks the TXT records the operator has been asked to create,
	// keyed by FQDN.
	records map[string]string
}
// newManualDnsResponder builds a responder with an empty record set.
func newManualDnsResponder() *manualDnsResponder {
	responder := &manualDnsResponder{}
	responder.records = map[string]string{}
	return responder
}
// Cleanup tells the operator which DNS records to delete and then forgets
// every tracked record.
func (r *manualDnsResponder) Cleanup() {
	for name := range r.records {
		fmt.Fprintf(os.Stderr, "Delete DNS record: \"%s\"\n", name)
	}
	r.records = map[string]string{}
}
// Respond asks the operator to create a TXT record with the given key and
// value, blocking until ENTER is pressed on stdin. It is a no-op when the
// identical record was already requested earlier.
func (r *manualDnsResponder) Respond(key, value string) error {
	// The exact record was already requested — nothing to do.
	if r.records[key] == value {
		return nil
	}
	fmt.Fprintf(os.Stderr,
		"Add TXT record for: \"%s\", value: \"%s\" and then press ENTER\n",
		key, value)
	// Block until the operator confirms the record exists.
	reader := bufio.NewReader(os.Stdin)
	if _, err := reader.ReadString('\n'); err != nil {
		return fmt.Errorf("error reading input: %s", err)
	}
	r.records[key] = value
	return nil
}
|
package bitcoin
import (
. "ftnox.com/common"
. "ftnox.com/config"
"ftnox.com/db"
"ftnox.com/bitcoin/rpc"
)
// GENERAL

// LoadAndAssessLastBlocks compares the last blocks stored in the DB against
// the chain currently reported by rpc.GetBlocks and splits them into
// orphaned and still-good blocks.
//
// The total blocks returned are at most Coins[coin].TotConf blocks.
// Returns two arrays based on the current blockchain info from rpc.GetBlocks().
// (NOT based on the status of the loaded blocks)
// The status in the returned blocks are either status GOOD, GOOD_CREDITED, or PROCESSING,
// for both 'orphaned' & 'good'.
// It is up to the caller to finish processing.
func LoadAndAssessLastBlocks(coin string) (orphaned, good []*Block) {
	c := Config.GetCoin(coin)
	// Get the last height & hash known in DB.
	// blocks[0] is the latest block.
	blocks := LoadLastBlocks(coin, c.TotConf)
	if len(blocks) == 0 { return nil, nil }
	// TODO: ensure that blocks are actually in the same chain.
	// Fetch the same height range from the coin daemon for comparison.
	actual := rpc.GetBlocks(coin, blocks[0].Height, blocks[len(blocks)-1].Height)
	if len(actual) != len(blocks) {
		// Dump both sides before bailing so the mismatch can be diagnosed.
		for i, block := range blocks { Warn("blocks@%v\t%v %v", i, block.Height, block.Hash) }
		for i, block := range actual { Warn("actual@%v\t%v %v", i, block.Height, block.Hash) }
		panic(NewError("Expected to fetch %v blocks but only got %v", len(blocks), len(actual)))
	}
	// Iterate over blocks, working from latest to earliest: everything before
	// the first hash match is orphaned; from the first match on, the chain agrees.
	for i, blk := range blocks {
		if blk.Height != actual[i].Height {
			panic(NewError("Expected actual block height %v but got %v", blk.Height, actual[i].Height))
		}
		actualHash := actual[i].Hash
		if actualHash == blk.Hash {
			good = blocks[i:]
			break
		}
		orphaned = append(orphaned, blk)
	}
	return orphaned, good
}
// RecognizedPayments figures out which rpc payments are to known addresses.
// It returns the known addresses keyed by address string, plus the subset of
// payments destined for one of them.
func RecognizedPayments(payments []*rpc.RPCPayment) (map[string]*Address, []*rpc.RPCPayment) {
	addrStrs := make([]string, 0, len(payments))
	for _, p := range payments {
		addrStrs = append(addrStrs, p.Address)
	}
	addrsMap := map[string]*Address{}
	for _, addr := range LoadKnownAddresses(addrStrs) {
		addrsMap[addr.Address] = addr
	}
	recPayments := []*rpc.RPCPayment{}
	for _, p := range payments {
		if addrsMap[p.Address] != nil {
			recPayments = append(recPayments, p)
		}
	}
	return addrsMap, recPayments
}
// SPENDING OUTPUTS

// CheckoutPaymentsToSpend marks the given payments as checked out for
// spending by wallet transaction wtxId (AVAILABLE -> CHECKEDOUT), inside a
// serializable DB transaction. Panics if the transaction fails.
func CheckoutPaymentsToSpend(paymentIds []interface{}, wtxId int64) {
	err := db.DoBeginSerializable(func(tx *db.ModelTx) {
		UpdatePaymentsSpent(tx, paymentIds, PAYMENT_SPENT_STATUS_AVAILABLE,
			PAYMENT_SPENT_STATUS_CHECKEDOUT, wtxId)
	})
	if err != nil { panic(err) }
}
// MarkPaymentsAsSpent finalizes previously checked-out payments as spent by
// wallet transaction wtxId (CHECKEDOUT -> SPENT), inside a serializable DB
// transaction. Panics if the transaction fails.
func MarkPaymentsAsSpent(paymentIds []interface{}, wtxId int64) {
	err := db.DoBeginSerializable(func(tx *db.ModelTx) {
		UpdatePaymentsSpent(tx, paymentIds, PAYMENT_SPENT_STATUS_CHECKEDOUT,
			PAYMENT_SPENT_STATUS_SPENT, wtxId)
	})
	if err != nil { panic(err) }
}
// CONVERSIONS TO/FROM RPC

// FromRPCPayment converts an rpc-layer payment into the model Payment,
// copying all fields one-for-one.
func FromRPCPayment(p *rpc.RPCPayment) *Payment {
	return &Payment{
		Coin:        p.Coin,
		TxId:        p.TxId,
		Vout:        p.Vout,
		Blockhash:   p.Blockhash,
		Blockheight: p.Blockheight,
		Address:     p.Address,
		Amount:      p.Amount,
		ScriptPK:    p.ScriptPK,
		Time:        p.Time,
	}
}
// FromRPCBlock converts an rpc-layer block into the model Block,
// copying all fields one-for-one.
func FromRPCBlock(b *rpc.RPCBlock) *Block {
	return &Block{
		Coin:   b.Coin,
		Height: b.Height,
		Hash:   b.Hash,
		Time:   b.Time,
	}
}
// ToRPCPayment converts a model Payment into its rpc-layer counterpart,
// copying all fields one-for-one (inverse of FromRPCPayment).
func ToRPCPayment(p *Payment) *rpc.RPCPayment {
	return &rpc.RPCPayment{
		Coin:        p.Coin,
		TxId:        p.TxId,
		Vout:        p.Vout,
		Blockhash:   p.Blockhash,
		Blockheight: p.Blockheight,
		Address:     p.Address,
		Amount:      p.Amount,
		ScriptPK:    p.ScriptPK,
		Time:        p.Time,
	}
}
// ToRPCPayments converts a slice of model Payments to rpc payments,
// preserving order. The result is non-nil even for empty input.
func ToRPCPayments(ps []*Payment) []*rpc.RPCPayment {
	converted := make([]*rpc.RPCPayment, 0, len(ps))
	for _, payment := range ps {
		converted = append(converted, ToRPCPayment(payment))
	}
	return converted
}
// ToRPCBlock converts a model Block into its rpc-layer counterpart,
// copying all fields one-for-one (inverse of FromRPCBlock).
func ToRPCBlock(b *Block) *rpc.RPCBlock {
	return &rpc.RPCBlock{
		Coin:   b.Coin,
		Height: b.Height,
		Hash:   b.Hash,
		Time:   b.Time,
	}
}
|
package model
// MsIdcaid is a person/ID-card record; per the original (Chinese) comment it
// was created to test PostgreSQL ltree performance.
type MsIdcaid struct {
	Id        int    `xorm:"not null pk INT(4) autoincr"`
	Name      string `xorm:"NVARCHAR(4000)"`
	Cardno    string `xorm:"NVARCHAR(4000)"`
	Descriot  string `xorm:"NVARCHAR(4000)"`
	Ctftp     string `xorm:"NVARCHAR(4000)"`
	Ctfid     string `xorm:"NVARCHAR(4000)"`
	Gender    string `xorm:"NVARCHAR(4000)"`
	Birthday  string `xorm:"NVARCHAR(4000)"`
	Address   string `xorm:"NVARCHAR(4000)"`
	Zip       string `xorm:"NVARCHAR(4000)"`
	Dirty     string `xorm:"NVARCHAR(4000)"`
	District1 string `xorm:"NVARCHAR(4000)"`
	District2 string `xorm:"NVARCHAR(4000)"`
	District3 string `xorm:"NVARCHAR(4000)"`
	District4 string `xorm:"NVARCHAR(4000)"`
	District5 string `xorm:"NVARCHAR(4000)"`
	District6 string `xorm:"NVARCHAR(4000)"`
	Firstnm   string `xorm:"NVARCHAR(4000)"`
	Lastnm    string `xorm:"NVARCHAR(4000)"`
	Duty      string `xorm:"NVARCHAR(4000)"`
	Mobile    string `xorm:"NVARCHAR(4000)"`
	Tel       string `xorm:"NVARCHAR(4000)"`
	Fax       string `xorm:"NVARCHAR(4000)"`
	Email     string `xorm:"NVARCHAR(4000)"`
	Nation    string `xorm:"NVARCHAR(4000)"`
	Taste     string `xorm:"NVARCHAR(4000)"`
	Education string `xorm:"NVARCHAR(4000)"`
	Company   string `xorm:"NVARCHAR(4000)"`
	Ctel      string `xorm:"NVARCHAR(4000)"`
	Caddress  string `xorm:"NVARCHAR(4000)"`
	Czip      string `xorm:"NVARCHAR(4000)"`
	Family    string `xorm:"NVARCHAR(4000)"`
	Version   string `xorm:"NVARCHAR(4000)"`
}
// TableName overrides xorm's default table-name mapping so MsIdcaid rows are
// stored in the legacy "cdsgus" table.
func (MsIdcaid) TableName() string {
	const legacyTable = "cdsgus"
	return legacyTable
}
|
package pkggraph
import (
"encoding/json"
"sort"
"golang.org/x/tools/go/packages"
"golang.org/x/tools/go/vcs"
"github.com/loov/goda/internal/stat"
)
// Graph is a package dependency graph: an ID-keyed lookup map plus a
// deterministically sorted slice of the same nodes. The embedded stat.Stat
// accumulates totals over all nodes (see From).
type Graph struct {
	Packages map[string]*Node
	Sorted   []*Node

	stat.Stat
}
// AddNode registers n in the graph's lookup map and points the node back at
// its owning graph. (The two assignments are independent.)
func (g *Graph) AddNode(n *Node) {
	n.Graph = g
	g.Packages[n.ID] = n
}
// Node is a single package in the Graph, wrapping the loaded
// packages.Package with its repository root and in-graph import edges.
type Node struct {
	*packages.Package
	Repo *vcs.RepoRoot

	// ImportsNodes are the in-graph packages this node imports directly.
	ImportsNodes []*Node

	// Stats about the current node.
	stat.Stat
	// Stats about upstream nodes.
	Up stat.Stat
	// Stats about downstream nodes.
	Down stat.Stat

	// Errors collected while loading this node (e.g. vcs lookup failures).
	Errors []error

	Graph *Graph
}
// Pkg returns the underlying packages.Package for this node.
func (n *Node) Pkg() *packages.Package { return n.Package }
// From creates a new graph from a map of packages: it loads each package
// into a Node, accumulates graph-wide stats, fills per-node Up/Down stats
// from the transitive import cache, and wires direct in-graph import edges.
func From(pkgs map[string]*packages.Package) *Graph {
	g := &Graph{Packages: map[string]*Node{}}

	// Create the graph nodes.
	for _, p := range pkgs {
		n := LoadNode(p)
		g.Sorted = append(g.Sorted, n)
		g.AddNode(n)
		g.Stat.Add(n.Stat)
	}
	SortNodes(g.Sorted)

	// TODO: find ways to improve performance.
	cache := allImportsCache(pkgs)

	// Populate the graph's Up and Down stats.
	for _, n := range g.Packages {
		importsIDs := cache[n.ID]
		for _, id := range importsIDs {
			imported, ok := g.Packages[id]
			if !ok {
				// we may not want to print info about every package
				continue
			}
			n.Down.Add(imported.Stat)
			imported.Up.Add(n.Stat)
		}
	}

	// Build node imports from package imports.
	for _, n := range g.Packages {
		for id := range n.Package.Imports {
			direct, ok := g.Packages[id]
			if !ok {
				// TODO:
				// should we include dependencies where Y is hidden?
				// X -> [Y] -> Z
				continue
			}
			n.ImportsNodes = append(n.ImportsNodes, direct)
		}
	}

	// Keep each node's import list in deterministic (ID-sorted) order.
	for _, n := range g.Packages {
		SortNodes(n.ImportsNodes)
	}

	return g
}
// LoadNode wraps a packages.Package in a Node, resolving its repository root
// and computing its stats. Failures are recorded in node.Errors rather than
// aborting; when the vcs lookup fails a stub RepoRoot based on the import
// path is used instead.
func LoadNode(p *packages.Package) *Node {
	node := &Node{}
	node.Package = p

	if repo, err := vcs.RepoRootForImportPath(p.PkgPath, false); err != nil {
		node.Errors = append(node.Errors, err)
		// Fall back to a stub repo rooted at the import path itself.
		node.Repo = &vcs.RepoRoot{
			VCS:  &vcs.Cmd{},
			Repo: p.PkgPath,
			Root: p.PkgPath,
		}
	} else {
		node.Repo = repo
	}

	// NOTE: the local variable below shadows the imported stat package.
	stat, errs := stat.Package(p)
	node.Errors = append(node.Errors, errs...)
	node.Stat = stat

	return node
}
// SortNodes orders xs in place by ascending package ID so output is
// deterministic.
func SortNodes(xs []*Node) {
	byID := func(i, j int) bool {
		return xs[i].ID < xs[j].ID
	}
	sort.Slice(xs, byID)
}
// flatNode is the JSON-serializable projection of a Node: linked nodes and
// imported packages are flattened to package ID strings so marshalling does
// not recurse through graph cycles.
type flatNode struct {
	Package struct {
		ID              string
		Name            string            `json:",omitempty"`
		PkgPath         string            `json:",omitempty"`
		Errors          []packages.Error  `json:",omitempty"`
		GoFiles         []string          `json:",omitempty"`
		CompiledGoFiles []string          `json:",omitempty"`
		OtherFiles      []string          `json:",omitempty"`
		IgnoredFiles    []string          `json:",omitempty"`
		ExportFile      string            `json:",omitempty"`
		Imports         map[string]string `json:",omitempty"`
	}
	ImportsNodes []string `json:",omitempty"`
	Stat         stat.Stat
	Up           stat.Stat
	Down         stat.Stat
	Errors       []error `json:",omitempty"`
}
// MarshalJSON implements json.Marshaler by projecting the Node onto the
// cycle-free flatNode shape: node links become bare package ID strings.
func (p *Node) MarshalJSON() ([]byte, error) {
	flat := flatNode{
		Stat:   p.Stat,
		Up:     p.Up,
		Down:   p.Down,
		Errors: p.Errors,
	}
	flat.Package.ID = p.Package.ID
	flat.Package.Name = p.Package.Name
	flat.Package.PkgPath = p.Package.PkgPath
	flat.Package.GoFiles = p.Package.GoFiles
	flat.Package.CompiledGoFiles = p.Package.CompiledGoFiles
	flat.Package.OtherFiles = p.Package.OtherFiles
	flat.Package.IgnoredFiles = p.Package.IgnoredFiles
	flat.Package.ExportFile = p.Package.ExportFile
	// Flatten import edges to package IDs.
	for _, n := range p.ImportsNodes {
		flat.ImportsNodes = append(flat.ImportsNodes, n.ID)
	}
	if len(p.Package.Imports) > 0 {
		flat.Package.Imports = make(map[string]string, len(p.Imports))
		for path, ipkg := range p.Imports {
			flat.Package.Imports[path] = ipkg.ID
		}
	}
	return json.Marshal(flat)
}
|
package cmd
import (
"bytes"
"fmt"
"io"
"os"
"github.com/spf13/cobra"
"github.com/lets-cli/lets/commands/command"
"github.com/lets-cli/lets/config"
"github.com/lets-cli/lets/runner"
)
// prepareArgs cuts all elements before the command name, so the command
// receives only the args that belong to it (starting with its own name).
//
// It stops at the FIRST occurrence of the command name: the previous version
// kept scanning and used the LAST match, so a positional argument that
// happened to equal the command name shifted the slice start.
func prepareArgs(cmd command.Command, originalArgs []string) []string {
	nameIdx := 0
	for idx, arg := range originalArgs {
		if arg == cmd.Name {
			nameIdx = idx
			break
		}
	}
	return originalArgs[nameIdx:]
}
// newCmdGeneric creates a new cobra root sub command from a config-declared
// Command: running it applies the only/exclude/env flags, hands the command
// its slice of os.Args, and delegates to runner.RunCommand.
func newCmdGeneric(cmdToRun command.Command, conf *config.Config, out io.Writer) *cobra.Command {
	subCmd := &cobra.Command{
		Use:   cmdToRun.Name,
		Short: cmdToRun.Description,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Resolve the mutually-exclusive only/exclude filters first.
			only, exclude, err := parseAndValidateOnlyAndExclude(cmd)
			if err != nil {
				return err
			}
			cmdToRun.Only = only
			cmdToRun.Exclude = exclude
			// Hand the command everything from its own name onward.
			cmdToRun.Args = prepareArgs(cmdToRun, os.Args)
			envs, err := parseAndValidateEnvFlag(cmd)
			if err != nil {
				return err
			}
			cmdToRun.OverrideEnv = envs
			return runner.RunCommand(cmd.Context(), cmdToRun, conf, out)
		},
		// we use docopt to parse flags on our own, so any flag is valid flag here
		FParseErrWhitelist: cobra.FParseErrWhitelist{UnknownFlags: true},
		Args:               cobra.ArbitraryArgs,
		DisableFlagParsing: false,
		SilenceUsage:       true,
	}
	// try print docopt (the command's raw options text) as help for command
	subCmd.SetHelpFunc(func(c *cobra.Command, strings []string) {
		buf := new(bytes.Buffer)
		if cmdToRun.Description != "" {
			buf.WriteString(fmt.Sprintf("%s\n\n", cmdToRun.Description))
		}
		buf.WriteString(cmdToRun.RawOptions)
		_, err := buf.WriteTo(c.OutOrStdout())
		if err != nil {
			c.Println(err)
		}
	})
	//initOnlyAndExecFlags(subCmd)
	return subCmd
}
// initSubCommands dynamically registers one cobra sub command on rootCmd for
// every command declared in the config.
func initSubCommands(rootCmd *cobra.Command, conf *config.Config, out io.Writer) {
	for _, declared := range conf.Commands {
		sub := newCmdGeneric(declared, conf, out)
		rootCmd.AddCommand(sub)
	}
}
// parseAndValidateOnlyAndExclude reads the 'only' and 'exclude' string-array
// flags from the parent command and rejects setting both at once.
func parseAndValidateOnlyAndExclude(cmd *cobra.Command) (only []string, exclude []string, err error) {
	parentFlags := cmd.Parent().Flags()

	onlyCmds, err := parentFlags.GetStringArray("only")
	if err != nil {
		return []string{}, []string{}, err
	}

	excludeCmds, err := parentFlags.GetStringArray("exclude")
	if err != nil {
		return []string{}, []string{}, err
	}

	// The two filters are mutually exclusive.
	if len(onlyCmds) > 0 && len(excludeCmds) > 0 {
		err = fmt.Errorf(
			"you must use either 'only' or 'exclude' flag but not both at the same time")
		return []string{}, []string{}, err
	}

	return onlyCmds, excludeCmds, nil
}
// parseAndValidateEnvFlag reads the 'env' string-to-string flag from the
// parent command (TraverseChildren is enabled on the parent, so its flags
// are visible here). On error an empty, non-nil map is returned.
func parseAndValidateEnvFlag(cmd *cobra.Command) (map[string]string, error) {
	parentFlags := cmd.Parent().Flags()
	envs, err := parentFlags.GetStringToString("env")
	if err != nil {
		envs = map[string]string{}
	}
	return envs, err
}
|
// Copyright 2018 Saferwall. All rights reserved.
// Use of this source code is governed by Apache v2 license
// license that can be found in the LICENSE file.
package aggregator
import (
"bytes"
"context"
"encoding/json"
"errors"
store "github.com/saferwall/saferwall/internal/db"
"github.com/saferwall/saferwall/internal/log"
pb "github.com/saferwall/saferwall/services/proto"
"google.golang.org/protobuf/proto"
gonsq "github.com/nsqio/go-nsq"
"github.com/saferwall/saferwall/internal/pubsub"
"github.com/saferwall/saferwall/internal/pubsub/nsq"
s "github.com/saferwall/saferwall/internal/storage"
"github.com/saferwall/saferwall/services/config"
)
// Config represents our application config: log level plus the NSQ consumer,
// database, and object-storage settings, populated via mapstructure.
type Config struct {
	LogLevel string             `mapstructure:"log_level"`
	Consumer config.ConsumerCfg `mapstructure:"consumer"`
	DB       store.Config       `mapstructure:"db"`
	Storage  config.StorageCfg  `mapstructure:"storage"`
}
// Service represents the PE scan service. It adheres to the nsq.Handler
// interface. This allows us to define our own custom handlers for our messages.
// Think of these handlers much like you would an http handler.
type Service struct {
	cfg     Config
	logger  log.Logger
	sub     pubsub.Subscriber
	db      store.DB
	storage s.Storage
}
// New creates a new aggregator scanner service: it wires the NSQ subscriber
// (with the service itself as message handler), opens the document DB, and
// configures object storage for the deployment kind named in the config.
// On any failure a zero Service and the error are returned.
func New(cfg Config, logger log.Logger) (Service, error) {
	svc := Service{}
	var err error

	// The service is its own nsq.Handler (see HandleMessage).
	svc.sub, err = nsq.NewSubscriber(
		cfg.Consumer.Topic,
		cfg.Consumer.Channel,
		cfg.Consumer.Lookupds,
		cfg.Consumer.Concurrency,
		&svc,
	)
	if err != nil {
		return Service{}, err
	}

	svc.db, err = store.Open(cfg.DB.Server, cfg.DB.Username,
		cfg.DB.Password, cfg.DB.BucketName)
	if err != nil {
		return Service{}, err
	}

	// Translate the deployment kind into storage options; unknown kinds fall
	// through with empty options.
	opts := s.Options{}
	switch cfg.Storage.DeploymentKind {
	case "aws":
		opts.Region = cfg.Storage.S3.Region
		opts.AccessKey = cfg.Storage.S3.AccessKey
		opts.SecretKey = cfg.Storage.S3.SecretKey
	case "minio":
		opts.Region = cfg.Storage.Minio.Region
		opts.AccessKey = cfg.Storage.Minio.AccessKey
		opts.SecretKey = cfg.Storage.Minio.SecretKey
		opts.MinioEndpoint = cfg.Storage.Minio.Endpoint
	case "local":
		opts.LocalRootDir = cfg.Storage.Local.RootDir
	}
	opts.Bucket = cfg.Storage.Bucket

	sto, err := s.New(cfg.Storage.DeploymentKind, opts)
	if err != nil {
		return Service{}, err
	}

	svc.cfg = cfg
	svc.logger = logger
	svc.storage = sto
	return svc, nil
}
// Start kicks in the service to start consuming events from the configured
// NSQ topic. It always returns nil.
func (s *Service) Start() error {
	topic := s.cfg.Consumer.Topic
	s.logger.Infof("start consuming from topic: %s ...", topic)
	s.sub.Start()
	return nil
}
// HandleMessage is the only requirement needed to fulfill the nsq.Handler.
// Each protobuf message carries one or more payloads that are dispatched by
// kind: DB update, DB create, or object upload. Per-payload failures are
// logged and the remaining payloads still run; only an empty body or an
// unparseable message is returned as an error (triggering a re-enqueue).
func (s *Service) HandleMessage(m *gonsq.Message) error {
	if len(m.Body) == 0 {
		return errors.New("body is blank re-enqueue message")
	}

	msg := &pb.Message{}
	err := proto.Unmarshal(m.Body, msg)
	if err != nil {
		s.logger.Errorf("failed to unmarshal msg: %v", err)
		return err
	}

	sha256 := msg.Sha256
	ctx := context.Background()
	logger := s.logger.With(ctx, "sha256", sha256)

	for _, payload := range msg.Payload {
		key := payload.Key
		path := payload.Path
		switch payload.Kind {
		case pb.Message_DBUPDATE:
			// Partial document update at the given path.
			var jsonPayload interface{}
			err = json.Unmarshal(payload.Body, &jsonPayload)
			if err != nil {
				logger.Errorf("failed to unmarshal json payload: %v", err)
				continue
			}
			logger.Debugf("payload is %v", jsonPayload)
			err = s.db.Update(ctx, key, path, jsonPayload)
			if err != nil {
				logger.Errorf("failed to update db: %v", err)
			}
		case pb.Message_DBCREATE:
			// Create a whole new document under key.
			var jsonPayload interface{}
			err = json.Unmarshal(payload.Body, &jsonPayload)
			if err != nil {
				logger.Errorf("failed to unmarshal json payload: %v", err)
				continue
			}
			logger.Debugf("payload is %v", jsonPayload)
			err = s.db.Create(ctx, key, jsonPayload)
			if err != nil {
				logger.Errorf("failed to create document key: %s in db: %v", key, err)
			}
		case pb.Message_UPLOAD:
			// Raw bytes go to object storage under key.
			obj := bytes.NewReader(payload.Body)
			err = s.storage.Upload(ctx, s.cfg.Storage.Bucket, key, obj)
			if err != nil {
				logger.Errorf("failed to upload object %s err: %v", key, err)
			}
		}
	}
	return nil
}
|
package server
import (
"github.com/astaxie/beego"
"net"
"strings"
"github.com/jxufeliujj/tcp-server/core"
"github.com/jxufeliujj/tcp-server/socket"
"time"
)
// StartTCP launches the Flash policy-file listener on port 843 in the
// background, then binds the main TCP listener at the configured "bind"
// address and serves each accepted connection in its own goroutine.
// It never returns; fatal setup errors panic.
func StartTCP() {
	go start_843Port()

	tcpAddr, errAddr := net.ResolveTCPAddr("tcp4", beego.AppConfig.String("bind"))
	if errAddr != nil {
		panic(errAddr.Error())
	}
	server, err := net.ListenTCP("tcp", tcpAddr)
	if err != nil {
		panic(err.Error())
	} else {
		core.Writelog(core.LevelInfo, "fun:main.startTCP", "TCP Server on:", tcpAddr.String(), "-", "-")
	}

	// Accept loop: failed accepts are logged and skipped.
	for {
		conn, err := server.AcceptTCP()
		if err != nil {
			core.Writelog(core.LevelError, "fun:main.startTCP", "client connect error", err.Error(), "-", "-")
			continue
		}
		core.Writelog(core.LevelInfo, "fun:main.startTCP", "client connect:", conn.RemoteAddr().String(), "-", "-")
		go socket.Start(conn)
	}
}
// start_843Port listens on port 843 (same host as the "bind" address) and
// answers every connection with the Flash cross-domain policy file via
// sendFirstMsg. It never returns; fatal setup errors panic.
func start_843Port() {
	// Reuse the host part of the configured bind address, fixed port 843.
	addr_port := strings.Split(beego.AppConfig.String("bind"), ":")
	addr843 := addr_port[0] + ":843"

	tcpAddr, errAddr := net.ResolveTCPAddr("tcp4", addr843)
	if errAddr != nil {
		panic(errAddr.Error())
	}
	listen, err := net.ListenTCP("tcp", tcpAddr)
	if err != nil {
		panic(err.Error())
	} else {
		core.Writelog(core.LevelInfo, "fun:main.start_843Port", "TCP Server on:", tcpAddr.String(), "-", "-")
	}

	// Accept loop: failed accepts are logged and skipped.
	for {
		conn, err := listen.AcceptTCP()
		if err != nil {
			core.Writelog(core.LevelError, "fun:main.start_843Port", "client connect error", err.Error(), "-", "-")
			continue
		}
		core.Writelog(core.LevelInfo, "fun:main.start_843Port", "client connect:", conn.RemoteAddr().String(), "-", "-")
		go sendFirstMsg(conn)
	}
}
// sendFirstMsg writes the Flash crossdomain.xml policy to the connection,
// waits one second, then closes it.
// NOTE(review): the Write error is ignored — a failed policy write is only
// observable through the client.
func sendFirstMsg(conn *net.TCPConn) {
	xml := `<?xml version="1.0"?>
<!DOCTYPE cross-domain-policy SYSTEM "/xml/dtds/cross-domain-policy.dtd">
<cross-domain-policy>
<site-control permitted-cross-domain-policies="master-only"/>
<allow-access-from domain="*" to-ports="*" />
</cross-domain-policy>`
	conn.Write([]byte(xml))
	// Give the client a moment to read the policy before the close.
	time.Sleep(time.Second)
	conn.Close()
	core.Writelog(core.LevelInfo, "fun:main.sendFirstMsg", "已经回应策略文件:crossdomain.xml", conn.RemoteAddr().String(), "-", "-")
}
|
package main
import (
"fmt"
"sort"
"strings"
)
// main groups the words of testCase into anagram buckets and prints the
// resulting slice of groups.
func main() {
	testCase := []string{"eat", "tea", "tan", "ate", "nat", "bat"}

	// Canonical form of each word: its letters sorted.
	sortWords := make([]string, 0)
	for i := 0; i < len(testCase); i++ {
		slice := strings.Split(testCase[i], "")
		sort.Strings(slice)
		sortWords = append(sortWords, strings.Join(slice, ""))
	}

	// Assign each distinct canonical form a 1-based group number.
	wordMaps := make(map[string]int)
	count := 1
	for i := 0; i < len(testCase); i++ {
		if _, ok := wordMaps[sortWords[i]]; !ok {
			wordMaps[sortWords[i]] = count
			count += 1
		}
	}

	// Map every original word to its group number by re-checking anagram
	// membership against each canonical form.
	resultMaps := make(map[string]int)
	for i := 0; i < len(testCase); i++ {
		for key, value := range wordMaps {
			if checkAnagram(testCase[i], key) {
				resultMaps[testCase[i]] = value
			}
		}
	}

	// Bucket words by group number (group k lands at index k-1).
	wordArray := make([][]string, len(wordMaps))
	for key, value := range resultMaps {
		wordArray[value-1] = append(wordArray[value-1], key)
	}
	fmt.Print(wordArray)
}
// checkAnagram reports whether str is an anagram of mapKey, where mapKey is
// already in sorted-letter (canonical) form: str's letters are sorted and
// compared against it. (Idiom fix: return the comparison directly instead of
// the if-true/return-false dance.)
func checkAnagram(str string, mapKey string) bool {
	letters := strings.Split(str, "")
	sort.Strings(letters)
	return strings.Join(letters, "") == mapKey
}
package main
import "fmt"
// main demonstrates the basic variable declaration forms in Go and prints
// each resulting value.
func main() {
	// Type inferred from a string literal.
	name := "Avijit"
	fmt.Println(name)

	// Two ints declared and initialized in one statement.
	var first, second int = 1, 2
	fmt.Println(first, second)

	// Boolean with inferred type.
	flag := true
	fmt.Println(flag)

	// No initializer: the int zero value (0).
	var zero int
	fmt.Println(zero)

	// Short declaration form.
	fruit := "apple"
	fmt.Println(fruit)
}
|
package main
import (
"fmt"
"log"
"os"
"strconv"
"github.com/brotherlogic/goserver/utils"
"google.golang.org/grpc"
pb "github.com/brotherlogic/tracer/proto"
//Needed to pull in gzip encoding init
_ "google.golang.org/grpc/encoding/gzip"
)
// main is the tracer CLI entry point: it resolves the tracer service, dials
// it over gRPC, and runs the subcommand named in os.Args[1].
//
// Fixes: (1) conn.Close was deferred BEFORE the Dial error check, so a
// failed dial dereferenced a nil connection on exit; (2) after printing
// "No traces match!" the code still indexed val.Traces[0], panicking with an
// index-out-of-range — both cases now return early.
func main() {
	host, port, err := utils.Resolve("tracer", "tracer-cli")
	if err != nil {
		log.Fatalf("Unable to reach tracer: %v", err)
	}

	conn, err := grpc.Dial(host+":"+strconv.Itoa(int(port)), grpc.WithInsecure())
	if err != nil {
		log.Fatalf("Unable to dial: %v", err)
	}
	// Safe to defer only after the error check above.
	defer conn.Close()

	client := pb.NewTracerServiceClient(conn)
	ctx, cancel := utils.BuildContext("tracercli-"+os.Args[1], "tracer")
	defer cancel()

	switch os.Args[1] {
	case "trace":
		val, err := client.Trace(ctx, &pb.TraceRequest{Id: os.Args[2]})
		if err != nil {
			log.Fatalf("Failed on trace request: %v", err)
		}
		if len(val.Traces) == 0 {
			fmt.Printf("No traces match!")
			return
		}
		if len(val.Traces[0].Events) == 0 {
			fmt.Printf("No events in trace")
			return
		}
		for _, event := range val.Traces[0].Events {
			fmt.Printf("Event: %v\n", event)
		}
	case "default":
		fmt.Printf("Unknown command\n")
	}
}
|
package cache
import (
"path"
"sync"
pub "github.com/go-ap/activitypub"
h "github.com/go-ap/handlers"
)
type (
	// iriMap indexes cached ActivityPub items by their IRI.
	iriMap map[pub.IRI]pub.Item

	// store is an in-memory, RWMutex-guarded item cache. When enabled is
	// false every operation is a no-op.
	store struct {
		enabled bool
		w       sync.RWMutex
		c       iriMap
	}

	// CanStore is the read/write/invalidate interface the cache exposes.
	CanStore interface {
		Set(iri pub.IRI, it pub.Item)
		Get(iri pub.IRI) pub.Item
		Remove(iris ...pub.IRI) bool
	}
)
// New builds an empty cache store; enabled=false yields a no-op cache.
func New(enabled bool) *store {
	s := &store{enabled: enabled}
	s.c = iriMap{}
	return s
}
// Get returns the cached item for iri, or nil when the cache is disabled,
// the receiver is nil, or the item is absent.
func (r *store) Get(iri pub.IRI) pub.Item {
	if r == nil || !r.enabled {
		return nil
	}
	r.w.RLock()
	defer r.w.RUnlock()
	// A missing key yields the zero Item (nil), matching the absent case.
	return r.c[iri]
}
// Set stores it under iri, lazily allocating the map; it is a no-op when
// the cache is disabled or the receiver is nil.
func (r *store) Set(iri pub.IRI, it pub.Item) {
	if r == nil || !r.enabled {
		return
	}
	r.w.Lock()
	defer r.w.Unlock()
	if r.c == nil {
		r.c = iriMap{}
	}
	r.c[iri] = it
}
// Remove invalidates the given IRIs and, for each non-collection IRI, also
// its parent path (so listings containing the item are refreshed). Every
// cached key that contains one of the invalidated IRIs is deleted.
// Always returns true.
func (r *store) Remove(iris ...pub.IRI) bool {
	if r == nil || !r.enabled {
		return true
	}
	// Extend the invalidation set with the parent path of each plain item IRI.
	toInvalidate := pub.IRIs(iris)
	for _, iri := range iris {
		if h.ValidCollectionIRI(iri) {
			continue
		}
		c := pub.IRI(path.Dir(iri.String()))
		if !toInvalidate.Contains(c) {
			toInvalidate = append(toInvalidate, c)
		}
	}
	r.w.Lock()
	defer r.w.Unlock()
	for _, iri := range toInvalidate {
		for key := range r.c {
			// TODO(marius): I need to play around with this a bit
			if key.Contains(iri, false) {
				delete(r.c, key)
			}
		}
	}
	return true
}
// removeAccum appends iri's col-collection IRI to toRemove, skipping
// duplicates.
func removeAccum(toRemove *pub.IRIs, iri pub.IRI, col h.CollectionType) {
	collectionIRI := col.IRI(iri)
	if toRemove.Contains(collectionIRI) {
		return
	}
	*toRemove = append(*toRemove, collectionIRI)
}
// accumForProperty accumulates the col-collection IRI of it into toRemove.
// An item collection is expanded so every member contributes its own link;
// nil items are ignored.
func accumForProperty(it pub.Item, toRemove *pub.IRIs, col h.CollectionType) {
	if pub.IsNil(it) {
		return
	}
	if pub.IsItemCollection(it) {
		pub.OnItemCollection(it, func(c *pub.ItemCollection) error {
			for _, ob := range c.Collection() {
				removeAccum(toRemove, ob.GetLink(), col)
			}
			return nil
		})
	} else {
		removeAccum(toRemove, it.GetLink(), col)
	}
}
// aggregateItemIRIs adds it's own IRI to toRemove (deduplicated), and for
// full objects also the replies collection of what it replies to and the
// outbox of its author. Always returns nil for non-objects.
func aggregateItemIRIs(toRemove *pub.IRIs, it pub.Item) error {
	if it == nil {
		return nil
	}
	if obIRI := it.GetLink(); len(obIRI) > 0 && !toRemove.Contains(obIRI) {
		*toRemove = append(*toRemove, obIRI)
	}
	// Only full objects expose InReplyTo / AttributedTo.
	if !it.IsObject() {
		return nil
	}
	return pub.OnObject(it, func(o *pub.Object) error {
		accumForProperty(o.InReplyTo, toRemove, h.Replies)
		accumForProperty(o.AttributedTo, toRemove, h.Outbox)
		return nil
	})
}
// aggregateActivityIRIs collects into toRemove every cache IRI the activity
// a may have invalidated: recipient collections (or inboxes), the actor's
// typ collection, the activity's own IRI, the object's IRI and — for
// Update/Undo/Delete — the object's parent path, then recurses into the
// object via aggregateItemIRIs.
func aggregateActivityIRIs(toRemove *pub.IRIs, a *pub.Activity, typ h.CollectionType) error {
	for _, r := range a.Recipients() {
		// The special Public addressing pseudo-IRI is never cached.
		if r.GetLink().Equals(pub.PublicNS, false) {
			continue
		}
		if iri := r.GetLink(); h.ValidCollectionIRI(iri) {
			// TODO(marius): for followers, following collections this should dereference the members
			if !toRemove.Contains(iri) {
				*toRemove = append(*toRemove, iri)
			}
		} else {
			accumForProperty(r, toRemove, h.Inbox)
		}
	}
	if destCol := typ.IRI(a.Actor); !toRemove.Contains(destCol) {
		*toRemove = append(*toRemove, destCol)
	}
	if aIRI := a.GetLink(); len(aIRI) > 0 && !toRemove.Contains(aIRI) {
		*toRemove = append(*toRemove, aIRI)
	}
	// Activities that mutate existing objects also invalidate the object's
	// parent path and the object itself.
	withSideEffects := pub.ActivityVocabularyTypes{pub.UpdateType, pub.UndoType, pub.DeleteType}
	if withSideEffects.Contains(a.GetType()) {
		base := path.Dir(a.Object.GetLink().String())
		*toRemove = append(*toRemove, pub.IRI(base))
		*toRemove = append(*toRemove, a.Object.GetLink())
	}
	return aggregateItemIRIs(toRemove, a.Object)
}
// ActivityPurge removes from cache every IRI the activity a may have
// invalidated for the given collection type.
func ActivityPurge(cache CanStore, a *pub.Activity, typ h.CollectionType) error {
	toRemove := make(pub.IRIs, 0)
	if err := aggregateActivityIRIs(&toRemove, a, typ); err != nil {
		return err
	}
	if len(toRemove) > 0 {
		cache.Remove(toRemove...)
	}
	return nil
}
|
package main
import (
"fmt"
// "golang.org/x/net/http2"
"io/ioutil"
// "log"
"net/http"
"net/url"
// "os"
"strings"
"time"
// "mtime.com/framework/schedule"
// "mtime.com/framework/utility/convert"
// "strconv"
)
// var (
// BUILD_TIME string
// )
// func main() {
// d := new(daemon.Daemon)
// d.SetVersion(BUILD_TIME)
// d.Run(startSyncService)
// }
// substr returns the substring of s starting at rune index pos spanning at
// most length runes. Out-of-range pos or negative length are clamped into
// the valid range instead of panicking (the previous version hit a slice
// bounds panic for pos < 0, pos > len, or negative length).
func substr(s string, pos, length int) string {
	runes := []rune(s)
	if pos < 0 {
		pos = 0
	}
	if pos > len(runes) {
		pos = len(runes)
	}
	if length < 0 {
		length = 0
	}
	end := pos + length
	if end > len(runes) {
		end = len(runes)
	}
	return string(runes[pos:end])
}
// getParentDirectory returns everything before the last "/" in dirctory,
// i.e. the parent of a slash-separated path, or "" when the path contains
// no "/".
//
// Fixes: the previous substr-based version panicked when there was no "/"
// (LastIndex returns -1) and mixed a BYTE index from LastIndex into
// rune-based slicing, corrupting results for multibyte path components.
func getParentDirectory(dirctory string) string {
	idx := strings.LastIndex(dirctory, "/")
	if idx < 0 {
		return ""
	}
	return dirctory[:idx]
}
// test1 is a scratch function left over from ad-hoc experiments (RPC calls,
// file handling, JSON round-trips, cron parsing, goroutine capture, ...),
// all of which were commented out. The only live statement prints a fixed
// millisecond timestamp converted to a time.Time.
func test1() {
	const sampleMillis = 1513058727000
	fmt.Println(UnixMilliToTime(sampleMillis).String())
}
// main prints a fixed millisecond timestamp converted to a time.Time.
// (Commented-out HTTP/2 file-server experiments previously lived here.)
func main() {
	const sampleMillis = 1513058727000
	fmt.Println(UnixMilliToTime(sampleMillis).String())
}
// User is a minimal type used by the interface / typed-nil demonstrations
// below (Operate, Get, t11).
type User struct {
	name string
}
func TimeToUnixMilli(t time.Time) int64 {
return t.Unix()*1000 + int64(t.Nanosecond())/1e6
// return t.UnixNano() / 1e6
}
func UnixMilliToTime(milliseconds int64) time.Time {
seconds := milliseconds / 1000
nanoseconds := (milliseconds % 1000) * 1e6
return time.Unix(seconds, nanoseconds)
}
// Operate is the single-method interface used by the typed-nil
// demonstration in t11.
type Operate interface {
	Get() string
}
// Get returns the user's name.
func (u *User) Get() (name string) {
	name = u.name
	return
}
// t11 returns a typed-nil *User wrapped in the Operate interface.
// NOTE: the returned interface value is NON-nil even though the pointer it
// holds is nil, so a caller's `== nil` comparison is false — this function
// deliberately demonstrates Go's typed-nil interface pitfall.
func t11() Operate {
	var user *User = nil
	return user
}
// remoteResult is the outcome of loading a remote configuration value:
// either Value on Success, or a human-readable Error.
type remoteResult struct {
	Success bool   `json:"success"`
	Error   string `json:"error"`
	Value   string `json:"value"`
}
// LoadRemoteConfig fetches a remote configuration value from server+path,
// appending args as a query string when present. server is expected to carry
// the scheme (e.g. "http://host:port"), which matches how existing callers
// pass it.
//
// Fixes: (1) the URL was built inconsistently — "http://" was prepended only
// on the query-args branch, producing "http://http://host..." for callers
// that already include the scheme; (2) a body-read error fell through the
// if/else chain and was overwritten by the generic "get no data" message —
// the real error is now preserved.
func LoadRemoteConfig(server, path string, args url.Values) (r remoteResult) {
	u := server + path
	if len(args) > 0 {
		u += "?" + args.Encode()
	}
	client := http.Client{
		Timeout: time.Second * 5,
	}
	resp, err := client.Get(u)
	if err != nil {
		r.Error = err.Error()
		return
	}
	defer resp.Body.Close()
	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		r.Error = err.Error()
		return
	}
	if resp.StatusCode == http.StatusOK {
		r.Success = true
		r.Value = string(data)
		return
	}
	// Non-200: the body is the server's error text.
	r.Error = string(data)
	return
}
|
package response
import (
pb "github.com/LILILIhuahuahua/ustc_tencent_game/api/proto"
"github.com/LILILIhuahuahua/ustc_tencent_game/framework"
"github.com/LILILIhuahuahua/ustc_tencent_game/framework/event"
)
// EnterGameResponse is the in-game event form of the protobuf
// pb.EnterGameResponse message, carrying the enter-game result flag and the
// assigned hero id on top of the embedded framework.BaseEvent.
type EnterGameResponse struct {
	framework.BaseEvent
	Result bool
	HeroId int32
}
// FromMessage fills e from a *pb.EnterGameResponse and stamps the event
// code. NOTE: the type assertion panics if obj is any other type.
func (e *EnterGameResponse) FromMessage(obj interface{}) {
	pbMsg := obj.(*pb.EnterGameResponse)
	e.SetCode(int32(pb.GAME_MSG_CODE_ENTER_GAME_RESPONSE))
	e.Result = pbMsg.GetChangeResult()
	e.HeroId = pbMsg.GetHeroId()
}
// CopyFromMessage builds a fresh EnterGameResponse event from the
// EnterGameResponse field of a wrapping *pb.Response, leaving the receiver
// untouched. NOTE: the type assertion panics if obj is any other type.
func (e *EnterGameResponse) CopyFromMessage(obj interface{}) event.Event {
	pbMsg := obj.(*pb.Response).EnterGameResponse
	resp := &EnterGameResponse{
		Result: pbMsg.GetChangeResult(),
		HeroId: pbMsg.GetHeroId(),
	}
	resp.SetCode(int32(pb.GAME_MSG_CODE_ENTER_GAME_RESPONSE))
	return resp
}
// ToMessage converts the event back into its protobuf form.
// NOTE(review): this returns a pb.EnterGameResponse VALUE while the
// conversion partners above work with *pb.EnterGameResponse — confirm
// downstream consumers expect a value here.
func (e *EnterGameResponse) ToMessage() interface{} {
	return pb.EnterGameResponse{
		ChangeResult: e.Result,
		HeroId:       e.HeroId,
	}
}
|
package main
import (
"github.com/user/aggtodo/controller"
"log"
"net/http"
)
// main wires up the aggtodo HTTP routes and serves on :8081.
func main() {
	// Home page.
	http.HandleFunc("/", controller.HttpHomeHandler)
	// Todo-list page.
	http.HandleFunc("/my-list", controller.HttpListHandler)
	// OAuth2 callback endpoint for Basecamp.
	http.HandleFunc("/basecamp-oauth2/", controller.BasecampOauthHandler)
	// Strip the /css/ prefix so static assets resolve against the local
	// "css" directory instead of being double-referenced from HTML.
	cssHandler := http.StripPrefix("/css/", http.FileServer(http.Dir("css")))
	http.Handle("/css/", cssHandler)
	log.Fatal(http.ListenAndServe(":8081", nil))
}
|
package api
import (
"./app"
"github.com/gorilla/mux"
)
// api is reserved for a package-level reference to the API router.
// The explicit `= nil` was dropped — a pointer's zero value is nil.
// NOTE(review): this variable is never assigned or read in this file;
// confirm it is used elsewhere before removing it entirely.
var api *mux.Router
// Route mounts the application's routes under the "/api" path prefix.
func Route(router *mux.Router) {
	sub := router.PathPrefix("/api").Subrouter()
	app.Route(sub)
}
|
package internal
import "github.com/onsi/gomega"
// CoerceToMatcher returns i unchanged when it already is a gomega
// matcher; any other value is wrapped in an equality matcher.
func CoerceToMatcher(i interface{}) gomega.OmegaMatcher {
	switch m := i.(type) {
	case gomega.OmegaMatcher:
		return m
	default:
		return gomega.Equal(i)
	}
}
|
package internal
import commentpb "github.com/tomasbasham/blunderlist-comment/blunderlist_comment_v1"
// Repository provides the storage requirements through which to access
// comments.
type Repository interface {
	// GetComments returns every comment under the given parent id.
	GetComments(uint) []*commentpb.CommentResponse
	// GetComment looks up a single comment by id.
	GetComment(uint) (*commentpb.CommentResponse, error)
	// CreateComment persists a new comment built from the request.
	CreateComment(*commentpb.CommentCreateRequest) (*commentpb.CommentResponse, error)
	// UpdateComment applies the update request to an existing comment.
	UpdateComment(*commentpb.CommentUpdateRequest) (*commentpb.CommentResponse, error)
	// DeleteComment removes the comment with the given id.
	DeleteComment(uint) error
	// IsAvailable checks whether the storage media is ready to use. If
	// not, this method should return an error.
	IsAvailable() error
}
// Store proxies requests to the underlying storage implementation.
type Store struct {
	repo Repository // the concrete backend every method delegates to
}
// NewStore returns a Store containing (delegating to) the given
// repository.
func NewStore(repo Repository) *Store {
	return &Store{repo: repo}
}
// GetComments proxies to the repo, returning all comments under
// parentID.
func (s *Store) GetComments(parentID uint) []*commentpb.CommentResponse {
	return s.repo.GetComments(parentID)
}
// GetComment proxies to the repo, looking up one comment by id.
func (s *Store) GetComment(id uint) (*commentpb.CommentResponse, error) {
	return s.repo.GetComment(id)
}
// CreateComment proxies to the repo, persisting a new comment.
func (s *Store) CreateComment(comment *commentpb.CommentCreateRequest) (*commentpb.CommentResponse, error) {
	return s.repo.CreateComment(comment)
}
// UpdateComment proxies to the repo, applying an update request.
func (s *Store) UpdateComment(comment *commentpb.CommentUpdateRequest) (*commentpb.CommentResponse, error) {
	return s.repo.UpdateComment(comment)
}
// DeleteComment proxies to the repo, removing the comment by id.
func (s *Store) DeleteComment(id uint) error {
	return s.repo.DeleteComment(id)
}
// IsAvailable proxies to the repo's readiness check.
func (s *Store) IsAvailable() error {
	return s.repo.IsAvailable()
}
|
package main // import "github.com/nix-community/vgo2nix"
import (
"bytes"
"encoding/json"
"flag"
"fmt"
"io"
"math"
"os"
"os/exec"
"regexp"
"sort"
"strings"
"golang.org/x/mod/module"
"golang.org/x/mod/semver"
"golang.org/x/tools/go/vcs"
)
// Package is one pinned Go dependency as written to deps.nix.
type Package struct {
	GoPackagePath string // module import path
	URL           string // git repository URL
	Rev           string // git ref (tag or commit hash)
	Sha256        string // nix-prefetch-git hash of the checkout
	ModuleDir     string // submodule directory relative to the repo root, "" for the root
}
// PackageResult is the outcome of processing one module on a worker:
// either a resolved Package or the error that prevented it.
type PackageResult struct {
	Package *Package
	Error   error
}
// modEntry is an intermediate record produced by getModules: a module
// mapped to its repository, git ref, and submodule directory.
type modEntry struct {
	importPath string // original module import path
	repo       string // resolved repository URL
	rev        string // git ref to fetch
	moduleDir  string // submodule directory, "" for the repo root
}
// depNixFormat is the Nix attribute-set template emitted for each
// dependency; the verbs are filled with (goPackagePath, fetch type,
// url, rev, sha256, moduleDir) in that order — see main below.
const depNixFormat = ` {
goPackagePath = "%s";
fetch = {
type = "%s";
url = "%s";
rev = "%s";
sha256 = "%s";
moduleDir = "%s";
};
}`
// versionNumber matches a leading semver-style "v<digits>" prefix.
// NOTE(review): not referenced anywhere in this chunk of the file —
// confirm a caller exists before deleting.
var versionNumber = regexp.MustCompile(`^v\d+`)
// getModules shells out to `go list -m all` and converts every
// non-main module into a modEntry (repo URL, git ref, submodule
// directory) suitable for prefetching.
func getModules() ([]*modEntry, error) {
	var entries []*modEntry
	var stderr bytes.Buffer
	cmd := exec.Command("go", "list", "-mod", "mod", "-json", "-m", "all")
	cmd.Stderr = &stderr
	cmd.Env = append(os.Environ(),
		"GO111MODULE=on",
	)
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return nil, err
	}
	if err := cmd.Start(); err != nil {
		return nil, err
	}
	// Minimal mirrors of the `go list -json -m` output shape.
	type goModReplacement struct {
		Path    string
		Version string
	}
	type goMod struct {
		Path    string
		Main    bool
		Version string
		Replace *goModReplacement
	}
	var mods []goMod
	// `go list -json` emits a stream of concatenated JSON objects, so
	// decode until EOF rather than unmarshalling once.
	dec := json.NewDecoder(stdout)
	for {
		var mod goMod
		if err := dec.Decode(&mod); err == io.EOF {
			break
		} else if err != nil {
			return nil, err
		}
		// Skip the main module itself; only dependencies are fetched.
		if !mod.Main {
			mods = append(mods, mod)
		}
	}
	if err := cmd.Wait(); err != nil {
		return nil, fmt.Errorf("'go list -m all' failed with %s:\n%s", err, stderr.String())
	}
	for _, mod := range mods {
		// Honor `replace` directives: fetch the replacement, but keep
		// the original import path for the generated entry.
		replacedPath := mod.Path
		version := mod.Version
		if mod.Replace != nil {
			replacedPath = mod.Replace.Path
			version = mod.Replace.Version
		}
		// find repo, and codeRoot
		repo, err := vcs.RepoRootForImportPath(replacedPath, false)
		if err != nil {
			return nil, err
		}
		// https://github.com/golang/go/blob/7bb6fed9b53494e9846689520b41b8e679bd121d/src/cmd/go/internal/modfetch/coderepo.go#L65
		pathPrefix := replacedPath
		if repo.Root != replacedPath {
			// Strip a major-version suffix such as "/v2" from the path.
			var ok bool
			pathPrefix, _, ok = module.SplitPathVersion(pathPrefix)
			if !ok {
				return nil, fmt.Errorf("invalid mod path: %s", replacedPath)
			}
		}
		// find submodule relative directory
		// https://github.com/golang/go/blob/7bb6fed9b53494e9846689520b41b8e679bd121d/src/cmd/go/internal/modfetch/coderepo.go#L74
		moduleDir := ""
		if pathPrefix != repo.Root {
			moduleDir = strings.TrimPrefix(pathPrefix, repo.Root+"/")
		}
		// convert version to git ref
		// https://github.com/golang/go/blob/7bb6fed9b53494e9846689520b41b8e679bd121d/src/cmd/go/internal/modfetch/coderepo.go#L656
		build := semver.Build(version) // +incompatible
		gitRef := strings.TrimSuffix(version, build)
		if strings.Count(gitRef, "-") >= 2 {
			// pseudo-version, use the commit hash
			gitRef = gitRef[strings.LastIndex(gitRef, "-")+1:]
		} else {
			if len(moduleDir) > 0 {
				// fix tag for submodule
				gitRef = moduleDir + "/" + gitRef
			}
		}
		fmt.Println(fmt.Sprintf("goPackagePath %s has rev: %s, module: %s", mod.Path, gitRef, moduleDir))
		entries = append(entries, &modEntry{
			importPath: mod.Path,
			repo:       repo.Repo,
			rev:        gitRef,
			moduleDir:  moduleDir,
		})
	}
	return entries, nil
}
// getPackages resolves every module dependency to a pinned, hashed
// Package by running nix-prefetch-git across a bounded worker pool.
// Hashes from prevDeps are reused when repo URL and rev are unchanged;
// with keepGoing set, individual fetch failures are logged and skipped
// instead of aborting the whole run.
func getPackages(keepGoing bool, numJobs int, prevDeps map[string]*Package) ([]*Package, error) {
	entries, err := getModules()
	if err != nil {
		return nil, err
	}
	// processEntry fetches (or reuses) the hash for a single module.
	processEntry := func(entry *modEntry) (*Package, error) {
		wrapError := func(err error) error {
			return fmt.Errorf("Error processing import path \"%s\": %v", entry.importPath, err)
		}
		// Cache hit: same repo and rev as in the previous deps.nix.
		if prevPkg, ok := prevDeps[entry.importPath]; ok {
			if prevPkg.URL == entry.repo && prevPkg.Rev == entry.rev {
				prevPkg.ModuleDir = entry.moduleDir
				return prevPkg, nil
			}
		}
		fmt.Println(fmt.Sprintf("Fetching %s %s", entry.importPath, entry.repo))
		// The options for nix-prefetch-git need to match how buildGoPackage
		// calls fetchgit:
		// https://github.com/NixOS/nixpkgs/blob/8d8e56824de52a0c7a64d2ad2c4ed75ed85f446a/pkgs/development/go-modules/generic/default.nix#L54-L56
		// and fetchgit's defaults:
		// https://github.com/NixOS/nixpkgs/blob/8d8e56824de52a0c7a64d2ad2c4ed75ed85f446a/pkgs/build-support/fetchgit/default.nix#L15-L23
		jsonOut, err := exec.Command(
			"nix-prefetch-git",
			"--quiet",
			"--fetch-submodules",
			"--no-deepClone",
			"--url", entry.repo,
			"--rev", entry.rev).Output()
		if err != nil {
			// Distinguish a failed prefetch (non-zero exit, stderr
			// available) from failure to launch the tool at all.
			exitError, ok := err.(*exec.ExitError)
			if ok {
				return nil, wrapError(fmt.Errorf("nix-prefetch-git --fetch-submodules --no-deepClone --url %s --rev %s failed:\n%s",
					entry.repo,
					entry.rev,
					exitError.Stderr))
			}
			return nil, wrapError(fmt.Errorf("failed to execute nix-prefetch-git: %v", err))
		}
		fmt.Println(fmt.Sprintf("Finished fetching %s", entry.importPath))
		var resp map[string]interface{}
		if err := json.Unmarshal(jsonOut, &resp); err != nil {
			return nil, wrapError(err)
		}
		sha256 := resp["sha256"].(string)
		// This well-known hash corresponds to an empty checkout, which
		// means the rev was not actually fetched.
		if sha256 == "0sjjj9z1dhilhpc8pq4154czrb79z9cm044jvn75kxcjv6v5l2m5" {
			return nil, wrapError(fmt.Errorf("Bad SHA256 for repo %s with rev: %s", entry.repo, entry.rev))
		}
		return &Package{
			GoPackagePath: entry.importPath,
			URL:           entry.repo,
			Rev:           entry.rev,
			Sha256:        sha256,
			ModuleDir:     entry.moduleDir,
		}, nil
	}
	// worker drains the jobs channel, emitting one result per entry.
	worker := func(entries <-chan *modEntry, results chan<- *PackageResult) {
		for entry := range entries {
			pkg, err := processEntry(entry)
			result := &PackageResult{
				Package: pkg,
				Error:   err,
			}
			results <- result
		}
	}
	jobs := make(chan *modEntry, len(entries))
	results := make(chan *PackageResult, len(entries))
	// Never start more workers than there are entries.
	for w := 1; w <= int(math.Min(float64(len(entries)), float64(numJobs))); w++ {
		go worker(jobs, results)
	}
	for _, entry := range entries {
		jobs <- entry
	}
	close(jobs)
	// Collect exactly one result per entry.
	pkgsMap := make(map[string]*Package)
	for j := 1; j <= len(entries); j++ {
		result := <-results
		if result.Error != nil {
			if !keepGoing {
				return nil, result.Error
			}
			msg := fmt.Sprintf("Encountered error: %v", result.Error)
			fmt.Println(msg)
			continue
		}
		pkgsMap[result.Package.GoPackagePath] = result.Package
	}
	// Make output order stable
	var packages []*Package
	keys := make([]string, 0, len(pkgsMap))
	for k := range pkgsMap {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, k := range keys {
		packages = append(packages, pkgsMap[k])
	}
	return packages, nil
}
// main generates deps.nix from the current module's go.mod, reusing
// hashes from an existing deps.nix for revisions that have not
// changed. Flags select the project directory, input/output files, and
// parallelism; all failures are fatal (this is a one-shot CLI tool).
func main() {
	var keepGoing = flag.Bool("keep-going", false, "Whether to panic or not if a rev cannot be resolved (default \"false\")")
	var goDir = flag.String("dir", "./", "Go project directory")
	var out = flag.String("outfile", "deps.nix", "deps.nix output file (relative to project directory)")
	var in = flag.String("infile", "deps.nix", "deps.nix input file (relative to project directory)")
	var jobs = flag.Int("jobs", 20, "Number of parallel jobs")
	flag.Parse()
	err := os.Chdir(*goDir)
	if err != nil {
		panic(err)
	}
	// Load previous deps from deps.nix so we can reuse hashes for known revs
	prevDeps := loadDepsNix(*in)
	packages, err := getPackages(*keepGoing, *jobs, prevDeps)
	if err != nil {
		panic(err)
	}
	outfile, err := os.Create(*out)
	if err != nil {
		panic(err)
	}
	defer func() {
		if err := outfile.Close(); err != nil {
			panic(err)
		}
	}()
	// write emits one line (with trailing newline) to the output file.
	write := func(line string) {
		bytes := []byte(line + "\n")
		if _, err := outfile.Write(bytes); err != nil {
			panic(err)
		}
	}
	write("# file generated from go.mod using vgo2nix (https://github.com/nix-community/vgo2nix)")
	write("[")
	for _, pkg := range packages {
		write(fmt.Sprintf(depNixFormat,
			pkg.GoPackagePath, "git", pkg.URL,
			pkg.Rev, pkg.Sha256, pkg.ModuleDir))
	}
	write("]")
	// Idiom fix: fmt.Printf replaces fmt.Println(fmt.Sprintf(...)),
	// producing identical output without the intermediate string.
	fmt.Printf("Wrote %s\n", *out)
}
|
package secretless
import "fmt"
// Version field is a SemVer that should indicate the baked-in version
// of the broker.
var Version = "1.7.14"

// Tag field denotes the specific build type for the broker. It may
// be replaced by compile-time variables if needed to provide the git
// commit information in the final binary. See `Static long version tags`
// in the `Building` section of `CONTRIBUTING.md` for more information on
// this variable.
var Tag = "dev"

// FullVersionName is the user-visible aggregation of version and tag
// of this codebase, e.g. "1.7.14-dev".
var FullVersionName = fmt.Sprintf("%s-%s", Version, Tag)
|
package model
import (
"time"
"encoding/json"
"log"
"fmt"
"errors"
)
// User is the gorm model for the "user" table (see TableName) and the
// JSON shape exposed to clients; Password is excluded from JSON output
// via the "-" tag.
type User struct {
	// NOTE(review): gorm column tags are usually written `column:id`;
	// confirm these bare-name tags are honored by the gorm version in use.
	Id         int       `json:"id" gorm:"id;AUTO_INCREMENT"`
	UserName   string    `json:"userName" gorm:"user_name"`
	Password   string    `json:"-" gorm:"password"`
	CreateTime time.Time `json:"createTime" gorm:"create_time"`
}
// TableName tells gorm which database table backs the User model.
func (User) TableName() string {
	return "user"
}
// Serialization encodes the user as JSON bytes.
func (u *User) Serialization() ([]byte, error) {
	return json.Marshal(u)
}
// UnSerialization decodes JSON bytes into this user in place.
func (u *User) UnSerialization(str []byte) error {
	return json.Unmarshal(str, u)
}
// ToString renders the user as a JSON string, or "" when marshalling
// fails.
//
// Bug fix: the original called log.Fatal on a marshal error, which
// terminates the entire process (os.Exit) and made the subsequent
// `return ""` unreachable; a stringer must never exit the program.
func (u *User) ToString() string {
	data, err := u.Serialization()
	if err != nil {
		log.Printf("user ToString: %v", err)
		return ""
	}
	return string(data)
}
// GetStringKey builds the cache key for this user, e.g. "db:user:str:42".
//
// The caller must guarantee Id is non-zero before calling (original
// contract, translated from the Chinese comment); a zero Id indicates
// an unsaved record and triggers a panic, as before.
func (u *User) GetStringKey() string {
	if u.Id == 0 {
		// Fix: the original panic message "the id not == 0" was garbled.
		panic(errors.New("user id must not be 0"))
	}
	return fmt.Sprintf("db:user:str:%d", u.Id)
}
|
package greet
// emoji is appended to every greeting.
var emoji = "👋"

// greeting joins a greeting word with the shared emoji.
func greeting(word string) string {
	return word + " " + emoji
}

// English returns an English greeting.
func English() string {
	return greeting("Hi")
}

// Italian returns an Italian greeting.
func Italian() string {
	return greeting("Ciao")
}
|
package superscan
import (
_ "net/http/pprof"
"testing"
)
// Test_Scan is a placeholder; no scan assertions are implemented yet.
func Test_Scan(t *testing.T) {
}
// Test_pingNet is a placeholder; no ping assertions are implemented yet.
func Test_pingNet(t *testing.T) {
}
|
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
package merger
import (
"errors"
"fmt"
)
// errMergeAttempted is the sentinel error used to signal that a merge
// was attempted.
var errMergeAttempted = fmt.Errorf("merge attempted")

// IsMergeAttemptedError returns true if err is, or wraps, the
// errMergeAttempted sentinel (it is a sentinel value, not an error
// type — the original comment was misleading on this point).
func IsMergeAttemptedError(err error) bool {
	return errors.Is(err, errMergeAttempted)
}
|
// Copyright (C) 2021 Storj Labs, Inc.
// See LICENSE for copying information.
package grant
import (
"strings"
"testing"
"github.com/stretchr/testify/require"
"storj.io/common/macaroon"
"storj.io/common/paths"
"storj.io/common/storj"
)
// TestLimitTo verifies that restricting an API key with stacked caveat
// groups correctly limits which bucket/path prefixes remain accessible
// through the encryption access store.
func TestLimitTo(t *testing.T) {
	// these strings can be of the form <bucket>|<path> or just <path> where the
	// bucket will be implied to be the string "bucket".
	tests := []struct {
		Groups  [][]string // each group becomes one caveat restriction
		Valid   []string   // prefixes that must still resolve
		Invalid []string   // prefixes that must be rejected
	}{
		{ // no limit means any path is valid
			Groups: nil,
			Valid:  []string{"a", "b", "c", "a/a", "b/b", "c/c"},
		},
		{ // limited to a
			Groups: [][]string{
				{"a"},
			},
			Valid:   []string{"a", "a/b", "a/b/c"},
			Invalid: []string{"b", "b/a", "c/a"},
		},
		{ // multiple layers
			Groups: [][]string{
				{"a", "f"},
				{"c", "a/b", "f/e"},
				{"a/b/c", "c", "f"},
			},
			Valid:   []string{"a/b/c", "a/b/c/d", "f/e", "f/e/e"},
			Invalid: []string{"a", "a/b", "f", "c", "c/d"},
		},
		{ // check distinct buckets
			Groups: [][]string{
				{"bucket1|", "bucket2|", "bucket3|"},
				{"bucket2|", "bucket3|", "bucket4|"},
			},
			Valid:   []string{"bucket2|anything/here", "bucket3|", "bucket3|whatever"},
			Invalid: []string{"bucket1|", "bucket1|path/ignored", "bucket4|huh", "bucket5|"},
		},
		{ // check buckets with paths
			Groups: [][]string{
				{"b1|p1", "b1|p2", "b2|p3", "b2|p4"},
				{"b1|p1", "b1|p2", "b2|p3"},
			},
			Valid:   []string{"b1|p1", "b1|p1/whatever", "b1|p2", "b2|p3/foo"},
			Invalid: []string{"b3|", "b2|p4", "b1|p3"},
		},
	}
	// split decodes the "<bucket>|<path>" test notation; a missing "|"
	// implies the bucket "bucket".
	split := func(prefix string) (bucket, path string) {
		if idx := strings.IndexByte(prefix, '|'); idx >= 0 {
			return prefix[:idx], prefix[idx+1:]
		}
		return "bucket", prefix
	}
	// toCaveat converts one group of prefixes into a macaroon caveat.
	toCaveat := func(group []string) (caveat macaroon.Caveat) {
		for _, prefix := range group {
			bucket, path := split(prefix)
			caveat.AllowedPaths = append(caveat.AllowedPaths, &macaroon.Caveat_Path{
				Bucket:              []byte(bucket),
				EncryptedPathPrefix: []byte(path),
			})
		}
		return caveat
	}
	for _, test := range tests {
		apiKey, err := macaroon.NewAPIKey(nil)
		require.NoError(t, err)
		// Stack every caveat group onto the key.
		for _, group := range test.Groups {
			apiKey, err = apiKey.Restrict(toCaveat(group))
			require.NoError(t, err)
		}
		encAccess := NewEncryptionAccessWithDefaultKey(&storj.Key{})
		encAccess.SetDefaultPathCipher(storj.EncNull)
		encAccess.LimitTo(apiKey)
		for _, valid := range test.Valid {
			bucket, path := split(valid)
			_, _, base := encAccess.Store.LookupEncrypted(bucket, paths.NewEncrypted(path))
			require.NotNil(t, base, "searched for %q", valid)
		}
		for _, invalid := range test.Invalid {
			bucket, path := split(invalid)
			_, _, base := encAccess.Store.LookupEncrypted(bucket, paths.NewEncrypted(path))
			require.Nil(t, base, "searched for %q", invalid)
		}
	}
}
|
/*
Copyright 2021 The KodeRover Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package types
import (
"github.com/koderover/zadig/lib/microservice/warpdrive/config"
"github.com/koderover/zadig/lib/microservice/warpdrive/core/service/types/task"
)
// Product models a deployed product/environment document (the bson
// tags indicate MongoDB persistence, the json tags the API shape).
type Product struct {
	//ID bson.ObjectId `bson:"_id,omitempty" json:"id"`
	ProductName string       `bson:"product_name" json:"product_name"`
	CreateTime  int64        `bson:"create_time" json:"create_time"`
	UpdateTime  int64        `bson:"update_time" json:"update_time"`
	Namespace   string       `bson:"namespace,omitempty" json:"namespace,omitempty"`
	Status      string       `bson:"status" json:"status"`
	Revision    int64        `bson:"revision" json:"revision"`
	Enabled     bool         `bson:"enabled" json:"enabled"`
	EnvName     string       `bson:"env_name" json:"env_name"`
	UpdateBy    string       `bson:"update_by" json:"update_by"`
	Auth        []*ProductAuth `bson:"auth" json:"auth"`
	Visibility  string       `bson:"-" json:"visibility"`
	Services    [][]*Service `bson:"services" json:"services"` // groups of services (outer slice is persisted as-is)
	Render      *task.RenderInfo `bson:"render" json:"render"`
	Error       string       `bson:"error" json:"error"`
	Vars        []*RenderKV  `bson:"vars,omitempty" json:"vars,omitempty"`
	ChartInfos  []*RenderChart `bson:"-" json:"chart_infos,omitempty"`
	IsPublic    bool         `bson:"is_public" json:"isPublic"`
	RoleIDs     []int64      `bson:"role_ids" json:"roleIds"`
	ClusterId   string       `bson:"cluster_id,omitempty" json:"cluster_id,omitempty"`
	RecycleDay  int          `bson:"recycle_day" json:"recycle_day"`
	Source      string       `bson:"source" json:"source"`
	// used for cache
	//KubeClient kubecli.Client `bson:"-" json:"-"`
	//HelmClient helmclient.Client `bson:"-" json:"-"`
	// TODO: temp flag
	IsForkedProduct bool `bson:"-" json:"-"`
}
// RenderKV is one render key/value pair plus the services it applies to.
type RenderKV struct {
	Key      string   `bson:"key" json:"key"`
	Value    string   `bson:"value" json:"value"`
	Alias    string   `bson:"alias" json:"alias"`
	State    string   `bson:"state" json:"state"`
	Services []string `bson:"services" json:"services"`
}
// RenderChart associates a service with a helm chart version and its
// rendered values.yaml content.
type RenderChart struct {
	ServiceName  string `bson:"service_name,omitempty" json:"service_name,omitempty"`
	ChartVersion string `bson:"chart_version,omitempty" json:"chart_version,omitempty"`
	ValuesYaml   string `bson:"values_yaml,omitempty" json:"values_yaml,omitempty"`
}
// ProductAuth grants a named principal a set of permissions on a
// product.
type ProductAuth struct {
	Type        config.ProductAuthType     `bson:"type" json:"type"`
	Name        string                     `bson:"name" json:"name"`
	Permissions []config.ProductPermission `bson:"permissions" json:"permissions"`
}
// Service is one deployed service revision inside a Product.
type Service struct {
	ServiceName string           `bson:"service_name" json:"service_name"`
	Type        string           `bson:"type" json:"type"`
	Revision    int64            `bson:"revision" json:"revision"`
	Containers  []*Container     `bson:"containers" json:"containers,omitempty"`
	Configs     []*Config        `bson:"configs,omitempty" json:"configs,omitempty"`
	Render      *task.RenderInfo `bson:"render,omitempty" json:"render,omitempty"` // per-service render info, so a single service can be updated on its own
	EnvConfigs  []*EnvConfig     `bson:"-" json:"env_configs,omitempty"`
}
//type EnvConfig struct {
// EnvName string `json:"env_name"`
// HostIDs []string `json:"host_ids"`
//}
//
//// Container ...
//type Container struct {
// Name string `bson:"name" json:"name"`
// Image string `bson:"image" json:"image"`
//}
// Config identifies one configuration revision attached to a service.
type Config struct {
	ConfigName string `bson:"config_name" json:"config_name"`
	Revision   int64  `bson:"revision" json:"revision"`
}
|
package main
// main exercises funcMui; both return values are intentionally
// discarded — this file only demonstrates multiple return values.
func main() {
	funcMui(1, 2)
}
// funcMui returns the sum of x and y along with a nil error.
//
// Compile fix (and translation of the original Chinese note): when a
// function has multiple return values, Go requires either all of them
// or none of them to be named. The original signature `(sum int,
// error)` named the first result but not the second, which does not
// compile; the second result is now named `err`.
func funcMui(x, y int) (sum int, err error) {
	return x + y, nil
}
|
//go:build 386 || amd64 || amd64p32 || arm || arm64 || mipsle || mips64le || mips64p32le || ppc64le || riscv || riscv64
// +build 386 amd64 amd64p32 arm arm64 mipsle mips64le mips64p32le ppc64le riscv riscv64
package column
import (
"io"
"unsafe"
)
// readyBufferHook is intentionally empty on the little-endian targets
// selected by this file's build tags — presumably the buffer needs no
// byte-order fixup here; confirm against the big-endian variant.
func (c *Base[T]) readyBufferHook() {
}
// slice is the runtime representation of a slice.
// It cannot be used safely or portably and its representation may
// change in a later release.
// Moreover, the Data field is not sufficient to guarantee the data
// it references will not be garbage collected, so programs must keep
// a separate, correctly typed pointer to the underlying data.
// (Used by WriteTo below to reinterpret []T as []byte via unsafe.)
type slice struct {
	Data uintptr
	Len  int
	Cap  int
}
// WriteTo writes the column's backing values to w as raw bytes,
// reinterpreting the values slice as []byte through the slice header
// (valid only on the little-endian targets selected by the build
// tags). It returns the number of bytes written and any write error.
//
// Cleanup: the original declared `var n int64` that was always zero
// and added it into the result; the dead variable is removed.
func (c *Base[T]) WriteTo(w io.Writer) (int64, error) {
	// Scale the element count/capacity to a byte count.
	s := *(*slice)(unsafe.Pointer(&c.values))
	s.Len *= c.size
	s.Cap *= c.size
	src := *(*[]byte)(unsafe.Pointer(&s))
	nw, err := w.Write(src)
	return int64(nw), err
}
|
//go:build integration
// +build integration
package integration
import (
"context"
"fmt"
"strconv"
"testing"
"time"
"github.com/stretchr/testify/require"
)
// TestDisableK8s checks the disable/enable round trip for a Kubernetes
// resource: disabling removes its pods, re-enabling brings them back.
func TestDisableK8s(t *testing.T) {
	f := newK8sFixture(t, "disable")
	f.TiltUp()
	ctx, cancel := context.WithTimeout(f.ctx, time.Minute)
	defer cancel()
	f.WaitForAllPodsReady(ctx, "app=disabletest")
	// Disable the resource and wait for kubectl to report no pods.
	setDisabled(f.fixture, "disabletest", true)
	f.WaitUntil(ctx, "pod gone", func() (string, error) {
		out, err := f.runCommand("kubectl", "get", "pod", namespaceFlag, "-lapp=disabletest", "--no-headers")
		return out.String(), err
	}, "No resources found")
	// Re-enable and confirm the pods come back.
	setDisabled(f.fixture, "disabletest", false)
	f.WaitForAllPodsReady(ctx, "app=disabletest")
}
// TestDisableDC checks the disable/enable round trip for a
// docker-compose resource: disabling stops its container, re-enabling
// restarts it.
func TestDisableDC(t *testing.T) {
	f := newDCFixture(t, "disable")
	f.dockerKillAll("tilt")
	f.TiltUp("-f", "Tiltfile.dc")
	ctx, cancel := context.WithTimeout(f.ctx, time.Minute)
	defer cancel()
	f.WaitUntil(ctx, "uiresource available", func() (string, error) {
		out, err := f.tilt.Get(ctx, "uiresource")
		return string(out), err
	}, "disabletest")
	f.runOrFail(f.tilt.cmd(ctx, []string{"wait", "--for=condition=Ready", "uiresource/disabletest"}, nil), "wait")
	// docker ps filter that prints the image of the disabletest container.
	psArgs := []string{
		"ps", "-f", "name=disabletest", "--format", "{{.Image}}",
	}
	out, err := f.dockerCmdOutput(psArgs)
	require.NoError(t, err)
	require.Contains(t, out, "disabletest")
	f.WaitUntil(ctx, "disable configmap available", func() (string, error) {
		out, err := f.tilt.Get(ctx, "configmap")
		return string(out), err
	}, "disabletest-disable")
	// Disable: the container should disappear from docker ps.
	setDisabled(f.fixture, "disabletest", true)
	require.Eventually(t, func() bool {
		out, _ := f.dockerCmdOutput(psArgs)
		return len(out) == 0
	}, time.Minute, 15*time.Millisecond, "dc service stopped")
	// Re-enable: the container should come back.
	setDisabled(f.fixture, "disabletest", false)
	f.WaitUntil(ctx, "service up", func() (string, error) {
		return f.dockerCmdOutput(psArgs)
	}, "disabletest")
}
// setDisabled flips the isDisabled flag in the resource's
// "<name>-disable" configmap by patching it through the tilt client.
func setDisabled(f *fixture, resourceName string, isDisabled bool) {
	configMapName := fmt.Sprintf("%s-disable", resourceName)
	patch := fmt.Sprintf("{\"data\": {\"isDisabled\": \"%s\"}}", strconv.FormatBool(isDisabled))
	err := f.tilt.Patch(f.ctx, "configmap", patch, configMapName)
	require.NoErrorf(f.t, err, "setting disable state for %s to %v", resourceName, isDisabled)
}
|
package main
import (
"api-documentos/bd"
"api-documentos/controladores"
"log"
"net/http"
"github.com/gorilla/mux"
"github.com/rs/cors"
)
// main starts the document API server.
// NOTE(review): the "running" message is logged before rout() actually
// opens the listener; a bind failure is only reported afterwards by
// log.Fatal inside rout.
func main() {
	log.Println("El servidor se encuentra corriendo sin problema")
	rout()
}
// rout opens the database connection, registers the HTTP endpoints on
// a gorilla/mux router, wraps it in a permissive CORS handler, and
// serves on :8083 (blocking).
func rout() {
	bd.NuevaConexionBD()
	// NOTE: log.Fatal below calls os.Exit, which skips this defer.
	defer bd.CerrarBD()
	gorillaRoute := mux.NewRouter().StrictSlash(true)
	gorillaRoute.HandleFunc("/grancompu/devoluciones", controladores.TodosDevolucionesEndpoint).Methods("GET")
	gorillaRoute.HandleFunc("/grancompu/cotizacion", controladores.InserCorizacionEndpoint).Methods("POST")
	gorillaRoute.HandleFunc("/mxgourmet/ordenCompra", controladores.InserOrdenEndpoint).Methods("POST")
	gorillaRoute.HandleFunc("/mxgourmet/ordenes", controladores.SelectOrdenesEndpoint).Methods("GET")
	// Router configuration.
	gorillaRoute.PathPrefix("/")
	// NOTE(review): this DefaultServeMux registration appears redundant —
	// the server below uses handlerCORS, not the default mux. Confirm
	// before removing.
	http.Handle("/", gorillaRoute)
	handlerCORS := cors.New(cors.Options{
		// NOTE(review): wildcard AllowedOrigins combined with
		// AllowCredentials=true is rejected by browsers per the CORS
		// spec — verify the intended origin policy.
		AllowedOrigins:   []string{"*"},
		AllowedMethods:   []string{http.MethodGet, http.MethodPost, http.MethodDelete, http.MethodPut},
		AllowedHeaders:   []string{"*"},
		ExposedHeaders:   []string{"*"},
		AllowCredentials: true,
		/*MaxAge: 7200,
		OptionsPassthrough: true,
		Debug: true,*/
	}).Handler(gorillaRoute)
	//log.Fatal(http.ListenAndServe(":9090", handlerCORS))
	log.Fatal(http.ListenAndServe(":8083", handlerCORS))
}
|
package main
import (
"path/filepath"
"io/ioutil"
"gopkg.in/yaml.v2"
// "github.com/davecgh/go-spew/spew"
)
// Yaml Structures for Defaults and Elasticsearch configs.
// BlitzferConfigs is the top-level YAML document: a map of named
// configurations to their Options.
type BlitzferConfigs struct {
	Configs map[string]Options
}
// Config yaml structure.
// Options is one named configuration: a blitzfer section (debug flag
// and working directory) plus an elasticsearch section (address and
// target index).
type Options struct {
	Blitzfer struct {
		Debug     bool
		Directory string
	}
	Elasticsearch struct {
		Ip    string // NOTE: renaming to IP would change the derived yaml key
		Port  string
		Index string
	}
}
// loadBlitzferConfigs reads the YAML file named by the package-level
// configFile variable and unmarshals it into a BlitzferConfigs value.
// Any failure to resolve, read, or parse the file panics, matching the
// original behavior for read/parse errors.
func loadBlitzferConfigs() *BlitzferConfigs {
	// Bug fix: the original discarded the error from filepath.Abs.
	filename, err := filepath.Abs(configFile)
	if err != nil {
		panic(err)
	}
	yamlFile, err := ioutil.ReadFile(filename)
	if err != nil {
		panic(err)
	}
	var blitzferConfigs BlitzferConfigs
	// UnMarshal the config into our config structure.
	if err := yaml.Unmarshal(yamlFile, &blitzferConfigs.Configs); err != nil {
		panic(err)
	}
	/*
		if ( blitzferConfigs.Configs["configs"].Blitzfer.Debug == true) {
			spew.Dump(blitzferConfigs.Configs)
		}
	*/
	return &blitzferConfigs
}
|
package utils
import (
"math/rand"
"sync"
"time"
)
// Rand returns a pseudo-random integer in the half-open interval
// [min, max).
func Rand(min, max int) int {
	span := max - min
	return min + rand.Intn(span)
}
// Alphabets accepted by RandString's letterBytes parameter.
const (
	AlphaNumber = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" // letters and digits
	Number      = "0123456789"                                                     // digits only
	Alpha       = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"           // letters only
)
// Bit-slicing parameters used by RandString to turn one 63-bit random
// value into several letter indices.
const (
	letterIdxBits = 6                    // 6 bits to represent a letter index
	letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
	letterIdxMax  = 63 / letterIdxBits   // # of letter indices fitting in 63 bits
)
var randSrcPool *sync.Pool
// nolint:gochecknoinits
func init() {
randSrcPool = &sync.Pool{
New: func() interface{} {
return rand.NewSource(time.Now().UnixNano())
},
}
}
// RandString returns a random string of length n drawn from
// letterBytes, using the masking technique: each 63-bit Int63() value
// is sliced into up to letterIdxMax 6-bit letter indices.
func RandString(n int, letterBytes string) string {
	// nolint:errcheck
	src := randSrcPool.Get().(rand.Source)
	b := make([]byte, n)
	// A src.Int63() generates 63 random bits, enough for letterIdxMax characters!
	for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
		if remain == 0 {
			// Out of random bits: draw a fresh 63-bit value.
			cache, remain = src.Int63(), letterIdxMax
		}
		// Take the low 6 bits as a candidate index; indices past the
		// end of letterBytes are discarded (rejection sampling).
		if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
			b[i] = letterBytes[idx]
			i--
		}
		cache >>= letterIdxBits
		remain--
	}
	// Return the source for reuse by other goroutines.
	randSrcPool.Put(src)
	return string(b)
}
|
package tiltfile
import (
"bytes"
"context"
"fmt"
"path/filepath"
"reflect"
"strconv"
"testing"
"time"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/workqueue"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/tilt-dev/tilt/internal/container"
configmap2 "github.com/tilt-dev/tilt/internal/controllers/apis/configmap"
"github.com/tilt-dev/tilt/internal/controllers/apis/uibutton"
"github.com/tilt-dev/tilt/internal/controllers/fake"
"github.com/tilt-dev/tilt/internal/docker"
"github.com/tilt-dev/tilt/internal/k8s/testyaml"
"github.com/tilt-dev/tilt/internal/store"
"github.com/tilt-dev/tilt/internal/testutils/configmap"
"github.com/tilt-dev/tilt/internal/testutils/manifestbuilder"
"github.com/tilt-dev/tilt/internal/testutils/tempdir"
"github.com/tilt-dev/tilt/internal/tiltfile"
"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
"github.com/tilt-dev/tilt/pkg/model"
"github.com/tilt-dev/wmclient/pkg/analytics"
)
// TestDefault verifies the basic Tiltfile reconcile lifecycle:
// creating the object spawns a FileWatch on its path, the run reaches
// Terminated, and deleting the object removes the FileWatch.
func TestDefault(t *testing.T) {
	f := newFixture(t)
	p := f.tempdir.JoinPath("Tiltfile")
	f.tempdir.WriteFile(p, "print('hello-world')")
	tf := v1alpha1.Tiltfile{
		ObjectMeta: metav1.ObjectMeta{
			Name: "my-tf",
		},
		Spec: v1alpha1.TiltfileSpec{
			Path: p,
		},
	}
	ts := time.Now()
	f.Create(&tf)
	// Make sure the FileWatch was created
	var fw v1alpha1.FileWatch
	fwKey := types.NamespacedName{Name: "configs:my-tf"}
	f.MustGet(fwKey, &fw)
	assert.Equal(t, tf.Spec.Path, fw.Spec.WatchedPaths[0])
	// Drive the run to completion.
	f.waitForRunning(tf.Name)
	f.popQueue()
	f.waitForTerminatedAfter(tf.Name, ts)
	f.Delete(&tf)
	// Ensure the FileWatch was deleted.
	assert.False(t, f.Get(fwKey, &fw))
}
// TestSteadyState verifies that reconciling an already-loaded Tiltfile
// is a no-op: the object's ResourceVersion must not change.
func TestSteadyState(t *testing.T) {
	f := newFixture(t)
	p := f.tempdir.JoinPath("Tiltfile")
	f.tempdir.WriteFile(p, "print('hello-world')")
	tf := v1alpha1.Tiltfile{
		ObjectMeta: metav1.ObjectMeta{
			Name: "my-tf",
		},
		Spec: v1alpha1.TiltfileSpec{
			Path: p,
		},
	}
	f.createAndWaitForLoaded(&tf)
	// Make sure a second reconcile doesn't update the status again.
	var tf2 = v1alpha1.Tiltfile{}
	f.MustReconcile(types.NamespacedName{Name: "my-tf"})
	f.MustGet(types.NamespacedName{Name: "my-tf"}, &tf2)
	assert.Equal(t, tf.ResourceVersion, tf2.ResourceVersion)
}
// TestLiveUpdate verifies that loading a manifest carrying a
// LiveUpdateSpec materializes exactly one LiveUpdate API object whose
// spec gained the expected Sources and Kubernetes selector.
func TestLiveUpdate(t *testing.T) {
	f := newFixture(t)
	p := f.tempdir.JoinPath("Tiltfile")
	luSpec := v1alpha1.LiveUpdateSpec{
		BasePath:  f.tempdir.Path(),
		StopPaths: []string{filepath.Join("src", "package.json")},
		Syncs:     []v1alpha1.LiveUpdateSync{{LocalPath: "src", ContainerPath: "/src"}},
	}
	// The reconciler is expected to augment the spec with sources and
	// a selector derived from the image/manifest names.
	expectedSpec := *(luSpec.DeepCopy())
	expectedSpec.Sources = []v1alpha1.LiveUpdateSource{{
		FileWatch: "image:sancho-image",
		ImageMap:  "sancho-image",
	}}
	expectedSpec.Selector.Kubernetes = &v1alpha1.LiveUpdateKubernetesSelector{
		ImageMapName:  "sancho-image",
		DiscoveryName: "sancho",
		ApplyName:     "sancho",
	}
	sanchoImage := model.MustNewImageTarget(container.MustParseSelector("sancho-image")).
		WithLiveUpdateSpec("sancho:sancho-image", luSpec).
		WithDockerImage(v1alpha1.DockerImageSpec{Context: f.tempdir.Path()})
	sancho := manifestbuilder.New(f.tempdir, "sancho").
		WithImageTargets(sanchoImage).
		WithK8sYAML(testyaml.SanchoYAML).
		Build()
	f.tfl.Result = tiltfile.TiltfileLoadResult{
		Manifests: []model.Manifest{sancho},
	}
	tf := v1alpha1.Tiltfile{
		ObjectMeta: metav1.ObjectMeta{
			Name: "my-tf",
		},
		Spec: v1alpha1.TiltfileSpec{
			Path: p,
		},
	}
	f.createAndWaitForLoaded(&tf)
	assert.Equal(t, "", tf.Status.Terminated.Error)
	var luList = v1alpha1.LiveUpdateList{}
	f.List(&luList)
	if assert.Equal(t, 1, len(luList.Items)) {
		assert.Equal(t, "sancho:sancho-image", luList.Items[0].Name)
		assert.Equal(t, expectedSpec, luList.Items[0].Spec)
	}
}
// TestCluster verifies that the reconciler's context/namespace
// overrides propagate into the default Cluster object's connection.
func TestCluster(t *testing.T) {
	f := newFixture(t)
	p := f.tempdir.JoinPath("Tiltfile")
	f.r.k8sContextOverride = "context-override"
	f.r.k8sNamespaceOverride = "namespace-override"
	expected := &v1alpha1.ClusterConnection{
		Kubernetes: &v1alpha1.KubernetesClusterConnection{
			Context:   string(f.r.k8sContextOverride),
			Namespace: string(f.r.k8sNamespaceOverride),
		},
	}
	sancho := manifestbuilder.New(f.tempdir, "sancho").
		WithK8sYAML(testyaml.SanchoYAML).
		Build()
	f.tfl.Result = tiltfile.TiltfileLoadResult{
		Manifests: []model.Manifest{sancho},
	}
	name := model.MainTiltfileManifestName.String()
	tf := v1alpha1.Tiltfile{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
		},
		Spec: v1alpha1.TiltfileSpec{
			Path: p,
		},
	}
	f.createAndWaitForLoaded(&tf)
	assert.Equal(t, "", tf.Status.Terminated.Error)
	// Exactly one cluster named "default" with the overridden connection.
	var clList = v1alpha1.ClusterList{}
	f.List(&clList)
	if assert.Equal(t, 1, len(clList.Items)) {
		assert.Equal(t, "default", clList.Items[0].Name)
		assert.Equal(t, expected, clList.Items[0].Spec.Connection)
	}
}
// TestLocalServe verifies that a local_resource with a serve_cmd loads
// into a LocalTarget wired to a "<name>-disable" configmap as its
// serve-command disable source.
func TestLocalServe(t *testing.T) {
	f := newFixture(t)
	p := f.tempdir.JoinPath("Tiltfile")
	m := manifestbuilder.New(f.tempdir, "foo").WithLocalServeCmd(".").Build()
	f.tfl.Result = tiltfile.TiltfileLoadResult{
		Manifests: []model.Manifest{m},
	}
	tf := v1alpha1.Tiltfile{
		ObjectMeta: metav1.ObjectMeta{
			Name: "my-tf",
		},
		Spec: v1alpha1.TiltfileSpec{
			Path: p,
		},
	}
	f.createAndWaitForLoaded(&tf)
	assert.Equal(t, "", tf.Status.Terminated.Error)
	// Inspect the manifest dispatched to the store.
	a := f.st.WaitForAction(t, reflect.TypeOf(ConfigsReloadedAction{})).(ConfigsReloadedAction)
	require.Equal(t, 1, len(a.Manifests))
	m = a.Manifests[0]
	require.Equal(t, model.ManifestName("foo"), m.Name)
	require.IsType(t, model.LocalTarget{}, m.DeployTarget)
	lt := m.DeployTarget.(model.LocalTarget)
	require.NotNil(t, lt.ServeCmdDisableSource, "ServeCmdDisableSource is nil")
	require.NotNil(t, lt.ServeCmdDisableSource.ConfigMap, "ServeCmdDisableSource.ConfigMap is nil")
	require.Equal(t, "foo-disable", lt.ServeCmdDisableSource.ConfigMap.Name)
}
// TestDockerMetrics verifies that loading a docker-built manifest
// reports exactly one "docker.connect" analytics count event with the
// server's arch/version tags.
func TestDockerMetrics(t *testing.T) {
	f := newFixture(t)
	p := f.tempdir.JoinPath("Tiltfile")
	sanchoImage := model.MustNewImageTarget(container.MustParseSelector("sancho-image")).
		WithDockerImage(v1alpha1.DockerImageSpec{Context: f.tempdir.Path()})
	sancho := manifestbuilder.New(f.tempdir, "sancho").
		WithImageTargets(sanchoImage).
		WithK8sYAML(testyaml.SanchoYAML).
		Build()
	f.tfl.Result = tiltfile.TiltfileLoadResult{
		Manifests: []model.Manifest{sancho},
	}
	tf := v1alpha1.Tiltfile{
		ObjectMeta: metav1.ObjectMeta{
			Name: "my-tf",
		},
		Spec: v1alpha1.TiltfileSpec{
			Path: p,
		},
	}
	f.createAndWaitForLoaded(&tf)
	// Tag values match the fake docker client used by the fixture.
	connectEvt := analytics.CountEvent{
		Name: "api.tiltfile.docker.connect",
		Tags: map[string]string{
			"server.arch":    "amd64",
			"server.version": "20.10.11",
			"status":         "connected",
		},
		N: 1,
	}
	assert.ElementsMatch(t, []analytics.CountEvent{connectEvt}, f.ma.Counts)
}
// TestArgsChangeResetsEnabledResources verifies that changing the Tiltfile
// args triggers a reload and that the new EnabledManifests overwrite the
// previous enable/disable state.
func TestArgsChangeResetsEnabledResources(t *testing.T) {
	f := newFixture(t)
	p := f.tempdir.JoinPath("Tiltfile")
	m1 := manifestbuilder.New(f.tempdir, "m1").WithLocalServeCmd("hi").Build()
	m2 := manifestbuilder.New(f.tempdir, "m2").WithLocalServeCmd("hi").Build()
	f.tfl.Result = tiltfile.TiltfileLoadResult{
		Manifests: []model.Manifest{m1, m2},
		EnabledManifests: []model.ManifestName{"m1", "m2"},
	}
	tf := v1alpha1.Tiltfile{
		ObjectMeta: metav1.ObjectMeta{
			Name: "my-tf",
		},
		Spec: v1alpha1.TiltfileSpec{
			Path: p,
			Args: []string{"m1", "m2"},
		},
	}
	f.createAndWaitForLoaded(&tf)
	// Change the args so only m2 is enabled, then drive a second build.
	ts := time.Now()
	f.setArgs("my-tf", []string{"m2"})
	f.tfl.Result.EnabledManifests = []model.ManifestName{"m2"}
	f.MustReconcile(types.NamespacedName{Name: "my-tf"})
	f.waitForRunning("my-tf")
	f.popQueue()
	f.waitForTerminatedAfter("my-tf", ts)
	f.requireEnabled(m1, false)
	f.requireEnabled(m2, true)
}
// TestRunWithoutArgsChangePreservesEnabledResources verifies that a
// triggered re-run with unchanged args does not clobber enable/disable
// state that was modified out-of-band (here: m2 manually enabled).
func TestRunWithoutArgsChangePreservesEnabledResources(t *testing.T) {
	f := newFixture(t)
	p := f.tempdir.JoinPath("Tiltfile")
	m1 := manifestbuilder.New(f.tempdir, "m1").WithLocalServeCmd("hi").Build()
	m2 := manifestbuilder.New(f.tempdir, "m2").WithLocalServeCmd("hi").Build()
	f.tfl.Result = tiltfile.TiltfileLoadResult{
		Manifests: []model.Manifest{m1, m2},
		EnabledManifests: []model.ManifestName{"m1", "m2"},
	}
	tf := v1alpha1.Tiltfile{
		ObjectMeta: metav1.ObjectMeta{
			Name: "my-tf",
		},
		Spec: v1alpha1.TiltfileSpec{
			Path: p,
			Args: []string{"m1"},
		},
	}
	f.createAndWaitForLoaded(&tf)
	// Manually enable m2 via its disable ConfigMap, outside of a Tiltfile run.
	err := configmap.UpsertDisableConfigMap(f.Context(), f.Client, "m2-disable", "isDisabled", false)
	require.NoError(t, err)
	// Re-set identical args and trigger a run; neither should reset m2.
	f.setArgs("my-tf", tf.Spec.Args)
	f.triggerRun("my-tf")
	ts := time.Now()
	f.MustReconcile(types.NamespacedName{Name: "my-tf"})
	f.waitForRunning("my-tf")
	f.popQueue()
	f.waitForTerminatedAfter("my-tf", ts)
	f.requireEnabled(m1, true)
	f.requireEnabled(m2, true)
}
// TestTiltfileFailurePreservesEnabledResources verifies that a failed
// Tiltfile load (with an empty EnabledManifests) does not wipe the
// enable/disable state established by the previous successful load.
func TestTiltfileFailurePreservesEnabledResources(t *testing.T) {
	f := newFixture(t)
	p := f.tempdir.JoinPath("Tiltfile")
	m1 := manifestbuilder.New(f.tempdir, "m1").WithLocalServeCmd("hi").Build()
	m2 := manifestbuilder.New(f.tempdir, "m2").WithLocalServeCmd("hi").Build()
	f.tfl.Result = tiltfile.TiltfileLoadResult{
		Manifests: []model.Manifest{m1, m2},
		EnabledManifests: []model.ManifestName{"m1"},
	}
	tf := v1alpha1.Tiltfile{
		ObjectMeta: metav1.ObjectMeta{
			Name: "my-tf",
		},
		Spec: v1alpha1.TiltfileSpec{
			Path: p,
			Args: []string{"m1"},
		},
	}
	f.createAndWaitForLoaded(&tf)
	// Next load fails; its (empty) EnabledManifests must be ignored.
	f.tfl.Result = tiltfile.TiltfileLoadResult{
		Manifests: []model.Manifest{m1, m2},
		EnabledManifests: []model.ManifestName{},
		Error: errors.New("unknown manifest: m3"),
	}
	f.triggerRun("my-tf")
	ts := time.Now()
	f.MustReconcile(types.NamespacedName{Name: "my-tf"})
	f.waitForRunning("my-tf")
	f.popQueue()
	f.waitForTerminatedAfter("my-tf", ts)
	f.requireEnabled(m1, true)
	f.requireEnabled(m2, false)
}
// TestCancel verifies that clicking the stop-build UIButton while the
// Tiltfile load is blocked terminates the run with a "build canceled"
// error.
func TestCancel(t *testing.T) {
	f := newFixture(t)
	p := f.tempdir.JoinPath("Tiltfile")
	f.tempdir.WriteFile(p, "print('hello-world')")
	// Block the load indefinitely so the cancel can land mid-build.
	f.tfl.Delegate = newBlockingTiltfileLoader()
	tf := v1alpha1.Tiltfile{
		ObjectMeta: metav1.ObjectMeta{
			Name: "my-tf",
		},
		Spec: v1alpha1.TiltfileSpec{
			Path: p,
			StopOn: &v1alpha1.StopOnSpec{UIButtons: []string{uibutton.StopBuildButtonName("my-tf")}},
		},
	}
	cancelButton := uibutton.StopBuildButton(tf.Name)
	err := f.Client.Create(f.Context(), cancelButton)
	require.NoError(t, err)
	ts := time.Now()
	f.Create(&tf)
	f.waitForRunning(tf.Name)
	// Simulate the click and let the reconciler observe it.
	// (A stale, duplicate require.NoError(t, err) that re-checked the
	// Create error above was removed here.)
	cancelButton.Status.LastClickedAt = metav1.NowMicro()
	f.UpdateStatus(cancelButton)
	f.MustReconcile(types.NamespacedName{Name: tf.Name})
	f.popQueue()
	f.waitForTerminatedAfter(tf.Name, ts)
	f.Get(types.NamespacedName{Name: tf.Name}, &tf)
	require.NotNil(t, tf.Status.Terminated)
	require.Equal(t, "build canceled", tf.Status.Terminated.Error)
}
// TestCancelClickedBeforeLoad verifies that a cancel click recorded
// BEFORE the build started does not cancel the build: the load completes
// normally with no Terminated error.
func TestCancelClickedBeforeLoad(t *testing.T) {
	f := newFixture(t)
	p := f.tempdir.JoinPath("Tiltfile")
	f.tempdir.WriteFile(p, "print('hello-world')")
	tfl := newBlockingTiltfileLoader()
	f.tfl.Delegate = tfl
	tf := v1alpha1.Tiltfile{
		ObjectMeta: metav1.ObjectMeta{
			Name: "my-tf",
		},
		Spec: v1alpha1.TiltfileSpec{
			Path: p,
			StopOn: &v1alpha1.StopOnSpec{UIButtons: []string{uibutton.StopBuildButtonName("my-tf")}},
		},
	}
	// The click timestamp predates the build's start time.
	cancelButton := uibutton.StopBuildButton(tf.Name)
	cancelButton.Status.LastClickedAt = metav1.NewMicroTime(time.Now().Add(-time.Second))
	err := f.Client.Create(f.Context(), cancelButton)
	require.NoError(t, err)
	nn := types.NamespacedName{Name: tf.Name}
	ts := time.Now()
	f.Create(&tf)
	f.waitForRunning(tf.Name)
	// give the reconciler a chance to observe the cancel button click
	f.MustReconcile(nn)
	// finish the build
	tfl.Complete()
	f.MustReconcile(nn)
	f.popQueue()
	f.waitForTerminatedAfter(tf.Name, ts)
	f.Get(nn, &tf)
	require.NotNil(t, tf.Status.Terminated)
	require.Equal(t, "", tf.Status.Terminated.Error)
}
// testStore wraps the engine TestingStore and additionally captures any
// dispatched log output in a buffer so tests can assert on it.
type testStore struct {
	*store.TestingStore
	out *bytes.Buffer
}

// NewTestingStore builds a testStore with an empty output buffer.
func NewTestingStore() *testStore {
	return &testStore{
		TestingStore: store.NewTestingStore(),
		out: bytes.NewBuffer(nil),
	}
}

// Dispatch forwards the action to the wrapped store and mirrors log
// actions' messages into the capture buffer.
func (s *testStore) Dispatch(action store.Action) {
	s.TestingStore.Dispatch(action)
	logAction, ok := action.(store.LogAction)
	if ok {
		// Write errors are irrelevant for an in-memory buffer.
		_, _ = fmt.Fprintf(s.out, "%s", logAction.Message())
	}
}
// fixture bundles the controller fixture with every fake the Tiltfile
// reconciler needs: a temp dir, a capturing store, the reconciler itself,
// its requeue workqueue, the fake Tiltfile loader, and in-memory analytics.
type fixture struct {
	*fake.ControllerFixture
	tempdir *tempdir.TempDirFixture
	st *testStore
	r *Reconciler
	q workqueue.RateLimitingInterface
	tfl *tiltfile.FakeTiltfileLoader
	ma *analytics.MemoryAnalytics
}

// newFixture wires the reconciler to the fakes and starts its requeuer on
// a rate-limited workqueue so tests can pump it with popQueue.
func newFixture(t *testing.T) *fixture {
	cfb := fake.NewControllerFixtureBuilder(t)
	tf := tempdir.NewTempDirFixture(t)
	st := NewTestingStore()
	tfl := tiltfile.NewFakeTiltfileLoader()
	d := docker.NewFakeClient()
	r := NewReconciler(st, tfl, d, cfb.Client, v1alpha1.NewScheme(), store.EngineModeUp, "", "", 0)
	q := workqueue.NewRateLimitingQueue(
		workqueue.NewItemExponentialFailureRateLimiter(time.Millisecond, time.Millisecond))
	// Start error deliberately ignored: failures surface when popQueue runs.
	_ = r.requeuer.Start(context.Background(), handler.Funcs{}, q)
	return &fixture{
		ControllerFixture: cfb.Build(r),
		tempdir: tf,
		st: st,
		r: r,
		q: q,
		tfl: tfl,
		ma: cfb.Analytics(),
	}
}
// Wait for the next item on the workqueue, then run reconcile on it.
func (f *fixture) popQueue() {
	f.T().Helper()
	// Run in a goroutine so a stuck queue fails via the timeout below
	// rather than hanging the whole test binary.
	done := make(chan error)
	go func() {
		item, _ := f.q.Get()
		_, err := f.r.Reconcile(f.Context(), item.(reconcile.Request))
		f.q.Done(item)
		done <- err
	}()
	select {
	case <-time.After(time.Second):
		f.T().Fatal("timeout waiting for workqueue")
	case err := <-done:
		assert.NoError(f.T(), err)
	}
}

// waitForTerminatedAfter polls until the named Tiltfile reports a
// Terminated state whose FinishedAt is later than ts.
func (f *fixture) waitForTerminatedAfter(name string, ts time.Time) {
	require.Eventually(f.T(), func() bool {
		var tf v1alpha1.Tiltfile
		f.MustGet(types.NamespacedName{Name: name}, &tf)
		return tf.Status.Terminated != nil && tf.Status.Terminated.FinishedAt.After(ts)
	}, time.Second, time.Millisecond, "waiting for tiltfile to finish running")
}

// waitForRunning polls until the named Tiltfile reports a Running state.
func (f *fixture) waitForRunning(name string) {
	require.Eventually(f.T(), func() bool {
		var tf v1alpha1.Tiltfile
		f.MustGet(types.NamespacedName{Name: name}, &tf)
		return tf.Status.Running != nil
	}, time.Second, time.Millisecond, "waiting for tiltfile to start running")
}

// createAndWaitForLoaded creates the Tiltfile object, drives one full
// build through the workqueue, and refreshes tf with the final status.
func (f *fixture) createAndWaitForLoaded(tf *v1alpha1.Tiltfile) {
	ts := time.Now()
	f.Create(tf)
	f.waitForRunning(tf.Name)
	f.popQueue()
	f.waitForTerminatedAfter(tf.Name, ts)
	f.MustGet(types.NamespacedName{Name: tf.Name}, tf)
}

// triggerRun enqueues the named resource on the trigger-queue ConfigMap,
// simulating a manual trigger.
func (f *fixture) triggerRun(name string) {
	queue := configmap2.TriggerQueueCreate([]configmap2.TriggerQueueEntry{{Name: model.ManifestName(name)}})
	f.Create(&queue)
}

// setArgs creates or updates the named Tiltfile so its Spec.Args match args.
func (f *fixture) setArgs(name string, args []string) {
	tf := v1alpha1.Tiltfile{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
		},
	}
	_, err := controllerutil.CreateOrUpdate(f.Context(), f.Client, &tf, func() error {
		tf.Spec.Args = args
		return nil
	})
	require.NoError(f.T(), err)
}

// requireEnabled asserts on the manifest's "-disable" ConfigMap: the
// resource counts as enabled when its "isDisabled" value parses as false.
func (f *fixture) requireEnabled(m model.Manifest, isEnabled bool) {
	var cm v1alpha1.ConfigMap
	f.MustGet(types.NamespacedName{Name: disableConfigMapName(m)}, &cm)
	isDisabled, err := strconv.ParseBool(cm.Data["isDisabled"])
	require.NoError(f.T(), err)
	actualIsEnabled := !isDisabled
	require.Equal(f.T(), isEnabled, actualIsEnabled, "is %s enabled", m.Name)
}
// builds block until canceled or manually completed
type blockingTiltfileLoader struct {
	completionChan chan struct{}
}

// newBlockingTiltfileLoader returns a loader whose Load blocks until
// Complete is called or the context is canceled.
func newBlockingTiltfileLoader() blockingTiltfileLoader {
	return blockingTiltfileLoader{completionChan: make(chan struct{})}
}

// Load blocks on cancellation or completion, then returns an empty result
// in either case.
func (b blockingTiltfileLoader) Load(ctx context.Context, tf *v1alpha1.Tiltfile, prevResult *tiltfile.TiltfileLoadResult) tiltfile.TiltfileLoadResult {
	select {
	case <-ctx.Done():
	case <-b.completionChan:
	}
	return tiltfile.TiltfileLoadResult{}
}

// Complete releases all pending and future Load calls. Call at most once
// (a second close would panic).
func (b blockingTiltfileLoader) Complete() {
	close(b.completionChan)
}
|
package standalone
import (
"fmt"
"os"
"strconv"
"strings"
"github.com/Dynatrace/dynatrace-operator/src/arch"
"github.com/Dynatrace/dynatrace-operator/src/config"
"github.com/pkg/errors"
)
// containerInfo identifies one instrumented container by name and image.
type containerInfo struct {
	Name string `json:"name"`
	Image string `json:"image"`
}

// environment aggregates every envvar-driven setting the standalone init
// process reads; populated by newEnv.
type environment struct {
	Mode config.InstallMode `json:"mode"`
	FailurePolicy bool `json:"failurePolicy"`
	InstallerUrl string `json:"installerUrl"`
	InstallerFlavor string `json:"installerFlavor"`
	InstallVersion string `json:"installVersion"`
	InstallerTech []string `json:"installerTech"`
	InstallPath string `json:"installPath"`
	Containers []containerInfo `json:"containers"`
	K8NodeName string `json:"k8NodeName"`
	K8PodName string `json:"k8PodName"`
	// NOTE(review): the tag reads "k8BasePodUID" although the field is the
	// pod UID — looks like a copy/paste from K8BasePodName. Confirm before
	// changing; consumers of the serialized form may depend on it.
	K8PodUID string `json:"k8BasePodUID"`
	K8ClusterID string `json:"k8ClusterID"`
	K8BasePodName string `json:"k8BasePodName"`
	K8Namespace string `json:"k8Namespace"`
	WorkloadKind string `json:"workloadKind"`
	WorkloadName string `json:"workloadName"`
	OneAgentInjected bool `json:"oneAgentInjected"`
	DataIngestInjected bool `json:"dataIngestInjected"`
}
// newEnv builds an environment from process envvars: the injection-mode
// flags first (they decide which fields are required), then the required
// fields (failing if any is missing), then best-effort optional fields.
func newEnv() (*environment, error) {
	log.Info("checking envvars")
	env := &environment{}
	env.setMutationTypeFields()
	err := env.setRequiredFields()
	if err != nil {
		return nil, err
	}
	env.setOptionalFields()
	log.Info("envvars checked", "env", env)
	return env, nil
}
// setRequiredFields runs every required field setter for the active
// injection types. Individual failures are logged as they occur; the
// returned error only carries the count of missing envvars.
func (env *environment) setRequiredFields() error {
	errs := []error{}
	requiredFieldSetters := []func() error{
		env.addFailurePolicy,
	}
	// Which groups are required depends on the mutation flags read earlier
	// by setMutationTypeFields.
	if env.OneAgentInjected {
		requiredFieldSetters = append(requiredFieldSetters, env.getOneAgentFieldSetters()...)
	}
	if env.DataIngestInjected {
		requiredFieldSetters = append(requiredFieldSetters, env.getDataIngestFieldSetters()...)
	}
	for _, setField := range requiredFieldSetters {
		if err := setField(); err != nil {
			errs = append(errs, err)
			log.Info(err.Error())
		}
	}
	if len(errs) != 0 {
		return errors.Errorf("%d envvars missing", len(errs))
	}
	return nil
}
// getCommonFieldSetters returns the setters required for every injection
// type (pod identity fields).
func (env *environment) getCommonFieldSetters() []func() error {
	return []func() error{
		env.addK8PodName,
		env.addK8PodUID,
		env.addK8Namespace,
	}
}

// getOneAgentFieldSetters returns the common setters plus those required
// only for OneAgent injection.
func (env *environment) getOneAgentFieldSetters() []func() error {
	return append(env.getCommonFieldSetters(),
		env.addMode,
		env.addInstallerTech,
		env.addInstallPath,
		env.addContainers,
		env.addK8NodeName,
		env.addK8BasePodName,
	)
}

// getDataIngestFieldSetters returns the common setters plus those required
// only for data-ingest (metadata enrichment) injection.
func (env *environment) getDataIngestFieldSetters() []func() error {
	return append(env.getCommonFieldSetters(),
		env.addWorkloadKind,
		env.addWorkloadName,
		env.addK8ClusterID,
	)
}
// setOptionalFields reads best-effort fields; missing envvars here are
// not errors.
func (env *environment) setOptionalFields() {
	env.addInstallerUrl()
	env.addInstallerFlavor()
	env.addInstallVersion()
}

// setMutationTypeFields reads the flags describing which webhook
// mutations were applied to this pod.
func (env *environment) setMutationTypeFields() {
	env.addOneAgentInjected()
	env.addDataIngestInjected()
}
// addMode reads the install mode envvar into env.Mode.
func (env *environment) addMode() error {
	mode, err := checkEnvVar(config.AgentInstallModeEnv)
	if err != nil {
		return err
	}
	env.Mode = config.InstallMode(mode)
	return nil
}

// addFailurePolicy reads the injection failure policy; any value other
// than "fail" is treated as non-failing.
func (env *environment) addFailurePolicy() error {
	failurePolicy, err := checkEnvVar(config.InjectionFailurePolicyEnv)
	if err != nil {
		return err
	}
	env.FailurePolicy = failurePolicy == "fail"
	return nil
}
// addInstallerFlavor reads the installer flavor, falling back to the
// compiled-in architecture default when unset.
func (env *environment) addInstallerFlavor() {
	flavor, _ := checkEnvVar(config.AgentInstallerFlavorEnv)
	if flavor == "" {
		env.InstallerFlavor = arch.Flavor
	} else {
		env.InstallerFlavor = flavor
	}
}

// addInstallerTech reads the comma-separated installer technology list.
func (env *environment) addInstallerTech() error {
	technologies, err := checkEnvVar(config.AgentInstallerTechEnv)
	if err != nil {
		return err
	}
	env.InstallerTech = strings.Split(technologies, ",")
	return nil
}
// addInstallPath reads the agent install path envvar into env.InstallPath.
func (env *environment) addInstallPath() error {
	installPath, err := checkEnvVar(config.AgentInstallPathEnv)
	if err != nil {
		return err
	}
	env.InstallPath = installPath
	return nil
}
// addContainers reads the instrumented container list from the
// environment: the container count first, then the name/image envvar
// pair for each 1-based container index. Returns an error as soon as any
// required envvar is missing or the count is not an integer.
func (env *environment) addContainers() error {
	containerCountStr, err := checkEnvVar(config.AgentContainerCountEnv)
	if err != nil {
		return err
	}
	// Renamed from the typo "countCount"; it is the number of containers.
	containerCount, err := strconv.Atoi(containerCountStr)
	if err != nil {
		return err
	}
	containers := make([]containerInfo, 0, containerCount)
	for i := 1; i <= containerCount; i++ {
		nameEnv := fmt.Sprintf(config.AgentContainerNameEnvTemplate, i)
		imageEnv := fmt.Sprintf(config.AgentContainerImageEnvTemplate, i)
		containerName, err := checkEnvVar(nameEnv)
		if err != nil {
			return err
		}
		imageName, err := checkEnvVar(imageEnv)
		if err != nil {
			return err
		}
		containers = append(containers, containerInfo{
			Name: containerName,
			Image: imageName,
		})
	}
	env.Containers = containers
	return nil
}
// addK8NodeName reads the Kubernetes node name envvar.
func (env *environment) addK8NodeName() error {
	nodeName, err := checkEnvVar(config.K8sNodeNameEnv)
	if err != nil {
		return err
	}
	env.K8NodeName = nodeName
	return nil
}

// addK8PodName reads the pod name envvar.
func (env *environment) addK8PodName() error {
	podName, err := checkEnvVar(config.K8sPodNameEnv)
	if err != nil {
		return err
	}
	env.K8PodName = podName
	return nil
}

// addK8PodUID reads the pod UID envvar.
func (env *environment) addK8PodUID() error {
	podUID, err := checkEnvVar(config.K8sPodUIDEnv)
	if err != nil {
		return err
	}
	env.K8PodUID = podUID
	return nil
}

// addK8ClusterID reads the cluster ID envvar.
func (env *environment) addK8ClusterID() error {
	clusterID, err := checkEnvVar(config.K8sClusterIDEnv)
	if err != nil {
		return err
	}
	env.K8ClusterID = clusterID
	return nil
}

// addK8BasePodName reads the base pod (workload-owner) name envvar.
func (env *environment) addK8BasePodName() error {
	basePodName, err := checkEnvVar(config.K8sBasePodNameEnv)
	if err != nil {
		return err
	}
	env.K8BasePodName = basePodName
	return nil
}

// addK8Namespace reads the pod namespace envvar.
func (env *environment) addK8Namespace() error {
	namespace, err := checkEnvVar(config.K8sNamespaceEnv)
	if err != nil {
		return err
	}
	env.K8Namespace = namespace
	return nil
}
// addWorkloadKind reads the workload kind envvar; the sentinel "unknown
// workload" value is normalized to an empty string.
func (env *environment) addWorkloadKind() error {
	workloadKind, err := checkEnvVar(config.EnrichmentWorkloadKindEnv)
	if err != nil {
		return err
	}
	if workloadKind == config.EnrichmentUnknownWorkload {
		env.WorkloadKind = ""
	} else {
		env.WorkloadKind = workloadKind
	}
	return nil
}

// addWorkloadName reads the workload name envvar with the same
// unknown-workload normalization as addWorkloadKind.
func (env *environment) addWorkloadName() error {
	workloadName, err := checkEnvVar(config.EnrichmentWorkloadNameEnv)
	if err != nil {
		return err
	}
	if workloadName == config.EnrichmentUnknownWorkload {
		env.WorkloadName = ""
	} else {
		env.WorkloadName = workloadName
	}
	return nil
}
// addInstallerUrl reads the optional installer URL (empty when unset).
func (env *environment) addInstallerUrl() {
	url, _ := checkEnvVar(config.AgentInstallerUrlEnv)
	env.InstallerUrl = url
}

// addInstallVersion reads the optional installer version (empty when unset).
func (env *environment) addInstallVersion() {
	version, _ := checkEnvVar(config.AgentInstallerVersionEnv)
	env.InstallVersion = version
}

// addOneAgentInjected sets the flag when the envvar is exactly "true".
func (env *environment) addOneAgentInjected() {
	oneAgentInjected, _ := checkEnvVar(config.AgentInjectedEnv)
	env.OneAgentInjected = oneAgentInjected == "true"
}

// addDataIngestInjected sets the flag when the envvar is exactly "true".
func (env *environment) addDataIngestInjected() {
	dataIngestInjected, _ := checkEnvVar(config.EnrichmentInjectedEnv)
	env.DataIngestInjected = dataIngestInjected == "true"
}
// checkEnvVar returns the value of the named environment variable, or an
// error when it is unset. An empty value is treated the same as unset.
func checkEnvVar(envvar string) (string, error) {
	value := os.Getenv(envvar)
	if value == "" {
		return "", errors.Errorf("%s environment variable is missing", envvar)
	}
	return value, nil
}
|
package auth
import (
"context"
"encoding/json"
"fmt"
"net/http"
"github.com/morikuni/failure"
"github.com/nerocrux/webauthn/domain"
"github.com/nerocrux/webauthn/errors"
)
// loginRequest is the JSON body of the login endpoint; only the username
// is needed to start an assertion ceremony.
type loginRequest struct {
	Username string
}
// LoginEndpoint starts a WebAuthn assertion (login) ceremony: it looks up
// the user, builds the assertion options with a fresh challenge, persists
// the challenge, and writes the options as JSON (plus identity cookies).
func (s *Server) LoginEndpoint(rw http.ResponseWriter, req *http.Request) error {
	// NOTE(review): req.Context() would propagate request cancellation;
	// kept as Background() to avoid changing behavior here.
	ctx := context.Background()
	decoder := json.NewDecoder(req.Body)
	var r loginRequest
	if err := decoder.Decode(&r); err != nil {
		errorHandler(rw, http.StatusBadRequest, "bad json request body")
		return err
	}
	username := r.Username
	if username == "" {
		errorHandler(rw, http.StatusBadRequest, "bad username")
		return nil
	}
	txn := s.spannerClient.ReadOnlyTransaction()
	defer txn.Close()
	user, err := s.UserService.GetUserByUsername(ctx, txn, username)
	if err != nil {
		// Previously a non-NotFound error fell through and dereferenced a
		// nil user. Report unknown users as a bad request and everything
		// else as a server-side failure.
		if code, _ := failure.CodeOf(err); code == errors.NotFoundEntity {
			errorHandler(rw, http.StatusBadRequest, fmt.Sprintf("user not exists: %v", username))
		} else {
			errorHandler(rw, http.StatusInternalServerError, "failed to find user")
		}
		return err
	}
	if !user.Registered {
		// A user record without a completed registration cannot log in.
		errorHandler(rw, http.StatusBadRequest, fmt.Sprintf("user not exists: %v", username))
		return nil
	}
	authenticators, err := s.AuthenticatorService.GetAuthenticatorByUserID(ctx, txn, user.ID)
	if err != nil {
		errorHandler(rw, http.StatusInternalServerError, "failed to find authenticators")
		return err
	}
	getAssertion := generateServerGetAssertion(user, authenticators)
	getAssertion.Status = STATUS_OK
	res, err := json.Marshal(&getAssertion)
	if err != nil {
		errorHandler(rw, http.StatusInternalServerError, "cannot store login challenge")
		return err
	}
	setCookie(rw, "id", user.Username)
	setCookie(rw, "displayName", user.Name)
	setCookie(rw, "userID", user.ID)
	// Persist the challenge so the assertion response can be verified later.
	if err := s.UserService.SaveLoginChallenge(ctx, s.spannerClient, user.ID, getAssertion.Challenge); err != nil {
		errorHandler(rw, http.StatusInternalServerError, "cannot store login challenge")
		return err
	}
	rw.Header().Set("Content-Type", "application/json")
	if _, err := rw.Write(res); err != nil {
		return err
	}
	return nil
}
// getAssertion is the wire format returned to the client when starting a
// login ceremony (assertion options).
type getAssertion struct {
	Status string `json:"status"`
	Challenge string `json:"challenge"`
	AllowCredentials []allowCredential `json:"allowCredentials,omitempty"`
	UserVerification domain.UserVerificationRequirement `json:"userVerification,omitempty"`
	// NOTE(review): Timeout and RpId are declared for the wire format but
	// are never populated in this file — confirm whether that is intended.
	Timeout int `json:"timeout,omitempty"`
	RpId string `json:"rpId,omitempty"`
}

// allowCredential describes one credential the client may use to sign
// the assertion.
type allowCredential struct {
	Type string `json:"type"`
	ID string `json:"id"`
	Transports []string `json:"transports,omitempty"`
}
// generateServerGetAssertion builds assertion options for the user: a
// fresh random challenge, the user's verification requirement, and — for
// non-resident-key users — the list of registered credentials the client
// may use. Status is left empty for the caller to set.
func generateServerGetAssertion(user *domain.User, authenticators []*domain.Authenticator) getAssertion {
	var allowCredentials []allowCredential
	authenticatorAttachment := domain.AuthenticatorAttachment(user.AuthenticatorAttachment)
	// Platform authenticators are reachable only via "internal"; roaming
	// (cross-platform) ones via physical transports.
	transports := []string{"internal"}
	if authenticatorAttachment == domain.CrossPlatform {
		transports = []string{"usb", "nfc", "ble"}
	}
	// Resident-key (discoverable credential) users need no allow list.
	if !user.RequireResidentKey {
		for _, authr := range authenticators {
			allowCredentials = append(allowCredentials, allowCredential{
				Type: CRED_TYPE_PUBLIC_KEY,
				ID: authr.ID,
				Transports: transports,
			})
		}
	}
	userVerification := domain.UserVerificationRequirement(user.UserVerification)
	return getAssertion{
		Challenge: randomBase64URLBuffer(32),
		AllowCredentials: allowCredentials,
		UserVerification: userVerification,
	}
}
|
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package video
import (
"context"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"time"
"chromiumos/tast/common/testexec"
"chromiumos/tast/local/upstart"
"chromiumos/tast/shutil"
"chromiumos/tast/testing"
)
// v4l2SummaryRegExp is the regexp to find the summary result from the binary log.
var v4l2SummaryRegExp = regexp.MustCompile(`Total.*: \d+, Succeeded: \d+, Failed: \d+, Warnings: \d+`)

// init registers the test with its parameterized v4l2-compliance
// command lines.
func init() {
	testing.AddTest(&testing.Test{
		Func: PlatformV4L2,
		Desc: "Runs v4l2 compliance tests",
		Contacts: []string{
			"stevecho@chromium.org",
			"chromeos-gfx-video@google.com",
		},
		Attr: []string{"group:graphics", "graphics_video", "graphics_perbuild"},
		SoftwareDeps: []string{"v4l2_codec"},
		Timeout: 2 * time.Minute,
		Params: []testing.Param{{
			// -v: Turn on verbose reporting.
			Name: "decoder",
			Val: []string{"v4l2-compliance", "-d", "/dev/video-dec0", "-v"},
		}},
	})
}
// PlatformV4L2 runs v4l2-compliance binary test.
// It stops the UI, runs the parameterized command with output captured to
// a log file, and on a non-zero exit reports the tool's summary line.
func PlatformV4L2(ctx context.Context, s *testing.State) {
	// Test doesn't use the graphicsNoChrome fixture since the driver may
	// write errors to the kernel logs which are picked up by the GPU
	// watchdog.
	if err := upstart.StopJob(ctx, "ui"); err != nil {
		s.Fatal("Failed to stop ui job: ", err)
	}
	defer upstart.EnsureJobRunning(ctx, "ui")
	command := s.Param().([]string)
	s.Log("Running ", shutil.EscapeSlice(command))
	// Mirror all of the binary's output into <OutDir>/<binary>.txt.
	logFile := filepath.Join(s.OutDir(), filepath.Base(command[0])+".txt")
	f, err := os.Create(logFile)
	if err != nil {
		s.Fatal("Failed to create a log file: ", err)
	}
	defer f.Close()
	cmd := testexec.CommandContext(ctx, command[0], command[1:]...)
	cmd.Stdout = f
	cmd.Stderr = f
	if err := cmd.Run(testexec.DumpLogOnError); err != nil {
		// A non-zero exit code means compliance failures; anything else
		// (e.g. the binary not starting) is fatal.
		exitCode, ok := testexec.ExitCode(err)
		if !ok {
			s.Fatalf("Failed to run %s: %v", command[0], err)
		}
		contents, err := ioutil.ReadFile(logFile)
		if err != nil {
			s.Fatal("Failed to read the log file: ", err)
		}
		// Extract the single "Total ... Succeeded ... Failed ..." summary line.
		matches := v4l2SummaryRegExp.FindAllStringSubmatch(string(contents), -1)
		if matches == nil {
			s.Fatal("Failed to find matches for summary result")
		}
		if len(matches) != 1 {
			s.Fatalf("Found %d matches for summary result; want 1", len(matches))
		}
		if exitCode > 0 {
			s.Errorf("%s", matches)
		} else {
			s.Logf("%s", matches)
		}
	}
}
|
package dynamic_programming
import (
"testing"
)
// findTargetSumWays counts the ways to assign +/- signs to nums so the
// signed sum equals S (LeetCode 494).
//
// Derivation: with P the positively-signed subset and N the rest,
// sum(P)-sum(N)=S and sum(P)+sum(N)=sum, so 2*sum(P)=sum+S. The answer
// is the number of subsets of nums summing to (sum+S)/2, counted with a
// 0/1-knapsack DP.
func findTargetSumWays(nums []int, S int) int {
	sum := 0
	for i := 0; i < len(nums); i++ {
		sum += nums[i]
	}
	// Unreachable when |S| > sum, and sum+S must be even to yield an
	// integer subset target. The S < -sum guard also prevents a negative
	// dp size (the original panicked for, e.g., nums={1,1}, S=-4).
	if sum < S || S < -sum || (sum+S)&1 == 1 {
		return 0
	}
	target := (sum + S) / 2
	// dp[j] = number of subsets with sum exactly j.
	dp := make([]int, target+1)
	dp[0] = 1
	for i := 0; i < len(nums); i++ {
		// Iterate downwards so each number is used at most once.
		for j := target; j >= nums[i]; j-- {
			dp[j] = dp[j] + dp[j-nums[i]]
		}
	}
	return dp[target]
}
// Target Sum solved by forward depth-first search: try both signs for
// every element and count complete assignments whose total equals S.
// result accumulates matches across the recursion; being package-level
// state, this variant is not safe for concurrent use.
var result = 0

func findTargetSumWays2(nums []int, S int) int {
	result = 0
	calculate(nums, 0, 0, S)
	return result
}

// calculate explores both sign choices for nums[idx:], carrying the
// running total in acc and bumping result whenever a complete assignment
// hits the target.
func calculate(nums []int, idx int, acc int, target int) {
	if idx == len(nums) {
		if acc == target {
			result++
		}
		return
	}
	calculate(nums, idx+1, acc+nums[idx], target)
	calculate(nums, idx+1, acc-nums[idx], target)
}
// Target Sum solved by recursion from the last element toward the first.
func findTargetSumWays1(nums []int, S int) int {
	// With no numbers only the empty sum 0 is reachable, in exactly one
	// way. Without this guard the recursion below indexed nums[-1].
	if len(nums) == 0 {
		if S == 0 {
			return 1
		}
		return 0
	}
	return findTarget(nums, S, len(nums)-1)
}

// findTarget returns the number of ways nums[0..index] can be signed to
// sum to target.
func findTarget(nums []int, target int, index int) int {
	if index == 0 {
		if nums[index] == 0 && target == 0 {
			// +0 and -0 both reach 0: two ways.
			return 2
		} else if nums[index] == target || nums[index] == -target {
			return 1
		} else {
			return 0
		}
	}
	nextIndex := index - 1
	// Give nums[index] a + sign (so the prefix must reach target-nums[index])
	// or a - sign (prefix must reach target+nums[index]).
	return findTarget(nums, target-nums[index], nextIndex) + findTarget(nums, target+nums[index], nextIndex)
}
// Test_494 logs the DP solution's answers for a few known inputs; the
// expected values are noted in the trailing comments.
func Test_494(t *testing.T) {
	t.Log(findTargetSumWays([]int{1, 1, 1, 1, 1}, 3)) //5
	t.Log(findTargetSumWays([]int{1, 0}, 1)) //2
	t.Log(findTargetSumWays([]int{0, 1}, 1)) //2
	t.Log(findTargetSumWays([]int{0, 0, 0, 0, 0, 0, 0, 0, 1}, 1)) //256
}
|
// This Source Code Form is subject to the terms of the MIT License.
// If a copy of the MIT License was not distributed with this
// file, you can obtain one at https://opensource.org/licenses/MIT.
//
// Copyright (c) DUSK NETWORK. All rights reserved.
package score
import (
"context"
"github.com/dusk-network/dusk-blockchain/pkg/core/consensus"
"github.com/dusk-network/dusk-blockchain/pkg/core/consensus/header"
"github.com/dusk-network/dusk-blockchain/pkg/p2p/wire/message"
crypto "github.com/dusk-network/dusk-crypto/hash"
)
// mock is a test double for the score Generator; when inert it produces
// empty proposals.
type mock struct {
	*consensus.Emitter
	inert bool
}

// Generate returns a mocked (or, when inert, empty) score proposal for
// the given round and step, with random entropy as the block hash.
func (m *mock) Generate(ctx context.Context, r consensus.RoundUpdate, step uint8) message.ScoreProposal {
	// Entropy error deliberately ignored in this test double.
	hash, _ := crypto.RandEntropy(32)
	hdr := header.Header{
		Round: r.Round,
		Step: step,
		BlockHash: hash,
		PubKeyBLS: m.Keys.BLSPubKeyBytes,
	}
	if m.inert {
		return message.EmptyScoreProposal(hdr)
	}
	return message.MockScoreProposal(hdr)
}
// Mock a score generator. With inert set, the generator only ever emits
// empty score proposals.
func Mock(e *consensus.Emitter, inert bool) Generator {
	return &mock{e, inert}
}
|
/*
* Copyright 2021 American Express
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package utils
import (
"log"
"net/url"
"os"
"path/filepath"
"regexp"
"runtime"
"strings"
"unicode"
"github.com/howeyc/gopass"
)
var gitURLPattern = regexp.MustCompile(`([^/]*)(?:.git)`)
// Contains Does an array/slice contain a string
func Contains(haystack []string, needle string) bool {
	for i := range haystack {
		if haystack[i] == needle {
			return true
		}
	}
	return false
}
// PathMustExist exit if path is invalid.
// Fixed: Exists never returns a non-nil error (not-exist maps to
// (false, nil)), so the old `if err != nil` guard was dead code and this
// function never actually exited for a missing path.
func PathMustExist(path string) {
	fileExists, err := Exists(path)
	if err != nil || !fileExists {
		log.Fatal(errInvalidPath)
	}
}
// GetConfigDir Determine the operating system and pull the path to the go-earlybird config directory
func GetConfigDir() (configDir string) {
	if strings.HasSuffix(os.Args[0], ".test") { // Return repository config directory when testing ../../config
		return ".." + string(os.PathSeparator) + ".." + string(os.PathSeparator) + "config" + string(os.PathSeparator)
	}
	userHomeDir, err := os.UserHomeDir()
	if err != nil {
		log.Fatal("Home directory doesn't exist", err)
	}
	// A config directory next to the executable overrides the home-dir one.
	cwd := MustGetED()
	overrideDir := string(os.PathSeparator) + ebConfFileDir + string(os.PathSeparator)
	localOverrideDir := cwd + overrideDir
	localOverrideFileCheck := localOverrideDir + ebConfFileName
	if fe, _ := Exists(localOverrideFileCheck); fe {
		configDir = localOverrideDir
		log.Println("Using local config directory: ", localOverrideDir)
	} else {
		// Otherwise the per-OS directory under the user's home is used.
		switch runtime.GOOS {
		case "windows":
			configDir += ebWinConfFileDir
		case "linux": // also can be specified to FreeBSD
			configDir += overrideDir
		case "darwin":
			configDir += overrideDir
		}
		configDir = userHomeDir + configDir
	}
	return configDir
}
// MustGetED Get the executable directory or exit
func MustGetED() string {
	exePath, err := os.Executable()
	if err != nil {
		log.Fatal(err)
	}
	return filepath.Dir(exePath)
}
// MustGetWD Get the CWD for the default target Directory or exit
func MustGetWD() string {
	workingDir, err := os.Getwd()
	if err != nil {
		log.Fatal(err)
	}
	return workingDir
}
// GetTargetType returns the file scan context: staged files, tracked
// files, or everything, with the staged flag taking precedence.
func GetTargetType(GitStagedFlag, GitTrackedFlag bool) (targetType string) {
	switch {
	case GitStagedFlag:
		return Staged
	case GitTrackedFlag:
		return Tracked
	default:
		return All
	}
}
// GetEnabledModulesMap returns a map of module name to filename enabled
// by default or explicitly defined with CLI parameters. With no enable
// flags, every available module is returned.
func GetEnabledModulesMap(enableFlags []string, availableModules map[string]string) (enabledModules map[string]string) {
	if len(enableFlags) == 0 {
		return availableModules
	}
	enabledModules = make(map[string]string, len(enableFlags))
	for _, name := range enableFlags {
		enabledModules[name] = availableModules[name]
	}
	return enabledModules
}
// GetDisplayList Build the string to display an array in a human readable format
func GetDisplayList(levelNames []string) string {
	return fmt.Sprintf("[ %s ]", strings.Join(levelNames, " | "))
}
// DeleteGit Check if we've cloned a git repo, if so delete it
func DeleteGit(ptrRepo string, path string) {
	// A non-empty repo URL means path holds our own clone and is safe to remove.
	if ptrRepo != "" {
		err := os.RemoveAll(path)
		if err != nil {
			log.Println(errGitDelete, err)
		}
	}
}
// GetGitRepo Parse repository name from URL
func GetGitRepo(gitURL string) (repository string) {
	if strings.Contains(gitURL, "github.com/") {
		// GitHub: repository is the URL path without the leading slash.
		u, err := url.Parse(gitURL)
		if err != nil {
			// Unparsable URL: return the zero value.
			return
		}
		repository = strings.TrimPrefix(u.Path, "/")
	} else {
		// Other hosts: take the segment the package-level pattern captures
		// before ".git".
		items := gitURLPattern.FindStringSubmatch(gitURL)
		if len(items) > 1 {
			repository = items[1]
		}
	}
	return repository
}
// GetBBProject Parse project name from bitbucket URL
func GetBBProject(bbURL string) (project string) {
	projectPattern := regexp.MustCompile(`(?:projects/)([^/]*)`)
	// Match second capture group, 1 = project/XXX, 2 = XXX
	matches := projectPattern.FindStringSubmatch(bbURL)
	if len(matches) < 1 {
		log.Println("Failed To Get BB Project from URL:", bbURL)
		os.Exit(1)
		return
	}
	project = matches[1]
	return project
}
// ParseBBURL Parse the base URL, Path and project name from BB URL
func ParseBBURL(bbURL string) (baseurl, path, project string) {
	u, err := url.Parse(bbURL)
	if err != nil {
		// Unparsable URL: all three results stay zero-valued.
		log.Println("Failed to parse Bitbucket URL")
		return
	}
	baseurl = u.Scheme + "://" + u.Host // Parse Base URL
	parts := strings.Split(bbURL, "/projects/") // Get URL before /projects
	path = strings.Replace(parts[0], baseurl, "", -1) // Delete the base url leaving the path
	// Note: GetBBProject exits the process when no project is present.
	return baseurl, path, GetBBProject(bbURL)
}
// GetGitProject parse project from URL (the path without its leading
// slash; empty when the URL does not parse).
func GetGitProject(gitURL string) (project string) {
	parsed, err := url.Parse(gitURL)
	if err != nil {
		return ""
	}
	return strings.TrimPrefix(parsed.Path, "/")
}
// GetGitURL Format GI URL and parse/prompt user password.
// Mutates *ptrRepo in place (strips embedded user and scheme, then forces
// https) and, when a username is known, interactively prompts for and
// returns the password; otherwise returns "".
func GetGitURL(ptrRepo, ptrRepoUser *string) (Password string) {
	// Parse Username from URL
	u, err := url.Parse(*ptrRepo)
	if err != nil {
		return
	}
	// Remove Username prefix
	*ptrRepo = strings.Replace(*ptrRepo, u.User.Username()+"@", "", 1)
	if *ptrRepoUser == "" {
		*ptrRepoUser = u.User.Username()
	}
	// Remove HTTP and HTTPS prefix
	*ptrRepo = strings.Replace(*ptrRepo, gitHTTP, "", 1)
	*ptrRepo = strings.Replace(*ptrRepo, gitHTTPS, "", 1)
	*ptrRepo = gitHTTPS + *ptrRepo
	if *ptrRepoUser != "" {
		// Interactive: masked password prompt on the terminal.
		log.Print(gitPasswdPrompt)
		RepoPass, err := gopass.GetPasswdMasked()
		if err != nil {
			log.Println(errGitPasswd, err)
			os.Exit(1)
		}
		// Format git URL with user and password
		return string(RepoPass)
	}
	return ""
}
// Exists Check to see if a path exists.
// Returns (true, nil) when the path exists, (false, nil) when it does
// not, and — preserving the original behavior — (true, nil) when Stat
// fails for any other reason (e.g. permissions).
func Exists(path string) (bool, error) {
	if _, err := os.Stat(path); err != nil {
		if os.IsNotExist(err) {
			return false, nil
		}
		return true, nil
	}
	return true, nil
}
// GetAlphaNumericValues returns the alphanumeric part of the input string,
// dropping every rune that is neither a letter nor a number.
func GetAlphaNumericValues(input string) string {
	var builder strings.Builder
	for _, r := range input {
		if unicode.IsLetter(r) || unicode.IsNumber(r) {
			builder.WriteRune(r)
		}
	}
	return builder.String()
}
|
//nolint
package recursive
import (
"fmt"
"math/rand"
"testing"
"github.com/slonegd/structstringer/examples/simple"
"github.com/stretchr/testify/assert"
)
// TestAString checks A's generated String() output (nested struct fields
// rendered recursively) and its fmt %#v representation.
func TestAString(t *testing.T) {
	tests := []struct {
		a A
		want string
		fmtWant string
	}{
		{
			a: A{i: 42, flag: true, b: B{i: 43}},
			want: `
recursive.A{
	i	int	42
	flag	bool	true
	b	recursive.B	{
		i	int	43
		flag	bool	false
		c	recursive.C	{
			i	int	0
			flag	bool	false
		}
	}
}`,
			fmtWant: "recursive.A{i:42, flag:true, b:recursive.B{i:43, flag:false, c:recursive.C{i:0, flag:false}}}",
		},
	}
	for _, tt := range tests {
		assert.Equal(t, tt.want, tt.a.String())
		assert.Equal(t, tt.fmtWant, fmt.Sprintf("%#v", tt.a))
	}
}
// TestDString checks String()/%#v for a struct embedding an
// exported-fields-only struct from another package.
func TestDString(t *testing.T) {
	tests := []struct {
		d D
		want string
		fmtWant string
	}{
		{
			d: D{i: 42, flag: true, b: simple.B{I: 43}},
			want: `
recursive.D{
	i	int	42
	flag	bool	true
	b	simple.B	{
		I	int	43
	}
}`,
			fmtWant: "recursive.D{i:42, flag:true, b:simple.B{I:43}}",
		},
	}
	for _, tt := range tests {
		assert.Equal(t, tt.want, tt.d.String())
		assert.Equal(t, tt.fmtWant, fmt.Sprintf("%#v", tt.d))
	}
}
// TestEString checks String()/%#v for a struct containing a foreign
// struct with unexported fields, which the generator renders as
// "not_implemented_unexported_fields".
func TestEString(t *testing.T) {
	tests := []struct {
		e E
		want string
		fmtWant string
	}{
		{
			e: E{i: 42, flag: true, c: simple.C{I: 43}},
			want: `
recursive.E{
	i	int	42
	flag	bool	true
	c	simple.C	{
		I	int	43
		flag	bool	not_implemented_unexported_fields
	}
}`,
			fmtWant: "recursive.E{i:42, flag:true, c:simple.C{I:43, flag:false}}",
		},
	}
	for _, tt := range tests {
		assert.Equal(t, tt.want, tt.e.String())
		assert.Equal(t, tt.fmtWant, fmt.Sprintf("%#v", tt.e))
	}
}
// BenchmarkRecursiveAString measures A.String() on randomized values,
// keeping input generation out of the timed region.
// Fixed: the loop now runs b.N iterations; the previous fixed 1000-pass
// loop ignored b.N, so the framework's scaling produced meaningless
// per-op numbers.
func BenchmarkRecursiveAString(b *testing.B) {
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		b.StopTimer()
		a := randomA()
		b.StartTimer()
		_ = a.String()
	}
}
// BenchmarkRecursiveAfmt measures fmt's %#v rendering of randomized A
// values, keeping input generation out of the timed region.
// Fixed: the loop now runs b.N iterations instead of a fixed 1000, so
// the reported ns/op is meaningful.
func BenchmarkRecursiveAfmt(b *testing.B) {
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		b.StopTimer()
		a := randomA()
		b.StartTimer()
		_ = fmt.Sprintf("%#v", a)
	}
}
// randomA builds an A with random ints and coin-flip booleans at every
// nesting level, for use as benchmark input.
func randomA() A {
	return A{
		i: rand.Int(),
		flag: rand.Int()%2 == 0,
		b: B{
			i: rand.Int(),
			flag: rand.Int()%2 == 0,
			c: C{
				i: rand.Int(),
				flag: rand.Int()%2 == 0,
			},
		},
	}
}
|
package models
import (
"github.com/jinzhu/gorm"
)
// Services defines the shape of the service struct: the shared gorm
// connection plus each domain service built on top of it.
type Services struct {
	db *gorm.DB
	User UserService
}
// NewServices is used to define the service shape: it opens the postgres
// connection, enables query logging, and wires up the services that
// depend on it.
// Fixed: the connection error is now checked BEFORE db is used —
// previously LogMode/NewUserService ran on a possibly-failed connection.
func NewServices(connectionString string) (*Services, error) {
	db, err := gorm.Open("postgres", connectionString)
	if err != nil {
		return nil, err
	}
	db.LogMode(true)
	return &Services{
		User: NewUserService(db),
		db: db,
	}, nil
}
// AutoMigrate creates the tables in the database
func (s *Services) AutoMigrate() error {
	// gorm chains report failures via the Error field; return it directly.
	return s.db.AutoMigrate(User{}).Error
}
// DestructiveConstruct destroys db and recreates
func (s *Services) DestructiveConstruct() error {
if err := s.db.DropTableIfExists(User{}).Error; err != nil {
return err
}
return s.AutoMigrate()
}
|
// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package batcheval
import (
"context"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/batcheval/result"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/spanset"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/storage"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/errors"
)
// Register QueryIntent as a read-only command, pairing it with its latch-span
// declaration function.
func init() {
	RegisterReadOnlyCommand(roachpb.QueryIntent, declareKeysQueryIntent, QueryIntent)
}
// declareKeysQueryIntent declares the latch spans a QueryIntent request needs:
// a non-MVCC read-only latch over the request's key span.
func declareKeysQueryIntent(
	_ ImmutableRangeState, _ roachpb.Header, req roachpb.Request, latchSpans, _ *spanset.SpanSet,
) {
	// QueryIntent requests read the specified keys at the maximum timestamp in
	// order to read any intent present, if one exists, regardless of the
	// timestamp it was written at.
	latchSpans.AddNonMVCC(spanset.SpanReadOnly, req.Header().Span())
}
// QueryIntent checks if an intent exists for the specified transaction at the
// given key. If the intent is missing, the request prevents the intent from
// ever being written at the specified timestamp (but the actual prevention
// happens during the timestamp cache update).
//
// QueryIntent returns an error if the intent is missing and its ErrorIfMissing
// field is set to true. This error is typically an IntentMissingError, but the
// request is special-cased to return a SERIALIZABLE retry error if a transaction
// queries its own intent and finds it has been pushed.
// QueryIntent evaluates a QueryIntentRequest: it performs an inconsistent,
// non-transactional read of args.Key at hlc.MaxTimestamp so that any intent on
// the key is surfaced (rather than raising a WriteIntentError), and then sets
// reply.FoundIntent according to whether an intent from the expected
// transaction, epoch, and sequence was found at or below the expected
// timestamp.
func QueryIntent(
	ctx context.Context, reader storage.Reader, cArgs CommandArgs, resp roachpb.Response,
) (result.Result, error) {
	args := cArgs.Args.(*roachpb.QueryIntentRequest)
	h := cArgs.Header
	reply := resp.(*roachpb.QueryIntentResponse)
	ownTxn := false
	if h.Txn != nil {
		// Determine if the request is querying an intent in its own
		// transaction. If not, the request is rejected as querying one
		// transaction's intent from within another transaction is unsupported.
		if h.Txn.ID == args.Txn.ID {
			ownTxn = true
		} else {
			return result.Result{}, ErrTransactionUnsupported
		}
	}
	if h.WriteTimestamp().Less(args.Txn.WriteTimestamp) {
		// This condition must hold for the timestamp cache update in
		// Replica.updateTimestampCache to be safe.
		// NOTE(review): the condition compares h.WriteTimestamp() but the
		// message prints h.Timestamp — confirm the two are interchangeable in
		// this context.
		return result.Result{}, errors.AssertionFailedf("QueryIntent request timestamp %s less than txn WriteTimestamp %s",
			h.Timestamp, args.Txn.WriteTimestamp)
	}
	// Read at the specified key at the maximum timestamp. This ensures that we
	// see an intent if one exists, regardless of what timestamp it is written
	// at.
	_, intent, err := storage.MVCCGet(ctx, reader, args.Key, hlc.MaxTimestamp, storage.MVCCGetOptions{
		// Perform an inconsistent read so that intents are returned instead of
		// causing WriteIntentErrors.
		Inconsistent: true,
		// Even if the request header contains a txn, perform the engine lookup
		// without a transaction so that intents for a matching transaction are
		// not returned as values (i.e. we don't want to see our own writes).
		Txn: nil,
	})
	if err != nil {
		return result.Result{}, err
	}
	var curIntentPushed bool
	if intent != nil {
		// An intent is only a match if it belongs to the same transaction, at
		// the same epoch, with a sequence at or above the expected one.
		// See comment on QueryIntentRequest.Txn for an explanation of this
		// comparison.
		// TODO(nvanbenschoten): Now that we have a full intent history,
		// we can look at the exact sequence! That won't serve as much more
		// than an assertion that QueryIntent is being used correctly.
		reply.FoundIntent = (args.Txn.ID == intent.Txn.ID) &&
			(args.Txn.Epoch == intent.Txn.Epoch) &&
			(args.Txn.Sequence <= intent.Txn.Sequence)
		// If we found a matching intent, check whether the intent was pushed
		// past its expected timestamp.
		if reply.FoundIntent {
			// If the request is querying an intent for its own transaction, forward
			// the timestamp we compare against to the provisional commit timestamp
			// in the batch header.
			cmpTS := args.Txn.WriteTimestamp
			if ownTxn {
				cmpTS.Forward(h.Txn.WriteTimestamp)
			}
			if cmpTS.Less(intent.Txn.WriteTimestamp) {
				// The intent matched but was pushed to a later timestamp. Consider a
				// pushed intent a missing intent.
				curIntentPushed = true
				log.VEventf(ctx, 2, "found pushed intent")
				reply.FoundIntent = false
				// If the request was querying an intent in its own transaction, update
				// the response transaction.
				if ownTxn {
					reply.Txn = h.Txn.Clone()
					reply.Txn.WriteTimestamp.Forward(intent.Txn.WriteTimestamp)
				}
			}
		}
	}
	if !reply.FoundIntent && args.ErrorIfMissing {
		if ownTxn && curIntentPushed {
			// If the transaction's own intent was pushed, go ahead and
			// return a TransactionRetryError immediately with an updated
			// transaction proto. This is an optimization that can help
			// the txn use refresh spans more effectively.
			return result.Result{}, roachpb.NewTransactionRetryError(roachpb.RETRY_SERIALIZABLE, "intent pushed")
		}
		return result.Result{}, roachpb.NewIntentMissingError(args.Key, intent)
	}
	return result.Result{}, nil
}
|
package stealth
import (
"bytes"
"fmt"
"net/http"
"net/url"
"strconv"
)
// googleBaseURL is the root of every Google search URL; query parameters are
// appended after the trailing "?".
const googleBaseURL = "http://www.google.com/search?"

// TODO: Replace this global var with something injectable.
// googleSearchTypes maps a friendly search-type name to Google's "tbm" query
// parameter. Types not present in this map fall back to a plain web search
// (see buildBaseURL).
var googleSearchTypes = map[string]string{
	"shopping": "tbm=shop",
	"image":    "tbm=isch",
	"news":     "tbm=nws",
	"video":    "tbm=vid",
}
// GoogleEngine is the Google-specific search engine; it embeds the common
// Engine and customizes URL construction for Google's query format.
type GoogleEngine struct {
	Engine
}
// BaseURL returns the engine's base search URL, building and caching it on
// first use.
func (e *GoogleEngine) BaseURL() string {
	if e.baseURL != "" {
		return e.baseURL
	}
	return e.buildBaseURL()
}
// buildBaseURL computes and caches the base URL for this engine's search type.
// Unknown search types fall back to a plain web search (no "tbm" parameter).
func (e *GoogleEngine) buildBaseURL() string {
	if tbm, ok := googleSearchTypes[e.sty]; ok {
		e.baseURL = googleBaseURL + tbm
	} else {
		e.baseURL = googleBaseURL
	}
	return e.baseURL
}
// Fetch builds a GET request for a Google search over keyword. Optional
// refinements: language ("hl" parameter), result count num ("num"), and a
// result offset start ("start", translated via calculateStart). An empty
// keyword is rejected.
func (e *GoogleEngine) Fetch(keyword, language string, num, start int) (*http.Request, error) {
	if keyword == "" {
		return nil, fmt.Errorf("Empty string passed")
	}
	searchURL := e.BaseURL() + "q=" + url.QueryEscape(keyword)
	if language != "" {
		searchURL += "&hl=" + language
	}
	if num != 0 {
		searchURL += "&num=" + strconv.Itoa(num)
	}
	if start != 0 {
		searchURL += "&start=" + calculateStart(num, start)
	}
	return http.NewRequest("GET", searchURL, nil)
}
|
package main
import "fmt"
// main demonstrates Go's loop forms: condition-only for (while), the classic
// three-clause for, an infinite for with break/continue, and range iteration.
func main() {
	// Go has no "while" keyword; a condition-only for serves that purpose.
	x, y := 0, 3
	for x < y {
		x++
		fmt.Println(x)
	}
	// The usual three-clause for loop.
	for n := 0; n < 5; n++ {
		fmt.Println(n)
	}
	// An infinite loop, exited with break; odd values are skipped so only
	// even numbers up to 10 are printed.
	m := 0
	for {
		m++
		if m > 10 {
			break
		}
		if m%2 == 0 {
			fmt.Println(m)
		}
	}
	// range gives foreach-style iteration over arrays, slices, and the like.
	colors := [...]string{"zaku", "gouf", "dom"}
	for i, color := range colors {
		fmt.Printf("%d: %s\n", i, color)
	}
}
|
package Utill
import (
"../../../bin/amqp"
"../DTO"
"../DataBase"
"fmt"
"log"
)
// GetRabbitConnection dials the local RabbitMQ broker as guest and returns the
// connection. Terminates the process (via failOnError) if the dial fails.
func GetRabbitConnection() *amqp.Connection {
	connection, err := amqp.Dial("amqp://guest@localhost:5672")
	failOnError(err, "Failed to connect to RabbitMQ")
	return connection
}
// CreateChannel opens a new channel on the given connection. Terminates the
// process (via failOnError) if the channel cannot be opened.
//
// Fix: the previous signature declared a named result "channel" that was never
// used — the function returned the local ch instead — so the misleading name
// has been removed.
func CreateChannel(conn *amqp.Connection) *amqp.Channel {
	ch, err := conn.Channel()
	failOnError(err, "Failed to open a channel")
	return ch
}
// ExchangeDeclare declares a non-durable, auto-deleted exchange with the given
// name and type on the channel. Terminates the process (via failOnError) on
// failure.
//
// Fixes: the previous version named its local variable "error", shadowing the
// builtin error type, and wrapped failOnError in a redundant nil check
// (failOnError is already a no-op for nil errors).
func ExchangeDeclare(channel *amqp.Channel, exchangeName string, exchangeType string) {
	err := channel.ExchangeDeclare(
		exchangeName, // name
		exchangeType, // type
		false,        // durable
		true,         // auto-deleted
		false,        // internal
		false,        // no-wait
		nil,          // arguments
	)
	failOnError(err, "Failed to Declare Exchange")
}
// QDeclare declares a non-durable, delete-when-unused queue with the given
// name and returns it. Terminates the process (via failOnError) on failure.
//
// Fixes: removed the unused named result "q", renamed the misspelled local
// "qDeclate", and dropped the redundant nil check around failOnError.
func QDeclare(channel *amqp.Channel, qName string) amqp.Queue {
	queue, err := channel.QueueDeclare(
		qName, // name
		false, // durable
		true,  // delete when unused
		false, // exclusive
		false, // no-wait
		nil,   // arguments
	)
	failOnError(err, "Failed to Declare Q")
	return queue
}
// QBindWithExchange binds the given queue to exchangeName under routingKey.
// Terminates the process (via failOnError) on failure; failOnError is a no-op
// for a nil error, so the explicit nil check is unnecessary.
func QBindWithExchange(channel *amqp.Channel, QDeatils amqp.Queue, routingKey string, exchangeName string) {
	err := channel.QueueBind(QDeatils.Name, routingKey, exchangeName, false, nil)
	failOnError(err, "Failed to Bind Exchange with Q")
}
// ConsumeMessage starts an auto-acknowledging consumer on the given queue and
// returns the delivery channel. Terminates the process (via failOnError) if
// the consume call fails.
func ConsumeMessage(channel *amqp.Channel, QDeatils amqp.Queue) <-chan amqp.Delivery {
	deliveries, err := channel.Consume(
		QDeatils.Name, // queue name
		"",            // consumer tag
		true,          // auto ack
		false,         // exclusive
		false,         // no-local
		false,         // no-wait
		nil,           // arguments
	)
	failOnError(err, "Failed to Consume Messages")
	return deliveries
}
// HandleMessages drains the delivery channel, logging each message body and
// persisting it to the database. Blocks until the channel is closed.
//
// Fix: corrected the "Recieved" typo and stray whitespace in the log format.
func HandleMessages(incomingMessages <-chan amqp.Delivery) {
	log.Println("Entering HandleMessages")
	for message := range incomingMessages {
		log.Printf("\nMessage Received ----------> %s\n", message.Body)
		// Persist the message body to the database.
		dto := &DTO.MessageDTO{}
		dto.Data = string(message.Body)
		DataBase.SaveMessages(dto)
	}
}
// failOnError logs msg together with the error and terminates the process
// when err is non-nil; it is a no-op for a nil error.
//
// Fix: log.Fatalf calls os.Exit(1), so the panic that followed it was
// unreachable and has been removed.
func failOnError(err error, msg string) {
	if err != nil {
		log.Fatalf("%s: %s", msg, err)
	}
}
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package bios
import (
"testing"
pb "chromiumos/tast/services/cros/firmware"
)
// TestCalcGBB checks that calcGBBMask and calcGBBFlags are inverses of each
// other: each GBBFlag maps to the bit at its enum position.
func TestCalcGBB(t *testing.T) {
	// 1 bit
	m := calcGBBMask([]pb.GBBFlag{pb.GBBFlag_DEV_SCREEN_SHORT_DELAY})
	if m != 0x0001<<pb.GBBFlag_DEV_SCREEN_SHORT_DELAY {
		t.Fatalf("unexpected mask for 1 bit: %v", m)
	}
	// Round-trip back to the flag list.
	f := calcGBBFlags(m)
	if len(f) != 1 || f[0] != pb.GBBFlag_DEV_SCREEN_SHORT_DELAY {
		t.Fatalf("unexpected flagfor 1 bit: %v", f)
	}
	// 2 bits
	m = calcGBBMask([]pb.GBBFlag{pb.GBBFlag_DEV_SCREEN_SHORT_DELAY, pb.GBBFlag_FORCE_DEV_BOOT_FASTBOOT_FULL_CAP})
	if m != (0x0001<<pb.GBBFlag_DEV_SCREEN_SHORT_DELAY)|(0x0001<<pb.GBBFlag_FORCE_DEV_BOOT_FASTBOOT_FULL_CAP) {
		t.Fatalf("unexpected mask for 2 bits: %v", m)
	}
	f = calcGBBFlags(m)
	if len(f) != 2 || f[0] != pb.GBBFlag_DEV_SCREEN_SHORT_DELAY || f[1] != pb.GBBFlag_FORCE_DEV_BOOT_FASTBOOT_FULL_CAP {
		t.Fatalf("unexpected flags for 2 bits: %v", f)
	}
}
// TestCalcGBBBits exercises calcGBBBits over a table of (current, clear, set)
// bit patterns; the expected result is (curr &^ clear) | set per the cases
// below.
func TestCalcGBBBits(t *testing.T) {
	tests := []struct {
		curr  uint32 // current flag bits
		clear uint32 // bits to clear
		set   uint32 // bits to set
		want  uint32 // expected result
	}{
		{0b0101, 0b0000, 0b0000, 0b0101},
		{0b0101, 0b1111, 0b0000, 0b0000},
		{0b0100, 0b0000, 0b0001, 0b0101},
		{0b0010, 0b0010, 0b0001, 0b0001},
		{0b0011, 0b1100, 0b1100, 0b1111},
		{0b0101, 0b1010, 0b0101, 0b0101},
	}
	for _, tc := range tests {
		got := calcGBBBits(tc.curr, tc.clear, tc.set)
		if got != tc.want {
			t.Errorf("calcGBBBits, updating %04b with %04b(clear) and %04b(set), got %04b, want %04b", tc.curr, tc.clear, tc.set, got, tc.want)
		}
	}
}
// TestReadSectionData reads 4 bytes at offset 12 within a GBB section that
// starts at image offset 1, and checks the little-endian decoding.
func TestReadSectionData(t *testing.T) {
	s := map[ImageSection]SectionInfo{GBBImageSection: {1, 16}}
	i := Image{[]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4}, s}
	var flag uint32
	err := i.readSectionData(GBBImageSection, 12, 4, &flag)
	if err != nil {
		t.Fatal(err)
	}
	// Bytes 1,2,3,4 decoded little-endian.
	if flag != 0x04030201 {
		t.Fatalf("unexpected flags read %x from image %v", flag, i)
	}
}
// TestWriteSectionData writes a uint32 at offset 12 of the GBB section
// (section base 1, so image bytes 13..16) and verifies the little-endian
// bytes land there while all surrounding bytes are untouched.
func TestWriteSectionData(t *testing.T) {
	s := map[ImageSection]SectionInfo{GBBImageSection: {1, 16}}
	i := Image{[]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}, s}
	var flag uint32
	flag = 0x04030201
	if err := i.writeSectionData(GBBImageSection, 12, flag); err != nil {
		t.Fatal(err)
	}
	var got [18]byte
	copy(got[:], i.Data[:])
	// Only image bytes 13..16 change, to the little-endian encoding 1,2,3,4.
	want := [18]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 1, 2, 3, 4, 18}
	if got != want {
		t.Fatalf("image data incorrect, got: %v, want: %v", got, want)
	}
}
// TestShortGBBSection verifies that readSectionData rejects a read that runs
// past the end of the section: the GBB section spans 15 bytes, but reading 4
// bytes at offset 12 needs bytes 12..16.
//
// Fix: the failure message previously printed err, which is provably nil in
// that branch.
func TestShortGBBSection(t *testing.T) {
	s := map[ImageSection]SectionInfo{GBBImageSection: {0, 15}}
	i := Image{[]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4}, s}
	var flag uint32
	if err := i.readSectionData(GBBImageSection, 12, 4, &flag); err == nil {
		t.Fatal("short section not detected")
	}
}
// TestGetGBBFlags reads the flag word 0x00000101 out of the GBB section and
// checks that exactly bits 0 and 8 are reported set and every other known
// flag is reported cleared.
func TestGetGBBFlags(t *testing.T) {
	s := map[ImageSection]SectionInfo{GBBImageSection: {1, 16}}
	i := Image{[]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0, 0, 0xff}, s}
	cf, sf, err := i.GetGBBFlags()
	if err != nil {
		t.Fatalf("failed to perform GetGBBFlags: %v", err)
	}
	// All known flags except the 2 set ones should be reported cleared.
	if len(cf) != len(pb.GBBFlag_name)-2 {
		t.Errorf("cleared flags count incorrect, wanted %v, got %v: %v", len(pb.GBBFlag_name)-2, len(cf), cf)
	}
	if len(sf) != 2 {
		t.Fatalf("set flags count incorrect, wanted 2, got %v: %v", len(sf), sf)
	}
	if int(sf[0]) != 0 {
		t.Fatalf("1st set flag incorrect: %v", sf)
	}
	if int(sf[1]) != 8 {
		t.Fatalf("2nd set flag incorrect: %v", sf)
	}
}
// TestClearAndSetGBBFlags sets two flags, then clears one while setting a
// third, verifying the flag state after each step and that the image bytes
// outside the GBB section are never modified.
func TestClearAndSetGBBFlags(t *testing.T) {
	// Sentinel bytes before and after the GBB section (section is bytes 1..16).
	beforeBytes := [13]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}
	afterBytes := [1]byte{14}
	dataSlice := make([]byte, 18)
	copy(dataSlice[0:13], beforeBytes[:])
	copy(dataSlice[17:18], afterBytes[:])
	s := map[ImageSection]SectionInfo{GBBImageSection: {1, 16}}
	i := Image{dataSlice, s}
	// Step 1: clear nothing, set two flags.
	if err := i.ClearAndSetGBBFlags([]pb.GBBFlag{}, []pb.GBBFlag{pb.GBBFlag_DEV_SCREEN_SHORT_DELAY, pb.GBBFlag_FORCE_DEV_BOOT_FASTBOOT_FULL_CAP}); err != nil {
		t.Fatal("failed to initially ClearAndSetGBBFlags: ", err)
	}
	cf, sf, err := i.GetGBBFlags()
	if err != nil {
		t.Fatalf("failed to initially perform GetGBBFlags: %v", err)
	}
	if len(cf) != len(pb.GBBFlag_name)-2 {
		t.Errorf("cleared initial flags count incorrect, wanted %v, got %v: %v", len(pb.GBBFlag_name)-2, len(cf), cf)
	}
	if len(sf) != 2 {
		t.Fatalf("set initial flags count incorrect, wanted 2, got %v: %v", len(sf), sf)
	}
	if sf[0] != pb.GBBFlag_DEV_SCREEN_SHORT_DELAY {
		t.Fatalf("1st set initial flag incorrect: %v", sf)
	}
	if sf[1] != pb.GBBFlag_FORCE_DEV_BOOT_FASTBOOT_FULL_CAP {
		t.Fatalf("2nd set initial flag incorrect: %v", sf)
	}
	// Step 2: clear one of the set flags and set a different one.
	if err := i.ClearAndSetGBBFlags([]pb.GBBFlag{pb.GBBFlag_DEV_SCREEN_SHORT_DELAY}, []pb.GBBFlag{pb.GBBFlag_DISABLE_LID_SHUTDOWN}); err != nil {
		t.Fatal("failed to ClearAndSetGBBFlags: ", err)
	}
	cf, sf, err = i.GetGBBFlags()
	if err != nil {
		t.Fatalf("failed to perform GetGBBFlags: %v", err)
	}
	if len(cf) != len(pb.GBBFlag_name)-2 {
		t.Errorf("cleared flags count incorrect, wanted %d, got %d: %v", len(pb.GBBFlag_name)-2, len(cf), cf)
	}
	if len(sf) != 2 {
		t.Fatalf("set flags count incorrect, wanted 2, got %d: %v", len(sf), sf)
	}
	if sf[0] != pb.GBBFlag_DISABLE_LID_SHUTDOWN {
		t.Fatalf("1st set flag incorrect: %v", sf)
	}
	if sf[1] != pb.GBBFlag_FORCE_DEV_BOOT_FASTBOOT_FULL_CAP {
		t.Fatalf("2nd set flag incorrect: %v", sf)
	}
	// The sentinel bytes outside the GBB section must be untouched.
	var resBeforeBytes [13]byte
	var resAfterBytes [1]byte
	copy(resBeforeBytes[:], i.Data[:13])
	copy(resAfterBytes[:], i.Data[17:])
	if resBeforeBytes != beforeBytes {
		t.Fatalf("bytes before GBB header changed, got %v, want %v", resBeforeBytes, beforeBytes)
	}
	if resAfterBytes != afterBytes {
		t.Fatalf("bytes after GBB header changed, got %v, want %v", resAfterBytes, afterBytes)
	}
}
// TestGetLayout verifies the flashrom-layout rendering of a single GBB
// section (start:end in hex, followed by the section name).
func TestGetLayout(t *testing.T) {
	sections := map[ImageSection]SectionInfo{GBBImageSection: {1, 16}}
	img := Image{[]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4}, sections}
	const expectedLayout = "0x00000001:0x00000010 FV_GBB\n"
	if layout := string(img.GetLayout()); layout != expectedLayout {
		t.Fatalf("unexpected layout, want %s, got %s", expectedLayout, layout)
	}
}
|
package ferry
import (
"net"
)
// Tunnel wraps a network connection with scheduling metadata for use in
// TunnelHeap. The index field is maintained by the heap methods; priority
// determines heap ordering (lower value first).
type Tunnel struct {
	net.Conn
	priority int // heap ordering key; lower is popped first
	index    int // position in the heap, maintained by Swap/Push/Pop
}
// Send writes data to the underlying connection and returns the bytes written.
//
// NOTE(review): the original comment said the payload should be encrypted
// before sending, but no encryption is performed yet — confirm intent.
//
// Fix: receiver renamed from "self" to the idiomatic short form.
func (t *Tunnel) Send(data []byte) (n int, err error) {
	return t.Write(data)
}
// Read reads from the underlying connection into buffer.
//
// NOTE(review): the original comment marked this as the decryption point, but
// data is currently passed through unmodified — confirm intent.
//
// Fix: receiver renamed from "self" to the idiomatic short form.
func (t *Tunnel) Read(buffer []byte) (n int, err error) {
	return t.Conn.Read(buffer)
}
// TunnelHeap is a min-heap of tunnels ordered by ascending priority. It
// implements heap.Interface and should be manipulated through the
// container/heap package functions.
//
// Fix: receivers renamed from the un-idiomatic "self" to a short form.
type TunnelHeap []*Tunnel

// Len reports the number of tunnels in the heap.
func (h TunnelHeap) Len() int {
	return len(h)
}

// Less orders tunnels by ascending priority value.
func (h TunnelHeap) Less(i, j int) bool {
	return h[i].priority < h[j].priority
}

// Swap exchanges two entries and keeps their index fields consistent.
func (h TunnelHeap) Swap(i, j int) {
	h[i], h[j] = h[j], h[i]
	h[i].index = i
	h[j].index = j
}

// Push appends a tunnel and records its index (called by heap.Push; do not
// call directly).
func (h *TunnelHeap) Push(t interface{}) {
	item := t.(*Tunnel)
	item.index = len(*h)
	*h = append(*h, item)
}

// Pop removes and returns the last element, marking it as no longer in the
// heap (called by heap.Pop; do not call directly).
func (h *TunnelHeap) Pop() interface{} {
	old := *h
	n := len(old)
	item := old[n-1]
	item.index = -1 // no longer in the heap
	*h = old[:n-1]
	return item
}
|
package iteration
// Contain reports whether substr occurs within str. An empty substr is always
// contained; a substr longer than str never is. The scan slides a window of
// len(substr) bytes across str and compares each slice.
func Contain(str, substr string) bool {
	window := len(substr)
	for i := 0; i+window <= len(str); i++ {
		if str[i:i+window] == substr {
			return true
		}
	}
	return false
}
|
// Copyright 2020 Trey Dockendorf
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package collectors
import (
"context"
"fmt"
"os/exec"
"strings"
"testing"
"time"
"github.com/alecthomas/kingpin/v2"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus/testutil"
)
var (
	// mmrepquotaStdout is canned colon-delimited mmrepquota output containing
	// only FILESET quota reports for two filesystems (project and scratch).
	mmrepquotaStdout = `
*** Report for FILESET quotas on project
mmrepquota::HEADER:version:reserved:reserved:filesystemName:quotaType:id:name:blockUsage:blockQuota:blockLimit:blockInDoubt:blockGrace:filesUsage:filesQuota:filesLimit:filesInDoubt:filesGrace:remarks:quota:defQuota:fid:filesetname:
mmrepquota::0:1:::project:FILESET:0:root:337419744:0:0:163840:none:1395:0:0:400:none:i:on:off:::
mmrepquota::0:1:::project:FILESET:408:PZS1003:341467872:2147483648:2147483648:0:none:6286:2000000:2000000:0:none:e:on:off:::
*** Report for FILESET quotas on scratch
mmrepquota::HEADER:version:reserved:reserved:filesystemName:quotaType:id:name:blockUsage:blockQuota:blockLimit:blockInDoubt:blockGrace:filesUsage:filesQuota:filesLimit:filesInDoubt:filesGrace:remarks:quota:defQuota:fid:filesetname:
mmrepquota::0:1:::scratch:FILESET:0:root:928235294208:0:0:5308909920:none:141909093:0:0:140497:none:i:on:off:::
`
	// mmrepquotaStdoutAll extends mmrepquotaStdout with USR and GRP quota
	// reports, exercising all three quota types.
	mmrepquotaStdoutAll = `
*** Report for FILESET quotas on project
mmrepquota::HEADER:version:reserved:reserved:filesystemName:quotaType:id:name:blockUsage:blockQuota:blockLimit:blockInDoubt:blockGrace:filesUsage:filesQuota:filesLimit:filesInDoubt:filesGrace:remarks:quota:defQuota:fid:filesetname:
mmrepquota::0:1:::project:FILESET:0:root:337419744:0:0:163840:none:1395:0:0:400:none:i:on:off:::
mmrepquota::0:1:::project:FILESET:408:PZS1003:341467872:2147483648:2147483648:0:none:6286:2000000:2000000:0:none:e:on:off:::
*** Report for FILESET quotas on scratch
mmrepquota::HEADER:version:reserved:reserved:filesystemName:quotaType:id:name:blockUsage:blockQuota:blockLimit:blockInDoubt:blockGrace:filesUsage:filesQuota:filesLimit:filesInDoubt:filesGrace:remarks:quota:defQuota:fid:filesetname:
mmrepquota::0:1:::scratch:FILESET:0:root:928235294208:0:0:5308909920:none:141909093:0:0:140497:none:i:on:off:::
*** Report for USR quotas on home
mmrepquota::HEADER:version:reserved:reserved:filesystemName:quotaType:id:name:blockUsage:blockQuota:blockLimit:blockInDoubt:blockGrace:filesUsage:filesQuota:filesLimit:filesInDoubt:filesGrace:remarks:quota:defQuota:fid:filesetname:
mmrepquota::0:1:::home:USR:0:root:337419744:0:0:163840:none:1395:0:0:400:none:i:on:off:::
mmrepquota::0:1:::home:USR:408:PZS1003:341467872:2147483648:2147483648:0:none:6286:2000000:2000000:0:none:e:on:off:::
*** Report for USR quotas on scratch
mmrepquota::HEADER:version:reserved:reserved:filesystemName:quotaType:id:name:blockUsage:blockQuota:blockLimit:blockInDoubt:blockGrace:filesUsage:filesQuota:filesLimit:filesInDoubt:filesGrace:remarks:quota:defQuota:fid:filesetname:
mmrepquota::0:1:::scratch:USR:0:root:928235294208:0:0:5308909920:none:141909093:0:0:140497:none:i:on:off:::
*** Report for GRP quotas on project
mmrepquota::HEADER:version:reserved:reserved:filesystemName:quotaType:id:name:blockUsage:blockQuota:blockLimit:blockInDoubt:blockGrace:filesUsage:filesQuota:filesLimit:filesInDoubt:filesGrace:remarks:quota:defQuota:fid:filesetname:
mmrepquota::0:1:::project:GRP:0:root:337419744:0:0:163840:none:1395:0:0:400:none:i:on:off:::
mmrepquota::0:1:::project:GRP:408:PZS1003:341467872:2147483648:2147483648:0:none:6286:2000000:2000000:0:none:e:on:off:::
*** Report for GRP quotas on scratch
mmrepquota::HEADER:version:reserved:reserved:filesystemName:quotaType:id:name:blockUsage:blockQuota:blockLimit:blockInDoubt:blockGrace:filesUsage:filesQuota:filesLimit:filesInDoubt:filesGrace:remarks:quota:defQuota:fid:filesetname:
mmrepquota::0:1:::scratch:GRP:0:root:928235294208:0:0:5308909920:none:141909093:0:0:140497:none:i:on:off:::
`
)
// TestMmrepquota verifies that mmrepquota returns the mocked command's stdout
// when the underlying command exits successfully.
func TestMmrepquota(t *testing.T) {
	execCommand = fakeExecCommand
	mockedExitStatus = 0
	mockedStdout = "foo"
	// Restore the real exec function when the test finishes.
	defer func() { execCommand = exec.CommandContext }()
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	out, err := mmrepquota(ctx, "-j")
	if err != nil {
		t.Errorf("Unexpected error: %s", err.Error())
	}
	if out != mockedStdout {
		t.Errorf("Unexpected out: %s", out)
	}
}
// TestMmrepquotaError verifies that mmrepquota surfaces an error and returns
// no output when the underlying command exits non-zero.
func TestMmrepquotaError(t *testing.T) {
	execCommand = fakeExecCommand
	mockedExitStatus = 1
	mockedStdout = "foo"
	// Restore the real exec function when the test finishes.
	defer func() { execCommand = exec.CommandContext }()
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	out, err := mmrepquota(ctx, "-j")
	if err == nil {
		t.Errorf("Expected error")
	}
	if out != "" {
		t.Errorf("Unexpected out: %s", out)
	}
}
// TestMmrepquotaTimeout verifies that mmrepquota reports
// context.DeadlineExceeded when the context expires (timeout of 0) and
// returns no output.
func TestMmrepquotaTimeout(t *testing.T) {
	execCommand = fakeExecCommand
	mockedExitStatus = 1
	mockedStdout = "foo"
	// Restore the real exec function when the test finishes.
	defer func() { execCommand = exec.CommandContext }()
	ctx, cancel := context.WithTimeout(context.Background(), 0*time.Second)
	defer cancel()
	out, err := mmrepquota(ctx, "-j")
	if err != context.DeadlineExceeded {
		t.Errorf("Expected DeadlineExceeded")
	}
	if out != "" {
		t.Errorf("Unexpected out: %s", out)
	}
}
// TestParseMmrepquota parses the FILESET-only fixture and spot-checks the
// first metric's block values (converted from KiB in the fixture to bytes).
func TestParseMmrepquota(t *testing.T) {
	metrics := parse_mmrepquota(mmrepquotaStdout, log.NewNopLogger())
	// Fixture contains 3 data rows (2 on project, 1 on scratch).
	if len(metrics) != 3 {
		t.Errorf("Unexpected metric count: %d", len(metrics))
		return
	}
	// 337419744 KiB -> 345517817856 bytes.
	if val := metrics[0].BlockUsage; val != 345517817856 {
		t.Errorf("Unexpected BlockUsage got %v", val)
	}
	if val := metrics[0].BlockQuota; val != 0 {
		t.Errorf("Unexpected BlockQuota got %v", val)
	}
	if val := metrics[0].BlockLimit; val != 0 {
		t.Errorf("Unexpected BlockLimit got %v", val)
	}
	// 163840 KiB -> 167772160 bytes.
	if val := metrics[0].BlockInDoubt; val != 167772160 {
		t.Errorf("Unexpected BlockInDoubt got %v", val)
	}
}
// TestParseMmrepquotaAll parses the fixture containing FILESET, USR, and GRP
// reports (9 data rows total) and spot-checks the first metric's block values.
func TestParseMmrepquotaAll(t *testing.T) {
	metrics := parse_mmrepquota(mmrepquotaStdoutAll, log.NewNopLogger())
	if len(metrics) != 9 {
		t.Errorf("Unexpected metric count: %d", len(metrics))
		return
	}
	// Same first row as the FILESET-only fixture; KiB values scaled to bytes.
	if val := metrics[0].BlockUsage; val != 345517817856 {
		t.Errorf("Unexpected BlockUsage got %v", val)
	}
	if val := metrics[0].BlockQuota; val != 0 {
		t.Errorf("Unexpected BlockQuota got %v", val)
	}
	if val := metrics[0].BlockLimit; val != 0 {
		t.Errorf("Unexpected BlockLimit got %v", val)
	}
	if val := metrics[0].BlockInDoubt; val != 167772160 {
		t.Errorf("Unexpected BlockInDoubt got %v", val)
	}
}
// TestMmrepquotaCollector runs the collector against the FILESET-only fixture
// and compares the gathered fileset metrics (plus error/timeout indicators)
// against the expected exposition text.
func TestMmrepquotaCollector(t *testing.T) {
	if _, err := kingpin.CommandLine.Parse([]string{}); err != nil {
		t.Fatal(err)
	}
	// Stub out the command execution with the canned fixture.
	mmrepquotaExec = func(ctx context.Context, typeArg string) (string, error) {
		return mmrepquotaStdout, nil
	}
	expected := `
# HELP gpfs_exporter_collect_error Indicates if error has occurred during collection
# TYPE gpfs_exporter_collect_error gauge
gpfs_exporter_collect_error{collector="mmrepquota"} 0
# HELP gpfs_exporter_collect_timeout Indicates the collector timed out
# TYPE gpfs_exporter_collect_timeout gauge
gpfs_exporter_collect_timeout{collector="mmrepquota"} 0
# HELP gpfs_fileset_in_doubt_bytes GPFS fileset quota block in doubt
# TYPE gpfs_fileset_in_doubt_bytes gauge
gpfs_fileset_in_doubt_bytes{fileset="PZS1003",fs="project"} 0
gpfs_fileset_in_doubt_bytes{fileset="root",fs="project"} 167772160
gpfs_fileset_in_doubt_bytes{fileset="root",fs="scratch"} 5436323758080
# HELP gpfs_fileset_in_doubt_files GPFS fileset quota files in doubt
# TYPE gpfs_fileset_in_doubt_files gauge
gpfs_fileset_in_doubt_files{fileset="PZS1003",fs="project"} 0
gpfs_fileset_in_doubt_files{fileset="root",fs="project"} 400
gpfs_fileset_in_doubt_files{fileset="root",fs="scratch"} 140497
# HELP gpfs_fileset_limit_bytes GPFS fileset quota block limit
# TYPE gpfs_fileset_limit_bytes gauge
gpfs_fileset_limit_bytes{fileset="PZS1003",fs="project"} 2199023255552
gpfs_fileset_limit_bytes{fileset="root",fs="project"} 0
gpfs_fileset_limit_bytes{fileset="root",fs="scratch"} 0
# HELP gpfs_fileset_limit_files GPFS fileset quota files limit
# TYPE gpfs_fileset_limit_files gauge
gpfs_fileset_limit_files{fileset="PZS1003",fs="project"} 2000000
gpfs_fileset_limit_files{fileset="root",fs="project"} 0
gpfs_fileset_limit_files{fileset="root",fs="scratch"} 0
# HELP gpfs_fileset_quota_bytes GPFS fileset block quota
# TYPE gpfs_fileset_quota_bytes gauge
gpfs_fileset_quota_bytes{fileset="PZS1003",fs="project"} 2199023255552
gpfs_fileset_quota_bytes{fileset="root",fs="project"} 0
gpfs_fileset_quota_bytes{fileset="root",fs="scratch"} 0
# HELP gpfs_fileset_quota_files GPFS fileset files quota
# TYPE gpfs_fileset_quota_files gauge
gpfs_fileset_quota_files{fileset="PZS1003",fs="project"} 2000000
gpfs_fileset_quota_files{fileset="root",fs="project"} 0
gpfs_fileset_quota_files{fileset="root",fs="scratch"} 0
# HELP gpfs_fileset_used_bytes GPFS fileset quota used
# TYPE gpfs_fileset_used_bytes gauge
gpfs_fileset_used_bytes{fileset="PZS1003",fs="project"} 349663100928
gpfs_fileset_used_bytes{fileset="root",fs="project"} 345517817856
gpfs_fileset_used_bytes{fileset="root",fs="scratch"} 950512941268992
# HELP gpfs_fileset_used_files GPFS fileset quota files used
# TYPE gpfs_fileset_used_files gauge
gpfs_fileset_used_files{fileset="PZS1003",fs="project"} 6286
gpfs_fileset_used_files{fileset="root",fs="project"} 1395
gpfs_fileset_used_files{fileset="root",fs="scratch"} 141909093
`
	collector := NewMmrepquotaCollector(log.NewNopLogger())
	gatherers := setupGatherer(collector)
	// 27 series: 8 fileset metrics x 3 rows + error/timeout/duration-style meta.
	if val, err := testutil.GatherAndCount(gatherers); err != nil {
		t.Errorf("Unexpected error: %v", err)
	} else if val != 27 {
		t.Errorf("Unexpected collection count %d, expected 27", val)
	}
	if err := testutil.GatherAndCompare(gatherers, strings.NewReader(expected),
		"gpfs_exporter_collect_error", "gpfs_exporter_collect_timeout",
		"gpfs_fileset_in_doubt_bytes", "gpfs_fileset_in_doubt_files",
		"gpfs_fileset_limit_bytes", "gpfs_fileset_limit_files",
		"gpfs_fileset_quota_bytes", "gpfs_fileset_quota_files",
		"gpfs_fileset_used_bytes", "gpfs_fileset_used_files"); err != nil {
		t.Errorf("unexpected collecting result:\n%s", err)
	}
}
// TestMmrepquotaCollectorAll runs the collector against the fixture containing
// FILESET, USR, and GRP reports and compares fileset, user, and group metric
// families (plus the error/timeout indicators) against the expected
// exposition text.
func TestMmrepquotaCollectorAll(t *testing.T) {
	if _, err := kingpin.CommandLine.Parse([]string{}); err != nil {
		t.Fatal(err)
	}
	// Stub out the command execution with the full fixture.
	mmrepquotaExec = func(ctx context.Context, typeArg string) (string, error) {
		return mmrepquotaStdoutAll, nil
	}
	expected := `
# HELP gpfs_exporter_collect_error Indicates if error has occurred during collection
# TYPE gpfs_exporter_collect_error gauge
gpfs_exporter_collect_error{collector="mmrepquota"} 0
# HELP gpfs_exporter_collect_timeout Indicates the collector timed out
# TYPE gpfs_exporter_collect_timeout gauge
gpfs_exporter_collect_timeout{collector="mmrepquota"} 0
# HELP gpfs_fileset_in_doubt_bytes GPFS fileset quota block in doubt
# TYPE gpfs_fileset_in_doubt_bytes gauge
gpfs_fileset_in_doubt_bytes{fileset="PZS1003",fs="project"} 0
gpfs_fileset_in_doubt_bytes{fileset="root",fs="project"} 167772160
gpfs_fileset_in_doubt_bytes{fileset="root",fs="scratch"} 5436323758080
# HELP gpfs_fileset_in_doubt_files GPFS fileset quota files in doubt
# TYPE gpfs_fileset_in_doubt_files gauge
gpfs_fileset_in_doubt_files{fileset="PZS1003",fs="project"} 0
gpfs_fileset_in_doubt_files{fileset="root",fs="project"} 400
gpfs_fileset_in_doubt_files{fileset="root",fs="scratch"} 140497
# HELP gpfs_fileset_limit_bytes GPFS fileset quota block limit
# TYPE gpfs_fileset_limit_bytes gauge
gpfs_fileset_limit_bytes{fileset="PZS1003",fs="project"} 2199023255552
gpfs_fileset_limit_bytes{fileset="root",fs="project"} 0
gpfs_fileset_limit_bytes{fileset="root",fs="scratch"} 0
# HELP gpfs_fileset_limit_files GPFS fileset quota files limit
# TYPE gpfs_fileset_limit_files gauge
gpfs_fileset_limit_files{fileset="PZS1003",fs="project"} 2000000
gpfs_fileset_limit_files{fileset="root",fs="project"} 0
gpfs_fileset_limit_files{fileset="root",fs="scratch"} 0
# HELP gpfs_fileset_quota_bytes GPFS fileset block quota
# TYPE gpfs_fileset_quota_bytes gauge
gpfs_fileset_quota_bytes{fileset="PZS1003",fs="project"} 2199023255552
gpfs_fileset_quota_bytes{fileset="root",fs="project"} 0
gpfs_fileset_quota_bytes{fileset="root",fs="scratch"} 0
# HELP gpfs_fileset_quota_files GPFS fileset files quota
# TYPE gpfs_fileset_quota_files gauge
gpfs_fileset_quota_files{fileset="PZS1003",fs="project"} 2000000
gpfs_fileset_quota_files{fileset="root",fs="project"} 0
gpfs_fileset_quota_files{fileset="root",fs="scratch"} 0
# HELP gpfs_fileset_used_bytes GPFS fileset quota used
# TYPE gpfs_fileset_used_bytes gauge
gpfs_fileset_used_bytes{fileset="PZS1003",fs="project"} 349663100928
gpfs_fileset_used_bytes{fileset="root",fs="project"} 345517817856
gpfs_fileset_used_bytes{fileset="root",fs="scratch"} 950512941268992
# HELP gpfs_fileset_used_files GPFS fileset quota files used
# TYPE gpfs_fileset_used_files gauge
gpfs_fileset_used_files{fileset="PZS1003",fs="project"} 6286
gpfs_fileset_used_files{fileset="root",fs="project"} 1395
gpfs_fileset_used_files{fileset="root",fs="scratch"} 141909093
# HELP gpfs_user_in_doubt_bytes GPFS user quota block in doubt
# TYPE gpfs_user_in_doubt_bytes gauge
gpfs_user_in_doubt_bytes{fs="home",user="PZS1003"} 0
gpfs_user_in_doubt_bytes{fs="home",user="root"} 167772160
gpfs_user_in_doubt_bytes{fs="scratch",user="root"} 5436323758080
# HELP gpfs_user_in_doubt_files GPFS user quota files in doubt
# TYPE gpfs_user_in_doubt_files gauge
gpfs_user_in_doubt_files{fs="home",user="PZS1003"} 0
gpfs_user_in_doubt_files{fs="home",user="root"} 400
gpfs_user_in_doubt_files{fs="scratch",user="root"} 140497
# HELP gpfs_user_limit_bytes GPFS user quota block limit
# TYPE gpfs_user_limit_bytes gauge
gpfs_user_limit_bytes{fs="home",user="PZS1003"} 2199023255552
gpfs_user_limit_bytes{fs="home",user="root"} 0
gpfs_user_limit_bytes{fs="scratch",user="root"} 0
# HELP gpfs_user_limit_files GPFS user quota files limit
# TYPE gpfs_user_limit_files gauge
gpfs_user_limit_files{fs="home",user="PZS1003"} 2000000
gpfs_user_limit_files{fs="home",user="root"} 0
gpfs_user_limit_files{fs="scratch",user="root"} 0
# HELP gpfs_user_quota_bytes GPFS user block quota
# TYPE gpfs_user_quota_bytes gauge
gpfs_user_quota_bytes{fs="home",user="PZS1003"} 2199023255552
gpfs_user_quota_bytes{fs="home",user="root"} 0
gpfs_user_quota_bytes{fs="scratch",user="root"} 0
# HELP gpfs_user_quota_files GPFS user files quota
# TYPE gpfs_user_quota_files gauge
gpfs_user_quota_files{fs="home",user="PZS1003"} 2000000
gpfs_user_quota_files{fs="home",user="root"} 0
gpfs_user_quota_files{fs="scratch",user="root"} 0
# HELP gpfs_user_used_bytes GPFS user quota used
# TYPE gpfs_user_used_bytes gauge
gpfs_user_used_bytes{fs="home",user="PZS1003"} 349663100928
gpfs_user_used_bytes{fs="home",user="root"} 345517817856
gpfs_user_used_bytes{fs="scratch",user="root"} 950512941268992
# HELP gpfs_user_used_files GPFS user quota files used
# TYPE gpfs_user_used_files gauge
gpfs_user_used_files{fs="home",user="PZS1003"} 6286
gpfs_user_used_files{fs="home",user="root"} 1395
gpfs_user_used_files{fs="scratch",user="root"} 141909093
# HELP gpfs_group_in_doubt_bytes GPFS group quota block in doubt
# TYPE gpfs_group_in_doubt_bytes gauge
gpfs_group_in_doubt_bytes{fs="project",group="PZS1003"} 0
gpfs_group_in_doubt_bytes{fs="project",group="root"} 167772160
gpfs_group_in_doubt_bytes{fs="scratch",group="root"} 5436323758080
# HELP gpfs_group_in_doubt_files GPFS group quota files in doubt
# TYPE gpfs_group_in_doubt_files gauge
gpfs_group_in_doubt_files{fs="project",group="PZS1003"} 0
gpfs_group_in_doubt_files{fs="project",group="root"} 400
gpfs_group_in_doubt_files{fs="scratch",group="root"} 140497
# HELP gpfs_group_limit_bytes GPFS group quota block limit
# TYPE gpfs_group_limit_bytes gauge
gpfs_group_limit_bytes{fs="project",group="PZS1003"} 2199023255552
gpfs_group_limit_bytes{fs="project",group="root"} 0
gpfs_group_limit_bytes{fs="scratch",group="root"} 0
# HELP gpfs_group_limit_files GPFS group quota files limit
# TYPE gpfs_group_limit_files gauge
gpfs_group_limit_files{fs="project",group="PZS1003"} 2000000
gpfs_group_limit_files{fs="project",group="root"} 0
gpfs_group_limit_files{fs="scratch",group="root"} 0
# HELP gpfs_group_quota_bytes GPFS group block quota
# TYPE gpfs_group_quota_bytes gauge
gpfs_group_quota_bytes{fs="project",group="PZS1003"} 2199023255552
gpfs_group_quota_bytes{fs="project",group="root"} 0
gpfs_group_quota_bytes{fs="scratch",group="root"} 0
# HELP gpfs_group_quota_files GPFS group files quota
# TYPE gpfs_group_quota_files gauge
gpfs_group_quota_files{fs="project",group="PZS1003"} 2000000
gpfs_group_quota_files{fs="project",group="root"} 0
gpfs_group_quota_files{fs="scratch",group="root"} 0
# HELP gpfs_group_used_bytes GPFS group quota used
# TYPE gpfs_group_used_bytes gauge
gpfs_group_used_bytes{fs="project",group="PZS1003"} 349663100928
gpfs_group_used_bytes{fs="project",group="root"} 345517817856
gpfs_group_used_bytes{fs="scratch",group="root"} 950512941268992
# HELP gpfs_group_used_files GPFS group quota files used
# TYPE gpfs_group_used_files gauge
gpfs_group_used_files{fs="project",group="PZS1003"} 6286
gpfs_group_used_files{fs="project",group="root"} 1395
gpfs_group_used_files{fs="scratch",group="root"} 141909093
`
	collector := NewMmrepquotaCollector(log.NewNopLogger())
	gatherers := setupGatherer(collector)
	// 75 series: 8 metrics x 3 rows x 3 quota types, plus collector meta.
	if val, err := testutil.GatherAndCount(gatherers); err != nil {
		t.Errorf("Unexpected error: %v", err)
	} else if val != 75 {
		t.Errorf("Unexpected collection count %d, expected 75", val)
	}
	if err := testutil.GatherAndCompare(gatherers, strings.NewReader(expected),
		"gpfs_exporter_collect_error", "gpfs_exporter_collect_timeout",
		"gpfs_fileset_in_doubt_bytes", "gpfs_fileset_in_doubt_files",
		"gpfs_fileset_limit_bytes", "gpfs_fileset_limit_files",
		"gpfs_fileset_quota_bytes", "gpfs_fileset_quota_files",
		"gpfs_fileset_used_bytes", "gpfs_fileset_used_files",
		"gpfs_user_in_doubt_bytes", "gpfs_user_in_doubt_files",
		"gpfs_user_limit_bytes", "gpfs_user_limit_files",
		"gpfs_user_quota_bytes", "gpfs_user_quota_files",
		"gpfs_user_used_bytes", "gpfs_user_used_files",
		"gpfs_group_in_doubt_bytes", "gpfs_group_in_doubt_files",
		"gpfs_group_limit_bytes", "gpfs_group_limit_files",
		"gpfs_group_quota_bytes", "gpfs_group_quota_files",
		"gpfs_group_used_bytes", "gpfs_group_used_files"); err != nil {
		t.Errorf("unexpected collecting result:\n%s", err)
	}
}
// TestMMrepquotaCollectorError verifies the collector reports a collect
// error metric when the mmrepquota command fails.
func TestMMrepquotaCollectorError(t *testing.T) {
	if _, err := kingpin.CommandLine.Parse([]string{}); err != nil {
		t.Fatal(err)
	}
	// Stub the exec hook to simulate a failing mmrepquota invocation.
	mmrepquotaExec = func(ctx context.Context, typeArg string) (string, error) {
		return "", fmt.Errorf("Error")
	}
	expected := `
# HELP gpfs_exporter_collect_error Indicates if error has occurred during collection
# TYPE gpfs_exporter_collect_error gauge
gpfs_exporter_collect_error{collector="mmrepquota"} 1
`
	gatherers := setupGatherer(NewMmrepquotaCollector(log.NewNopLogger()))
	count, err := testutil.GatherAndCount(gatherers)
	switch {
	case err != nil:
		t.Errorf("Unexpected error: %v", err)
	case count != 3:
		t.Errorf("Unexpected collection count %d, expected 3", count)
	}
	if err := testutil.GatherAndCompare(gatherers, strings.NewReader(expected),
		"gpfs_exporter_collect_error", "gpfs_fileset_used_bytes"); err != nil {
		t.Errorf("unexpected collecting result:\n%s", err)
	}
}
// TestMMrepquotaCollectorTimeout verifies the collector reports a timeout
// metric when the mmrepquota command exceeds its deadline.
func TestMMrepquotaCollectorTimeout(t *testing.T) {
	if _, err := kingpin.CommandLine.Parse([]string{}); err != nil {
		t.Fatal(err)
	}
	// Stub the exec hook to simulate a command deadline expiry.
	mmrepquotaExec = func(ctx context.Context, typeArg string) (string, error) {
		return "", context.DeadlineExceeded
	}
	expected := `
# HELP gpfs_exporter_collect_timeout Indicates the collector timed out
# TYPE gpfs_exporter_collect_timeout gauge
gpfs_exporter_collect_timeout{collector="mmrepquota"} 1
`
	gatherers := setupGatherer(NewMmrepquotaCollector(log.NewNopLogger()))
	count, err := testutil.GatherAndCount(gatherers)
	switch {
	case err != nil:
		t.Errorf("Unexpected error: %v", err)
	case count != 3:
		t.Errorf("Unexpected collection count %d, expected 3", count)
	}
	if err := testutil.GatherAndCompare(gatherers, strings.NewReader(expected),
		"gpfs_exporter_collect_timeout", "gpfs_fileset_used_bytes"); err != nil {
		t.Errorf("unexpected collecting result:\n%s", err)
	}
}
|
package request
// EventSource identifies the sender of a webhook event: a user, a group
// chat, or a multi-person room (only the relevant ID fields are set).
type EventSource struct {
	UserID string `json:"userId"`
	SourceType string `json:"type"`
	GroupID string `json:"groupId"`
	RoomID string `json:"roomId"`
}
type EmojiMessage struct {
Index int `json:"index"`
Length int `json:"lenght"`
ProductID string `json:"productId"`
EmojiID string `json:"emojiId"`
}
// MessageContentProvider describes where media content is hosted
// (e.g. "line" or "external") and, if external, its URLs.
type MessageContentProvider struct {
	ContentType string `json:"type"`
	OriginalContentURL string `json:"originalContentUrl"`
	PreviewImageURL string `json:"previewImageUrl"`
}
// MessageUnsent carries the ID of a message that the sender unsent.
type MessageUnsent struct {
	MessageID string `json:"messageId"`
}
// EventMessage is the union of all webhook message payload fields; which
// fields are populated depends on MessageType (text, sticker, image,
// video, audio, file, location, ...).
type EventMessage struct {
	MessageType string `json:"type"`
	ID string `json:"id"`
	Text string `json:"text"`
	Emojis []EmojiMessage `json:"emojis"`
	// NOTE(review): field name "PacakgeID" is a typo for "PackageID";
	// renaming the exported field would break callers, so it is kept.
	PacakgeID string `json:"packageId"`
	StickerID string `json:"stickerId"`
	StickerResourceType string `json:"stickerResourceType"`
	Duration int `json:"duration"`
	ContentProvider MessageContentProvider `json:"contentProvider"`
	FileName string `json:"filename"`
	FileSize int `json:"fileSize"`
	Title string `json:"title"`
	Address string `json:"address"`
	Latitude float64 `json:"latitude"`
	// Fixed json tag: was "longtitude", but the webhook payload field is
	// "longitude", so location messages lost their longitude.
	// (The Go field name keeps its historical spelling for compatibility.)
	Longtitude float64 `json:"longitude"`
}
// Event is a single webhook event: its type, reply token, sender,
// timestamp, and (when applicable) the message or unsend payload.
type Event struct {
	MessageType string `json:"type"`
	ReplyToken string `json:"replyToken"`
	Source EventSource `json:"source"`
	Timestamp int `json:"timestamp"`
	Mode string `json:"mode"`
	Message EventMessage `json:"message"`
	MessageUnsent MessageUnsent `json:"unsend"`
	// TODO: member join/leave event payloads are not modeled yet.
}
// WebhookEventOjbect is the top-level webhook request body: the batch of
// events plus the destination bot's user ID.
// NOTE(review): the type name is a typo for "WebhookEventObject"; renaming
// the exported type would break callers, so it is kept.
type WebhookEventOjbect struct {
	Events []Event `json:"events"`
	Destination string `json:"destination"`
}
|
package models
import (
"encoding/json"
"time"
)
// Routine is a practice routine row; Drills is stored in the database as
// a JSON column and unmarshalled on read.
type Routine struct {
	RoutineId int `json:"routine_id"`
	Title string `json:"title"`
	TotalDuration int `json:"total_duration"`
	Character string `json:"character"`
	OriginalCreatorId int `json:"original_creator_id"`
	CreatorId int `json:"creator_id"`
	Created time.Time `json:"created"`
	Popularity int `json:"popularity"`
	Drills []Drill `json:"drills"`
	Description string `json:"description"`
}
// Drill is one timed exercise within a Routine.
type Drill struct {
	DrillTitle string `json:"drill_title"`
	Duration int `json:"duration"`
}
// FindRoutineById loads a single routine by primary key, decoding the
// JSON drills column into r.Drills. Returns any query or decode error.
func (db *DB) FindRoutineById(routineId int) (*Routine, error) {
	var r Routine
	query := `SELECT * FROM routines WHERE routine_id=$1;`
	var d []byte
	err := db.QueryRow(query, routineId).Scan(&r.RoutineId, &r.Title, &r.TotalDuration, &r.Character, &r.CreatorId, &r.Created, &r.Popularity, &d, &r.OriginalCreatorId, &r.Description)
	if err != nil {
		return nil, err
	}
	// Fixed: a corrupt drills column used to panic here; now the error is
	// returned like every other failure path (matching FindRoutinesByCreator).
	err = json.Unmarshal(d, &r.Drills)
	if err != nil {
		return nil, err
	}
	return &r, nil
}
// FindRoutinesByCreator returns all routines owned by the given creator,
// decoding each row's JSON drills column.
func (db *DB) FindRoutinesByCreator(creatorId int) ([]*Routine, error) {
	query := `SELECT * FROM routines WHERE creator_id = $1;`
	rows, err := db.Query(query, creatorId)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	routines := make([]*Routine, 0)
	for rows.Next() {
		var routine Routine
		var rawDrills []byte
		if err := rows.Scan(&routine.RoutineId, &routine.Title, &routine.TotalDuration, &routine.Character, &routine.CreatorId, &routine.Created, &routine.Popularity, &rawDrills, &routine.OriginalCreatorId, &routine.Description); err != nil {
			return nil, err
		}
		if err := json.Unmarshal(rawDrills, &routine.Drills); err != nil {
			return nil, err
		}
		routines = append(routines, &routine)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return routines, nil
}
// CreateRoutine inserts a new routine (drills serialized to JSON) and
// returns the generated routine_id, or -1 and the error on failure.
func (db *DB) CreateRoutine(r *Routine) (int, error) {
	query := `INSERT INTO routines(title, total_duration, character, original_creator_id, creator_id, drills, popularity, description)
	VALUES($1, $2, $3, $4, $5, $6, $7, $8)
	RETURNING routine_id;`
	drills, err := json.Marshal(r.Drills)
	if err != nil {
		return -1, err
	}
	var newId int
	if err := db.QueryRow(query, r.Title, r.TotalDuration, r.Character, r.OriginalCreatorId, r.CreatorId, drills, r.Popularity, r.Description).Scan(&newId); err != nil {
		return -1, err
	}
	return newId, nil
}
// UpdateRoutine overwrites the mutable columns of an existing routine,
// serializing drills to JSON first.
func (db *DB) UpdateRoutine(routineId int, r *Routine) error {
	query := `UPDATE routines
	SET title = $2, total_duration = $3, character = $4, popularity = $5, drills = $6, description = $7
	WHERE routine_id = $1;`
	drills, err := json.Marshal(r.Drills)
	if err != nil {
		return err
	}
	if _, err := db.Exec(query, routineId, r.Title, r.TotalDuration, r.Character, r.Popularity, drills, r.Description); err != nil {
		return err
	}
	return nil
}
// DeleteRoutine removes the routine with the given id.
// Deleting a nonexistent id is not an error (Exec succeeds with 0 rows).
func (db *DB) DeleteRoutine(routineId int) error {
	query := `DELETE FROM routines WHERE routine_id = $1;`
	_, err := db.Exec(query, routineId)
	if err != nil {
		// Fixed: this branch previously returned nil, swallowing the error.
		return err
	}
	return nil
}
// GetAllRoutines returns every routine in the table, decoding each row's
// JSON drills column.
func (db *DB) GetAllRoutines() ([]*Routine, error) {
	query := `SELECT * FROM routines`
	rows, err := db.Query(query)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	routines := make([]*Routine, 0)
	for rows.Next() {
		r := &Routine{}
		var d []byte
		// Fixed: scan in the same column order as FindRoutineById and
		// FindRoutinesByCreator (same `SELECT *` over the same table), and
		// read drills into a []byte — the previous code scanned in struct
		// field order and scanned the JSON column straight into []Drill,
		// which database/sql cannot do.
		err := rows.Scan(&r.RoutineId, &r.Title, &r.TotalDuration, &r.Character, &r.CreatorId, &r.Created, &r.Popularity, &d, &r.OriginalCreatorId, &r.Description)
		if err != nil {
			return nil, err
		}
		if err := json.Unmarshal(d, &r.Drills); err != nil {
			return nil, err
		}
		routines = append(routines, r)
	}
	if err = rows.Err(); err != nil {
		return nil, err
	}
	return routines, nil
}
|
package cmd
import (
"log"
"os"
"workflow/stream"
"workflow/task"
)
// OSEXIT2 is the process exit code used when the workflow fails.
const OSEXIT2 = 2
// config carries the parsed workflow arguments passed through the stream.
type config struct {
	attr01 map[string]int
	attr02 int
}
// Run parses the arguments, drives the two-step task stream, and exits
// with OSEXIT2 on failure.
func Run() {
	log.SetPrefix("[work flow]")
	cfg := parse()
	// Chain the tasks and execute the stream with the parsed config.
	if _, err := stream.NewStream().Next(task.Task01).Next(task.Task02).Go(cfg); err != nil {
		log.Println(err)
		os.Exit(OSEXIT2)
	}
	log.Println("SUCCESS")
}
// parse builds the workflow config; the map must be initialized before use.
func parse() *config {
	return &config{
		attr01: make(map[string]int, 1),
	}
}
// Attr01 returns the config's string-to-int attribute map.
func (c *config) Attr01() map[string]int {
	return c.attr01
}
// Attr02 returns the config's integer attribute.
func (c *config) Attr02() int {
	return c.attr02
}
|
package cmd
import (
"bufio"
"context"
"fmt"
"io"
"strings"
"sync"
"github.com/jonboulle/clockwork"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/handler"
"github.com/tilt-dev/probe/pkg/probe"
"github.com/tilt-dev/probe/pkg/prober"
"github.com/tilt-dev/tilt/internal/controllers/apicmp"
"github.com/tilt-dev/tilt/internal/controllers/apis/configmap"
"github.com/tilt-dev/tilt/internal/controllers/apis/trigger"
"github.com/tilt-dev/tilt/internal/controllers/indexer"
"github.com/tilt-dev/tilt/internal/engine/local"
"github.com/tilt-dev/tilt/internal/store"
"github.com/tilt-dev/tilt/internal/timecmp"
"github.com/tilt-dev/tilt/pkg/apis"
"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
"github.com/tilt-dev/tilt/pkg/logger"
"github.com/tilt-dev/tilt/pkg/model"
)
// A controller that reads CmdSpec and writes CmdStatus, managing the
// lifecycle of one subprocess per Cmd object.
type Controller struct {
	globalCtx     context.Context
	indexer       *indexer.Indexer
	execer        Execer
	// procs tracks the current (possibly finished) process for each Cmd.
	procs         map[types.NamespacedName]*currentProcess
	proberManager ProberManager
	client        ctrlclient.Client
	st            store.RStore
	clock         clockwork.Clock
	requeuer      *indexer.Requeuer
	// mu serializes Reconcile/ForceRun so process bookkeeping stays consistent.
	mu sync.Mutex
}
var _ store.TearDowner = &Controller{}
// CreateBuilder wires the controller into the manager: it watches Cmd
// objects, ConfigMaps they reference (via the indexer), explicit requeue
// requests, and the StartOn/RestartOn trigger sources.
func (r *Controller) CreateBuilder(mgr ctrl.Manager) (*builder.Builder, error) {
	b := ctrl.NewControllerManagedBy(mgr).
		For(&Cmd{}).
		Watches(&ConfigMap{},
			handler.EnqueueRequestsFromMapFunc(r.indexer.Enqueue)).
		WatchesRawSource(r.requeuer, handler.Funcs{})
	trigger.SetupControllerStartOn(b, r.indexer, func(obj ctrlclient.Object) *v1alpha1.StartOnSpec {
		return obj.(*v1alpha1.Cmd).Spec.StartOn
	})
	trigger.SetupControllerRestartOn(b, r.indexer, func(obj ctrlclient.Object) *v1alpha1.RestartOnSpec {
		return obj.(*v1alpha1.Cmd).Spec.RestartOn
	})
	return b, nil
}
// NewController constructs a Cmd controller with its collaborators; ctx is
// the long-lived context used for processes that outlive a single reconcile.
func NewController(ctx context.Context, execer Execer, proberManager ProberManager, client ctrlclient.Client, st store.RStore, clock clockwork.Clock, scheme *runtime.Scheme) *Controller {
	return &Controller{
		globalCtx:     ctx,
		indexer:       indexer.NewIndexer(scheme, indexCmd),
		clock:         clock,
		execer:        execer,
		procs:         make(map[types.NamespacedName]*currentProcess),
		proberManager: proberManager,
		client:        client,
		st:            st,
		requeuer:      indexer.NewRequeuer(),
	}
}
// Stop the command, and wait for it to finish before continuing.
// No-op if the Cmd has no tracked process or it was already stopped.
// Caller must hold c.mu.
func (c *Controller) stop(name types.NamespacedName) {
	proc, ok := c.procs[name]
	if !ok {
		return
	}
	if proc.cancelFunc == nil {
		return
	}
	proc.cancelFunc()
	// Block until the process goroutine closes doneCh.
	<-proc.doneCh
	proc.probeWorker = nil
	proc.cancelFunc = nil
	proc.doneCh = nil
}
// TearDown stops every tracked process; invoked on service shutdown
// (satisfies store.TearDowner).
func (c *Controller) TearDown(ctx context.Context) {
	for name := range c.procs {
		c.stop(name)
	}
}
// inputsFromButton pairs each input spec on the button with its current
// status (zero-value status if none reported). Returns nil for a nil button.
func inputsFromButton(button *v1alpha1.UIButton) []input {
	if button == nil {
		return nil
	}
	// Index reported statuses by input name for pairing below.
	statusByName := make(map[string]v1alpha1.UIInputStatus, len(button.Status.Inputs))
	for _, st := range button.Status.Inputs {
		statusByName[st.Name] = st
	}
	var ret []input
	for _, spec := range button.Spec.Inputs {
		ret = append(ret, input{spec: spec, status: statusByName[spec.Name]})
	}
	return ret
}
// triggerEvents bundles the most recent restart/start trigger timestamps
// and the UI buttons (if any) that produced them.
type triggerEvents struct {
	lastRestartEventTime metav1.MicroTime
	lastRestartButton    *v1alpha1.UIButton
	lastStartEventTime   metav1.MicroTime
	lastStartButton      *v1alpha1.UIButton
}
// Reconcile drives one Cmd toward its desired state: handles deletion,
// disable/enable, StartOn/RestartOn trigger events, and exec-spec changes,
// stopping or (re)starting the underlying process as needed, then syncs
// the object's status.
func (c *Controller) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	name := req.NamespacedName
	cmd := &Cmd{}
	err := c.client.Get(ctx, name, cmd)
	c.indexer.OnReconcile(name, cmd)
	if err != nil && !apierrors.IsNotFound(err) {
		return ctrl.Result{}, fmt.Errorf("cmd reconcile: %v", err)
	}
	// Object gone (or being deleted): stop the process and drop bookkeeping.
	if apierrors.IsNotFound(err) || cmd.ObjectMeta.DeletionTimestamp != nil {
		c.stop(name)
		delete(c.procs, name)
		return ctrl.Result{}, nil
	}
	disableStatus, err := configmap.MaybeNewDisableStatus(ctx, c.client, cmd.Spec.DisableSource, cmd.Status.DisableStatus)
	if err != nil {
		return ctrl.Result{}, err
	}
	proc := c.ensureProc(name)
	proc.mutateStatus(func(status *v1alpha1.CmdStatus) {
		status.DisableStatus = disableStatus
	})
	disabled := disableStatus.State == v1alpha1.DisableStateDisabled
	if disabled {
		// Disabling should both stop the process, and make it look like
		// it didn't previously run.
		c.stop(name)
		proc.spec = v1alpha1.CmdSpec{}
		proc.lastStartOnEventTime = metav1.MicroTime{}
		proc.lastRestartOnEventTime = metav1.MicroTime{}
	}
	if cmd.Annotations[v1alpha1.AnnotationManagedBy] == "local_resource" ||
		cmd.Annotations[v1alpha1.AnnotationManagedBy] == "cmd_image" {
		// Until resource dependencies are expressed in the API,
		// we can't use reconciliation to deploy Cmd objects
		// that are part of local_resource or custom_build.
		err := c.maybeUpdateObjectStatus(ctx, cmd)
		if err != nil {
			return ctrl.Result{}, err
		}
		return ctrl.Result{}, nil
	}
	// Fetch the most recent restart/start trigger events and their buttons.
	var te triggerEvents
	te.lastRestartEventTime, te.lastRestartButton, _, err = trigger.LastRestartEvent(ctx, c.client, cmd.Spec.RestartOn)
	if err != nil {
		return ctrl.Result{}, err
	}
	te.lastStartEventTime, te.lastStartButton, err = trigger.LastStartEvent(ctx, c.client, cmd.Spec.StartOn)
	if err != nil {
		return ctrl.Result{}, err
	}
	startOn := cmd.Spec.StartOn
	waitsOnStartOn := startOn != nil && len(startOn.UIButtons) > 0
	lastSpec := proc.spec
	lastRestartOnEventTime := proc.lastRestartOnEventTime
	lastStartOnEventTime := proc.lastStartOnEventTime
	// A trigger "fires" when its event is newer than the last one acted upon.
	restartOnTriggered := timecmp.After(te.lastRestartEventTime, lastRestartOnEventTime)
	startOnTriggered := timecmp.After(te.lastStartEventTime, lastStartOnEventTime)
	execSpecChanged := !cmdExecEqual(lastSpec, cmd.Spec)
	if !disabled {
		// any change to the spec means we should stop the command immediately
		if execSpecChanged {
			c.stop(name)
		}
		if execSpecChanged && waitsOnStartOn && !startOnTriggered {
			// If the cmd spec has changed since the last run,
			// and StartOn hasn't triggered yet, set the status to waiting.
			proc.mutateStatus(func(status *v1alpha1.CmdStatus) {
				status.Waiting = &CmdStateWaiting{
					Reason: waitingOnStartOnReason,
				}
				status.Running = nil
				status.Terminated = nil
				status.Ready = false
			})
		} else if execSpecChanged || restartOnTriggered || startOnTriggered {
			// Otherwise, any change, new start event, or new restart event
			// should restart the process to pick up changes.
			_ = c.runInternal(ctx, cmd, te)
		}
	}
	err = c.maybeUpdateObjectStatus(ctx, cmd)
	if err != nil {
		return ctrl.Result{}, err
	}
	return ctrl.Result{}, nil
}
// maybeUpdateObjectStatus writes the locally-tracked status back to the
// API server (and notifies the store) only when it actually differs,
// avoiding no-op updates.
func (c *Controller) maybeUpdateObjectStatus(ctx context.Context, cmd *v1alpha1.Cmd) error {
	newStatus := c.ensureProc(types.NamespacedName{Name: cmd.Name}).copyStatus()
	if apicmp.DeepEqual(newStatus, cmd.Status) {
		return nil
	}
	update := cmd.DeepCopy()
	update.Status = newStatus
	err := c.client.Status().Update(ctx, update)
	if err != nil {
		return err
	}
	c.st.Dispatch(local.NewCmdUpdateStatusAction(update))
	return nil
}
// Forces the command to run now.
//
// This is a hack to get local_resource commands into the API server,
// even though the API server doesn't have a notion of resource deps yet.
//
// Blocks until the command is finished, then returns its status.
// The lock is released while waiting so reconciles can proceed.
func (c *Controller) ForceRun(ctx context.Context, cmd *v1alpha1.Cmd) (*v1alpha1.CmdStatus, error) {
	c.mu.Lock()
	doneCh := c.runInternal(ctx, cmd, triggerEvents{})
	c.mu.Unlock()
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	case <-doneCh:
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	status := c.ensureProc(types.NamespacedName{Name: cmd.Name}).copyStatus()
	return &status, nil
}
// stringValue renders the input's current value as the string that will be
// placed in the process environment. Returns "" when no status is set.
func (i input) stringValue() string {
	switch {
	case i.status.Text != nil:
		return i.status.Text.Value
	case i.status.Bool != nil:
		if i.status.Bool.Value {
			if s := i.spec.Bool.TrueString; s != nil {
				return *s
			}
			return "true"
		}
		if s := i.spec.Bool.FalseString; s != nil {
			return *s
		}
		return "false"
	case i.status.Hidden != nil:
		return i.status.Hidden.Value
	case i.status.Choice != nil:
		for _, choice := range i.spec.Choice.Choices {
			if choice == i.status.Choice.Value {
				return choice
			}
		}
		// if value is invalid, we default to the first choice
		return i.spec.Choice.Choices[0]
	}
	return ""
}
// input pairs a UI input's spec with its (possibly zero-valued) status.
type input struct {
	spec   v1alpha1.UIInputSpec
	status v1alpha1.UIInputStatus
}
// Ensures there's a current cmd tracker.
func (c *Controller) ensureProc(name types.NamespacedName) *currentProcess {
proc, ok := c.procs[name]
if !ok {
proc = ¤tProcess{}
c.procs[name] = proc
}
return proc
}
// Runs the command unconditionally, stopping any currently running command.
//
// The filewatches and buttons are needed for bookkeeping on how the command
// was triggered.
//
// Returns a channel that closes when the Cmd is finished.
// Caller must hold c.mu.
func (c *Controller) runInternal(ctx context.Context,
	cmd *v1alpha1.Cmd,
	te triggerEvents) chan struct{} {
	name := types.NamespacedName{Name: cmd.Name}
	c.stop(name)
	proc := c.ensureProc(name)
	proc.spec = cmd.Spec
	proc.isServer = cmd.ObjectMeta.Annotations[local.AnnotationOwnerKind] == "CmdServer"
	proc.lastRestartOnEventTime = te.lastRestartEventTime
	proc.lastStartOnEventTime = te.lastStartEventTime
	// Use the inputs from whichever button triggered most recently.
	var inputs []input
	if timecmp.After(proc.lastRestartOnEventTime, proc.lastStartOnEventTime) {
		inputs = inputsFromButton(te.lastRestartButton)
	} else {
		inputs = inputsFromButton(te.lastStartButton)
	}
	ctx, proc.cancelFunc = context.WithCancel(ctx)
	proc.statusMu.Lock()
	defer proc.statusMu.Unlock()
	// Reset status to Waiting while the process starts up.
	status := &(proc.statusInternal)
	status.Running = nil
	status.Waiting = &CmdStateWaiting{}
	status.Terminated = nil
	status.Ready = false
	ctx = store.MustObjectLogHandler(ctx, c.st, cmd)
	spec := cmd.Spec
	if spec.ReadinessProbe != nil {
		probeResultFunc := c.handleProbeResultFunc(ctx, name, proc)
		probeWorker, err := probeWorkerFromSpec(
			c.proberManager,
			spec.ReadinessProbe,
			probeResultFunc)
		if err != nil {
			// An invalid probe terminates the Cmd immediately; return an
			// already-closed channel so callers don't block.
			logger.Get(ctx).Errorf("Invalid readiness probe: %v", err)
			status.Terminated = &CmdStateTerminated{
				ExitCode: 1,
				Reason:   fmt.Sprintf("Invalid readiness probe: %v", err),
			}
			status.Waiting = nil
			status.Running = nil
			status.Ready = false
			proc.doneCh = make(chan struct{})
			close(proc.doneCh)
			return proc.doneCh
		}
		proc.probeWorker = probeWorker
	}
	startedAt := apis.NewMicroTime(c.clock.Now())
	// Extend the spec's environment with one variable per button input.
	env := append([]string{}, spec.Env...)
	for _, input := range inputs {
		env = append(env, fmt.Sprintf("%s=%s", input.spec.Name, input.stringValue()))
	}
	cmdModel := model.Cmd{
		Argv: spec.Args,
		Dir:  spec.Dir,
		Env:  env,
	}
	statusCh := c.execer.Start(ctx, cmdModel, logger.Get(ctx).Writer(logger.InfoLvl))
	proc.doneCh = make(chan struct{})
	go c.processStatuses(ctx, statusCh, proc, name, startedAt)
	return proc.doneCh
}
// handleProbeResultFunc returns the callback the probe worker invokes on
// each probe result; it logs transitions and updates the Cmd's Ready bit,
// requeueing the object when readiness changes.
func (c *Controller) handleProbeResultFunc(ctx context.Context, name types.NamespacedName, proc *currentProcess) probe.ResultFunc {
	return func(result prober.Result, statusChanged bool, output string, err error) {
		if ctx.Err() != nil {
			return
		}
		// we try to balance logging important probe results without flooding the logs
		// * ALL transitions are logged
		// * success->{failure,warning} @ WARN
		// * {failure,warning}->success @ INFO
		// * subsequent non-successful results @ VERBOSE
		// * expected healthy/steady-state is recurring success, and this is apparent
		//   from the "Ready" state, so logging every invocation is superfluous
		loggerLevel := logger.NoneLvl
		if statusChanged {
			if result != prober.Success {
				loggerLevel = logger.WarnLvl
			} else {
				loggerLevel = logger.InfoLvl
			}
		} else if result != prober.Success {
			loggerLevel = logger.VerboseLvl
		}
		// NOTE(review): the callback's err is dropped here (nil is passed), so
		// probe errors are never logged via logProbeOutput — confirm intentional.
		logProbeOutput(ctx, loggerLevel, result, output, nil)
		if !statusChanged {
			// the probe did not transition states, so the result is logged but not used to update status
			return
		}
		ready := result == prober.Success || result == prober.Warning
		// TODO(milas): this isn't quite right - we might end up setting
		// 	a terminated process to ready, for example; in practice, we
		// 	should update internal state on any goroutine/async trackers
		// 	and trigger a reconciliation, which can then evaluate the full
		// 	state + current spec
		proc.statusMu.Lock()
		defer proc.statusMu.Unlock()
		status := &(proc.statusInternal)
		if status.Ready != ready {
			status.Ready = ready
			c.requeuer.Add(name)
		}
	}
}
// logProbeOutput writes probe output (or a probe error) to the object's
// log at the given level, prefixing each output line with the probe result.
// NoneLvl or a level below the logger's threshold suppresses output.
func logProbeOutput(ctx context.Context, level logger.Level, result prober.Result, output string, err error) {
	l := logger.Get(ctx)
	if level == logger.NoneLvl || !l.Level().ShouldDisplay(level) {
		return
	}
	w := l.Writer(level)
	if err != nil {
		_, _ = fmt.Fprintf(w, "[readiness probe error] %v\n", err)
	} else if output != "" {
		// Prefix every line of the probe's output individually.
		var logMessage strings.Builder
		s := bufio.NewScanner(strings.NewReader(output))
		for s.Scan() {
			logMessage.WriteString("[readiness probe: ")
			logMessage.WriteString(string(result))
			logMessage.WriteString("] ")
			logMessage.Write(s.Bytes())
			logMessage.WriteRune('\n')
		}
		_, _ = io.WriteString(w, logMessage.String())
	}
}
// waitingOnStartOnReason is surfaced in CmdStateWaiting while a Cmd is
// gated on a StartOn trigger that hasn't fired yet.
const waitingOnStartOnReason = "cmd StartOn has not been triggered"
// processStatuses consumes the execer's status channel, translating process
// state changes into CmdStatus updates (Running/Terminated), starting the
// readiness probe worker on first Running, and requeueing the object after
// each change. Closes proc.doneCh when the channel is exhausted.
func (c *Controller) processStatuses(
	ctx context.Context,
	statusCh chan statusAndMetadata,
	proc *currentProcess,
	name types.NamespacedName,
	startedAt metav1.MicroTime) {
	defer close(proc.doneCh)
	var initProbeWorker sync.Once
	for sm := range statusCh {
		if sm.status == Unknown {
			continue
		}
		if sm.status == Error || sm.status == Done {
			// This is a hack until CmdServer is a real object.
			if proc.isServer && sm.exitCode == 0 {
				logger.Get(ctx).Errorf("Server exited with exit code 0")
			}
			proc.mutateStatus(func(status *v1alpha1.CmdStatus) {
				status.Waiting = nil
				status.Running = nil
				status.Terminated = &CmdStateTerminated{
					PID:        int32(sm.pid),
					Reason:     sm.reason,
					ExitCode:   int32(sm.exitCode),
					StartedAt:  startedAt,
					FinishedAt: apis.NewMicroTime(c.clock.Now()),
				}
			})
			c.requeuer.Add(name)
		} else if sm.status == Running {
			// Kick off the probe worker exactly once, on first Running status.
			if proc.probeWorker != nil {
				initProbeWorker.Do(func() {
					go proc.probeWorker.Run(ctx)
				})
			}
			proc.mutateStatus(func(status *v1alpha1.CmdStatus) {
				status.Waiting = nil
				status.Terminated = nil
				status.Running = &CmdStateRunning{
					PID:       int32(sm.pid),
					StartedAt: startedAt,
				}
				// Without a probe, a running process is immediately Ready.
				if proc.probeWorker == nil {
					status.Ready = true
				}
			})
			c.requeuer.Add(name)
		}
	}
}
// Find all the objects we need to watch based on the Cmd model: currently
// just the ConfigMap referenced by the disable source, if any.
func indexCmd(obj client.Object) []indexer.Key {
	cmd := obj.(*v1alpha1.Cmd)
	result := []indexer.Key{}
	ds := cmd.Spec.DisableSource
	if ds == nil || ds.ConfigMap == nil {
		return result
	}
	result = append(result, indexer.Key{
		Name: types.NamespacedName{Name: ds.ConfigMap.Name},
		GVK:  v1alpha1.SchemeGroupVersion.WithKind("ConfigMap"),
	})
	return result
}
// currentProcess represents the current process for a Manifest, so that Controller can
// make sure there's at most one process per Manifest.
// (note: it may not be running yet, or may have already finished)
type currentProcess struct {
	spec       CmdSpec
	cancelFunc context.CancelFunc
	// closed when the process finishes executing, intentionally or not
	doneCh      chan struct{}
	probeWorker *probe.Worker
	isServer    bool
	// Timestamps of the last trigger events this process was started for.
	lastRestartOnEventTime metav1.MicroTime
	lastStartOnEventTime   metav1.MicroTime
	// We have a lock that ONLY protects the status.
	statusMu       sync.Mutex
	statusInternal v1alpha1.CmdStatus
}
// copyStatus returns a deep copy of the status under the status lock,
// safe for the caller to read or mutate.
func (p *currentProcess) copyStatus() v1alpha1.CmdStatus {
	p.statusMu.Lock()
	defer p.statusMu.Unlock()
	return *(p.statusInternal.DeepCopy())
}
// mutateStatus runs update against the status while holding the status lock.
func (p *currentProcess) mutateStatus(update func(status *v1alpha1.CmdStatus)) {
	p.statusMu.Lock()
	defer p.statusMu.Unlock()
	update(&p.statusInternal)
}
// statusAndMetadata is one update emitted on the execer's status channel.
type statusAndMetadata struct {
	pid      int
	status   status
	exitCode int
	// reason is a human-readable cause for termination, if any.
	reason string
}
// status is the lifecycle state of a command process as reported by the execer.
type status int

// Process lifecycle states; values must stay in this order.
const (
	Unknown status = iota
	Running
	Done
	Error
)
|
// fibonacciClosure project main.go
package main
import (
"fmt"
)
// fibonacci returns a closure that yields successive Fibonacci numbers
// on each call, starting 0, 1, 1, 2, 3, ...
func fibonacci() func() int {
	a, b := 0, 1
	return func() int {
		current := a
		a, b = b, a+b
		return current
	}
}
// main prints the first ten Fibonacci numbers using the closure generator.
func main() {
	f := fibonacci()
	for i := 0; i < 10; i++ {
		fmt.Println(f())
	}
}
|
package v1
import (
"github.com/gin-gonic/gin"
"github.com/gomodule/redigo/redis"
"user/controller"
)
// SetV1Route registers the /v1/user CRUD endpoints on the router, backed
// by a user service that shares the given Redis connection pool.
func SetV1Route(r *gin.Engine, pool *redis.Pool) {
	userSvc := controller.NewUserService(pool)
	userGroup := r.Group("/v1/user")
	userGroup.GET("/", userSvc.GetUserList)
	userGroup.POST("/", userSvc.AddUser)
	userGroup.GET("/:userId", userSvc.GetUser)
	userGroup.DELETE("/:userId", userSvc.DestroyUser)
}
|
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package protoutil
import (
"bytes"
"compress/gzip"
"io/ioutil"
"sync"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/protoc-gen-go/descriptor"
)
type (
	// Described is the interface to something that self describes with a compressed
	// FileDescriptorProto.
	Described interface {
		// Descriptor returns the gzipped FileDescriptorProto bytes and the
		// index path of the message within that file.
		Descriptor() ([]byte, []int)
	}
)
var (
	// fileCache memoizes decoded descriptors, keyed by the address of the
	// first byte of the compressed blob (identity of the generated data).
	fileCache = map[*byte]*descriptor.FileDescriptorProto{}
	// cacheLock guards fileCache.
	cacheLock = sync.Mutex{}
)
// GetFileDescriptor decodes (and caches) the gzipped FileDescriptorProto
// bytes produced by protoc-gen-go. Returns (nil, nil) for empty input.
// The cache key is the address of data[0], which identifies the
// generated blob without hashing its contents.
func GetFileDescriptor(data []byte) (*descriptor.FileDescriptorProto, error) {
	if len(data) == 0 {
		return nil, nil
	}
	cacheLock.Lock()
	defer cacheLock.Unlock()
	if d, found := fileCache[&data[0]]; found {
		return d, nil
	}
	d, err := decodeFileDescriptor(data)
	if err != nil {
		return nil, err
	}
	fileCache[&data[0]] = d
	return d, nil
}
// decodeFileDescriptor gunzips the blob and unmarshals it into a
// FileDescriptorProto.
func decodeFileDescriptor(data []byte) (*descriptor.FileDescriptorProto, error) {
	zr, err := gzip.NewReader(bytes.NewReader(data))
	if err != nil {
		return nil, err
	}
	defer zr.Close()
	raw, err := ioutil.ReadAll(zr)
	if err != nil {
		return nil, err
	}
	fd := new(descriptor.FileDescriptorProto)
	if err := proto.Unmarshal(raw, fd); err != nil {
		return nil, err
	}
	return fd, nil
}
// DescriptorOf returns the descriptor for a given proto message.
// path[0] indexes the top-level message within the file; any further
// indices walk into nested message types.
func DescriptorOf(msg Described) (*descriptor.DescriptorProto, error) {
	data, path := msg.Descriptor()
	fileDescriptor, err := GetFileDescriptor(data)
	if err != nil {
		return nil, err
	}
	messageDescriptor := fileDescriptor.MessageType[path[0]]
	for _, i := range path[1:] {
		messageDescriptor = messageDescriptor.NestedType[i]
	}
	return messageDescriptor, nil
}
|
package models
import(
"encoding/json"
)
/**
 * Type6Enum is a string-backed enum distinguishing file-share and volume
 * object types; it marshals to/from the JSON strings "kFileShare"/"kVolume".
 */
type Type6Enum int
/**
 * Value collection for Type6Enum enum.
 * Values start at 1 so the zero value is distinguishable from a real choice.
 */
const (
	Type6_KFILESHARE Type6Enum = 1 + iota
	Type6_KVOLUME
)
// MarshalJSON encodes the enum as its JSON string representation.
func (r Type6Enum) MarshalJSON() ([]byte, error) {
	s := Type6EnumToValue(r)
	return json.Marshal(s)
}
// UnmarshalJSON decodes a JSON string into the enum value; unrecognized
// strings map to the default (Type6_KFILESHARE).
func (r *Type6Enum) UnmarshalJSON(data []byte) error {
	var s string
	// Fixed: the unmarshal error was previously ignored, silently mapping
	// malformed JSON to the default enum value instead of reporting it.
	if err := json.Unmarshal(data, &s); err != nil {
		return err
	}
	*r = Type6EnumFromValue(s)
	return nil
}
/**
 * Converts Type6Enum to its string representation.
 * Unknown values fall back to "kFileShare".
 */
func Type6EnumToValue(type6Enum Type6Enum) string {
	switch type6Enum {
	case Type6_KFILESHARE:
		return "kFileShare"
	case Type6_KVOLUME:
		return "kVolume"
	default:
		return "kFileShare"
	}
}
/**
 * Converts a Type6Enum slice to the corresponding string slice.
 */
func Type6EnumArrayToValue(type6Enum []Type6Enum) []string {
	values := make([]string, len(type6Enum))
	for i, e := range type6Enum {
		values[i] = Type6EnumToValue(e)
	}
	return values
}
/**
 * Converts given value to its enum representation.
 * Unknown strings fall back to Type6_KFILESHARE.
 */
func Type6EnumFromValue(value string) Type6Enum {
	switch value {
	case "kFileShare":
		return Type6_KFILESHARE
	case "kVolume":
		return Type6_KVOLUME
	default:
		return Type6_KFILESHARE
	}
}
|
package loader
import (
"testing"
"github.com/lukaszbudnik/migrator/config"
"github.com/stretchr/testify/assert"
)
// TestDiskReadDiskMigrationsNonExistingBaseDirError checks that a missing
// base dir surfaces as an open error.
// NOTE(review): the expected message is OS-specific (POSIX wording) — this
// assertion will fail on Windows; confirm the supported platforms.
func TestDiskReadDiskMigrationsNonExistingBaseDirError(t *testing.T) {
	var config config.Config
	config.BaseDir = "xyzabc"
	loader := NewLoader(&config)
	_, err := loader.GetDiskMigrations()
	assert.Equal(t, "open xyzabc: no such file or directory", err.Error())
}
// TestDiskGetDiskMigrations checks that migrations from single and tenant
// schema dirs are discovered and returned interleaved in version order.
func TestDiskGetDiskMigrations(t *testing.T) {
	var config config.Config
	config.BaseDir = "../test/migrations"
	config.SingleSchemas = []string{"config", "ref"}
	config.TenantSchemas = []string{"tenants"}
	loader := NewLoader(&config)
	migrations, err := loader.GetDiskMigrations()
	assert.Nil(t, err)
	assert.Len(t, migrations, 8)
	assert.Equal(t, "config/201602160001.sql", migrations[0].File)
	assert.Equal(t, "config/201602160002.sql", migrations[1].File)
	assert.Equal(t, "tenants/201602160002.sql", migrations[2].File)
	assert.Equal(t, "ref/201602160003.sql", migrations[3].File)
	assert.Equal(t, "tenants/201602160003.sql", migrations[4].File)
	assert.Equal(t, "ref/201602160004.sql", migrations[5].File)
	assert.Equal(t, "tenants/201602160004.sql", migrations[6].File)
	assert.Equal(t, "tenants/201602160005.sql", migrations[7].File)
}
|
package cmd
import (
"github.com/spf13/cobra"
"github.com/guumaster/hostctl/pkg/host"
)
// enableCmd represents the enable command: it turns a profile (or all
// profiles) back on in the hosts file and lists the resulting state.
var enableCmd = &cobra.Command{
	Use:   "enable",
	Short: "Enable a profile on your hosts file.",
	Long: `
Enables an existing profile.
It will be listed as "on" while it is enabled.
`,
	// Validate flags: require --profile unless --all, and never allow
	// touching the "default" profile.
	PreRunE: func(cmd *cobra.Command, args []string) error {
		profile, _ := cmd.Flags().GetString("profile")
		all, _ := cmd.Flags().GetBool("all")
		if !all && profile == "" {
			return host.MissingProfileError
		}
		if profile == "default" {
			return host.DefaultProfileError
		}
		return nil
	},
	RunE: func(cmd *cobra.Command, args []string) error {
		profile, _ := cmd.Flags().GetString("profile")
		all, _ := cmd.Flags().GetBool("all")
		// An empty profile name means "all profiles" to host.Enable.
		if all {
			profile = ""
		}
		src, _ := cmd.Flags().GetString("host-file")
		err := host.Enable(src, profile)
		if err != nil {
			return err
		}
		return host.ListProfiles(src, &host.ListOptions{
			Profile: profile,
		})
	},
}
// init registers the enable command and its --all flag on the root command.
func init() {
	rootCmd.AddCommand(enableCmd)
	enableCmd.Flags().BoolP("all", "", false, "Enable all profiles")
}
|
// project euler (projecteuler.net) problem 70
// solution by Kevin Retzke (retzkek@gmail.com), May 2012
package main
import (
"fmt"
"math"
)
// Primes holds an ascending list of known primes and the largest one found.
type Primes struct {
	Primes []int
	Last   int
}
// Init initializes a Primes struct with the first two primes.
func (p *Primes) Init() {
	p.Primes = []int{2, 3}
	p.Last = 3
}
// Next computes, returns, and appends the next prime number by trial
// division against the primes already known (which must cover every prime
// up to sqrt of the candidate).
func (p *Primes) Next() int {
	// Primes after 2 are odd, so step candidates by 2 from the last prime.
	for candidate := p.Last + 2; ; candidate += 2 {
		limit := math.Sqrt(float64(candidate))
		composite := false
		for _, prime := range p.Primes {
			if candidate%prime == 0 {
				composite = true
				break
			}
			if float64(prime) > limit {
				// No divisor found at or below sqrt(candidate): it's prime.
				break
			}
		}
		if !composite {
			p.Primes = append(p.Primes, candidate)
			p.Last = candidate
			return candidate
		}
	}
}
// Eratosthenes populates p with all primes up to max, computed with
// the Sieve of Eratosthenes.
func (p *Primes) Eratosthenes(max int) {
	composite := make([]bool, max)
	for i := 2; i*i < max; i++ {
		if composite[i] {
			continue
		}
		for j := 2 * i; j < max; j += i {
			composite[j] = true
		}
	}
	p.Primes = []int{2}
	for i := 3; i < max; i++ {
		if composite[i] {
			continue
		}
		p.Last = i
		p.Primes = append(p.Primes, p.Last)
	}
}
// countDigits returns a 10-element histogram of the decimal digits of n.
// For n <= 0 the histogram is all zeros.
func countDigits(n int) []int {
	counts := make([]int, 10)
	for ; n > 0; n /= 10 {
		counts[n%10]++
	}
	return counts
}
// arePermutations reports whether n and m contain exactly the same
// multiset of decimal digits.
func arePermutations(n, m int) bool {
	ncounts := countDigits(n)
	mcounts := countDigits(m)
	for d := range ncounts {
		if ncounts[d] != mcounts[d] {
			return false
		}
	}
	return true
}
// main searches semiprimes n = p*q below 10^7 for the n minimizing
// n/phi(n) subject to phi(n) being a digit-permutation of n, printing each
// new best candidate (Project Euler problem 70).
func main() {
	const maxn = 10000000
	primes := new(Primes)
	primes.Eratosthenes(maxn)
	minn := 0
	minnphin := float64(9999999)
	var phi float64
	for i, p := range primes.Primes {
		// Only consider q >= p to avoid duplicate pairs.
		for j := i; j < len(primes.Primes); j++ {
			q := primes.Primes[j]
			n := p * q
			if n > maxn {
				break
			}
			// phi(p^2) = p^2 - p; phi(p*q) = (p-1)(q-1) = n - (p+q) + 1.
			if p == q {
				phi = float64(n - p)
			} else {
				phi = float64(n - (p + q) + 1)
			}
			if nphin := float64(n) / phi; nphin < minnphin &&
				arePermutations(n, int(phi)) {
				minn = n
				minnphin = nphin
				fmt.Println(minn, int(phi), minnphin)
			}
		}
	}
}
|
package storage
import (
"fmt"
"io/ioutil"
"path"
"path/filepath"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/opt"
"context"
"github.com/twcclan/goback/backup"
"github.com/twcclan/goback/proto"
)
// NewSimpleObjectStore returns a SimpleChunkStore rooted at base; call
// Open before use.
// NOTE(review): the constructor name says "ObjectStore" while the type is
// "ChunkStore" — presumably a historical rename; confirm before unifying.
func NewSimpleObjectStore(base string) *SimpleChunkStore {
	return &SimpleChunkStore{
		base: base,
	}
}
// Compile-time check that SimpleChunkStore satisfies backup.ObjectStore.
var _ backup.ObjectStore = (*SimpleChunkStore)(nil)
// SimpleChunkStore persists objects in a LevelDB database under base,
// keyed by their SHA-1 refs.
type SimpleChunkStore struct {
	base string
	db   *leveldb.DB
}
// Open opens (or creates) the LevelDB database at the base path.
// NoSync trades durability on crash for faster writes.
func (s *SimpleChunkStore) Open() (err error) {
	s.db, err = leveldb.OpenFile(s.base, &opt.Options{
		NoSync: true,
	})
	return err
}
// Close closes the underlying LevelDB database.
func (s *SimpleChunkStore) Close() error {
	return s.db.Close()
}
// Has reports whether an object with the given SHA-1 ref exists.
func (s *SimpleChunkStore) Has(ctx context.Context, ref *proto.Ref) (bool, error) {
	return s.db.Has(ref.Sha1, nil)
}
// Put stores the serialized object under its SHA-1 ref.
func (s *SimpleChunkStore) Put(ctx context.Context, obj *proto.Object) error {
	// Simplified: the previous code checked err and then returned err on
	// both branches, which is equivalent to returning it directly.
	return s.db.Put(obj.Ref().Sha1, obj.Bytes(), nil)
}
// Delete removes the object stored under ref, if any. The context is
// currently unused.
func (s *SimpleChunkStore) Delete(ctx context.Context, ref *proto.Ref) error {
	key := ref.Sha1
	return s.db.Delete(key, nil)
}
// Get loads and deserialises the object stored under ref, propagating the
// LevelDB error (e.g. not-found) when the key is absent.
func (s *SimpleChunkStore) Get(ctx context.Context, ref *proto.Ref) (*proto.Object, error) {
	raw, err := s.db.Get(ref.Sha1, nil)
	if err != nil {
		return nil, err
	}
	return proto.NewObjectFromBytes(raw)
}
// Walk visits stored objects of the given chunkType, calling fn once per
// match. When load is true each object is read and deserialised before the
// callback; otherwise fn receives nil.
//
// NOTE(review): Walk globs "<type>-*" files under s.base, but Put/Get store
// objects in LevelDB — presumably a leftover from an earlier file-per-object
// layout; confirm Walk still sees the data it is expected to.
func (s *SimpleChunkStore) Walk(ctx context.Context, load bool, chunkType proto.ObjectType, fn backup.ObjectReceiver) error {
	matches, err := filepath.Glob(path.Join(s.base, fmt.Sprintf("%d-*", chunkType)))
	if err != nil {
		return err
	}
	for _, match := range matches {
		var obj *proto.Object
		if load {
			data, err := ioutil.ReadFile(match)
			if err != nil {
				return err
			}
			obj, err = proto.NewObjectFromBytes(data)
			if err != nil {
				return err
			}
		}
		// obj stays nil when load is false
		err = fn(obj)
		if err != nil {
			return err
		}
	}
	return nil
}
|
package main
import "fmt"
// table is the package-level hash table: one bucket (slice of values) per
// hash slot, built by loadTable and probed by search.
var table [][]int
// loadTable builds the package-level hash table from the values in a: each
// value is hashed modulo len(*a) and appended to the bucket at that slot,
// then the full table is dumped for inspection.
func loadTable(a *[]int) {
	table = make([][]int, len(*a))
	for _, e := range *a {
		h := hash(e, len(*a))
		fmt.Printf(" value is %d and the hash is %d\n", e, h)
		// append on a nil bucket allocates it, so no explicit make is needed
		table[h] = append(table[h], e)
	}
	// iterate the real table length rather than a hard-coded 10 so the dump
	// stays correct for inputs of any size
	for i := 0; i < len(table); i++ {
		fmt.Printf("table value is %+v\n", table[i])
	}
}
// hash maps key into [0, size) by folding the sequence 0..key into a
// multiplicative accumulator; deterministic for a given (key, size) pair.
func hash(key int, size int) int {
	acc := 0
	for i := 0; i <= key; i++ {
		acc = (acc*137 + i) % size
	}
	return acc
}
// search reports whether t is present in the package-level hash table built
// from a, probing only the bucket t hashes into.
func search(a *[]int, t int) bool {
	h := hash(t, len(*a))
	fmt.Printf("hash is %d\n", h)
	bucket := table[h]
	for _, candidate := range bucket {
		if candidate == t {
			return true
		}
	}
	return false
}
// main loads a demo data set into the hash table, then probes it for a hit
// (2), and two misses (0 and 12), echoing the data between probes.
func main() {
	values := []int{4, 19, 3, 8, 5, 16, 7, 2, 90, 10}
	loadTable(&values)
	for _, target := range []int{0, 2, 12} {
		fmt.Println(values)
		fmt.Printf("%d:%t\n", target, search(&values, target))
	}
}
|
package main
import "fmt"
// Dependency Inversion Principle
// HLM should not depend on LLM
// Both should depend on abstractions
// Relationship enumerates how one Person relates to another.
type Relationship int

const (
	Parent Relationship = iota
	Child
	Silbling // NOTE(review): typo of "Sibling"; renaming would break callers
)
// Info records one directed relationship edge between two people.
type Info struct {
	from         *Person      // subject of the relationship
	relationship Relationship // how from relates to to
	to           *Person      // object of the relationship
}

// Person holds the identity data used by this demo.
type Person struct {
	name     string
	birthday string
	address  string
}
// RelationshipBrowser is the abstraction both the high-level Research module
// and the low-level Relationships store depend on (Dependency Inversion).
type RelationshipBrowser interface {
	FindAllChildOf(name string) []*Person
}

// Relationships is the low-level storage of relationship edges.
type Relationships struct {
	// if low level storage has changed, then the code from highlevel will break
	relations []Info
}
// FindAllChildOf returns every Person recorded as a child of the named
// parent, in insertion order.
func (r *Relationships) FindAllChildOf(name string) []*Person {
	children := make([]*Person, 0)
	for _, info := range r.relations {
		if info.relationship == Parent && info.from.name == name {
			children = append(children, info.to)
		}
	}
	return children
}
// Research is the high-level module; it depends only on the
// RelationshipBrowser abstraction instead of the concrete store.
type Research struct {
	// break DIP
	// relationships Relationships
	browser RelationshipBrowser // refactor for DIP
}
// AddPerentAndChild records the bidirectional parent/child link between the
// two people. (The exported name keeps its original "Perent" spelling so
// existing callers keep compiling.)
func (r *Relationships) AddPerentAndChild(parent, child *Person) {
	forward := Info{from: parent, relationship: Parent, to: child}
	backward := Info{from: child, relationship: Child, to: parent}
	r.relations = append(r.relations, forward, backward)
}
// Investigate prints every child of "John" found through the browser
// abstraction, keeping this module independent of storage details.
func (r *Research) Investigate() {
	children := r.browser.FindAllChildOf("John")
	for _, child := range children {
		fmt.Println("John has a child called", child.name)
	}
}
// program entry: builds a small family, stores the relationships in the
// low-level store and lets the high-level module query them via the
// RelationshipBrowser abstraction.
func main() {
	john := Person{"John", "08/19/1999", "2345 St. Patrick Rd, San Jose, CA 93122"}
	chris := Person{"Chris", "06/30/1996", "883 St. John Rd, San Jose, CA 95123"}
	matt := Person{"Matt", "05/16/1994", "556 St. Matthew Rd, San Jose, CA 95123"}
	dave := Person{"Dave", "", ""}
	store := Relationships{}
	for _, kid := range []*Person{&chris, &matt, &dave} {
		store.AddPerentAndChild(&john, kid)
	}
	research := Research{&store}
	research.Investigate()
}
|
package viz
import (
"github.com/nsf/termbox-go"
)
// TitleView paints a window title bar: a horizontal rule plus the title
// text, optionally rendered bold.
type TitleView struct {
	win   *Window
	title string // text drawn at column 1 by Paint
	bold  bool   // when true Paint uses termbox.AttrBold
}
// MakeTitleView constructs a TitleView bound to win and registers it as the
// window's painter.
func MakeTitleView(win *Window) *TitleView {
	view := &TitleView{win: win}
	win.SetPainter(view)
	return view
}
// SetTitle records the text to draw on the next repaint; it does not trigger
// a repaint itself yet.
func (me *TitleView) SetTitle(title string) {
	me.title = title
	// TODO: Uncomment this once repaint requests are correctly async.
	// me.win.Repaint()
}
// Highlight toggles bold rendering of the title and repaints immediately.
func (me *TitleView) Highlight(bold bool) {
	me.bold = bold
	me.win.Repaint()
}
// Paint draws a horizontal rule across the top of the window and the title
// text at column 1, bold when highlighted.
func (me *TitleView) Paint() {
	surface := me.win.Surface()
	surface.HorzLine(0, 0, surface.Width())
	attr := termbox.Attribute(0)
	if me.bold {
		attr = termbox.AttrBold
	}
	surface.TextAttr(1, 0, me.title, attr)
}
|
package main
import (
"bufio"
"errors"
"log"
"net/url"
"os"
"strings"
"vim-swp-exp/settings"
"vim-swp-exp/watcher"
mapset "github.com/deckarep/golang-set"
cli "github.com/urfave/cli/v2"
)
// main configures the CLI (mutually exclusive -f / -u flags bound to the
// global settings) and runs it.
func main() {
	flags := []cli.Flag{
		&cli.StringFlag{
			Name:        "file",
			Aliases:     []string{"f"},
			Usage:       "specify the url list to watch",
			Destination: &settings.AppConfig.InputFilePath,
		},
		&cli.StringFlag{
			Name:        "url",
			Aliases:     []string{"u"},
			Usage:       "specify the url to watch",
			Destination: &settings.AppConfig.URL,
		},
	}
	app := &cli.App{
		Name:      "vim-swp-exp",
		Usage:     "vim swp file exploit",
		UsageText: "vim-swp-exp -u url | vim-swp-exp -f file",
		Version:   "v0.1",
		Authors: []*cli.Author{{
			Name:  "无在无不在",
			Email: "2227627947@qq.com",
		}},
		Flags:  flags,
		Action: run,
	}
	if err := app.Run(os.Args); err != nil {
		log.Println("app.Run failed,err:", err)
	}
}
// run dispatches based on flags: watch a single URL (-u), or read a file of
// URLs (-f), deduplicate them and watch each concurrently. With neither flag
// it shows help and returns an error.
func run(c *cli.Context) error {
	log.Println("start watching:")
	if len(settings.AppConfig.URL) > 0 {
		u, err := url.Parse(settings.AppConfig.URL)
		if err != nil {
			log.Println("url.Parse failed,err:", err)
			return err
		}
		watcher.Watch(u)
	} else if len(settings.AppConfig.InputFilePath) > 0 {
		file, err := os.Open(settings.AppConfig.InputFilePath)
		if err != nil {
			log.Println("os.Open failed,err:", err)
			return err
		}
		// previously the file handle was never closed
		defer file.Close()
		URLSet := mapset.NewSet()
		scanner := bufio.NewScanner(file)
		for scanner.Scan() {
			line := strings.TrimSpace(scanner.Text())
			if len(line) == 0 {
				continue
			}
			u, err := url.Parse(line)
			if err != nil {
				log.Println("url.Parse failed,err:", err)
				continue
			}
			// deduplicate by canonical string form: *url.URL pointers are
			// always distinct, so a set of pointers never detects duplicates
			if URLSet.Contains(u.String()) {
				continue
			}
			URLSet.Add(u.String())
			go watcher.Watch(u)
		}
		// surface read errors instead of silently watching a partial list
		if err := scanner.Err(); err != nil {
			log.Println("scanner failed,err:", err)
			return err
		}
		select {}
	} else {
		// help output is best-effort; the sentinel error below signals misuse
		_ = cli.ShowAppHelp(c)
		return errors.New("pls specify -u or -f")
	}
	return nil
}
|
package c37pb
//go:generate protoc -I/usr/local/include -I. --go_out=plugins=grpc:. messages.proto
|
package cmdimages
import (
"github.com/tilt-dev/tilt/internal/store"
)
// HandleCmdImageUpsertAction stores (or replaces) the upserted CmdImage in
// engine state, keyed by its name.
func HandleCmdImageUpsertAction(state *store.EngineState, action CmdImageUpsertAction) {
	image := action.CmdImage
	state.CmdImages[image.Name] = image
}
// HandleCmdImageDeleteAction removes the named CmdImage from engine state;
// deleting a missing key is a no-op.
func HandleCmdImageDeleteAction(state *store.EngineState, action CmdImageDeleteAction) {
	name := action.Name
	delete(state.CmdImages, name)
}
|
package models
import (
"gorm.io/gorm"
"time"
)
// Session is a database-backed session record tied to a User via UserId.
type Session struct {
	gorm.Model
	Uuid        string `gorm:"index:,unique;not null"` // unique session identifier
	UserId      uint   // owning user's primary key
	ExpiresAt   time.Time
	RefreshCode string `gorm:"index;not null"`
	User        User   `gorm:"foreignKey:UserId"` // association resolved through UserId
}
|
package impl_test
import (
"encoding/json"
"testing"
"github.com/stretchr/testify/require"
"github.com/bgildson/ifood_backend_challenge/base"
"github.com/bgildson/ifood_backend_challenge/impl"
)
// TestJSONPlaylistSerializerDecode checks that valid JSON decodes into
// tracks (unknown fields ignored) and that invalid input yields an error.
func TestJSONPlaylistSerializerDecode(t *testing.T) {
	serializer := impl.JSONPlaylistSerializer{}
	// success: names survive decoding, extra fields are ignored
	playlist := []map[string]interface{}{
		{"name": "track 01", "artist": map[string]interface{}{"name": "artist name"}},
		{"name": "track 02", "other_field": false},
	}
	data, _ := json.Marshal(playlist)
	result, err := serializer.Decode(data)
	require.Nil(t, err)
	require.Len(t, result, len(playlist))
	for i, track := range playlist {
		require.Equal(t, track["name"], result[i].Name)
	}
	// failure: malformed input must produce a nil result and an error
	malformed := []byte("something that is not a json")
	result, err = serializer.Decode(malformed)
	require.NotNil(t, err)
	require.Nil(t, result)
}
// TestJSONPlaylistSerializerEncode checks that an encoded playlist
// unmarshals back to the same track names.
func TestJSONPlaylistSerializerEncode(t *testing.T) {
	serializer := impl.JSONPlaylistSerializer{}
	playlist := base.Playlist{
		{Name: "track 01"},
		{Name: "track 02"},
	}
	result, err := serializer.Encode(playlist)
	require.Nil(t, err)
	var data []map[string]interface{}
	// the Unmarshal error was previously discarded, which would silently
	// mask an Encode that produced invalid JSON
	require.Nil(t, json.Unmarshal(result, &data))
	require.Len(t, data, len(playlist))
	for k, v := range playlist {
		require.Equal(t, v.Name, data[k]["name"])
	}
}
|
package chroma
import (
	"compress/gzip"
	"encoding/xml"
	"errors"
	"fmt"
	"io"
	"io/fs"
	"math"
	"path/filepath"
	"reflect"
	"regexp"
	"sort"
	"strings"

	"github.com/dlclark/regexp2"
)
// Serialisation of Chroma rules to XML. The format is:
//
// <rules>
// <state name="$STATE">
// <rule [pattern="$PATTERN"]>
// [<$EMITTER ...>]
// [<$MUTATOR ...>]
// </rule>
// </state>
// </rules>
//
// eg. Include("String") would become:
//
// <rule>
// <include state="String" />
// </rule>
//
// [null, null, {"kind": "include", "state": "String"}]
//
// eg. Rule{`\d+`, Text, nil} would become:
//
// <rule pattern="\\d+">
// <token type="Text"/>
// </rule>
//
// eg. Rule{`"`, String, Push("String")}
//
// <rule pattern="\"">
// <token type="String" />
// <push state="String" />
// </rule>
//
// eg. Rule{`(\w+)(\n)`, ByGroups(Keyword, Whitespace), nil},
//
// <rule pattern="(\\w+)(\\n)">
// <bygroups token="Keyword" token="Whitespace" />
// <push state="String" />
// </rule>
var (
	// ErrNotSerialisable is returned if a lexer contains Rules that cannot be serialised.
	ErrNotSerialisable = fmt.Errorf("not serialisable")

	// emitterTemplates maps each emitter's XML element name (EmitterKind)
	// to a zero-value template used to instantiate it while decoding.
	emitterTemplates = func() map[string]SerialisableEmitter {
		out := map[string]SerialisableEmitter{}
		for _, emitter := range []SerialisableEmitter{
			&byGroupsEmitter{},
			&usingSelfEmitter{},
			TokenType(0),
			&usingEmitter{},
			&usingByGroup{},
		} {
			out[emitter.EmitterKind()] = emitter
		}
		return out
	}()
	// mutatorTemplates maps each mutator's XML element name (MutatorKind)
	// to a zero-value template used to instantiate it while decoding.
	mutatorTemplates = func() map[string]SerialisableMutator {
		out := map[string]SerialisableMutator{}
		for _, mutator := range []SerialisableMutator{
			&includeMutator{},
			&combinedMutator{},
			&multiMutator{},
			&pushMutator{},
			&popMutator{},
		} {
			out[mutator.MutatorKind()] = mutator
		}
		return out
	}()
)
// fastUnmarshalConfig unmarshals only the Config from a serialised lexer.
// It scans the token stream and decodes the first <config> element, avoiding
// a full parse of the (potentially large) rules section.
func fastUnmarshalConfig(from fs.FS, path string) (*Config, error) {
	f, err := from.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	dec := xml.NewDecoder(f)
	for {
		tok, err := dec.Token()
		if err != nil {
			if errors.Is(err, io.EOF) {
				return nil, fmt.Errorf("could not find <config> element")
			}
			return nil, err
		}
		start, ok := tok.(xml.StartElement)
		if !ok || start.Name.Local != "config" {
			continue
		}
		var config Config
		if err := dec.DecodeElement(&config, &start); err != nil {
			return nil, fmt.Errorf("%s: %w", path, err)
		}
		return &config, nil
	}
}
// MustNewXMLLexer constructs a new RegexLexer from an XML file or panics.
func MustNewXMLLexer(from fs.FS, path string) *RegexLexer {
	lexer, err := NewXMLLexer(from, path)
	if err == nil {
		return lexer
	}
	panic(err)
}
// NewXMLLexer creates a new RegexLexer from a serialised RegexLexer.
//
// Only the Config section is parsed eagerly (globs validated, optional
// analyser regexes compiled); the rules are parsed lazily by the returned
// lexer's fetchRulesFunc, falling back to a ".gz" sibling of path.
func NewXMLLexer(from fs.FS, path string) (*RegexLexer, error) {
	config, err := fastUnmarshalConfig(from, path)
	if err != nil {
		return nil, err
	}
	// validate filename globs up front so bad patterns fail at load time
	for _, glob := range append(config.Filenames, config.AliasFilenames...) {
		_, err := filepath.Match(glob, "")
		if err != nil {
			return nil, fmt.Errorf("%s: %q is not a valid glob: %w", config.Name, glob, err)
		}
	}
	var analyserFn func(string) float32
	if config.Analyse != nil {
		type regexAnalyse struct {
			re    *regexp2.Regexp
			score float32
		}
		regexAnalysers := make([]regexAnalyse, 0, len(config.Analyse.Regexes))
		for _, ra := range config.Analyse.Regexes {
			re, err := regexp2.Compile(ra.Pattern, regexp2.None)
			if err != nil {
				return nil, fmt.Errorf("%s: %q is not a valid analyser regex: %w", config.Name, ra.Pattern, err)
			}
			regexAnalysers = append(regexAnalysers, regexAnalyse{re, ra.Score})
		}
		// score text by summing matching regex scores; in Single mode the
		// first match decides. The result is clamped to 1.0.
		analyserFn = func(text string) float32 {
			var score float32
			for _, ra := range regexAnalysers {
				ok, err := ra.re.MatchString(text)
				if err != nil {
					return 0
				}
				if ok && config.Analyse.Single {
					return float32(math.Min(float64(ra.score), 1.0))
				}
				if ok {
					score += ra.score
				}
			}
			return float32(math.Min(float64(score), 1.0))
		}
	}
	return &RegexLexer{
		config:   config,
		analyser: analyserFn,
		// lazily parse the full rules only when first needed
		fetchRulesFunc: func() (Rules, error) {
			var lexer struct {
				Config
				Rules Rules `xml:"rules"`
			}
			// Try to open .xml fallback to .xml.gz
			fr, err := from.Open(path)
			if err != nil {
				if errors.Is(err, fs.ErrNotExist) {
					path += ".gz"
					fr, err = from.Open(path)
					if err != nil {
						return nil, err
					}
				} else {
					return nil, err
				}
			}
			defer fr.Close()
			var r io.Reader = fr
			if strings.HasSuffix(path, ".gz") {
				r, err = gzip.NewReader(r)
				if err != nil {
					return nil, fmt.Errorf("%s: %w", path, err)
				}
			}
			err = xml.NewDecoder(r).Decode(&lexer)
			if err != nil {
				return nil, fmt.Errorf("%s: %w", path, err)
			}
			return lexer.Rules, nil
		},
	}, nil
}
// emptyElementRe matches an immediately-closed element pair (e.g.
// "></token>") so it can be collapsed to a self-closing tag. Compiled once
// at package init instead of on every Marshal call.
var emptyElementRe = regexp.MustCompile(`></[a-zA-Z]+>`)

// Marshal a RegexLexer to XML, indenting with two spaces and collapsing
// empty elements to self-closing form.
func Marshal(l *RegexLexer) ([]byte, error) {
	type lexer struct {
		Config Config `xml:"config"`
		Rules  Rules  `xml:"rules"`
	}

	rules, err := l.Rules()
	if err != nil {
		return nil, err
	}
	root := &lexer{
		Config: *l.Config(),
		Rules:  rules,
	}
	data, err := xml.MarshalIndent(root, "", "  ")
	if err != nil {
		return nil, err
	}
	data = emptyElementRe.ReplaceAll(data, []byte(`/>`))
	return data, nil
}
// Unmarshal a RegexLexer from XML.
func Unmarshal(data []byte) (*RegexLexer, error) {
	var root struct {
		Config Config `xml:"config"`
		Rules  Rules  `xml:"rules"`
	}
	if err := xml.Unmarshal(data, &root); err != nil {
		return nil, fmt.Errorf("invalid Lexer XML: %w", err)
	}
	return NewLexer(&root.Config, func() Rules { return root.Rules })
}
// marshalMutator encodes mutator as an element named after its kind. Nil
// mutators are skipped; non-serialisable ones are an error.
func marshalMutator(e *xml.Encoder, mutator Mutator) error {
	if mutator == nil {
		return nil
	}
	serialisable, ok := mutator.(SerialisableMutator)
	if !ok {
		return fmt.Errorf("unsupported mutator: %w", ErrNotSerialisable)
	}
	name := xml.Name{Local: serialisable.MutatorKind()}
	return e.EncodeElement(mutator, xml.StartElement{Name: name})
}
// unmarshalMutator decodes the element at start into the mutator type
// registered under that element's name.
func unmarshalMutator(d *xml.Decoder, start xml.StartElement) (Mutator, error) {
	template, ok := mutatorTemplates[start.Name.Local]
	if !ok {
		return nil, fmt.Errorf("unknown mutator %q: %w", start.Name.Local, ErrNotSerialisable)
	}
	value, target := newFromTemplate(template)
	if err := d.DecodeElement(target, &start); err != nil {
		return nil, err
	}
	return value().(SerialisableMutator), nil
}
// marshalEmitter encodes emitter as an element named after its kind. Nil
// emitters are skipped; non-serialisable ones are an error.
func marshalEmitter(e *xml.Encoder, emitter Emitter) error {
	if emitter == nil {
		return nil
	}
	serialisable, ok := emitter.(SerialisableEmitter)
	if !ok {
		return fmt.Errorf("unsupported emitter %T: %w", emitter, ErrNotSerialisable)
	}
	name := xml.Name{Local: serialisable.EmitterKind()}
	return e.EncodeElement(emitter, xml.StartElement{Name: name})
}
// unmarshalEmitter decodes the element at start into the emitter type
// registered under that element's name.
func unmarshalEmitter(d *xml.Decoder, start xml.StartElement) (Emitter, error) {
	kind := start.Name.Local
	// local was misleadingly named "mutator" before; it is an emitter template
	emitter, ok := emitterTemplates[kind]
	if !ok {
		return nil, fmt.Errorf("unknown emitter %q: %w", kind, ErrNotSerialisable)
	}
	value, target := newFromTemplate(emitter)
	if err := d.DecodeElement(target, &start); err != nil {
		return nil, err
	}
	return value().(SerialisableEmitter), nil
}
// MarshalXML writes the rule as <rule [pattern=...]> with optional emitter
// and mutator child elements.
func (r Rule) MarshalXML(e *xml.Encoder, _ xml.StartElement) error {
	start := xml.StartElement{Name: xml.Name{Local: "rule"}}
	if r.Pattern != "" {
		attr := xml.Attr{Name: xml.Name{Local: "pattern"}, Value: r.Pattern}
		start.Attr = append(start.Attr, attr)
	}
	if err := e.EncodeToken(start); err != nil {
		return err
	}
	if err := marshalEmitter(e, r.Type); err != nil {
		return err
	}
	if err := marshalMutator(e, r.Mutator); err != nil {
		return err
	}
	end := xml.EndElement{Name: start.Name}
	return e.EncodeToken(end)
}
// UnmarshalXML reads a <rule> element: the optional pattern attribute plus
// at most one emitter and one mutator child. Each child element is first
// tried as a mutator; if that yields ErrNotSerialisable (unknown kind) it is
// retried as an emitter.
func (r *Rule) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	for _, attr := range start.Attr {
		if attr.Name.Local == "pattern" {
			r.Pattern = attr.Value
			break
		}
	}
	for {
		token, err := d.Token()
		if err != nil {
			return err
		}
		switch token := token.(type) {
		case xml.StartElement:
			// try the child as a mutator first; ErrNotSerialisable means
			// "not a known mutator kind" and falls through to emitters
			mutator, err := unmarshalMutator(d, token)
			if err != nil && !errors.Is(err, ErrNotSerialisable) {
				return err
			} else if err == nil {
				if r.Mutator != nil {
					return fmt.Errorf("duplicate mutator")
				}
				r.Mutator = mutator
				continue
			}
			emitter, err := unmarshalEmitter(d, token)
			if err != nil && !errors.Is(err, ErrNotSerialisable) { // nolint: gocritic
				return err
			} else if err == nil {
				if r.Type != nil {
					return fmt.Errorf("duplicate emitter")
				}
				r.Type = emitter
				continue
			} else {
				// neither a known mutator nor a known emitter
				return err
			}
		case xml.EndElement:
			return nil
		}
	}
}
// xmlRuleState is the XML shadow of one lexer state: its name plus rules.
type xmlRuleState struct {
	Name  string `xml:"name,attr"`
	Rules []Rule `xml:"rule"`
}

// xmlRules is the XML shadow of the Rules map, flattened to a state list.
type xmlRules struct {
	States []xmlRuleState `xml:"state"`
}
// MarshalXML serialises the rule map as a <rules> element with one <state>
// child per lexer state. Map iteration order is random in Go, so state names
// are sorted first to make the serialised output deterministic.
func (r Rules) MarshalXML(e *xml.Encoder, _ xml.StartElement) error {
	names := make([]string, 0, len(r))
	for state := range r {
		names = append(names, state)
	}
	sort.Strings(names)
	xr := xmlRules{States: make([]xmlRuleState, 0, len(names))}
	for _, name := range names {
		xr.States = append(xr.States, xmlRuleState{
			Name:  name,
			Rules: r[name],
		})
	}
	return e.EncodeElement(xr, xml.StartElement{Name: xml.Name{Local: "rules"}})
}
// UnmarshalXML reads a <rules> element, merging each <state> into the map
// and allocating the map first if the receiver is nil.
func (r *Rules) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	var xr xmlRules
	if err := d.DecodeElement(&xr, &start); err != nil {
		return err
	}
	if *r == nil {
		*r = Rules{}
	}
	for _, st := range xr.States {
		(*r)[st.Name] = st.Rules
	}
	return nil
}
// xmlTokenType carries a token type name in a "type" attribute.
type xmlTokenType struct {
	Type string `xml:"type,attr"`
}
// UnmarshalXML reads the element's "type" attribute and resolves it to the
// named TokenType value.
func (t *TokenType) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	var el xmlTokenType
	if err := d.DecodeElement(&el, &start); err != nil {
		return err
	}
	parsed, err := TokenTypeString(el.Type)
	if err == nil {
		*t = parsed
	}
	return err
}
// MarshalXML writes the token type as an empty element carrying its string
// name in a "type" attribute.
func (t TokenType) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	typeAttr := xml.Attr{Name: xml.Name{Local: "type"}, Value: t.String()}
	start.Attr = append(start.Attr, typeAttr)
	if err := e.EncodeToken(start); err != nil {
		return err
	}
	end := xml.EndElement{Name: start.Name}
	return e.EncodeToken(end)
}
// newFromTemplate allocates a fresh value of the template's dynamic type,
// returning an accessor for the final value and a pointer target suitable
// for XML decoding. Pointer templates yield the new pointer itself; value
// templates (e.g. TokenType) are decoded through a pointer and returned by
// value — without this we can't deserialise into TokenType.
func newFromTemplate(template interface{}) (value func() interface{}, target interface{}) {
	t := reflect.TypeOf(template)
	if t.Kind() != reflect.Ptr {
		v := reflect.New(t)
		return func() interface{} { return v.Elem().Interface() }, v.Interface()
	}
	v := reflect.New(t.Elem())
	return v.Interface, v.Interface()
}
// UnmarshalXML appends one emitter per child element until the closing tag.
func (b *Emitters) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	for {
		tok, err := d.Token()
		if err != nil {
			return err
		}
		if _, done := tok.(xml.EndElement); done {
			return nil
		}
		se, ok := tok.(xml.StartElement)
		if !ok {
			continue // ignore character data etc.
		}
		emitter, err := unmarshalEmitter(d, se)
		if err != nil {
			return err
		}
		*b = append(*b, emitter)
	}
}
// MarshalXML writes the start tag, each emitter in order, then the end tag.
func (b Emitters) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	if err := e.EncodeToken(start); err != nil {
		return err
	}
	for _, emitter := range b {
		if err := marshalEmitter(e, emitter); err != nil {
			return err
		}
	}
	end := xml.EndElement{Name: start.Name}
	return e.EncodeToken(end)
}
|
package main
import (
"fmt"
"math/rand"
)
// node is a treap node: a binary search tree on val and a max-heap on the
// randomly assigned priority. ch[0] is the left child, ch[1] the right.
type node struct {
	ch       [2]*node
	priority int // random heap priority assigned at insertion
	val      int
}
// cmp compares b against the node's value: 0 means descend left (b is
// smaller), 1 means descend right (b is larger), -1 means b equals val.
func (o *node) cmp(b int) int {
	if b < o.val {
		return 0
	}
	if b > o.val {
		return 1
	}
	return -1
}
// rotate lifts the child opposite direction d to the root of this subtree
// and returns it as the new subtree root.
func (o *node) rotate(d int) *node {
	child := o.ch[d^1]
	o.ch[d^1], child.ch[d] = child.ch[d], o
	return child
}
// treap is a randomized balanced BST over distinct ints; cnt tracks how
// many values are stored.
type treap struct {
	root *node
	cnt  int
}
// _insert inserts val into the subtree rooted at o, rotating as needed to
// keep the heap property on priorities; re-inserting an existing value is a
// no-op.
func (t *treap) _insert(o *node, val int) *node {
	if o == nil {
		t.cnt++
		return &node{priority: rand.Int(), val: val}
	}
	d := o.cmp(val)
	if d < 0 {
		return o // val already present
	}
	o.ch[d] = t._insert(o.ch[d], val)
	if o.ch[d].priority > o.priority {
		return o.rotate(d ^ 1)
	}
	return o
}
// insert adds val to the treap; duplicates are ignored.
func (t *treap) insert(val int) {
	root := t._insert(t.root, val)
	t.root = root
}
// _erase removes val from the subtree rooted at o and returns the new
// subtree root; erasing a missing value leaves the tree unchanged.
func (t *treap) _erase(o *node, val int) *node {
	if o == nil {
		return nil
	}
	if d := o.cmp(val); d >= 0 {
		// val lives in one of the subtrees; recurse on that side
		o.ch[d] = t._erase(o.ch[d], val)
		return o
	}
	// o holds val
	t.cnt--
	if o.ch[1] == nil {
		return o.ch[0]
	}
	if o.ch[0] == nil {
		return o.ch[1]
	}
	// two children: rotate the higher-priority child up to preserve the
	// heap property, then continue erasing in the subtree val moved into
	d := 0
	if o.ch[0].priority > o.ch[1].priority {
		d = 1
	}
	o = o.rotate(d)
	o.ch[d] = t._erase(o.ch[d], val)
	return o
}
// erase removes val from the treap if present.
func (t *treap) erase(val int) {
	root := t._erase(t.root, val)
	t.root = root
}
// lowerBound returns the node with the smallest value >= val, or nil when
// every stored value is smaller.
func (t *treap) lowerBound(val int) *node {
	var lb *node
	o := t.root
	for o != nil {
		c := o.cmp(val)
		if c == -1 {
			return o // exact match
		}
		if c == 0 {
			lb = o // candidate; smaller ones may exist to the left
			o = o.ch[0]
		} else {
			o = o.ch[1]
		}
	}
	return lb
}
// upperBound descends the tree for val and returns the node found by the
// original search policy: an exact match is returned directly, otherwise the
// last node passed while descending right.
func (t *treap) upperBound(val int) *node {
	var ub *node
	o := t.root
	for o != nil {
		c := o.cmp(val)
		if c == -1 {
			return o
		}
		if c == 1 {
			ub = o
			o = o.ch[1]
		} else {
			o = o.ch[0]
		}
	}
	return ub
}
// size returns the number of distinct values stored.
func (t *treap) size() int { return t.cnt }
// begin returns the smallest stored value, or 0 for an empty treap.
func (t *treap) begin() int {
	smallest := 0
	for o := t.root; o != nil; o = o.ch[0] {
		smallest = o.val
	}
	return smallest
}
// end returns the largest stored value, or 0 for an empty treap.
func (t *treap) end() int {
	largest := 0
	for o := t.root; o != nil; o = o.ch[1] {
		largest = o.val
	}
	return largest
}
// main demonstrates the treap: duplicate inserts are ignored (size 3 after
// inserting 4 twice), then erasing 4 drops the size to 2.
func main() {
	tr := &treap{}
	for _, v := range []int{3, 6, 4, 4} {
		tr.insert(v)
	}
	fmt.Println(tr.size())
	tr.erase(4)
	fmt.Println(tr.size())
}
|
package generator
import (
"io/ioutil"
"os"
"path/filepath"
"github.com/pkg/errors"
yaml "gopkg.in/yaml.v2"
)
// ImportedPluginsConfigs pairs a resolved import file path with the plugin
// configuration parsed from it.
type ImportedPluginsConfigs struct {
	Path           string
	PluginsConfigs PluginsConfigs
}

// PluginsConfigs is a free-form plugin-name -> configuration mapping.
type PluginsConfigs map[string]interface{}

// GenerateConfig is the top-level generator configuration: inline plugin
// settings plus a list of extra configuration files to import.
type GenerateConfig struct {
	GenerateTraces        bool     `yaml:"generate_tracer"`
	VendorPath            string   `yaml:"vendor_path"`
	Imports               []string `yaml:"imports"`
	PluginsConfigsImports []ImportedPluginsConfigs // populated by ParseImports
	PluginsConfigs        `yaml:",inline"`
}
// ParseImports reads every path in gc.Imports (environment variables
// expanded, then made absolute), unmarshals each YAML file into a
// PluginsConfigs map, and records it with its source path in
// gc.PluginsConfigsImports.
func (gc *GenerateConfig) ParseImports() error {
	for _, importPath := range gc.Imports {
		normalized := os.ExpandEnv(importPath)
		normalized, err := filepath.Abs(normalized)
		if err != nil {
			return errors.Wrapf(err, "failed to make normalized path '%s' absolute", normalized)
		}
		raw, err := ioutil.ReadFile(normalized)
		if err != nil {
			return errors.Wrapf(err, "Failed to read import '%s' file", normalized)
		}
		configs := PluginsConfigs{}
		if err := yaml.Unmarshal(raw, configs); err != nil {
			return errors.Wrapf(err, "Failed to unmarshal import '%s' file", normalized)
		}
		gc.PluginsConfigsImports = append(gc.PluginsConfigsImports, ImportedPluginsConfigs{
			Path:           normalized,
			PluginsConfigs: configs,
		})
	}
	return nil
}
|
package main
import (
"fmt"
"log"
"net/http"
"os"
"github.com/line/line-bot-sdk-go/linebot"
)
// main wires the LINE webhook handler to /callback and serves on $PORT.
func main() {
	handler, err := newHandler()
	if err != nil {
		log.Fatal(err)
	}
	http.HandleFunc("/callback", handler.HandleEvent)
	addr := fmt.Sprintf(":%s", os.Getenv("PORT"))
	if err := http.ListenAndServe(addr, nil); err != nil {
		log.Fatal(err)
	}
}
// EventHandler handles incoming LINE webhook HTTP requests.
type EventHandler interface {
	HandleEvent(resWriter http.ResponseWriter, request *http.Request)
}

// eventHandler is the linebot-backed EventHandler implementation.
type eventHandler struct {
	client *linebot.Client
}
// newHandler builds an EventHandler backed by a LINE bot client configured
// from the CHANNEL_SECRET and CHANNEL_ACCESS_TOKEN environment variables.
func newHandler() (EventHandler, error) {
	secret := os.Getenv("CHANNEL_SECRET")
	token := os.Getenv("CHANNEL_ACCESS_TOKEN")
	client, err := linebot.New(secret, token)
	if err != nil {
		return nil, err
	}
	return &eventHandler{client: client}, nil
}
// HandleEvent validates and parses a LINE webhook request, then echoes any
// received text message back to its sender. Signature failures get 400,
// other parse errors 500.
func (h *eventHandler) HandleEvent(resWriter http.ResponseWriter, request *http.Request) {
	events, err := h.client.ParseRequest(request)
	if err != nil {
		// named status constants instead of bare 400/500 literals
		if err == linebot.ErrInvalidSignature {
			resWriter.WriteHeader(http.StatusBadRequest)
		} else {
			resWriter.WriteHeader(http.StatusInternalServerError)
		}
		return
	}
	// handle each delivered event
	for _, event := range events {
		log.Printf("Receive Event: %v", event)
		// only message events carry content to echo
		if event.Type != linebot.EventTypeMessage {
			continue
		}
		switch message := event.Message.(type) {
		case *linebot.TextMessage:
			// echo the text message back to the sender
			if _, err = h.client.ReplyMessage(event.ReplyToken, linebot.NewTextMessage(message.Text)).Do(); err != nil {
				// the standard log library has no notion of logging levels
				log.Print(err)
			}
		}
	}
}
|
package main
import "fmt"
import "./vec"
// main builds a unit cube and prints whether a ray starting above it and
// pointing straight down intersects the mesh.
func main() {
	cube := Cube(1) // a unit cube
	ray := Ray{vec.Make(1, 1, 2), vec.Make(0, 0, -1)}
	fmt.Printf("%v\n", ray.IntersectsMesh(cube))
}
// Cube builds a mesh for an axis-aligned cube of the given half-width: three
// faces are generated for +radius and mirrored for -radius, giving all six.
func Cube(radius float64) *Mesh {
	faces := make([]*Mesh, 0, 6)
	for _, r := range []float64{radius, -radius} {
		top := Quadrilateral(
			vec.Make(-r, r, -r),
			vec.Make(r, r, -r),
			vec.Make(r, r, r),
			vec.Make(-r, r, r))
		front := Quadrilateral(
			vec.Make(-r, r, r),
			vec.Make(r, r, r),
			vec.Make(r, -r, r),
			vec.Make(-r, -r, r))
		right := Quadrilateral(
			vec.Make(r, r, r),
			vec.Make(r, r, -r),
			vec.Make(r, -r, -r),
			vec.Make(r, -r, r))
		faces = append(faces, top, front, right)
	}
	return Fuse(faces)
}
// Quadrilateral builds a mesh of two triangles covering the quad with the
// given corner points (named by compass position).
func Quadrilateral(nw *vec.Vector, ne *vec.Vector, se *vec.Vector, sw *vec.Vector) *Mesh {
	upper := &Triangle{nw, ne, se}
	lower := &Triangle{se, sw, nw}
	return &Mesh{[]*Triangle{upper, lower}}
}
// Fuse concatenates the triangles of all meshes into one mesh, preserving
// their order.
func Fuse(meshes []*Mesh) *Mesh {
	total := 0
	for _, m := range meshes {
		total += len(m.Triangles)
	}
	fused := make([]*Triangle, 0, total)
	for _, m := range meshes {
		fused = append(fused, m.Triangles...)
	}
	return &Mesh{fused}
}
// Ray is a half-line starting at Start and extending along Direction.
type Ray struct {
	Start     *vec.Vector
	Direction *vec.Vector // relative direction
}
// String renders the ray as "Ray <start> <direction>".
func (ray *Ray) String() string {
	return fmt.Sprintf("Ray %v %v", ray.Start, ray.Direction)
}
// IntersectsTriangle reports whether the ray hits the triangle: the ray is
// intersected with the triangle's supporting plane, then the hit point's
// coordinates in the triangle's edge basis are tested against the bounds.
func (ray *Ray) IntersectsTriangle(tri *Triangle) bool {
	// u and v are the triangle's edge vectors from corner A
	u := vec.Subtract(tri.B, tri.A)
	v := vec.Subtract(tri.C, tri.A)
	if vec.Parallel(u, v) { // the triangle is degenerate
		// we could check for a ray/line-segment intersection here, but it seems pointless
		return false
	}
	// n is the normal to the supporting plane of the triangle
	n := vec.CrossProduct(u, v)
	// w0 is a relative vector from the start of the ray to one corner of the triangle
	w0 := vec.Subtract(ray.Start, tri.A)
	if vec.Orthogonal(n, ray.Direction) { // the ray is parallel to the triangle
		// again, ray/line-segment intersection
		return false
	} else {
		// r parametrises where the ray meets the plane; r < 0 means the
		// plane lies behind the ray's origin
		a := -vec.DotProduct(n, w0)
		b := vec.DotProduct(n, ray.Direction)
		r := a / b
		if r < 0 {
			return false
		}
		// i is the intersection point with the supporting plane
		i := vec.Add(ray.Start, vec.Scale(r, ray.Direction))
		uu := vec.DotProduct(u, u)
		uv := vec.DotProduct(u, v)
		vv := vec.DotProduct(v, v)
		w := vec.Subtract(i, tri.A)
		wu := vec.DotProduct(w, u)
		wv := vec.DotProduct(w, v)
		d := uv * uv - uu * vv
		// s and t locate i in the (u, v) basis; the hit is inside the
		// triangle iff s >= 0, t >= 0 and s + t <= 1
		s := (uv * wv - vv * wu) / d
		if s < 0 || s > 1 {
			return false
		}
		t := (uv * wu - uu * wv) / d
		if t < 0 || s + t > 1 {
			return false
		}
	}
	return true
}
// IntersectsMesh reports whether the ray hits any triangle of the mesh.
func (ray *Ray) IntersectsMesh(mesh *Mesh) bool {
	for _, tri := range mesh.Triangles {
		if !ray.IntersectsTriangle(tri) {
			continue
		}
		return true
	}
	return false
}
// Mesh is a collection of triangles treated as a single surface.
type Mesh struct {
	Triangles []*Triangle
}
// String renders the mesh as "Mesh {tri, tri, ...}".
func (mesh *Mesh) String() string {
	parts := ""
	for i, tri := range mesh.Triangles {
		if i > 0 {
			parts += ", "
		}
		parts += tri.String()
	}
	return fmt.Sprintf("Mesh {%v}", parts)
}
// Triangle is a triangle given by its three corner points.
type Triangle struct {
	A *vec.Vector
	B *vec.Vector
	C *vec.Vector
}
// String renders the triangle as "Triangle <A> <B> <C>".
func (tri *Triangle) String() string {
	return fmt.Sprintf("Triangle %v %v %v", tri.A, tri.B, tri.C)
}
|
package handlers
import (
"net/http"
"peribahasa/app/models"
"peribahasa/web/utils"
"strconv"
"strings"
"github.com/gorilla/mux"
)
// EditAsalPage controller: GET renders the edit form for an Asal record,
// POST applies the submitted "bahasa" value and redirects to the index.
var EditAsalPage = func(w http.ResponseWriter, r *http.Request) {
	tmpl := utils.ParseTemplates("web/templates/admin/asal/edit.html")
	id, err := strconv.Atoi(mux.Vars(r)["id"])
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	record := &models.Asal{}
	if err := record.Get(id); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	if r.Method == http.MethodPost {
		record.Bahasa = r.FormValue("bahasa")
		if err := record.Update(id); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		http.Redirect(w, r, "/admin/asal", http.StatusSeeOther)
		return
	}
	payload := map[string]interface{}{"data": record, "status": false}
	if err := tmpl.ExecuteTemplate(w, "base", payload); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
// AddNewAsalPage controller: GET renders the creation form, POST creates a
// new Asal from the submitted "bahasa" value and redirects to the index.
var AddNewAsalPage = func(w http.ResponseWriter, r *http.Request) {
	tmpl := utils.ParseTemplates("web/templates/admin/asal/new.html")
	if r.Method != http.MethodPost {
		if err := tmpl.ExecuteTemplate(w, "base", nil); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
		return
	}
	record := models.Asal{Bahasa: r.FormValue("bahasa")}
	if err := record.Create(); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	http.Redirect(w, r, "/admin/asal", http.StatusSeeOther)
}
// DeleteAsalPage controller: GET shows a confirmation page for the record,
// POST deletes it only when the "confirm" form value equals "ya" (case
// insensitive), then redirects to the index either way.
var DeleteAsalPage = func(w http.ResponseWriter, r *http.Request) {
	tmpl := utils.ParseTemplates("web/templates/admin/asal/delete.html")
	id, err := strconv.Atoi(mux.Vars(r)["id"])
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	record := &models.Asal{}
	if r.Method == http.MethodPost {
		confirmed := strings.ToLower(r.FormValue("confirm")) == "ya"
		if confirmed {
			if err := record.Delete(id); err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
		}
		http.Redirect(w, r, "/admin/asal", http.StatusSeeOther)
		return
	}
	if err := record.Get(id); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	if err := tmpl.ExecuteTemplate(w, "base", map[string]interface{}{"data": record}); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
}
// AsalIndexPage controller: lists every Asal record on the admin index page.
var AsalIndexPage = func(w http.ResponseWriter, r *http.Request) {
	tmpl := utils.ParseTemplates("web/templates/admin/asal/index.html")
	var records models.ListAsal
	if err := records.List(); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	payload := map[string]interface{}{"data": records}
	if err := tmpl.ExecuteTemplate(w, "base", payload); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
|
package main
import (
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"errors"
"github.com/dlintw/goconf"
. "github.com/wikiocracy/cryptoballot/cryptoballot"
"io/ioutil"
"strconv"
)
// Config holds runtime settings for this service, loaded from a goconf-style
// configuration file by loadFromFile.
type Config struct {
	configFile string // path the configuration was loaded from
	database   struct {
		host               string
		port               int
		user               string
		password           string
		dbname             string
		sslmode            string
		maxIdleConnections int // -1 when max_idle_connections is absent from the file
	}
	readme            []byte         // Static content for serving to the root readme (at "/")
	signingPrivateKey rsa.PrivateKey // For now we have a single key -- eventually there should be one key per election
	voterlistURL      string
	auditorPrivateKey rsa.PrivateKey // For accessing the voter-list server, which is only open to auditors.
	admins            []User         // List of administrators allowed to create and edit elections on this service.
}
//@@TEST: loading known good config from file
//@@TODO: transform this into a NewConfig func
//@@TODO: load keys from files
// loadFromFile populates config from the goconf file at filepath: database
// settings, the RSA signing key, the administrator public keys and the
// readme content. Any read/parse failure is returned via the named result.
func (config *Config) loadFromFile(filepath string) (err error) {
	config.configFile = filepath

	c, err := goconf.ReadConfigFile(filepath)
	if err != nil {
		return
	}

	config.database.host, err = c.GetString("ballot-clerk-db", "host")
	if err != nil {
		return
	}
	config.database.port, err = c.GetInt("ballot-clerk-db", "port")
	if err != nil {
		return
	}
	config.database.user, err = c.GetString("ballot-clerk-db", "user")
	if err != nil {
		return
	}
	config.database.password, err = c.GetString("ballot-clerk-db", "password")
	if err != nil {
		return
	}
	config.database.dbname, err = c.GetString("ballot-clerk-db", "dbname")
	if err != nil {
		return
	}
	config.database.sslmode, err = c.GetString("ballot-clerk-db", "sslmode")
	if err != nil {
		return
	}
	// For max_idle_connections missing should translates to -1
	if c.HasOption("ballot-clerk-db", "max_idle_connections") {
		config.database.maxIdleConnections, err = c.GetInt("ballot-clerk-db", "max_idle_connections")
		if err != nil {
			return
		}
	} else {
		config.database.maxIdleConnections = -1
	}

	// Ingest the private key into the global config object
	privateKeyLocation, err := c.GetString("ballot-clerk", "private-key")
	if err != nil {
		return
	}
	rawKeyPEM, err := ioutil.ReadFile(privateKeyLocation)
	if err != nil {
		return
	}
	PEMBlock, _ := pem.Decode(rawKeyPEM)
	// pem.Decode returns a nil block when no PEM data is found; the previous
	// code dereferenced it unconditionally and would panic on a bad key file
	if PEMBlock == nil {
		err = errors.New("Could not find a PEM block in " + privateKeyLocation)
		return
	}
	if PEMBlock.Type != "RSA PRIVATE KEY" {
		err = errors.New("Could not find an RSA PRIVATE KEY block in " + privateKeyLocation)
		return
	}
	signingPrivateKey, err := x509.ParsePKCS1PrivateKey(PEMBlock.Bytes)
	if err != nil {
		return
	}
	config.signingPrivateKey = *signingPrivateKey

	// Ingest administrators: the file may hold several concatenated
	// PUBLIC KEY blocks, one per administrator
	config.admins = make([]User, 0)
	adminPEMLocation, err := c.GetString("ballot-clerk", "admins")
	if err != nil {
		return
	}
	rawAdminPEM, err := ioutil.ReadFile(adminPEMLocation)
	if err != nil {
		return
	}
	var adminPEMBlock *pem.Block
	for {
		adminPEMBlock, rawAdminPEM = pem.Decode(rawAdminPEM)
		if adminPEMBlock == nil {
			break
		}
		if adminPEMBlock.Type != "PUBLIC KEY" {
			err = errors.New("Found unexpected " + adminPEMBlock.Type + " in " + adminPEMLocation)
			return
		}
		user, err := NewUserFromBlock(adminPEMBlock)
		if err != nil {
			return err
		}
		config.admins = append(config.admins, *user)
	}

	// Ingest the readme
	readmeLocation, err := c.GetString("ballot-clerk", "readme")
	if err != nil {
		return
	}
	config.readme, err = ioutil.ReadFile(readmeLocation)
	if err != nil {
		return
	}
	return
}
// databaseConnectionString assembles a libpq-style key=value connection
// string from the configured database fields. Zero-valued fields are
// omitted entirely; sslmode, when present, is appended without a
// trailing space (matching the historical output format).
func (config *Config) databaseConnectionString() string {
	connection := ""
	// appendField emits "key=value " for non-empty string fields.
	appendField := func(key, value string) {
		if value != "" {
			connection += key + "=" + value + " "
		}
	}
	appendField("host", config.database.host)
	if config.database.port != 0 {
		connection += "port=" + strconv.Itoa(config.database.port) + " "
	}
	appendField("user", config.database.user)
	appendField("password", config.database.password)
	appendField("dbname", config.database.dbname)
	if config.database.sslmode != "" {
		connection += "sslmode=" + config.database.sslmode
	}
	return connection
}
|
package queue_list
import (
"encoding/xml"
"github.com/tmconsulting/amadeus-ws-go/formats"
)
// QueueList is the Queue_List request message (QDQLRQ 11.1 1A). It asks the
// Amadeus queue system for the contents of a queue, with optional scrolling,
// filtering and sorting criteria.
type QueueList struct {
	XMLName xml.Name `xml:"http://xml.amadeus.com/QDQLRQ_11_1_1A Queue_List"`
	// presence implies that this is a follow up scrolling entry to a previous entry. Absence implies start of a new search
	Scroll *ActionDetailsTypeI `xml:"scroll,omitempty"`
	// used to specify the target office for which the queue count is to be displayed
	TargetOffice *AdditionalBusinessSourceInformationTypeI `xml:"targetOffice,omitempty"`
	// used to specify the queue if required
	QueueNumber *QueueInformationTypeI `xml:"queueNumber,omitempty"`
	// used to select the category
	CategoryDetails *SubQueueInformationTypeI `xml:"categoryDetails,omitempty"`
	// date range as system defined
	Date *StructuredDateTimeInformationType `xml:"date,omitempty"`
	// defines the start point for the search and may also define the end point of the search
	ScanRange *RangeDetailsTypeI `xml:"scanRange,omitempty"`
	SearchCriteria *SearchCriteria `xml:"searchCriteria,omitempty"`
	// Passenger name list (all the names in the PNR).
	PassengerName *TravellerInformationTypeI `xml:"passengerName,omitempty"`
	// The last 2 characters of the sine of the agent who placed the PNR on queue.
	AgentSine *UserIdentificationType `xml:"agentSine,omitempty"`
	// Account number issue from AIAN entry in the PNR.
	AccountNumber *AccountingInformationElementType `xml:"accountNumber,omitempty"`
	FlightInformation *FlightInformation `xml:"flightInformation,omitempty"`
	// This is the point of sale of segments in PNRs: - 9 char Amadeus Office ID. - OR 2 char GDS code for OA PNRs PNRs containing a segment sold in any Amadeus Office ID matching pattern NCE6X*** or ***BA0*** or sold in Sabre (1S) or Gallileo (1G).
	Pos *PointOfSaleInformationType `xml:"pos,omitempty"`
	// The repetition is 10 because we can transport: - until 5 tierLevel - until 5 customerValue, including possibly range of customerValue. If we have tierLevel in the FTI, the customerValue must not be present. If we have customerValue in the FTI, the tierLevel must not be present.
	TierLevelAndCustomerValue *FrequentTravellerIdentificationCodeType `xml:"tierLevelAndCustomerValue,omitempty"`
	SortCriteria *SortCriteria `xml:"sortCriteria,omitempty"`
}
// SearchCriteria narrows a queue search by option (ticketing, departure or
// creation) and by date range.
type SearchCriteria struct {
	// used to specify if ticketing, departure or creation options
	SearchOption *SelectionDetailsTypeI `xml:"searchOption,omitempty"`
	// used to specify the dates to be searched on
	Dates *StructuredPeriodInformationType `xml:"dates,omitempty"`
}
// FlightInformation filters queue results by flight attributes such as board
// or off point, carrier/flight number, booking class and segment status.
type FlightInformation struct {
	// It transport the type of flight information that will follow.
	FlightInformationType *StatusTypeI `xml:"flightInformationType,omitempty"`
	// Board point or Off Point.
	BoardPointOrOffPoint *OriginAndDestinationDetailsTypeI `xml:"boardPointOrOffPoint,omitempty"`
	// Airline code or Flight Number (in fact, airline + flight number)
	AirlineCodeOrFlightNumber *TransportIdentifierType `xml:"airlineCodeOrFlightNumber,omitempty"`
	// Booking class.
	ClassOfService *ProductInformationTypeI `xml:"classOfService,omitempty"`
	// Segment status code.
	SegmentStatus *RelatedProductInformationTypeI `xml:"segmentStatus,omitempty"`
}
// SortCriteria controls the ordering of the queue display.
type SortCriteria struct {
	// dummy for SDT clash
	Dumbo *DummySegmentTypeI `xml:"dumbo,omitempty"`
	// Determine the order of the display.
	SortOption *SelectionDetailsTypeI `xml:"sortOption,omitempty"`
}
// AccountingElementType carries a single account number.
type AccountingElementType struct {
	// Account number
	Number formats.AlphaNumericString_Length1To10 `xml:"number,omitempty"`
}
// AccountingInformationElementType wraps the accounting data attached to a PNR.
type AccountingInformationElementType struct {
	// One of these 4 data elements is mandatory, but none in particular
	Account *AccountingElementType `xml:"account,omitempty"`
}
// ActionDetailsTypeI describes a scrolling action for a follow-up request.
type ActionDetailsTypeI struct {
	// used for scrollling purposes
	NumberOfItemsDetails *ProcessingInformationTypeI `xml:"numberOfItemsDetails,omitempty"`
}
// AdditionalBusinessSourceInformationTypeI identifies the office targeted by
// the queue request.
type AdditionalBusinessSourceInformationTypeI struct {
	// the office we are targetting
	SourceType *SourceTypeDetailsTypeI `xml:"sourceType,omitempty"`
	// contains the office ID
	OriginatorDetails *OriginatorIdentificationDetailsTypeI `xml:"originatorDetails,omitempty"`
}
// CompanyIdentificationTypeI identifies an airline.
type CompanyIdentificationTypeI struct {
	// Marketing company.
	MarketingCompany formats.AlphaNumericString_Length1To3 `xml:"marketingCompany,omitempty"`
}
// DummySegmentTypeI is an empty placeholder segment required by the schema.
type DummySegmentTypeI struct {
	XMLName xml.Name `xml:"http://xml.amadeus.com/QDQLRQ_11_1_1A DummySegmentTypeI"`
}
// FrequentTravellerIdentificationCodeType carries frequent-traveller tier
// level or customer value filters.
type FrequentTravellerIdentificationCodeType struct {
	// Frequent Traveller Info. Repetition 2 is used only in the case we provide a customer value range (only one is accepted).
	FrequentTravellerDetails *FrequentTravellerIdentificationType `xml:"frequentTravellerDetails,omitempty"`
	DummyNET struct {
	} `xml:"Dummy.NET,omitempty"`
}
// FrequentTravellerIdentificationType holds either a tier level or a customer
// value — the two are mutually exclusive.
type FrequentTravellerIdentificationType struct {
	// This field specifies the Tier Level. This is a 4 letter string indicating the airline's ranking of frequent flyers. It is not to be confused with Alliance tier. If tierLevel is filled in a given FTI segment, customerValue must not be filled.
	TierLevel formats.AlphaNumericString_Length1To4 `xml:"tierLevel,omitempty"`
	// This field specifies the Customer value. This is a 4 letter string indicating the airline's ranking of frequent flyers. It is not to be confused with Alliance tier. If customerValue is filled in a given FTI segment, tierLevel field must not be filled.
	CustomerValue formats.NumericInteger_Length1To4 `xml:"customerValue,omitempty"`
}
// LocationTypeU names an office; wildcards are permitted.
type LocationTypeU struct {
	// Office identification. It can contain wildcards.
	Name formats.AlphaNumericString_Length1To9 `xml:"name,omitempty"`
}
// OriginAndDestinationDetailsTypeI holds a board point and off point pair.
type OriginAndDestinationDetailsTypeI struct {
	// Board point
	Origin formats.AlphaNumericString_Length3To3 `xml:"origin,omitempty"`
	// Off point
	Destination formats.AlphaNumericString_Length3To3 `xml:"destination,omitempty"`
}
// OriginatorIdentificationDetailsTypeI carries the in-house id of the
// targeted office.
type OriginatorIdentificationDetailsTypeI struct {
	// the office that is being targetted
	InHouseIdentification1 formats.AlphaNumericString_Length1To9 `xml:"inHouseIdentification1,omitempty"`
}
// PartyIdentifierTypeU identifies a GDS.
type PartyIdentifierTypeU struct {
	// GDS identifier: 1A, 1S, 1G.
	PartyIdentifier formats.AlphaNumericString_Length1To3 `xml:"partyIdentifier,omitempty"`
}
// PointOfSaleInformationType identifies where a segment was sold: a GDS and,
// for Amadeus (1A), an office id.
type PointOfSaleInformationType struct {
	// Party identification.
	PointOfSale *PartyIdentifierTypeU `xml:"pointOfSale,omitempty"`
	// Office id in case the party identifier is 1A.
	LocationDetails *LocationTypeU `xml:"locationDetails,omitempty"`
}
// ProcessingInformationTypeI indicates the scrolling direction.
type ProcessingInformationTypeI struct {
	// determine if move up or move down required
	ActionQualifier formats.AlphaNumericString_Length1To3 `xml:"actionQualifier,omitempty"`
}
// ProductDetailsTypeI carries a booking-class designator.
type ProductDetailsTypeI struct {
	// Class designator.
	Designator formats.AlphaNumericString_Length1To1 `xml:"designator,omitempty"`
}
// ProductIdentificationDetailsTypeI carries a flight number.
type ProductIdentificationDetailsTypeI struct {
	// Flight number.
	FlightNumber formats.AlphaNumericString_Length1To4 `xml:"flightNumber,omitempty"`
}
// ProductInformationTypeI wraps the booking-class filter.
type ProductInformationTypeI struct {
	// Booking class details.
	BookingClassDetails *ProductDetailsTypeI `xml:"bookingClassDetails,omitempty"`
}
// QueueInformationDetailsTypeI carries a queue number.
type QueueInformationDetailsTypeI struct {
	// queue number
	Number formats.NumericInteger_Length1To2 `xml:"number,omitempty"`
}
// QueueInformationTypeI wraps the queue identification.
type QueueInformationTypeI struct {
	// queue identification
	QueueDetails *QueueInformationDetailsTypeI `xml:"queueDetails,omitempty"`
}
// RangeDetailsTypeI describes the scan range for the search.
type RangeDetailsTypeI struct {
	// define is a range or not
	RangeQualifier formats.AlphaNumericString_Length1To3 `xml:"rangeQualifier,omitempty"`
	// define the start and possible end point of the scan
	RangeDetails *RangeTypeI `xml:"rangeDetails,omitempty"`
}
// RangeTypeI holds the numeric bounds of a scan.
type RangeTypeI struct {
	// starting point of the scan
	Min formats.NumericInteger_Length1To18 `xml:"min,omitempty"`
	// ending point of the scan
	Max formats.NumericInteger_Length1To18 `xml:"max,omitempty"`
}
// RelatedProductInformationTypeI carries a two-letter segment status code.
type RelatedProductInformationTypeI struct {
	// Status code
	StatusCode formats.AlphaNumericString_Length2To2 `xml:"statusCode,omitempty"`
}
// SelectionDetailsInformationTypeI carries a search/sort option code.
type SelectionDetailsInformationTypeI struct {
	// used to determine if a new start or a continuation Also used for search and sort criteria on the ticketing, departure and creation dates
	Option formats.AlphaNumericString_Length1To3 `xml:"option,omitempty"`
}
// SelectionDetailsTypeI wraps the search/sort selection details.
type SelectionDetailsTypeI struct {
	// used for search and sort criteria
	SelectionDetails *SelectionDetailsInformationTypeI `xml:"selectionDetails,omitempty"`
}
// SourceTypeDetailsTypeI carries the mandatory source qualifier.
type SourceTypeDetailsTypeI struct {
	// not needed - but mandatory field So just stick a 4 in it !!
	SourceQualifier1 formats.AlphaNumericString_Length1To3 `xml:"sourceQualifier1,omitempty"`
}
// StatusDetailsTypeI indicates which kind of flight information follows.
type StatusDetailsTypeI struct {
	// Indicator showing what flight information will be transported.
	Indicator formats.AlphaNumericString_Length1To3 `xml:"indicator,omitempty"`
}
// StatusTypeI wraps the flight status details.
type StatusTypeI struct {
	// Flight status details.
	StatusDetails *StatusDetailsTypeI `xml:"statusDetails,omitempty"`
}
// StructuredDateTimeInformationType selects a system-defined date range.
type StructuredDateTimeInformationType struct {
	// used for date range only The date ranges are defined on central system as 1,2,3,4 The actual values of the ranges are set in the office profile
	TimeMode formats.NumericInteger_Length1To3 `xml:"timeMode,omitempty"`
}
// StructuredDateTimeType is a calendar date broken into year, month and day.
type StructuredDateTimeType struct {
	// Year number.
	Year formats.Year_YYYY `xml:"year,omitempty"`
	// Month number in the year ( begins to 1 )
	Month formats.Month_mM `xml:"month,omitempty"`
	// Day number in the month ( begins to 1 )
	Day formats.Day_nN `xml:"day,omitempty"`
}
// StructuredPeriodInformationType is a date/time interval.
type StructuredPeriodInformationType struct {
	// Convey the begin date/time of a period.
	BeginDateTime *StructuredDateTimeType `xml:"beginDateTime,omitempty"`
	// Convey the end date/time of a period.
	EndDateTime *StructuredDateTimeType `xml:"endDateTime,omitempty"`
}
// SubQueueInformationDetailsTypeI identifies a queue category by type,
// number and/or name.
type SubQueueInformationDetailsTypeI struct {
	// E for every category A for cats with items to be worked C for category number N for nickname CN for both category number and nickname numeric for date range
	IdentificationType formats.AlphaNumericString_Length1To3 `xml:"identificationType,omitempty"`
	// category number
	ItemNumber formats.AlphaNumericString_Length1To3 `xml:"itemNumber,omitempty"`
	// used for nickname on inbound used for category name on outbound
	ItemDescription formats.AlphaNumericString_Length1To35 `xml:"itemDescription,omitempty"`
}
// SubQueueInformationTypeI wraps the category selection.
type SubQueueInformationTypeI struct {
	// identifies the category or categories.
	SubQueueInfoDetails *SubQueueInformationDetailsTypeI `xml:"subQueueInfoDetails,omitempty"`
}
// TransportIdentifierType combines carrier identification with flight details.
type TransportIdentifierType struct {
	// Company identification.
	CompanyIdentification *CompanyIdentificationTypeI `xml:"companyIdentification,omitempty"`
	// Flight details.
	FlightDetails *ProductIdentificationDetailsTypeI `xml:"flightDetails,omitempty"`
}
// TravellerInformationTypeI wraps traveller surname information.
type TravellerInformationTypeI struct {
	// Traveller surname information.
	PaxDetails *TravellerSurnameInformationTypeI `xml:"paxDetails,omitempty"`
}
// TravellerSurnameInformationTypeI carries a passenger surname.
type TravellerSurnameInformationTypeI struct {
	// Passenger surname.
	Surname formats.AlphaNumericString_Length1To70 `xml:"surname,omitempty"`
}
// UserIdentificationType identifies the agent who queued the PNR.
type UserIdentificationType struct {
	// The last 2 characters of the sine of the agent who placed the PNR on queue.
	Originator formats.AlphaNumericString_Length1To2 `xml:"originator,omitempty"`
}
|
// Copyright 2019 Yunion
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package netplan
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestNewEthernetConfig verifies that a DHCP4 ethernet config renders as the
// minimal "dhcp4: true" YAML document.
func TestNewEthernetConfig(t *testing.T) {
	c := NewDHCP4EthernetConfig()
	assert := assert.New(t)
	assert.YAMLEq("dhcp4: true", c.YAMLString())
}
// TestNewBondMode4 verifies that a static-addressed 802.3ad (LACP) bond over
// two interfaces renders the expected YAML, including the default
// mii-monitor-interval parameter.
func TestNewBondMode4(t *testing.T) {
	c := NewBondMode4(
		&EthernetConfig{
			DHCP4:     false,
			Gateway4:  "192.168.1.1",
			Addresses: []string{"192.168.1.252/24"},
			Nameservers: &Nameservers{
				Search:    []string{"local"},
				Addresses: []string{"8.8.8.8", "8.8.4.4"},
			},
		},
		[]string{"enp2s0", "enp3s0"},
	)
	// NOTE(review): display lost the original raw-string indentation; this
	// reconstruction is semantically equivalent YAML (assert.YAMLEq compares
	// parsed documents, not bytes).
	yamlStr := `
addresses:
  - 192.168.1.252/24
dhcp4: false
gateway4: 192.168.1.1
interfaces:
  - enp2s0
  - enp3s0
nameservers:
  addresses:
    - 8.8.8.8
    - 8.8.4.4
  search:
    - local
parameters:
  mii-monitor-interval: 100
  mode: "802.3ad"
`
	assert := assert.New(t)
	assert.YAMLEq(yamlStr, c.YAMLString())
}
// TestNewNetwork verifies a full netplan document combining a DHCP ethernet
// and a statically configured ethernet under the networkd renderer.
func TestNewNetwork(t *testing.T) {
	n := NewNetwork()
	n.AddEthernet("eth0", NewDHCP4EthernetConfig())
	n.AddEthernet("eth1", NewStaticEthernetConfig(
		"10.10.10.2/24",
		"10.10.10.1",
		[]string{"mydomain", "otherdomain"},
		[]string{"114.114.114.114"},
		nil,
	))
	c := NewConfiguration(n)
	// NOTE(review): indentation reconstructed; YAMLEq compares parsed
	// documents, so equivalent nesting is sufficient.
	yamlStr := `
network:
  ethernets:
    eth0:
      dhcp4: true
    eth1:
      addresses:
        - 10.10.10.2/24
      dhcp4: false
      gateway4: 10.10.10.1
      nameservers:
        addresses:
          - 114.114.114.114
        search: ["mydomain", "otherdomain"]
  renderer: networkd
  version: 2
`
	assert := assert.New(t)
	assert.YAMLEq(yamlStr, c.YAMLString())
}
|
// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package delegate
import (
"fmt"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
)
// delegateShowDatabases rewrites SHOW DATABASES into a SELECT over
// crdb_internal.databases; when WITH COMMENT is requested the query
// additionally left-joins database comments from system.comments.
func (d *delegator) delegateShowDatabases(stmt *tree.ShowDatabases) (tree.Statement, error) {
	query := `SELECT
	name AS database_name, owner, primary_region, regions, survival_goal`
	if stmt.WithComment {
		query += `, comment`
	}
	// The empty database prefix ("") makes the lookup independent of the
	// current database.
	query += `
FROM
	"".crdb_internal.databases d
`
	if stmt.WithComment {
		// Restrict the join to database-level comments only.
		query += fmt.Sprintf(`
LEFT JOIN
	(
		SELECT
			object_id, type, comment
		FROM
			system.comments
		WHERE
			type = %d
	) c
ON
	c.object_id = d.id`, keys.DatabaseCommentType)
	}
	query += `
ORDER BY
	database_name`
	return parse(query)
}
|
package lc
import "fmt"
// Time: O(n)
// Benchmark: 0ms 2.2mb | 100%
// getHint returns the Bulls and Cows hint "xAyB" for a guess against a
// secret: bulls (A) are digits correct in both value and position; cows (B)
// are digits present in the secret but misplaced. Both strings are assumed
// to be equal-length digit strings.
func getHint(secret string, guess string) string {
	var counts [10]int
	for _, d := range guess {
		counts[d-'0']++
	}
	bulls, matches := 0, 0
	for i, d := range secret {
		if byte(d) == guess[i] {
			bulls++
		}
		// Count every value match (positional or not); cows fall out as
		// matches minus bulls.
		if counts[d-'0'] > 0 {
			counts[d-'0']--
			matches++
		}
	}
	return fmt.Sprintf("%dA%dB", bulls, matches-bulls)
}
|
package controllers
import (
	"fmt"
	"log"
	"net/http"
	"strconv"
	"strings"
	"time"

	restful "github.com/emicklei/go-restful"
	"github.com/tipounet/go-bank/dao"
	"github.com/tipounet/go-bank/model"
	"github.com/tipounet/go-bank/service"
)
// Package-level singletons shared by all user HTTP handlers.
var (
	userService service.UserService
	jwt service.JWTService
)
// init wires the user service to its DAO backed by the shared database
// connection, unless a DAO was already injected (e.g. by tests).
func init() {
	if userService.Dao == nil {
		dao := dao.UserDao{
			DB: dao.GetDbConnexion(),
		}
		userService = service.UserService{
			Dao: &dao,
		}
	}
}
// UserResource handles the HTTP resources for users.
type UserResource struct{}
// RegisterTo builds and returns the restful web service exposing the /user
// routes. All routes except authentication are protected by the JWT filter.
func (u UserResource) RegisterTo() *restful.WebService {
	ws := new(restful.WebService)
	ws.Path("/user").
		Consumes(restful.MIME_XML, restful.MIME_JSON).
		Produces(restful.MIME_JSON, restful.MIME_XML)
	ws.Route(ws.GET("").To(u.GetAllUser).Filter(jwtFilter))
	ws.Route(ws.GET("/{id}").To(u.SearchUserByID).Filter(jwtFilter))
	ws.Route(ws.POST("").To(u.CreateUser).Filter(jwtFilter))
	ws.Route(ws.POST("/authenticate").To(u.UserAuthenticate))
	ws.Route(ws.PUT("").To(u.UpdateUser).Filter(jwtFilter))
	ws.Route(ws.DELETE("/{id}").To(u.DeleteUserID).Filter(jwtFilter))
	// BUG FIX: DELETE /logout previously routed to DeleteUserID, which left
	// UserLogout unreachable and made logout attempt a user deletion.
	ws.Route(ws.DELETE("/logout").To(u.UserLogout).Filter(jwtFilter))
	return ws
}
// GetAllUser returns the complete list of users.
func (u UserResource) GetAllUser(request *restful.Request, response *restful.Response) {
	users, err := userService.Read()
	if err != nil {
		response.WriteError(http.StatusBadRequest, err)
		return
	}
	response.WriteEntity(users)
}
// SearchUserByID looks up a single user from the {id} path parameter.
func (u UserResource) SearchUserByID(request *restful.Request, response *restful.Response) {
	id, convErr := strconv.Atoi(request.PathParameter("id"))
	if convErr != nil {
		response.WriteErrorString(http.StatusBadRequest, "Paramètre id obligatoire")
		return
	}
	user, err := userService.Search(int64(id))
	if err != nil {
		// FIXME better message
		log.Println("Erreur sur le select SQL ", err)
		response.WriteError(http.StatusBadRequest, err)
		return
	}
	// A zero UserID means the query matched nothing.
	if user.UserID == 0 {
		response.WriteErrorString(http.StatusBadRequest, "Unknown User for ID ")
		return
	}
	response.WriteEntity(user)
}
// CreateUser handles POST /user with the user as JSON in the request body;
// it echoes the created user back on success.
func (u UserResource) CreateUser(request *restful.Request, response *restful.Response) {
	user := new(model.User)
	if err := request.ReadEntity(&user); err != nil {
		response.WriteError(http.StatusBadRequest, err)
		return
	}
	if err := userService.Create(user); err != nil {
		response.WriteError(http.StatusInternalServerError, err)
		return
	}
	response.WriteEntity(user)
}
// UpdateUser updates an existing user from the JSON request body and answers
// 204 No Content on success.
func (u UserResource) UpdateUser(request *restful.Request, response *restful.Response) {
	user := new(model.User)
	if err := request.ReadEntity(&user); err != nil {
		response.WriteError(http.StatusBadRequest, err)
		return
	}
	if err := userService.Update(user); err != nil {
		response.WriteError(http.StatusInternalServerError, err)
		return
	}
	response.WriteHeader(http.StatusNoContent)
}
// DeleteUserID deletes the user identified by the {id} path parameter and
// answers 204 No Content on success.
func (u UserResource) DeleteUserID(request *restful.Request, response *restful.Response) {
	strID := request.PathParameter("id")
	if strID == "" {
		response.WriteErrorString(http.StatusBadRequest, "Paramètre ID obligatoire non vide")
		return
	}
	ID, errConv := strconv.Atoi(strID)
	if errConv != nil {
		response.WriteError(http.StatusBadRequest, errConv)
		return
	}
	if err := userService.Delete(&model.User{UserID: int64(ID)}); err != nil {
		// BUG FIX: string(ID) converted the int to a rune (e.g. 65 -> "A");
		// strconv.Itoa renders the decimal digits instead.
		msg := "Suppresion du user d'id `" + strconv.Itoa(ID) + "` impossible. \n" + err.Error()
		response.WriteErrorString(http.StatusInternalServerError, msg)
		return
	}
	response.WriteHeader(http.StatusNoContent)
}
// UserAuthenticate authenticates the user carried in the request payload.
// Lookup is by email when provided, otherwise by pseudo; on success the JWT
// token is attached to the response and the user (password stripped) is
// returned. Missing credentials or a failed match yield 400/401.
func (u UserResource) UserAuthenticate(request *restful.Request, response *restful.Response) {
	user := new(model.User)
	err := request.ReadEntity(&user)
	if err != nil {
		response.WriteError(http.StatusBadRequest, err)
	} else {
		if isEmptyString(user.Pwd) {
			response.WriteErrorString(http.StatusBadRequest, "Information de connexion (utilisateur ou / ou mot de passe) manquante(s)")
		} else {
			var aerr error
			var retour model.User
			if !isEmptyString(user.Email) {
				log.Printf("Recherche par email : %v\n", user.Email)
				retour, aerr = userService.UserAuthenticateByEMail(user.Email, user.Pwd)
			} else if !isEmptyString(user.Pseudo) {
				log.Printf("Recherche par pseudo : %v %v\n", user.Email, user.Pwd)
				retour, aerr = userService.UserAuthenticate(user.Pseudo, user.Pwd)
			} else {
				log.Printf("Fail y a ni mail ni pseudo %v\n", user)
				aerr = fmt.Errorf("Information de connexion (utilisateur ou / ou mot de passe) manquante(s)")
			}
			if aerr != nil {
				response.WriteError(http.StatusBadRequest, aerr)
			} else {
				if retour.UserID > 0 {
					// Strip the password from the object returned to the client.
					retour.Pwd = ""
					addJWTtokenToResponse(retour, response)
					response.WriteEntity(retour)
				} else {
					response.WriteErrorString(http.StatusUnauthorized, "Erreur d'authentification, utilisateur inconnu ou mot de passe erroné")
				}
			}
		}
	}
}
// UserLogout handles HTTP DELETE on /user/logout: it logs the user out by
// expiring the JWT cookie and clearing the Authorization header.
// (If connected users were ever persisted server-side, they would also need
// to be removed here.)
func (u UserResource) UserLogout(request *restful.Request, response *restful.Response) {
	// An already-expired cookie instructs the browser to drop the token.
	expired := &http.Cookie{
		Name:    "jwt",
		Value:   "",
		Path:    "/",
		Expires: time.Now().Add(-20 * time.Minute),
	}
	http.SetCookie(response.ResponseWriter, expired)
	response.AddHeader("Authorization", "")
}
// isEmptyString reports whether s is empty or contains only whitespace.
// The original only compared against "", which the author's own comment
// flagged as insufficient ("how to properly test that a string is empty?");
// trimming whitespace implements the intended semantics, so credentials made
// of spaces are now rejected too.
func isEmptyString(s string) bool {
	return strings.TrimSpace(s) == ""
}
|
package mirror
import (
"context"
"crypto/tls"
"crypto/x509"
"fmt"
"net"
"net/http"
"os"
"path"
"path/filepath"
"time"
"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/crane"
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1/remote"
"k8s.io/klog/v2"
)
// mappingFile is the file name used to record image mirror mappings.
const mappingFile = "mapping.txt"
// getRemoteOpts returns the go-containerregistry remote options used for
// registry access: default-keychain auth, the shared transport (optionally
// skipping TLS verification) and the caller's context.
func getRemoteOpts(ctx context.Context, insecure bool) []remote.Option {
	return []remote.Option{
		remote.WithAuthFromKeychain(authn.DefaultKeychain),
		remote.WithTransport(createRT(insecure)),
		remote.WithContext(ctx),
	}
}
// getCraneOpts mirrors getRemoteOpts for the crane API; when insecure is set
// it additionally enables plain-HTTP registry access via crane.Insecure.
func getCraneOpts(ctx context.Context, insecure bool) []crane.Option {
	opts := []crane.Option{
		crane.WithAuthFromKeychain(authn.DefaultKeychain),
		crane.WithTransport(createRT(insecure)),
		crane.WithContext(ctx),
	}
	if insecure {
		opts = append(opts, crane.Insecure)
	}
	return opts
}
// getNameOpts returns the image-name parsing options: name.Insecure when
// plain-HTTP registries are allowed, otherwise no options (nil).
func getNameOpts(insecure bool) []name.Option {
	if !insecure {
		return nil
	}
	return []name.Option{name.Insecure}
}
func createRT(insecure bool) http.RoundTripper {
return &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
// By default, we wrap the transport in retries, so reduce the
// default dial timeout to 5s to avoid 5x 30s of connection
// timeouts when doing the "ping" on certain http registries.
Timeout: 5 * time.Second,
KeepAlive: 30 * time.Second,
}).DialContext,
ForceAttemptHTTP2: true,
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
TLSClientConfig: &tls.Config{
InsecureSkipVerify: insecure,
MinVersion: tls.VersionTLS12,
},
}
}
// createResultsDir creates (and returns the path of) a timestamped
// results-<unix-seconds> directory under the configured working dir.
// The path is returned even when MkdirAll fails, alongside the error.
func (o *MirrorOptions) createResultsDir() (string, error) {
	dir := filepath.Join(o.Dir, fmt.Sprintf("results-%v", time.Now().Unix()))
	err := os.MkdirAll(dir, os.ModePerm)
	return dir, err
}
// newMetadataImage returns the fully qualified reference of the oc-mirror
// metadata image in the target registry, tagged with uid.
func (o *MirrorOptions) newMetadataImage(uid string) string {
	return path.Join(o.ToMirror, o.UserNamespace, "oc-mirror") + ":" + uid
}
func getTLSConfig() (*tls.Config, error) {
certPool, err := x509.SystemCertPool()
if err != nil {
return nil, err
}
config := &tls.Config{
RootCAs: certPool,
MinVersion: tls.VersionTLS12,
}
return config, nil
}
// checkErr either swallows err (logging it and recording that an error was
// skipped) or returns it. Swallowing happens only when ContinueOnError is
// set AND the error is acceptable: acceptableErr classifies errors when
// provided, otherwise every error type is considered acceptable. logMessage,
// when provided, overrides the logged/returned message.
func (o *MirrorOptions) checkErr(err error, acceptableErr func(error) bool, logMessage func(error) string) error {
	if err == nil {
		return nil
	}
	acceptable := true
	if acceptableErr != nil {
		acceptable = acceptableErr(err)
	}
	message := err.Error()
	if logMessage != nil {
		message = logMessage(err)
	}
	if !o.ContinueOnError || !acceptable {
		return fmt.Errorf("%v", message)
	}
	// Instead of returning an error, just log it.
	klog.Errorf("error: %v", message)
	o.continuedOnError = true
	return nil
}
|
package array
import (
"fmt"
"github.com/project-flogo/core/data"
"github.com/project-flogo/core/data/coerce"
"github.com/project-flogo/core/data/expression/function"
"github.com/project-flogo/core/support/log"
)
// sumFunc implements the flogo expression function array.sum.
type sumFunc struct {
}
// init registers array.sum with the flogo function registry.
func init() {
	function.Register(&sumFunc{})
}
// Name returns the function name as used in expressions.
func (a *sumFunc) Name() string {
	return "sum"
}
// Sig declares a single argument of any type and no variadic tail.
func (sumFunc) Sig() (paramTypes []data.Type, isVariadic bool) {
	return []data.Type{data.TypeAny}, false
}
// Eval sums the numeric elements of the array argument and returns the total
// as float64. A nil argument sums to 0; a non-array argument or a non-numeric
// element yields an error.
func (sumFunc) Eval(params ...interface{}) (interface{}, error) {
	arr := params[0]
	log.RootLogger().Debugf("Start array sum function with parameters %+v", arr)
	if arr == nil {
		// Treat a missing array as an empty one.
		return 0, nil
	}
	newArray, err := coerce.ToArray(arr)
	if err != nil {
		// BUG FIX: message previously read "array.sun".
		return nil, fmt.Errorf("array.sum function argument must be array")
	}
	sum := float64(0)
	for _, v := range newArray {
		num, err := coerce.ToFloat64(v)
		if err != nil {
			return nil, fmt.Errorf("array element must be number for array.sum function")
		}
		sum = sum + num
	}
	log.RootLogger().Debugf("array sum function done, final result %+v", sum)
	return sum, nil
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package network
import (
"context"
"time"
"chromiumos/tast/common/shillconst"
"chromiumos/tast/ctxutil"
"chromiumos/tast/local/bundles/cros/network/veth"
"chromiumos/tast/local/shill"
"chromiumos/tast/local/upstart"
"chromiumos/tast/testing"
)
// init registers the ShillVeth test with the tast framework, using the
// shillReset fixture so Shill state is restored afterwards.
func init() {
	testing.AddTest(&testing.Test{
		Func: ShillVeth,
		Desc: "Verifies that a test veth pair creates a Device and Service in Shill",
		Contacts: []string{
			"stevenjb@google.com",
			"cros-network-health-team@google.com",
		},
		Attr:    []string{"group:mainline", "informational"},
		Fixture: "shillReset",
		Timeout: 5 * time.Minute,
	})
}
// ShillVeth sets up a test virtual Ethernet pair and ensures that a corresponding
// Shill Device and Service are created.
// Note: This configuration is only used in tests and the veth pair will not by default be connected.
// This also ensures that changes to the veth Device Priority do not affect the Ethernet device.
func ShillVeth(ctx context.Context, s *testing.State) {
	m, err := shill.NewManager(ctx)
	if err != nil {
		// Include the underlying error for debuggability (was dropped before).
		s.Fatal("Failed creating shill manager proxy: ", err)
	}
	var ethProperties = map[string]interface{}{
		shillconst.ServicePropertyType:        shillconst.TypeEthernet,
		shillconst.ServicePropertyIsConnected: true,
	}
	// Check whether Ethernet is connected.
	s.Log("Waiting for initial Ethernet Service")
	ethService, err := m.WaitForServiceProperties(ctx, ethProperties, 5*time.Second)
	if err != nil {
		// Not fatal: the restart portion of the test is skipped later.
		s.Log("No Ethernet Service: ", err)
	}
	// Set up a test profile.
	restarted := false
	popFunc, err := m.PushTestProfile(ctx)
	if err != nil {
		s.Fatal("Failed to push test profile: ", err)
	}
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 10*time.Second)
	defer cancel()
	defer func() {
		// After a Shill restart the pushed profile is gone; remove it
		// instead of popping.
		if !restarted {
			popFunc()
		} else {
			m.RemoveTestProfile(cleanupCtx)
		}
	}()
	// Prepare virtual ethernet link.
	const (
		// Note: Shill does not manage interfaces with names prefixed with 'veth',
		// so use 'test' as a prefix for both of these.
		vethIface = "test_veth"
		peerIface = "test_peer"
	)
	vEth, err := veth.NewPair(ctx, vethIface, peerIface)
	if err != nil {
		// Include the error; the previous else-wrapped defer is flattened
		// since s.Fatal never returns.
		s.Fatal("Failed to create veth pair: ", err)
	}
	defer func() {
		if e := vEth.Delete(cleanupCtx); e != nil {
			testing.ContextLog(cleanupCtx, "Failed to cleanup veth: ", e)
		}
	}()
	d, err := m.WaitForDeviceByName(ctx, vEth.Iface.Name, 3*time.Second)
	if err != nil {
		s.Fatal("Failed to find veth device managed by Shill: ", err)
	}
	service, err := d.WaitForSelectedService(ctx, shillconst.DefaultTimeout)
	if err != nil {
		s.Fatal("Failed to get Service: ", err)
	}
	serviceProps, err := service.GetProperties(ctx)
	if err != nil {
		s.Fatal("Failed to get Service properties: ", err)
	}
	state, err := serviceProps.GetString(shillconst.ServicePropertyState)
	if err != nil {
		s.Error("Failed to get Service.State: ", err)
	} else {
		s.Log("Service.State: ", state)
	}
	initialPri, err := serviceProps.GetInt32(shillconst.ServicePropertyPriority)
	if err != nil {
		s.Fatal("Failed to get Service.Priority: ", err)
	}
	// Prioritize the veth Service. Note: This does not trigger a connect.
	// The priority should be persisted to the temporary profile, not the default profile.
	if err = service.SetProperty(ctx, shillconst.ServicePropertyPriority, initialPri+1); err != nil {
		s.Fatal("Failed to set Priority: ", err)
	}
	if ethService == nil {
		s.Log("No primary Ethernet service, exiting without testing Shill restart")
		return
	}
	// Restart Shill which will ensure that any properties written to the default profile are saved.
	// Note: that should not include the Priority set above.
	// On restart Shill will create a Device for the built-in Ethernet with properties from the default profile.
	s.Log("Restarting Shill")
	if err := upstart.RestartJob(ctx, "shill"); err != nil {
		s.Fatal("Failed starting Shill: ", err)
	}
	restarted = true
	// Verify that Ethernet becomes connected.
	s.Log("Waiting for Ethernet Service after Shill restart")
	ethService, err = m.WaitForServiceProperties(ctx, ethProperties, 60*time.Second)
	if err != nil {
		s.Fatal("Failed to get Ethernet Service after Shill restart: ", err)
	}
	// Verify that Ethernet has the initial Priority, not the one set above.
	ethServiceProps, err := ethService.GetProperties(ctx)
	if err != nil {
		s.Fatal("Failed to get Ethernet Service properties: ", err)
	}
	priority, err := ethServiceProps.GetInt32(shillconst.ServicePropertyPriority)
	if err != nil {
		s.Fatal("Failed to get Ethernet Service.Priority: ", err)
	}
	if priority != initialPri {
		// BUG FIX: the message previously printed the (necessarily nil) err
		// instead of the expected priority value.
		s.Fatalf("Unexpected Ethernet Service.Priority: got %v, want %v", priority, initialPri)
	}
}
|
package main
import "container/heap"
import "strconv"
// Leetcode 230. (medium)
// kthSmallest returns the k-th smallest value (1-indexed) in a BST by
// in-order traversal with early termination.
func kthSmallest(root *TreeNode, k int) int {
	_, res := recursiveKthSmallest(root, 0, k, -1)
	return res
}
// recursiveKthSmallest performs an in-order walk; i counts visited nodes so
// far and res carries the answer once found. Traversal short-circuits as
// soon as i reaches k.
func recursiveKthSmallest(root *TreeNode, i, k, res int) (int, int) {
	if root == nil || i >= k {
		return i, res
	}
	i, res = recursiveKthSmallest(root.Left, i, k, res)
	i++
	// The current node is the i-th smallest in in-order position.
	if i == k {
		return i, root.Val
	}
	i, res = recursiveKthSmallest(root.Right, i, k, res)
	return i, res
}
// Leetcode 5403. (hard)
// kthSmallest2 (Leetcode 1439) returns the k-th smallest sum obtainable by
// picking exactly one element from each row of mat (rows sorted ascending).
// It runs a best-first search over index vectors with a min-heap, using a
// memo set to avoid pushing the same index combination twice.
func kthSmallest2(mat [][]int, k int) int {
	var h KthSmallestHeap
	heap.Init(&h)
	// Start from the smallest possible sum: column 0 of every row.
	sum := 0
	for i := range mat {
		sum += mat[i][0]
	}
	idx := make([]int, len(mat))
	heap.Push(&h, KthSmallestPair{sum, idx})
	memo := make(map[string]bool)
	// Pop k times; each pop yields the next smallest sum, and each popped
	// state expands by advancing one row's index.
	for i := 0; i < k; i++ {
		p := heap.Pop(&h).(KthSmallestPair)
		sum, idx = p.sum, p.idx
		for j := range idx {
			if idx[j] == len(mat[j])-1 {
				continue
			}
			tmpIdx := make([]int, len(idx))
			copy(tmpIdx, idx)
			tmpIdx[j]++
			str := toString(tmpIdx)
			if _, ok := memo[str]; !ok {
				// Incremental sum update: swap out the old element of row j.
				tmpSum := sum - mat[j][idx[j]] + mat[j][idx[j]+1]
				heap.Push(&h, KthSmallestPair{tmpSum, tmpIdx})
				memo[str] = true
			}
		}
	}
	return sum
}
// KthSmallestPair is a heap entry: a candidate sum and the per-row column
// indices that produce it.
type KthSmallestPair struct {
	sum int
	idx []int
}
// KthSmallestHeap is a min-heap of KthSmallestPair ordered by sum,
// implementing container/heap.Interface.
type KthSmallestHeap []KthSmallestPair
func (h KthSmallestHeap) Len() int {
	return len(h)
}
func (h KthSmallestHeap) Less(i, j int) bool {
	return h[i].sum < h[j].sum
}
func (h KthSmallestHeap) Swap(i, j int) {
	h[i], h[j] = h[j], h[i]
}
// Push appends x; called only via heap.Push.
func (h *KthSmallestHeap) Push(x interface{}) {
	*h = append(*h, x.(KthSmallestPair))
}
// Pop removes and returns the last element; called only via heap.Pop.
func (h *KthSmallestHeap) Pop() interface{} {
	tmp := (*h)[len(*h)-1]
	(*h) = (*h)[:len(*h)-1]
	return tmp
}
// toString renders arr as "[a, b, c]" (used as a memoization key).
// Rewritten to build into a single byte buffer with strconv.AppendInt
// instead of quadratic string concatenation.
func toString(arr []int) string {
	buf := []byte{'['}
	for i, v := range arr {
		if i > 0 {
			buf = append(buf, ',', ' ')
		}
		buf = strconv.AppendInt(buf, int64(v), 10)
	}
	buf = append(buf, ']')
	return string(buf)
}
|
package msfapi
import (
"bytes"
"errors"
"fmt"
"gopkg.in/vmihailenco/msgpack.v2"
"io/ioutil"
"net/http"
)
// API is a client for a Metasploit RPC endpoint. URL is the endpoint
// address; Token holds the authentication token once obtained.
type API struct {
	Token string
	URL string
}
// New returns an API client pointed at the given RPC URL.
func New(url string) *API {
	// TODO ensure url responds before continuing
	return &API{URL: url}
}
// ensureToken returns an error when no authentication token has been set.
func (api *API) ensureToken() error {
	if api.Token != "" {
		return nil
	}
	return errors.New("Token is empty for some reason")
}
// request msgpack-encodes request, POSTs it to the RPC endpoint, and decodes
// the msgpack response body into response (which must be a pointer).
func (api *API) request(request, response interface{}) error {
	packedBytes, err := msgpack.Marshal(request)
	if err != nil {
		// fmt.Errorf replaces errors.New(fmt.Sprintf(...)) throughout
		// (staticcheck S1028); messages are unchanged.
		return fmt.Errorf("problem with marshaling:\n\t%v\n", err)
	}
	// Renamed from responseReader: this reader carries the request body.
	requestReader := bytes.NewReader(packedBytes)
	resp, err := http.Post(api.URL, "binary/message-pack", requestReader)
	if err != nil {
		return fmt.Errorf("problem with posting:\n\t%v\n", err)
	}
	defer resp.Body.Close()
	receiveBytes, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return fmt.Errorf("problem with ReadAll:\n\t%v\n", err)
	}
	err = msgpack.Unmarshal(receiveBytes, response)
	if err != nil {
		return fmt.Errorf("problem unmarshaling %v:\n\t%v", receiveBytes, err)
	}
	return nil
}
// int64ify widens any of Go's integer kinds (signed or unsigned, including
// uintptr) to int64. Unsigned values above math.MaxInt64 wrap. Any other
// dynamic type — float, string, nil, ... — yields 0.
func int64ify(n interface{}) int64 {
	switch v := n.(type) {
	case int:
		return int64(v)
	case int8:
		return int64(v)
	case int16:
		return int64(v)
	case int32:
		return int64(v)
	case int64:
		return v
	case uint:
		return int64(v)
	case uintptr:
		return int64(v)
	case uint8:
		return int64(v)
	case uint16:
		return int64(v)
	case uint32:
		return int64(v)
	case uint64:
		return int64(v)
	default:
		return 0
	}
}
|
package LeetCode
// Code83 drives LeetCode problem 83: builds the sorted list 1->1->2->2,
// strips consecutive duplicates with deleteDuplicates1, and prints the
// result (expected: 1->2). InitSingleList/PrintSingleList are shared
// helpers defined elsewhere in this package.
func Code83() {
	l1 := InitSingleList([]int{1, 1, 2, 2})
	PrintSingleList(deleteDuplicates1(l1))
}
/**
Given a sorted linked list, delete all duplicates so that each element
appears only once.
Example 1:
  Input:  1->1->2
  Output: 1->2
Example 2:
  Input:  1->1->2->3->3
  Output: 1->2->3
*/
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
// deleteDuplicates1 removes duplicates from a sorted singly-linked list so
// each value appears exactly once, returning the (possibly new) head.
// It walks the list with a prev/cur pair behind a dummy head: while the
// current node's value matches its successor, the current node is spliced
// out, so the surviving node for each value is its last occurrence.
func deleteDuplicates1(head *ListNode) *ListNode {
	if head == nil || head.Next == nil {
		return head
	}
	dummy := &ListNode{Next: head}
	prev, cur := dummy, head
	for cur != nil {
		if cur.Next != nil && cur.Val == cur.Next.Val {
			// Duplicate ahead: drop cur by linking prev past it.
			prev.Next = cur.Next
		} else {
			prev = cur
		}
		cur = cur.Next
	}
	return dummy.Next
}
|
package validator
import (
"encoding/json"
"fmt"
"reflect"
"strconv"
"strings"
)
// 1. int and bool fields must be declared as pointer types.
// 2. Embedded objects must be tagged required, otherwise this panics.
// 4. Linked-list-style nesting is supported.
// Setting a default value inside an array/slice is not supported yet.
// ValidateJson unmarshals the raw JSON payload `in` into v (a pointer to a
// struct) and then runs the tag-driven validation over the result. It
// returns an error for malformed JSON or for any failed validation rule.
func ValidateJson(in []byte, v interface{}) error {
	if err := json.Unmarshal(in, v); err != nil {
		return fmt.Errorf("Invalid request: malformed %s", err)
	}
	return ValidateParameters(v)
}
// ValidateParameters walks the exported fields of the struct pointed to by
// `in` via reflection and enforces the validation struct tags:
//
//	is_required — value must be non-zero (recurses into structs, struct
//	              pointers, and slice/array elements)
//	default     — fills zero-valued string/int fields, and nil bool/int/
//	              struct pointers, with the tag's value
//	max_len     — upper bound on string length / slice-array length
//	max, min    — numeric bounds for int fields and int pointers
//	enum        — comma-separated whitelist for string fields
//
// NOTE(review): the deferred recover's `err` shadows the named return, so a
// panic during reflection is printed and the function returns a nil error —
// a panicking input is silently treated as valid. Confirm this is intended.
func ValidateParameters(in interface{}) (err error) {
	defer func() {
		if err := recover(); err != nil {
			fmt.Println(err)
		}
	}()
	//Indirect returns the value that v points to. If v is a nil pointer,
	//Indirect returns a zero Value. If v is not a pointer, Indirect returns v.
	//Using this we don't need to care about if the interface is point to v or *v -gs
	v := reflect.ValueOf(in).Elem()
	t := reflect.TypeOf(in).Elem()
	for i := 0; i < t.NumField(); i++ {
		sf := t.Field(i)
		sv := v.FieldByName(sf.Name)
		if isRequired, ok := sf.Tag.Lookup("is_required"); ok && isRequired == "true" {
			// fmt.Printf("%s\n", sf.Type.Kind())
			switch sf.Type.Kind() {
			case reflect.String:
				if sv.String() == "" {
					return fmt.Errorf("Invalid request: missing %s", sf.Name)
				}
			case reflect.Int, reflect.Int64:
				//FIXME: 0 is meaningful here
				if sv.Int() == 0 {
					return fmt.Errorf("Invalid request: missing %s", sf.Name)
				}
			case reflect.Struct:
				// Required embedded struct: validate its fields recursively.
				err = ValidateParameters(sv.Interface())
				if err != nil {
					return err
				}
			case reflect.Ptr:
				if sv.IsNil() {
					return fmt.Errorf("Invalid request: missing %s", sf.Name)
				}
				if sv.Elem().Kind() == reflect.Struct {
					// fmt.Printf("%s:%s\n", sv, reflect.ValueOf(iv).Type())
					err = ValidateParameters(sv.Interface())
					if err != nil {
						return err
					}
				}
			case reflect.Slice, reflect.Array:
				if sv.Len() == 0 {
					return fmt.Errorf("Invalid request: missing %s", sf.Name)
				}
				// Validate each element; elements are assumed to be structs
				// (or pointers to structs) — anything else panics into the
				// recover above.
				for i := 0; i < sv.Len(); i++ {
					err = ValidateParameters(sv.Index(i).Interface())
					if err != nil {
						return err
					}
				}
			}
		} else {
			// Optional field: apply the `default` tag where the current
			// value is the zero value (or a nil pointer).
			switch sf.Type.Kind() {
			case reflect.String:
				if defaultV, ok := sf.Tag.Lookup("default"); ok && sv.String() == "" {
					sv.SetString(defaultV)
				}
			case reflect.Int, reflect.Int64:
				if defaultV, ok := sf.Tag.Lookup("default"); ok && sv.Int() == 0 {
					tempV, _ := strconv.Atoi(defaultV)
					sv.SetInt(int64(tempV))
				}
			case reflect.Struct:
				// fmt.Printf("TEST %s\n",)
				err = ValidateParameters(sv.Interface())
				if err != nil {
					return err
				}
			case reflect.Ptr:
				// fmt.Printf("%s\n", sv)
				tx := reflect.TypeOf(sv.Interface())
				// fmt.Printf("%s\n", tx.Elem().Kind())
				switch tx.Elem().Kind() {
				case reflect.Bool:
					if defaultV, ok := sf.Tag.Lookup("default"); ok && sv.IsNil() {
						tempV, _ := strconv.ParseBool(defaultV)
						newV := reflect.ValueOf(&tempV)
						sv.Set(newV)
					}
				case reflect.Int, reflect.Int64:
					if defaultV, ok := sf.Tag.Lookup("default"); ok && sv.IsNil() {
						tempV, _ := strconv.Atoi(defaultV)
						fmt.Printf("%s\n", sv)
						newV := reflect.ValueOf(&tempV)
						sv.Set(newV)
					}
				case reflect.Struct:
					// Allocate a zero struct for nil pointers so recursion
					// below can fill nested defaults.
					if sv.IsNil() {
						newStruct := reflect.New(tx.Elem())
						sv.Set(newStruct)
					}
					// fmt.Printf("%s:%s\n", sv, reflect.ValueOf(sv.Interface()).Type())
					err = ValidateParameters(sv.Interface())
					if err != nil {
						return err
					}
				}
			}
		}
		// Length bound: applies to strings (byte length) and slices/arrays.
		if maxLen, ok := sf.Tag.Lookup("max_len"); ok {
			maxLenInt, _ := strconv.Atoi(maxLen)
			switch sf.Type.Kind() {
			case reflect.String:
				s := sv.String()
				if len(s) > maxLenInt {
					return fmt.Errorf("%s exceed the max len %d", sf.Name, maxLenInt)
				}
			case reflect.Slice, reflect.Array:
				if sv.Len() > maxLenInt {
					return fmt.Errorf("%s exceed the max len %d", sf.Name, maxLenInt)
				}
			}
		}
		// Numeric upper bound for ints and int pointers.
		if max, ok := sf.Tag.Lookup("max"); ok {
			switch sf.Type.Kind() {
			case reflect.Int, reflect.Int64:
				maxInt, _ := strconv.Atoi(max)
				s := sv.Int()
				if s > int64(maxInt) {
					return fmt.Errorf("%s exceed the max %d", sf.Name, maxInt)
				}
			case reflect.Ptr:
				tx := reflect.TypeOf(sv.Interface())
				switch tx.Elem().Kind() {
				case reflect.Int, reflect.Int64:
					maxInt, _ := strconv.Atoi(max)
					s := sv.Elem().Int()
					if s > int64(maxInt) {
						return fmt.Errorf("%s exceed the max %d", sf.Name, maxInt)
					}
				}
			}
		}
		//FIXME: need to check if it's a empty value
		if min, ok := sf.Tag.Lookup("min"); ok {
			switch sf.Type.Kind() {
			case reflect.Int, reflect.Int64:
				minInt, _ := strconv.Atoi(min)
				s := sv.Int()
				// A zero value is treated as "unset" and skips the min check.
				if s != 0 && s < int64(minInt) {
					return fmt.Errorf("%s exceed the min %d", sf.Name, minInt)
				}
			case reflect.Ptr:
				tx := reflect.TypeOf(sv.Interface())
				switch tx.Elem().Kind() {
				case reflect.Int, reflect.Int64:
					minInt, _ := strconv.Atoi(min)
					s := sv.Elem().Int()
					if s < int64(minInt) {
						return fmt.Errorf("%s exceed the min %d", sf.Name, minInt)
					}
				}
			}
		}
		// Enum whitelist: string value must match one of the comma-separated
		// entries exactly.
		if enum, ok := sf.Tag.Lookup("enum"); ok {
			if sf.Type.Kind() == reflect.String {
				enumList := strings.Split(enum, ",")
				s := sv.String()
				isIn := false
				for _, v := range enumList {
					if s == v {
						isIn = true
					}
				}
				if !isIn {
					return fmt.Errorf("%s contain a unsupport type '%s'", sf.Name, s)
				}
			}
		}
	}
	return
}
|
package client
import (
"bytes"
"io"
"math/rand"
"reflect"
"sync"
"testing"
"time"
)
// TestOffsetReaderRead verifies a full read through offsetReader returns the
// complete underlying buffer.
//
// Fix: the failure message previously printed the arguments swapped
// (reported n as "expected" and len(buf) as "actual").
func TestOffsetReaderRead(t *testing.T) {
	buf := []byte("testData")
	reader := &offsetReader{buf: bytes.NewReader(buf)}
	tempBuf := make([]byte, len(buf))
	n, err := reader.Read(tempBuf)
	if err != nil {
		t.Fatal(err)
	}
	if n != len(buf) {
		// expected is the source length, actual is what Read reported.
		t.Errorf("lengths don't match. expected: %d, actual: %d",
			len(buf), n)
	}
	if !reflect.DeepEqual(buf, tempBuf) {
		t.Errorf("bufs not equal. expected: %v, actual: %v", buf, tempBuf)
	}
}
// TestOffsetReaderSeek verifies that seeking to the end of the underlying
// buffer reports an offset equal to the buffer length.
//
// Fix: the magic whence constant 2 is replaced with io.SeekEnd (io is
// already imported in this file).
func TestOffsetReaderSeek(t *testing.T) {
	buf := []byte("testData")
	reader := newOffsetReader(bytes.NewReader(buf), 0)
	n, err := reader.Seek(0, io.SeekEnd)
	if err != nil {
		t.Fatal(err)
	}
	if int64(len(buf)) != n {
		t.Error("seek does not do so on offset")
	}
}
// TestOffsetReaderClose verifies that once the reader is closed, subsequent
// reads return 0 bytes and io.EOF.
func TestOffsetReaderClose(t *testing.T) {
	src := []byte("testData")
	reader := &offsetReader{buf: bytes.NewReader(src)}
	if err := reader.Close(); err != nil {
		t.Fatal(err)
	}
	scratch := make([]byte, len(src))
	n, err := reader.Read(scratch)
	if n != 0 {
		t.Error("bytes read should be 0 for EOF")
	}
	if !reflect.DeepEqual(err, io.EOF) {
		t.Error("EOF error is not seen")
	}
}
// TestOffsetReaderRace hammers concurrent Read and Close on shared
// offsetReader instances so the race detector can catch unsynchronized
// access. Run with `go test -race` for this test to be meaningful.
func TestOffsetReaderRace(t *testing.T) {
	wg := sync.WaitGroup{}
	// f reads one byte at a time until the reader reports EOF, which Close
	// eventually forces.
	f := func(reader *offsetReader) {
		defer wg.Done()
		var err error
		buf := make([]byte, 1)
		_, err = reader.Read(buf)
		for err != io.EOF {
			_, err = reader.Read(buf)
		}
	}
	// closeFn closes the reader after a small random delay so the Close
	// races against in-flight Reads.
	closeFn := func(reader *offsetReader) {
		defer wg.Done()
		time.Sleep(time.Duration(rand.Intn(20)+1) * time.Millisecond)
		reader.Close()
	}
	for i := 0; i < 50; i++ {
		reader := &offsetReader{buf: bytes.NewReader(make([]byte, 1024*1024))}
		wg.Add(1)
		go f(reader)
		wg.Add(1)
		go closeFn(reader)
	}
	wg.Wait()
}
// BenchmarkOffsetReader measures sequential 1 KiB reads through offsetReader
// over a 100 MiB backing buffer.
func BenchmarkOffsetReader(b *testing.B) {
	const total = 1024 * 1024 * 100
	reader := &offsetReader{buf: bytes.NewReader(make([]byte, total))}
	chunk := make([]byte, 1024)
	for i := 0; i < b.N; i++ {
		reader.Read(chunk)
	}
}
// BenchmarkBytesReader is the baseline for BenchmarkOffsetReader: the same
// 1 KiB read pattern straight against a bare bytes.Reader.
func BenchmarkBytesReader(b *testing.B) {
	const total = 1024 * 1024 * 100
	reader := bytes.NewReader(make([]byte, total))
	chunk := make([]byte, 1024)
	for i := 0; i < b.N; i++ {
		reader.Read(chunk)
	}
}
|
package miner
import (
"bytes"
"encoding/binary"
"errors"
"time"
"unsafe"
"github.com/NebulousLabs/Sia/crypto"
"github.com/NebulousLabs/Sia/modules"
"github.com/NebulousLabs/Sia/types"
)
// increaseAttempts is the miner's way of gauging its own hashrate. After it's
// made 25 attempts to find a block, it calculates a hashrate based on how
// much time has passed. The number of attempts in progress is set to 0
// whenever mining starts or stops, which prevents weird low values from
// cropping up. (The old comment said 100 attempts; the code uses 25.)
//
// NOTE(review): called with m.mu held in threadedMine — assumed to require
// the miner lock.
func (m *Miner) increaseAttempts() {
	m.attempts++
	if m.attempts >= 25 { // Waiting for 25 attempts minimizes hashrate variance.
		// attempts * iterationsPerAttempt hashes over the elapsed
		// nanoseconds, scaled by 1e9 to get hashes per second.
		m.hashRate = int64((m.attempts * iterationsPerAttempt * 1e9)) / (time.Now().UnixNano() - m.startTime.UnixNano())
		m.startTime = time.Now()
		m.attempts = 0
	}
}
// threadedMine starts a gothread that does CPU mining. threadedMine is the
// only function that should be setting the mining flag to true.
// The loop runs hot: each iteration does one bounded SolveBlock attempt
// (iterationsPerAttempt hashes) and re-checks miningOn under the lock.
func (m *Miner) threadedMine() {
	// There should not be another thread mining, and mining should be enabled.
	lockID := m.mu.Lock()
	if m.mining || !m.miningOn {
		m.mu.Unlock(lockID)
		return
	}
	m.mining = true
	m.mu.Unlock(lockID)
	// Solve blocks repeatedly.
	for {
		// Kill the thread if mining has been turned off.
		lockID := m.mu.Lock()
		if !m.miningOn {
			m.mining = false
			m.mu.Unlock(lockID)
			return
		}
		// Grab a block and try to solve it.
		bfw, target := m.blockForWork()
		m.increaseAttempts()
		m.mu.Unlock(lockID)
		// SolveBlock is called without the lock held — it can take a while.
		b, solved := m.SolveBlock(bfw, target)
		if solved {
			err := m.SubmitBlock(b)
			if err != nil {
				m.log.Println("ERROR: An error occurred while cpu mining:", err)
			}
		}
	}
}
// AddBlock finds a single new block and submits it to the consensus set,
// returning the accepted block or the first error encountered while finding
// or accepting it.
func (m *Miner) AddBlock() (types.Block, error) {
	block, err := m.FindBlock()
	if err != nil {
		return types.Block{}, err
	}
	if err = m.cs.AcceptBlock(block); err != nil {
		return types.Block{}, err
	}
	return block, nil
}
// CPUHashrate returns the cpu hashrate (hashes per second, as estimated by
// increaseAttempts every 25 attempts). Takes the miner lock for the read.
func (m *Miner) CPUHashrate() int {
	lockID := m.mu.Lock()
	defer m.mu.Unlock(lockID)
	return int(m.hashRate)
}
// CPUMining indicates whether a cpu miner is running. The flag is owned by
// threadedMine and read here under the miner lock.
func (m *Miner) CPUMining() bool {
	lockID := m.mu.Lock()
	defer m.mu.Unlock(lockID)
	return m.mining
}
// FindBlock finds at most one block that extends the current blockchain.
// It returns modules.ErrLockedWallet if the wallet is locked, and an error
// if the limited hashing budget fails to solve the block.
//
// Fix: the original returned early while still holding m.mu when the wallet
// was locked or checkAddress failed, leaking the lock. Every path now
// unlocks; the redundant unlock/re-lock between the checks and blockForWork
// is also collapsed into a single critical section.
func (m *Miner) FindBlock() (types.Block, error) {
	lockID := m.mu.Lock()
	if !m.wallet.Unlocked() {
		m.mu.Unlock(lockID)
		return types.Block{}, modules.ErrLockedWallet
	}
	if err := m.checkAddress(); err != nil {
		m.mu.Unlock(lockID)
		return types.Block{}, err
	}
	// Grab a block for work while still holding the lock.
	bfw, target := m.blockForWork()
	m.mu.Unlock(lockID)
	// Solve without the lock held — this can take a long time.
	block, ok := m.SolveBlock(bfw, target)
	if !ok {
		return types.Block{}, errors.New("could not solve block using limited hashing power")
	}
	return block, nil
}
// SolveBlock takes a block, target, and number of iterations as input and
// tries to find a block that meets the target. This function can take a long
// time to complete, and should not be called with a lock.
//
// Header layout (80 bytes): parent ID at [0:32], nonce at [32:40] (mutated
// in place through an unsafe pointer so no re-encode is needed per
// iteration), timestamp at [40:48], merkle root at [48:80]. A hash meets
// the target when it compares <= target bytewise.
func (m *Miner) SolveBlock(b types.Block, target types.Target) (types.Block, bool) {
	// Assemble the header.
	merkleRoot := b.MerkleRoot()
	header := make([]byte, 80)
	copy(header, b.ParentID[:])
	binary.LittleEndian.PutUint64(header[40:48], uint64(b.Timestamp))
	copy(header[48:], merkleRoot[:])
	// Alias the nonce field so *nonce++ increments header[32:40] directly.
	nonce := (*uint64)(unsafe.Pointer(&header[32]))
	for i := 0; i < iterationsPerAttempt; i++ {
		id := crypto.HashBytes(header)
		if bytes.Compare(target[:], id[:]) >= 0 {
			// Winner: copy the successful nonce back into the block.
			copy(b.Nonce[:], header[32:40])
			return b, true
		}
		*nonce++
	}
	return b, false
}
// StartMining will spawn a thread to begin mining. The thread will only start
// mining if there is not another thread mining yet (threadedMine itself
// bails out when m.mining is already set).
func (m *Miner) StartCPUMining() {
	lockID := m.mu.Lock()
	defer m.mu.Unlock(lockID)
	m.miningOn = true
	go m.threadedMine()
}
// StopMining sets desiredThreads to 0, a value which is polled by mining
// threads. When set to 0, the mining threads will all cease mining.
//
// NOTE(review): the comment above predates this implementation — the code
// actually clears miningOn (polled by threadedMine) and resets hashRate.
func (m *Miner) StopCPUMining() {
	lockID := m.mu.Lock()
	defer m.mu.Unlock(lockID)
	m.hashRate = 0
	m.miningOn = false
}
|
package Controllers
import (
"TaibaiSupport/Models"
"encoding/json"
"fmt"
"github.com/streadway/amqp"
"log"
"os"
)
// failOnError aborts the process via log.Fatalf when err is non-nil,
// prefixing the log line with msg. It is a no-op for a nil error.
func failOnError(err error, msg string) {
	if err == nil {
		return
	}
	log.Fatalf("%s: %s", msg, err)
}
// Continuously consume messages from the MQ.
// RabbitmqEventReceivedChan buffers events received from the exchange;
// RabbitmqEventTobeSendChan buffers events queued for publishing.
var RabbitmqEventReceivedChan chan Models.TaibaiClassroomEvent
var RabbitmqEventTobeSendChan chan Models.TaibaiClassroomEvent
// Name prefixes; init() appends the region (and hostname) suffixes.
var ExchangeName = "taibai-exchange-" // + region
var QueueName = "taibai-queue-" // + region + hostname
var ConsumerName = "taibai-consumer-" // + region + hostname
// init wires this process to RabbitMQ: it builds region/host-scoped names,
// dials the broker using env vars (rabbitmq_addr/user/passwd,
// classroom_region), declares a fanout exchange and an exclusive
// auto-delete queue bound to it, and starts three goroutines: one turning
// deliveries into TaibaiClassroomEvent values, one dispatching those events
// to handlers, and one publishing outgoing events.
//
// NOTE(review): any setup failure calls log.Fatalf via failOnError, so a
// missing broker kills the process at import time.
func init() {
	RabbitmqEventReceivedChan = make(chan Models.TaibaiClassroomEvent, 3)
	RabbitmqEventTobeSendChan = make(chan Models.TaibaiClassroomEvent, 3)
	hostName,_ := os.Hostname()
	rabbitmq_addr := os.Getenv("rabbitmq_addr")
	rabbitmq_user := os.Getenv("rabbitmq_user")
	rabbitmq_passwd := os.Getenv("rabbitmq_passwd")
	classroom_region := os.Getenv("classroom_region")
	ExchangeName = ExchangeName + classroom_region
	QueueName = QueueName + classroom_region + "-" +hostName
	ConsumerName = ConsumerName + classroom_region + "-" + hostName
	amqp_link := "amqp://" + rabbitmq_user + ":" + rabbitmq_passwd + "@" + rabbitmq_addr +":5672/"
	// NOTE(review): this logs the full AMQP URL including the password —
	// consider redacting before shipping.
	log.Println(amqp_link)
	conn, err := amqp.Dial(amqp_link)
	failOnError(err, "Failed to connect to RabbitMQ")
	ch, err := conn.Channel()
	failOnError(err, "Failed to get a channel")
	err = ch.ExchangeDeclare(
		ExchangeName, // name
		"fanout",     // type
		true,         // durable
		false,        // auto-deleted
		false,        // internal
		false,        // no-wait
		nil,          // arguments
	)
	failOnError(err, "Failed to declare exchange")
	q, err := ch.QueueDeclare(
		QueueName, // name
		false,     // durable
		true,      // delete when usused
		true,      // exclusive
		false,     // no-wait
		nil,       // arguments
	)
	failOnError(err, "Failed to declare queue")
	err = ch.QueueBind(
		q.Name,       // queue name
		"",           // routing key
		ExchangeName, // exchange
		false,
		nil,
	)
	failOnError(err, "Failed to bind exchange with queue")
	consumer, err := ch.Consume(
		QueueName,    // queue
		ConsumerName, // consumer
		true,         // auto-ack
		false,        // exclusive
		false,        // no-local
		false,        // no-wait
		nil,          // args
	)
	failOnError(err, "Failed to register a consumer")
	// This goroutine converts MQ deliveries into events and buffers them.
	go func() {
		log.Println("start listen to rabbitmq")
		for event := range consumer {
			taibaiEvent := Models.TaibaiClassroomEvent{}
			err := json.Unmarshal(event.Body, &taibaiEvent)
			if err!=nil{
				log.Printf("failed to Unmarshal to TaibaiEvent message: %s" , event.Body)
			}else {
				RabbitmqEventReceivedChan <- taibaiEvent
			}
		}
	}()
	// This goroutine consumes the buffered events and dispatches by type.
	go func(){
		for event := range RabbitmqEventReceivedChan {
			eventJson,_ := json.Marshal(event)
			log.Println("从mq收到:", string(eventJson))
			switch event.EventType {
			case Models.EventType_UserOnlineStatusChangd:
				HandleEventUserOnlineStatusChanged(&event)
			case Models.EventType_UserVideoPositionChanged:
				HandleEventUserVideoPositionChanged(&event)
			case Models.EventType_1V1StateChanged:
				HandleEvent1V1StateChanged(&event)
			case Models.EventType_RTCSDPChanged:
				HandleEventRTCSDPChanged(&event)
			case Models.EventType_RTCICECandidateChanged:
				HandleEventRTCICECandidateChanged(&event)
			}
		}
	}()
	// This goroutine continuously publishes outgoing events to the MQ.
	go func() {
		for event := range RabbitmqEventTobeSendChan {
			message, _ := json.Marshal(event)
			err = ch.Publish(
				ExchangeName, // exchange
				"",           // routing key
				false,        // mandatory
				false,        // immediate
				amqp.Publishing{
					ContentType: "text/plain",
					Body:        message,
				})
			if err != nil {
				fmt.Println("send message to mq error: ", err)
			}
		}
	}()
}
|
package api
import (
"fmt"
"net/http"
"net/http/httptest"
"testing"
)
// TestFixer_GetRate exercises rate lookups against a fixed EUR-based rate
// table: base->other, other->base (reciprocal), and cross rates.
//
// Fix: the SEK/NOK failure messages previously said "EUR and NOK".
func TestFixer_GetRate(t *testing.T) {
	f := Fixer{
		Base: "EUR",
		Date: "2017-10-26",
		Rates: map[string]float32{
			"AUD": 1.5248, "BGN": 1.9558, "BRL": 3.803, "CAD": 1.5041, "CHF": 1.1678,
			"CNY": 7.8003, "CZK": 25.589, "DKK": 7.4432, "GBP": 0.8901, "HKD": 9.1701,
			"HRK": 7.5155, "HUF": 310.32, "IDR": 15982.0, "ILS": 4.1343, "INR": 76.23,
			"JPY": 133.75, "KRW": 1320.4, "MXN": 22.368, "MYR": 4.9762, "NOK": 9.4865,
			"NZD": 1.7118, "PHP": 60.939, "PLN": 4.235, "RON": 4.5983, "RUB": 67.76,
			"SEK": 9.7218, "SGD": 1.601, "THB": 38.973, "TRY": 4.4338, "USD": 1.1753, "ZAR": 16.739},
	}
	// Base to quoted currency: rate comes straight out of the table.
	rate, err := f.GetRate("EUR", "NOK")
	if err != nil {
		t.Errorf("EUR and NOK should be valid. got: %s", err.Error())
		return
	}
	if rate != 9.4865 {
		t.Errorf("Expected rate 9.4865, got %f", rate)
	}
	// Quoted currency back to base: reciprocal of the table entry.
	rate, err = f.GetRate("NOK", "EUR")
	if err != nil {
		t.Errorf("EUR and NOK should be valid. got: %s", err.Error())
		return
	}
	if rate != 1/float32(9.4865) {
		t.Errorf("Expected rate %f, got %f", 1/float32(9.4865), rate)
	}
	// Cross rate between two non-base currencies.
	rate, err = f.GetRate("SEK", "NOK")
	if err != nil {
		t.Errorf("SEK and NOK should be valid. got: %s", err.Error())
		return
	}
	if rate != float32(9.4865)*(1/float32(9.7218)) {
		t.Errorf("Expected rate %f, got %f", float32(9.4865)*(1/float32(9.7218)), rate)
	}
}
// handleTestGetLatest to simulate fixer API
func handleTestGetLatest(w http.ResponseWriter, r *http.Request) {
switch r.Method {
case "GET":
json := `{"base":"EUR","date":"2017-10-26","rates":{"AUD":1.5248,"BGN":1.9558,"BRL":3.803,"CAD":1.5041,
"CHF":1.1678,"CNY":7.8003,"CZK":25.589,"DKK":7.4432,"GBP":0.8901,"HKD":9.1701,"HRK":7.5155,"HUF":310.32,
"IDR":15982.0,"ILS":4.1343,"INR":76.23,"JPY":133.75,"KRW":1320.4,"MXN":22.368,"MYR":4.9762,"NOK":9.4865,
"NZD":1.7118,"PHP":60.939,"PLN":4.235,"RON":4.5983,"RUB":67.76,"SEK":9.7218,"SGD":1.601,"THB":38.973,
"TRY":4.4338,"USD":1.1753,"ZAR":16.739}}`
fmt.Fprint(w, json)
default:
http.Error(w, "not implemented", http.StatusBadRequest)
}
}
// TestNewFixer fetches fixer data from a local httptest server and checks
// the decoded base, date, and a sample rate.
//
// Fixes: the rate-mismatch message was a copy-paste of the date message;
// the NewFixer error now uses Fatalf since continuing would dereference a
// potentially unusable f.
func TestNewFixer(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(handleTestGetLatest))
	defer ts.Close()
	f, err := NewFixer(ts.URL)
	if err != nil {
		t.Fatalf("Error getting fixer: %s", err.Error())
	}
	if f.Base != "EUR" {
		t.Errorf("Expected base EUR, got %s", f.Base)
	}
	if f.Date != "2017-10-26" {
		t.Errorf("Expected date 2017-10-26, got %s", f.Date)
	}
	if f.Rates["NOK"] != float32(9.4865) {
		t.Errorf("Expected NOK rate 9.4865, got %f", f.Rates["NOK"])
	}
}
|
package main
import (
"errors"
"fmt"
)
// main demonstrates Go's two common error styles: a plain errors.New value
// (f1) and a custom type implementing the error interface (f2/argError).
//
// Fixes: removed a discarded duplicate f2(99) call, dropped `else` after
// returning branches inside the closures, and replaced `var r, e =` with
// the idiomatic `:=`. Printed output is unchanged.
func main() {
	// f1 fails with a plain error value when given exactly 100.
	f1 := func(i int) (int, error) {
		if i == 100 {
			return -1, errors.New("overload")
		}
		return i - 1, nil
	}
	r, e := f1(100)
	if e != nil {
		fmt.Println("error:", e)
	} else {
		fmt.Println("success:", r)
	}
	r, e = f1(99)
	if e != nil {
		fmt.Println("error", e)
	} else {
		fmt.Println("success:", r)
	}
	// f2 fails with a structured *argError carrying the offending value.
	f2 := func(i int) (int, error) {
		if i == 100 {
			return -1, &argError{i, "overload"}
		}
		return i - 1, nil
	}
	r, e = f2(99)
	if e != nil {
		fmt.Println("error", e)
	} else {
		fmt.Println("success:", r)
	}
	r, e = f2(100)
	if e != nil {
		fmt.Println("error", e)
	} else {
		fmt.Println("success:", r)
	}
}
// argError is a custom error type pairing a numeric code with a message.
type argError struct {
	code int
	msg  string
}

// Error satisfies the error interface, formatting as "<code> - <msg>".
func (e *argError) Error() string {
	return fmt.Sprintf("%d - %s", e.code, e.msg)
}
|
package nebula
import (
"encoding/binary"
"errors"
"fmt"
"time"
"github.com/flynn/noise"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/firewall"
"github.com/slackhq/nebula/header"
"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/udp"
"golang.org/x/net/ipv4"
"google.golang.org/protobuf/proto"
)
const (
minFwPacketLen = 4
)
// readOutsidePackets adapts Interface.readOutsidePackets to the
// udp.EncReader callback shape expected by the UDP listener. The ViaSender
// argument is nil because packets arriving through this path came straight
// off the wire, not via a relay.
func readOutsidePackets(f *Interface) udp.EncReader {
	return func(
		addr *udp.Addr,
		out []byte,
		packet []byte,
		header *header.H,
		fwPacket *firewall.Packet,
		lhh udp.LightHouseHandlerFunc,
		nb []byte,
		q int,
		localCache firewall.ConntrackCache,
	) {
		f.readOutsidePackets(addr, nil, out, packet, header, fwPacket, lhh, nb, q, localCache)
	}
}
// readOutsidePackets is the main inbound dispatch for packets from the
// outside (UDP socket or, recursively, a relay). It parses the nebula
// header, looks up the owning HostInfo by remote index, and dispatches on
// message type: data to the tun, relayed payloads (terminal: recurse;
// forwarding: re-send), lighthouse/test/control traffic to their handlers,
// and handshake/recv-error/close-tunnel as unauthenticated control flow.
// Authenticated paths fall through to the bottom to record roaming and
// traffic; unauthenticated ones return early.
func (f *Interface) readOutsidePackets(addr *udp.Addr, via *ViaSender, out []byte, packet []byte, h *header.H, fwPacket *firewall.Packet, lhf udp.LightHouseHandlerFunc, nb []byte, q int, localCache firewall.ConntrackCache) {
	err := h.Parse(packet)
	if err != nil {
		// TODO: best if we return this and let caller log
		// TODO: Might be better to send the literal []byte("holepunch") packet and ignore that?
		// Hole punch packets are 0 or 1 byte big, so lets ignore printing those errors
		if len(packet) > 1 {
			f.l.WithField("packet", packet).Infof("Error while parsing inbound packet from %s: %s", addr, err)
		}
		return
	}
	//l.Error("in packet ", header, packet[HeaderLen:])
	if addr != nil {
		if ip4 := addr.IP.To4(); ip4 != nil {
			// Drop packets whose outer source is inside our own VPN range —
			// that would mean nebula-in-nebula (double encryption).
			if ipMaskContains(f.lightHouse.myVpnIp, f.lightHouse.myVpnZeros, iputil.VpnIp(binary.BigEndian.Uint32(ip4))) {
				if f.l.Level >= logrus.DebugLevel {
					f.l.WithField("udpAddr", addr).Debug("Refusing to process double encrypted packet")
				}
				return
			}
		}
	}
	var hostinfo *HostInfo
	// verify if we've seen this index before, otherwise respond to the handshake initiation
	if h.Type == header.Message && h.Subtype == header.MessageRelay {
		hostinfo = f.hostMap.QueryRelayIndex(h.RemoteIndex)
	} else {
		hostinfo = f.hostMap.QueryIndex(h.RemoteIndex)
	}
	var ci *ConnectionState
	if hostinfo != nil {
		ci = hostinfo.ConnectionState
	}
	switch h.Type {
	case header.Message:
		// TODO handleEncrypted sends directly to addr on error. Handle this in the tunneling case.
		if !f.handleEncrypted(ci, addr, h) {
			return
		}
		switch h.Subtype {
		case header.MessageNone:
			if !f.decryptToTun(hostinfo, h.MessageCounter, out, packet, fwPacket, nb, q, localCache) {
				return
			}
		case header.MessageRelay:
			// The entire body is sent as AD, not encrypted.
			// The packet consists of a 16-byte parsed Nebula header, Associated Data-protected payload, and a trailing 16-byte AEAD signature value.
			// The packet is guaranteed to be at least 16 bytes at this point, b/c it got past the h.Parse() call above. If it's
			// otherwise malformed (meaning, there is no trailing 16 byte AEAD value), then this will result in at worst a 0-length slice
			// which will gracefully fail in the DecryptDanger call.
			signedPayload := packet[:len(packet)-hostinfo.ConnectionState.dKey.Overhead()]
			signatureValue := packet[len(packet)-hostinfo.ConnectionState.dKey.Overhead():]
			out, err = hostinfo.ConnectionState.dKey.DecryptDanger(out, signedPayload, signatureValue, h.MessageCounter, nb)
			if err != nil {
				return
			}
			// Successfully validated the thing. Get rid of the Relay header.
			signedPayload = signedPayload[header.Len:]
			// Pull the Roaming parts up here, and return in all call paths.
			f.handleHostRoaming(hostinfo, addr)
			// Track usage of both the HostInfo and the Relay for the received & authenticated packet
			f.connectionManager.In(hostinfo.localIndexId)
			f.connectionManager.RelayUsed(h.RemoteIndex)
			relay, ok := hostinfo.relayState.QueryRelayForByIdx(h.RemoteIndex)
			if !ok {
				// The only way this happens is if hostmap has an index to the correct HostInfo, but the HostInfo is missing
				// its internal mapping. This should never happen.
				hostinfo.logger(f.l).WithFields(logrus.Fields{"vpnIp": hostinfo.vpnIp, "remoteIndex": h.RemoteIndex}).Error("HostInfo missing remote relay index")
				return
			}
			switch relay.Type {
			case TerminalType:
				// If I am the target of this relay, process the unwrapped packet
				// From this recursive point, all these variables are 'burned'. We shouldn't rely on them again.
				f.readOutsidePackets(nil, &ViaSender{relayHI: hostinfo, remoteIdx: relay.RemoteIndex, relay: relay}, out[:0], signedPayload, h, fwPacket, lhf, nb, q, localCache)
				return
			case ForwardingType:
				// Find the target HostInfo relay object
				targetHI, targetRelay, err := f.hostMap.QueryVpnIpRelayFor(hostinfo.vpnIp, relay.PeerIp)
				if err != nil {
					hostinfo.logger(f.l).WithField("relayTo", relay.PeerIp).WithError(err).Info("Failed to find target host info by ip")
					return
				}
				// If that relay is Established, forward the payload through it
				if targetRelay.State == Established {
					switch targetRelay.Type {
					case ForwardingType:
						// Forward this packet through the relay tunnel
						// Find the target HostInfo
						f.SendVia(targetHI, targetRelay, signedPayload, nb, out, false)
						return
					case TerminalType:
						hostinfo.logger(f.l).Error("Unexpected Relay Type of Terminal")
					}
				} else {
					hostinfo.logger(f.l).WithFields(logrus.Fields{"relayTo": relay.PeerIp, "relayFrom": hostinfo.vpnIp, "targetRelayState": targetRelay.State}).Info("Unexpected target relay state")
					return
				}
			}
		}
	case header.LightHouse:
		f.messageMetrics.Rx(h.Type, h.Subtype, 1)
		if !f.handleEncrypted(ci, addr, h) {
			return
		}
		d, err := f.decrypt(hostinfo, h.MessageCounter, out, packet, h, nb)
		if err != nil {
			hostinfo.logger(f.l).WithError(err).WithField("udpAddr", addr).
				WithField("packet", packet).
				Error("Failed to decrypt lighthouse packet")
			//TODO: maybe after build 64 is out? 06/14/2018 - NB
			//f.sendRecvError(net.Addr(addr), header.RemoteIndex)
			return
		}
		lhf(addr, hostinfo.vpnIp, d)
		// Fallthrough to the bottom to record incoming traffic
	case header.Test:
		f.messageMetrics.Rx(h.Type, h.Subtype, 1)
		if !f.handleEncrypted(ci, addr, h) {
			return
		}
		d, err := f.decrypt(hostinfo, h.MessageCounter, out, packet, h, nb)
		if err != nil {
			hostinfo.logger(f.l).WithError(err).WithField("udpAddr", addr).
				WithField("packet", packet).
				Error("Failed to decrypt test packet")
			//TODO: maybe after build 64 is out? 06/14/2018 - NB
			//f.sendRecvError(net.Addr(addr), header.RemoteIndex)
			return
		}
		if h.Subtype == header.TestRequest {
			// This testRequest might be from TryPromoteBest, so we should roam
			// to the new IP address before responding
			f.handleHostRoaming(hostinfo, addr)
			f.send(header.Test, header.TestReply, ci, hostinfo, d, nb, out)
		}
		// Fallthrough to the bottom to record incoming traffic
	// Non encrypted messages below here, they should not fall through to avoid tracking incoming traffic since they
	// are unauthenticated
	case header.Handshake:
		f.messageMetrics.Rx(h.Type, h.Subtype, 1)
		HandleIncomingHandshake(f, addr, via, packet, h, hostinfo)
		return
	case header.RecvError:
		f.messageMetrics.Rx(h.Type, h.Subtype, 1)
		f.handleRecvError(addr, h)
		return
	case header.CloseTunnel:
		f.messageMetrics.Rx(h.Type, h.Subtype, 1)
		if !f.handleEncrypted(ci, addr, h) {
			return
		}
		hostinfo.logger(f.l).WithField("udpAddr", addr).
			Info("Close tunnel received, tearing down.")
		f.closeTunnel(hostinfo)
		return
	case header.Control:
		if !f.handleEncrypted(ci, addr, h) {
			return
		}
		d, err := f.decrypt(hostinfo, h.MessageCounter, out, packet, h, nb)
		if err != nil {
			hostinfo.logger(f.l).WithError(err).WithField("udpAddr", addr).
				WithField("packet", packet).
				Error("Failed to decrypt Control packet")
			return
		}
		m := &NebulaControl{}
		err = m.Unmarshal(d)
		if err != nil {
			hostinfo.logger(f.l).WithError(err).Error("Failed to unmarshal control message")
			break
		}
		f.relayManager.HandleControlMsg(hostinfo, m, f)
	default:
		f.messageMetrics.Rx(h.Type, h.Subtype, 1)
		hostinfo.logger(f.l).Debugf("Unexpected packet received from %s", addr)
		return
	}
	// Reached only by authenticated message types: record roaming and mark
	// the connection as having inbound traffic.
	f.handleHostRoaming(hostinfo, addr)
	f.connectionManager.In(hostinfo.localIndexId)
}
// closeTunnel closes a tunnel locally, it does not send a closeTunnel packet to the remote
func (f *Interface) closeTunnel(hostInfo *HostInfo) {
	final := f.hostMap.DeleteHostInfo(hostInfo)
	if final {
		// We no longer have any tunnels with this vpn ip, clear learned lighthouse state to lower memory usage
		f.lightHouse.DeleteVpnIp(hostInfo.vpnIp)
	}
}
// sendCloseTunnel is a helper function to send a proper close tunnel packet
// to a remote: an empty payload with a 12-byte nonce scratch buffer and an
// mtu-sized output buffer.
//
// Fix: make([]byte, 12, 12) had a redundant explicit capacity (gocritic
// makeexpr); make([]byte, 12) is identical.
func (f *Interface) sendCloseTunnel(h *HostInfo) {
	f.send(header.CloseTunnel, 0, h.ConnectionState, h, []byte{}, make([]byte, 12), make([]byte, mtu))
}
// handleHostRoaming updates a host's remote UDP address when an
// authenticated packet arrives from a new source. Roaming is denied by the
// lighthouse remote allow list, and roaming straight back to the previous
// remote is suppressed for RoamingSuppressSeconds to avoid flapping.
func (f *Interface) handleHostRoaming(hostinfo *HostInfo, addr *udp.Addr) {
	if addr != nil && !hostinfo.remote.Equals(addr) {
		if !f.lightHouse.GetRemoteAllowList().Allow(hostinfo.vpnIp, addr.IP) {
			hostinfo.logger(f.l).WithField("newAddr", addr).Debug("lighthouse.remote_allow_list denied roaming")
			return
		}
		// Suppress a rapid roam back to the address we just left.
		if !hostinfo.lastRoam.IsZero() && addr.Equals(hostinfo.lastRoamRemote) && time.Since(hostinfo.lastRoam) < RoamingSuppressSeconds*time.Second {
			if f.l.Level >= logrus.DebugLevel {
				hostinfo.logger(f.l).WithField("udpAddr", hostinfo.remote).WithField("newAddr", addr).
					Debugf("Suppressing roam back to previous remote for %d seconds", RoamingSuppressSeconds)
			}
			return
		}
		hostinfo.logger(f.l).WithField("udpAddr", hostinfo.remote).WithField("newAddr", addr).
			Info("Host roamed to new udp ip/port.")
		hostinfo.lastRoam = time.Now()
		hostinfo.lastRoamRemote = hostinfo.remote
		hostinfo.SetRemote(addr)
	}
}
// handleEncrypted reports whether an inbound encrypted packet should be
// processed: the connection state must exist and the replay window must
// accept the message counter. On rejection, a recv error may be sent back
// (only when we have a remote address) so the peer can re-handshake.
//
// Fix: collapsed the redundant if/else in which both branches returned
// false.
func (f *Interface) handleEncrypted(ci *ConnectionState, addr *udp.Addr, h *header.H) bool {
	// If connectionstate exists and the replay protector allows, process packet
	// Else, send recv errors for 300 seconds after a restart to allow fast reconnection.
	if ci == nil || !ci.window.Check(f.l, h.MessageCounter) {
		if addr != nil {
			f.maybeSendRecvError(addr, h.RemoteIndex)
		}
		return false
	}
	return true
}
// newPacket validates and parses the interesting bits for the firewall out of the ip and sub protocol headers
// It fills fp with the protocol, fragment flag, and local/remote IP:port
// tuple (oriented by `incoming`), returning an error for anything that is
// not a well-formed IPv4 packet with enough bytes for the required fields.
func newPacket(data []byte, incoming bool, fp *firewall.Packet) error {
	// Do we at least have an ipv4 header worth of data?
	if len(data) < ipv4.HeaderLen {
		return fmt.Errorf("packet is less than %v bytes", ipv4.HeaderLen)
	}
	// Is it an ipv4 packet?
	if int((data[0]>>4)&0x0f) != 4 {
		return fmt.Errorf("packet is not ipv4, type: %v", int((data[0]>>4)&0x0f))
	}
	// Adjust our start position based on the advertised ip header length
	// (IHL is in 32-bit words, so <<2 converts to bytes).
	ihl := int(data[0]&0x0f) << 2
	// Well formed ip header length?
	if ihl < ipv4.HeaderLen {
		return fmt.Errorf("packet had an invalid header length: %v", ihl)
	}
	// Check if this is the second or further fragment of a fragmented packet.
	// The low 13 bits are the fragment offset; nonzero means not the first fragment.
	flagsfrags := binary.BigEndian.Uint16(data[6:8])
	fp.Fragment = (flagsfrags & 0x1FFF) != 0
	// Firewall handles protocol checks
	fp.Protocol = data[9]
	// Accounting for a variable header length, do we have enough data for our src/dst tuples?
	minLen := ihl
	if !fp.Fragment && fp.Protocol != firewall.ProtoICMP {
		minLen += minFwPacketLen
	}
	if len(data) < minLen {
		return fmt.Errorf("packet is less than %v bytes, ip header len: %v", minLen, ihl)
	}
	// Firewall packets are locally oriented: src/dst from the IP header map
	// to remote/local depending on direction. Ports sit in the first four
	// bytes after the IP header for TCP/UDP; fragments and ICMP have none.
	if incoming {
		fp.RemoteIP = iputil.Ip2VpnIp(data[12:16])
		fp.LocalIP = iputil.Ip2VpnIp(data[16:20])
		if fp.Fragment || fp.Protocol == firewall.ProtoICMP {
			fp.RemotePort = 0
			fp.LocalPort = 0
		} else {
			fp.RemotePort = binary.BigEndian.Uint16(data[ihl : ihl+2])
			fp.LocalPort = binary.BigEndian.Uint16(data[ihl+2 : ihl+4])
		}
	} else {
		fp.LocalIP = iputil.Ip2VpnIp(data[12:16])
		fp.RemoteIP = iputil.Ip2VpnIp(data[16:20])
		if fp.Fragment || fp.Protocol == firewall.ProtoICMP {
			fp.RemotePort = 0
			fp.LocalPort = 0
		} else {
			fp.LocalPort = binary.BigEndian.Uint16(data[ihl : ihl+2])
			fp.RemotePort = binary.BigEndian.Uint16(data[ihl+2 : ihl+4])
		}
	}
	return nil
}
// decrypt authenticates and decrypts a packet body into out using the
// hostinfo's receive key, with the header bytes (packet[:header.Len]) as
// associated data, then advances the replay window. Returns the plaintext,
// or an error on a bad AEAD tag or an out-of-window counter.
func (f *Interface) decrypt(hostinfo *HostInfo, mc uint64, out []byte, packet []byte, h *header.H, nb []byte) ([]byte, error) {
	var err error
	out, err = hostinfo.ConnectionState.dKey.DecryptDanger(out, packet[:header.Len], packet[header.Len:], mc, nb)
	if err != nil {
		return nil, err
	}
	// Update the window only after successful decryption, so forged
	// counters cannot advance it.
	if !hostinfo.ConnectionState.window.Update(f.l, mc) {
		hostinfo.logger(f.l).WithField("header", h).
			Debugln("dropping out of window packet")
		return nil, errors.New("out of window packet")
	}
	return out, nil
}
// decryptToTun decrypts a data packet, validates the inner IPv4 packet,
// applies replay protection and the inbound firewall, and finally writes
// the plaintext to the tun device for queue q. Returns false if the packet
// was dropped at any stage.
func (f *Interface) decryptToTun(hostinfo *HostInfo, messageCounter uint64, out []byte, packet []byte, fwPacket *firewall.Packet, nb []byte, q int, localCache firewall.ConntrackCache) bool {
	var err error
	out, err = hostinfo.ConnectionState.dKey.DecryptDanger(out, packet[:header.Len], packet[header.Len:], messageCounter, nb)
	if err != nil {
		hostinfo.logger(f.l).WithError(err).Error("Failed to decrypt packet")
		//TODO: maybe after build 64 is out? 06/14/2018 - NB
		//f.sendRecvError(hostinfo.remote, header.RemoteIndex)
		return false
	}
	// Parse the decrypted payload into a firewall tuple before any
	// accounting happens.
	err = newPacket(out, true, fwPacket)
	if err != nil {
		hostinfo.logger(f.l).WithError(err).WithField("packet", out).
			Warnf("Error while validating inbound packet")
		return false
	}
	// Replay window is only advanced after successful decrypt + parse.
	if !hostinfo.ConnectionState.window.Update(f.l, messageCounter) {
		hostinfo.logger(f.l).WithField("fwPacket", fwPacket).
			Debugln("dropping out of window packet")
		return false
	}
	dropReason := f.firewall.Drop(out, *fwPacket, true, hostinfo, f.caPool, localCache)
	if dropReason != nil {
		// Firewall rejected the packet; optionally notify the sender.
		f.rejectOutside(out, hostinfo.ConnectionState, hostinfo, nb, out, q)
		if f.l.Level >= logrus.DebugLevel {
			hostinfo.logger(f.l).WithField("fwPacket", fwPacket).
				WithField("reason", dropReason).
				Debugln("dropping inbound packet")
		}
		return false
	}
	f.connectionManager.In(hostinfo.localIndexId)
	_, err = f.readers[q].Write(out)
	if err != nil {
		f.l.WithError(err).Error("Failed to write to tun")
	}
	return true
}
// maybeSendRecvError sends a recv-error for index only when the configured
// policy allows it for the peer's IP.
func (f *Interface) maybeSendRecvError(endpoint *udp.Addr, index uint32) {
	if !f.sendRecvErrorConfig.ShouldSendRecvError(endpoint.IP) {
		return
	}
	f.sendRecvError(endpoint, index)
}
// sendRecvError emits an unencrypted recv-error message carrying the remote
// index so the peer can notice a dead tunnel.
func (f *Interface) sendRecvError(endpoint *udp.Addr, index uint32) {
	f.messageMetrics.Tx(header.RecvError, 0, 1)

	//TODO: this should be a signed message so we can trust that we should drop the index
	msg := header.Encode(make([]byte, header.Len), header.Version, header.RecvError, 0, index, 0)
	f.outside.WriteTo(msg, endpoint)

	if f.l.Level >= logrus.DebugLevel {
		f.l.WithField("index", index).
			WithField("udpAddr", endpoint).
			Debug("Recv error sent")
	}
}
// handleRecvError processes an inbound recv-error message: once the peer has
// exceeded the recv-error threshold and the sender's address matches the
// known remote, the tunnel is torn down so a fast reconnect can happen.
func (f *Interface) handleRecvError(addr *udp.Addr, h *header.H) {
	if f.l.Level >= logrus.DebugLevel {
		f.l.WithField("index", h.RemoteIndex).
			WithField("udpAddr", addr).
			Debug("Recv error received")
	}
	hostinfo := f.hostMap.QueryReverseIndex(h.RemoteIndex)
	if hostinfo == nil {
		f.l.WithField("remoteIndex", h.RemoteIndex).Debugln("Did not find remote index in main hostmap")
		return
	}
	// Hold the hostinfo lock for the threshold check and teardown.
	hostinfo.Lock()
	defer hostinfo.Unlock()
	// Require repeated recv-errors before acting; a single message is not
	// enough to tear the tunnel down.
	if !hostinfo.RecvErrorExceeded() {
		return
	}
	// Recv-error messages are unauthenticated; ignore ones that do not come
	// from the address we believe the peer is at.
	if hostinfo.remote != nil && !hostinfo.remote.Equals(addr) {
		f.l.Infoln("Someone spoofing recv_errors? ", addr, hostinfo.remote)
		return
	}
	f.closeTunnel(hostinfo)
	// We also delete it from pending hostmap to allow for fast reconnect.
	f.handshakeManager.DeleteHostInfo(hostinfo)
}
/*
func (f *Interface) sendMeta(ci *ConnectionState, endpoint *net.UDPAddr, meta *NebulaMeta) {
if ci.eKey != nil {
//TODO: log error?
return
}
msg, err := proto.Marshal(meta)
if err != nil {
l.Debugln("failed to encode header")
}
c := ci.messageCounter
b := HeaderEncode(nil, Version, uint8(metadata), 0, hostinfo.remoteIndexId, c)
ci.messageCounter++
msg := ci.eKey.EncryptDanger(b, nil, msg, c)
//msg := ci.eKey.EncryptDanger(b, nil, []byte(fmt.Sprintf("%d", counter)), c)
f.outside.WriteTo(msg, endpoint)
}
*/
// RecombineCertAndValidate rebuilds a peer certificate from the raw bytes
// exchanged during the handshake, inserts the peer's static public key from
// the noise handshake state, and verifies the result against caPool.
// Returns the certificate (possibly alongside an error describing why
// validation failed) or an error when the inputs cannot be decoded.
func RecombineCertAndValidate(h *noise.HandshakeState, rawCertBytes []byte, caPool *cert.NebulaCAPool) (*cert.NebulaCertificate, error) {
	pk := h.PeerStatic()
	if pk == nil {
		return nil, errors.New("no peer static key was present")
	}
	if rawCertBytes == nil {
		return nil, errors.New("provided payload was empty")
	}
	r := &cert.RawNebulaCertificate{}
	err := proto.Unmarshal(rawCertBytes, r)
	if err != nil {
		return nil, fmt.Errorf("error unmarshaling cert: %s", err)
	}
	// If the Details are nil, just exit to avoid crashing
	if r.Details == nil {
		return nil, fmt.Errorf("certificate did not contain any details")
	}
	// The wire form omits the public key; splice in the one proven by the
	// handshake before re-marshaling.
	r.Details.PublicKey = pk
	recombined, err := proto.Marshal(r)
	if err != nil {
		return nil, fmt.Errorf("error while recombining certificate: %s", err)
	}
	// BUGFIX: the unmarshal error was previously discarded, which could leave
	// c nil and panic on c.Verify below.
	c, err := cert.UnmarshalNebulaCertificate(recombined)
	if err != nil {
		return nil, fmt.Errorf("error while unmarshaling recombined certificate: %s", err)
	}
	isValid, err := c.Verify(time.Now(), caPool)
	if err != nil {
		return c, fmt.Errorf("certificate validation failed: %s", err)
	} else if !isValid {
		// This case should never happen but here's to defensive programming!
		return c, errors.New("certificate validation failed but did not return an error")
	}
	return c, nil
}
|
// Copyright © 2017 stripe-proxy authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package proxy
import (
"encoding/json"
"net/http"
"strings"
"github.com/gorilla/mux"
"github.com/stripe/stripe-go"
)
// ErrorResponse is the JSON body returned to clients when the proxy rejects
// a request; it mirrors Stripe's own error envelope ({"error": {...}}).
type ErrorResponse struct {
	StripeError stripe.Error `json:"error"`
}
// resourceRoutes maps URL path prefixes to the StripeResource permission they
// require. These routes will match in order, so the ResourceAll route is a
// fallback and more specific prefixes (e.g. transfer reversals) must appear
// before their parents (transfers).
var resourceRoutes = []struct {
	route string
	sr    StripeResource
}{
	// Payment methods
	{"/v1/customers/{cust_id}/sources", ResourceSource},
	// Core resources
	{"/v1/balance", ResourceBalance},
	{"/v1/charges", ResourceCharges},
	{"/v1/customers", ResourceCustomers},
	{"/v1/disputes", ResourceDisputes},
	{"/v1/events", ResourceEvents},
	{"/v1/files", ResourceFileUploads},
	{"/v1/refunds", ResourceRefunds},
	{"/v1/tokens", ResourceTokens},
	{"/v1/transfers/{transfer_id}/reversals", ResourceTransferReversals},
	{"/v1/transfers", ResourceTransfers},
	// Connect resources
	{"/v1/accounts", ResourceAccount},
	{"/v1/application_fees/{fee_id}/refunds", ResourceApplicationFeeRefund},
	{"/v1/application_fees", ResourceApplicationFee},
	{"/v1/recipients", ResourceRecipient},
	{"/v1/country_specs", ResourceCountrySpec},
	{"/v1/accounts/{account_id}/external_accounts", ResourceExternalAccount},
	// Relay resources
	{"/v1/orders", ResourceOrder},
	{"/v1/order_returns", ResourceOrderReturn},
	{"/v1/products", ResourceProduct},
	{"/v1/skus", ResourceSKU},
	// Subscription resources
	{"/v1/coupons", ResourceCoupon},
	{"/v1/invoices", ResourceInvoice},
	{"/v1/invoiceitems", ResourceInvoiceItem},
	{"/v1/plans", ResourcePlan},
	{"/v1/subscriptions", ResourceSubscription},
	{"/v1/subscription_items", ResourceSubscriptionItem},
	// Catch all
	{"/v1/", ResourceAll},
}
// accessMethods maps an Access level to the HTTP methods that demand it:
// safe/read-only methods require Read, mutating methods require Write.
// (gofmt -s: the element type is elided inside the map composite literal.)
var accessMethods = map[Access][]string{
	Read:  {"GET", "HEAD"},
	Write: {"POST", "DELETE", "PUT", "PATCH"},
}
// validButInsufficientError builds the 403 response used when the credential
// verified correctly but does not grant the requested access.
func validButInsufficientError(msg string) *ErrorResponse {
	resp := ErrorResponse{
		StripeError: stripe.Error{
			Type:           stripe.ErrorTypePermission,
			Msg:            msg,
			HTTPStatusCode: 403,
		},
	}
	return &resp
}
// invalidCredentialError builds the 403 response used when the credential
// itself is missing or fails verification.
func invalidCredentialError(msg string) *ErrorResponse {
	resp := ErrorResponse{
		StripeError: stripe.Error{
			Type:           stripe.ErrorTypeAuthentication,
			Msg:            msg,
			HTTPStatusCode: 403,
		},
	}
	return &resp
}
// checkPermissions extracts the signed permission token from the request's
// Authorization header (Bearer token, falling back to the Basic-auth
// username), verifies it against key, and checks that it grants acc access
// to res. Returns nil when the request is allowed, otherwise an
// ErrorResponse describing why it was rejected.
func checkPermissions(acc Access, res StripeResource, key []byte, req *http.Request) *ErrorResponse {
	authHeader := req.Header.Get("Authorization")
	if authHeader == "" {
		return invalidCredentialError("Request requires Authorization header")
	}

	// Check for bearer token
	signedPermissions := strings.TrimPrefix(authHeader, "Bearer ")
	if signedPermissions == authHeader {
		// Try basic auth; the token travels as the username, the password
		// is ignored.
		var ok bool
		signedPermissions, _, ok = req.BasicAuth()
		if !ok {
			return invalidCredentialError("Request requires valid Basic or Bearer auth header")
		}
	}

	granted, err := Verify(signedPermissions, key)
	if err != nil {
		return invalidCredentialError(err.Error())
	}

	if !granted.Can(acc, res) {
		return validButInsufficientError("Request requires permission that was not granted")
	}

	// expand[] can pull arbitrary related resources into the response, so it
	// requires blanket access.
	if anyExpand := req.URL.Query().Get("expand[]"); anyExpand != "" && !granted.Can(acc, ResourceAll) {
		// This is a necessary shortcut until such time that Stripe publishes
		// detailed machine-readable API docs, which include the mapping of
		// expand params to response schema/resource.
		return validButInsufficientError("Requests that expand return values must have permissions to all resources")
	}

	return nil
}
// NewStripePermissionsProxy returns an http.Handler that checks each request
// against signed permission grants (verified with stripeKey) and, when
// allowed, forwards it to delegate with the real Stripe key as Basic auth.
func NewStripePermissionsProxy(stripeKey string, delegate http.Handler) http.Handler {
	r := mux.NewRouter()
	stripeKeyAsBytes := []byte(stripeKey)
	for _, rr := range resourceRoutes {
		for access, methods := range accessMethods {
			// Copy loop variables so each closure captures its own values.
			resourceToCheck := rr.sr
			accessToCheck := access
			f := func(rw http.ResponseWriter, req *http.Request) {
				errResp := checkPermissions(accessToCheck, resourceToCheck, stripeKeyAsBytes, req)
				if errResp != nil {
					// Abort the request with a Stripe-style JSON error body.
					// BUGFIX: declare the Content-Type before writing the
					// header so clients parse the body as JSON.
					rw.Header().Set("Content-Type", "application/json")
					rw.WriteHeader(403)
					// Best effort: nothing useful can be done if the client
					// has already gone away.
					_ = json.NewEncoder(rw).Encode(errResp)
					return
				}
				// Swap in the real key for the upstream request.
				req.SetBasicAuth(stripeKey, "")
				delegate.ServeHTTP(rw, req)
			}
			r.PathPrefix(rr.route).HandlerFunc(f).Methods(methods...)
		}
	}
	return r
}
|
package main
import (
// "bytes"
"context"
"encoding/json"
"fmt"
"net"
"net/http"
"os"
"time"
"github.com/containernetworking/cni/pkg/skel"
"github.com/containernetworking/cni/pkg/types"
"github.com/containernetworking/cni/pkg/types/current"
"github.com/containernetworking/cni/pkg/version"
)
const (
	// logPath is the file plugin activity and errors are appended to.
	logPath = "/var/log/ntct.log"
	// defaultSock is the unix socket used when neither "unix" nor "ip" is
	// configured.
	defaultSock = "/opt/cni/bin/ntct.sock"
	// different sock location for openshift
	// defaultSock = "/var/lib/cni/bin/ntct.sock"
)
// PluginConf is whatever you expect your configuration json to be. This is whatever
// is passed in on stdin. Your plugin may wish to expose its functionality via
// runtime args, see CONVENTIONS.md in the CNI spec.
type PluginConf struct {
	// This is the previous result, when called in the context of a chained
	// plugin. Because this plugin supports multiple versions, we'll have to
	// parse this in two passes. If your plugin is not chained, this can be
	// removed (though you may wish to error if a non-chainable plugin is
	// chained.
	// If you need to modify the result before returning it, you will need
	// to actually convert it to a concrete versioned struct.
	RawPrevResult *map[string]interface{} `json:"prevResult"`
	PrevResult    *current.Result         `json:"-"`

	types.NetConf

	// Unix is the path of the daemon's unix socket; IP is an alternative
	// TCP endpoint. When both are empty, Unix defaults to defaultSock.
	Unix string `json:"unix"`
	IP   string `json:"ip"`
}
// parseConfig decodes the JSON configuration (and any chained prevResult)
// supplied on stdin into a PluginConf.
func parseConfig(stdin []byte) (*PluginConf, error) {
	var conf PluginConf
	if err := json.Unmarshal(stdin, &conf); err != nil {
		return nil, fmt.Errorf("failed to parse network configuration: %v", err)
	}

	// When chained, round-trip the raw previous result through JSON and
	// convert it to the current result version. Remove this if your plugin
	// is not chained.
	if conf.RawPrevResult != nil {
		raw, err := json.Marshal(conf.RawPrevResult)
		if err != nil {
			return nil, fmt.Errorf("could not serialize prevResult: %v", err)
		}
		parsed, err := version.NewResult(conf.CNIVersion, raw)
		if err != nil {
			return nil, fmt.Errorf("could not parse prevResult: %v", err)
		}
		conf.RawPrevResult = nil
		if conf.PrevResult, err = current.NewResultFromResult(parsed); err != nil {
			return nil, fmt.Errorf("could not convert result to current version: %v", err)
		}
	}
	// End previous result parsing

	// Fall back to the default unix socket when no endpoint is configured.
	if conf.Unix == "" && conf.IP == "" {
		conf.Unix = defaultSock
	}

	return &conf, nil
}
// appendLog appends d to the plugin log file, creating it (mode 0600) if it
// does not exist. A write error takes precedence over a close error.
func appendLog(d string) error {
	file, err := os.OpenFile(logPath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0600)
	if err != nil {
		return fmt.Errorf("Failed opening file: %s", err)
	}
	// BUGFIX: the file handle was previously leaked on every call.
	_, werr := file.WriteString(d)
	cerr := file.Close()
	if werr != nil {
		return werr
	}
	return cerr
}
// getClient builds the HTTP client used to reach the ntct daemon. When a
// unix socket is configured, every connection is dialed over that socket
// regardless of the request's host.
func getClient(conf *PluginConf) http.Client {
	client := http.Client{
		Timeout: 3 * time.Second, // The whole connection should take less than 3 seconds
	}
	if conf.Unix == "" {
		return client
	}
	client.Transport = &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			var d net.Dialer
			return d.DialContext(ctx, "unix", conf.Unix)
		},
	}
	return client
}
// getURL assembles the daemon URL for uri: an http://IP endpoint when one is
// configured, otherwise the placeholder "unix" host used with the socket
// transport. An empty uri is treated as "/".
func getURL(conf *PluginConf, uri string) string {
	if uri == "" {
		uri = "/"
	}
	if conf.IP != "" {
		return "http://" + conf.IP + uri
	}
	return "http://unix" + uri
}
// cmdType identifies which CNI verb (ADD/DEL/CHECK) is being relayed to the
// daemon.
type cmdType int

// The constants are explicitly typed as cmdType; previously they were
// untyped ints even though cmdType had been declared for them.
const (
	typeAdd cmdType = iota // CNI ADD  -> HTTP PUT
	typeDel                // CNI DEL  -> HTTP DELETE
	typeCheck              // CNI CHECK -> HTTP POST
)
// send serializes the CNI arguments and (eventually) relays them to the
// daemon using an HTTP method derived from cmd. The actual HTTP exchange is
// currently commented out, so only ADD commands leave a trace in the log.
func send(conf *PluginConf, args *skel.CmdArgs, cmd cmdType) error {
	//httpc := getClient(conf)
	buf, err := json.Marshal(args)
	if err != nil {
		return err
	}

	var method string
	switch cmd {
	case typeAdd:
		method = http.MethodPut
	case typeDel:
		method = http.MethodDelete
	case typeCheck:
		method = http.MethodPost
	default:
		return fmt.Errorf("Unknown cmd type: %v", cmd)
	}
	if cmd == typeAdd {
		appendLog(fmt.Sprintf("%s %s\n", method, buf))
	}

	/* request, err := http.NewRequest(method, getURL(conf, "/pod"), bytes.NewReader(buf))
	if err != nil {
		return err
	}
	request.Header.Set("Content-Type", "application/json")
	resp, err := httpc.Do(request)
	if err != nil {
		return err
	}
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("HTTP request failed with: %d", resp.StatusCode)
	}
	*/
	return nil
}
func genericCmd(args *skel.CmdArgs, cmd cmdType) error {
conf, err := parseConfig(args.StdinData)
if err != nil {
return err
}
defer func() {
// Just in case we need to recover
_ = recover()
// Pass through the result for the next plugin
// ignore any errors since we can't do anything about them anyways
if conf.PrevResult == nil {
res := ¤t.Result{}
_ = types.PrintResult(res, conf.CNIVersion)
} else {
_ = types.PrintResult(conf.PrevResult, conf.CNIVersion)
}
}()
return send(conf, args, cmd)
}
// cmdAdd is called for ADD requests; it relays them to the daemon via
// genericCmd.
func cmdAdd(args *skel.CmdArgs) error {
	return genericCmd(args, typeAdd)
}
// cmdDel is called for DELETE requests; it relays them to the daemon via
// genericCmd.
func cmdDel(args *skel.CmdArgs) error {
	return genericCmd(args, typeDel)
}
// cmdCheck is called for CHECK requests; it relays them to the daemon via
// genericCmd.
func cmdCheck(args *skel.CmdArgs) error {
	return genericCmd(args, typeCheck)
}
// main hands control to the CNI plugin skeleton; any top-level error is
// appended to the log file since stdout/stderr belong to the CNI protocol.
func main() {
	if err := skel.PluginMainWithError(cmdAdd, cmdCheck, cmdDel, version.All, "NetScout Prototype v0.0.2"); err != nil {
		_ = appendLog(fmt.Sprintf("%s\n", err.Error()))
	}
}
|
package main
// Invalid: this file is an intentional negative test case and is expected to
// fail compilation. It checks that assignment between variables of two
// different defined types is rejected even when both types resolve to the
// same underlying base type (Go requires an explicit conversion here).
func main() {
	type num int
	type num1 num
	var a num
	var b num1
	a = b
}
|
// Copyright (c) 2019 VMware, Inc. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package k8s
import (
corev1 "k8s.io/api/core/v1"
"k8s.io/cli-runtime/pkg/printers"
"k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
const (
	// LegacyGroupName is the conventional name for the original, ungrouped
	// ("core") Kubernetes API group.
	LegacyGroupName = "core"
)
// Client prepares and exposes dynamic, discovery, and REST clients.
type Client struct {
	// Client performs dynamic (unstructured) resource operations.
	Client dynamic.Interface
	// Disco discovers API groups and resources on the server.
	Disco discovery.DiscoveryInterface
	// CoreRest is a raw REST client configured for the core ("/api") group.
	CoreRest rest.Interface
	// JsonPrinter prints objects as JSON.
	// NOTE(review): New does not populate this field; the zero value is used.
	JsonPrinter printers.JSONPrinter
}
// New builds a *Client from the given kubeconfig path, wiring up dynamic,
// discovery, and core REST clients.
func New(kubeconfig string) (*Client, error) {
	// A fresh config is loaded for each client type because each setup
	// applies its own defaults, which may not be mutually compatible.
	loadCfg := func() (*rest.Config, error) {
		return clientcmd.BuildConfigFromFlags("", kubeconfig)
	}

	dynCfg, err := loadCfg()
	if err != nil {
		return nil, err
	}
	dynClient, err := dynamic.NewForConfig(dynCfg)
	if err != nil {
		return nil, err
	}

	discoCfg, err := loadCfg()
	if err != nil {
		return nil, err
	}
	discoClient, err := discovery.NewDiscoveryClientForConfig(discoCfg)
	if err != nil {
		return nil, err
	}

	restCfg, err := loadCfg()
	if err != nil {
		return nil, err
	}
	setCoreDefaultConfig(restCfg)
	restClient, err := rest.RESTClientFor(restCfg)
	if err != nil {
		return nil, err
	}

	return &Client{Client: dynClient, Disco: discoClient, CoreRest: restClient}, nil
}
// setCoreDefaultConfig fills in the settings a raw REST client needs to
// address the legacy core API group at "/api", leaving any caller-provided
// user agent untouched.
func setCoreDefaultConfig(config *rest.Config) {
	config.APIPath = "/api"
	config.GroupVersion = &corev1.SchemeGroupVersion
	config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
	if config.UserAgent == "" {
		config.UserAgent = rest.DefaultKubernetesUserAgent()
	}
}
|
package notifier
import (
"time"
"github.com/void616/gm.mint.sender/internal/watcher/db/types"
"github.com/void616/gotask"
)
// Task is the notifier's main loop: it polls the DAO for unnotified incoming
// transactions, optimistically marks each as notified, delivers the
// notification over the service's transport (NATS or HTTP), and on failure
// re-schedules the item with an escalating backoff. Runs until the gotask
// token is stopped.
func (n *Notifier) Task(token *gotask.Token) {
	for !token.Stopped() {
		// get list of items that still need a notification
		list, err := n.dao.ListUnnotifiedIncomings(itemsPerShot)
		if err != nil {
			n.logger.WithError(err).Error("Failed to get unsent items")
			token.Sleep(time.Second * 30)
			continue
		}

		// nothing to do; back off before polling again
		if len(list) == 0 {
			token.Sleep(time.Second * 30)
			continue
		}

		// out aborts the rest of the batch after a DB failure so the loop
		// sleeps and retries from a fresh list.
		out := false
		for _, inc := range list {
			if out {
				break
			}

			// Optimistically mark as notified BEFORE sending, recording the
			// first and latest attempt timestamps.
			{
				now := time.Now().UTC()
				if inc.FirstNotifyAt == nil {
					inc.FirstNotifyAt = &now
				}
				inc.NotifyAt = &now
				inc.Notified = true
			}
			if err := n.dao.UpdateIncoming(inc); err != nil {
				n.logger.
					WithError(err).
					WithField("wallet", inc.To.String()).
					WithField("tx", inc.Digest.String()).
					Error("Failed to update incoming")
				token.Sleep(time.Second * 30)
				out = true
				continue
			}

			// Deliver over the transport configured for the service. When a
			// transport is disabled the item stays marked notified and is
			// skipped.
			var notiErr error
			switch inc.Service.Transport {
			case types.ServiceNats:
				if n.natsTrans != nil {
					notiErr = n.natsTrans.NotifyRefilling(inc.Service.Name, inc.To, inc.From, inc.Token, inc.Amount, inc.Digest)
				} else {
					n.logger.Warn("Nats transport is disabled, skipping notification")
					continue
				}
			case types.ServiceHTTP:
				if n.httpTrans != nil {
					if inc.Service.CallbackURL != "" {
						notiErr = n.httpTrans.NotifyRefilling(inc.Service.CallbackURL, inc.Service.Name, inc.To, inc.From, inc.Token, inc.Amount, inc.Digest)
					}
				} else {
					n.logger.Warn("HTTP transport is disabled, skipping notification")
					continue
				}
			default:
				n.logger.Errorf("Transport %v is not implemented", inc.Service.Transport)
				continue
			}

			if notiErr != nil {
				n.logger.
					WithField("wallet", inc.To.String()).
					WithField("tx", inc.Digest.String()).
					WithError(notiErr).
					Error("Failed to notify")

				// Schedule the next attempt with a backoff based on how many
				// minutes have passed since the first attempt.
				when := time.Now().UTC()
				if inc.FirstNotifyAt != nil {
					// mikes = minutes elapsed since the first notification attempt
					mikes := time.Now().UTC().Sub(*inc.FirstNotifyAt).Minutes()
					switch {
					// for 5m: every 1m
					case mikes < 5:
						when = when.Add(time.Minute)
					// then for 30m: every 5m
					case mikes < 35:
						when = when.Add(time.Minute * 5)
					// then for 60m: every 10m
					case mikes < 95:
						when = when.Add(time.Minute * 10)
					// then every 120m
					default:
						when = when.Add(time.Minute * 120)
					}
				} else {
					// No first-attempt timestamp: defer for a year,
					// i.e. effectively never retry.
					when = when.Add(time.Hour * 24 * 365)
				}

				// mark as unnotified so it is picked up again at `when`
				inc.NotifyAt = &when
				inc.Notified = false
				if err := n.dao.UpdateIncoming(inc); err != nil {
					n.logger.
						WithError(err).
						WithField("wallet", inc.To.String()).
						WithField("tx", inc.Digest.String()).
						Error("Failed to update incoming")
					token.Sleep(time.Second * 30)
					out = true
					continue
				}
			} else {
				n.logger.
					WithField("wallet", inc.To.String()).
					WithField("tx", inc.Digest.String()).
					Info("Notified")
			}
		}
	}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.