text stringlengths 11 4.05M |
|---|
package packagen
import (
"bytes"
"io/ioutil"
"path/filepath"
"testing"
qt "github.com/frankban/quicktest"
)
// TestExtendStruct exercises ExtendStruct over a table of options and
// compares its two outputs (the extended struct definition and its
// generated methods) against golden files under testdata/. When the
// package-level -update flag (declared elsewhere in this package) is set,
// the golden files are rewritten instead of compared.
func TestExtendStruct(t *testing.T) {
	for _, tc := range []ExtendOption{
		{
			SrcPkg:       "./testdata/extend/src",
			Src:          "Data",
			DstPkg:       "./testdata/extend/dst",
			Dst:          "ExData",
			Fields:       map[string]string{"i": "int32", "is": "uint32"},
			FieldPrefix:  "field_",
			MethodPrefix: "method_",
		},
	} {
		t.Run(tc.SrcPkg, func(t *testing.T) {
			c := qt.New(t)
			var buf, methods bytes.Buffer
			// ExtendStruct writes the struct definition to buf and the
			// generated methods to methods.
			_, err := ExtendStruct(&buf, &methods, tc)
			c.Assert(err, qt.IsNil)
			fname := filepath.Join("testdata", "extend_"+tc.Dst+".golden")
			mname := filepath.Join("testdata", "extend_"+tc.Dst+"_methods.golden")
			if *update {
				t.Log("update golden file")
				if err := ioutil.WriteFile(fname, buf.Bytes(), 0644); err != nil {
					t.Fatalf("failed to update golden file: %s", err)
				}
				if err := ioutil.WriteFile(mname, methods.Bytes(), 0644); err != nil {
					t.Fatalf("failed to update golden file: %s", err)
				}
			}
			// Compare both outputs against the (possibly refreshed) golden files.
			result, err := ioutil.ReadFile(fname)
			c.Assert(err, qt.IsNil)
			c.Assert(buf.String(), qt.Equals, string(result))
			result, err = ioutil.ReadFile(mname)
			c.Assert(err, qt.IsNil)
			c.Assert(methods.String(), qt.Equals, string(result))
		})
	}
}
|
package main
import (
"bytes"
"flag"
"io/ioutil"
"log"
"os"
"path/filepath"
"text/template"
)
// main builds a macOS installer PackageInfo XML file: it walks the tree
// at -root to compute payload size and file count, then renders the
// pkginfoTemplate to -output. Any failure is fatal.
func main() {
	var (
		flRoot = flag.String(
			"root",
			"",
			"Package the entire contents of the directory tree at root-path, typically a destination root created by xcodebuild(1).",
		)
		flIdentifier = flag.String(
			"identifier",
			"",
			`Specify a unique identifier for this package. The OS X Installer recognizes a package as being an upgrade to an already-installed package only if the package identifiers match, so it is advisable
to set a meaningful, consistent identifier when you build the package. pkgbuild will infer an identifier when building a package from a single component, but will fail otherwise if the identifier
has not been set.`,
		)
		flPkgVersion = flag.String(
			"version",
			"",
			`Specify a version for the package. Packages with the same identifier are compared using this version, to determine if the package is an upgrade or downgrade.
If you don't specify a version, a default of zero is assumed, but this may prevent proper upgrade/downgrade checking.`,
		)
		flOut = flag.String("output", "", "path to output file")
	)
	flag.Parse()
	size, count, err := fileInfo(*flRoot)
	if err != nil {
		log.Fatal(err)
	}
	pkginfo := &info{
		Identifier: *flIdentifier,
		Version:    *flPkgVersion,
		Size:       size,
		NumFiles:   count,
	}
	// Best-effort removal of a stale output file; a missing file is fine,
	// so the error is deliberately ignored.
	os.Remove(*flOut)
	if err := os.MkdirAll(filepath.Dir(*flOut), os.ModePerm); err != nil {
		log.Fatal(err)
	}
	out, err := os.Create(*flOut)
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()
	// {{ scripts }} in the template is bound to (*info).Scripts.
	funcMap := template.FuncMap{"scripts": pkginfo.Scripts}
	tmpl := template.Must(template.New("").
		Funcs(funcMap).
		Parse(pkginfoTemplate),
	)
	if err := tmpl.Execute(out, pkginfo); err != nil {
		log.Fatal(err)
	}
}
// info carries the values substituted into pkginfoTemplate.
type info struct {
	Identifier string // package identifier (-identifier flag)
	Version    string // package version (-version flag)
	Size       int64  // payload size in KiB (see fileInfo)
	NumFiles   int64  // number of filesystem entries under -root
}
// Scripts returns a <scripts> XML fragment referencing the preinstall and
// postinstall scripts found in the local "scripts" directory. It returns
// the empty string when that directory does not exist, and panics when
// "scripts" is not a directory or cannot be read.
func (i *info) Scripts() string {
	fi, err := os.Stat("scripts")
	if os.IsNotExist(err) {
		return ""
	}
	if err != nil {
		panic(err)
	}
	if !fi.IsDir() {
		panic("scripts must be a directory")
	}
	entries, err := ioutil.ReadDir("scripts")
	if err != nil {
		panic(err)
	}
	var out bytes.Buffer
	out.WriteString(" <scripts>\n")
	for _, entry := range entries {
		// Only the two well-known installer script names are referenced.
		switch entry.Name() {
		case "preinstall":
			out.WriteString(` <preinstall file="./preinstall"/>`)
			out.WriteString("\n")
		case "postinstall":
			out.WriteString(` <postinstall file="./postinstall"/>`)
			out.WriteString("\n")
		}
	}
	out.WriteString(" </scripts>")
	return out.String()
}
func fileInfo(path string) (int64, int64, error) {
var size int64
var count int64
err := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error {
if info == nil || err != nil {
return err
}
count++
if !info.IsDir() {
size += info.Size()
}
return err
})
return size / 1024.0, count, err
}
// pkginfoTemplate is the PackageInfo XML document rendered by main.
// {{ scripts }} is bound to (*info).Scripts via a template FuncMap.
var pkginfoTemplate = `<pkg-info format-version="2" identifier="{{ .Identifier }}" version="{{ .Version }}" install-location="/" auth="root">
 <payload installKBytes="{{ .Size }}" numberOfFiles="{{ .NumFiles }}"/>
{{ scripts }}
 <bundle-version/>
 <upgrade-bundle/>
 <update-bundle/>
 <atomic-update-bundle/>
 <strict-identifier/>
 <relocate/>
</pkg-info>`
|
package main
import "fmt"
// main demonstrates twoSumBest on a fixed example, printing the indices
// of the pair that sums to 6.
func main() {
	input := []int{3, 2, 4}
	pair := twoSumBest(input, 6)
	fmt.Println(pair)
}
// twoSumBest returns the indices of two distinct elements of nums that
// sum to target, using a single pass with a complement map (O(n) time,
// O(n) space). The indices are returned in ascending order, consistent
// with the sibling twoSum (the previous version returned them reversed).
// It returns nil when nums has fewer than two elements or no pair sums
// to target.
func twoSumBest(nums []int, target int) []int {
	if len(nums) < 2 {
		return nil
	}
	// seen maps a needed complement value to the index of the element
	// that needs it: after visiting nums[i], seen[target-nums[i]] == i.
	seen := make(map[int]int, len(nums))
	for i, v := range nums {
		if j, ok := seen[v]; ok {
			// nums[j] + v == target, with j < i.
			return []int{j, i}
		}
		seen[target-v] = i
	}
	return nil
}
// twoSum returns a pair of distinct indices i, j such that
// nums[i]+nums[j] == target, or nil when nums has fewer than two
// elements or no such pair exists. It uses the two-pass hash-map
// approach: build a value->index map, then probe it for complements.
func twoSum(nums []int, target int) []int {
	if len(nums) < 2 {
		return nil
	}
	index := make(map[int]int)
	for pos, value := range nums {
		// Later duplicates overwrite earlier ones, which is fine: the
		// probe below rejects pairing an element with itself.
		index[value] = pos
	}
	for pos, value := range nums {
		j, ok := index[target-value]
		if ok && pos != j {
			return []int{pos, j}
		}
	}
	return nil
}
|
/**
* @Author: XiaoLongBao
* @Description: 对interface的了解学习
* @File: interface_test
* @Program: hello world
* @Date: 2021-03-17 09:27
*/
package _interface
import (
"reflect"
"testing"
)
/*
* Go interface 的一个 “坑” 及原理分析 https://mp.weixin.qq.com/s/vNACbdSDxC9S0LOAr7ngLQ
* 针对 interface 使用疑惑。
*/
// Example 1
// TestInterfaceEg1 demonstrates the typed-nil trap: assigning a nil *int
// to an interface{} stores a non-nil interface value, because the
// interface now carries a concrete type (*int) alongside its nil data
// pointer.
func TestInterfaceEg1(t *testing.T) {
	var v interface{}
	v = (*int)(nil)
	// Why isn't this true? We explicitly forced it to nil — is the Go
	// compiler broken? (No: the interface holds the type *int, so v != nil.)
	t.Log(v == nil)
}
// Example 2
// TestInterfaceEg2 shows how assigning a nil typed pointer to an
// interface variable makes the interface itself compare non-nil.
func TestInterfaceEg2(t *testing.T) {
	var data *byte
	var in interface{}
	// Freshly declared, both print as nil and both compare equal to nil.
	t.Log(data, data == nil)
	t.Log(in, in == nil)
	in = data
	// Strange: merely assigning data to in changes the world. It still
	// prints as nil, but the comparison is now false — the interface
	// carries the *byte type.
	t.Log(in, in == nil)
}
/*
 * Analysis:
 * The root cause of interface nil checks differing from intuition is that
 * an interface is not a plain pointer type, although it looks like one —
 * which has misled many people.
 * There are two underlying interface representations: runtime.iface and
 * runtime.eface.
 * runtime.eface: an interface that declares no methods (the "empty interface").
 * runtime.iface: an interface that declares methods.
 * Their underlying data structures are:
 * type eface struct {
 *     _type *_type
 *     data  unsafe.Pointer
 * }
 *
 * type iface struct {
 *     tab  *itab
 *     data unsafe.Pointer
 * }
 *
 * An interface is not a single value: it is a (type, value) pair.
 * So the familiar nil is not that nil — an interface compares equal to nil
 * only when BOTH its type and its value are nil. Ways to cope:
 * 1. Use reflection to test the value for nil; reflect has special
 *    handling for interface types.
 * 2. Nil-check the concrete value before assigning it to the interface.
 * 3. Return the concrete type rather than an interface.
 */
// Use reflection to test whether the value stored inside an interface is nil.
func TestInterfaceReflect(t *testing.T) {
	var data *byte
	var in interface{}
	in = data
	t.Log(in, func(i interface{}) bool {
		vi := reflect.ValueOf(i)
		// When the dynamic type is a pointer, report whether the pointer
		// itself is nil (this is what a plain i == nil cannot see).
		if vi.Kind() == reflect.Ptr {
			return vi.IsNil()
		}
		return false
	}(in))
}
|
package main
import (
"context"
"errors"
"flag"
"fmt"
"net/url"
"os"
"sync"
"github.com/folbricht/desync"
)
// cacheUsage is the help text for the "desync cache" sub-command.
const cacheUsage = `desync cache [options] <caibx> [<caibx>..]
Read chunk IDs in caibx files from one or more stores without creating a blob.
Can be used to pre-populate a local cache.`

// cache reads every chunk referenced by the given caibx files from the
// configured stores and discards the data. With -c the store router is
// wrapped in a local cache store, so the net effect is to pre-populate
// that cache. n goroutines fetch chunks concurrently; the first error
// cancels the feed, and all collected errors are printed before the
// process exits with status 1.
func cache(ctx context.Context, args []string) error {
	var (
		cacheLocation string
		n int
		storeLocations = new(multiArg)
		stores []desync.Store
	)
	flags := flag.NewFlagSet("cache", flag.ExitOnError)
	flags.Usage = func() {
		fmt.Fprintln(os.Stderr, cacheUsage)
		flags.PrintDefaults()
	}
	flags.Var(storeLocations, "s", "casync store location, can be multiples")
	flags.StringVar(&cacheLocation, "c", "", "use local store as cache")
	flags.IntVar(&n, "n", 10, "number of goroutines")
	flags.Parse(args)
	if flags.NArg() < 1 {
		return errors.New("Not enough arguments. See -h for help.")
	}
	// Checkout the store
	if len(storeLocations.list) == 0 {
		return errors.New("No casync store provided. See -h for help.")
	}
	// Read the input files and merge all chunk IDs in a map to de-dup them
	ids := make(map[desync.ChunkID]struct{})
	for _, name := range flags.Args() {
		c, err := readCaibxFile(name)
		if err != nil {
			return err
		}
		for _, c := range c.Chunks {
			ids[c.ID] = struct{}{}
		}
	}
	// Go through each store passed in the command line, initialize them, and
	// build a list
	for _, location := range storeLocations.list {
		loc, err := url.Parse(location)
		if err != nil {
			return fmt.Errorf("Unable to parse store location %s : %s", location, err)
		}
		var s desync.Store
		switch loc.Scheme {
		case "ssh":
			r, err := desync.NewRemoteSSHStore(loc, n)
			if err != nil {
				return err
			}
			// Deferring inside the loop is intentional: every SSH store
			// must stay open until this function returns.
			defer r.Close()
			s = r
		case "http", "https":
			s, err = desync.NewRemoteHTTPStore(loc)
			if err != nil {
				return err
			}
		case "":
			s, err = desync.NewLocalStore(loc.Path)
			if err != nil {
				return err
			}
		default:
			return fmt.Errorf("Unsupported store access scheme %s", loc.Scheme)
		}
		stores = append(stores, s)
	}
	// Combine all stores into one router
	var s desync.Store = desync.NewStoreRouter(stores...)
	// See if we want to use a local store as cache, if so, attach a cache to
	// the router
	if cacheLocation != "" {
		cache, err := desync.NewLocalStore(cacheLocation)
		if err != nil {
			return err
		}
		cache.UpdateTimes = true
		s = desync.NewCache(s, cache)
	}
	var (
		wg sync.WaitGroup
		in = make(chan desync.ChunkID)
		mu sync.Mutex
		errs []error
	)
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	// Helper function to record and deal with any errors in the goroutines
	recordError := func(err error) {
		mu.Lock()
		defer mu.Unlock()
		errs = append(errs, err)
		cancel()
	}
	// Start the workers
	for i := 0; i < n; i++ {
		wg.Add(1)
		go func() {
			// Fetching and discarding the chunk is the whole point: a
			// cache store in front records it on the way through.
			for id := range in {
				if _, err := s.GetChunk(id); err != nil {
					recordError(err)
				}
			}
			wg.Done()
		}()
	}
	// Feed the workers, stop on any errors
loop:
	for id := range ids {
		// See if we're meant to stop
		select {
		case <-ctx.Done():
			break loop
		default:
		}
		in <- id
	}
	close(in)
	wg.Wait()
	if len(errs) != 0 {
		for _, e := range errs {
			fmt.Fprintln(os.Stderr, e)
		}
		os.Exit(1)
	}
	return nil
}
|
package model
import (
"database/sql"
"strconv"
"time"
"github.com/tribalmedia/vista/setting"
)
// Team is a row of the teams table.
type Team struct {
	ID int `db:"id"`
	Name string `db:"name"`
	PictureURL sql.NullString `db:"picture_url"` // nullable column
	Description sql.NullString `db:"description"` // nullable column
	Created time.Time `db:"created"`
	Modified time.Time `db:"modified"`
}
// ListTeam is a collection of teams.
type ListTeam struct {
	List []Team
}
// GetAllTeam returns the id and name of every team in the database.
// Query and scan errors are logged (not returned); callers always
// receive a (possibly empty) ListTeam.
func GetAllTeam() ListTeam {
	var teams ListTeam
	rows, err := DB.Queryx("SELECT id, name from teams")
	if err != nil {
		// Previously the query error was silently discarded.
		Logger.Error(err.Error())
		return teams
	}
	// Release the connection back to the pool when done (was leaked).
	defer rows.Close()
	team := Team{}
	for rows.Next() {
		if err := rows.StructScan(&team); err != nil {
			Logger.Error(err.Error())
		}
		teams.List = append(teams.List, team)
	}
	// Surface any error that terminated iteration early.
	if err := rows.Err(); err != nil {
		Logger.Error(err.Error())
	}
	return teams
}
// GetTeamByID gets team by team's id
// It returns the team. A missing row is not treated as an error: the
// zero-value Team is returned. Other query errors are logged and also
// yield the zero value.
func GetTeamByID(id int) Team {
	team := Team{}
	err := DB.Get(&team, "SELECT id, name, picture_url, description, created, modified FROM teams WHERE id =?", id)
	if err != nil && err != sql.ErrNoRows {
		Logger.Error(err.Error())
	}
	return team
}
// EditTeam updates a team's name, description and picture URL by id.
// NOTE(review): a failed update calls Logger.Fatal, which presumably
// terminates the process — confirm whether logging and returning an
// error would be more appropriate here.
func EditTeam(data Team) {
	query := "UPDATE teams SET name=:name, description=:description, picture_url=:picture_url WHERE id=:id"
	_, err := DB.NamedExec(query, data)
	if err != nil {
		Logger.Fatal(err.Error())
	}
}
// SaveTeam inserts a new team row and returns its auto-generated id.
// NOTE(review): on insert failure Logger.Fatal presumably exits the
// process; if it does not, res.LastInsertId() below would dereference a
// nil result — confirm Logger.Fatal's behavior. The LastInsertId error
// is deliberately ignored.
func SaveTeam(team Team) int64 {
	res, err := DB.NamedExec("INSERT INTO teams(name,picture_url,description) VALUES(:name, :picture_url, :description)", team)
	if err != nil {
		Logger.Fatal(err.Error())
	}
	idTeam, _ := res.LastInsertId()
	return idTeam
}
// ValidateTeam validates team fields before saving and returns a map of
// field name to error message; the map is empty when everything is
// valid. A map is returned so more validations can be added in future.
func ValidateTeam(team Team, contentLength int64) map[string]string {
	problems := make(map[string]string)
	// A later check on the same key overwrites an earlier one, so the
	// empty-name message takes precedence over the too-long message.
	if len(team.Name) > setting.NameMaxLength {
		problems["name"] = "Team's name can't be more than " + strconv.Itoa(setting.NameMaxLength) + " characters."
	}
	if team.Name == "" {
		problems["name"] = "Team's name can't be empty."
	}
	if len(team.Description.String) > setting.DescriptionMaxLength {
		problems["description"] = "Your description can't be more than " + strconv.Itoa(setting.DescriptionMaxLength) + " characters."
	}
	if contentLength > setting.FileMaxSize {
		problems["photo"] = "File size must be less than 3 MB"
	}
	return problems
}
// GetTeams returns every team (all columns), newest id first. Query and
// scan errors are logged; callers always receive a (possibly empty)
// ListTeam.
func GetTeams() ListTeam {
	var listTeam ListTeam
	rows, err := DB.Queryx("SELECT id, name, picture_url, description, created, modified FROM teams ORDER BY teams.id DESC")
	if err != nil {
		// Previously the query error was silently discarded.
		Logger.Error(err.Error())
		return listTeam
	}
	// Release the connection back to the pool when done (was leaked).
	defer rows.Close()
	for rows.Next() {
		var team Team
		if err := rows.StructScan(&team); err != nil {
			Logger.Error(err.Error())
		}
		listTeam.List = append(listTeam.List, team)
	}
	// Surface any error that terminated iteration early.
	if err := rows.Err(); err != nil {
		Logger.Error(err.Error())
	}
	return listTeam
}
// GetTeamAndMems returns one map per team with keys "Team" (the Team
// row) and "Member" (the result of GetMemberOfTeam for that team's id).
// Query and scan errors are logged; callers always receive a (possibly
// empty) slice.
func GetTeamAndMems() []map[string]interface{} {
	var listFull []map[string]interface{}
	rows, err := DB.Queryx("SELECT id, name, picture_url, description, created, modified FROM teams")
	if err != nil {
		// Previously the query error was silently discarded.
		Logger.Error(err.Error())
		return listFull
	}
	// Release the connection back to the pool when done (was leaked).
	defer rows.Close()
	for rows.Next() {
		var team Team
		if err := rows.StructScan(&team); err != nil {
			Logger.Error(err.Error())
		}
		m := map[string]interface{}{
			"Team":   team,
			"Member": GetMemberOfTeam(team.ID),
		}
		listFull = append(listFull, m)
	}
	// Surface any error that terminated iteration early.
	if err := rows.Err(); err != nil {
		Logger.Error(err.Error())
	}
	return listFull
}
|
package json
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
)
// Foo is a sample payload used to demonstrate JSON file round-tripping.
type Foo struct {
	Name string
	Body string
	Time int64 // nanosecond timestamp in the example value
}
// WriteJsonToFile marshals a sample Foo value, writes it to a_foo.json,
// reads the file back, and prints the round-tripped JSON. Each step's
// error is now checked before its result is used (previously the marshal
// and write errors were overwritten and checked only after printing).
func WriteJsonToFile() {
	filename := "a_foo.json"
	fooBar := Foo{"Bar", "Hello", 1294706395881547000}
	fmt.Println("\nWriting json ", fooBar, "to file: ", filename)
	marshalledFooBar, err := json.Marshal(fooBar)
	if err != nil {
		log.Fatal(err)
	}
	if err := ioutil.WriteFile(filename, marshalledFooBar, 0644); err != nil {
		log.Fatal(err)
	}
	fmt.Println("\nRead json from file", filename)
	dat, err := ioutil.ReadFile(filename)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(dat))
}
|
// Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package colexeccmp
import "github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
// ComparisonExprAdapter is a utility interface that is implemented by several
// structs that behave as an adapter from tree.ComparisonExpr to a vectorized
// friendly model.
type ComparisonExprAdapter interface {
	// Eval evaluates the comparison on the given datums and returns the
	// resulting datum (or an error).
	Eval(left, right tree.Datum) (tree.Datum, error)
}
// NewComparisonExprAdapter returns a new ComparisonExprAdapter for the provided
// expression.
// The concrete adapter is chosen by the expression's shape: comparisons
// with a sub-operator get their own adapter; otherwise the operator is
// folded to a canonical form and the adapter variant encodes the three
// resulting booleans — whether the builtin accepts NULL arguments,
// whether the operands were flipped, and whether the result must be
// negated. This keeps each adapter's Eval free of per-call branching.
func NewComparisonExprAdapter(
	expr *tree.ComparisonExpr, evalCtx *tree.EvalContext,
) ComparisonExprAdapter {
	base := cmpExprAdapterBase{
		fn: expr.Fn.Fn,
		evalCtx: evalCtx,
	}
	op := expr.Operator
	if op.HasSubOperator() {
		return &cmpWithSubOperatorExprAdapter{
			cmpExprAdapterBase: base,
			expr: expr,
		}
	}
	nullable := expr.Fn.NullableArgs
	// Only the flipped and negate flags are needed; the folded operands
	// are discarded (nil placeholders are passed in).
	_, _, _, flipped, negate := tree.FoldComparisonExpr(op, nil /* left */, nil /* right */)
	if nullable {
		if flipped {
			if negate {
				return &cmpNullableFlippedNegateExprAdapter{cmpExprAdapterBase: base}
			}
			return &cmpNullableFlippedExprAdapter{cmpExprAdapterBase: base}
		}
		if negate {
			return &cmpNullableNegateExprAdapter{cmpExprAdapterBase: base}
		}
		return &cmpNullableExprAdapter{cmpExprAdapterBase: base}
	}
	if flipped {
		if negate {
			return &cmpFlippedNegateExprAdapter{cmpExprAdapterBase: base}
		}
		return &cmpFlippedExprAdapter{cmpExprAdapterBase: base}
	}
	if negate {
		return &cmpNegateExprAdapter{cmpExprAdapterBase: base}
	}
	return &cmpExprAdapter{cmpExprAdapterBase: base}
}
|
package p2pv2
import (
"context"
"errors"
"fmt"
"github.com/golang/protobuf/proto"
prom "github.com/prometheus/client_golang/prometheus"
"github.com/xuperchain/xupercore/lib/metrics"
"time"
"github.com/xuperchain/xupercore/kernel/common/xaddress"
knet "github.com/xuperchain/xupercore/kernel/network"
"github.com/xuperchain/xupercore/kernel/network/config"
nctx "github.com/xuperchain/xupercore/kernel/network/context"
"github.com/xuperchain/xupercore/kernel/network/p2p"
"github.com/xuperchain/xupercore/lib/logs"
pb "github.com/xuperchain/xupercore/protos"
ipfsaddr "github.com/ipfs/go-ipfs-addr"
"github.com/libp2p/go-libp2p"
circuit "github.com/libp2p/go-libp2p-circuit"
"github.com/libp2p/go-libp2p-core/host"
"github.com/libp2p/go-libp2p-core/network"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/libp2p/go-libp2p-core/protocol"
"github.com/libp2p/go-libp2p-kad-dht"
record "github.com/libp2p/go-libp2p-record"
secio "github.com/libp2p/go-libp2p-secio"
"github.com/multiformats/go-multiaddr"
"github.com/patrickmn/go-cache"
)
const (
	// ServerName is the name this implementation registers under in the
	// kernel network factory (see init below).
	ServerName = "p2pv2"
	// namespace is used both as the protocol prefix and as the DHT
	// validator namespace.
	namespace = "xuper"
	// retry is the maximum number of dial passes made by connectPeer.
	retry = 10
)
// init registers this p2p implementation with the kernel network factory.
func init() {
	knet.Register(ServerName, NewP2PServerV2)
}
var (
	// protocol prefix
	prefix = fmt.Sprintf("/%s", namespace)
	// protocol version
	protocolID = fmt.Sprintf("%s/2.0.0", prefix)
	// MaxBroadCastPeers define the maximum number of common peers to broadcast messages
	MaxBroadCastPeers = 20
)
// Sentinel errors returned by the server's lifecycle methods.
var (
	ErrGenerateOpts = errors.New("generate host opts error")
	ErrCreateHost = errors.New("create host error")
	ErrCreateKadDht = errors.New("create kad dht error")
	ErrCreateStreamPool = errors.New("create stream pool error")
	ErrCreateBootStrap = errors.New("create bootstrap error pool error")
	ErrConnectBootStrap = errors.New("error to connect to all bootstrap")
	ErrLoadAccount = errors.New("load account error")
	ErrStoreAccount = errors.New("dht store account error")
	ErrConnect = errors.New("connect all boot and static peer error")
)
// P2PServerV2 is the node in the network
type P2PServerV2 struct {
	ctx *nctx.NetCtx
	log logs.Logger
	config *config.NetConf
	id peer.ID
	host host.Host
	kdht *dht.IpfsDHT
	streamPool *StreamPool
	dispatcher p2p.Dispatcher
	// cancel stops the background routing-table logger started by Start.
	cancel context.CancelFunc
	// staticNodes maps blockchain name => statically configured peers.
	staticNodes map[string][]peer.ID
	// local host account
	account string
	// accounts store remote peer account: key:account => v:peer.ID
	// accounts as cache, store in dht
	accounts *cache.Cache
}
// Compile-time check that P2PServerV2 satisfies p2p.Server.
var _ p2p.Server = &P2PServerV2{}
// NewP2PServerV2 create P2PServerV2 instance
func NewP2PServerV2() p2p.Server {
	return &P2PServerV2{}
}
// Init initialize p2p server using given config
// The sequence is: build the libp2p host, create and (unless hidden)
// bootstrap the kademlia DHT, load the local account from the key dir,
// create the dispatcher and stream pool, record static nodes, apply the
// broadcast-peer limit, and finally dial all boot/static peers. Each
// failure is mapped to one of the package-level Err* sentinels.
func (p *P2PServerV2) Init(ctx *nctx.NetCtx) error {
	p.ctx = ctx
	p.log = ctx.GetLog()
	p.config = ctx.P2PConf
	// host
	cfg := ctx.P2PConf
	opts, err := genHostOption(ctx)
	if err != nil {
		p.log.Error("genHostOption error", "error", err)
		return ErrGenerateOpts
	}
	ho, err := libp2p.New(ctx, opts...)
	if err != nil {
		p.log.Error("Create p2p host error", "error", err)
		return ErrCreateHost
	}
	p.id = ho.ID()
	p.host = ho
	p.log.Trace("Host", "address", p.getMultiAddr(p.host.ID(), p.host.Addrs()), "config", *cfg)
	// dht
	dhtOpts := []dht.Option{
		dht.Mode(dht.ModeServer),
		dht.RoutingTableRefreshPeriod(10 * time.Second),
		dht.ProtocolPrefix(protocol.ID(prefix)),
		// Validator for the "xuper" namespace used by setKdhtValue.
		dht.NamespacedValidator(namespace, &record.NamespacedValidator{
			namespace: blankValidator{},
		}),
	}
	if p.kdht, err = dht.New(ctx, ho, dhtOpts...); err != nil {
		return ErrCreateKadDht
	}
	// Hidden nodes stay out of the DHT bootstrap.
	if !cfg.IsHidden {
		if err = p.kdht.Bootstrap(ctx); err != nil {
			return ErrCreateBootStrap
		}
	}
	keyPath := ctx.EnvCfg.GenDataAbsPath(ctx.EnvCfg.KeyDir)
	p.account, err = xaddress.LoadAddress(keyPath)
	if err != nil {
		return ErrLoadAccount
	}
	p.accounts = cache.New(cache.NoExpiration, cache.NoExpiration)
	// dispatcher
	p.dispatcher = p2p.NewDispatcher(ctx)
	p.streamPool, err = NewStreamPool(ctx, p)
	if err != nil {
		return ErrCreateStreamPool
	}
	// set static nodes
	setStaticNodes(ctx, p)
	// set broadcast peers limitation
	MaxBroadCastPeers = cfg.MaxBroadcastPeers
	if err := p.connect(); err != nil {
		p.log.Error("connect all boot and static peer error")
		return ErrConnect
	}
	return nil
}
// genHostOption assembles the libp2p options for this node from the
// network config: the listen address, relay-hop support, optional NAT
// port mapping, and a security transport — either the package's custom
// TLS transport (ID/NewTLS, defined elsewhere in this package) or secio
// — each loading the node identity key from cfg.KeyPath.
func genHostOption(ctx *nctx.NetCtx) ([]libp2p.Option, error) {
	cfg := ctx.P2PConf
	muAddr, err := multiaddr.NewMultiaddr(cfg.Address)
	if err != nil {
		return nil, err
	}
	opts := []libp2p.Option{
		libp2p.ListenAddrs(muAddr),
		libp2p.EnableRelay(circuit.OptHop),
	}
	if cfg.IsNat {
		opts = append(opts, libp2p.NATPortMap())
	}
	if cfg.IsTls {
		priv, err := p2p.GetPemKeyPairFromPath(cfg.KeyPath)
		if err != nil {
			return nil, err
		}
		opts = append(opts, libp2p.Identity(priv))
		opts = append(opts, libp2p.Security(ID, NewTLS(cfg.KeyPath, cfg.ServiceName)))
	} else {
		priv, err := p2p.GetKeyPairFromPath(cfg.KeyPath)
		if err != nil {
			return nil, err
		}
		opts = append(opts, libp2p.Identity(priv))
		opts = append(opts, libp2p.Security(secio.ID, secio.New))
	}
	return opts, nil
}
// setStaticNodes parses the statically configured node addresses for
// every blockchain and stores the resulting peer IDs on the server.
// Addresses that fail to parse are logged and skipped.
func setStaticNodes(ctx *nctx.NetCtx, p *P2PServerV2) {
	nodes := make(map[string][]peer.ID, len(ctx.P2PConf.StaticNodes))
	for chain, addrs := range ctx.P2PConf.StaticNodes {
		ids := make([]peer.ID, 0, len(addrs))
		for _, addr := range addrs {
			peerID, err := p2p.GetPeerIDByAddress(addr)
			if err != nil {
				p.log.Warn("static node addr error", "peerAddr", addr)
				continue
			}
			ids = append(ids, peerID)
		}
		nodes[chain] = ids
	}
	p.staticNodes = nodes
}
// setKdhtValue publishes this node's identity into the DHT in both
// directions: account => multiaddr and peer.ID => account. Failures are
// logged but not fatal — peers simply may fail to resolve this node.
func (p *P2PServerV2) setKdhtValue() {
	// store: account => address
	account := GenAccountKey(p.account)
	address := p.getMultiAddr(p.host.ID(), p.host.Addrs())
	err := p.kdht.PutValue(context.Background(), account, []byte(address))
	if err != nil {
		p.log.Error("dht put account=>address value error", "error", err)
	}
	// store: peer.ID => account
	id := GenPeerIDKey(p.id)
	err = p.kdht.PutValue(context.Background(), id, []byte(p.account))
	if err != nil {
		p.log.Error("dht put id=>account value error", "error", err)
	}
}
// Start start the node
// It installs the protocol stream handler, publishes this node's account
// and address to the DHT, and launches a goroutine that logs the routing
// table size every 180 seconds until the cancel stored on the server (see
// Stop) fires.
func (p *P2PServerV2) Start() {
	p.log.Trace("StartP2PServer", "address", p.host.Addrs())
	p.host.SetStreamHandler(protocol.ID(protocolID), p.streamHandler)
	p.setKdhtValue()
	ctx, cancel := context.WithCancel(p.ctx)
	p.cancel = cancel
	t := time.NewTicker(time.Second * 180)
	go func() {
		defer t.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-t.C:
				p.log.Trace("RoutingTable", "id", p.host.ID(), "size", p.kdht.RoutingTable().Size())
				// p.kdht.RoutingTable().Print()
			}
		}
	}()
}
// connect dials every configured boot node and static node. It fails
// only when boot nodes were configured and not a single dial attempt
// succeeded.
func (p *P2PServerV2) connect() error {
	addrs := append([]string{}, p.config.BootNodes...)
	for _, staticPeers := range p.config.StaticNodes {
		addrs = append(addrs, staticPeers...)
	}
	if p.connectPeerByAddress(addrs) == 0 && len(p.config.BootNodes) != 0 {
		return ErrConnectBootStrap
	}
	return nil
}
// streamHandler wraps every inbound libp2p stream in the server's stream
// pool; pool failures are logged and the stream is effectively dropped.
func (p *P2PServerV2) streamHandler(netStream network.Stream) {
	if _, err := p.streamPool.NewStream(p.ctx, netStream); err != nil {
		p.log.Warn("new stream error")
	}
}
// Stop stop the node
// It first cancels the background routing-table logger started by Start
// (so the goroutine is not left observing the DHT while it is being torn
// down), then closes the DHT and the libp2p host.
func (p *P2PServerV2) Stop() {
	p.log.Info("StopP2PServer")
	if p.cancel != nil {
		p.cancel()
	}
	p.kdht.Close()
	p.host.Close()
}
// PeerID return the peer ID
func (p *P2PServerV2) PeerID() string {
	return p.id.Pretty()
}
// NewSubscriber creates a subscriber for the given message type backed
// by v, forwarding any subscriber options to the p2p package.
func (p *P2PServerV2) NewSubscriber(typ pb.XuperMessage_MessageType, v interface{}, opts ...p2p.SubscriberOption) p2p.Subscriber {
	return p2p.NewSubscriber(p.ctx, typ, v, opts...)
}
// Register register message subscriber to handle messages
func (p *P2PServerV2) Register(sub p2p.Subscriber) error {
	return p.dispatcher.Register(sub)
}
// UnRegister remove message subscriber
func (p *P2PServerV2) UnRegister(sub p2p.Subscriber) error {
	return p.dispatcher.UnRegister(sub)
}
// HandleMessage dispatches an inbound message from a stream to the
// registered subscribers. Dispatch errors are logged and swallowed so a
// bad message never tears down the stream. When the metric switch is on,
// received count/bytes and handling latency are recorded per chain and
// message type.
func (p *P2PServerV2) HandleMessage(stream p2p.Stream, msg *pb.XuperMessage) error {
	if p.dispatcher == nil {
		p.log.Warn("dispatcher not ready, omit", "msg", msg)
		return nil
	}
	if p.ctx.EnvCfg.MetricSwitch {
		tm := time.Now()
		defer func() {
			labels := prom.Labels{
				metrics.LabelBCName: msg.GetHeader().GetBcname(),
				metrics.LabelMessageType: msg.GetHeader().GetType().String(),
			}
			metrics.NetworkMsgReceivedCounter.With(labels).Inc()
			metrics.NetworkMsgReceivedBytesCounter.With(labels).Add(float64(proto.Size(msg)))
			metrics.NetworkServerHandlingHistogram.With(labels).Observe(time.Since(tm).Seconds())
		}()
	}
	if err := p.dispatcher.Dispatch(msg, stream); err != nil {
		p.log.Warn("handle new message dispatch error", "log_id", msg.GetHeader().GetLogid(),
			"type", msg.GetHeader().GetType(), "from", msg.GetHeader().GetFrom(), "error", err)
		return nil // not return err
	}
	return nil
}
// Context returns the network context this server was initialized with.
func (p *P2PServerV2) Context() *nctx.NetCtx {
	return p.ctx
}
// PeerInfo reports this node's identity plus one entry per peer in the
// DHT routing table, resolving each peer's account from the DHT.
func (p *P2PServerV2) PeerInfo() pb.PeerInfo {
	peerInfo := pb.PeerInfo{
		Id: p.host.ID().Pretty(),
		Address: p.getMultiAddr(p.host.ID(), p.host.Addrs()),
		Account: p.account,
	}
	peerStore := p.host.Peerstore()
	for _, peerID := range p.kdht.RoutingTable().ListPeers() {
		key := GenPeerIDKey(peerID)
		// On lookup failure account is nil, so the entry is still
		// appended, just with an empty Account string.
		account, err := p.kdht.GetValue(context.Background(), key)
		if err != nil {
			p.log.Warn("get account error", "peerID", peerID)
		}
		addrInfo := peerStore.PeerInfo(peerID)
		remotePeerInfo := &pb.PeerInfo{
			Id: peerID.String(),
			Address: p.getMultiAddr(addrInfo.ID, addrInfo.Addrs),
			Account: string(account),
		}
		peerInfo.Peer = append(peerInfo.Peer, remotePeerInfo)
	}
	return peerInfo
}
// getMultiAddr renders a peer ID plus its listen addresses as a single
// multiaddr string. It returns "" when no address could be produced;
// conversion failures are logged.
func (p *P2PServerV2) getMultiAddr(peerID peer.ID, addrs []multiaddr.Multiaddr) string {
	info := &peer.AddrInfo{ID: peerID, Addrs: addrs}
	full, err := peer.AddrInfoToP2pAddrs(info)
	if err != nil {
		p.log.Warn("gen multi addr error", "peerID", p.host.ID(), "addr", p.host.Addrs())
	}
	if len(full) == 0 {
		return ""
	}
	// Only the first rendered address is reported.
	return full[0].String()
}
// connectPeerByAddress provides connection support using peer addresses
// (netURLs); it returns the number of peers successfully connected.
func (p *P2PServerV2) connectPeerByAddress(addresses []string) int {
	return p.connectPeer(p.getAddrInfos(addresses))
}
// getAddrInfos converts string multiaddresses into peer.AddrInfo values,
// logging and skipping any address that fails to parse or convert.
func (p *P2PServerV2) getAddrInfos(addresses []string) []peer.AddrInfo {
	infos := make([]peer.AddrInfo, 0, len(addresses))
	for _, address := range addresses {
		parsed, err := ipfsaddr.ParseString(address)
		if err != nil {
			p.log.Error("p2p: parse peer address error", "peerAddr", parsed, "error", err)
			continue
		}
		info, err := peer.AddrInfoFromP2pAddr(parsed.Multiaddr())
		if err != nil {
			p.log.Error("p2p: get peer node info error", "peerAddr", parsed, "error", err)
			continue
		}
		infos = append(infos, *info)
	}
	return infos
}
// connectPeer dials the given peers and returns how many connections
// succeeded. When every dial in a pass fails it retries, up to the
// package-level retry limit, sleeping between passes; it stops as soon
// as any pass establishes at least one connection.
func (p *P2PServerV2) connectPeer(addrInfos []peer.AddrInfo) int {
	if len(addrInfos) == 0 {
		return 0
	}
	success := 0
	// Use a distinct local name instead of shadowing the package const
	// retry, and skip the back-off sleep after the final attempt.
	for attempt := retry; attempt > 0; attempt-- {
		for _, addrInfo := range addrInfos {
			if err := p.host.Connect(p.ctx, addrInfo); err != nil {
				p.log.Error("p2p: connection with peer node error", "error", err)
				continue
			}
			success++
			p.log.Info("p2p: connection established", "addrInfo", addrInfo)
		}
		if success > 0 {
			break
		}
		if attempt > 1 {
			time.Sleep(3 * time.Second)
		}
	}
	return success
}
|
package server
import (
"context"
"github.com/parulraich/grpcAssignment/calculatorpb/proto"
"io"
"log"
"time"
)
// CalculatorHandler implements the CalculatorService gRPC server.
type CalculatorHandler struct{}
// Square is a unary RPC returning the square of the request number.
func (ch *CalculatorHandler) Square(ctx context.Context, request *calculatorpb.CalculatorRequest) (*calculatorpb.CalculatorResponse, error) {
	response := &calculatorpb.CalculatorResponse{}
	response.Result = request.GetNumber() * request.GetNumber()
	return response, nil
}
// ArmstrongNumber reports whether the request number is an Armstrong
// (narcissistic) number: one that equals the sum of its digits each
// raised to the power of the digit count. The response Result is the
// number itself when it is an Armstrong number and 0 otherwise.
// The previous version always cubed digits, which is only correct for
// three-digit inputs; this generalizes to any digit count.
func (ch *CalculatorHandler) ArmstrongNumber(ctx context.Context, request *calculatorpb.CalculatorRequest) (*calculatorpb.ArmstrongNumberResponse, error) {
	response := &calculatorpb.ArmstrongNumberResponse{}
	number := request.GetNumber()
	if number < 0 {
		// Negative numbers are never Armstrong numbers.
		return response, nil
	}
	// Count the digits of number (0 counts as one digit).
	digits := 1
	for rest := number / 10; rest > 0; rest /= 10 {
		digits++
	}
	// Sum each digit raised to the digit count.
	var sum int32
	for rest := number; ; rest /= 10 {
		digit := rest % 10
		power := int32(1)
		for i := 0; i < digits; i++ {
			power *= digit
		}
		sum += power
		if rest < 10 {
			break
		}
	}
	if sum == number {
		response.Result = sum
	}
	return response, nil
}
//change
// Server Streaming
// PrimeFactors streams the prime factorization of the request number in
// ascending order (server-streaming RPC). Send errors now abort the
// stream instead of being silently ignored.
func (ch *CalculatorHandler) PrimeFactors(request *calculatorpb.CalculatorRequest, stream calculatorpb.CalculatorService_PrimeFactorsServer) error {
	number := request.GetNumber()
	var factor int32 = 2
	for number > 1 {
		if number%factor == 0 {
			if err := stream.Send(&calculatorpb.CalculatorResponse{
				Result: factor,
			}); err != nil {
				return err
			}
			// Small delay between factors to make the streaming visible.
			time.Sleep(10 * time.Millisecond)
			number = number / factor
		} else {
			factor++
		}
	}
	return nil
}
// Client Streaming
// Average consumes a client stream of numbers and replies with their
// arithmetic mean once the client closes the stream. Fixes over the
// previous version: the request is no longer logged before the error
// check (it logged a zero at EOF), an empty stream no longer divides by
// zero, and receive errors are returned to gRPC instead of killing the
// whole process with log.Fatal.
func (ch *CalculatorHandler) Average(stream calculatorpb.CalculatorService_AverageServer) error {
	var sum, counter int32
	for {
		req, err := stream.Recv()
		if err == io.EOF {
			var avg float32
			if counter > 0 {
				avg = float32(sum) / float32(counter)
			}
			return stream.SendAndClose(&calculatorpb.CalculatorAverageResponse{
				Result: avg,
			})
		}
		if err != nil {
			return err
		}
		log.Println("received", req.GetNumber())
		sum += req.GetNumber()
		counter++
	}
}
// BiDirectional Streaming
// OddEven replies to every received number with whether it is odd or
// even. EOF ends the stream cleanly; receive and send errors are
// returned to gRPC instead of terminating the process with log.Fatal,
// and the unreachable trailing return has been removed.
func (ch *CalculatorHandler) OddEven(stream calculatorpb.CalculatorService_OddEvenServer) error {
	for {
		req, err := stream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		number := req.GetNumber()
		parity := "Odd"
		if number&1 == 0 {
			parity = "Even"
		}
		if err := stream.Send(&calculatorpb.CalculatorOddEvenResponse{
			Result: number,
			Type:   parity,
		}); err != nil {
			return err
		}
	}
}
|
// This Source Code Form is subject to the terms of the MIT License.
// If a copy of the MIT License was not distributed with this
// file, you can obtain one at https://opensource.org/licenses/MIT.
//
// Copyright (c) DUSK NETWORK. All rights reserved.
package legacy_test
import (
"context"
"encoding/binary"
"os"
"testing"
ristretto "github.com/bwesterb/go-ristretto"
"github.com/dusk-network/dusk-blockchain/pkg/config"
"github.com/dusk-network/dusk-blockchain/pkg/core/data/ipc/transactions"
"github.com/dusk-network/dusk-blockchain/pkg/core/tests/helper"
"github.com/dusk-network/dusk-blockchain/pkg/rpc/client"
"github.com/dusk-network/dusk-blockchain/pkg/util/legacy"
"github.com/dusk-network/dusk-blockchain/pkg/util/ruskmock"
"github.com/dusk-network/dusk-protobuf/autogen/go/rusk"
"github.com/stretchr/testify/assert"
)
// Since the legacy conversions also take care to properly decode rangeproofs
// and MLSAG signatures, we can't simply use the transactions mocking package.
// This is why the RUSK mock server is used for these unit tests.
// TestStandardTxIntegrity round-trips a standard transfer transaction
// through the legacy conversion (rusk -> legacy -> rusk) against the
// RUSK mock server and checks the result is unchanged.
func TestStandardTxIntegrity(t *testing.T) {
	s := setupRuskMock(t)
	defer cleanup(s)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// NOTE(review): the client-creation error is ignored here and in the
	// sibling tests; a failure would surface as a later RPC error.
	c, _ := client.CreateTransferClient(ctx, "localhost:10000")
	resp, err := c.NewTransfer(ctx, &rusk.TransferTransactionRequest{
		Value: 100,
		Recipient: make([]byte, 64),
	})
	assert.NoError(t, err)
	tx := transactions.NewTransaction()
	transactions.UTransaction(resp, tx)
	// Check conversion integrity
	legacyTx, err := legacy.RuskTxToTx(resp)
	assert.NoError(t, err)
	resp2, err := legacy.TxToRuskTx(legacyTx)
	assert.NoError(t, err)
	tx2 := transactions.NewTransaction()
	transactions.UTransaction(resp2, tx2)
	assert.True(t, transactions.Equal(tx, tx2))
}
// TestBidTxIntegrity performs the same round-trip for a bid transaction
// (which additionally needs a random K scalar).
func TestBidTxIntegrity(t *testing.T) {
	s := setupRuskMock(t)
	defer cleanup(s)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	c, _ := client.CreateBidServiceClient(ctx, "localhost:10000")
	// Generate a K
	var k ristretto.Scalar
	k.Rand()
	resp, err := c.NewBid(ctx, &rusk.BidTransactionRequest{
		K: k.Bytes(),
		Value: 100,
	})
	assert.NoError(t, err)
	tx := transactions.NewTransaction()
	transactions.UTransaction(resp.Tx, tx)
	// Check conversion integrity
	legacyTx, err := legacy.RuskBidToBid(resp.Tx)
	assert.NoError(t, err)
	resp2, err := legacy.BidToRuskBid(legacyTx)
	assert.NoError(t, err)
	tx2 := transactions.NewTransaction()
	transactions.UTransaction(resp2.Tx, tx2)
	assert.True(t, transactions.Equal(tx, tx2))
}
// TestStakeTxIntegrity performs the same round-trip for a stake
// transaction.
func TestStakeTxIntegrity(t *testing.T) {
	s := setupRuskMock(t)
	defer cleanup(s)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	c, _ := client.CreateStakeClient(ctx, "localhost:10000")
	resp, err := c.NewStake(ctx, &rusk.StakeTransactionRequest{
		Value: 100,
		PublicKeyBls: make([]byte, 129),
	})
	assert.NoError(t, err)
	tx := transactions.NewTransaction()
	transactions.UTransaction(resp, tx)
	// Check conversion integrity
	legacyTx, err := legacy.RuskStakeToStake(resp)
	assert.NoError(t, err)
	resp2, err := legacy.StakeToRuskStake(legacyTx)
	assert.NoError(t, err)
	tx2 := transactions.NewTransaction()
	transactions.UTransaction(resp2, tx2)
	assert.True(t, transactions.Equal(tx, tx2))
}
// Since the coinbase is a different kind of transaction, we can use the mocking
// package here. We also don't test for integrity, as we never go from legacy
// coinbases to RUSK ones - the server only receives it and never sends it.
// So instead, we just do a simple check of values.
func TestCoinbaseIntegrity(t *testing.T) {
tx := transactions.RandDistributeTx(100, 5)
rtx := new(rusk.Transaction)
transactions.MTransaction(rtx, tx)
legacyTx, err := legacy.RuskDistributeToCoinbase(rtx)
assert.NoError(t, err)
reward := binary.LittleEndian.Uint64(tx.Payload.CallData)
assert.Equal(t, uint64(100), reward)
assert.Equal(t, uint64(100), legacyTx.Rewards[0].EncryptedAmount.BigInt().Uint64())
assert.Equal(t, tx.Payload.Notes[0].PkR, legacyTx.Rewards[0].EncryptedMask.Bytes())
}
// TestBlockIntegrity round-trips a whole block (carrying one valid stake
// transaction) through the legacy block conversion and back, and checks
// the result equals the original.
func TestBlockIntegrity(t *testing.T) {
	s := setupRuskMock(t)
	defer cleanup(s)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// NOTE(review): dial error discarded, as in the other tests here.
	c, _ := client.CreateStakeClient(ctx, "localhost:10000")
	// Create a valid stake to put in the block.
	resp, err := c.NewStake(ctx, &rusk.StakeTransactionRequest{
		Value:        100,
		PublicKeyBls: make([]byte, 129),
	})
	assert.NoError(t, err)
	tx := transactions.NewTransaction()
	transactions.UTransaction(resp, tx)
	// Randomize block and replace txs
	blk := helper.RandomBlock(2, 5)
	blk.Txs = []transactions.ContractCall{tx}
	ob, err := legacy.NewBlockToOldBlock(blk)
	assert.NoError(t, err)
	blk2, err := legacy.OldBlockToNewBlock(ob)
	assert.NoError(t, err)
	assert.True(t, blk.Equals(blk2))
}
// setupRuskMock starts an in-process RUSK mock server listening on tcp
// port 10000 and returns it. Callers must release it with cleanup.
func setupRuskMock(t *testing.T) *ruskmock.Server {
	c := config.Registry{}
	// Hardcode wallet values, so that it always starts up correctly
	c.Wallet.Store = "walletDB"
	c.Wallet.File = "../../../devnet-wallets/wallet0.dat"
	s, err := ruskmock.New(ruskmock.DefaultConfig(), c)
	assert.NoError(t, err)
	assert.NoError(t, s.Serve("tcp", ":10000"))
	return s
}
// cleanup stops the mock server and removes its on-disk wallet store.
func cleanup(s *ruskmock.Server) {
	// Best-effort stop; the error is deliberately discarded.
	_ = s.Stop()
	// NOTE(review): setup configures store "walletDB" but "walletDB_2" is
	// removed here — presumably the directory the mock actually creates;
	// confirm against ruskmock.
	if err := os.RemoveAll("walletDB_2"); err != nil {
		panic(err)
	}
}
|
package main
import (
"bufio"
"fmt"
"io"
"math"
"os"
"strconv"
"strings"
)
// main reads the meal cost, tip percentage and tax percentage from stdin
// and prints the rounded total cost in dollars.
func main() {
	total := response(os.Stdin)
	fmt.Printf("The total meal cost is %d dollars.\n", total)
}
func response(input io.Reader) int64 {
in := bufio.NewReader(input)
mealC, _ := in.ReadString('\n')
tipP, _ := in.ReadString('\n')
taxP, _ := in.ReadString('\n')
getVal := func(x string) float64 {
r, _ := strconv.ParseFloat(strings.TrimSpace(x), 64)
return r
}
mealCost := getVal(mealC)
tipPercent := getVal(tipP)
taxPercent := getVal(taxP)
return round(mealCost + (mealCost * ((tipPercent + taxPercent) / 100)))
}
// round converts number to the nearest integer, rounding halves away
// from zero (2.5 -> 3, -2.5 -> -3). math.Round implements exactly the
// Ceil/Floor dance the previous version hand-rolled.
func round(number float64) int64 {
	return int64(math.Round(number))
}
|
// Copyright (c) 2017 Kuguar <licenses@kuguar.io> Author: Adrian P.K. <apk@kuguar.io>
//
// MIT License
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
package repo
import (
"bytes"
"fmt"
"golang.org/x/crypto/bcrypt"
"github.com/adrianpk/fundacja/db"
"github.com/adrianpk/fundacja/logger"
"github.com/adrianpk/fundacja/models"
"github.com/jmoiron/sqlx"
_ "github.com/lib/pq" // Import pq without side effect
)
// UserRepository - User repository manager. Wraps the shared sqlx handle
// and exposes CRUD plus login verification for users.
type UserRepository struct {
	// DB is the shared database handle obtained from db.GetDbx.
	DB *sqlx.DB
}
// MakeUserRepository builds a UserRepository backed by the shared
// database handle; it fails if the handle cannot be obtained.
func MakeUserRepository() (UserRepository, error) {
	dbx, err := db.GetDbx()
	if err != nil {
		return UserRepository{}, err
	}
	return UserRepository{DB: dbx}, nil
}
// GetAll returns every user in the repo, sorted by first name.
func (repo *UserRepository) GetAll() ([]models.User, error) {
	list := []models.User{}
	if err := repo.DB.Select(&list, "SELECT * FROM users ORDER BY first_name ASC"); err != nil {
		return list, err
	}
	return list, nil
}
// Create persists a new User in the repo: it assigns an ID, hashes the
// password and stamps creation audit values before inserting.
func (repo *UserRepository) Create(user *models.User) error {
	user.SetID()
	user.UpdatePasswordHash()
	user.SetCreationValues()
	tx := repo.DB.MustBegin()
	userInsertSQL := "INSERT INTO users (id, username, password_hash, email, first_name, middle_names, last_name, geolocation, started_at, created_by, is_active, is_logical_deleted, created_at, updated_at) VALUES (:id, :username, :password_hash, :email, :first_name, :middle_names, :last_name, :geolocation, :started_at, :created_by, :is_active, :is_logical_deleted, :created_at, :updated_at)"
	if _, err := tx.NamedExec(userInsertSQL, user); err != nil {
		// Roll back so the connection is not returned to the pool with an
		// open transaction (previously leaked on this path).
		_ = tx.Rollback()
		return err
	}
	return tx.Commit()
}
// Login retrieves the user matching the given username or email and
// verifies the supplied plaintext password against the stored bcrypt
// hash. On success the stored user is returned; on any failure the
// caller's input user is returned along with the error.
func (repo *UserRepository) Login(user models.User) (models.User, error) {
	u := models.User{}
	// Fixed label: this logs the username and email — the password is
	// never logged (the old message claimed otherwise).
	logger.Debugf("Username / Email: %s / %s", user.Username.String, user.Email.String)
	err := repo.DB.Get(&u, "SELECT * FROM users WHERE username = $1 OR email=$2 LIMIT 1", user.Username, user.Email)
	if err != nil {
		logger.Debugf("Error 1: %s", err)
		return user, err
	}
	// Validate password
	err = bcrypt.CompareHashAndPassword([]byte(u.PasswordHash), []byte(user.Password))
	if err != nil {
		logger.Debugf("Error 2: %s", err)
		return user, err
	}
	return u, nil
}
// Get retrieves a single User from the repo by its ID.
func (repo *UserRepository) Get(id string) (models.User, error) {
	var u models.User
	err := repo.DB.Get(&u, "SELECT * FROM users WHERE id = $1", id)
	return u, err
}
// GetByUsername retrieves a single User from the repo by username.
func (repo *UserRepository) GetByUsername(username string) (models.User, error) {
	var u models.User
	err := repo.DB.Get(&u, "SELECT * FROM users WHERE username = $1", username)
	return u, err
}
// Update persists only the changed fields of user to the repo, using a
// SET clause built from the diff against the stored row.
//
// NOTE(review): the generated SQL interpolates field values with %v
// (unquoted, unescaped) and splices user.ID directly into the WHERE
// clause — both SQL-injection vectors, and string values will produce
// invalid SQL. This should use bound parameters. The transaction is also
// not rolled back when NamedExec fails.
func (repo *UserRepository) Update(user *models.User) error {
	// Update password and audit values
	user.SetUpdateValues()
	user.UpdatePasswordHash()
	// Current state
	reference, err := repo.Get(user.ID.String)
	if err != nil {
		return err
	}
	// Customized query: only fields that differ from the stored row.
	changes := UserChanges(user, reference)
	number := len(changes)
	pos := 0
	// last is true when the field being written is the final one (no
	// trailing comma); seeded here for the single-field case.
	last := number < 2
	var query bytes.Buffer
	query.WriteString("UPDATE users SET ")
	for field, structField := range changes {
		var partial string
		if last {
			partial = fmt.Sprintf("%v = %v ", field, structField)
		} else {
			partial = fmt.Sprintf("%v = %v, ", field, structField)
		}
		query.WriteString(partial)
		pos = pos + 1
		last = pos == number-1
	}
	query.WriteString(fmt.Sprintf("WHERE id = '%s';", user.ID.String))
	tx := repo.DB.MustBegin()
	_, err = tx.NamedExec(query.String(), &user)
	//logger.Debugf("Query: %s", query.String())
	if err != nil {
		return err
	}
	err = tx.Commit()
	return err
}
// Delete removes the user with the given ID from the repo.
func (repo *UserRepository) Delete(id string) error {
	tx := repo.DB.MustBegin()
	// Bound parameter instead of Sprintf interpolation: the previous
	// version spliced id into the SQL string, an injection vector.
	tx.MustExec("DELETE FROM users WHERE id = $1", id)
	if err := tx.Commit(); err != nil {
		return err
	}
	return nil
}
|
package entity
import (
"time"
)
// Application-parameter types, stored in AppParams.Type.
const (
	ATAd = 1 // advertisement image
	ATHk = 2 // hot keyword
	ATSg = 3 // slogan
)
// AppParams is an application configuration parameter.
type AppParams struct {
	Id        int64     `json:"id"`
	Name      string    `json:"name"`
	Value     string    `json:"value"`
	Type      int       `json:"type"` // one of the AT* constants
	Data      string    `json:"data"` // extended/extra information
	CreatedAt time.Time `xorm:"created" json:"created_at"`
	UpdatedAt time.Time `xorm:"updated" json:"updated_at"`
}
// Inquiry is a price inquiry (request for quotation) posted by a user.
type Inquiry struct {
	Id        int64     `json:"id"`
	UserId    int64     `json:"user_id"`
	Model     string    `json:"model"`
	Quantity  int       `json:"quantity"`
	Contact   string    `json:"contact"`
	Phone     string    `json:"phone"`
	QqMsn     string    `json:"qq_msn"`
	Replies   int       `json:"replies"` // reply count; denormalized field
	CreatedAt time.Time `xorm:"created" json:"created_at"`
}
// InquiryReply is a reply to a price inquiry.
type InquiryReply struct {
	Id        int64     `json:"id"`
	InquiryId int64     `json:"inquiry_id"`
	UserId    int64     `json:"user_id"`
	Title     string    `json:"title"`
	Content   string    `xorm:"varchar(1000)" json:"content"`
	CreatedAt time.Time `xorm:"created" json:"created_at"`
}
// Migration records one applied schema migration, keyed by unique name.
type Migration struct {
	Id          int64     `json:"id"`
	Name        string    `xorm:"unique" json:"name"`
	Description string    `json:"description"`
	CreatedAt   time.Time `xorm:"created" json:"created_at"`
}
// Feedback is a user feedback message.
type Feedback struct {
	Id        int64     `json:"id"`
	Email     string    `json:"email"`                           // email address
	Subject   string    `json:"subject"`                         // subject
	Name      string    `json:"name"`                            // how to address the sender
	Phone     string    `json:"phone"`                           // phone number
	Content   string    `xorm:"varchar(2000)" json:"content"`    // body text
	Tags      string    `json:"tags"`                            // labels/markers
	CreatedAt time.Time `xorm:"created" json:"created_at"`
}
|
package main
import (
"bufio"
"io"
"os"
)
const (
	// null is the NUL byte that delimits entries in the input stream,
	// as produced by e.g. `find -print0`.
	null = byte('\000')
)
// collectFiles returns the NUL-separated file list read from the file at
// filepath, or from stdin when filepath is "-".
func collectFiles(filepath string) ([]string, error) {
	fin := os.Stdin
	if filepath != "-" {
		// Read from the named file instead of stdin.
		f, err := os.Open(filepath)
		if err != nil {
			return nil, err
		}
		// Close once fully read; the previous version leaked the handle.
		defer f.Close()
		fin = f
	}
	return readFiles(fin)
}
func readFiles(reader io.Reader) ([]string, error) {
files := []string{}
r := bufio.NewReader(reader)
for {
v, err := r.ReadString(null)
if err != nil {
if err == io.EOF {
break
}
return nil, err
}
files = append(files, v[0:len(v)-1])
}
return files, nil
}
|
package v3
// init registers this transformation with the xform registry at package
// load time.
func init() {
	registerXform(selectToIndexScan{})
}
// selectToIndexScan is an implementation transformation that rewrites a
// select over a table scan into index-scan alternatives.
type selectToIndexScan struct {
	xformImplementation
}
// id returns the unique identifier of this transformation.
func (selectToIndexScan) id() xformID {
	return xformSelectToIndexScanID
}
// pattern describes the shape this xform matches: a select whose input
// is a bare table scan, with any filter tree.
func (selectToIndexScan) pattern() *expr {
	children := []*expr{
		{op: scanOp}, // left: the scanned relation
		patternTree,  // filter: matches any tree
	}
	return &expr{op: selectOp, children: children}
}
// apply generates, for every secondary key whose leading column is
// constrained by a comparison filter, a select-over-index-scan
// alternative (joined back to the primary index when the secondary index
// does not cover all output columns), and appends it to results.
func (selectToIndexScan) apply(e *expr, results []*expr) []*expr {
	// TODO(peter): Note that this logic is simplistic and incomplete. We really
	// want to be translating the filters into a set of per-column constraints.
	scan := e.children[0]
	table := scan.private.(*table)
	for i := range table.keys {
		key := &table.keys[i]
		// If the first column of the index is the variable used for a filter,
		// output an index scan expression.
		col0 := scan.props.columns[key.columns[0]]
		for _, filter := range e.filters() {
			// TODO(peter): this is ugly.
			// Go switch cases do not fall through: each empty case just exits
			// the switch, so eq/lt/gt/le/ge all proceed, while any other op
			// hits default and skips this filter. The `break` is redundant.
			switch filter.op {
			case eqOp:
			case ltOp:
			case gtOp:
			case leOp:
			case geOp:
				break
			default:
				continue
			}
			// Only handle the canonical <variable> <cmp> <constant> shape.
			if filter.children[0].op != variableOp ||
				filter.children[1].op != constOp {
				continue
			}
			// The variable must refer to the index's leading column.
			if !filter.children[0].scalarProps.inputCols.Contains(col0.index) {
				continue
			}
			indexScan := newIndexScanExpr(table, key, scan.props)
			if !scan.props.outputCols.SubsetOf(indexScan.props.outputCols) {
				// The index does not cover every needed column: join back to
				// the primary index, projecting the missing columns from it.
				primaryScan := newIndexScanExpr(table, table.getPrimaryKey(), scan.props)
				var projections []*expr
				for _, col := range scan.props.columns {
					if indexScan.props.outputCols.Contains(col.index) {
						continue
					}
					projections = append(projections, col.newVariableExpr(""))
				}
				primaryScan.addProjections(projections)
				// TODO(peter): need to add a join condition on the columns of the
				// primary key.
				indexScan = &expr{
					op: innerJoinOp,
					children: []*expr{
						indexScan,   // left
						primaryScan, // right
						nil,         // filter
					},
					props: scan.props,
				}
				indexScan.initProps()
			}
			// Place the new expression in the scan's memo group (expr index
			// not yet assigned).
			indexScan.loc = memoLoc{group: scan.loc.group, expr: -1}
			results = append(results, &expr{
				op: selectOp,
				children: []*expr{
					indexScan,
					e.children[1],
				},
				props: e.props,
			})
		}
	}
	return results
}
|
package main
import (
"bufio"
"fmt"
"os"
"strconv"
"strings"
)
//func main() {
// var nums []int
// input := bufio.NewScanner(os.Stdin)
// input.Scan()
// //str=strings.Split(input.Text(),",")
// str := strings.ReplaceAll(input.Text(), " ", "")
// newstr := strings.ReplaceAll(str, ",", "")
// //fmt.Printf("%c",newstr[len(newstr)-1])
// nums = make([]int, len(newstr))
// for i := 0; i < len(newstr); i++ {
// nums[i], _ = strconv.Atoi(string(newstr[i]))
// //fmt.Printf("%T", nums[i])
// }
// fmt.Println(nums)
//}
//func main() {
// reader:=bufio.NewReader(os.Stdin)
// input,_:=reader.ReadString('\n') //ReadString 需要处理结尾的换行符
// str:=strings.ReplaceAll(input," ","")
// newstr:=strings.ReplaceAll(str,",","")
//
// nums:=make([]int,len(newstr))
// for i:=0;i<len(newstr);i++{
// nums[i],_=strconv.Atoi(string(newstr[i]))
// //fmt.Printf("%T",nums[i])
// }
// fmt.Println(nums)
//}
// main reads one line of space-separated "a,b" integer pairs from stdin
// (e.g. "1,2 3,4") and prints them as a flat slice.
func main() {
	var nums []int
	input := bufio.NewScanner(os.Stdin)
	input.Scan()
	for _, token := range strings.Split(input.Text(), " ") {
		parts := strings.Split(token, ",")
		// Guard malformed tokens: the previous version indexed parts[1]
		// unconditionally and panicked on a token without a comma.
		if len(parts) < 2 {
			continue
		}
		num1, _ := strconv.Atoi(parts[0])
		num2, _ := strconv.Atoi(parts[1])
		nums = append(nums, num1, num2)
	}
	fmt.Println(nums)
}
|
package main
import (
"bytes"
"fmt"
"io/ioutil"
"log"
"os"
"github.com/BurntSushi/toml"
"github.com/spf13/cobra"
"github.com/HalalChain/qitmeer-cli/rpc/client"
)
// Config cli config file: flag values merged with the TOML config file.
type Config struct {
	ConfigFile string         // path of the TOML config file
	SimNet     bool           // connect to the simulation test network
	TestNet    bool           // connect to the test network
	RPC        *client.Config // RPC client connection settings
}
// cfg holds the effective configuration, populated from flags and merged
// with the config file in makeConfigFile.
var cfg = &Config{
	RPC: &client.Config{}}

// rpcClient is created in makeConfigFile before any subcommand runs.
var rpcClient *client.RPCClient

// rootCmd is the top-level cobra command; all subcommands hang off it.
var rootCmd = &cobra.Command{
	Use:               "cli",
	Long:              `cli is a RPC tool for noxd`,
	PersistentPreRunE: makeConfigFile,
}
// init wires up the persistent flags and registers all subcommands.
func init() {
	//flags
	// Fixed help text: previously read "RPC username" (copy-paste slip).
	rootCmd.PersistentFlags().StringVar(&cfg.ConfigFile, "conf", "config.toml", "Path of the config file")
	rootCmd.PersistentFlags().StringVarP(&cfg.RPC.RPCUser, "user", "u", "", "RPC username")
	rootCmd.PersistentFlags().StringVarP(&cfg.RPC.RPCPassword, "password", "P", "", "RPC password")
	rootCmd.PersistentFlags().StringVarP(&cfg.RPC.RPCServer, "server", "s", "127.0.0.1:18131", "RPC server to connect to")
	rootCmd.PersistentFlags().StringVar(&cfg.RPC.RPCCert, "c", "", "RPC server certificate file path")
	// Fixed help text: previously duplicated the skipverify description.
	rootCmd.PersistentFlags().BoolVar(&cfg.RPC.NoTLS, "notls", true, "Disable TLS for the RPC connection (not recommended!)")
	rootCmd.PersistentFlags().BoolVar(&cfg.RPC.TLSSkipVerify, "skipverify", true, "Do not verify tls certificates (not recommended!)")
	rootCmd.PersistentFlags().StringVar(&cfg.RPC.Proxy, "proxy", "", "Connect via SOCKS5 proxy (eg. 127.0.0.1:9050)")
	rootCmd.PersistentFlags().StringVar(&cfg.RPC.ProxyUser, "proxyuser", "", "Username for proxy server")
	rootCmd.PersistentFlags().StringVar(&cfg.RPC.ProxyPass, "proxypass", "", "Password for proxy server")
	rootCmd.PersistentFlags().BoolVar(&cfg.TestNet, "testnet", false, "Connect to testnet")
	rootCmd.PersistentFlags().BoolVar(&cfg.SimNet, "simnet", false, "Connect to the simulation test network")
	//cmds
	rootCmd.AddCommand(GenerateCmd)
	rootCmd.AddCommand(GetBlockCountCmd)
	rootCmd.AddCommand(GetBlockTemplateCmd)
	rootCmd.AddCommand(GetBlockHashCmd)
	rootCmd.AddCommand(GetBlockCmd)
	rootCmd.AddCommand(GetMempoolCmd)
	rootCmd.AddCommand(GetRawTransactionCmd)
	rootCmd.AddCommand(CreateRawTransactionCmd)
	rootCmd.AddCommand(DecodeRawTransactionCmd)
	rootCmd.AddCommand(SendRawTransactionCmd)
	rootCmd.AddCommand(TxSignCmd)
	rootCmd.AddCommand(GetUtxoCmd)
}
// makeConfigFile runs before every command: it merges the config file
// into the flag values (flags win when explicitly set), validates the
// network selection, writes the merged config back to disk, and creates
// the RPC client.
//
// NOTE(review): the merged config is rewritten to cfg.ConfigFile on every
// invocation, and an encode failure calls log.Fatal rather than
// returning an error — confirm both are intended.
func makeConfigFile(cmd *cobra.Command, args []string) error {
	cfg2 := &Config{}
	_, decodeErr := toml.DecodeFile(cfg.ConfigFile, cfg2)
	if decodeErr != nil {
		// Missing/broken config file is non-fatal; flags alone are used.
		fmt.Println("config file err:", decodeErr)
	} else {
		// File values only apply when the flag was not set explicitly.
		if !cmd.Flag("user").Changed {
			cfg.RPC.RPCUser = cfg2.RPC.RPCUser
		}
		if !cmd.Flag("password").Changed {
			cfg.RPC.RPCPassword = cfg2.RPC.RPCPassword
		}
		if !cmd.Flag("server").Changed {
			cfg.RPC.RPCServer = cfg2.RPC.RPCServer
		}
	}
	//params.MainNetParams.DefaultPort
	// preCfg := cfg
	// Multiple networks can't be selected simultaneously.
	numNets := 0
	if cfg.TestNet {
		numNets++
	}
	if cfg.SimNet {
		numNets++
	}
	if numNets > 1 {
		return fmt.Errorf("loadConfig: %s", "one of the testnet and simnet")
	}
	//save
	buf := new(bytes.Buffer)
	if err := toml.NewEncoder(buf).Encode(cfg); err != nil {
		log.Fatal(err)
	}
	err := ioutil.WriteFile(cfg.ConfigFile, buf.Bytes(), 0666)
	if err != nil {
		return err
	}
	rpcClient, err = client.NewRPCClient(cfg.RPC)
	if err != nil {
		return err
	}
	return nil
}
// main runs the root command and exits non-zero on any error.
func main() {
	if err := rootCmd.Execute(); err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	// Redundant trailing `return` removed.
}
|
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package codegen
import (
"fmt"
"strings"
"llvm/bindings/go/llvm"
)
// val wraps an llvm.Value together with its codegen type, carrying the
// LLVM name over as the Value's name.
func (b *Builder) val(ty Type, val llvm.Value) *Value {
	// if a, b := val.Type().String(), ty.llvmTy().String(); a != b {
	// 	fail("Value type mismatch: value: %v, type: %v", a, b)
	// }
	return &Value{ty, val, val.Name(), b}
}
// Value represents a value.
type Value struct {
	ty   Type       // the codegen type of the value
	llvm llvm.Value // the underlying LLVM value
	name string     // display name (without the pointer "→" prefix)
	b    *Builder   // the builder that created the value
}
// Type returns the type of the value.
func (v *Value) Type() Type { return v.ty }

// Name returns the value's name.
func (v *Value) Name() string { return v.name }
// SetName assigns a name to the value and returns it for chaining.
// Pointer values get an "→" prefix on the LLVM side for readability;
// v.name keeps the bare name.
func (v *Value) SetName(name string) *Value {
	v.name = name
	if IsPointer(v.ty) {
		name = "→" + name
	}
	v.llvm.SetName(name)
	return v
}
// EmitDebug emits debug info for the value under the given name and
// returns the value for chaining.
func (v *Value) EmitDebug(name string) *Value {
	v.b.dbgEmitValue(v, name)
	return v
}
// Load loads the element from the pointer value.
func (v *Value) Load() *Value {
	if !IsPointer(v.ty) {
		fail("Load must be from a pointer. Got %v", v.Type())
	}
	// The loaded value has the pointer's element type.
	elTy := v.Type().(Pointer).Element
	return v.b.val(elTy, v.b.llvm.CreateLoad(v.llvm, v.name))
}
// LoadUnaligned loads the element from the pointer value, it allows the loaded
// value to be unaligned (the load's alignment is forced to 1 byte).
func (v *Value) LoadUnaligned() *Value {
	if !IsPointer(v.ty) {
		// Fixed: previously passed the method value v.Type (a function
		// pointer) instead of calling v.Type(), as Load does.
		fail("Load must be from a pointer. Got %v", v.Type())
	}
	elTy := v.Type().(Pointer).Element
	load := v.b.llvm.CreateLoad(v.llvm, v.name)
	load.SetAlignment(1)
	return v.b.val(elTy, load)
}
// StoreUnaligned stores val to the pointer ptr. It assumes that the
// destination address may be unaligned (alignment forced to 1 byte).
func (v *Value) StoreUnaligned(val *Value) {
	if !IsPointer(v.ty) {
		// Fixed: previously passed the method value v.Type instead of
		// calling v.Type().
		fail("Store must be to a pointer. Got %v", v.Type())
	}
	elTy := v.Type().(Pointer).Element
	if val.ty.String() != elTy.String() {
		fail("Attempted to store value of type %v to pointer element type %v",
			val.ty.TypeName(), elTy.TypeName())
	}
	store := v.b.llvm.CreateStore(val.llvm, v.llvm)
	store.SetAlignment(1)
}
// Store stores val to the pointer ptr.
func (v *Value) Store(val *Value) {
	if !IsPointer(v.ty) {
		// Fixed: previously passed the method value v.Type instead of
		// calling v.Type().
		fail("Store must be to a pointer. Got %v", v.Type())
	}
	elTy := v.Type().(Pointer).Element
	if val.ty.String() != elTy.String() {
		fail("Attempted to store value of type %v to pointer element type %v",
			val.ty.TypeName(), elTy.TypeName())
	}
	v.b.llvm.CreateStore(val.llvm, v.llvm)
}
// field resolves f — either an int index or a field name — to the
// corresponding Field of s and its index. Unknown names or unsupported
// key types invoke fail (which presumably does not return normally —
// otherwise the name case would fall through with i == 0).
func field(s *Struct, f IndexOrName) (Field, int) {
	var i int
	switch f := f.(type) {
	case int:
		i = f
	case string:
		var ok bool
		if i, ok = s.fieldIndices[f]; !ok {
			fail("%v does not contain a field with name '%v':\n%+v", s.TypeName(), f, s)
		}
	default:
		fail("Attempted to index field of struct '%v' with %T. Must be int or string", s.TypeName(), f)
	}
	return s.fields[i], i
}
// pathName renders a human-readable path string rooted at rootName, e.g.
// "root.field[3][i]": int and *Value segments render as "[x]", string
// segments as ".x".
func (b *Builder) pathName(rootName string, path []ValueIndexOrName) string {
	full := rootName
	for _, seg := range path {
		switch seg := seg.(type) {
		case int:
			full = fmt.Sprintf("%v[%v]", full, seg)
		case string:
			full = full + "." + seg
		case *Value:
			full = fmt.Sprintf("%v[%v]", full, seg.Name())
		}
	}
	return full
}
// path resolves a chain of indices / field names / value indices starting
// at rootTy into LLVM GEP indices, the rendered path name, and the final
// target type reached.
func (b *Builder) path(rootTy Type, rootName string, path ...ValueIndexOrName) (indices []llvm.Value, name string, target Type) {
	// err renders the full path with a caret marker under the segment
	// that failed, for diagnostics.
	err := func(i int) string {
		full := b.pathName(rootName, path)
		okay := b.pathName(rootName, path[:i])
		fail := b.pathName(rootName, path[:i+1])
		pad := strings.Repeat(" ", len(okay))
		highlight := strings.Repeat("^", len(fail)-len(okay))
		return fmt.Sprintf("\n%v\n%v%v", full, pad, highlight)
	}
	target = rootTy
	indices = make([]llvm.Value, len(path))
	for i, p := range path {
		switch t := target.(type) {
		case Pointer:
			// Only the root pointer may be indexed (GEP semantics).
			if i == 0 {
				switch p := p.(type) {
				case int:
					indices[i] = b.Scalar(uint32(p)).llvm
				case *Value:
					if !IsInteger(p.Type()) {
						fail("Tried to index pointer with non-integer %v.%v", p.Type().TypeName(), err(i))
					}
					indices[i] = p.llvm
				default:
					// Fixed: the format string has three verbs but only two
					// arguments were supplied, printing %!v(MISSING).
					fail("Tried to index pointer with %T (%v).%v", p, p, err(i))
				}
				target = t.Element
			} else {
				fail("Tried to index %v. Only the root pointer can be indexed.%v", target.TypeName(), err(i))
			}
		case *Struct:
			field, idx := field(t, p)
			target = field.Type
			indices[i] = b.Scalar(uint32(idx)).llvm
		case *Array:
			switch p := p.(type) {
			case int:
				indices[i] = b.Scalar(uint32(p)).llvm
			case *Value:
				indices[i] = p.llvm
			default:
				fail("Tried to index array with %T.%v", p, err(i))
			}
			target = t.Element
		default:
			fail("Cannot index type %v.%v", target, err(i))
		}
	}
	return indices, b.pathName(rootName, path), target
}
// Index returns a new pointer to the array or field element found by following
// the list of indices as specified by path.
func (v *Value) Index(path ...ValueIndexOrName) *Value {
	if !IsPointer(v.ty) {
		fail("Index only works with pointer value types. Got %v", v.Type().TypeName())
	}
	// Resolve the path to GEP indices, then wrap the GEP result as a
	// pointer to the resolved target type.
	indices, name, target := v.b.path(v.Type(), v.Name(), path...)
	return v.b.val(v.b.m.Types.Pointer(target), v.b.llvm.CreateGEP(v.llvm, indices, "")).SetName(name)
}
// Insert creates a copy of the struct or array v with the field/element at
// changed to val.
func (v *Value) Insert(at ValueIndexOrName, val *Value) *Value {
	switch ty := v.ty.(type) {
	case *Struct:
		// Struct fields may be addressed by index or by name.
		f, idx := field(ty, at)
		assertTypesEqual(f.Type, val.Type())
		return v.b.val(ty, v.b.llvm.CreateInsertValue(v.llvm, val.llvm, idx, "")).SetName(v.Name())
	case *Array:
		// Arrays only support constant integer indices here.
		idx, ok := at.(int)
		if !ok {
			fail("Insert parameter at must be int for arrays values. Got %T", at)
		}
		assertTypesEqual(ty.Element, val.Type())
		return v.b.val(ty, v.b.llvm.CreateInsertValue(v.llvm, val.llvm, idx, "")).SetName(v.Name())
	default:
		fail("Attempted to insert on non-struct and non-array type %v", v.ty.TypeName())
		return nil // unreachable assuming fail does not return — TODO confirm
	}
}
// Extract returns the field at extracted from the struct or array v.
func (v *Value) Extract(at IndexOrName) *Value {
	switch ty := v.ty.(type) {
	case *Struct:
		// Struct fields may be addressed by index or by name.
		f, idx := field(ty, at)
		return v.b.val(f.Type, v.b.llvm.CreateExtractValue(v.llvm, idx, f.Name))
	case *Array:
		// Arrays only support constant integer indices here.
		idx, ok := at.(int)
		if !ok {
			fail("Extract parameter at must be int for arrays values. Got %T", at)
		}
		return v.b.val(ty.Element, v.b.llvm.CreateExtractValue(v.llvm, idx, fmt.Sprintf("%v[%d]", v.name, idx)))
	default:
		fail("Attempted to extract on non-struct and non-array type %v", v.ty.TypeName())
		return nil // unreachable assuming fail does not return — TODO confirm
	}
}
// IsNull returns a boolean Value that is true if the pointer value v is
// null.
func (v *Value) IsNull() *Value {
	if !IsPointer(v.ty) {
		fail("IsNull only works with pointer value types. Got %v", v.Type().TypeName())
	}
	return v.b.val(v.b.m.Types.Bool, v.b.llvm.CreateIsNull(v.llvm, ""))
}
|
package main
import "fmt"
// main prints two basic values: 1^1 (bitwise XOR of equal operands,
// always 0) and a plain boolean flag.
func main() {
	flag := true
	k := 1 ^ 1 // XOR of identical values is 0
	fmt.Println(k)
	fmt.Println(flag)
}
|
package set
// Elem is the element type stored in a Set.
type Elem int

// Set implements a basic set data structure.
// The zero value is an empty set, ready to use.
type Set struct {
	data map[Elem]struct{}
}

// init lazily allocates the backing map so the zero Set is usable.
func (s *Set) init() {
	if s.data == nil {
		s.data = make(map[Elem]struct{})
	}
}

// Len returns the number of elements in the set.
func (s *Set) Len() int {
	return len(s.data)
}

// Add adds the element to the set. It reports whether the element was
// already present before the call. (Doc previously didn't state the
// return semantics.)
func (s *Set) Add(v Elem) bool {
	s.init()
	_, ok := s.data[v]
	if !ok {
		s.data[v] = struct{}{}
	}
	return ok
}

// Has returns whether or not the element is in the set.
func (s *Set) Has(v Elem) bool {
	s.init()
	_, ok := s.data[v]
	return ok
}

// Del removes the element from the set. It reports whether the element
// was present.
func (s *Set) Del(v Elem) bool {
	s.init()
	_, ok := s.data[v]
	if ok {
		delete(s.data, v)
	}
	return ok
}

// Unionwith performs the union of the two sets into s.
// (Comment previously named a nonexistent method "UnionWith".)
func (s *Set) Unionwith(ss *Set) {
	if ss == nil || len(ss.data) == 0 {
		return
	}
	s.init()
	for v := range ss.data {
		s.data[v] = struct{}{}
	}
}

// Intersectionwith removes from s every element that is not also in ss.
// (Comment previously named a nonexistent method "IntersectionWith".)
func (s *Set) Intersectionwith(ss *Set) {
	if ss == nil || len(ss.data) == 0 {
		s.data = nil
		return
	}
	if len(s.data) == 0 {
		return
	}
	s.init()
	for v := range s.data {
		if _, ok := ss.data[v]; !ok {
			delete(s.data, v)
		}
	}
}

// DifferenceWith sets s to the symmetric difference of s and ss: the
// elements that are in exactly one of the two sets.
func (s *Set) DifferenceWith(ss *Set) {
	if ss == nil || len(ss.data) == 0 {
		return
	}
	s.init()
	if len(s.data) == 0 {
		for v := range ss.data {
			s.data[v] = struct{}{}
		}
		return
	}
	for v := range ss.data {
		if _, ok := s.data[v]; ok {
			delete(s.data, v)
		} else {
			s.data[v] = struct{}{}
		}
	}
}
|
package email
import (
"fmt"
"regexp"
"strings"
)
var (
isHTMLRgx = regexp.MustCompile(`.*<html.*>.*`)
lineBreak = "\r\n"
)
type EmailMessage struct {
Recipients []string
BCC []string
CC []string
SenderEmail string
Subject string
Content string
}
func (msg *EmailMessage) GetContent() string {
headers := make(map[string]string)
headers["From"] = msg.SenderEmail
headers["To"] = strings.Join(msg.Recipients, ", ")
headers["Cc"] = strings.Join(msg.CC, ", ")
message := ""
for k, v := range headers {
message += fmt.Sprintf("%s: %s"+lineBreak, k, v)
}
//default mime-type is html
mime := "MIME-version: 1.0;" + lineBreak + "Content-Type: text/html; charset=\"UTF-8\";" + lineBreak + lineBreak
if !isHTML(msg.Content) {
mime = "MIME-version: 1.0;" + lineBreak + "Content-Type: text/plain; charset=\"UTF-8\";" + lineBreak + lineBreak
}
subject := "Subject: " + msg.Subject + lineBreak
message += subject + mime + lineBreak + msg.Content
return message
}
func isHTML(input string) bool {
return isHTMLRgx.MatchString(input)
}
|
package peer
import (
"fmt"
"math/rand"
"os"
"sync"
"time"
"code.cloudfoundry.org/lager"
)
// peerClient abstracts the gossip transport: reading the leader's peer
// list, and posting our snapshot to a peer while reading theirs.
type peerClient interface {
	ReadLeader(logger lager.Logger, leader string) ([]Glimpse, error)
	PostAndReadSnapshot(logger lager.Logger, host string) ([]Glimpse, error)
}
// Heartbeat periodically gossips membership with the leader and a random
// subset of known peers.
type Heartbeat struct {
	Leader        string        // leader host; empty disables the leader read
	Peers         List          // known peer list, updated as gossip arrives
	Logger        lager.Logger
	CheckInterval time.Duration // base period between gossip rounds
	Client        peerClient    // transport used to talk to leader and peers
}
// RunHeartbeat is an ifrit-style run loop: it signals readiness, then
// repeatedly runs a gossip round, sleeping a jittered interval between
// rounds, until a signal arrives.
func (h *Heartbeat) RunHeartbeat(signals <-chan os.Signal, ready chan<- struct{}) error {
	rand.Seed(time.Now().UnixNano())
	// Initial stagger of 0-4s so that many members don't fire in lockstep.
	// NOTE(review): time.Duration(rand.Intn(5))*time.Second would avoid
	// the Sprintf/ParseDuration round trip and the ignored error.
	nextInterval, _ := time.ParseDuration(fmt.Sprintf("%ds", rand.Intn(5)))
	close(ready)
	for {
		select {
		case <-signals:
			return nil
		case <-time.After(nextInterval):
			h.check()
		}
		// Jitter the next round to 0.25x-0.75x of CheckInterval.
		jitter := (rand.Float64() + 0.5) * h.CheckInterval.Seconds() / 2
		nextInterval = time.Duration(jitter) * time.Second
		h.Logger.Debug("next-interval", lager.Data{"seconds": nextInterval.Seconds()})
	}
}
// check runs one gossip round: pull candidate peers from the leader (if
// configured), then post our snapshot to stale or randomly chosen peers
// in parallel, merging whatever they report back.
func (h *Heartbeat) check() {
	logger := h.Logger.Session("heartbeat")
	defer logger.Debug("done")
	if h.Leader != "" {
		leaderLogger := logger.Session("read-leader").WithData(lager.Data{"leader": h.Leader})
		leaderPeers, err := h.Client.ReadLeader(leaderLogger, h.Leader)
		if err != nil {
			// Leader unreachable: skip the whole round.
			leaderLogger.Error("get-from-leader", err)
			return
		}
		leaderLogger.Info("get-from-leader", lager.Data{"candidate-peers": leaderPeers})
		h.Peers.UpsertUntrusted(leaderLogger, leaderPeers)
	}
	ttlThreshhold := int(h.CheckInterval.Seconds())
	candidates := h.Peers.Snapshot(logger)
	wg := sync.WaitGroup{}
	for _, peer := range candidates {
		// Contact a peer when its TTL is about to lapse, or with 50%
		// probability otherwise (random fan-out).
		if peer.TTL <= ttlThreshhold || rand.Float32() > 0.5 {
			wg.Add(1)
			// peer.Host is passed as an argument to avoid capturing the
			// shared loop variable in the goroutine.
			go func(peerHost string) {
				defer wg.Done()
				peerLogger := logger.Session("post-peer").WithData(lager.Data{"peer": peerHost})
				morePeers, err := h.Client.PostAndReadSnapshot(peerLogger, peerHost)
				if err != nil {
					peerLogger.Error("post-to-peer", err)
					return
				}
				peerLogger.Debug("post-to-peer")
				// The peer answered, so it is trusted; its reported peers
				// are not, until they answer themselves.
				h.Peers.Upsert(peerLogger, peerHost)
				h.Peers.UpsertUntrusted(peerLogger, morePeers)
			}(peer.Host)
		}
	}
	wg.Wait()
}
|
package editorapi
import (
"editorApi/init/mgdb"
"editorApi/init/qmlog"
"editorApi/mdbmodel/editor"
"fmt"
"runtime"
"sync"
"time"
"github.com/mongodb/mongo-go-driver/mongo"
"go.mongodb.org/mongo-driver/bson"
)
// toClient is the Mongo client for the online (production) environment.
// NOTE(review): assigned in init but not referenced in this part of the
// file — confirm it is still needed.
var toClient *mongo.Client

// Collection names resolved in init; used by the publish/copy jobs below.
var (
	onLineJobsCollection     string
	catalogCollection        string
	contentVersionCollection string
	courseLangsCollection    string
	courseInfoCollection     string
	contentTagsCollection    string
)
// init wires the package to the shared online Mongo client and names the
// collections used by the publish/copy jobs.
func init() {
	toClient = mgdb.OnlineClient
	onLineJobsCollection = "online_jobs"
	catalogCollection = "catalogs"
	contentVersionCollection = "course_content_infos"
	courseLangsCollection = "course_langs"
	courseInfoCollection = "course_infos"
	contentTagsCollection = "content_tags"
}
// CopyContentVersion copies a catalog and all the content beneath it:
// the non-deleted children of msg.Uuids[0] are collected and handed to
// copyCatalogInfo.
func CopyContentVersion(msg *CatalogCopyParam) {
	// Set up recover; recover only works inside a deferred function.
	defer func() {
		if err := recover(); err != nil {
			var buf [4096]byte
			n := runtime.Stack(buf[:], false)
			fmt.Printf("==> %s\n", string(buf[:n]))
		}
	}()
	qmlog.QMLog.Info("开始复制版本内容", msg.Uuids[0])
	var catalogs []*editor.Catalogs
	// NOTE(review): queries tblCatalogs while the rest of this file uses
	// catalogCollection — confirm both name the same collection.
	mgdb.Find(
		mgdb.EnvEditor,
		mgdb.DbEditor,
		tblCatalogs,
		bson.M{
			"parent_uuid": msg.Uuids[0],
			"has_del":     false,
		},
		nil,
		nil,
		0,
		0,
		&catalogs,
	)
	uuids := []string{}
	for _, c := range catalogs {
		uuids = append(uuids, c.Uuid)
	}
	if len(uuids) > 0 {
		// Replace the input UUID with the child UUIDs and copy them.
		msg.Uuids = uuids
		copyCatalogInfo(msg, false)
	}
}
// PushContent dispatches a publish job to the environment named in
// msg.DbEnv: "online" (production) or "test". Other values are ignored.
func PushContent(msg *PushOnlineMsg) {
	switch msg.DbEnv {
	case "online":
		PushContentOnline(msg)
	case "test":
		PushContentTest(msg)
	}
}
// PushContentOnline publishes course content to the production
// environment: it marks the job running, copies the selected catalog (or
// content version) plus its ancestors/children online, republishes all
// course tags, marks the job done, and finally pushes the content
// version record itself.
func PushContentOnline(msg *PushOnlineMsg) {
	// Set up recover; recover only works inside a deferred function.
	defer func() {
		if err := recover(); err != nil {
			var buf [4096]byte
			n := runtime.Stack(buf[:], false)
			fmt.Printf("==> %s\n", string(buf[:n]))
		}
	}()
	qmlog.QMLog.Info("开始上线")
	// Mark the job as running (state 1).
	mgdb.UpdateOne(
		mgdb.EnvEditor,
		mgdb.DbEditor,
		onLineJobsCollection,
		bson.M{
			"uuid": msg.UUID,
		},
		bson.M{
			"$set": bson.M{
				"state": 1,
			},
		},
		false,
	)
	toConfig := mgdb.EnvOnline
	contentVersionUuid := ""
	if msg.OnlineType == "catalog" {
		// Push the current catalog and all of its ancestor catalogs online.
		catatlogUuid := msg.OnlineUUID
		qmlog.QMLog.Info("开始上线父目录")
		for {
			var ct *editor.Catalogs
			mgdb.FindOne(
				mgdb.EnvEditor,
				EDITOR_DB,
				catalogCollection,
				bson.M{
					"uuid": catatlogUuid,
				},
				nil,
				&ct,
			)
			if ct != nil {
				mgdb.UpdateOne(
					toConfig,
					mgdb.DbContent,
					catalogCollection,
					bson.M{
						"uuid": catatlogUuid,
					},
					bson.M{
						"$set": ct,
					},
					true,
				)
				// Only the catalog being published gets its has_changed
				// flag reset; ancestors keep theirs.
				if catatlogUuid == msg.OnlineUUID {
					mgdb.UpdateOne(
						mgdb.EnvEditor,
						mgdb.DbEditor,
						catalogCollection,
						bson.M{
							"uuid": catatlogUuid,
						},
						bson.M{
							"$set": bson.M{
								"has_changed": false,
							},
						},
						false,
					)
				}
				catatlogUuid = ct.Parent_uuid
				qmlog.QMLog.Info("父目录UUID:" + catatlogUuid)
			} else {
				// No catalog found: we have walked past the root; the last
				// UUID is the content version's.
				contentVersionUuid = catatlogUuid
				break
			}
		}
		qmlog.QMLog.Info("结束上线父目录")
		// Update the content-version record.
		updateContentVersion(
			contentVersionUuid,
			true,
			toConfig,
		)
		// Update child catalogs and their content.
		updateChildeCatalogs(
			msg.OnlineUUID,
			toConfig,
		)
	} else if msg.OnlineType == "content_version" {
		// Update the version, language and course info.
		contentVersionUuid = msg.OnlineUUID
		updateContentVersion(
			msg.OnlineUUID,
			false,
			toConfig,
		)
		// Publish child catalogs and their content.
		updateChildeCatalogs(
			msg.OnlineUUID,
			toConfig,
		)
	}
	// Publish all course tags.
	qmlog.QMLog.Info("开始上线课程标签")
	var tags []*editor.Content_tags
	mgdb.Find(
		mgdb.EnvEditor,
		mgdb.DbEditor,
		contentTagsCollection,
		nil,
		nil,
		nil,
		0,
		0,
		&tags,
	)
	for _, t := range tags {
		qmlog.QMLog.Info(t.Key)
		mgdb.UpdateOne(
			toConfig,
			mgdb.DbContent,
			contentTagsCollection,
			bson.M{
				"key": t.Key,
			},
			bson.M{
				"$set": t,
			},
			true,
		)
	}
	qmlog.QMLog.Info("结束上线课程标签")
	// Mark the job done (state 2).
	mgdb.UpdateOne(
		mgdb.EnvEditor,
		mgdb.DbEditor,
		"online_jobs",
		bson.M{
			"uuid": msg.UUID,
		},
		bson.M{
			"$set": bson.M{
				"state": 2,
			},
		},
		false,
	)
	// Finally push the content-version document itself online.
	var version *editor.Course_content_infos
	mgdb.FindOne(
		mgdb.EnvEditor,
		mgdb.DbEditor,
		contentVersionCollection,
		bson.M{"uuid": contentVersionUuid},
		nil,
		&version,
	)
	mgdb.UpdateOne(
		toConfig,
		mgdb.DbContent,
		contentVersionCollection,
		bson.M{
			"uuid": contentVersionUuid,
		},
		bson.M{
			"$set": version,
		},
		true,
	)
	qmlog.QMLog.Info("结束上线")
	fmt.Println(msg)
}
// PushContentTest publishes course content to the test environment. It
// mirrors PushContentOnline but targets mgdb.EnvTest and, unlike the
// online variant, does not update the job state or reset has_changed.
// NOTE(review): heavy duplication with PushContentOnline — consider
// extracting a shared helper parameterized by environment.
func PushContentTest(msg *PushOnlineMsg) {
	// Set up recover; recover only works inside a deferred function.
	defer func() {
		if err := recover(); err != nil {
			var buf [4096]byte
			n := runtime.Stack(buf[:], false)
			fmt.Printf("==> %s\n", string(buf[:n]))
		}
	}()
	qmlog.QMLog.Info("开始上线")
	contentVersionUuid := ""
	// Target the test environment (no job-state bookkeeping here).
	toConfig := mgdb.EnvTest
	if msg.OnlineType == "catalog" {
		// Push the current catalog and all of its ancestor catalogs.
		catatlogUuid := msg.OnlineUUID
		qmlog.QMLog.Info("开始上线父目录")
		for {
			var ct *editor.Catalogs
			mgdb.FindOne(
				mgdb.EnvEditor,
				EDITOR_DB,
				catalogCollection,
				bson.M{
					"uuid": catatlogUuid,
				},
				nil,
				&ct,
			)
			if ct != nil {
				mgdb.UpdateOne(
					toConfig,
					mgdb.DbContent,
					catalogCollection,
					bson.M{
						"uuid": catatlogUuid,
					},
					bson.M{
						"$set": ct,
					},
					true,
				)
				catatlogUuid = ct.Parent_uuid
				qmlog.QMLog.Info("父目录UUID:" + catatlogUuid)
			} else {
				// Walked past the root; the last UUID is the version's.
				contentVersionUuid = catatlogUuid
				break
			}
		}
		qmlog.QMLog.Info("结束上线父目录")
		// Update the content-version record.
		updateContentVersion(
			contentVersionUuid,
			true,
			toConfig,
		)
		// Update child catalogs and their content.
		updateChildeCatalogs(
			msg.OnlineUUID,
			toConfig,
		)
	} else if msg.OnlineType == "content_version" {
		// Update the version, language and course info.
		contentVersionUuid = msg.OnlineUUID
		updateContentVersion(
			msg.OnlineUUID,
			false,
			toConfig,
		)
		// Publish child catalogs and their content.
		updateChildeCatalogs(
			msg.OnlineUUID,
			toConfig,
		)
	}
	// Publish all course tags.
	qmlog.QMLog.Info("开始上线课程标签")
	var tags []*editor.Content_tags
	mgdb.Find(
		mgdb.EnvEditor,
		mgdb.DbEditor,
		contentTagsCollection,
		nil,
		nil,
		nil,
		0,
		0,
		&tags,
	)
	for _, t := range tags {
		qmlog.QMLog.Info(t.Key)
		mgdb.UpdateOne(
			toConfig,
			mgdb.DbContent,
			contentTagsCollection,
			bson.M{
				"key": t.Key,
			},
			bson.M{
				"$set": t,
			},
			true,
		)
	}
	// Finally push the content-version document itself.
	var version *editor.Course_content_infos
	mgdb.FindOne(
		mgdb.EnvEditor,
		mgdb.DbEditor,
		contentVersionCollection,
		bson.M{"uuid": contentVersionUuid},
		nil,
		&version,
	)
	mgdb.UpdateOne(
		toConfig,
		mgdb.DbContent,
		contentVersionCollection,
		bson.M{
			"uuid": contentVersionUuid,
		},
		bson.M{
			"$set": version,
		},
		true,
	)
	qmlog.QMLog.Info("结束上线")
	fmt.Println(msg)
}
// updateContentVersion mirrors one content-version record, its parent course
// document, and that course's language record from the editor environment to
// toConfig. When isParent is false and the target is the production
// environment, the editor-side has_changed flag on the version is cleared.
func updateContentVersion(
	contentVersionUUID string,
	isParent bool,
	toConfig mgdb.EnvConfig,
) {
	// Load the version record from the editor database.
	var version *editor.Course_content_infos
	mgdb.FindOne(
		mgdb.EnvEditor,
		mgdb.DbEditor,
		contentVersionCollection,
		bson.M{"uuid": contentVersionUUID},
		nil,
		&version,
	)
	if version != nil {
		qmlog.QMLog.Info("上线内容版本信息:" + version.Code)
		// mgdb.UpdateOne(
		// 	toConfig,
		// 	mgdb.DbContent,
		// 	contentVersionCollection,
		// 	bson.M{
		// 		"uuid": version.Uuid,
		// 	},
		// 	bson.M{
		// 		"$set": version,
		// 	},
		// 	true,
		// )
		// Only a direct (non-parent) push to production resets the editor's
		// change marker for this version.
		if !isParent && toConfig == mgdb.EnvOnline {
			mgdb.UpdateOne(
				mgdb.EnvEditor,
				mgdb.DbEditor,
				contentVersionCollection,
				bson.M{
					"uuid": version.Uuid,
				},
				bson.M{
					"$set": bson.M{
						"has_changed": false,
					},
				},
				false,
			)
		}
		// Mirror the parent course document.
		var course bson.M
		mgdb.FindOne(
			mgdb.EnvEditor,
			mgdb.DbEditor,
			courseInfoCollection,
			bson.M{
				"uuid": version.Parent_uuid,
			},
			nil,
			&course,
		)
		if course != nil {
			mgdb.UpdateOne(
				toConfig,
				mgdb.DbContent,
				courseInfoCollection,
				bson.M{
					"uuid": course["uuid"],
				},
				bson.M{
					"$set": course,
				},
				true,
			)
		}
		// Mirror the course's language record. Note: if course is nil,
		// course["lan_code"] reads a nil map, which in Go safely yields the
		// zero value (nil) — the lookup then simply finds nothing.
		var lang *editor.Course_langs
		mgdb.FindOne(
			mgdb.EnvEditor,
			mgdb.DbEditor,
			courseLangsCollection,
			bson.M{"lan_code": course["lan_code"]},
			nil,
			&lang,
		)
		if lang != nil {
			qmlog.QMLog.Info("上线语言种类信息:" + lang.Lan_code)
			mgdb.UpdateOne(
				toConfig,
				mgdb.DbContent,
				courseLangsCollection,
				bson.M{
					"lan_code": lang.Lan_code,
				},
				bson.M{
					"$set": lang,
				},
				true,
			)
		}
	}
}
// updateChildeCatalogs pushes every changed child catalog of catalogUUID to
// toConfig, together with the contents of leaf catalogs. Sub-catalogs are
// descended concurrently via goCatalogs; the function blocks until all
// spawned workers finish. When pushing to production, the editor-side
// has_changed markers are cleared after each successful mirror.
func updateChildeCatalogs(
	catalogUUID string,
	toConfig mgdb.EnvConfig,
) {
	qmlog.QMLog.Info("上线catalogUUID:" + catalogUUID)
	var wg sync.WaitGroup
	// Load all changed direct children of this catalog.
	var childCatalogs []*editor.Catalogs
	mgdb.Find(
		mgdb.EnvEditor,
		mgdb.DbEditor,
		catalogCollection,
		bson.M{
			"parent_uuid": catalogUUID,
			"has_changed": true,
		},
		nil,
		nil,
		0,
		0,
		&childCatalogs,
	)
	for _, ct := range childCatalogs {
		qmlog.QMLog.Info("子目录UUID:" + ct.Uuid)
		// Upsert the catalog itself into the target environment.
		mgdb.UpdateOne(
			toConfig,
			mgdb.DbContent,
			catalogCollection,
			bson.M{
				"uuid": ct.Uuid,
			},
			bson.M{
				"$set": ct,
			},
			true,
		)
		if toConfig == mgdb.EnvOnline {
			// Production push: clear the editor-side change marker.
			mgdb.UpdateOne(
				mgdb.EnvEditor,
				mgdb.DbEditor,
				catalogCollection,
				bson.M{
					"uuid": ct.Uuid,
				},
				bson.M{
					"$set": bson.M{
						"has_changed": false,
					},
				},
				false,
			)
		}
		if ct.Type == "catalog" {
			// BUG FIX: wg.Add must run before the goroutine starts. The
			// original called wg.Add(1) inside the goroutine, so wg.Wait()
			// below could observe a zero counter and return before the
			// worker had registered itself.
			wg.Add(1)
			go func(tmpUUID string) {
				defer wg.Done()
				goCatalogs(
					tmpUUID,
					toConfig,
				)
			}(ct.Uuid)
		} else {
			// Leaf catalog: mirror all of its changed content documents.
			var contents []bson.M
			mgdb.Find(
				mgdb.EnvEditor,
				mgdb.DbEditor,
				ct.Content_model,
				bson.M{
					"parent_uuid": ct.Uuid,
					"has_changed": true,
				},
				nil,
				nil,
				0,
				0,
				&contents,
			)
			for _, cnt := range contents {
				qmlog.QMLog.Info("内容UUID:" + cnt["uuid"].(string))
				mgdb.UpdateOne(
					toConfig,
					mgdb.DbContent,
					ct.Content_model,
					bson.M{
						"uuid": cnt["uuid"],
					},
					bson.M{
						"$set": cnt,
					},
					true,
				)
				if toConfig == mgdb.EnvOnline {
					// NOTE(review): this reset uses upsert=true while the
					// catalog reset above uses false — confirm which is
					// intended.
					mgdb.UpdateOne(
						mgdb.EnvEditor,
						mgdb.DbEditor,
						ct.Content_model,
						bson.M{
							"uuid": cnt["uuid"],
						},
						bson.M{
							"$set": bson.M{
								"has_changed": false,
							},
						},
						true,
					)
				}
			}
		}
	}
	wg.Wait()
	qmlog.QMLog.Info("结束上线子目录以及内容")
}
// goCatalogs recursively mirrors the changed descendants of catalogUUID to
// toConfig: sub-catalogs recurse synchronously, leaf catalogs have their
// changed content documents mirrored. It is the worker body spawned by
// updateChildeCatalogs.
//
// NOTE(review): recursion follows parent_uuid links with no cycle guard —
// assumes the catalog tree is acyclic; confirm data invariants.
func goCatalogs(
	catalogUUID string,
	toConfig mgdb.EnvConfig,
) {
	qmlog.QMLog.Info("上线catalogUUID:" + catalogUUID)
	// Load all changed direct children of this catalog.
	var childCatalogs []*editor.Catalogs
	mgdb.Find(
		mgdb.EnvEditor,
		mgdb.DbEditor,
		catalogCollection,
		bson.M{
			"parent_uuid": catalogUUID,
			"has_changed": true,
		},
		nil,
		nil,
		0,
		0,
		&childCatalogs,
	)
	for _, ct := range childCatalogs {
		qmlog.QMLog.Info("子目录UUID:" + ct.Uuid)
		// Upsert the catalog into the target environment.
		mgdb.UpdateOne(
			toConfig,
			mgdb.DbContent,
			catalogCollection,
			bson.M{
				"uuid": ct.Uuid,
			},
			bson.M{
				"$set": ct,
			},
			true,
		)
		if toConfig == mgdb.EnvOnline {
			// Production push: clear the editor-side change marker.
			// NOTE(review): upsert=true here, but updateChildeCatalogs uses
			// false for the same reset — confirm which is intended.
			mgdb.UpdateOne(
				mgdb.EnvEditor,
				mgdb.DbEditor,
				catalogCollection,
				bson.M{
					"uuid": ct.Uuid,
				},
				bson.M{
					"$set": bson.M{
						"has_changed": false,
					},
				},
				true,
			)
		}
		if ct.Type == "catalog" {
			// Sub-catalog: recurse.
			goCatalogs(
				ct.Uuid,
				toConfig,
			)
		} else {
			// Leaf catalog: mirror its changed content documents.
			var contents []bson.M
			mgdb.Find(
				mgdb.EnvEditor,
				mgdb.DbEditor,
				ct.Content_model,
				bson.M{
					"parent_uuid": ct.Uuid,
					"has_changed": true,
				},
				nil,
				nil,
				0,
				0,
				&contents,
			)
			for _, cnt := range contents {
				qmlog.QMLog.Info("内容UUID:" + cnt["uuid"].(string))
				mgdb.UpdateOne(
					toConfig,
					mgdb.DbContent,
					ct.Content_model,
					bson.M{
						"uuid": cnt["uuid"],
					},
					bson.M{
						"$set": cnt,
					},
					true,
				)
				if toConfig == mgdb.EnvOnline {
					mgdb.UpdateOne(
						mgdb.EnvEditor,
						mgdb.DbEditor,
						ct.Content_model,
						bson.M{
							"uuid": cnt["uuid"],
						},
						bson.M{
							"$set": bson.M{
								"has_changed": false,
							},
						},
						true,
					)
				}
			}
		}
	}
}
// PushOnlineCourseInfos pushes course metadata — course documents, their
// language records, and course attribute tags — from the editor environment
// to BOTH the production (EnvOnline) and test (EnvTest) environments. The
// online_jobs record identified by msg.UUID is moved to state 1 (running) at
// the start and state 2 (done) after the course/language pass.
func PushOnlineCourseInfos(msg *PushOnlineCourseMsg) {
	// Recover from any panic so one failed push cannot kill the worker;
	// print the stack for diagnosis. recover only works inside a deferred
	// function in the same goroutine.
	defer func() {
		if err := recover(); err != nil {
			var buf [4096]byte
			n := runtime.Stack(buf[:], false)
			fmt.Printf("==> %s\n", string(buf[:n]))
		}
	}()
	// Mark the job as running (state 1).
	mgdb.UpdateOne(
		mgdb.EnvEditor,
		mgdb.DbEditor,
		onLineJobsCollection,
		bson.M{
			"uuid": msg.UUID,
		},
		bson.M{
			"$set": bson.M{
				"state": 1,
			},
		},
		true,
	)
	// Select the requested courses; an empty code list means "all courses".
	where := bson.M{
		"code": bson.M{
			"$in": msg.CourseCodes,
		},
	}
	if len(msg.CourseCodes) == 0 {
		where = nil
	}
	var courses []bson.M
	mgdb.Find(
		mgdb.EnvEditor,
		mgdb.DbEditor,
		courseInfoCollection,
		where,
		nil,
		bson.M{"_id": 0},
		0,
		0,
		&courses,
	)
	for _, course := range courses {
		// Upsert the course into production and test.
		mgdb.UpdateOne(
			mgdb.EnvOnline,
			mgdb.DbContent,
			courseInfoCollection,
			bson.M{
				"code": course["code"],
			},
			bson.M{
				"$set": course,
			},
			true,
		)
		mgdb.UpdateOne(
			mgdb.EnvTest,
			mgdb.DbContent,
			courseInfoCollection,
			bson.M{
				"code": course["code"],
			},
			bson.M{
				"$set": course,
			},
			true,
		)
		// Mirror the course's language record to both environments.
		var lang *editor.Course_langs
		mgdb.FindOne(
			mgdb.EnvEditor,
			mgdb.DbEditor,
			courseLangsCollection,
			bson.M{"lan_code": course["lan_code"]},
			nil,
			&lang,
		)
		if lang != nil {
			qmlog.QMLog.Info("上线语言种类信息:" + lang.Lan_code)
			mgdb.UpdateOne(
				mgdb.EnvOnline,
				mgdb.DbContent,
				courseLangsCollection,
				bson.M{
					"lan_code": lang.Lan_code,
				},
				bson.M{
					"$set": lang,
				},
				true,
			)
			mgdb.UpdateOne(
				mgdb.EnvTest,
				mgdb.DbContent,
				courseLangsCollection,
				bson.M{
					"lan_code": lang.Lan_code,
				},
				bson.M{
					"$set": lang,
				},
				true,
			)
		}
	}
	// Mark the job as done (state 2).
	// NOTE(review): this uses the literal "online_jobs" while the state-1
	// update above uses onLineJobsCollection — confirm they name the same
	// collection and unify on the constant.
	mgdb.UpdateOne(
		mgdb.EnvEditor,
		mgdb.DbEditor,
		"online_jobs",
		bson.M{
			"uuid": msg.UUID,
		},
		bson.M{
			"$set": bson.M{
				"state": 2,
			},
		},
		false,
	)
	// Push changed course attribute tags, bumping the update_time of the
	// content version that owns each tag's catalog so clients refresh.
	qmlog.QMLog.Info("开始上线课程标签")
	var tags []*editor.Content_tags
	mgdb.Find(
		mgdb.EnvEditor,
		mgdb.DbEditor,
		contentTagsCollection,
		bson.M{
			"has_changed": true,
		},
		nil, nil, 0, 0, &tags,
	)
	for _, t := range tags {
		// Find a catalog carrying this attribute tag to locate its version.
		var ct *editor.Catalogs
		mgdb.FindOne(
			mgdb.EnvEditor,
			mgdb.DbEditor,
			tblCatalogs,
			bson.M{
				"attr_tag": t.Key,
			},
			nil, &ct,
		)
		if ct != nil {
			uuid := getVersionUUID(ct.Parent_uuid)
			// Touch the owning content version in both environments.
			mgdb.UpdateOne(
				mgdb.EnvOnline,
				mgdb.DbContent,
				tblContentInfo,
				bson.M{
					"uuid": uuid,
				},
				bson.M{
					"$set": bson.M{
						"update_time": time.Now().Unix(),
					},
				},
				false,
			)
			mgdb.UpdateOne(
				mgdb.EnvTest,
				mgdb.DbContent,
				tblContentInfo,
				bson.M{
					"uuid": uuid,
				},
				bson.M{
					"$set": bson.M{
						"update_time": time.Now().Unix(),
					},
				},
				false,
			)
		}
		// Upsert the tag itself into both environments.
		mgdb.UpdateOne(
			mgdb.EnvOnline,
			mgdb.DbContent,
			contentTagsCollection,
			bson.M{
				"key": t.Key,
			},
			bson.M{
				"$set": t,
			},
			true,
		)
		mgdb.UpdateOne(
			mgdb.EnvTest,
			mgdb.DbContent,
			contentTagsCollection,
			bson.M{
				"key": t.Key,
			},
			bson.M{
				"$set": t,
			},
			true,
		)
	}
	qmlog.QMLog.Info("结束上线课程标签")
	qmlog.QMLog.Info("结束上线")
	fmt.Println(msg)
}
// getVersionUUID walks up the catalog tree from parentUUID and returns the
// content-version UUID: the first UUID on the parent chain that no longer
// resolves to a catalog document is, by construction, the version's UUID.
//
// NOTE(review): assumes the parent chain is acyclic and terminates — confirm
// data invariants, since a cycle would recurse forever.
func getVersionUUID(parentUUID string) string {
	var ct *editor.Catalogs
	mgdb.FindOne(
		mgdb.EnvEditor,
		mgdb.DbEditor,
		tblCatalogs,
		bson.M{
			"uuid": parentUUID,
		},
		nil,
		&ct,
	)
	// Not a catalog: we have climbed past the root, so this UUID is the
	// content version itself.
	if ct == nil {
		return parentUUID
	}
	// Otherwise keep climbing. (The original had an else branch plus an
	// unreachable `return ""` after it; both removed.)
	return getVersionUUID(ct.Parent_uuid)
}
|
// Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You
// may not use this file except in compliance with the License. A copy of
// the License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is
// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
// ANY KIND, either express or implied. See the License for the specific
// language governing permissions and limitations under the License.
package secretcache_test
import (
"bytes"
"errors"
"github.com/aws/aws-secretsmanager-caching-go/secretcache"
"testing"
"github.com/aws/aws-sdk-go/service/secretsmanager"
)
// TestInstantiatesClient checks that secretcache.New, called without
// options, returns no error and wires up a non-nil default client.
func TestInstantiatesClient(t *testing.T) {
	cache, err := secretcache.New()
	if err != nil {
		t.Fatalf("Failed to instantiate default Client")
	}
	if cache.Client == nil {
		t.Fatalf("Failed to instantiate default Client")
	}
}
// TestGetSecretString verifies that GetSecretString returns the string
// value served by the mocked Secrets Manager client.
func TestGetSecretString(t *testing.T) {
	client, _, want := newMockedClientWithDummyResults()
	cache, _ := secretcache.New(
		func(c *secretcache.Cache) { c.Client = &client },
	)
	got, err := cache.GetSecretString("test")
	if err != nil {
		t.Fatalf("Unexpected error - %s", err.Error())
	}
	if got != want {
		t.Fatalf("Expected and result secret string are different - \"%s\", \"%s\"", want, got)
	}
}
// TestGetSecretBinary verifies that GetSecretBinary returns the binary
// payload served by the mocked client when SecretString is absent.
func TestGetSecretBinary(t *testing.T) {
	client, _, _ := newMockedClientWithDummyResults()
	want := []byte{0, 1, 1, 0, 0, 1, 1, 0}
	client.MockedGetResult.SecretString = nil
	client.MockedGetResult.SecretBinary = want
	cache, _ := secretcache.New(
		func(c *secretcache.Cache) { c.Client = &client },
	)
	got, err := cache.GetSecretBinary("test")
	if err != nil {
		t.Fatalf("Unexpected error - %s", err.Error())
	}
	if !bytes.Equal(got, want) {
		t.Fatalf("Expected and result secret binary are different ")
	}
}
// TestGetSecretMissing verifies that both accessors fail when the current
// secret version exists but carries neither a SecretString nor a
// SecretBinary payload.
func TestGetSecretMissing(t *testing.T) {
	versionIdsToStages := make(map[string][]*string)
	versionIdsToStages["01234567890123456789012345678901"] = []*string{getStrPtr("AWSCURRENT")}
	mockClient := mockSecretsManagerClient{
		MockedGetResult:      &secretsmanager.GetSecretValueOutput{Name: getStrPtr("test")},
		MockedDescribeResult: &secretsmanager.DescribeSecretOutput{VersionIdsToStages: versionIdsToStages},
	}
	secretCache, _ := secretcache.New(
		func(c *secretcache.Cache) { c.Client = &mockClient },
	)
	_, err := secretCache.GetSecretString("test")
	if err == nil {
		t.Fatalf("Expected to not find a SecretString in this version")
	}
	_, err = secretCache.GetSecretBinary("test")
	if err == nil {
		// BUG FIX: message previously said "SecretString" on the binary path.
		t.Fatalf("Expected to not find a SecretBinary in this version")
	}
}
// TestGetSecretNoCurrent verifies that both accessors fail when the only
// secret version is not staged as AWSCURRENT.
func TestGetSecretNoCurrent(t *testing.T) {
	stages := map[string][]*string{
		"01234567890123456789012345678901": {getStrPtr("NOT_CURRENT")},
	}
	client := mockSecretsManagerClient{
		MockedGetResult: &secretsmanager.GetSecretValueOutput{
			Name:         getStrPtr("test"),
			SecretString: getStrPtr("some secret string"),
			VersionId:    getStrPtr("01234567890123456789012345678901"),
		},
		MockedDescribeResult: &secretsmanager.DescribeSecretOutput{VersionIdsToStages: stages},
	}
	cache, _ := secretcache.New(
		func(c *secretcache.Cache) { c.Client = &client },
	)
	if _, err := cache.GetSecretString("test"); err == nil {
		t.Fatalf("Expected to not find secret version")
	}
	// Repeat for the binary accessor with a binary-only payload.
	client.MockedGetResult.SecretString = nil
	client.MockedGetResult.SecretBinary = []byte{0, 1, 0, 1, 0, 1, 0, 1}
	if _, err := cache.GetSecretBinary("test"); err == nil {
		t.Fatalf("Expected to not find secret version")
	}
}
// TestGetSecretVersionNotFound verifies both accessors surface an error
// when GetSecretValue fails and no cached result exists.
func TestGetSecretVersionNotFound(t *testing.T) {
	client, secretId, _ := newMockedClientWithDummyResults()
	client.MockedGetResult = nil
	client.GetSecretValueErr = errors.New("resourceNotFound")
	cache, _ := secretcache.New(
		func(c *secretcache.Cache) { c.Client = &client },
	)
	if _, err := cache.GetSecretString(secretId); err == nil {
		t.Fatalf("Expected to not find secret version")
	}
	if _, err := cache.GetSecretBinary(secretId); err == nil {
		t.Fatalf("Expected to not find secret version")
	}
}
// TestGetSecretNoVersions verifies both accessors fail when the secret has
// no version-to-stage mapping at all.
func TestGetSecretNoVersions(t *testing.T) {
	client, secretId, _ := newMockedClientWithDummyResults()
	client.MockedGetResult = nil
	client.MockedDescribeResult.VersionIdsToStages = nil
	cache, _ := secretcache.New(
		func(c *secretcache.Cache) { c.Client = &client },
	)
	if _, err := cache.GetSecretString(secretId); err == nil {
		t.Fatalf("Expected to not find secret version")
	}
	if _, err := cache.GetSecretBinary(secretId); err == nil {
		t.Fatalf("Expected to not find secret version")
	}
}
// TestGetSecretStringMultipleTimes verifies the cache serves repeated
// string reads from memory: 100 reads, exactly one API round-trip.
func TestGetSecretStringMultipleTimes(t *testing.T) {
	client, secretId, want := newMockedClientWithDummyResults()
	cache, _ := secretcache.New(
		func(c *secretcache.Cache) { c.Client = &client },
	)
	for i := 0; i < 100; i++ {
		got, err := cache.GetSecretString(secretId)
		if err != nil {
			t.Fatalf("Unexpected error - %s", err.Error())
		}
		if got != want {
			t.Fatalf("Expected and result secret string are different - \"%s\", \"%s\"", want, got)
		}
	}
	if client.DescribeSecretCallCount != 1 {
		t.Fatalf("Expected DescribeSecret to be called once, was called - \"%d\" times", client.DescribeSecretCallCount)
	}
	if client.GetSecretValueCallCount != 1 {
		t.Fatalf("Expected GetSecretValue to be called once, was called - \"%d\" times", client.GetSecretValueCallCount)
	}
}
// TestGetSecretBinaryMultipleTimes verifies the cache serves repeated
// binary reads from memory: 100 reads, exactly one API round-trip.
func TestGetSecretBinaryMultipleTimes(t *testing.T) {
	client, secretId, _ := newMockedClientWithDummyResults()
	want := []byte{0, 1, 0, 1, 1, 1, 0, 0}
	client.MockedGetResult.SecretBinary = want
	client.MockedGetResult.SecretString = nil
	cache, _ := secretcache.New(
		func(c *secretcache.Cache) { c.Client = &client },
	)
	for i := 0; i < 100; i++ {
		got, err := cache.GetSecretBinary(secretId)
		if err != nil {
			t.Fatalf("Unexpected error - %s", err.Error())
		}
		if !bytes.Equal(got, want) {
			t.Fatalf("Expected and result binary are different")
		}
	}
	if client.DescribeSecretCallCount != 1 {
		t.Fatalf("Expected DescribeSecret to be called once, was called - \"%d\" times", client.DescribeSecretCallCount)
	}
	if client.GetSecretValueCallCount != 1 {
		t.Fatalf("Expected GetSecretValue to be called once, was called - \"%d\" times", client.GetSecretValueCallCount)
	}
}
// TestGetSecretStringRefresh verifies string reads stay correct when the
// cache item TTL is set so low that entries refresh between reads.
func TestGetSecretStringRefresh(t *testing.T) {
	client, secretId, want := newMockedClientWithDummyResults()
	cache, _ := secretcache.New(
		func(c *secretcache.Cache) { c.Client = &client },
		func(c *secretcache.Cache) { c.CacheConfig.CacheItemTTL = 1 },
	)
	for i := 0; i < 10; i++ {
		got, err := cache.GetSecretString(secretId)
		if err != nil {
			t.Fatalf("Unexpected error - %s", err.Error())
		}
		if got != want {
			t.Fatalf("Expected and result secret string are different - \"%s\", \"%s\"", want, got)
		}
	}
}
// TestGetSecretBinaryRefresh verifies binary reads stay correct when the
// cache item TTL is set so low that entries refresh between reads.
func TestGetSecretBinaryRefresh(t *testing.T) {
	client, secretId, _ := newMockedClientWithDummyResults()
	want := []byte{0, 1, 1, 1, 1, 1, 0, 0}
	client.MockedGetResult.SecretString = nil
	client.MockedGetResult.SecretBinary = want
	cache, _ := secretcache.New(
		func(c *secretcache.Cache) { c.Client = &client },
		func(c *secretcache.Cache) { c.CacheConfig.CacheItemTTL = 1 },
	)
	for i := 0; i < 10; i++ {
		got, err := cache.GetSecretBinary(secretId)
		if err != nil {
			t.Fatalf("Unexpected error - %s", err.Error())
		}
		if !bytes.Equal(got, want) {
			t.Fatalf("Expected and result secret binary are different")
		}
	}
}
// TestGetSecretStringWithStage verifies string reads against an explicit
// version stage return the mocked value consistently.
func TestGetSecretStringWithStage(t *testing.T) {
	client, secretId, want := newMockedClientWithDummyResults()
	cache, _ := secretcache.New(
		func(c *secretcache.Cache) { c.Client = &client },
	)
	for i := 0; i < 10; i++ {
		got, err := cache.GetSecretStringWithStage(secretId, "versionStage-42")
		if err != nil {
			t.Fatalf("Unexpected error - %s", err.Error())
		}
		if got != want {
			t.Fatalf("Expected and result secret string are different - \"%s\", \"%s\"", want, got)
		}
	}
}
// TestGetSecretBinaryWithStage verifies binary reads against an explicit
// version stage return the mocked payload consistently.
func TestGetSecretBinaryWithStage(t *testing.T) {
	client, secretId, _ := newMockedClientWithDummyResults()
	want := []byte{0, 1, 1, 0, 0, 1, 0, 1}
	client.MockedGetResult.SecretString = nil
	client.MockedGetResult.SecretBinary = want
	cache, _ := secretcache.New(
		func(c *secretcache.Cache) { c.Client = &client },
	)
	for i := 0; i < 10; i++ {
		got, err := cache.GetSecretBinaryWithStage(secretId, "versionStage-42")
		if err != nil {
			t.Fatalf("Unexpected error - %s", err.Error())
		}
		if !bytes.Equal(got, want) {
			t.Fatalf("Expected and result secret binary are different")
		}
	}
}
// TestGetSecretStringMultipleNotFound verifies that repeated string reads
// of a missing secret keep failing while the error itself is cached:
// 100 reads cause only one DescribeSecret call.
func TestGetSecretStringMultipleNotFound(t *testing.T) {
	client := mockSecretsManagerClient{
		GetSecretValueErr: errors.New("versionNotFound"),
		DescribeSecretErr: errors.New("secretNotFound"),
	}
	cache, _ := secretcache.New(
		func(c *secretcache.Cache) { c.Client = &client },
	)
	for i := 0; i < 100; i++ {
		if _, err := cache.GetSecretStringWithStage("test", "versionStage-42"); err == nil {
			t.Fatalf("Expected error: secretNotFound for a missing secret")
		}
	}
	if client.DescribeSecretCallCount != 1 {
		t.Fatalf("Expected a single call to DescribeSecret API, got %d", client.DescribeSecretCallCount)
	}
}
// TestGetSecretBinaryMultipleNotFound verifies that repeated binary reads
// of a missing secret keep failing while the error itself is cached:
// 100 reads cause only one DescribeSecret call.
func TestGetSecretBinaryMultipleNotFound(t *testing.T) {
	client := mockSecretsManagerClient{
		GetSecretValueErr: errors.New("versionNotFound"),
		DescribeSecretErr: errors.New("secretNotFound"),
	}
	cache, _ := secretcache.New(
		func(c *secretcache.Cache) { c.Client = &client },
	)
	for i := 0; i < 100; i++ {
		if _, err := cache.GetSecretBinaryWithStage("test", "versionStage-42"); err == nil {
			t.Fatalf("Expected error: secretNotFound for a missing secret")
		}
	}
	if client.DescribeSecretCallCount != 1 {
		t.Fatalf("Expected a single call to DescribeSecret API, got %d", client.DescribeSecretCallCount)
	}
}
// TestGetSecretVersionStageEmpty verifies an empty stage argument falls
// back to the default stage, both with the default configuration and with
// an explicitly empty CacheConfig.VersionStage.
func TestGetSecretVersionStageEmpty(t *testing.T) {
	client, _, want := newMockedClientWithDummyResults()
	cache, _ := secretcache.New(
		func(c *secretcache.Cache) { c.Client = &client },
	)
	got, err := cache.GetSecretStringWithStage("test", "")
	if err != nil {
		t.Fatalf("Unexpected error - %s", err.Error())
	}
	if got != want {
		t.Fatalf("Expected and result secret string are different - \"%s\", \"%s\"", want, got)
	}
	//New cache for new config
	cache, _ = secretcache.New(
		func(c *secretcache.Cache) { c.Client = &client },
		func(c *secretcache.Cache) { c.CacheConfig.VersionStage = "" },
	)
	got, err = cache.GetSecretStringWithStage("test", "")
	if err != nil {
		t.Fatalf("Unexpected error - %s", err.Error())
	}
	if got != want {
		t.Fatalf("Expected and result secret string are different - \"%s\", \"%s\"", want, got)
	}
}
|
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package cli
import (
"context"
"fmt"
"testing"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/server"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/stretchr/testify/assert"
)
// TestTestServerArgsForTransientCluster checks that
// testServerArgsForTransientCluster builds the expected base.TestServerArgs
// for demo-cluster nodes, given per-node SQL memory and cache sizes taken
// from the global demoCtx.
func TestTestServerArgsForTransientCluster(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	stickyEnginesRegistry := server.NewStickyInMemEnginesRegistry()
	testCases := []struct {
		nodeID            roachpb.NodeID
		joinAddr          string
		sqlPoolMemorySize int64
		cacheSize         int64
		expected          base.TestServerArgs
	}{
		{
			nodeID:            roachpb.NodeID(1),
			joinAddr:          "127.0.0.1",
			sqlPoolMemorySize: 2 << 10,
			cacheSize:         1 << 10,
			expected: base.TestServerArgs{
				PartOfCluster:           true,
				JoinAddr:                "127.0.0.1",
				DisableTLSForHTTP:       true,
				SQLAddr:                 ":1234",
				HTTPAddr:                ":4567",
				SQLMemoryPoolSize:       2 << 10,
				CacheSize:               1 << 10,
				NoAutoInitializeCluster: true,
				TenantAddr:              new(string),
				EnableDemoLoginEndpoint: true,
				Knobs: base.TestingKnobs{
					Server: &server.TestingKnobs{
						StickyEngineRegistry: stickyEnginesRegistry,
					},
				},
			},
		},
		{
			nodeID:            roachpb.NodeID(3),
			joinAddr:          "127.0.0.1",
			sqlPoolMemorySize: 4 << 10,
			cacheSize:         4 << 10,
			expected: base.TestServerArgs{
				PartOfCluster:           true,
				JoinAddr:                "127.0.0.1",
				SQLAddr:                 ":1236",
				HTTPAddr:                ":4569",
				DisableTLSForHTTP:       true,
				SQLMemoryPoolSize:       4 << 10,
				CacheSize:               4 << 10,
				NoAutoInitializeCluster: true,
				TenantAddr:              new(string),
				EnableDemoLoginEndpoint: true,
				Knobs: base.TestingKnobs{
					Server: &server.TestingKnobs{
						StickyEngineRegistry: stickyEnginesRegistry,
					},
				},
			},
		},
	}
	for i, tc := range testCases {
		t.Run(fmt.Sprint(i), func(t *testing.T) {
			// The function under test reads the global demoCtx; save and
			// restore it so subtests do not leak state into each other.
			demoCtxTemp := demoCtx
			demoCtx.sqlPoolMemorySize = tc.sqlPoolMemorySize
			demoCtx.cacheSize = tc.cacheSize
			actual := testServerArgsForTransientCluster(unixSocketDetails{}, tc.nodeID, tc.joinAddr, "", 1234, 4567, stickyEnginesRegistry)
			stopper := actual.Stopper
			defer stopper.Stop(context.Background())
			assert.Len(t, actual.StoreSpecs, 1)
			assert.Equal(
				t,
				fmt.Sprintf("demo-node%d", tc.nodeID),
				actual.StoreSpecs[0].StickyInMemoryEngineID,
			)
			// We cannot compare these.
			actual.Stopper = nil
			actual.StoreSpecs = nil
			assert.Equal(t, tc.expected, actual)
			// Restore demoCtx state after each test.
			demoCtx = demoCtxTemp
		})
	}
}
|
package headlysis
// Output is the top-level JSON result of a headlysis run: one analysis
// entry per scanned URL.
type Output struct {
	Headlysis []UrlAnalysisOutput `json:"headlysis"`
}

// UrlAnalysisOutput reports, for one target URL, which security headers
// were found in the response and which were absent.
type UrlAnalysisOutput struct {
	Url               string             `json:"target_url"`
	PresentHeaders    []PresentHeader    `json:"present_security_headers"`
	NotPresentHeaders []NotPresentHeader `json:"not_present_security_header"`
}

// PresentHeader is a security header observed in the response, with its
// value.
type PresentHeader struct {
	Name  string `json:"header_name"`
	Value string `json:"header_value"`
}

// NotPresentHeader is a missing security header, with a reference URL
// explaining the header.
type NotPresentHeader struct {
	Name    string `json:"header_name"`
	InfoUrl string `json:"info_url"`
}
|
package static
// sheetFileName: cfg_battle_npc.xlsx
// Battle NPC ownership types, generated from the spreadsheet above.
const (
	BattleNpcTypeSystem = 1 // system-controlled NPC
	BattleNpcTypePlayer = 2 // player-controlled NPC
)
|
package parser
import "fmt"
// TypeLit is the token/node for the "Type" keyword; it records only its
// source position.
type TypeLit struct {
	pos posRange
}

func (t TypeLit) Pos() Position  { return t.pos }
func (t TypeLit) String() string { return "Type" }

// StructLit is the token/node for the "Struct" keyword.
type StructLit struct {
	pos posRange
}

func (t StructLit) Pos() Position  { return t.pos }
func (t StructLit) String() string { return "Struct" }

// FuncLit is the token/node for the "Func" keyword.
type FuncLit struct {
	pos posRange
}

func (t FuncLit) Pos() Position  { return t.pos }
func (t FuncLit) String() string { return "Func" }

// InterfaceLit is the token/node for the "Interface" keyword.
type InterfaceLit struct {
	pos posRange
}

func (t InterfaceLit) Pos() Position  { return t.pos }
func (t InterfaceLit) String() string { return "Interface" }

// PackageLit is the token/node for the "Package" keyword.
type PackageLit struct {
	pos posRange
}

func (t PackageLit) Pos() Position  { return t.pos }
func (t PackageLit) String() string { return "Package" }

// ReturnLit is the token/node for the "Return" keyword.
type ReturnLit struct {
	pos posRange
}

func (t ReturnLit) Pos() Position  { return t.pos }
func (t ReturnLit) String() string { return "Return" }

// Ident is an identifier token: its spelling plus its source position.
type Ident struct {
	str string
	pos posRange
}

func (t Ident) Pos() Position  { return t.pos }
func (t Ident) String() string { return fmt.Sprintf("Ident %s", t.str) }
|
package routes
import (
"bytes"
"encoding/json"
"github.com/labstack/echo/v4"
"github.com/labstack/echo/v4/middleware"
"hero/configs"
"hero/pkg/db/mysql"
"hero/pkg/logger"
"hero/utils"
"io/ioutil"
"os"
)
// Run configures the echo server — logging, recovery, CORS, request IDs,
// request/response body dumping, and an MD5-based key auth for non-GET
// requests — registers the API and admin routes, and starts listening on
// the configured port (or $PORT outside the local environment). It blocks
// until the server stops and returns the server error.
func Run() error {
	port := configs.Get("server.port")
	e := echo.New()
	e.Use(middleware.Logger())
	e.Use(middleware.Recover())
	e.Use(middleware.CORS())
	e.Use(middleware.RequestID())
	// Log full request/response bodies, keyed by request ID.
	// (The original wrapped this in a no-op `if true { ... }` — removed.)
	e.Use(middleware.BodyDump(func(c echo.Context, reqBody, resBody []byte) {
		requestID := c.Response().Header().Get(echo.HeaderXRequestID)
		logger.Print(c.Request().URL, "BodyDump reqBody", requestID, string(reqBody))
		logger.Print(c.Request().URL, "BodyDump resBody", requestID, string(resBody))
	}))
	// Key auth: GET requests pass unconditionally; other requests must send
	// a key equal to the MD5-derived key of the fb_user_id in the JSON body.
	e.Use(middleware.KeyAuth(func(key string, c echo.Context) (bool, error) {
		if c.Request().Method == "GET" {
			// BUG FIX: the original dead-stored key = "Xn2r5u8x" here before
			// returning; the assignment had no effect and was removed.
			return true, nil
		}
		requestID := c.Response().Header().Get(echo.HeaderXRequestID)
		body := &struct {
			FbUserID string `json:"fb_user_id"`
		}{}
		if c.Request().Body != nil { // Read
			reqBody, _ := ioutil.ReadAll(c.Request().Body)
			err := json.Unmarshal(reqBody, body)
			if err != nil {
				logger.Print("KeyAuth", "err", err.Error())
				return false, nil
			}
			// Restore the body so downstream handlers can read it again.
			c.Request().Body = ioutil.NopCloser(bytes.NewBuffer(reqBody)) // Reset
		}
		md5Key := utils.GenMd5Key(body.FbUserID)
		logger.Print(c.Request().URL, "KeyAuth", requestID, body.FbUserID, key, md5Key, key == md5Key)
		return key == md5Key, nil
	}))
	InitApi(e)
	InitAdmin(e)
	// Hosted environments (e.g. Heroku) inject the port via $PORT.
	if configs.EnvPath != "local" {
		port = os.Getenv("PORT")
	}
	return e.Start(":" + port)
}
// Shutdown closes the MySQL connection pool, logging progress, and returns
// any error from the close.
func Shutdown() error {
	// BUG FIX: log messages previously read "showdown".
	logger.Print("shutdown db ...")
	if err := mysql.Close(); err != nil {
		return err
	}
	logger.Print("shutdown db done.")
	return nil
}
|
package main
import (
"fmt"
"encoding/csv"
"os"
"database/sql"
_ "github.com/go-sql-driver/mysql"
"strconv"
)
// Usermove is one row of the outputv3 location-tracking table.
// NOTE(review): the json tags are inert — encoding/json ignores unexported
// fields, so this struct cannot be (un)marshaled as-is. It is only populated
// via sql Rows.Scan in main. Exporting the fields would fix the tags but
// changes the type's interface.
type Usermove struct {
	utctime    int     `json:"utctime"`    // observation time, Unix seconds (presumably UTC — confirm)
	idfa       string  `json:"idfa"`       // device advertising identifier
	geohash    string  `json:"geohash"`    // geohash of the position
	latitude   float64 `json:"latitude"`
	longitude  float64 `json:"longitude"`
	horizontal float64 `json:"horizontal"` // presumably horizontal accuracy in meters — confirm
	userid     int     `json:"userid"`
	hourofday  int     `json:"hourofday"`
	lcltime    string  `json:"lcltime"` // local-time string
}
func main() {
fmt.Printf("hello world")
f, err := os.Open("../../../../../reverse_geocoder/output.csv")
if err != nil {
fmt.Printf("error")
panic(err.Error())
}
defer f.Close()
csvr := csv.NewReader(f)
for i :=0; i <= 10; i++{
row, err := csvr.Read()
if err != nil {
fmt.Printf("error 1")
panic(err.Error())
}
fmt.Printf(row[0]+" "+row[1]+" "+row[3]+"\n")
}
db, err := sql.Open("mysql", "root:welcome123@tcp(127.0.0.1:3306)/test")
if err != nil {
panic(err.Error())
}
defer db.Close()
results, err := db.Query("SELECT utctime, idfa, geohash, latitude, longitude, horizontal, userid, hourofday, lcltime from outputv3")
if err != nil {
panic(err.Error())
}
count := 0
for results.Next() {
var usrmove Usermove
count = count + 1
if count == 10 {
break
}
err = results.Scan(&usrmove.utctime, &usrmove.idfa, &usrmove.geohash, &usrmove.latitude, &usrmove.longitude, &usrmove.horizontal, &usrmove.userid, &usrmove.hourofday, &usrmove.lcltime)
if err != nil {
panic(err.Error())
}
fmt.Printf(strconv.Itoa(usrmove.utctime)+" "+strconv.FormatFloat(usrmove.latitude,'f',2,32)+" "+strconv.FormatFloat(usrmove.longitude,'f',2,32)+"\n")
}
} |
package dao
import (
"errors"
"go-blogs-webapp/main/models"
"io/ioutil"
"log"
"time"
mgo "gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
)
// BlogsDAO holds the MongoDB connection settings for the blogs store.
type BlogsDAO struct {
	Server   string // mgo dial string (host[:port])
	Database string // database name
}

// db is the shared database handle set by Connect; all DAO methods use it.
var db *mgo.Database

const (
	// COLLECTION is the MongoDB collection that stores blog metadata.
	COLLECTION = "blogs"
)
// Connect establishes a connection to the configured MongoDB server and
// stores the database handle in the package-level db variable. The process
// exits (log.Fatal) if dialing fails.
func (m *BlogsDAO) Connect() {
	session, err := mgo.Dial(m.Server)
	if err != nil {
		log.Fatal(err)
	}
	db = session.DB(m.Database)
}
// Insert stamps the blog with the current time, uploads its image to
// GridFS, and inserts the blog's metadata into the blogs collection.
// Returns the upload error or the insert error, if any.
func (m *BlogsDAO) Insert(blog *models.BlogJson) error {
	blog.PostDate = time.Now()
	uploadErr := UploadImage(blog)
	if uploadErr != nil {
		return uploadErr
	}
	// Only the metadata subset is persisted in the collection; the image
	// lives in GridFS under the blog's ID.
	blogMeta := models.BlogMeta{ID: blog.ID, Username: blog.Username,
		Description: blog.Description, PostDate: blog.PostDate}
	err := db.C(COLLECTION).Insert(&blogMeta)
	return err
}
// UploadImage stores the blog's image (read from the path in blog.Image)
// in GridFS under "<blog-id>.jpg". It returns the first error from
// creating the GridFS file, reading the source file, writing, or closing.
func UploadImage(blog *models.BlogJson) error {
	file, createErr := db.GridFS("fs").Create(blog.ID.Hex() + ".jpg")
	if createErr != nil {
		return createErr
	}
	fileBytes, readErr := ioutil.ReadFile(blog.Image)
	if readErr != nil {
		return readErr
	}
	if _, writeErr := file.Write(fileBytes); writeErr != nil {
		return writeErr
	}
	// BUG FIX: on Close failure the original returned writeErr, which is
	// always nil at this point — the close error was silently dropped.
	if closeErr := file.Close(); closeErr != nil {
		return closeErr
	}
	return nil
}
// Mock_Success_Insert is a test stub that simulates a successful insert.
func (m *BlogsDAO) Mock_Success_Insert(blog *models.BlogJson) error {
	return nil
}

// Mock_Failure_Insert is a test stub that simulates a failed insert.
func (m *BlogsDAO) Mock_Failure_Insert(blog *models.BlogJson) error {
	return errors.New("Insertion failed")
}
// Find returns all blog metadata documents for the given username.
// NOTE(review): the query key is "Username" (capitalized); mgo lowercases
// struct field names by default unless a bson tag overrides it — confirm
// the stored key, otherwise this query matches nothing.
// (Result order (error, values) is unconventional but kept for callers.)
func (m *BlogsDAO) Find(username string) (error, []*models.BlogMeta) {
	blogs := []*models.BlogMeta{}
	err := db.C(COLLECTION).Find(bson.M{"Username": username}).All(&blogs)
	return err, blogs
}
// RetriveImage is an unimplemented stub for fetching a blog's image from
// GridFS; it currently always returns nil. (Name keeps the original
// "Retrive" spelling because callers depend on it.)
func RetriveImage(blog *models.BlogMeta) error {
	return nil
}
|
package validator
import (
"net/http"
"github.com/gtongy/demo-echo-app/errors"
"github.com/gtongy/demo-echo-app/models"
"github.com/gtongy/demo-echo-app/mysql"
"github.com/labstack/echo"
validator "gopkg.in/go-playground/validator.v9"
)
// CustomValidator adapts go-playground/validator to echo's Validator
// interface.
type CustomValidator struct {
	Validator *validator.Validate
}

// Validate runs struct-tag validation on i and returns the validation
// error, if any.
func (cv *CustomValidator) Validate(i interface{}) error {
	return cv.Validator.Struct(i)
}

// New returns a fresh go-playground validator instance.
func New() *validator.Validate {
	return validator.New()
}
// ApiAccessTokenValidator is an echo KeyAuth validator: it accepts the
// request iff a user row exists whose access_token equals key, responding
// 401 via the API error handler otherwise.
// NOTE(review): db.Close() here closes the handle returned by
// mysql.GetDB() on every request — if GetDB returns a shared pool this
// would tear it down; confirm GetDB's semantics.
func ApiAccessTokenValidator(key string, c echo.Context) (bool, error) {
	var user models.User
	db := mysql.GetDB()
	defer db.Close()
	err := db.Where("access_token = ?", key).Find(&user).Error
	if err != nil {
		return false, errors.APIError.JSONErrorHandler(err, c, http.StatusUnauthorized, "Access token is invalid")
	}
	return true, nil
}
|
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//go:build ignore
// +build ignore
package main
import (
"bufio"
"bytes"
"flag"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"regexp"
"sort"
"strconv"
"text/template"
)
// includeErrno is the C translation unit fed to the preprocessor to dump
// the errno #defines.
const includeErrno = `
#include <asm-generic/errno.h>
`

// ErrorNumber pairs a POSIX error name (e.g. "ENOENT") with its numeric
// value; Value is -1 when the #define aliases another name rather than a
// number.
type ErrorNumber struct {
	Name  string
	Value int
}

// TemplateParams is the data used in evaluating the template.
type TemplateParams struct {
	Command   string        // generator command name, for the DO NOT EDIT header
	NameToNum []ErrorNumber // all names, including aliases (resolved via cgo)
	NumToName []ErrorNumber // numeric entries only, for the reverse map
}
// header is the license + "DO NOT EDIT" banner prepended to the generated
// file; {{.Command}} is the generator's command name.
const header = `
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

// Code generated by {{.Command}} - DO NOT EDIT.
`

// headerTmpl renders the banner above.
var headerTmpl = template.Must(template.New("header").Parse(header))
// godefsTemplate is the cgo -godefs input: it declares the errno maps with
// C.<NAME> references so cgo resolves aliases (e.g. EWOULDBLOCK) to their
// numeric values.
const godefsTemplate = `
package auparse

/*
#include <asm-generic/errno.h>
*/
import "C"

// AuditErrnoToNum contains a mapping of POSIX error names to errnos (error numbers).
var AuditErrnoToNum = map[string]int{
{{- range $err := .NameToNum }}
	"{{ $err.Name }}": C.{{ $err.Name }},
{{- end }}
}

// AuditErrnoToName contains a mapping of errnos (error numbers) to POSIX names.
var AuditErrnoToName = map[int]string{
{{- range $err := .NumToName }}
	{{ $err.Value }}: "{{ $err.Name }}",
{{- end }}
}
`

// tmpl renders the godefs source above.
var tmpl = template.Must(template.New("message_types").Parse(godefsTemplate))

// errnoDefRegex matches preprocessor lines of the form
// "#define E<NAME> <value>", capturing the name and the value token.
var errnoDefRegex = regexp.MustCompile(`^#define\s+(E\w+)\s+(\w+)`)
// readErrorNumbers runs the C preprocessor over an #include of
// <asm-generic/errno.h> and scrapes every "#define E..." line from the
// output. Names whose value is not a plain integer (aliases of other
// errnos) are recorded with a value of -1 so cgo can resolve them
// later. The result is sorted by value, then by name.
func readErrorNumbers() ([]ErrorNumber, error) {
	pp := exec.Command("gcc", "-E", "-dD", "-")
	pp.Stdin = bytes.NewBufferString(includeErrno)
	preprocessed, err := pp.Output()
	if err != nil {
		return nil, err
	}
	values := map[string]int{}
	scanner := bufio.NewScanner(bytes.NewReader(preprocessed))
	for scanner.Scan() {
		m := errnoDefRegex.FindStringSubmatch(scanner.Text())
		if len(m) != 3 {
			continue
		}
		if n, convErr := strconv.Atoi(m[2]); convErr == nil {
			values[m[1]] = n
		} else {
			// Alias of another errno (e.g. a symbolic value);
			// mark it so it can be resolved through cgo instead.
			values[m[1]] = -1
		}
	}
	result := make([]ErrorNumber, 0, len(values))
	for name, value := range values {
		result = append(result, ErrorNumber{Name: name, Value: value})
	}
	sort.Slice(result, func(i, j int) bool {
		if result[i].Value == result[j].Value {
			return result[i].Name < result[j].Name
		}
		return result[i].Value < result[j].Value
	})
	return result, nil
}
// run generates the Go source file at flagOut: it renders the cgo
// template with the scraped errno table, resolves the C constants with
// "go tool cgo -godefs", prepends the license header, strips cgo's
// comment lines, writes the result, and formats it with gofmt.
func run() error {
	tmp, err := ioutil.TempDir("", "mk_audit_exit_codes")
	if err != nil {
		return err
	}
	defer os.RemoveAll(tmp)
	// cgo -godefs writes intermediate files to the working directory.
	if err := os.Chdir(tmp); err != nil {
		return err
	}
	errnos, err := readErrorNumbers()
	if err != nil {
		return err
	}
	// Drop alias entries (value -1); only real numbers belong in the
	// num-to-name table.
	var numToName []ErrorNumber
	for _, errno := range errnos {
		if errno.Value >= 0 {
			numToName = append(numToName, errno)
		}
	}
	// Create output file.
	f, err := os.Create("defs.go")
	if err != nil {
		return err
	}
	defer f.Close()
	// Evaluate template.
	r := TemplateParams{
		Command:   filepath.Base(os.Args[0]),
		NameToNum: errnos,
		NumToName: numToName,
	}
	if err := tmpl.Execute(f, r); err != nil {
		return err
	}
	output, err := exec.Command("go", "tool", "cgo", "-godefs", "defs.go").Output()
	if err != nil {
		return err
	}
	buf := new(bytes.Buffer)
	if err = headerTmpl.Execute(buf, r); err != nil {
		// FIX: this previously returned nil, silently swallowing the error.
		return err
	}
	// Keep everything except cgo's "//" comment lines.
	s := bufio.NewScanner(bytes.NewReader(output))
	for s.Scan() {
		if !bytes.HasPrefix(s.Bytes(), []byte("//")) {
			buf.Write(s.Bytes())
			buf.WriteByte('\n')
		}
	}
	// FIX: surface scanner errors instead of ignoring them.
	if err := s.Err(); err != nil {
		return err
	}
	if err = ioutil.WriteFile(flagOut, buf.Bytes(), 0o644); err != nil {
		return err
	}
	_, err = exec.Command("gofmt", "-w", "-s", flagOut).Output()
	return err
}
var flagOut string
// main parses flags, resolves the output path to an absolute path (so
// run's chdir does not break it), and executes the generator, exiting
// non-zero on any failure.
func main() {
	flag.StringVar(&flagOut, "out", "zaudit_exit_codes.go", "output file")
	flag.Parse()
	abs, err := filepath.Abs(flagOut)
	if err != nil {
		fmt.Fprintf(os.Stderr, "error: %v\n", err)
		os.Exit(1)
	}
	flagOut = abs
	if err := run(); err != nil {
		fmt.Fprintf(os.Stderr, "error: %v\n", err)
		os.Exit(1)
	}
}
|
package e2e
import (
"encoding/json"
"net/http"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/tommy351/kubenvoy/test/echo"
)
// decodeResponse unmarshals the JSON body of r into an echo.Response.
// The response body is always closed before returning so the underlying
// connection can be reused.
func decodeResponse(r *http.Response) (*echo.Response, error) {
	// FIX: the body was previously never closed, leaking the connection.
	defer r.Body.Close()
	var res echo.Response
	if err := json.NewDecoder(r.Body).Decode(&res); err != nil {
		return nil, err
	}
	return &res, nil
}
// mustDecodeResponse decodes the JSON body of r and fails the current
// Ginkgo spec if decoding returns an error.
func mustDecodeResponse(r *http.Response) *echo.Response {
	data, err := decodeResponse(r)
	Expect(err).NotTo(HaveOccurred())
	return data
}
// The "kds" suite sends plain HTTP requests to the local listener on
// port 10000 and verifies routing by Host header against the echo
// service.
var _ = Describe("kds", func() {
	var (
		req *http.Request
		res *http.Response
		err error
	)
	BeforeEach(func() {
		req, err = http.NewRequest(http.MethodGet, "http://localhost:10000", nil)
		Expect(err).NotTo(HaveOccurred())
	})
	// JustBeforeEach runs after every inner BeforeEach has set req.Host,
	// so the request reflects each context's target service.
	JustBeforeEach(func() {
		res, err = http.DefaultClient.Do(req)
		Expect(err).NotTo(HaveOccurred())
	})
	Describe("given host = single-port.echo", func() {
		BeforeEach(func() {
			req.Host = "single-port.echo"
		})
		It("should respond status 200", func() {
			Expect(res.StatusCode).To(Equal(http.StatusOK))
		})
		It("check response", func() {
			data := mustDecodeResponse(res)
			Expect(data.Method).To(Equal(http.MethodGet))
			Expect(data.Host).To(Equal(req.Host))
			Expect(data.URL).To(Equal("/"))
		})
	})
	Describe("given host = named-port.echo", func() {
		BeforeEach(func() {
			req.Host = "named-port.echo"
		})
		It("should respond status 200", func() {
			Expect(res.StatusCode).To(Equal(http.StatusOK))
		})
		It("check response", func() {
			data := mustDecodeResponse(res)
			Expect(data.Method).To(Equal(http.MethodGet))
			Expect(data.Host).To(Equal(req.Host))
			Expect(data.URL).To(Equal("/"))
		})
	})
})
|
package search
import (
"fmt"
"testing"
)
// TestBsearch checks that Bsearch finds an existing element and reports
// an error for a value that is absent.
// FIX: the test previously only printed results and asserted nothing,
// so it could never fail.
func TestBsearch(t *testing.T) {
	s := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
	index, err := Bsearch(s, 8)
	if err != nil {
		t.Fatalf("Bsearch(s, 8) returned unexpected error: %v", err)
	}
	if index != 8 {
		t.Errorf("Bsearch(s, 8) = %d, want 8", index)
	}
	// 10 is not in the slice; an error is expected.
	if _, err = Bsearch(s, 10); err == nil {
		t.Error("Bsearch(s, 10) = nil error, want error for missing value")
	}
}
// TestBM exercises Boyer-Moore matching with one pattern that does not
// occur in the text and one that does.
func TestBM(t *testing.T) {
	text := []byte("sbc123def456ghi")
	BM(text, []byte("3ghi23"))
	BM(text, []byte("ghi"))
}
// TestKMP exercises Knuth-Morris-Pratt matching on a text containing
// several near-misses of the pattern.
func TestKMP(t *testing.T) {
	text := []byte("ababazababacababab")
	KMP(text, []byte("ababab"))
}
|
package main
import (
"fmt"
"io/ioutil"
"log"
"github.com/jroimartin/gocui"
"regexp"
)
// cursorDown moves the cursor in v one line down, scrolling the origin
// when the cursor cannot advance any further.
func cursorDown(g *gocui.Gui, v *gocui.View) error {
	if v == nil {
		return nil
	}
	cx, cy := v.Cursor()
	if err := v.SetCursor(cx, cy+1); err == nil {
		return nil
	}
	ox, oy := v.Origin()
	return v.SetOrigin(ox, oy+1)
}
// sendScriptAutomatically sends the current script buffer to the node
// chosen by findBestNode.
// FIX: removed C-style parenthesized if and trailing semicolons so the
// code is gofmt-clean.
func sendScriptAutomatically(g *gocui.Gui, v *gocui.View) error {
	scriptView, err := g.View("script")
	if err != nil {
		return err
	}
	// Capture the buffer before handing off to the goroutine so later
	// edits do not race with the send.
	content := scriptView.ViewBuffer()
	bestNode := findBestNode()
	go sendScriptToRemote(g, bestNode, content)
	return nil
}
// getLine returns the text of the line under the cursor in v, or the
// empty string when the cursor is on an invalid line.
func getLine(v *gocui.View) string {
	_, cy := v.Cursor()
	line, err := v.Line(cy)
	if err != nil {
		return ""
	}
	return line
}
// sendScript sends the script buffer to the node named on the line
// under the cursor; the line must contain a host:port token that keys
// into nodeList.nodes.
// FIX: removed C-style parens/semicolons (gofmt-clean).
func sendScript(g *gocui.Gui, v *gocui.View) error {
	scriptView, err := g.View("script")
	if err != nil {
		return err
	}
	line := getLine(v)
	// NOTE(review): compiled on every invocation; keybinding handlers
	// fire rarely, but this could be hoisted to a package-level var.
	isValidHostPort := regexp.MustCompile(`(\w|\d)*[:]\d*`)
	if isValidHostPort.MatchString(line) {
		hostPort := isValidHostPort.FindString(line)
		content := scriptView.ViewBuffer()
		go sendScriptToRemote(g, nodeList.nodes[hostPort], content)
	}
	return nil
}
// cursorUp moves the cursor in v one line up, scrolling the origin up
// when the cursor is already at the top of the view.
func cursorUp(g *gocui.Gui, v *gocui.View) error {
	if v == nil {
		return nil
	}
	ox, oy := v.Origin()
	cx, cy := v.Cursor()
	// Only scroll when the cursor cannot move and there is room above.
	err := v.SetCursor(cx, cy-1)
	if err == nil || oy <= 0 {
		return nil
	}
	return v.SetOrigin(ox, oy-1)
}
// getScriptName opens the centered "msg" popup view in which the user
// types the path of the script to load.
func getScriptName(g *gocui.Gui, v *gocui.View) error {
	maxX, maxY := g.Size()
	if v, err := g.SetView("msg", maxX/2-30, maxY/2, maxX/2+30, maxY/2+2); err != nil {
		// FIX: check for ErrUnknownView before touching v; on any other
		// error the view may be unusable and the old code set v.Editable
		// before the error-type check.
		if err != gocui.ErrUnknownView {
			return err
		}
		v.Editable = true
		if _, err := g.SetCurrentView("msg"); err != nil {
			return err
		}
	}
	return nil
}
// loadScript reads the file whose path is on the current line of the
// popup view and loads its contents into the "script" view, closing the
// popup afterwards.
func loadScript(g *gocui.Gui, v *gocui.View) error {
	_, cy := v.Cursor()
	l, err := v.Line(cy)
	if err != nil {
		return nil
	}
	scriptView, _ := g.View("script")
	scriptView.Clear()
	// Trim the trailing character the view buffer appends to the line.
	// FIX: the old code sliced l[0:len(l)-1] unconditionally, which
	// panics with an out-of-range index when the line is empty.
	path := l
	if len(path) > 0 {
		path = path[:len(path)-1]
	}
	fileContent, err := ioutil.ReadFile(path)
	if err != nil {
		fmt.Fprintf(scriptView, "%s", "Failed to load script! Check path and try again")
		// FIX: stop after reporting instead of also printing nil content.
		closeMsg(g, v)
		return nil
	}
	fmt.Fprintf(scriptView, "%s", fileContent)
	closeMsg(g, v)
	return nil
}
// closeMsg tears down the "msg" popup and returns focus to the script view.
func closeMsg(g *gocui.Gui, v *gocui.View) error {
	if err := g.DeleteView("msg"); err != nil {
		return err
	}
	_, err := g.SetCurrentView("script")
	return err
}
// quit signals gocui to leave the main loop.
func quit(g *gocui.Gui, v *gocui.View) error {
	return gocui.ErrQuit
}
// nextView cycles input focus: script -> main -> jobs -> script. A nil
// view defaults to focusing "main".
func nextView(g *gocui.Gui, v *gocui.View) error {
	if v == nil {
		_, err := g.SetCurrentView("main")
		return err
	}
	switch v.Name() {
	case "script":
		_, err := g.SetCurrentView("main")
		return err
	case "main":
		_, err := g.SetCurrentView("jobs")
		return err
	case "jobs":
		_, err := g.SetCurrentView("script")
		return err
	}
	return nil
}
// layout is the gocui manager: it (re)creates the three views whenever
// the terminal is resized. gocui returns ErrUnknownView on the first
// creation of a view, which is when the one-time content and options
// are initialized.
func layout(g *gocui.Gui) error {
	maxX, maxY := g.Size()
	// Node list: top-left quadrant; starts focused with row highlighting.
	if v, err := g.SetView("main", -1, -1, 45, maxY/2); err != nil {
		if err != gocui.ErrUnknownView {
			return err
		}
		v.Highlight = true
		v.SelBgColor = gocui.ColorWhite
		v.SelFgColor = gocui.ColorBlack
		for _, node := range nodeList.nodes {
			fmt.Fprintln(v, node.class, " ", node.hostPort, " ", node.usage)
		}
		if _, err := g.SetCurrentView("main"); err != nil {
			return err
		}
	}
	// Script buffer: right half of the screen.
	if v, err := g.SetView("script", 45, -1, maxX, maxY); err != nil {
		if err != gocui.ErrUnknownView {
			return err
		}
		fmt.Fprintf(v, "%s", "[Enter] to load script")
		v.Editable = false
		v.Wrap = true
	}
	// Job list: bottom-left quadrant.
	if v, err := g.SetView("jobs", -1, maxY/2, 45, maxY); err != nil {
		if err != gocui.ErrUnknownView {
			return err
		}
		fmt.Fprintf(v, "%s", "jobs")
		v.Editable = false
		v.Wrap = true
	}
	return nil
}
var nodeList NodeList
// main wires up the TUI: creates the GUI, installs keybindings, loads
// the node configuration, starts background usage scanning, and runs
// the main loop until quit.
func main() {
	gui, err := gocui.NewGui(gocui.OutputNormal)
	if err != nil {
		log.Panicln(err)
	}
	defer gui.Close()
	gui.Cursor = true
	gui.SetManagerFunc(layout)
	if err := keybindings(gui); err != nil {
		log.Panicln(err)
	}
	nodeList = readConfig()
	go scanUsages(gui)
	if err := gui.MainLoop(); err != nil && err != gocui.ErrQuit {
		log.Panicln(err)
	}
}
|
package azure
import (
"bufio"
"bytes"
"context"
"encoding/base64"
"encoding/binary"
"io"
"io/ioutil"
"path"
"strings"
blob "github.com/Azure/azure-storage-blob-go/azblob"
"github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/grafana/tempo/tempodb/backend"
)
const (
	// dir represents the char separator used by the blob virtual directory structure.
	dir = "/"
	// maxParallelism caps concurrent block transfers per blob download
	// (see readRange and readAll).
	maxParallelism = 3
)
// readerWriter implements backend.RawReader, backend.RawWriter and
// backend.Compactor on top of an Azure blob container. Reads go
// through the hedged container URL; writes through the plain one.
type readerWriter struct {
	cfg                *Config
	containerURL       blob.ContainerURL
	hedgedContainerURL blob.ContainerURL
}
// appendTracker records the object name an Append sequence writes to;
// it is round-tripped through the opaque backend.AppendTracker.
type appendTracker struct {
	Name string
}
// New gets the Azure blob container and returns it wired up as the
// backend reader, writer and compactor — all three are the same
// readerWriter instance.
func New(cfg *Config) (backend.RawReader, backend.RawWriter, backend.Compactor, error) {
	ctx := context.Background()
	plain, err := GetContainer(ctx, cfg, false)
	if err != nil {
		return nil, nil, nil, errors.Wrap(err, "getting storage container")
	}
	hedged, err := GetContainer(ctx, cfg, true)
	if err != nil {
		return nil, nil, nil, errors.Wrap(err, "getting hedged storage container")
	}
	rw := &readerWriter{
		cfg:                cfg,
		containerURL:       plain,
		hedgedContainerURL: hedged,
	}
	return rw, rw, rw, nil
}
// Write implements backend.Writer by streaming data into the object
// named by keypath/name; the size and cache hints are ignored.
func (rw *readerWriter) Write(ctx context.Context, name string, keypath backend.KeyPath, data io.Reader, _ int64, _ bool) error {
	object := backend.ObjectFileName(keypath, name)
	return rw.writer(ctx, bufio.NewReader(data), object)
}
// Append implements backend.Writer. The first call (nil tracker)
// writes buffer as a new object; subsequent calls stage additional
// blocks onto the object recorded in the tracker.
func (rw *readerWriter) Append(ctx context.Context, name string, keypath backend.KeyPath, tracker backend.AppendTracker, buffer []byte) (backend.AppendTracker, error) {
	if tracker == nil {
		t := appendTracker{Name: backend.ObjectFileName(keypath, name)}
		if err := rw.writeAll(ctx, t.Name, buffer); err != nil {
			return nil, err
		}
		return t, nil
	}
	t := tracker.(appendTracker)
	if err := rw.append(ctx, buffer, t.Name); err != nil {
		return nil, err
	}
	return t, nil
}
// CloseAppend implements backend.Writer. Blocks are committed as they
// are appended (see append), so there is nothing to finalize here.
func (rw *readerWriter) CloseAppend(ctx context.Context, tracker backend.AppendTracker) error {
	return nil
}
// List implements backend.Reader. It returns the names of the
// immediate virtual "directories" under keypath, paging through the
// hierarchical listing until the marker reports completion.
func (rw *readerWriter) List(ctx context.Context, keypath backend.KeyPath) ([]string, error) {
	marker := blob.Marker{}
	prefix := path.Join(keypath...)
	if len(prefix) > 0 {
		prefix = prefix + dir
	}
	objects := make([]string, 0)
	for {
		list, err := rw.containerURL.ListBlobsHierarchySegment(ctx, marker, dir, blob.ListBlobsSegmentOptions{
			Prefix:  prefix,
			Details: blob.BlobListingDetails{},
		})
		if err != nil {
			return objects, errors.Wrap(err, "iterating tenants")
		}
		marker = list.NextMarker
		// FIX: the loop variable was previously named "blob", shadowing
		// the azblob package alias for the whole loop body.
		for _, p := range list.Segment.BlobPrefixes {
			objects = append(objects, strings.TrimPrefix(strings.TrimSuffix(p.Name, dir), prefix))
		}
		// Continue iterating if we are not done.
		if !marker.NotDone() {
			break
		}
	}
	return objects, nil
}
// Read implements backend.Reader by downloading the whole object into
// memory and returning a reader over it together with its length.
func (rw *readerWriter) Read(ctx context.Context, name string, keypath backend.KeyPath, _ bool) (io.ReadCloser, int64, error) {
	span, derivedCtx := opentracing.StartSpanFromContext(ctx, "Read")
	defer span.Finish()
	b, err := rw.readAll(derivedCtx, backend.ObjectFileName(keypath, name))
	if err != nil {
		return nil, 0, readError(err)
	}
	return ioutil.NopCloser(bytes.NewReader(b)), int64(len(b)), nil
}
// ReadRange implements backend.Reader by filling buffer with object
// bytes starting at offset.
func (rw *readerWriter) ReadRange(ctx context.Context, name string, keypath backend.KeyPath, offset uint64, buffer []byte) error {
	span, derivedCtx := opentracing.StartSpanFromContext(ctx, "ReadRange")
	defer span.Finish()
	object := backend.ObjectFileName(keypath, name)
	if err := rw.readRange(derivedCtx, object, int64(offset), buffer); err != nil {
		return readError(err)
	}
	return nil
}
// Shutdown implements backend.Reader. No background resources are
// held, so this is a no-op.
func (rw *readerWriter) Shutdown() {
}
// writeAll uploads b as the complete contents of the object called name.
func (rw *readerWriter) writeAll(ctx context.Context, name string, b []byte) error {
	return rw.writer(ctx, bytes.NewReader(b), name)
}
// append stages src as a new block on the block blob called name, then
// commits a block list consisting of every previously committed block
// plus the new one.
// NOTE(review): despite the variable name, this is a block blob, not an
// Azure append blob.
func (rw *readerWriter) append(ctx context.Context, src []byte, name string) error {
	appendBlobURL := rw.containerURL.NewBlockBlobURL(name)
	// These helper functions convert a binary block ID to a base-64 string and vice versa
	// NOTE: The blockID must be <= 64 bytes and ALL blockIDs for the block must be the same length
	blockIDBinaryToBase64 := func(blockID []byte) string { return base64.StdEncoding.EncodeToString(blockID) }
	blockIDIntToBase64 := func(blockID int) string {
		// Fixed 64-byte binary IDs keep every block ID the same length.
		binaryBlockID := (&[64]byte{})[:]
		binary.LittleEndian.PutUint32(binaryBlockID, uint32(blockID))
		return blockIDBinaryToBase64(binaryBlockID)
	}
	l, err := appendBlobURL.GetBlockList(ctx, blob.BlockListAll, blob.LeaseAccessConditions{})
	if err != nil {
		return err
	}
	// generate the next block id
	id := blockIDIntToBase64(len(l.CommittedBlocks) + 1)
	_, err = appendBlobURL.StageBlock(ctx, id, bytes.NewReader(src), blob.LeaseAccessConditions{}, nil)
	if err != nil {
		return err
	}
	// Rebuild the full ID list: every committed block, in order, then the new one.
	base64BlockIDs := make([]string, len(l.CommittedBlocks)+1)
	for i := 0; i < len(l.CommittedBlocks); i++ {
		base64BlockIDs[i] = l.CommittedBlocks[i].Name
	}
	base64BlockIDs[len(l.CommittedBlocks)] = id
	// After all the blocks are uploaded, atomically commit them to the blob.
	_, err = appendBlobURL.CommitBlockList(ctx, base64BlockIDs, blob.BlobHTTPHeaders{}, blob.Metadata{}, blob.BlobAccessConditions{})
	if err != nil {
		return err
	}
	return nil
}
// writer streams src into a block blob called name using the
// configured buffer size and buffer count.
func (rw *readerWriter) writer(ctx context.Context, src io.Reader, name string) error {
	blobURL := rw.containerURL.NewBlockBlobURL(name)
	opts := blob.UploadStreamToBlockBlobOptions{
		BufferSize: rw.cfg.BufferSize,
		MaxBuffers: rw.cfg.MaxBuffers,
	}
	_, err := blob.UploadStreamToBlockBlob(ctx, src, blobURL, opts)
	if err != nil {
		return errors.Wrapf(err, "cannot upload blob, name: %s", name)
	}
	return nil
}
// readRange fills destBuffer with up to len(destBuffer) bytes of the
// object starting at offset, clamped to the object's length.
func (rw *readerWriter) readRange(ctx context.Context, name string, offset int64, destBuffer []byte) error {
	blobURL := rw.hedgedContainerURL.NewBlockBlobURL(name)
	props, err := blobURL.GetProperties(ctx, blob.BlobAccessConditions{})
	if err != nil {
		return err
	}
	// Download exactly the requested window, or whatever remains of the
	// blob if that is shorter.
	length := int64(len(destBuffer))
	var size int64
	if length > 0 && length <= props.ContentLength()-offset {
		size = length
	} else {
		size = props.ContentLength() - offset
	}
	// FIX: pass the caller's ctx instead of context.Background() so
	// cancellation and deadlines propagate to the download.
	if err := blob.DownloadBlobToBuffer(ctx, blobURL.BlobURL, offset, size,
		destBuffer, blob.DownloadFromBlobOptions{
			BlockSize:   blob.BlobDefaultDownloadBlockSize,
			Parallelism: uint16(maxParallelism),
			Progress:    nil,
			RetryReaderOptionsPerBlock: blob.RetryReaderOptions{
				MaxRetryRequests: maxRetries,
			},
		},
	); err != nil {
		return errors.Wrapf(err, "cannot download blob, name: %s", name)
	}
	// FIX: removed a pointless bytes.NewReader(destBuffer).Read(destBuffer)
	// self-copy, which also returned io.EOF for empty buffers.
	return nil
}
// readAll downloads the entire object called name into a freshly
// allocated buffer sized from the blob's reported content length.
func (rw *readerWriter) readAll(ctx context.Context, name string) ([]byte, error) {
	blobURL := rw.hedgedContainerURL.NewBlockBlobURL(name)
	props, err := blobURL.GetProperties(ctx, blob.BlobAccessConditions{})
	if err != nil {
		return nil, err
	}
	destBuffer := make([]byte, props.ContentLength())
	// FIX: pass the caller's ctx instead of context.Background() so
	// cancellation and deadlines propagate to the download.
	if err := blob.DownloadBlobToBuffer(ctx, blobURL.BlobURL, 0, props.ContentLength(),
		destBuffer, blob.DownloadFromBlobOptions{
			BlockSize:   blob.BlobDefaultDownloadBlockSize,
			Parallelism: uint16(maxParallelism),
			Progress:    nil,
			RetryReaderOptionsPerBlock: blob.RetryReaderOptions{
				MaxRetryRequests: maxRetries,
			},
		},
	); err != nil {
		return nil, errors.Wrapf(err, "cannot download blob, name: %s", name)
	}
	return destBuffer, nil
}
// readError maps Azure storage errors onto backend errors, turning a
// "BlobNotFound" service code into backend.ErrDoesNotExist.
func readError(err error) error {
	storageErr, ok := err.(blob.StorageError)
	if !ok {
		return errors.Wrap(err, "reading storage container")
	}
	if storageErr.ServiceCode() == "BlobNotFound" {
		return backend.ErrDoesNotExist
	}
	return errors.Wrap(err, "reading Azure blob container")
}
|
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package wifi
import (
"context"
"time"
"github.com/google/gopacket"
"github.com/google/gopacket/layers"
"chromiumos/tast/common/network/ping"
"chromiumos/tast/ctxutil"
"chromiumos/tast/errors"
"chromiumos/tast/remote/network/ip"
remoteiw "chromiumos/tast/remote/network/iw"
remoteping "chromiumos/tast/remote/network/ping"
"chromiumos/tast/remote/wificell"
"chromiumos/tast/remote/wificell/hostapd"
"chromiumos/tast/remote/wificell/pcap"
"chromiumos/tast/testing"
"chromiumos/tast/testing/hwdep"
)
// init registers the OverlappingBSSScan test with the tast framework.
func init() {
	testing.AddTest(&testing.Test{
		Func: OverlappingBSSScan,
		Desc: "Verifies that OBSS scan aborts and/or backs off when there is consistent outgoing traffic",
		Contacts: []string{
			"chromeos-wifi-champs@google.com", // WiFi oncall rotation; or http://b/new?component=893827
		},
		Attr:        []string{"group:wificell", "wificell_func"},
		Timeout:     5 * time.Minute,
		ServiceDeps: []string{wificell.TFServiceName},
		Fixture:     "wificellFixtWithCapture",
		// Skip on Marvell on 8997 platforms because of test failure post security fixes b/187853331
		// Test failure is due to increased RTT time
		HardwareDeps: hwdep.D(hwdep.WifiNotMarvell8997()),
	})
}
// OverlappingBSSScan samples ping RTT with and without OBSS scanning
// enabled on the AP and requires that enabling OBSS adds no more than
// latencyMargin to the maximum RTT. It also inspects the capture for
// 20/40 BSS coexistence management frames sent by the DUT.
func OverlappingBSSScan(ctx context.Context, s *testing.State) {
	// To verify that OBSS scans will abort or back off when there's
	// outgoing traffic instead of blocking it, this test samples a long
	// period of pinging, and compares the maximum latency with or without
	// OBSS so that we can assume that our traffic does hit some running
	// scans if OBSS is enabled and it does not block the traffic too long
	// which then implies scan backs off.
	tf := s.FixtValue().(*wificell.TestFixture)
	// Turn off power save in this test as we are using ping RTT
	// as metric in this test. The default beacon interval (~100ms)
	// is too large compared with our threshold/margin and we'll
	// need much better resolution. Also, we don't want the timing
	// of beacons to interfere with our results.
	// e.g. default beacon interval is ~102ms and we might exceed
	// the 100ms threshold just because we send request right
	// after one beacon.
	iwr := remoteiw.NewRemoteRunner(s.DUT().Conn())
	clientIface, err := tf.ClientInterface(ctx)
	if err != nil {
		s.Fatal("Failed to get the client interface: ", err)
	}
	psMode, err := iwr.PowersaveMode(ctx, clientIface)
	if err != nil {
		s.Fatal("Failed to get the powersave mode: ", err)
	}
	if psMode {
		// Power save was on: schedule its restoration, then disable it
		// for the duration of the test.
		defer func(ctx context.Context) {
			s.Logf("Restoring power save mode to %t", psMode)
			if err := iwr.SetPowersaveMode(ctx, clientIface, psMode); err != nil {
				s.Errorf("Failed to restore powersave mode to %t: %v", psMode, err)
			}
		}(ctx)
		var cancel context.CancelFunc
		ctx, cancel = ctxutil.Shorten(ctx, time.Second)
		defer cancel()
		s.Log("Disabling power save in the test")
		if err := iwr.SetPowersaveMode(ctx, clientIface, false); err != nil {
			s.Fatal("Failed to turn off powersave: ", err)
		}
	}
	// AP options with(out) OBSS scan for this test.
	genAPOps := func(obss bool) []hostapd.Option {
		ops := []hostapd.Option{
			hostapd.Channel(6),
			hostapd.Mode(hostapd.Mode80211nPure),
			hostapd.HTCaps(hostapd.HTCapHT40),
		}
		if obss {
			ops = append(ops, hostapd.OBSSInterval(10))
		}
		return ops
	}
	// setupAndPing sets up an AP with(out) OBSS scan, connects DUT to it
	// and collects ping statistics. The Capturer object is also returned
	// so the caller can verify the OBSS scan setting works properly.
	setupAndPing := func(ctx context.Context, obss bool) (ret *ping.Result, retPcap *pcap.Capturer, retErr error) {
		const (
			pingInterval    = 0.1  // In seconds.
			pingCountOBSS   = 1000 // Total 100 seconds of ping-ing.
			pingCountNoOBSS = 100  // Total 10 seconds of ping-ing.
		)
		// Utility function for collecting errors in defer.
		collectErr := func(err error) {
			if err == nil {
				return
			}
			s.Log("Error in setupAndPing: ", err)
			if retErr == nil {
				ret = nil
				retPcap = nil
				retErr = err
			}
		}
		ap, err := tf.ConfigureAP(ctx, genAPOps(obss), nil)
		if err != nil {
			return nil, nil, errors.Wrap(err, "failed to configure AP")
		}
		defer func(ctx context.Context) {
			s.Log("Deconfiguring AP")
			if err := tf.DeconfigAP(ctx, ap); err != nil {
				collectErr(errors.Wrap(err, "failed to deconfig AP"))
			}
		}(ctx)
		ctx, cancel := tf.ReserveForDeconfigAP(ctx, ap)
		defer cancel()
		s.Log("Connecting")
		if _, err := tf.ConnectWifiAP(ctx, ap); err != nil {
			return nil, nil, errors.Wrap(err, "failed to connect to WiFi")
		}
		defer func(ctx context.Context) {
			if err := tf.CleanDisconnectWifi(ctx); err != nil {
				collectErr(errors.Wrap(err, "failed to disconnect WiFi"))
			}
		}(ctx)
		ctx, cancel = tf.ReserveForDisconnect(ctx)
		defer cancel()
		pr := remoteping.NewRemoteRunner(s.DUT().Conn())
		var count int
		var desc string
		var pingLogPath string
		if obss {
			desc = "with OBSS scan"
			count = pingCountOBSS
			pingLogPath = "ping_obss_enabled.log"
		} else {
			desc = "without OBSS scan"
			count = pingCountNoOBSS
			pingLogPath = "ping_obss_disabled.log"
		}
		s.Logf("Pinging router %s, count=%d, interval=%fs", desc, count, pingInterval)
		pingStats, err := pr.Ping(ctx, ap.ServerIP().String(), ping.Count(count),
			ping.Interval(pingInterval), ping.SaveOutput(pingLogPath))
		if err != nil {
			return nil, nil, errors.Wrapf(err, "failed to ping router %s", desc)
		}
		s.Logf("Ping statistic %s: %v", desc, pingStats)
		capturer, ok := tf.Capturer(ap)
		if !ok {
			return nil, nil, errors.New("no capturer spawned")
		}
		return pingStats, capturer, nil
	}
	// The latency thresholds in ms to match the unit of ping.Result.
	const (
		latencyBaseline = 100
		// Dwell time for scanning is usually configured to be around 100 ms (some
		// are higher, around 150 ms), since this is also the standard beacon
		// interval. Tolerate spikes in latency up to 250 ms as a way of asking that
		// our PHY be servicing foreground traffic regularly during background scans.
		latencyMargin = 250
	)
	// Baseline run: OBSS scan disabled.
	statsNoBgscan, _, err := setupAndPing(ctx, false)
	if err != nil {
		s.Fatal("Failed to measure latency without OBSS scan: ", err)
	}
	if statsNoBgscan.MaxLatency > latencyBaseline {
		s.Fatalf("RTT latency is too high even without OBSS scan: %f ms > %f ms",
			statsNoBgscan.MaxLatency, float64(latencyBaseline))
	}
	// Measurement run: OBSS scan enabled.
	statsBgscan, capturer, err := setupAndPing(ctx, true)
	if err != nil {
		s.Fatal("Failed to measure latency with OBSS scan: ", err)
	}
	if statsBgscan.MaxLatency > statsNoBgscan.MaxLatency+latencyMargin {
		s.Errorf("Significant difference in RTT due to OBSS scan: diff RTT (%f ms with OBSS - %f ms without OBSS) > %f ms",
			statsBgscan.MaxLatency, statsNoBgscan.MaxLatency, float64(latencyMargin))
	}
	s.Log("Parsing packets to see if coexistence management frames are sent")
	// Get the MAC address of DUT's WiFi interface.
	ipr := ip.NewRemoteRunner(s.DUT().Conn())
	mac, err := ipr.MAC(ctx, clientIface)
	if err != nil {
		s.Fatal("Failed to get MAC of WiFi interface: ", err)
	}
	pcapPath, err := capturer.PacketPath(ctx)
	if err != nil {
		s.Fatal("Failed to get path of packet file: ", err)
	}
	// Filtering coexistence management frame.
	filters := []pcap.Filter{
		pcap.Dot11FCSValid(),
		pcap.TransmitterAddress(mac),
		pcap.TypeFilter(layers.LayerTypeDot11MgmtAction,
			func(layer gopacket.Layer) bool {
				contents := layer.LayerContents()
				// Check fixed parameter:
				// contents[0]: Category = Public Action (4)
				// contents[1]: Action = 20/40 BSS Coexistence Management (0)
				if len(contents) < 2 {
					return false
				}
				if contents[0] != 4 || contents[1] != 0 {
					return false
				}
				// Parse tagged parameters to find 20/40 BSS coexistence element.
				e := gopacket.NewPacket(contents[2:], layers.LayerTypeDot11InformationElement, gopacket.NoCopy)
				if err := e.ErrorLayer(); err != nil {
					// Malformed packet, log and skip.
					s.Logf("Found malformed coexistence management frame, content=%v, err=%v", contents, err)
					return false
				}
				for _, l := range e.Layers() {
					element, ok := l.(*layers.Dot11InformationElement)
					if !ok {
						// Unexpected layer, log and skip the packet.
						s.Log("Found unexpected layer when parsing informantion element ", l)
						return false
					}
					if element.ID == layers.Dot11InformationElementID2040BSSCoExist {
						return true
					}
				}
				return false
			},
		),
	}
	packets, err := pcap.ReadPackets(pcapPath, filters...)
	if err != nil {
		s.Fatal("Failed to read packets: ", err)
	}
	s.Logf("Total %d packets found", len(packets))
	if len(packets) == 0 {
		s.Fatal("No coexistence management packet found in pcap")
	}
}
|
/*
@Time : 2020/4/9 4:43 PM
*/
package main
import (
"fmt"
"log"
"net/http"
"time"
"workerqueue/workerqueue"
)
// main runs a small HTTP front end over a worker queue. POSTing to
// /submit-work with form values "name" and "delay" enqueues a job that
// sleeps for the given duration and then prints a greeting.
func main() {
	workerQueue := workerqueue.New(10)
	workerQueue.Start()
	// for i in {1..4096}; do curl localhost:8000/submit-work -d name=$USER -d delay=$(expr $i % 11)s; done
	http.HandleFunc("/submit-work", func(w http.ResponseWriter, r *http.Request) {
		name := r.FormValue("name")
		delay, err := time.ParseDuration(r.FormValue("delay"))
		if err != nil {
			// FIX: the parse error was previously discarded, silently
			// treating malformed input as a zero delay.
			http.Error(w, "invalid delay: "+err.Error(), http.StatusBadRequest)
			return
		}
		work := func() {
			time.Sleep(delay)
			fmt.Printf("after delay %f seconds,Hello, %s!\n", delay.Seconds(), name)
		}
		workerQueue.SubmitWork(work)
	})
	log.Fatal(http.ListenAndServe(":8000", nil))
}
|
package hivesql
import (
"crypto/tls"
"crypto/x509"
"net"
)
// config holds the connection parameters for a HiveServer2 endpoint.
type config struct {
	addr       string            // host:port; defaults applied in normalize
	user       string            // user name
	password   string            // password
	dbName     string            // database name; defaults to "default"
	auth       string            // auth mode; defaults to "NONE"
	hiveConfig map[string]string // extra Hive settings — semantics set by the dialer, not visible here
	tlsConfig  *tls.Config       // non-nil once TLS is enabled (see turnTLS)
}
// turnTLS lazily enables TLS: when no tls.Config is present it creates
// one whose root CAs come from the system pool, falling back to an
// empty pool when the system pool cannot be loaded.
func (c *config) turnTLS() {
	if c.tlsConfig != nil {
		return
	}
	c.tlsConfig = new(tls.Config)
	pool, err := x509.SystemCertPool()
	if err != nil {
		pool = x509.NewCertPool()
	}
	c.tlsConfig.RootCAs = pool
}
// normalize fills in defaults: localhost:10000 for the address (adding
// the default port when missing), "default" for the database, and
// "NONE" for the auth mode.
func (c *config) normalize() {
	addr := c.addr
	if addr == "" {
		addr = "localhost:10000"
	}
	c.addr = ensureHavePort(addr)
	if c.dbName == "" {
		c.dbName = "default"
	}
	if c.auth == "" {
		c.auth = "NONE"
	}
}
// ensureHavePort appends the default Hive port (10000) to addr when it
// does not already carry one.
func ensureHavePort(addr string) string {
	if _, _, err := net.SplitHostPort(addr); err == nil {
		return addr
	}
	return net.JoinHostPort(addr, "10000")
}
|
/*
* Copyright (c) 2016, Randy Westlund. All rights reserved.
* This code is under the BSD-2-Clause license.
*
* This file contains HTTP handlers for the application.
*/
package router
import (
"bytes"
"database/sql"
"encoding/json"
"image"
"image/jpeg"
"image/png"
"log"
"mime/multipart"
"net/http"
"net/url"
"strconv"
"github.com/gorilla/mux"
"github.com/nfnt/resize"
"github.com/rwestlund/photos/db"
"github.com/rwestlund/photos/defs"
)
// buildItemFilter takes a url.URL object from req.URL and fills an ItemFilter.
func buildItemFilter(url *url.URL) *defs.ItemFilter {
	// We can ignore the error because count=0 means disabled.
	var bigcount, _ = strconv.ParseUint(url.Query().Get("count"), 10, 32)
	var bigskip, _ = strconv.ParseUint(url.Query().Get("skip"), 10, 32)
	// Build ItemFilter from query params.
	var filter = defs.ItemFilter{
		Query: url.Query().Get("query"),
		Count: uint32(bigcount),
		Skip:  uint32(bigskip),
		Album: url.Query().Get("album"),
	}
	return &filter
}
// handlePhotos requests a list of photos.
// GET /api/photos
func handlePhotos(res http.ResponseWriter, req *http.Request) {
res.Header().Set("Content-Type", "application/json; charset=UTF-8")
var filter = buildItemFilter(req.URL)
var photos, err = db.FetchPhotos(filter)
if err != nil {
log.Println(err)
res.WriteHeader(500)
return
}
j, e := json.Marshal(photos)
if e != nil {
log.Println(e)
res.WriteHeader(500)
return
}
// If we made it here, send good response.
res.Write(j)
}
// handlePutPhoto updates an existing photo.
// PUT /api/photos/4
func handlePutPhoto(res http.ResponseWriter, req *http.Request) {
	// Access control: only logged-in admins may update photos.
	usr, err := checkAuth(res, req)
	if err != nil {
		res.WriteHeader(500)
		log.Println(err)
		return
	}
	if usr == nil {
		res.WriteHeader(401)
		return
	}
	if usr.Role != "Admin" {
		res.WriteHeader(403)
		return
	}
	res.Header().Set("Content-Type", "application/json; charset=UTF-8")
	// Decode the request body into a photo record.
	var photo defs.Photo
	if err = json.NewDecoder(req.Body).Decode(&photo); err != nil {
		log.Println(err)
		res.WriteHeader(400)
		return
	}
	// Persist the update.
	newPhoto, err := db.SavePhoto(&photo)
	switch {
	case err == sql.ErrNoRows:
		res.WriteHeader(404)
		return
	case err != nil:
		log.Println(err)
		res.WriteHeader(400)
		return
	}
	// Echo the saved record back to the client.
	body, err := json.Marshal(newPhoto)
	if err != nil {
		log.Println(err)
		res.WriteHeader(500)
		return
	}
	res.Write(body)
}
// encodeThumbnail encodes img in the given format ("jpeg" or "png") and
// returns the encoded bytes. It logs and returns ok=false on an
// unsupported type or an encoding failure.
func encodeThumbnail(img image.Image, imgType string) ([]byte, bool) {
	var buf bytes.Buffer
	var err error
	switch imgType {
	case "jpeg":
		err = jpeg.Encode(&buf, img, nil)
	case "png":
		err = png.Encode(&buf, img)
	default:
		log.Println("Unsupported image type: " + imgType)
		return nil, false
	}
	if err != nil {
		log.Println(err)
		return nil, false
	}
	return buf.Bytes(), true
}

// handlePostPhoto creates a new photo.
// POST /api/photos
func handlePostPhoto(res http.ResponseWriter, req *http.Request) {
	// Access control: only logged-in admins may upload photos.
	var usr, err = checkAuth(res, req)
	if err != nil {
		res.WriteHeader(500)
		log.Println(err)
		return
	}
	if usr == nil {
		res.WriteHeader(401)
		return
	}
	if usr.Role != "Admin" {
		res.WriteHeader(403)
		return
	}
	res.Header().Set("Content-Type", "application/json; charset=UTF-8")
	// Hold the first 200MB in RAM; the rest goes to temporary files.
	err = req.ParseMultipartForm(200 * 1024 * 1024)
	if err != nil {
		log.Println(err)
		res.WriteHeader(400)
		return
	}
	file, header, err := req.FormFile("file")
	if err != nil {
		log.Println(err)
		res.WriteHeader(400)
		return
	}
	// FIX: the uploaded file was previously never closed.
	defer file.Close()
	var photo = defs.Photo{
		Filename: header.Filename,
		Mimetype: header.Header.Get("Content-Type"),
	}
	// Buffer the full-size image.
	var photoBuff bytes.Buffer
	photo.Size, err = photoBuff.ReadFrom(file)
	if err != nil {
		// FIX: this error was previously ignored.
		log.Println(err)
		res.WriteHeader(400)
		return
	}
	// FIX: decode from a fresh reader over the buffered bytes. The old
	// code decoded from &photoBuff directly, which drained the buffer,
	// so the "full-size" image stored below was empty or truncated.
	img, imgType, err := image.Decode(bytes.NewReader(photoBuff.Bytes()))
	if err != nil {
		log.Println(err)
		res.WriteHeader(400)
		return
	}
	// Create small and big thumbnails for the database.
	thumbBytes, ok := encodeThumbnail(resize.Thumbnail(800, 800, img, resize.Lanczos3), imgType)
	if !ok {
		res.WriteHeader(400)
		return
	}
	bigThumbBytes, ok := encodeThumbnail(resize.Thumbnail(1600, 1600, img, resize.Lanczos3), imgType)
	if !ok {
		res.WriteHeader(400)
		return
	}
	// Add albums; a malformed album list is logged but not fatal.
	var albumsString = req.FormValue("albums")
	err = json.Unmarshal([]byte(albumsString), &photo.Albums)
	if err != nil {
		log.Println(err)
		log.Println(photo.Albums)
	}
	// Fill in the currently logged-in user as the author.
	photo.AuthorID = usr.ID
	newPhoto, err := db.CreatePhoto(&photo, photoBuff.Bytes(), thumbBytes, bigThumbBytes)
	if err != nil {
		log.Println(err)
		res.WriteHeader(400)
		return
	}
	// Send it back.
	j, e := json.Marshal(newPhoto)
	if e != nil {
		log.Println(e)
		res.WriteHeader(500)
		return
	}
	// If we made it here, send good response.
	res.Write(j)
}
// handlePhoto requests a specific photo.
// GET /api/photo/3
func handlePhoto(res http.ResponseWriter, req *http.Request) {
	res.Header().Set("Content-Type", "application/json; charset=UTF-8")
	// Get id parameter.
	bigid, err := strconv.ParseUint(mux.Vars(req)["id"], 10, 32)
	if err != nil {
		log.Println(err)
		res.WriteHeader(400)
		return
	}
	var id = uint32(bigid)
	var photo *defs.Photo
	photo, err = db.FetchPhoto(id)
	if err == sql.ErrNoRows {
		res.WriteHeader(404)
		return
	} else if err != nil {
		res.WriteHeader(500)
		log.Println(err)
		return
	}
	j, e := json.Marshal(photo)
	if e != nil {
		// FIX: previously logged err (nil at this point) instead of the
		// marshal error e.
		log.Println(e)
		res.WriteHeader(500)
		return
	}
	// If we made it here, send good response.
	res.Write(j)
}
// handlePhotoImage requests a specific photo image.
// GET /api/photo/3/image
func handlePhotoImage(res http.ResponseWriter, req *http.Request) {
	res.Header().Set("Content-Type", "application/binary")
	// Get id parameter.
	bigid, err := strconv.ParseUint(mux.Vars(req)["id"], 10, 32)
	if err != nil {
		log.Println(err)
		res.WriteHeader(400)
		return
	}
	// FIX: renamed the local from "image", which shadowed the image package.
	data, err := db.FetchPhotoImage(uint32(bigid))
	if err == sql.ErrNoRows {
		res.WriteHeader(404)
		return
	} else if err != nil {
		res.WriteHeader(500)
		log.Println(err)
		return
	}
	// If we made it here, send good response.
	res.Write(data)
}
// handlePhotoThumbnail requests a specific photo's small thumbnail.
// GET /api/photo/3/thumbnail
func handlePhotoThumbnail(res http.ResponseWriter, req *http.Request) {
	res.Header().Set("Content-Type", "application/binary")
	// Get id parameter.
	bigid, err := strconv.ParseUint(mux.Vars(req)["id"], 10, 32)
	if err != nil {
		log.Println(err)
		res.WriteHeader(400)
		return
	}
	// FIX: renamed the local from "image", which shadowed the image package.
	data, err := db.FetchPhotoThumbnail(uint32(bigid))
	if err == sql.ErrNoRows {
		res.WriteHeader(404)
		return
	} else if err != nil {
		res.WriteHeader(500)
		log.Println(err)
		return
	}
	// If we made it here, send good response.
	res.Write(data)
}
// handlePhotoBigThumbnail serves the large thumbnail for a photo.
// GET /api/photo/3/big_thumbnail
func handlePhotoBigThumbnail(res http.ResponseWriter, req *http.Request) {
	res.Header().Set("Content-Type", "application/binary")
	// Parse the id path parameter.
	idParam := mux.Vars(req)["id"]
	parsed, err := strconv.ParseUint(idParam, 10, 32)
	if err != nil {
		log.Println(err)
		res.WriteHeader(400)
		return
	}
	image, err := db.FetchPhotoBigThumbnail(uint32(parsed))
	if err == sql.ErrNoRows {
		res.WriteHeader(404)
		return
	}
	if err != nil {
		res.WriteHeader(500)
		log.Println(err)
		return
	}
	// Success: write the image payload.
	res.Write(image)
}
// handleDeletePhoto deletes a photo by id.
// DELETE /api/photos/4
func handleDeletePhoto(res http.ResponseWriter, req *http.Request) {
	// Only authenticated admins may delete photos.
	usr, err := checkAuth(res, req)
	if err != nil {
		res.WriteHeader(500)
		log.Println(err)
		return
	}
	if usr == nil {
		res.WriteHeader(401)
		return
	}
	if usr.Role != "Admin" {
		res.WriteHeader(403)
		return
	}
	res.Header().Set("Content-Type", "application/json; charset=UTF-8")
	// Parse the id path parameter.
	parsed, parseErr := strconv.ParseUint(mux.Vars(req)["id"], 10, 32)
	if parseErr != nil {
		log.Println(parseErr)
		res.WriteHeader(400)
		return
	}
	err = db.DeletePhoto(uint32(parsed))
	if err == sql.ErrNoRows {
		res.WriteHeader(404)
		return
	}
	if err != nil {
		log.Println(err)
		res.WriteHeader(400)
		return
	}
	// Deletion succeeded.
	res.WriteHeader(200)
}
// handleUsers returns the list of users as JSON.
// GET /api/users
func handleUsers(res http.ResponseWriter, req *http.Request) {
	// Only authenticated admins may list users.
	usr, err := checkAuth(res, req)
	if err != nil {
		res.WriteHeader(500)
		log.Println(err)
		return
	}
	if usr == nil {
		res.WriteHeader(401)
		return
	}
	if usr.Role != "Admin" {
		res.WriteHeader(403)
		return
	}
	res.Header().Set("Content-Type", "application/json; charset=UTF-8")
	// Fetch users with any filter encoded in the query string.
	users, fetchErr := db.FetchUsers(buildItemFilter(req.URL))
	if fetchErr != nil {
		log.Println(fetchErr)
		res.WriteHeader(500)
		return
	}
	encoded, marshalErr := json.Marshal(users)
	if marshalErr != nil {
		log.Println(marshalErr)
		res.WriteHeader(500)
		return
	}
	// Success.
	res.Write(encoded)
}
// handlePostOrPutUser receives a user to create (POST /api/users) or
// update (PUT /api/users/4).
// Example body: { email: ..., role: ... }
func handlePostOrPutUser(res http.ResponseWriter, req *http.Request) {
	// Access control: only authenticated admins may manage users.
	var usr, err = checkAuth(res, req)
	if err != nil {
		res.WriteHeader(500)
		log.Println(err)
		return
	}
	if usr == nil {
		res.WriteHeader(401)
		return
	}
	if usr.Role != "Admin" {
		res.WriteHeader(403)
		return
	}
	res.Header().Set("Content-Type", "application/json; charset=UTF-8")
	// Decode body.
	var user defs.User
	err = json.NewDecoder(req.Body).Decode(&user)
	if err != nil {
		log.Println(err)
		res.WriteHeader(400)
		return
	}
	var newUser *defs.User
	if req.Method == "PUT" {
		// Update an existing user; the id comes from the URL.
		// Assign with '=' (not ':='): the old code shadowed err inside
		// this branch, so db.UpdateUser's error was assigned to the
		// inner variable and never checked — a failed update fell
		// through to the success path and marshalled a nil user.
		var bigid uint64
		bigid, err = strconv.ParseUint(mux.Vars(req)["id"], 10, 32)
		if err != nil {
			log.Println(err)
			res.WriteHeader(400)
			return
		}
		newUser, err = db.UpdateUser(uint32(bigid), &user)
	} else {
		// Create new user in DB.
		newUser, err = db.CreateUser(&user)
	}
	if err != nil {
		log.Println(err)
		res.WriteHeader(400)
		return
	}
	// Send it back.
	j, e := json.Marshal(newUser)
	if e != nil {
		log.Println(e)
		res.WriteHeader(500)
		return
	}
	// If we made it here, send good response.
	res.Write(j)
}
// handleDeleteUser deletes a user by id.
// DELETE /api/users/4
func handleDeleteUser(res http.ResponseWriter, req *http.Request) {
	// Only authenticated admins may delete users.
	usr, err := checkAuth(res, req)
	if err != nil {
		res.WriteHeader(500)
		log.Println(err)
		return
	}
	if usr == nil {
		res.WriteHeader(401)
		return
	}
	if usr.Role != "Admin" {
		res.WriteHeader(403)
		return
	}
	res.Header().Set("Content-Type", "application/json; charset=UTF-8")
	// Parse the id path parameter.
	parsed, parseErr := strconv.ParseUint(mux.Vars(req)["id"], 10, 32)
	if parseErr != nil {
		log.Println(parseErr)
		res.WriteHeader(400)
		return
	}
	if deleteErr := db.DeleteUser(uint32(parsed)); deleteErr != nil {
		log.Println(deleteErr)
		res.WriteHeader(400)
		return
	}
	// Deletion succeeded.
	res.WriteHeader(200)
}
// handleGetAlbums returns the list of all albums as JSON.
func handleGetAlbums(res http.ResponseWriter, req *http.Request) {
	// Declare the JSON payload type; every other JSON handler in this
	// file sets this header, but it was missing here.
	res.Header().Set("Content-Type", "application/json; charset=UTF-8")
	var albums, err = db.FetchAlbums()
	if err != nil {
		log.Println(err)
		res.WriteHeader(500)
		return
	}
	j, e := json.Marshal(albums)
	if e != nil {
		log.Println(e)
		res.WriteHeader(500)
		return
	}
	// If we made it here, send good response.
	res.Write(j)
}
// handleGetAlbum returns a single album, identified by the "albumName"
// path parameter, as JSON. 404 when no such album exists.
func handleGetAlbum(res http.ResponseWriter, req *http.Request) {
	// Declare the JSON payload type (was missing; all sibling JSON
	// handlers set it).
	res.Header().Set("Content-Type", "application/json; charset=UTF-8")
	// Get name parameter.
	var album, err = db.FetchAlbum(mux.Vars(req)["albumName"])
	if err == sql.ErrNoRows {
		res.WriteHeader(404)
		return
	}
	if err != nil {
		log.Println(err)
		res.WriteHeader(500)
		return
	}
	j, e := json.Marshal(album)
	if e != nil {
		log.Println(e)
		res.WriteHeader(500)
		return
	}
	// If we made it here, send good response.
	res.Write(j)
}
// handlePostAlbums creates a new album from the JSON request body.
func handlePostAlbums(res http.ResponseWriter, req *http.Request) {
	// Only authenticated admins may create albums.
	usr, err := checkAuth(res, req)
	if err != nil {
		res.WriteHeader(500)
		log.Println(err)
		return
	}
	if usr == nil {
		res.WriteHeader(401)
		return
	}
	if usr.Role != "Admin" {
		res.WriteHeader(403)
		return
	}
	res.Header().Set("Content-Type", "application/json; charset=UTF-8")
	// Decode the album from the request body.
	var album defs.Album
	if err = json.NewDecoder(req.Body).Decode(&album); err != nil {
		log.Println(err)
		res.WriteHeader(400)
		return
	}
	// Persist the new album.
	created, createErr := db.CreateAlbum(&album)
	if createErr != nil {
		log.Println(createErr)
		res.WriteHeader(400)
		return
	}
	encoded, marshalErr := json.Marshal(created)
	if marshalErr != nil {
		log.Println(marshalErr)
		res.WriteHeader(500)
		return
	}
	// Success.
	res.Write(encoded)
}
// handlePutAlbums updates an existing album, identified by the "name"
// path parameter, from the JSON request body.
func handlePutAlbums(res http.ResponseWriter, req *http.Request) {
	// Only authenticated admins may update albums.
	usr, err := checkAuth(res, req)
	if err != nil {
		res.WriteHeader(500)
		log.Println(err)
		return
	}
	if usr == nil {
		res.WriteHeader(401)
		return
	}
	if usr.Role != "Admin" {
		res.WriteHeader(403)
		return
	}
	res.Header().Set("Content-Type", "application/json; charset=UTF-8")
	// Decode the album from the request body.
	var album defs.Album
	if err = json.NewDecoder(req.Body).Decode(&album); err != nil {
		log.Println(err)
		res.WriteHeader(400)
		return
	}
	// NOTE(review): this reads the "name" route variable while
	// handleGetAlbum reads "albumName" -- confirm the route patterns
	// really use different variable names.
	updated, updateErr := db.UpdateAlbum(mux.Vars(req)["name"], &album)
	if updateErr != nil {
		log.Println(updateErr)
		res.WriteHeader(400)
		return
	}
	encoded, marshalErr := json.Marshal(updated)
	if marshalErr != nil {
		log.Println(marshalErr)
		res.WriteHeader(500)
		return
	}
	// Success.
	res.Write(encoded)
}
|
package router
import "sync"
// IRunnable is a unit of work for the RunnerQueue: Run executes the
// work and OnError receives Run's result — including a nil error on
// success, so implementations must handle nil.
type IRunnable interface {
Run() error
OnError(err error)
}
// RunnerQueue fans work items out to n background worker goroutines
// over a buffered channel. Construct with NewRunnerQueue and call
// Start before Put.
type RunnerQueue struct {
// started records whether Start has created the channel and workers.
started bool
// statusMutex guards started.
statusMutex *sync.Mutex
// n is the number of worker goroutines Start launches.
n int
// queueLen is the channel buffer size.
queueLen int
// queue carries pending work; nil until Start runs.
queue chan IRunnable
}
// NewRunnerQueue returns a RunnerQueue configured for n workers and a
// buffer of queueLen pending items; call Start before putting work.
func NewRunnerQueue(n, queueLen int) RunnerQueue {
	return RunnerQueue{
		n:           n,
		queueLen:    queueLen,
		started:     false,
		statusMutex: &sync.Mutex{},
	}
}
// Start creates the work channel and launches the worker goroutines.
// It is idempotent: only the first call has an effect. (The old code
// re-made the channel and spawned a fresh worker set on every call,
// stranding the previous workers on the abandoned channel.)
func (rq *RunnerQueue) Start() {
	rq.statusMutex.Lock()
	defer rq.statusMutex.Unlock()
	if rq.started {
		return
	}
	rq.started = true
	rq.queue = make(chan IRunnable, rq.queueLen)
	for i := 0; i < rq.n; i++ {
		go func() {
			// range exits cleanly when Close closes the channel; the
			// old bare receive yielded a nil IRunnable after close and
			// panicked on the method call.
			for ir := range rq.queue {
				ir.OnError(ir.Run())
			}
		}()
	}
}
// Close closes the work channel; queued items are still drained by the
// workers before they exit.
// NOTE(review): assumes Start has already run — closing the nil channel
// of an unstarted queue panics.
func (rq *RunnerQueue) Close() {
close(rq.queue)
}
// Put submits r to the work channel, blocking when the buffer is full.
// NOTE(review): calling Put before Start sends on a nil channel and
// blocks forever — confirm callers always Start first (Enqueue does).
func (rq *RunnerQueue) Put(r IRunnable) {
rq.queue <- r
}
// IsStarted reports whether Start has been called on this queue.
func (rq *RunnerQueue) IsStarted() bool {
	rq.statusMutex.Lock()
	started := rq.started
	rq.statusMutex.Unlock()
	return started
}
// runnerQueue is the package-wide default queue (10 workers, 1000 slots).
var runnerQueue = NewRunnerQueue(10, 1000)
// InitRunnerQueue replaces the default queue with one sized n workers
// and queueLen buffered slots; call it before the first Enqueue.
func InitRunnerQueue(n, queueLen int) {
runnerQueue = NewRunnerQueue(n, queueLen)
}
// Enqueue lazily starts the package-wide queue and submits r to it.
// NOTE(review): the IsStarted/Start pair is check-then-act — two
// concurrent first calls could both invoke Start. Confirm the first
// Enqueue is serialized, or make Start idempotent.
func Enqueue(r IRunnable) {
if !runnerQueue.IsStarted() {
runnerQueue.Start()
}
runnerQueue.Put(r)
}
package Graph
import (
GE "GoGraph/Edge"
GV "GoGraph/Vertex"
"fmt"
)
// Graph holds a vertex list, an (optionally built) weighted adjacency
// matrix, and the set of undirected edges. Directed edges are only
// reflected in vertex adjacency lists, not stored here.
type Graph struct {
// AdjList is the list of all vertices in the graph.
AdjList []*GV.Vertex
// AdjMatr is the weighted adjacency matrix built by GetAdjMatrix.
AdjMatr [][]float32
// UndirectedEdges are the edges added via AddUndirectedEdge(s).
UndirectedEdges []*GE.UndirectedEdge
}
// AddVertex appends v to the graph's vertex list.
// The vertex starts out isolated (no edges).
func (graph *Graph) AddVertex(v *GV.Vertex) {
graph.AdjList = append(graph.AdjList, v)
}
// AddVertices appends a slice of vertices to the graph's vertex list.
func (graph *Graph) AddVertices(vs []*GV.Vertex) {
	// One variadic append replaces the old element-by-element loop.
	graph.AdjList = append(graph.AdjList, vs...)
}
// AddUndirectedEdge stores e on the graph and records each endpoint in
// the other's adjacency list.
func (graph *Graph) AddUndirectedEdge(e *GE.UndirectedEdge) {
	u, v := e.U, e.V
	u.AddAdjList(v)
	v.AddAdjList(u)
	graph.UndirectedEdges = append(graph.UndirectedEdges, e)
}
// AddUndirectedEdges adds a slice of undirected edges, updating the
// adjacency lists of both endpoints of each edge.
func (graph *Graph) AddUndirectedEdges(es []*GE.UndirectedEdge) {
	// Delegate to AddUndirectedEdge so the per-edge bookkeeping lives
	// in exactly one place (the old loop duplicated it).
	for _, e := range es {
		graph.AddUndirectedEdge(e)
	}
}
// AddDirectedEdge records the edge's target in the source vertex's
// adjacency list.
// NOTE(review): unlike AddUndirectedEdge, the edge itself is not stored
// on the graph, so GetAdjMatrix will not see it — confirm intended.
func (graph *Graph) AddDirectedEdge(e *GE.DirectedEdge) {
e.FromVertex.AddAdjList(e.ToVertex)
}
// PrintGraph writes each vertex id, followed by the ids of its adjacent
// vertices, to stdout.
func (graph *Graph) PrintGraph() {
	for _, v := range graph.AdjList {
		fmt.Println(v.Id)
		for _, neighbour := range v.AdjList {
			fmt.Print(neighbour.Id)
		}
		fmt.Println()
	}
}
// GetAdjMatrix builds (and caches on the struct) the weighted adjacency
// matrix from the undirected edge list. Vertex ids are assumed to be
// 1-based and dense (id-1 indexes the matrix) — TODO confirm.
func (graph *Graph) GetAdjMatrix() [][]float32 {
	n := len(graph.AdjList)
	// Rebuild from scratch: the old code appended rows to whatever was
	// already in AdjMatr, so a second call grew the matrix to 2n rows.
	// It also zeroed every cell twice in separate loops.
	graph.AdjMatr = make([][]float32, n)
	for i := range graph.AdjMatr {
		graph.AdjMatr[i] = make([]float32, n) // zero-initialized
	}
	// Fill in symmetric weights for every undirected edge.
	for _, e := range graph.UndirectedEdges {
		graph.AdjMatr[e.U.Id-1][e.V.Id-1] = e.Weight
		graph.AdjMatr[e.V.Id-1][e.U.Id-1] = e.Weight
	}
	return graph.AdjMatr
}
|
package main
import (
"strconv"
"testing"
)
// TestSmoke is a trivially-passing sanity check that the test binary
// builds and runs.
func TestSmoke(t *testing.T) {
	const broken = false
	if broken {
		t.Errorf("smoke test")
	}
}
// TestFirstNumber verifies the sequence starts with "1".
func TestFirstNumber(t *testing.T) {
	got := fizzbuzz()[0]
	if want := "1"; want != got {
		t.Errorf("expected:%s got:%s", want, got)
	}
}
// TestThreeIsFizz verifies the third element (value 3) is "fizz".
func TestThreeIsFizz(t *testing.T) {
	got := fizzbuzz()[2] // third number
	if want := "fizz"; want != got {
		t.Errorf("expected:%s got:%s", want, got)
	}
}
// TestTotalCount verifies the sequence has exactly ten entries.
func TestTotalCount(t *testing.T) {
	got := len(fizzbuzz())
	if want := 10; want != got {
		t.Errorf("expected:%d got:%d", want, got)
	}
}
// fizzbuzz returns the classic fizzbuzz sequence for 1..10: multiples
// of 3 become "fizz", multiples of 5 become "buzz", multiples of both
// become "fizzbuzz", and everything else is its decimal string.
// (The old stub only filled positions 0 and 2, leaving the other eight
// entries empty; this completes the implementation while keeping all
// existing tests passing.)
func fizzbuzz() []string {
	const count = 10
	s := make([]string, 0, count)
	for i := 1; i <= count; i++ {
		switch {
		case i%15 == 0:
			s = append(s, "fizzbuzz")
		case i%3 == 0:
			s = append(s, "fizz")
		case i%5 == 0:
			s = append(s, "buzz")
		default:
			s = append(s, strconv.Itoa(i))
		}
	}
	return s
}
/*
1
2
fizz
4
buzz
fizz
7
8
fizz
buzz
11
fizz
13
14
fizzbuzz
16
*/
|
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/http/httputil"
"os"
"strings"
"time"
"github.com/gin-gonic/gin"
)
// Global configuration, read from the environment at startup.
var (
droneScheme = os.Getenv("DRONE_SCHEME")
droneHost = os.Getenv("DRONE_HOST")
droneToken = os.Getenv("DRONE_TOKEN")
apiPrefix = os.Getenv("API_PREFIX")
apiTrigger = os.Getenv("API_TRIGGER")
corpID = os.Getenv("CORP_ID")
corpSecret = os.Getenv("CORP_SECRET")
agentID = os.Getenv("AGENT_ID")
)
// init loads configuration defaults and initializes the database
// before main runs.
func init() {
initConfig()
initDB()
}
// initConfig fills in defaults for configuration values that were not
// supplied via the environment.
func initConfig() {
	if apiPrefix == "" {
		apiPrefix = "/api"
	}
	if apiTrigger == "" {
		apiTrigger = "/drone"
	}
}
// simpleHostProxy forwards requests to the Drone host, injecting the
// bearer token when the client did not supply one and rewriting the
// "<apiPrefix><apiTrigger>" path prefix to "/api".
var simpleHostProxy = httputil.ReverseProxy{
Director: func(req *http.Request) {
req.URL.Scheme = droneScheme
if req.Header.Get("Authorization") == "" && droneToken != "" {
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", droneToken))
}
req.URL.Host = droneHost
req.URL.Path = strings.Replace(req.URL.Path, fmt.Sprintf("%s%s", apiPrefix, apiTrigger), "/api", 1)
req.Host = droneHost
},
}
// getUserInfo resolves a WeChat Work OAuth code to user info via the
// getuserinfo endpoint and returns the decoded JSON payload.
func getUserInfo(accessToken string, code string) (gin.H, error) {
	resp, err := http.Get(fmt.Sprintf("https://qyapi.weixin.qq.com/cgi-bin/user/getuserinfo?access_token=%s&code=%s", accessToken, code))
	if err != nil {
		// log.Fatal would terminate the whole server (and made the
		// return below unreachable); a transient HTTP failure should
		// only fail this call.
		log.Println(err)
		return nil, err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Println(err)
		return nil, err
	}
	result := gin.H{}
	// body is already []byte; no conversion needed.
	if err := json.Unmarshal(body, &result); err != nil {
		log.Println(err)
		return nil, err
	}
	return result, nil
}
// getToken returns a WeChat Work access_token, reusing the cached value
// (stored as "token|expiry") while it is still valid and fetching a
// fresh one otherwise. Returns "" on failure.
func getToken() string {
	tokenKey := "wwtoken"
	dateFormat := "2006-01-02 15:04:05"
	// Reuse the cached token if it has not expired yet.
	token := Get(tokenKey)
	if token != "" {
		split := strings.Split(token, "|")
		if len(split) == 2 {
			expiresDate, err := time.Parse(dateFormat, split[1])
			if err == nil && time.Now().Before(expiresDate) {
				return split[0]
			}
		}
	}
	resp, err := http.Get(fmt.Sprintf("https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid=%s&corpsecret=%s", corpID, corpSecret))
	if err != nil {
		// log.Fatal would kill the server on a transient failure.
		log.Println(err)
		return ""
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Println(err)
		return ""
	}
	result := gin.H{}
	if err := json.Unmarshal(body, &result); err != nil {
		log.Println(err)
		return ""
	}
	// Guard the type assertions: on an API error the payload carries no
	// access_token/expires_in and the old unchecked assertions panicked.
	accessToken, ok := result["access_token"].(string)
	if !ok {
		log.Printf("unexpected gettoken response: %s", body)
		return ""
	}
	expiresSec, ok := result["expires_in"].(float64)
	if !ok {
		expiresSec = 7200 // presumably the API default lifetime -- TODO confirm
	}
	expiresIn := time.Now().Add(time.Duration(expiresSec) * time.Second)
	Set(tokenKey, fmt.Sprintf("%s|%s", accessToken, expiresIn.Format(dateFormat)))
	return accessToken
}
// main wires up the gin engine: static UI assets, the WeChat Work login
// endpoint, and a catch-all reverse proxy to the Drone API.
func main() {
	defer db.Close()
	engine := gin.New()
	engine.Static("/ui", "./ui")
	engine.Static("/f7", "./f7")
	vi := engine.Group(apiPrefix)
	vi.GET("/login", func(c *gin.Context) {
		token := getToken()
		data, err := getUserInfo(token, c.Query("code"))
		if err != nil {
			c.JSON(http.StatusOK, gin.H{
				"errcode": 500,
				"errmsg":  "Invalid code",
			})
			// Stop here: without this return the handler continued and
			// panicked dereferencing the nil payload below.
			return
		}
		// Guard the assertion: a payload without a string UserId would
		// otherwise panic the handler.
		userID, ok := data["UserId"].(string)
		if !ok {
			c.JSON(http.StatusOK, gin.H{
				"errcode": 500,
				"errmsg":  "Invalid code",
			})
			return
		}
		authToken := Get(userID)
		c.Redirect(http.StatusMovedPermanently, fmt.Sprintf("/f7/?user_id=%s&auth_token=%s#!/", userID, authToken))
	})
	vi.Any(fmt.Sprintf("%s/*api", apiTrigger), func(c *gin.Context) {
		simpleHostProxy.ServeHTTP(c.Writer, c.Request)
	})
	if err := engine.Run(":3000"); err != nil {
		log.Println(err)
	}
}
|
package text
import (
"io/ioutil"
"os"
"testing"
"github.com/aevea/quoad"
"github.com/stretchr/testify/assert"
)
// TestReleaseNotes checks the "complex" rendering (commit bodies
// included) against the golden file expected-output.md.
func TestReleaseNotes(t *testing.T) {
	notes := ReleaseNotes{Complex: true}
	file, err := os.Open("../../expected-output.md")
	assert.NoError(t, err)
	defer file.Close()
	b, err := ioutil.ReadAll(file)
	assert.NoError(t, err)
	expected := string(b)
	// gofmt -s: the element type is implied by the slice literal.
	sections := map[string][]quoad.Commit{
		"features": {{Category: "feat", Scope: "ci", Heading: "ci test", Body: "- Body"}},
		"chores":   {{Category: "chore", Scope: "", Heading: "testing", Body: "- Body"}, {Category: "improvement", Scope: "", Heading: "this should end up in chores", Issues: []int{12}}},
		"bugs":     {{Category: "bug", Scope: "", Heading: "huge bug", Body: "Body"}},
		"others":   {{Category: "other", Scope: "", Heading: "merge master in something"}, {Category: "bs", Scope: "", Heading: "random"}},
	}
	releaseNotes := notes.Generate(sections, false)
	assert.Equal(t, expected+"\n", "test heading"+releaseNotes)
}
// TestReleaseNotesSimple checks the default (simple) rendering against
// the golden file expected-output-simple.md.
func TestReleaseNotesSimple(t *testing.T) {
	notes := ReleaseNotes{}
	file, err := os.Open("../../expected-output-simple.md")
	assert.NoError(t, err)
	defer file.Close()
	b, err := ioutil.ReadAll(file)
	assert.NoError(t, err)
	expected := string(b)
	// gofmt -s: the element type is implied by the slice literal.
	sections := map[string][]quoad.Commit{
		"features": {{Category: "feat", Scope: "ci", Heading: "ci test"}},
		"chores":   {{Category: "chore", Scope: "", Heading: "testing"}, {Category: "improvement", Scope: "", Heading: "this should end up in chores", Issues: []int{12}}},
		"bugs":     {{Category: "bug", Scope: "", Heading: "huge bug"}},
		"others":   {{Category: "other", Scope: "", Heading: "merge master in something"}, {Category: "bs", Scope: "", Heading: "random"}},
	}
	releaseNotes := notes.Generate(sections, false)
	assert.Equal(t, expected+"\n", "test heading"+releaseNotes)
}
// TestReleaseNotesWithMissingSections checks that sections absent from
// the input map are simply omitted from the output.
func TestReleaseNotesWithMissingSections(t *testing.T) {
	notes := ReleaseNotes{}
	expected := "\n\n## :rocket: Features\n\n- 0000000 ci test\n\n"
	// gofmt -s: the element type is implied by the slice literal.
	sections := map[string][]quoad.Commit{
		"features": {{Heading: "ci test"}},
	}
	releaseNotes := notes.Generate(sections, false)
	assert.Equal(t, expected, releaseNotes)
}
|
package jtlr
import (
"testing"
)
// TestPrettyPrint runs PrettyPrint over a few representative JSON
// documents; it only checks that each call completes without panicking.
func TestPrettyPrint(t *testing.T) {
	cases := []struct {
		name  string
		input string
	}{
		{name: "a", input: `{"a": [134, 2], "b": {"a":1, "b":2}}`},
		{name: "b", input: `{"a": [134, {"a": 1}, true, [1, 2, 3], false], "b": {"a":1, "b":{"a":1, "b":2}}, "c": true, "d": null}`},
		{name: "c", input: `{"code":2,"message":"zz\u672a中"}`},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			PrettyPrint(tc.input)
		})
	}
}
|
package url_hash
import (
"github.com/gopherjs/gopherjs/js"
"github.com/winded/tyomaa/frontend/js/dom"
)
// Get returns the current window.location.hash with the leading '#'
// stripped, or "" when no hash is set.
func Get() string {
	hash := js.Global.Get("window").Get("location").Get("hash").String()
	// Early return instead of else-after-return (go vet/golint idiom).
	if len(hash) > 0 {
		return hash[1:]
	}
	return ""
}
// Set writes value to window.location.hash.
func Set(value string) {
js.Global.Get("window").Get("location").Set("hash", value)
}
// AddListener invokes listener whenever the window's hashchange event
// fires (registered through the jQuery wrapper).
func AddListener(listener func()) {
window := js.Global.Get("window")
dom.JQ(window).On("hashchange", listener)
}
|
// Go 提供了对 base64 编解码的内建支持
package main
// 这个语法引入了 encoding/base64 包, 并使用别名 b64 代替默认的 base64。
// 这样可以节省点空间
import (
b64 "encoding/base64"
"fmt"
)
// main demonstrates Go's built-in base64 support: it round-trips a
// sample string through both the standard and the URL-safe alphabets.
func main() {
	// The string to encode and decode.
	data := "abc123!?$*&()'-=@~"
	// Standard base64. The encoder takes a []byte, so convert first.
	stdEncoded := b64.StdEncoding.EncodeToString([]byte(data))
	fmt.Println(stdEncoded)
	// Decoding can fail on malformed input; the error is ignored here
	// because the input was just produced by the encoder.
	stdDecoded, _ := b64.StdEncoding.DecodeString(stdEncoded)
	fmt.Println(string(stdDecoded))
	fmt.Println()
	// URL-compatible base64 ('-'/'_' instead of '+'/'/'); both forms
	// decode back to the same original string.
	urlEncoded := b64.URLEncoding.EncodeToString([]byte(data))
	fmt.Println(urlEncoded)
	urlDecoded, _ := b64.URLEncoding.DecodeString(urlEncoded)
	fmt.Println(string(urlDecoded))
}
|
// Package bot provides the implementation of the IRC bot.
package bot
import (
"fmt"
"log"
"regexp"
"github.com/caiofilipini/got/irc"
)
const (
// Action is the command prefix that triggers the bot in a channel.
Action = "!got"
// WelcomeMsg is the message to be printed out when the bot is online.
WelcomeMsg = "OHAI"
// HelpCommand is the regexp pattern for the help command; the captured
// group is an optional command name.
HelpCommand = `(?i)help\s*(.*)`
)
// Command is the interface that registered commands need to
// implement in order to receive messages.
type Command interface {
// Name returns the command name (used for help lookup).
Name() string
// Pattern returns the pattern to be matched against
// in order to check if this command should be triggered.
Pattern() *regexp.Regexp
// Help returns the one-line help message for this command.
Help() string
// Usage returns details about how to use this command.
Usage() []string
// Run receives a query and returns a list of messages
// to be sent in response.
Run(string) []string
}
// Bot represents a running instance of the bot. Construct with NewBot,
// then Register commands, Start, and Listen.
type Bot struct {
// The IRC connection.
irc irc.IRC
// The user representing the bot in the IRC channel.
user string
// The password for the IRC channel (if applicable).
passwd string
// The list of registered commands, in registration order.
commands []Command
// A map where the key is the command name, and the
// value is the command itself (used by help).
commandsByName map[string]Command
// The regexp pattern that matches the action to trigger
// the bot; its capture group holds the request text.
action *regexp.Regexp
// The regexp pattern that matches the help command.
helpPattern *regexp.Regexp
// The channel where filtered requests are sent.
request chan string
// The channel where messages that match the configured
// action are sent.
in chan string
}
// NewBot creates and returns a value representing a connected bot.
func NewBot(irc irc.IRC, user, passwd string) Bot {
	// The trigger pattern captures everything after "!got " in channel
	// messages.
	trigger := regexp.MustCompile(fmt.Sprintf("PRIVMSG %s :%s (.*)", irc.Channel, Action))
	bot := Bot{
		irc:            irc,
		user:           user,
		passwd:         passwd,
		commands:       []Command{},
		commandsByName: map[string]Command{},
		action:         trigger,
		helpPattern:    regexp.MustCompile(HelpCommand),
		request:        make(chan string),
		in:             make(chan string),
	}
	return bot
}
// Register registers the given command, making it available both for
// dispatch (by pattern) and for help lookup (by name).
func (bot *Bot) Register(command Command) {
bot.commands = append(bot.commands, command)
bot.commandsByName[command.Name()] = command
}
// Start joins the channel, sends a welcome message and
// subscribes to messages that match the configured action.
// NOTE(review): subscription happens before joining — presumably so no
// matching message is missed; confirm the IRC client requires this
// ordering.
func (bot Bot) Start() {
bot.irc.Subscribe(bot.action, bot.in)
bot.irc.Join(bot.user, bot.passwd)
bot.irc.SendMessages(WelcomeMsg)
}
// Listen starts the background request handler and then forwards the
// request text of every matching incoming message to it. It blocks
// until the `in` channel is closed (see Shutdown).
func (bot Bot) Listen() {
	go bot.handleRequests()
	for message := range bot.in {
		matches := bot.action.FindStringSubmatch(message)
		if len(matches) > 1 {
			bot.request <- matches[1]
		}
	}
}
// Shutdown closes the incoming request channels, which terminates
// Listen's loop and the handleRequests goroutine.
func (bot Bot) Shutdown() {
close(bot.in)
close(bot.request)
}
// recognise checks whether request matches any registered command. On a
// match it returns the command, the extracted query (the pattern's last
// capture group), and a nil error; otherwise it returns an error.
func (bot Bot) recognise(request string) (Command, string, error) {
	for _, candidate := range bot.commands {
		match := candidate.Pattern().FindStringSubmatch(request)
		if len(match) == 0 {
			continue
		}
		return candidate, match[len(match)-1], nil
	}
	return nil, "", fmt.Errorf("Don't know how to handle \"%s\"", request)
}
// showHelp sends the usage information for a single command when one is
// named, or the one-line help for every registered command (plus the
// help command itself) when the argument is empty.
func (bot Bot) showHelp(command string) {
	var helpMessages []string
	if command == "" {
		for _, c := range bot.commands {
			helpMessages = append(helpMessages, formatHelp(c.Help())...)
		}
		helpMessages = append(helpMessages, formatHelp(
			"help – displays this message",
			"help <command> – displays usage for the given command",
		)...)
	} else if c, found := bot.commandsByName[command]; found {
		helpMessages = append(helpMessages, formatHelp(c.Usage()...)...)
	} else {
		helpMessages = append(helpMessages, "unknown command: "+command)
	}
	bot.irc.SendMessages(helpMessages...)
}
// handleRequests runs in the background and dispatches each request
// from the request channel: help requests go to showHelp, recognised
// commands are run, and anything else is logged as a warning.
func (bot Bot) handleRequests() {
	for r := range bot.request {
		info(fmt.Sprintf("Received request: %s", r))
		match := bot.helpPattern.FindStringSubmatch(r)
		if len(match) > 0 {
			bot.showHelp(match[len(match)-1])
			continue
		}
		command, query, err := bot.recognise(r)
		if err != nil {
			info(fmt.Sprintf("WARNING: %s", err.Error()))
			continue
		}
		bot.irc.SendMessages(command.Run(query)...)
	}
}
// formatHelp prefixes every help message with the bot action string so
// users see the full invocation.
func formatHelp(messages ...string) []string {
	out := make([]string, 0, len(messages))
	for _, m := range messages {
		out = append(out, fmt.Sprintf("%s %s", Action, m))
	}
	return out
}
// info writes msg to the log with the bot prefix.
func info(msg string) {
	log.Println("[Bot] " + msg)
}
|
package schema
import (
"github.com/facebookincubator/ent"
"github.com/facebookincubator/ent/schema/edge"
)
// CourseItem holds the schema definition for the CourseItem entity.
type CourseItem struct {
ent.Schema
}
// Fields of the CourseItem.
func (CourseItem) Fields() []ent.Field {
return nil
}
// Edges of the CourseItem.
func (CourseItem) Edges() []ent.Edge {
return []ent.Edge{
edge.From("courses", Course.Type).
Ref("course_items").
Unique(),
edge.From("subjects", Subject.Type).
Ref("course_items").
Unique(),
edge.From("types", SubjectType.Type).
Ref("course_items").
Unique(),
}
}
|
package odoo
import (
"fmt"
)
// StockReturnPicking represents stock.return.picking model.
// NOTE(review): the original tags spelled the option "omptempty"; the
// conventional option name is "omitempty", so the typo likely disabled
// field skipping — confirm against the xmlrpc library's tag parsing.
type StockReturnPicking struct {
	LastUpdate         *Time     `xmlrpc:"__last_update,omitempty"`
	CreateDate         *Time     `xmlrpc:"create_date,omitempty"`
	CreateUid          *Many2One `xmlrpc:"create_uid,omitempty"`
	DisplayName        *String   `xmlrpc:"display_name,omitempty"`
	Id                 *Int      `xmlrpc:"id,omitempty"`
	LocationId         *Many2One `xmlrpc:"location_id,omitempty"`
	MoveDestExists     *Bool     `xmlrpc:"move_dest_exists,omitempty"`
	OriginalLocationId *Many2One `xmlrpc:"original_location_id,omitempty"`
	ParentLocationId   *Many2One `xmlrpc:"parent_location_id,omitempty"`
	PickingId          *Many2One `xmlrpc:"picking_id,omitempty"`
	ProductReturnMoves *Relation `xmlrpc:"product_return_moves,omitempty"`
	WriteDate          *Time     `xmlrpc:"write_date,omitempty"`
	WriteUid           *Many2One `xmlrpc:"write_uid,omitempty"`
}
// StockReturnPickings represents array of stock.return.picking model.
type StockReturnPickings []StockReturnPicking
// StockReturnPickingModel is the odoo model name.
const StockReturnPickingModel = "stock.return.picking"
// Many2One convert StockReturnPicking to *Many2One, keyed by its id.
func (srp *StockReturnPicking) Many2One() *Many2One {
return NewMany2One(srp.Id.Get(), "")
}
// CreateStockReturnPicking creates a new stock.return.picking model and
// returns its id. A nil error with id -1 means the server returned no id.
func (c *Client) CreateStockReturnPicking(srp *StockReturnPicking) (int64, error) {
	ids, err := c.CreateStockReturnPickings([]*StockReturnPicking{srp})
	switch {
	case err != nil:
		return -1, err
	case len(ids) == 0:
		return -1, nil
	}
	return ids[0], nil
}
// CreateStockReturnPickings creates new stock.return.picking models and
// returns their ids. (Comment previously named the singular function.)
func (c *Client) CreateStockReturnPickings(srps []*StockReturnPicking) ([]int64, error) {
var vv []interface{}
for _, v := range srps {
vv = append(vv, v)
}
return c.Create(StockReturnPickingModel, vv)
}
// UpdateStockReturnPicking updates an existing stock.return.picking
// record, identified by srp.Id.
func (c *Client) UpdateStockReturnPicking(srp *StockReturnPicking) error {
return c.UpdateStockReturnPickings([]int64{srp.Id.Get()}, srp)
}
// UpdateStockReturnPickings updates existing stock.return.picking records.
// All records (represented by ids) will be updated by srp values.
func (c *Client) UpdateStockReturnPickings(ids []int64, srp *StockReturnPicking) error {
return c.Update(StockReturnPickingModel, ids, srp)
}
// DeleteStockReturnPicking deletes an existing stock.return.picking
// record by id.
func (c *Client) DeleteStockReturnPicking(id int64) error {
return c.DeleteStockReturnPickings([]int64{id})
}
// DeleteStockReturnPickings deletes existing stock.return.picking
// records by their ids.
func (c *Client) DeleteStockReturnPickings(ids []int64) error {
return c.Delete(StockReturnPickingModel, ids)
}
// GetStockReturnPicking gets an existing stock.return.picking record by
// id, or an error when the id does not exist.
func (c *Client) GetStockReturnPicking(id int64) (*StockReturnPicking, error) {
	srps, err := c.GetStockReturnPickings([]int64{id})
	if err != nil {
		return nil, err
	}
	if srps == nil || len(*srps) == 0 {
		return nil, fmt.Errorf("id %v of stock.return.picking not found", id)
	}
	return &((*srps)[0]), nil
}
// GetStockReturnPickings gets existing stock.return.picking records by
// their ids.
func (c *Client) GetStockReturnPickings(ids []int64) (*StockReturnPickings, error) {
srps := &StockReturnPickings{}
if err := c.Read(StockReturnPickingModel, ids, nil, srps); err != nil {
return nil, err
}
return srps, nil
}
// FindStockReturnPicking returns the first stock.return.picking record
// matching criteria, or an error when nothing matches.
func (c *Client) FindStockReturnPicking(criteria *Criteria) (*StockReturnPicking, error) {
	srps := &StockReturnPickings{}
	if err := c.SearchRead(StockReturnPickingModel, criteria, NewOptions().Limit(1), srps); err != nil {
		return nil, err
	}
	if srps == nil || len(*srps) == 0 {
		return nil, fmt.Errorf("stock.return.picking was not found with criteria %v", criteria)
	}
	return &((*srps)[0]), nil
}
// FindStockReturnPickings finds stock.return.picking records by querying it
// and filtering it with criteria and options.
func (c *Client) FindStockReturnPickings(criteria *Criteria, options *Options) (*StockReturnPickings, error) {
srps := &StockReturnPickings{}
if err := c.SearchRead(StockReturnPickingModel, criteria, options, srps); err != nil {
return nil, err
}
return srps, nil
}
// FindStockReturnPickingIds returns the ids of records matching the
// given criteria and options (empty slice on error).
func (c *Client) FindStockReturnPickingIds(criteria *Criteria, options *Options) ([]int64, error) {
	ids, err := c.Search(StockReturnPickingModel, criteria, options)
	if err == nil {
		return ids, nil
	}
	return []int64{}, err
}
// FindStockReturnPickingId returns the id of the first record matching
// criteria/options, or an error when nothing matches.
func (c *Client) FindStockReturnPickingId(criteria *Criteria, options *Options) (int64, error) {
	ids, err := c.Search(StockReturnPickingModel, criteria, options)
	switch {
	case err != nil:
		return -1, err
	case len(ids) > 0:
		return ids[0], nil
	}
	return -1, fmt.Errorf("stock.return.picking was not found with criteria %v and options %v", criteria, options)
}
|
/*
Copyright 2021 The KodeRover Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package models
import (
"encoding/json"
"fmt"
"go.mongodb.org/mongo-driver/bson/primitive"
"github.com/koderover/zadig/lib/microservice/aslan/config"
)
// Pipeline is the MongoDB document describing a single-service pipeline
// (collection pipeline_v2; see TableName).
type Pipeline struct {
ID primitive.ObjectID `bson:"_id,omitempty" json:"id,omitempty"`
Name string `bson:"name" json:"name"`
Type config.PipelineType `bson:"type" json:"type"`
Enabled bool `bson:"enabled" json:"enabled"`
TeamName string `bson:"team" json:"team"`
ProductName string `bson:"product_name" json:"product_name"`
// OrgID is the single-tenant organization id.
OrgID int `bson:"org_id" json:"org_id"`
// Target is the service name: the container name on k8s, the service
// name on physical machines.
Target string `bson:"target" json:"target"`
// SubTasks are generated from the predefined build module, looked up
// by service name (Target) and version (BuildModuleVer). When
// BuildModuleVer is empty, the pipeline's own custom SubTasks are used.
BuildModuleVer string `bson:"build_module_ver" json:"build_module_ver"`
SubTasks []map[string]interface{} `bson:"sub_tasks" json:"sub_tasks"`
Description string `bson:"description,omitempty" json:"description,omitempty"`
UpdateBy string `bson:"update_by" json:"update_by,omitempty"`
CreateTime int64 `bson:"create_time" json:"create_time,omitempty"`
UpdateTime int64 `bson:"update_time" json:"update_time,omitempty"`
Schedules ScheduleCtrl `bson:"schedules,omitempty" json:"schedules,omitempty"`
Hook *Hook `bson:"hook,omitempty" json:"hook,omitempty"`
Notifiers []string `bson:"notifiers,omitempty" json:"notifiers,omitempty"`
RunCount int `bson:"run_count,omitempty" json:"run_count,omitempty"`
DailyRunCount float32 `bson:"daily_run_count,omitempty" json:"daily_run_count,omitempty"`
PassRate float32 `bson:"pass_rate,omitempty" json:"pass_rate,omitempty"`
IsDeleted bool `bson:"is_deleted" json:"is_deleted"`
Slack *Slack `bson:"slack" json:"slack"`
NotifyCtl *NotifyCtl `bson:"notify_ctl" json:"notify_ctl"`
IsFavorite bool `bson:"-" json:"is_favorite"`
// MultiRun reports whether multiple runs may execute concurrently.
MultiRun bool `bson:"multi_run" json:"multi_run"`
}
// Stage is one step of a pipeline, grouping sub-tasks of a single type.
type Stage struct {
// Note: a single stage cannot currently run tasks of different types.
TaskType config.TaskType `bson:"type" json:"type"`
Status config.Status `bson:"status" json:"status"`
RunParallel bool `bson:"run_parallel" json:"run_parallel"`
Desc string `bson:"desc,omitempty" json:"desc,omitempty"`
SubTasks map[string]map[string]interface{} `bson:"sub_tasks" json:"sub_tasks"`
AfterAll bool `json:"after_all" bson:"after_all"`
}
// Hook holds the webhook trigger configuration of a pipeline.
type Hook struct {
	// Enabled toggles all webhook triggers for the pipeline.
	Enabled  bool      `bson:"enabled" json:"enabled"`
	GitHooks []GitHook `bson:"git_hooks" json:"git_hooks,omitempty"`
}
// GitHook configures a single git webhook trigger.
// Events: push, pull_request.
// MatchFolders: directories to include, or file suffixes;
// an entry prefixed with "!" excludes that directory/suffix from
// triggering the pipeline.
type GitHook struct {
	Owner        string   `bson:"repo_owner" json:"repo_owner"`
	Repo         string   `bson:"repo" json:"repo"`
	Branch       string   `bson:"branch" json:"branch"`
	Events       []string `bson:"events" json:"events"`
	MatchFolders []string `bson:"match_folders" json:"match_folders,omitempty"`
	CodehostId   int      `bson:"codehost_id" json:"codehost_id"`
	AutoCancel   bool     `bson:"auto_cancel" json:"auto_cancel"`
}
// SubTask is the raw, schemaless representation of a single pipeline sub task.
type SubTask map[string]interface{}
// Preview is a lightweight view of a sub task: its type and enabled flag.
type Preview struct {
	TaskType config.TaskType `json:"type"`
	Enabled  bool            `json:"enabled"`
}
// ToPreview converts the raw sub task into a Preview via a JSON round trip.
func (sb *SubTask) ToPreview() (*Preview, error) {
	var preview *Preview
	err := IToi(sb, &preview)
	if err != nil {
		return nil, fmt.Errorf("convert interface to SubTaskPreview error: %v", err)
	}
	return preview, nil
}
// ToBuildTask converts the raw sub task into a Build task via a JSON round trip.
func (sb *SubTask) ToBuildTask() (*Build, error) {
	var build *Build
	err := IToi(sb, &build)
	if err != nil {
		return nil, fmt.Errorf("convert interface to BuildTaskV2 error: %v", err)
	}
	return build, nil
}
// ToTestingTask converts the raw sub task into a Testing task via a JSON round trip.
func (sb *SubTask) ToTestingTask() (*Testing, error) {
	var testingTask *Testing
	err := IToi(sb, &testingTask)
	if err != nil {
		return nil, fmt.Errorf("convert interface to Testing error: %v", err)
	}
	return testingTask, nil
}
// IToi converts one value into another by marshalling `before` to JSON and
// unmarshalling the bytes into `after`. `after` must be a non-nil pointer
// (callers typically pass a **T) so the decoded value is visible to them.
func IToi(before interface{}, after interface{}) error {
	b, err := json.Marshal(before)
	if err != nil {
		return fmt.Errorf("marshal task error: %w", err)
	}
	// Pass `after` directly: it is already the caller's pointer. The previous
	// `&after` added a needless *interface{} indirection that encoding/json
	// had to unwrap. Errors are wrapped with %w so callers can errors.Is/As.
	if err := json.Unmarshal(b, after); err != nil {
		return fmt.Errorf("unmarshal task error: %w", err)
	}
	return nil
}
// TableName returns the storage table/collection name backing the Pipeline model.
func (Pipeline) TableName() string {
	const table = "pipeline_v2"
	return table
}
|
// Copyright 2020 Comcast Cable Communications Management, LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kinesis
import (
"fmt"
"github.com/xeipuuv/gojsonschema"
)
// WithDefaults returns a copy of the sender config with any unset fields
// populated from DefaultSenderConfig.
func (sc SenderConfig) WithDefaults() SenderConfig {
	cfg := sc
	if cfg.MaxNumberOfMessages == nil {
		cfg.MaxNumberOfMessages = DefaultSenderConfig.MaxNumberOfMessages
	}
	if cfg.SendTimeout == nil {
		cfg.SendTimeout = DefaultSenderConfig.SendTimeout
	}
	if cfg.AWSRegion == "" {
		// Fix: previously defaulted from DefaultReceiverConfig.AWSRegion —
		// a copy-paste slip; a sender config should default from the sender
		// defaults like every other field here.
		cfg.AWSRegion = DefaultSenderConfig.AWSRegion
	}
	return cfg
}
// Validate checks the sender config against the embedded JSON schema
// (senderSchema) and returns an error describing any violations.
func (sc *SenderConfig) Validate() error {
	schema := gojsonschema.NewStringLoader(senderSchema)
	doc := gojsonschema.NewGoLoader(*sc)
	result, err := gojsonschema.Validate(schema, doc)
	if err != nil {
		return err
	}
	if !result.Valid() {
		// Format directly; the previous fmt.Errorf(fmt.Sprintf(...)) double
		// formatting trips `go vet`'s printf check (non-constant format
		// string) and could mangle output containing '%'.
		return fmt.Errorf("%+v", result.Errors())
	}
	return nil
}
// senderSchema is the JSON Schema (draft-06) used by Validate to check a
// SenderConfig; streamName is the only required property, and unknown
// properties are rejected (additionalProperties: false).
const senderSchema = `
{
    "$schema": "http://json-schema.org/draft-06/schema#",
    "$ref": "#/definitions/SenderConfig",
    "definitions": {
        "SenderConfig": {
            "type": "object",
            "additionalProperties": false,
            "properties": {
                "streamName": {
                    "type": "string"
                },
                "partitionKey": {
                    "type": "string"
                },
                "partitionKeyPath": {
                    "type": "string"
                },
                "awsRoleARN": {
                    "type": "string"
                },
                "awsAccessKeyId": {
                    "type": "string"
                },
                "awsSecretAccessKey": {
                    "type": "string"
                },
                "awsRegion": {
                    "type": "string"
                },
                "maxNumberOfMessages": {
                    "type": "integer",
                    "minimum": 1,
                    "maximum": 500
                },
                "sendTimeout": {
                    "type": "integer",
                    "minimum": 1,
                    "maximum": 60
                }
            },
            "required": [
                "streamName"
            ],
            "title": "SenderConfig"
        }
    }
}
`
|
package struct_utils
import (
"reflect"
"go-corm/errorHandle"
"strings"
"time"
)
// Analysis reflects over the struct pointed to by o and collects, for every
// settable field: a reflection handle keyed by field name (mapField), one
// keyed by `corm` tag column name (mapTag), a field-name -> column-name map
// (mapFieldToTag), and the list of primary-key columns (pk). Nested structs
// (other than time.Time) are analysed recursively and their results merged.
func Analysis(o interface{}) (mapField, mapTag ReflectFieldMap, mapFieldToTag map[string]string, pk []string, err error) {
	// Convert any panic raised during reflection into the named err return.
	defer errorHandle.CatchLoadDataError(&err)
	mapField, mapTag, mapFieldToTag = NewReflectFieldMap(), NewReflectFieldMap(), make(map[string]string)
	refE := reflect.ValueOf(o).Elem()
	for i := 0; i < refE.NumField(); i++ {
		if refE.Field(i).Kind() == reflect.Struct && !refE.Field(i).Type().AssignableTo(reflect.TypeOf(time.Time{})) {
			// Nested struct: recurse and merge its maps and primary keys.
			mField, mTag, mFtoT, pkRet, err := Analysis(refE.Field(i).Addr().Interface())
			if err != nil {
				return nil, nil, nil, nil, err
			}
			mapField.mergeMap(mField)
			mapTag.mergeMap(mTag)
			MergeStringMap(mapFieldToTag, mFtoT)
			pk = append(pk, pkRet...)
		} else if refE.Field(i).CanSet() {
			mapField[refE.Type().Field(i).Name] = refE.Field(i)
			if fieldTag := refE.Type().Field(i).Tag.Get("corm"); fieldTag != "" { // field carries a corm tag
				// fieldTag format: column_name[,pk]
				tagArr := strings.Split(fieldTag, ",")
				if len(tagArr) > 1 && (strings.ToUpper(strings.TrimSpace(tagArr[1])) == "PK") { // primary-key marker
					pk = append(pk, strings.TrimSpace(tagArr[0]))
				}
				mapTag[strings.TrimSpace(tagArr[0])] = refE.Field(i)
				mapFieldToTag[refE.Type().Field(i).Name] = strings.TrimSpace(tagArr[0])
			}
		}
	}
	return
}
|
package cloudformation
// AWSAutoScalingAutoScalingGroup_LaunchTemplateSpecification AWS CloudFormation Resource (AWS::AutoScaling::AutoScalingGroup.LaunchTemplateSpecification)
// See: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-autoscalinggroup-launchtemplatespecification.html
// NOTE(review): this type follows the file's generated-code layout; do not
// hand-edit field names or tags.
type AWSAutoScalingAutoScalingGroup_LaunchTemplateSpecification struct {

	// LaunchTemplateId AWS CloudFormation Property
	// Required: false
	// See: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-autoscalinggroup-launchtemplatespecification.html#cfn-autoscaling-autoscalinggroup-launchtemplatespecification-launchtemplateid
	LaunchTemplateId string `json:"LaunchTemplateId,omitempty"`

	// LaunchTemplateName AWS CloudFormation Property
	// Required: false
	// See: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-autoscalinggroup-launchtemplatespecification.html#cfn-autoscaling-autoscalinggroup-launchtemplatespecification-launchtemplatename
	LaunchTemplateName string `json:"LaunchTemplateName,omitempty"`

	// Version AWS CloudFormation Property
	// Required: true
	// See: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-autoscaling-autoscalinggroup-launchtemplatespecification.html#cfn-autoscaling-autoscalinggroup-launchtemplatespecification-version
	Version string `json:"Version,omitempty"`
}
// AWSCloudFormationType returns the AWS CloudFormation resource type
func (r *AWSAutoScalingAutoScalingGroup_LaunchTemplateSpecification) AWSCloudFormationType() string {
	const resourceType = "AWS::AutoScaling::AutoScalingGroup.LaunchTemplateSpecification"
	return resourceType
}
|
// Copyright (c) Jeevanandam M. (https://github.com/jeevatkm)
// go-aah/aah source code and usage is governed by a MIT style
// license that can be found in the LICENSE file.
package aah
import (
"errors"
"fmt"
"net/http"
"net/url"
"reflect"
"strings"
"aahframework.org/ahttp.v0"
"aahframework.org/essentials.v0"
"aahframework.org/valpar.v0"
"gopkg.in/go-playground/validator.v9"
)
const (
	// KeyViewArgRequestParams key name is used to store HTTP Request Params instance
	// into `ViewArgs`.
	KeyViewArgRequestParams = "_aahRequestParams"

	// keyOverrideI18nName is the default request param name used to override
	// the i18n locale.
	keyOverrideI18nName = "lang"

	// allContentTypes is the wildcard media type; when configured it disables
	// content-negotiation checks.
	allContentTypes = "*/*"
)
var (
	// Param names used to read the locale override; replaced from config in
	// bindInitialize.
	keyQueryParamName = keyOverrideI18nName
	keyPathParamName  = keyOverrideI18nName

	// requestParsers maps a Content-Type mime to its request body parser.
	requestParsers = make(map[string]requestParser)

	// Content-negotiation settings, loaded from config in bindInitialize.
	isContentNegotiationEnabled bool
	acceptedContentTypes        []string
	offeredContentTypes         []string

	// autobindPriority holds the reversed auto-bind priority order
	// (config "request.auto_bind.priority", default "PFQ").
	autobindPriority []string

	errInvalidParsedValue = errors.New("aah: parsed value is invalid")
)
// requestParser parses a specific request content type into the context and
// returns a flowResult signalling whether processing should continue.
type requestParser func(ctx *Context) flowResult
//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
// Package method
//___________________________________
// AddValueParser method adds given custom value parser for the `reflect.Type`.
// It delegates directly to valpar.AddValueParser; an error is returned if the
// underlying registration fails.
func AddValueParser(typ reflect.Type, parser valpar.Parser) error {
	return valpar.AddValueParser(typ, parser)
}
// Validator method returns the default validator of aah framework
// (a thin delegation to valpar.Validator).
//
// Refer to https://godoc.org/gopkg.in/go-playground/validator.v9 for detailed
// documentation.
func Validator() *validator.Validate {
	return valpar.Validator()
}
// Validate method is to validate struct via underneath validator
// (a thin delegation to valpar.Validate).
//
// Returns:
//
//  - For validation errors: returns `validator.ValidationErrors` and nil
//
//  - For invalid input: returns nil, error (invalid input such as nil, non-struct, etc.)
//
//  - For no validation errors: nil, nil
func Validate(s interface{}) (validator.ValidationErrors, error) {
	return valpar.Validate(s)
}
// ValidateValue method is to validate individual value on demand
// (a thin delegation to valpar.ValidateValue).
//
// Returns -
//
//  - true: validation passed
//
//  - false: validation failed
//
// For example:
//
//  i := 15
//  result := valpar.ValidateValue(i, "gt=1,lt=10")
//
//  emailAddress := "sample@sample"
//  result := valpar.ValidateValue(emailAddress, "email")
//
//  numbers := []int{23, 67, 87, 23, 90}
//  result := valpar.ValidateValue(numbers, "unique")
func ValidateValue(v interface{}, rules string) bool {
	return valpar.ValidateValue(v, rules)
}
//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
// Bind Middleware
//___________________________________
// BindMiddleware method parses the incoming HTTP request to collects request
// parameters (Path, Form, Query, Multipart) stores into context. Request
// params are made available in View via template functions. It also enforces
// content negotiation (415/406) and a max body size before parsing.
func BindMiddleware(ctx *Context, m *Middleware) {
	if AppI18n() != nil {
		// i18n locale HTTP header `Accept-Language` value override via
		// Path Variable and URL Query Param (config i18n { param_name { ... } }).
		// Note: Query parameter takes precedence of all.
		if locale := firstNonZeroString(
			ctx.Req.QueryValue(keyQueryParamName),
			ctx.Req.PathValue(keyPathParamName)); !ess.IsStrEmpty(locale) {
			ctx.Req.Locale = ahttp.NewLocale(locale)
		}
	}

	// GET requests skip negotiation and body parsing entirely.
	if ctx.Req.Method == ahttp.MethodGet {
		goto PCont
	}

	ctx.Log().Debugf("Request Content-Type mime: %s", ctx.Req.ContentType.Mime)

	// Content Negotitaion - Accepted & Offered, refer to GitHub #75
	if isContentNegotiationEnabled {
		// Reject request content types the server does not accept (415).
		if len(acceptedContentTypes) > 0 &&
			!ess.IsSliceContainsString(acceptedContentTypes, ctx.Req.ContentType.Mime) {
			ctx.Log().Warnf("Content type '%v' not accepted by server", ctx.Req.ContentType.Mime)
			ctx.Reply().Error(&Error{
				Reason:  ErrContentTypeNotAccepted,
				Code:    http.StatusUnsupportedMediaType,
				Message: http.StatusText(http.StatusUnsupportedMediaType),
			})
			return
		}

		// Reject Accept headers the server cannot offer (406).
		if len(offeredContentTypes) > 0 &&
			!ess.IsSliceContainsString(offeredContentTypes, ctx.Req.AcceptContentType.Mime) {
			ctx.Reply().Error(&Error{
				Reason:  ErrContentTypeNotOffered,
				Code:    http.StatusNotAcceptable,
				Message: http.StatusText(http.StatusNotAcceptable),
			})
			ctx.Log().Warnf("Content type '%v' not offered by server", ctx.Req.AcceptContentType.Mime)
			return
		}
	}

	// Prevent DDoS attacks by large HTTP request bodies by enforcing
	// configured hard limit, GitHub #83. Multipart requests are excluded
	// here — presumably bounded by the multipart parser's memory limit;
	// confirm against multipartFormParser.
	if ctx.Req.ContentType.Mime != ahttp.ContentTypeMultipartForm.Mime {
		ctx.Req.Unwrap().Body = http.MaxBytesReader(ctx.Res, ctx.Req.Unwrap().Body,
			firstNonZeroInt64(ctx.route.MaxBodySize, appMaxBodyBytesSize))
	}

	// Parse request content by Content-Type
	if parser, found := requestParsers[ctx.Req.ContentType.Mime]; found {
		if res := parser(ctx); res == flowStop {
			return
		}
	}

PCont:
	// Compose request details, we can log at the end of the request.
	if isDumpLogEnabled {
		ctx.Set(keyAahRequestDump, composeRequestDump(ctx))
	}

	m.Next(ctx)
}
//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
// Content Parser methods
//___________________________________
// multipartFormParser parses a multipart form body into the request params
// (both values and uploaded files). Parse failures are logged, not fatal;
// the middleware flow always continues.
func multipartFormParser(ctx *Context) flowResult {
	req := ctx.Req.Unwrap()
	if err := req.ParseMultipartForm(appMultipartMaxMemory); err != nil {
		ctx.Log().Errorf("Unable to parse multipart form: %s", err)
		return flowCont
	}
	ctx.Req.Params.Form = req.MultipartForm.Value
	ctx.Req.Params.File = req.MultipartForm.File
	return flowCont
}
// formParser parses a URL-encoded form body into the request params.
// Parse failures are logged, not fatal; the middleware flow always continues.
func formParser(ctx *Context) flowResult {
	req := ctx.Req.Unwrap()
	if err := req.ParseForm(); err != nil {
		ctx.Log().Errorf("Unable to parse form: %s", err)
		return flowCont
	}
	ctx.Req.Params.Form = req.Form
	return flowCont
}
//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
// Action Parameters Auto Parse
//___________________________________
// parseParameters parses and binds the action's declared parameters from the
// request (path/form/query params, or the body for struct params), validates
// them, and returns the reflect values to invoke the action with. A non-nil
// *Error signals a 400 Bad Request reply.
func parseParameters(ctx *Context) ([]reflect.Value, *Error) {
	paramCnt := len(ctx.action.Parameters)

	// If parameters not exists, return here
	if paramCnt == 0 {
		return emptyArg, nil
	}

	// Parse and Bind parameters
	params := createParams(ctx)
	var err error
	actionArgs := make([]reflect.Value, paramCnt)
	for idx, val := range ctx.action.Parameters {
		var result reflect.Value
		if vpFn, found := valpar.ValueParser(val.Type); found {
			// Type has a registered value parser: bind from merged params.
			result, err = vpFn(val.Name, val.Type, params)

			// GitHub #132 Validation implementation
			if rule, found := ctx.route.ValidationRule(val.Name); found {
				if !ValidateValue(result.Interface(), rule) {
					errMsg := fmt.Sprintf("Path param validation failed [name: %s, rule: %s, value: %v]",
						val.Name, rule, result.Interface())
					ctx.Log().Error(errMsg)
					return nil, &Error{
						Reason:  ErrValidation,
						Code:    http.StatusBadRequest,
						Message: http.StatusText(http.StatusBadRequest),
						Data:    errMsg,
					}
				}
			}
		} else if val.kind == reflect.Struct {
			// Struct parameter: decode from the request body for JSON/XML
			// content types, otherwise bind field-by-field from params.
			ct := ctx.Req.ContentType.Mime
			if ct == ahttp.ContentTypeJSON.Mime || ct == ahttp.ContentTypeJSONText.Mime ||
				ct == ahttp.ContentTypeXML.Mime || ct == ahttp.ContentTypeXMLText.Mime {
				result, err = valpar.Body(ct, ctx.Req.Body(), val.Type)
				if isDumpLogEnabled && dumpRequestBody {
					addReqBodyIntoCtx(ctx, result)
				}
			} else {
				result, err = valpar.Struct("", val.Type, params)
			}
		}

		// check error
		if err != nil {
			if !result.IsValid() {
				ctx.Log().Errorf("Parsed result value is invalid or value parser not found [param: %s, type: %s]",
					val.Name, val.Type)
			}
			return nil, &Error{
				Reason:  ErrInvalidRequestParameter,
				Code:    http.StatusBadRequest,
				Message: http.StatusText(http.StatusBadRequest),
				Data:    err,
			}
		}

		// Apply Validation for type `struct`
		if val.kind == reflect.Struct {
			if errs, _ := Validate(result.Interface()); errs != nil {
				ctx.Log().Errorf("Param validation failed [name: %s, type: %s], Validation Errors:\n%v",
					val.Name, val.Type, errs.Error())
				return nil, &Error{
					Reason:  ErrValidation,
					Code:    http.StatusBadRequest,
					Message: http.StatusText(http.StatusBadRequest),
					Data:    errs,
				}
			}
		}

		// set action parameter value
		actionArgs[idx] = result
	}

	return actionArgs, nil
}
// createParams merges Path, Form and Query values into one url.Values,
// applied in the configured (reversed) autobind priority order so that
// later sources overwrite earlier ones.
func createParams(ctx *Context) url.Values {
	values := make(url.Values)
	for _, source := range autobindPriority {
		switch source {
		case "P": // path values — one string each, Set replaces
			for name, v := range ctx.Req.Params.Path {
				values.Set(name, v)
			}
		case "F": // form values — slices, assigned wholesale
			for name, vs := range ctx.Req.Params.Form {
				values[name] = vs
			}
		case "Q": // query values — slices, assigned wholesale
			for name, vs := range ctx.Req.Params.Query {
				values[name] = vs
			}
		}
	}
	return values
}
//‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾
// Template methods
//___________________________________
// tmplPathParam is the template function returning the sanitized Request
// Path Param value for the given key.
func tmplPathParam(viewArgs map[string]interface{}, key string) interface{} {
	p := viewArgs[KeyViewArgRequestParams].(*ahttp.Params)
	return sanatizeValue(p.PathValue(key))
}
// tmplFormParam is the template function returning the sanitized Request
// Form value for the given key.
func tmplFormParam(viewArgs map[string]interface{}, key string) interface{} {
	p := viewArgs[KeyViewArgRequestParams].(*ahttp.Params)
	return sanatizeValue(p.FormValue(key))
}
// tmplQueryParam is the template function returning the sanitized Request
// Query String value for the given key.
func tmplQueryParam(viewArgs map[string]interface{}, key string) interface{} {
	p := viewArgs[KeyViewArgRequestParams].(*ahttp.Params)
	return sanatizeValue(p.QueryValue(key))
}
// normalizeContentTypes lower-cases each configured content type. If the
// wildcard "*/*" is present the list collapses to an empty slice, because the
// wildcard means every content type is allowed and the check is skipped.
func normalizeContentTypes(types []string) []string {
	for idx, v := range types {
		types[idx] = strings.ToLower(v)
		if v == allContentTypes {
			return make([]string, 0)
		}
	}
	return types
}

// bindInitialize loads i18n param names, content-negotiation lists, request
// parsers and time formats from the app config. It runs on application start
// (registered via OnStart in init).
func bindInitialize(e *Event) {
	cfg := AppConfig()
	keyPathParamName = cfg.StringDefault("i18n.param_name.path", keyOverrideI18nName)
	keyQueryParamName = cfg.StringDefault("i18n.param_name.query", keyOverrideI18nName)

	// Content Negotitaion, GitHub #75. The accepted/offered lists previously
	// duplicated the same normalization loop; extracted into a helper.
	isContentNegotiationEnabled = cfg.BoolDefault("request.content_negotiation.enable", false)
	acceptedContentTypes, _ = cfg.StringList("request.content_negotiation.accepted")
	acceptedContentTypes = normalizeContentTypes(acceptedContentTypes)
	offeredContentTypes, _ = cfg.StringList("request.content_negotiation.offered")
	offeredContentTypes = normalizeContentTypes(offeredContentTypes)

	// Auto Parse and Bind, GitHub #26
	requestParsers[ahttp.ContentTypeMultipartForm.Mime] = multipartFormParser
	requestParsers[ahttp.ContentTypeForm.Mime] = formParser
	autobindPriority = reverseSlice(strings.Split(cfg.StringDefault("request.auto_bind.priority", "PFQ"), ""))

	// Time layouts tried when parsing time parameters; sensible defaults if
	// "format.time" is not configured.
	timeFormats, found := cfg.StringList("format.time")
	if !found {
		timeFormats = []string{
			"2006-01-02T15:04:05Z07:00",
			"2006-01-02T15:04:05Z",
			"2006-01-02 15:04:05",
			"2006-01-02"}
	}
	valpar.TimeFormats = timeFormats
	valpar.StructTagName = cfg.StringDefault("request.auto_bind.tag_name", "bind")
}
// init registers bindInitialize to run when the application starts.
func init() {
	OnStart(bindInitialize)
}
|
package main
import (
"context"
"path"
"regexp"
"strconv"
"time"
"github.com/guitarrapc/watchdog-symlinker/directory"
"github.com/guitarrapc/watchdog-symlinker/filewatch"
)
// fileWatcher watches directories matching directoryPattern and maintains a
// symlink named symlinkName inside each matching directory (see run).
type fileWatcher struct {
	directoryPattern string
	symlinkName      string
	option           fileWatcherOption
}

// fileWatcherOption tunes how target files are detected.
type fileWatcherOption struct {
	// filePattern selects candidate files within a watched directory.
	filePattern string
	// useFileWalk selects the polling handler (Handler.Run) instead of the
	// event-based one (Handler.RunEvent).
	useFileWalk bool
}
// run polls every 3 seconds for directories matching e.directoryPattern; for
// each match it launches a filewatch handler goroutine that keeps the symlink
// (e.symlinkName) pointing at the latest file. Polling stops once at least one
// matching directory has been handled. Fatal setup errors go to exitError.
func (e *fileWatcher) run(ctx context.Context, exit chan<- struct{}, exitError chan<- error) {
	logger.Info("starting filewatcher ...")

	// extract base path
	logger.Infof("extract base path for %s ...", e.directoryPattern)
	basePath, err := directory.GetBasePath(e.directoryPattern)
	if err != nil {
		exitError <- err
		return
	}

	// loop until target directory found
	var dirs []string
	pattern := regexp.MustCompile(e.directoryPattern)
	t := time.NewTicker(3 * time.Second)
	defer t.Stop()
	found := false

loop:
	for {
		select {
		case <-t.C:
			// target directories MUST sorted as [parent -> child] order.
			// This is due to restriction of notify package.
			//
			// walk to get childs
			logger.Infof("walking directories in %s ...", basePath)
			dirs, err = directory.Dirwalk(basePath)
			if err != nil {
				logger.Error(err)
				logger.Info("retrying to find target directory check ...")
				// break exits only the select; the for loop retries on the
				// next tick.
				break
			}
			// pretend basepath. (DO NOT APPEND TO LAST -> notify must pass parent before watch child)
			dirs = append([]string{basePath}, dirs...)

			// check each directory
			logger.Infof("matching directories with pattern %s ...", pattern.String())
			for _, directory := range dirs {
				dir := directory
				isMatch := pattern.MatchString(dir)
				logger.Infof("(%s) %s", strconv.FormatBool(isMatch), dir)
				if isMatch {
					d := path.Join(dir, e.symlinkName)
					logger.Infof("start checking %s ...", d)
					h := filewatch.Handler{
						Dest:         d,
						FilePattern:  e.option.filePattern,
						SymlinkName:  e.symlinkName,
						Directory:    dir,
						Logger:       logger,
						UseFileEvent: false,
					}
					// one handler goroutine per matched directory
					if e.option.useFileWalk {
						go h.Run(ctx, exit, exitError)
					} else {
						go h.RunEvent(ctx, exit, exitError)
					}
					found = true
				}
			}
			if found {
				break loop
			}
		}
	}
}
|
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package crash
import (
"context"
"os"
"golang.org/x/sys/unix"
"chromiumos/tast/local/crash"
"chromiumos/tast/testing"
)
// init registers the SenderLock test with the Tast framework.
func init() {
	testing.AddTest(&testing.Test{
		Func: SenderLock,
		Desc: "Check that only one crash_sender runs at a time",
		Contacts: []string{
			"mutexlox@chromium.org",
			"iby@chromium.org",
			"cros-telemetry@google.com",
			"nya@chromium.org", // ported to Tast
		},
		Attr: []string{"group:mainline"},
		// We only care about crash_sender on internal builds.
		SoftwareDeps: []string{"cros_internal"},
	})
}
// SenderLock verifies that crash_sender refuses to run while another process
// holds its lock file: it stages a fake crash, takes a write lock on
// /run/lock/crash_sender, then expects RunSender to fail.
func SenderLock(ctx context.Context, s *testing.State) {
	if err := crash.SetUpCrashTest(ctx, crash.FilterCrashes(crash.FilterInIgnoreAllCrashes), crash.WithMockConsent()); err != nil {
		s.Fatal("Setup failed: ", err)
	}
	defer crash.TearDownCrashTest(ctx)

	const basename = "some_program.1.2.3.4"
	if _, err := crash.AddFakeMinidumpCrash(ctx, basename); err != nil {
		s.Fatal("Failed to add a fake minidump crash: ", err)
	}

	// Obtain the crash_sender lock. This should prevent crash_sender from running.
	const lockPath = "/run/lock/crash_sender"
	f, err := os.OpenFile(lockPath, os.O_CREATE|os.O_RDWR, 0644)
	if err != nil {
		s.Fatal("Failed to obtain crash_sender lock: ", err)
	}
	defer f.Close()
	// Take an exclusive (write) advisory lock on the file descriptor.
	if err := unix.FcntlFlock(f.Fd(), unix.F_SETLK, &unix.Flock_t{Type: unix.F_WRLCK}); err != nil {
		s.Fatal("Failed to obtain crash_sender lock: ", err)
	}

	if _, err := crash.RunSender(ctx); err == nil {
		s.Fatal("crash_sender succeeded unexpectedly")
	}
	s.Log("crash_sender failed as expected")
}
|
// Copyright 2014 Aller Media AS. All rights reserved.
// License: GPL3
// Package command provides runnable commands for the cli interface.
// Command unlock provides unlocking options for hanging jobs.
package notifications
import (
"fmt"
log "github.com/Sirupsen/logrus"
"github.com/tbruyelle/hipchat-go/hipchat"
"time"
)
// Notifier delivers a job status Message to some notification channel.
type Notifier interface {
	Notify(Message)
}
// Message summarizes the outcome of one job run for notification purposes.
type Message struct {
	Jobname   string
	Status    string
	TimeTaken time.Duration
	// Rows processed in total; Accepted + Rejected — NOTE(review): presumed
	// relationship, confirm with producers of Message.
	Rows     uint
	Accepted uint
	Rejected uint
}
// String renders the message as a single human-readable summary line.
func (m Message) String() string {
	summary := fmt.Sprintf("%s: processed %d rows; accepted %d and rejected %d in %s",
		m.Jobname, m.Rows, m.Accepted, m.Rejected, m.TimeTaken)
	return summary
}
// HipChat is a Notifier that posts messages to a HipChat room.
type HipChat struct {
	// Token authenticates against the HipChat API.
	Token string
	// Room is the destination room id or name.
	Room string
}
// Notify sends msg as a HipChat room notification. Failures are logged as
// warnings rather than returned.
func (h *HipChat) Notify(msg Message) {
	client := hipchat.NewClient(h.Token)

	// https://www.hipchat.com/docs/apiv2/method/send_room_notification
	req := &hipchat.NotificationRequest{
		Message: msg.String(),
		// Update info here based on what type of notify message we have (status)
		Notify: false, // Send desktop notification
		Color:  "green",
	}

	if _, err := client.Room.Notification(h.Room, req); err != nil {
		log.WithFields(log.Fields{
			"notify": "hipchat",
		}).Warn(err)
	}
}
// String identifies this notifier implementation by name.
func (h *HipChat) String() string {
	const name = "HipChat"
	return name
}
|
package kafka
import (
	"fmt"
	"log"
	"strings"

	"gopkg.in/confluentinc/confluent-kafka-go.v1/kafka"
)
func logPartitions(s string, prts []kafka.TopicPartition) {
for _, p := range prts {
s = fmt.Sprintf("%v | %v", s, p.Partition)
}
log.Println(s)
} |
package moni
import (
"github.com/AsynkronIT/protoactor-go/actor"
"github.com/golang/protobuf/proto"
"github.com/saintEvol/go-rigger/rigger"
)
// Registered process names for the application's supervision tree; each is
// used as the rigger registration key in init below.
const mainApplicationName = "main_application"
const mainApplicationSupName = "main_application_sup"
const userManagerSupName = "user_manager_sup"
const userManagerServerName = "user_manager"
const userSupName = "user_sup"
const userServerName = "user_server"
const loginServerName = "user_login_server"
const missionServerSupName = "mission_server_sup"
const missionServerName = "missions_server"
const restServerSupName = "rest_server_sup"
const restServerName = "rest_server"
// init registers every application, supervisor and server behaviour producer
// with the rigger framework under its process name.
func init() {
	rigger.Register(mainApplicationName, rigger.ApplicationBehaviourProducer(func() rigger.ApplicationBehaviour {
		return &mainApplication{}
	}))
	rigger.Register(mainApplicationSupName, rigger.SupervisorBehaviourProducer(func() rigger.SupervisorBehaviour {
		return &mainApplicationSup{}
	}))
	rigger.Register(userManagerSupName, rigger.SupervisorBehaviourProducer(func() rigger.SupervisorBehaviour {
		return &userManagerSup{}
	}))
	rigger.Register(userManagerServerName, rigger.GeneralServerBehaviourProducer(func() rigger.GeneralServerBehaviour {
		return &userManagerServer{}
	}))
	rigger.Register(userSupName, rigger.SupervisorBehaviourProducer(func() rigger.SupervisorBehaviour {
		return &userSup{}
	}))
	rigger.Register(userServerName, rigger.GeneralServerBehaviourProducer(func() rigger.GeneralServerBehaviour {
		return &userServer{}
	}))
	rigger.Register(loginServerName, rigger.GeneralServerBehaviourProducer(func() rigger.GeneralServerBehaviour {
		return &userLoginServer{}
	}))
	rigger.Register(missionServerSupName, rigger.SupervisorBehaviourProducer(func() rigger.SupervisorBehaviour {
		return &missionServerSup{}
	}))
	rigger.Register(missionServerName, rigger.GeneralServerBehaviourProducer(func() rigger.GeneralServerBehaviour {
		return &missionServer{}
	}))
	rigger.Register(restServerSupName, rigger.SupervisorBehaviourProducer(func() rigger.SupervisorBehaviour {
		return &restServerSup{}
	}))
	rigger.Register(restServerName, rigger.GeneralServerBehaviourProducer(func() rigger.GeneralServerBehaviour {
		return &restServer{}
	}))
}
// mainApplication is the root rigger application behaviour; all lifecycle
// hooks are currently no-ops. Redundant bare `return` statements in void
// methods were removed (staticcheck S1023).
type mainApplication struct {
}

func (m mainApplication) OnRestarting(ctx actor.Context) {
}

func (m mainApplication) OnStarted(ctx actor.Context, args interface{}) error {
	return nil
}

func (m mainApplication) OnPostStarted(ctx actor.Context, args interface{}) {
}

func (m mainApplication) OnStopping(ctx actor.Context) {
}

func (m mainApplication) OnStopped(ctx actor.Context) {
}

// OnGetSupFlag returns zero-value supervisor flags and no child specs.
func (m mainApplication) OnGetSupFlag(ctx actor.Context) (supFlag rigger.SupervisorFlag, childSpecs []*rigger.SpawnSpec) {
	return
}

// mainApplicationSup is the root supervisor behaviour; all hooks are no-ops.
type mainApplicationSup struct {
}

func (m mainApplicationSup) OnRestarting(ctx actor.Context) {
}

func (m mainApplicationSup) OnStarted(ctx actor.Context, args interface{}) error {
	return nil
}

func (m mainApplicationSup) OnPostStarted(ctx actor.Context, args interface{}) {
}

func (m mainApplicationSup) OnStopping(ctx actor.Context) {
}

func (m mainApplicationSup) OnStopped(ctx actor.Context) {
}

// OnGetSupFlag returns zero-value supervisor flags and no child specs.
func (m mainApplicationSup) OnGetSupFlag(ctx actor.Context) (supFlag rigger.SupervisorFlag, childSpecs []*rigger.SpawnSpec) {
	return
}
// userManagerSup supervises the user manager server; all hooks are no-ops.
// Redundant bare `return` statements in void methods removed (staticcheck S1023).
type userManagerSup struct {
}

func (u userManagerSup) OnRestarting(ctx actor.Context) {
}

func (u userManagerSup) OnStarted(ctx actor.Context, args interface{}) error {
	return nil
}

func (u userManagerSup) OnPostStarted(ctx actor.Context, args interface{}) {
}

func (u userManagerSup) OnStopping(ctx actor.Context) {
}

func (u userManagerSup) OnStopped(ctx actor.Context) {
}

// OnGetSupFlag returns zero-value supervisor flags and no child specs.
func (u userManagerSup) OnGetSupFlag(ctx actor.Context) (supFlag rigger.SupervisorFlag, childSpecs []*rigger.SpawnSpec) {
	return
}

// userManagerServer is the user manager general-server behaviour; all hooks
// are no-ops and OnMessage handles nothing yet.
type userManagerServer struct {
}

func (u userManagerServer) OnRestarting(ctx actor.Context) {
}

func (u userManagerServer) OnStarted(ctx actor.Context, args interface{}) error {
	return nil
}

func (u userManagerServer) OnPostStarted(ctx actor.Context, args interface{}) {
}

func (u userManagerServer) OnStopping(ctx actor.Context) {
}

func (u userManagerServer) OnStopped(ctx actor.Context) {
}

func (u userManagerServer) OnMessage(ctx actor.Context, message interface{}) proto.Message {
	return nil
}

// userSup supervises user servers; all hooks are no-ops.
type userSup struct {
}

func (u userSup) OnRestarting(ctx actor.Context) {
}

func (u userSup) OnStarted(ctx actor.Context, args interface{}) error {
	return nil
}

func (u userSup) OnPostStarted(ctx actor.Context, args interface{}) {
}

func (u userSup) OnStopping(ctx actor.Context) {
}

func (u userSup) OnStopped(ctx actor.Context) {
}

// OnGetSupFlag returns zero-value supervisor flags and no child specs.
func (u userSup) OnGetSupFlag(ctx actor.Context) (supFlag rigger.SupervisorFlag, childSpecs []*rigger.SpawnSpec) {
	return
}
// userServer is the per-user general-server behaviour; all hooks are no-ops
// and OnMessage handles nothing yet. Redundant bare `return` statements in
// void methods removed (staticcheck S1023).
type userServer struct {
}

func (u userServer) OnRestarting(ctx actor.Context) {
}

func (u userServer) OnStarted(ctx actor.Context, args interface{}) error {
	return nil
}

func (u userServer) OnPostStarted(ctx actor.Context, args interface{}) {
}

func (u userServer) OnStopping(ctx actor.Context) {
}

func (u userServer) OnStopped(ctx actor.Context) {
}

func (u userServer) OnMessage(ctx actor.Context, message interface{}) proto.Message {
	return nil
}

// userLoginServer handles user login; all hooks are no-ops and OnMessage
// handles nothing yet.
type userLoginServer struct {
}

func (u userLoginServer) OnRestarting(ctx actor.Context) {
}

func (u userLoginServer) OnStarted(ctx actor.Context, args interface{}) error {
	return nil
}

func (u userLoginServer) OnPostStarted(ctx actor.Context, args interface{}) {
}

func (u userLoginServer) OnStopping(ctx actor.Context) {
}

func (u userLoginServer) OnStopped(ctx actor.Context) {
}

func (u userLoginServer) OnMessage(ctx actor.Context, message interface{}) proto.Message {
	return nil
}

// missionServerSup supervises the mission server; all hooks are no-ops.
type missionServerSup struct {
}

func (m missionServerSup) OnRestarting(ctx actor.Context) {
}

func (m missionServerSup) OnStarted(ctx actor.Context, args interface{}) error {
	return nil
}

func (m missionServerSup) OnPostStarted(ctx actor.Context, args interface{}) {
}

func (m missionServerSup) OnStopping(ctx actor.Context) {
}

func (m missionServerSup) OnStopped(ctx actor.Context) {
}

// OnGetSupFlag returns zero-value supervisor flags and no child specs.
func (m missionServerSup) OnGetSupFlag(ctx actor.Context) (supFlag rigger.SupervisorFlag, childSpecs []*rigger.SpawnSpec) {
	return
}
// missionServer is the missions general-server behaviour; all hooks are
// no-ops and OnMessage handles nothing yet. Redundant bare `return`
// statements in void methods removed (staticcheck S1023).
type missionServer struct {
}

func (m missionServer) OnRestarting(ctx actor.Context) {
}

func (m missionServer) OnStarted(ctx actor.Context, args interface{}) error {
	return nil
}

func (m missionServer) OnPostStarted(ctx actor.Context, args interface{}) {
}

func (m missionServer) OnStopping(ctx actor.Context) {
}

func (m missionServer) OnStopped(ctx actor.Context) {
}

func (m missionServer) OnMessage(ctx actor.Context, message interface{}) proto.Message {
	return nil
}

// restServerSup supervises the REST server; all hooks are no-ops.
type restServerSup struct {
}

func (r restServerSup) OnRestarting(ctx actor.Context) {
}

func (r restServerSup) OnStarted(ctx actor.Context, args interface{}) error {
	return nil
}

func (r restServerSup) OnPostStarted(ctx actor.Context, args interface{}) {
}

func (r restServerSup) OnStopping(ctx actor.Context) {
}

func (r restServerSup) OnStopped(ctx actor.Context) {
}

// OnGetSupFlag returns zero-value supervisor flags and no child specs.
func (r restServerSup) OnGetSupFlag(ctx actor.Context) (supFlag rigger.SupervisorFlag, childSpecs []*rigger.SpawnSpec) {
	return
}

// restServer is the REST-facing general-server behaviour; all hooks are
// no-ops and OnMessage handles nothing yet.
type restServer struct {
}

func (r restServer) OnRestarting(ctx actor.Context) {
}

func (r restServer) OnStarted(ctx actor.Context, args interface{}) error {
	return nil
}

func (r restServer) OnPostStarted(ctx actor.Context, args interface{}) {
}

func (r restServer) OnStopping(ctx actor.Context) {
}

func (r restServer) OnStopped(ctx actor.Context) {
}

func (r restServer) OnMessage(ctx actor.Context, message interface{}) proto.Message {
	return nil
}
|
package render
import (
"image"
"image/draw"
"github.com/oakmound/oak/physics"
)
// The Compound type is intended for use to easily swap between multiple
// renderables that are drawn at the same position on the same layer.
// A common use case for this would be a character entity who switches
// their animation based on how they are moving or what they are doing.
//
// The Compound type removes the need to repeatedly draw and undraw elements
// of a character, which has a tendency to leave nothing drawn for a draw frame.
type Compound struct {
	LayeredPoint
	// subRenderables maps a name to each switchable renderable.
	subRenderables map[string]Modifiable
	// curRenderable is the key of the renderable currently drawn.
	curRenderable string
}
// NewCompound creates a Compound from a starting key and a map of names to
// modifiables, positioned at the origin on layer 0.
func NewCompound(start string, m map[string]Modifiable) *Compound {
	c := new(Compound)
	c.LayeredPoint = NewLayeredPoint(0, 0, 0)
	c.subRenderables = m
	c.curRenderable = start
	return c
}
// Add registers v under the key k in the Compound's renderable map,
// replacing any existing entry.
func (c *Compound) Add(k string, v Modifiable) {
	c.subRenderables[k] = v
}
// Set switches the current renderable to the one stored under k, panicking
// if no renderable is registered under that key.
func (c *Compound) Set(k string) {
	_, ok := c.subRenderables[k]
	if !ok {
		panic("Unknown renderable for string " + k + " on compound")
	}
	c.curRenderable = k
}
// GetSub looks up the subrenderable stored under s (nil if absent).
func (c *Compound) GetSub(s string) Modifiable {
	return c.subRenderables[s]
}
// Get reports the key of the currently active renderable.
func (c *Compound) Get() string {
	return c.curRenderable
}
// IsInterruptable reports whether the current renderable may be interrupted;
// types with no notion of interruptability default to true.
func (c *Compound) IsInterruptable() bool {
	switch cur := c.subRenderables[c.curRenderable].(type) {
	case *Animation:
		return cur.Interruptable
	case *Sequence:
		return cur.Interruptable
	case *Reverting:
		return cur.IsInterruptable()
	case *Compound:
		return cur.IsInterruptable()
	default:
		return true
	}
}
// IsStatic returns whether the current renderable is static (it does not
// change between draw frames). Animations and Sequences are always
// dynamic; Reverting and nested Compound values are asked recursively.
// Any other renderable type is treated as static.
func (c *Compound) IsStatic() bool {
	// Bind the switched value once instead of re-asserting the map entry
	// inside each case, as the original did.
	switch t := c.subRenderables[c.curRenderable].(type) {
	case *Animation, *Sequence:
		return false
	case *Reverting:
		return t.IsStatic()
	case *Compound:
		return t.IsStatic()
	}
	return true
}
// SetOffsets sets the logical offset for the specified subrenderable.
// NOTE(review): this assigns the vector as the subrenderable's absolute
// position via SetPos rather than adding a delta — confirm callers expect
// absolute placement. Unknown keys are silently ignored.
func (c *Compound) SetOffsets(k string, offsets physics.Vector) {
	if r, ok := c.subRenderables[k]; ok {
		r.SetPos(offsets.X(), offsets.Y())
	}
}
// Copy returns a deep copy of the Compound: the layered position is
// duplicated and every subrenderable is copied into a fresh map, so the
// copy can be modified independently of the original.
func (c *Compound) Copy() Modifiable {
	dup := &Compound{
		LayeredPoint:   c.LayeredPoint.Copy(),
		subRenderables: make(map[string]Modifiable),
		curRenderable:  c.curRenderable,
	}
	for name, sub := range c.subRenderables {
		dup.subRenderables[name] = sub.Copy()
	}
	return dup
}
// GetRGBA returns the pixel buffer of the currently displayed renderable.
func (c *Compound) GetRGBA() *image.RGBA {
	return c.subRenderables[c.curRenderable].GetRGBA()
}
// Modify performs a series of modifications on the Compound. Each
// modification is applied to every subrenderable, not just the current
// one, so swapping later still shows the modified art.
func (c *Compound) Modify(ms ...Modification) Modifiable {
	for _, r := range c.subRenderables {
		r.Modify(ms...)
	}
	return c
}
// DrawOffset draws the current renderable at the Compound's logical
// position shifted by (xOff, yOff).
func (c *Compound) DrawOffset(buff draw.Image, xOff float64, yOff float64) {
	c.subRenderables[c.curRenderable].DrawOffset(buff, c.X()+xOff, c.Y()+yOff)
}
// Draw draws the current renderable at the Compound's logical location.
func (c *Compound) Draw(buff draw.Image) {
	c.subRenderables[c.curRenderable].DrawOffset(buff, c.X(), c.Y())
}
// ShiftPos shifts the Compound's logical position by (x, y).
func (c *Compound) ShiftPos(x, y float64) {
	c.SetPos(c.X()+x, c.Y()+y)
}
// ShiftY shifts the Compound's logical y position by y.
func (c *Compound) ShiftY(y float64) {
	c.SetPos(c.X(), c.Y()+y)
}
// ShiftX shifts the Compound's logical x position by x.
func (c *Compound) ShiftX(x float64) {
	c.SetPos(c.X()+x, c.Y())
}
// SetPos sets the Compound's absolute logical position by delegating to
// the embedded LayeredPoint.
func (c *Compound) SetPos(x, y float64) {
	c.LayeredPoint.SetPos(x, y)
}
// GetDims returns the dimensions of the currently displayed renderable.
func (c *Compound) GetDims() (int, int) {
	return c.subRenderables[c.curRenderable].GetDims()
}
// Pause stops the current renderable if it is one of the pausable types
// (Animation, Sequence, Reverting); other types are left untouched.
// Note that a nested *Compound has no case here and is not paused.
func (c *Compound) Pause() {
	switch t := c.subRenderables[c.curRenderable].(type) {
	case *Animation:
		t.Pause()
	case *Sequence:
		t.Pause()
	case *Reverting:
		t.Pause()
	}
}
// Unpause resumes the current renderable if it is one of the pausable
// types (Animation, Sequence, Reverting); other types are left untouched.
// Note that a nested *Compound has no case here and is not unpaused.
func (c *Compound) Unpause() {
	switch t := c.subRenderables[c.curRenderable].(type) {
	case *Animation:
		t.Unpause()
	case *Sequence:
		t.Unpause()
	case *Reverting:
		t.Unpause()
	}
}
// Revert steps every sub-renderable that supports reverting (any
// *Reverting) back mod modifications; other renderable types are skipped.
func (c *Compound) Revert(mod int) {
	for _, sub := range c.subRenderables {
		if rv, ok := sub.(*Reverting); ok {
			rv.Revert(mod)
		}
	}
}
// RevertAll fully reverts every sub-renderable that supports reverting
// (any *Reverting); other renderable types are skipped.
func (c *Compound) RevertAll() {
	for _, sub := range c.subRenderables {
		if rv, ok := sub.(*Reverting); ok {
			rv.RevertAll()
		}
	}
}
|
package bob // package name must match the package name in bob_test.go
import (
"strings"
"unicode"
)
const (
	fine     = "Fine. Be that way!"
	sure     = "Sure."
	whatever = "Whatever."
	chill    = "Whoa, chill out!"

	// testVersion pins the exercise test-suite version this solution targets.
	testVersion = 2
)

// Hey returns Bob's answer to the given phrase: "Fine. Be that way!" for
// silence, "Whoa, chill out!" for yelling (checked before questions, so a
// yelled question still counts as yelling), "Sure." for questions, and
// "Whatever." for anything else.
func Hey(phrase string) string {
	if isSayingNothing(phrase) {
		return fine
	}
	if isYelling(phrase) {
		return chill
	}
	if isQuestion(phrase) {
		return sure
	}
	return whatever
}

// isSayingNothing reports whether s is empty or contains only whitespace.
func isSayingNothing(s string) bool {
	return strings.TrimSpace(s) == ""
}

// isYelling reports whether s is shouted: it contains at least one ASCII
// letter and no lowercase letters. (The original additionally compared
// each rune against "!" and "?", which is redundant — unicode.IsLower is
// never true for punctuation.)
func isYelling(s string) bool {
	if !hasLetters(s) {
		return false
	}
	for _, r := range s {
		if unicode.IsLower(r) {
			return false
		}
	}
	return true
}

// isQuestion reports whether s, ignoring surrounding whitespace, ends in "?".
func isQuestion(s string) bool {
	return strings.HasSuffix(strings.TrimSpace(s), "?")
}

// hasLetters reports whether s contains at least one ASCII letter.
func hasLetters(s string) bool {
	return strings.ContainsAny(s, "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
}
|
// Copyright 2020 Ye Zi Jie. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Author: FishGoddess
// Email: fishgoddess@qq.com
// Created at 2020/03/05 00:11:50
package files
import (
"errors"
"os"
"strconv"
"sync"
"time"
)
// SizeRollingFile is a file size sensitive file.
//
//	file := NewSizeRollingFile(64*KB, func(now time.Time) string {
//	    return "D:/" + now.Format("20060102150405.000") + ".log"
//	})
//	defer file.Close()
//	file.Write([]byte("Hello!"))
//
// You can use it like using os.File!
type SizeRollingFile struct {
	// file points the writer which will be used this moment.
	// It stays nil until the first write triggers creation.
	file *os.File
	// directory is the target storing all created files.
	directory string
	// limitedSize is the limited size of this file.
	// File will roll to next file if its size has reached to limitedSize.
	// This field should be always larger than minLimitedSize for some safe considerations.
	// Notice that it doesn't mean every file must be this size due to our performance optimization
	// scheme. Generally it equals to file size, however, it will not equal to file size
	// if someone modified this file. See currentSize.
	limitedSize int64
	// currentSize equals to the size of current file.
	// The currentSize will reset to 0 when rolling to next file.
	// The reason why we set this field is file.Stat() is too expensive!
	// Every writing operations will fetch file size, that means each operation
	// will call file.Stat() for size. It's not a good way to fetch file size.
	// So we keep one field inside, and record current size of current file with it.
	// Each time fetching file size, the only thing we do is checking this field.
	// This way is cheaper, even cheapest. Of course, we should maintain this field
	// inside for precision, so it doesn't mean we won't call file.Stat() anymore.
	// If currentSize >= limitedSize, we will still call file.Stat() for precision.
	// Certainly, we will set currentSize to the real size of file. Hey, you know we
	// won't waste the time we spent on file.Stat() ^_^.
	currentSize int64
	// nameGenerator is for generating the name of every created file.
	// You can customize your format of filename by implementing this function.
	// Default is DefaultNameGenerator().
	nameGenerator NameGenerator
	// mu is a lock for safe concurrency.
	mu *sync.Mutex
}
const (
	// minLimitedSize prevents io system from creating files too fast:
	// a tiny limit would make every few writes roll to a new file.
	// Default is 64 KB (64 * 1024 bytes).
	minLimitedSize = 64 * KB
)
// NewSizeRollingFile creates a new size rolling file that stores its
// files inside directory. limitedSize is how big a file may grow before
// rolling to the next one; it must be at least 64 KB (64 * 1024 bytes,
// see minLimitedSize) or this constructor panics. File names are produced
// by DefaultNameGenerator(); use SetNameGenerator to customize them.
func NewSizeRollingFile(directory string, limitedSize int64) *SizeRollingFile {
	// Guard against a limit so small that rolling would thrash the IO system.
	if limitedSize < minLimitedSize {
		panic(errors.New("LimitedSize is smaller than " + strconv.FormatUint(uint64(minLimitedSize)>>10, 10) + " KB!\n"))
	}
	return &SizeRollingFile{
		directory:     directory,
		limitedSize:   limitedSize,
		currentSize:   0,
		nameGenerator: DefaultNameGenerator(),
		mu:            &sync.Mutex{},
	}
}
// rollingToNextFile will roll to the next file for srf, asking the name
// generator for a name based on now. Callers must hold srf.mu.
func (srf *SizeRollingFile) rollingToNextFile(now time.Time) {
	// If creating the new file fails, keep writing to the current file
	// and retry on the next rolling attempt.
	newFile, err := CreateFileOf(srf.nameGenerator.NextName(srf.directory, now))
	if err != nil {
		return
	}
	// Close the file in use and switch to the fresh one.
	// NOTE(review): on the very first roll srf.file is nil — confirm the
	// nil *os.File Close (which returns an error that is discarded here)
	// is acceptable.
	srf.file.Close()
	srf.file = newFile
	srf.currentSize = 0
}
// ensureFileIsCorrect ensures srf is writing to a correct file this
// moment: it lazily creates the first file and rolls over once the size
// limit is reached. Callers must hold srf.mu.
func (srf *SizeRollingFile) ensureFileIsCorrect() {
	// No file yet: initialize by rolling to the first one.
	if srf.file == nil {
		srf.rollingToNextFile(time.Now())
		return
	}
	// Check whether the tracked size has reached the limit.
	if srf.currentSize >= srf.limitedSize {
		// currentSize may be stale, so ask the OS for the real size.
		fileInfo, err := srf.file.Stat()
		// Roll in two situations:
		// 1. err != nil — the real size is unknown, trust currentSize.
		// 2. the real file size indeed reached limitedSize.
		if err != nil || fileInfo.Size() >= srf.limitedSize {
			srf.rollingToNextFile(time.Now())
			return
		}
		// Otherwise correct currentSize to the real size — don't waste
		// the Stat call we already paid for.
		srf.currentSize = fileInfo.Size()
	}
}
// writeAndUpdateCurrentSize writes p to srf.file and adds the number of
// bytes actually written to srf.currentSize. Callers must hold srf.mu.
func (srf *SizeRollingFile) writeAndUpdateCurrentSize(p []byte) (int, error) {
	n, err := srf.file.Write(p)
	srf.currentSize += int64(n)
	return n, err
}
// Write writes len(p) bytes from p to the underlying data stream.
// It returns the number of bytes written from p (0 <= n <= len(p))
// and any error encountered that caused the write to stop early.
// It is safe for concurrent use.
func (srf *SizeRollingFile) Write(p []byte) (n int, err error) {
	srf.mu.Lock()
	defer srf.mu.Unlock()
	// Make sure the current file is the right one for this moment
	// (creates the first file lazily, rolls over when full).
	srf.ensureFileIsCorrect()
	return srf.writeAndUpdateCurrentSize(p)
}
// Close releases the current underlying file, if any. It is safe to call
// before the first Write: when no file has been created yet there is
// nothing to close and nil is returned (previously Close was forwarded to
// a nil *os.File, which reports a spurious "invalid argument" error).
func (srf *SizeRollingFile) Close() error {
	srf.mu.Lock()
	defer srf.mu.Unlock()
	if srf.file == nil {
		return nil
	}
	return srf.file.Close()
}
// SetNameGenerator replaces srf.nameGenerator with nameGenerator, which
// will be used for all subsequently created files. Safe for concurrent use.
func (srf *SizeRollingFile) SetNameGenerator(nameGenerator NameGenerator) {
	srf.mu.Lock()
	defer srf.mu.Unlock()
	srf.nameGenerator = nameGenerator
}
|
package config
import (
"encoding/json"
"io/ioutil"
)
// GlobalConfig collects every runtime setting of the server: network
// listener parameters, logging switches, and database connection details.
type GlobalConfig struct {
	/* Server configuration */
	ServerName     string
	ServerHost     string
	ServerPort     int
	MaxPackageSize int
	/* Log configuration */
	EnableLog    bool
	LogToConsole bool
	LogPath      string
	/* Database configuration */
	DBType       string
	DBHost       string
	DBPort       int
	DBUser       string
	DBPassword   string
	DBSchemaName string
}
// GlobalConfiguration is the process-wide configuration instance,
// populated with defaults by init and optionally replaced by Load.
var GlobalConfiguration *GlobalConfig
// init populates GlobalConfiguration with built-in defaults so the server
// can run even when no configuration file is loaded.
func init() {
	GlobalConfiguration = &GlobalConfig{
		ServerName:     "Mint Server",
		ServerHost:     "127.0.0.1",
		ServerPort:     30000,
		MaxPackageSize: 2048,
		EnableLog:      true,
		LogToConsole:   true,
		LogPath:        "",
		DBType:         "MySQL",
		DBHost:         "127.0.0.1",
		DBPort:         20000,
		DBUser:         "u1",
		DBPassword:     "123456",
		DBSchemaName:   "minigame"}
}
// Load reads ./config-example.json (relative to the process working
// directory) and unmarshals it over the package-level GlobalConfiguration,
// panicking on any read or parse failure.
// NOTE(review): the receiver gc is never used — Load always rewrites the
// global, whatever instance it is called on; confirm that is intended.
// NOTE(review): the configuration path is hard-coded.
func (gc *GlobalConfig) Load() {
	data, err := ioutil.ReadFile("./config-example.json")
	if err != nil {
		panic(err.Error())
	}
	err = json.Unmarshal(data, &GlobalConfiguration)
	if err != nil {
		panic(err.Error())
	}
}
package list
import (
"errors"
"github.com/mah0x211/github-release-admin/github"
"github.com/mah0x211/github-release-admin/log"
)
// Option controls how releases are listed.
type Option struct {
	// ItemsPerPage is the page size used while fetching releases.
	ItemsPerPage int
	// MaxItems caps the number of collected releases; 0 means no cap.
	MaxItems uint64
	// BranchExists, when true, drops releases whose target branch is gone.
	BranchExists bool
	// Branch, when non-empty, keeps only releases targeting this branch.
	Branch string
}
const (
	// flgReleaseOnly selects only regular (non-draft, non-pre) releases.
	flgReleaseOnly = 0x0
	// flgDraftRelease and flgPreRelease are bit flags requiring the
	// matching attribute; flgAll is their union and disables filtering.
	flgDraftRelease = 0x1
	flgPreRelease   = 0x2
	flgAll          = 0x3
)
// isListTarget reports whether release v passes the flag filter flg and
// the branch filter in o. With flgReleaseOnly, drafts and prereleases are
// rejected; with flgAll, no attribute filtering happens; otherwise each
// set bit demands the matching attribute. Rejections are logged at debug
// level.
func isListTarget(v *github.Release, flg int, o *Option) bool {
	if flg == flgReleaseOnly {
		if v.Draft || v.PreRelease {
			log.Debug("ignore draft or prerelease: %d", v.ID)
			return false
		}
	} else if flg != flgAll {
		if flg&flgDraftRelease != 0 && !v.Draft {
			log.Debug("ignore non-draft release: %d", v.ID)
			return false
		}
		if flg&flgPreRelease != 0 && !v.PreRelease {
			log.Debug("ignore non-prerelease: %d", v.ID)
			return false
		}
	}
	if o.Branch != "" && o.Branch != v.TargetCommitish {
		log.Debug("ignore release that branch does not matched to %q: %d", o.Branch, v.ID)
		return false
	}
	return true
}
// errEOL is a sentinel returned from the fetch callback to stop
// pagination early once enough releases have been collected.
var errEOL = errors.New("eol")
// listup pages through the repository's releases and collects those that
// isListTarget accepts for flg and o. When o.BranchExists is set,
// releases whose target branch no longer exists are skipped. Collection
// stops early via the errEOL sentinel once o.MaxItems (when > 0) releases
// have been gathered.
func listup(ghc *github.Client, flg int, o *Option) ([]*github.Release, error) {
	list := []*github.Release{}
	nitem := uint64(0)
	if err := ghc.FetchRelease(1, o.ItemsPerPage, func(v *github.Release, _ int) error {
		if !isListTarget(v, flg, o) {
			return nil
		} else if o.BranchExists {
			if b, err := ghc.GetBranch(v.TargetCommitish); err != nil {
				return err
			} else if b == nil {
				// branch does not exists
				log.Debug("ignore release that branch %q does not exists: %d", v.TargetCommitish, v.ID)
				return nil
			}
		}
		list = append(list, v)
		nitem++
		if o.MaxItems > 0 && nitem >= o.MaxItems {
			return errEOL
		}
		return nil
	}); err != nil && !errors.Is(err, errEOL) {
		// errors.Is takes (err, target); the original had the arguments
		// reversed, which only worked because errEOL is returned
		// unwrapped — a wrapped sentinel would have leaked as a failure.
		return nil, err
	}
	return list, nil
}
// AllReleases lists every release regardless of its draft/prerelease
// state. flgAll (== flgDraftRelease|flgPreRelease) disables attribute
// filtering in listup; using the named constant states that intent.
func AllReleases(ghc *github.Client, o *Option) ([]*github.Release, error) {
	return listup(ghc, flgAll, o)
}
// PreReleases lists only releases marked as prereleases.
func PreReleases(ghc *github.Client, o *Option) ([]*github.Release, error) {
	return listup(ghc, flgPreRelease, o)
}
// DraftReleases lists only releases still in draft state.
func DraftReleases(ghc *github.Client, o *Option) ([]*github.Release, error) {
	return listup(ghc, flgDraftRelease, o)
}
// Releases lists only regular releases (neither drafts nor prereleases).
func Releases(ghc *github.Client, o *Option) ([]*github.Release, error) {
	return listup(ghc, flgReleaseOnly, o)
}
|
package server
import (
"bytes"
"context"
"fmt"
"testing"
"time"
"github.com/golang/protobuf/jsonpb"
"github.com/stretchr/testify/assert"
"github.com/tilt-dev/tilt/internal/testutils/bufsync"
"github.com/tilt-dev/tilt/internal/testutils"
proto_webview "github.com/tilt-dev/tilt/pkg/webview"
)
// TestViewsHandled verifies that each view written to the websocket is
// delivered to the handler, in order, with its payload intact.
func TestViewsHandled(t *testing.T) {
	f := newWebsocketReaderFixture(t)
	f.start()
	v := &proto_webview.View{Log: "hello world"}
	f.sendView(v)
	f.assertHandlerCallCount(1)
	assert.Equal(t, "hello world", f.handler.lastViewLog)
	v = &proto_webview.View{Log: "goodbye world"}
	f.sendView(v)
	f.assertHandlerCallCount(2)
	assert.Equal(t, "goodbye world", f.handler.lastViewLog)
}
// TestHandlerErrorDoesntStopLoop verifies that a handler error is logged
// but does not terminate the reader loop: the next view is still handled.
func TestHandlerErrorDoesntStopLoop(t *testing.T) {
	f := newWebsocketReaderFixture(t)
	f.start()
	f.handler.nextErr = fmt.Errorf("aw nerts")
	v := &proto_webview.View{Log: "hello world"}
	f.sendView(v)
	f.assertHandlerCallCount(1)
	f.assertLogs("aw nerts")
	// should still be running!
	v = &proto_webview.View{Log: "goodbye world"}
	f.sendView(v)
	f.assertHandlerCallCount(2)
	assert.Equal(t, "goodbye world", f.handler.lastViewLog)
}
// TestNonPersistentReaderExistsAfterHandling verifies that a reader
// created with persistent=false handles exactly one view and then stops.
// NOTE(review): "Exists" in the name looks like a typo for "Exits".
func TestNonPersistentReaderExistsAfterHandling(t *testing.T) {
	f := newWebsocketReaderFixture(t).withPersistent(false)
	f.start()
	v := &proto_webview.View{Log: "hello world"}
	f.sendView(v)
	f.assertHandlerCallCount(1)
	assert.Equal(t, "hello world", f.handler.lastViewLog)
	f.assertDone()
}
// TestWebsocketCloseOnNextReaderError verifies that a read error from the
// connection terminates the reader loop.
func TestWebsocketCloseOnNextReaderError(t *testing.T) {
	f := newWebsocketReaderFixture(t)
	f.start()
	f.conn.readCh <- readerOrErr{err: fmt.Errorf("read error")}
	time.Sleep(10 * time.Millisecond)
	f.assertDone()
}
// websocketReaderFixture bundles a WebsocketReader under test together
// with its fake connection, fake view handler, captured log output, a
// cancellable context, and a channel carrying Listen's final error.
type websocketReaderFixture struct {
	t       *testing.T
	ctx     context.Context
	cancel  context.CancelFunc
	out     *bufsync.ThreadSafeBuffer
	conn    *fakeConn
	handler *fakeViewHandler
	wsr     *WebsocketReader
	done    chan error
}
// newWebsocketReaderFixture wires a fake connection and a fake view
// handler to a persistent WebsocketReader, backed by a cancellable
// context and a buffer capturing log output. tearDown is registered via
// t.Cleanup so the reader is always shut down and checked.
func newWebsocketReaderFixture(t *testing.T) *websocketReaderFixture {
	out := bufsync.NewThreadSafeBuffer()
	baseCtx, _, _ := testutils.ForkedCtxAndAnalyticsForTest(out)
	ctx, cancel := context.WithCancel(baseCtx)
	conn := newFakeConn()
	handler := &fakeViewHandler{}
	ret := &websocketReaderFixture{
		t:       t,
		ctx:     ctx,
		cancel:  cancel,
		out:     out,
		conn:    conn,
		handler: handler,
		wsr:     newWebsocketReader(conn, true, handler),
		done:    make(chan error),
	}
	t.Cleanup(ret.tearDown)
	return ret
}
// withPersistent overrides the reader's persistent flag (set to true by
// the constructor) and returns the fixture for chaining.
func (f *websocketReaderFixture) withPersistent(persistent bool) *websocketReaderFixture {
	f.wsr.persistent = persistent
	return f
}
// start runs the reader loop in a goroutine; Listen's final error is
// delivered on f.done, which is then closed.
func (f *websocketReaderFixture) start() {
	go func() {
		err := f.wsr.Listen(f.ctx)
		f.done <- err
		close(f.done)
	}()
}
// sendView marshals v to JSON (jsonpb) and queues it as the next message
// on the fake connection.
func (f *websocketReaderFixture) sendView(v *proto_webview.View) {
	buf := &bytes.Buffer{}
	err := (&jsonpb.Marshaler{}).Marshal(buf, v)
	assert.NoError(f.t, err)
	f.conn.newMessageToRead(buf)
}
// assertHandlerCallCount polls until the handler has been called n times,
// failing the test if the count is still wrong after the ~10ms deadline
// (one final check is allowed after the context fires).
// NOTE(review): callCount is written by the reader goroutine and read
// here without synchronization — confirm under `go test -race`.
func (f *websocketReaderFixture) assertHandlerCallCount(n int) {
	ctx, cancel := context.WithTimeout(f.ctx, time.Millisecond*10)
	defer cancel()
	isCanceled := false
	for {
		if f.handler.callCount == n {
			return
		}
		if isCanceled {
			f.t.Fatalf("Timed out waiting for handler.callCount = %d (got: %d)",
				n, f.handler.callCount)
		}
		select {
		case <-ctx.Done():
			// Let the loop run the check one more time
			isCanceled = true
		case <-time.After(time.Millisecond):
		}
	}
}
// assertLogs waits up to a second for msg to appear in the captured logs.
func (f *websocketReaderFixture) assertLogs(msg string) {
	f.out.AssertEventuallyContains(f.t, msg, time.Second)
}
// tearDown cancels the reader's context and verifies it shuts down cleanly.
func (f *websocketReaderFixture) tearDown() {
	f.cancel()
	f.assertDone()
}
// assertDone waits up to 100ms for the reader loop to finish and asserts
// that Listen returned no error.
func (f *websocketReaderFixture) assertDone() {
	select {
	case <-time.After(100 * time.Millisecond):
		f.t.Fatal("timed out waiting for close")
	case err := <-f.done:
		assert.NoError(f.t, err)
	}
}
// fakeViewHandler records how often it was called, remembers the last
// view's Log field, and can be armed with a one-shot error via nextErr.
type fakeViewHandler struct {
	callCount   int
	lastViewLog string // use the Log field to differentiate the views we send, cuz why not
	nextErr     error
}
// Handle counts the call, surfaces a queued one-shot error if one was
// staged via nextErr, and otherwise remembers the view's Log field so
// tests can assert on the last view seen.
func (fvh *fakeViewHandler) Handle(v *proto_webview.View) error {
	fvh.callCount++
	if queued := fvh.nextErr; queued != nil {
		fvh.nextErr = nil
		return queued
	}
	fvh.lastViewLog = v.Log
	return nil
}
|
package unc
import (
"bytes"
"go/ast"
"go/format"
"log"
"golang.org/x/tools/go/analysis"
"golang.org/x/tools/go/analysis/passes/inspect"
"golang.org/x/tools/go/ast/inspector"
)
// Analyzer reports C-style nil checks (binary expressions with nil on the
// left-hand side) and suggests reordering the operands.
var Analyzer = &analysis.Analyzer{
	Name:     "unc",
	Doc:      "Reports c style nil checks.",
	Run:      run,
	Requires: []*analysis.Analyzer{inspect.Analyzer},
}
// run inspects every binary expression and reports those whose left-hand
// side is the predeclared nil identifier (C-style `nil == x`), offering a
// suggested fix that swaps the operands.
func run(pass *analysis.Pass) (interface{}, error) {
	inspector := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
	fileFilter := []ast.Node{
		(*ast.BinaryExpr)(nil),
	}
	inspector.Preorder(fileFilter, func(node ast.Node) {
		be, ok := node.(*ast.BinaryExpr)
		if !ok {
			// The filter should deliver only *ast.BinaryExpr, but the
			// original discarded this assertion result (the first ok was
			// immediately overwritten without being checked).
			return
		}
		x, ok := be.X.(*ast.Ident)
		if !ok {
			return
		}
		// x.Obj == nil distinguishes the predeclared nil from a local
		// identifier that happens to shadow the name.
		if x.Name == "nil" && x.Obj == nil {
			buf := &bytes.Buffer{}
			if err := format.Node(buf, pass.Fset, be.Y); err != nil {
				log.Fatalf("failed to format new node: %+v", err)
			}
			pass.Report(analysis.Diagnostic{
				Pos:     be.Pos(),
				Message: "lhs nil",
				SuggestedFixes: []analysis.SuggestedFix{{
					Message: "Reorder nil check",
					TextEdits: []analysis.TextEdit{
						{
							Pos:     be.X.Pos(),
							End:     be.X.End(),
							NewText: buf.Bytes(),
						},
						{
							Pos:     be.Y.Pos(),
							End:     be.Y.End(),
							NewText: []byte("nil"),
						},
					},
				}},
			})
		}
	})
	return nil, nil
}
|
package command
import (
"fmt"
"net/http"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/BlackCodes/logbud/flag"
"github.com/rs/zerolog/log"
)
// Build compiles a Go project and manages the resulting binaries.
type Build struct {
	// projectDir is where finished binaries are moved to.
	projectDir string
	// buildDir is where `go build` runs and emits its output.
	buildDir string
}
// NewBuild returns a Build operating on the given build and project
// directories.
func NewBuild(buildDir, projectDir string) *Build {
	return &Build{projectDir: projectDir, buildDir: buildDir}
}
// Build removes any stale binaries and then runs `go build` (with any
// extra flag.BuildArgs appended) through /bin/bash in the build
// directory.
// NOTE(review): the condition checks projectDir but chdirs to buildDir —
// confirm which directory was intended. The Chdir error is also ignored.
func (b *Build) Build() error {
	build := "go build "
	if len(flag.BuildArgs) > 0 {
		build += flag.BuildArgs
	}
	if len(b.projectDir) > 0 {
		os.Chdir(b.buildDir)
	}
	if err := b.clearBinary(); err != nil {
		log.Err(err).Msg("clear binary error")
		return err
	}
	fmt.Println(build)
	cmd := exec.Command("/bin/bash", "-c", build)
	_, err := cmd.Output()
	if err != nil {
		log.Err(err).Msg("build failed")
		return err
	}
	return nil
}
// GetBinary returns the path of the first binary found in the build
// directory, or an error when the directory contains none.
func (b *Build) GetBinary() (string, error) {
	bf, err := b.findBinary()
	if err != nil {
		return "", err
	}
	if len(bf) > 0 {
		return bf[0], nil
	}
	// Fixes the typo in the original message ("nof found binary").
	return "", fmt.Errorf("no binary found")
}
// Copy moves every binary found in the build directory into the project
// directory, keeping each file's base name. Despite the name, files are
// relocated (renamed), not duplicated.
func (b *Build) Copy() error {
	bf, err := b.findBinary()
	if err != nil {
		return err
	}
	for _, s := range bf {
		// filepath.Join builds an OS-correct path instead of gluing the
		// separator by hand with Sprintf.
		newPath := filepath.Join(b.projectDir, filepath.Base(s))
		if err := os.Rename(s, newPath); err != nil {
			return err
		}
	}
	return nil
}
// clearBinary deletes every binary currently present in the build
// directory so a fresh build cannot be confused with a stale artifact.
func (b *Build) clearBinary() error {
	binaries, err := b.findBinary()
	if err != nil {
		return err
	}
	for _, path := range binaries {
		if rmErr := os.Remove(path); rmErr != nil {
			log.Err(rmErr).Msg("remove exist binary file error")
			return rmErr
		}
	}
	return nil
}
// findBinary scans the top level of the build directory and returns the
// paths of regular files whose sniffed MIME type starts with
// "application" (i.e. likely compiled binaries rather than text assets).
func (b *Build) findBinary() ([]string, error) {
	files, err := filepath.Glob(b.buildDir + "/*")
	binaries := make([]string, 0, 5)
	if err != nil {
		return binaries, err
	}
	for _, f := range files {
		fh, err := os.Open(f)
		if err != nil {
			log.Err(err).Msg("open file error")
			continue
		}
		info, err := fh.Stat()
		if err != nil {
			// The original discarded this error, which would have
			// dereferenced a nil FileInfo below.
			log.Err(err).Str("file", f).Msg("stat file error")
			fh.Close()
			continue
		}
		if info.IsDir() {
			fh.Close()
			continue
		}
		mType, err := b.mimeType(fh)
		// Close inside the loop on every path; the original never closed
		// the handles, leaking one descriptor per scanned file.
		fh.Close()
		if err != nil {
			log.Err(err).Str("file", f).Msg("get file mime type error")
			continue
		}
		if strings.HasPrefix(mType, "application") {
			binaries = append(binaries, f)
		}
	}
	return binaries, nil
}
// mimeType sniffs the content type of f from its first bytes
// (http.DetectContentType considers at most 512). Only the bytes actually
// read are passed to the detector: the original always handed over the
// full zero-padded 512-byte buffer, which can skew detection for files
// shorter than 512 bytes. Reading advances f's offset.
func (b *Build) mimeType(f *os.File) (string, error) {
	buf := make([]byte, 512)
	n, err := f.Read(buf)
	if err != nil {
		return "", err
	}
	return http.DetectContentType(buf[:n]), nil
}
|
package main
import "fmt"
// Person is a basic identity record used to demonstrate embedding.
type Person struct {
	First string
	Last  string
	Age   int
}
// DoubleZero embeds Person and declares its own First field, which
// shadows the promoted Person.First at the outer level.
type DoubleZero struct {
	Person
	First         string
	LicenseToKill bool
}
// main demonstrates field promotion and shadowing with embedded structs:
// the outer First hides the embedded Person.First, which stays reachable
// through the explicit Person selector.
func main() {
	agent := DoubleZero{
		Person: Person{
			First: "Denis",
			Last:  "John",
			Age:   30,
		},
		First:         "Denison",
		LicenseToKill: true,
	}
	// fields and methods of the inner-types are promoted to the outer-type
	fmt.Println(agent.First)
	fmt.Println(agent.Person.First)
}
|
package swagger
import (
"fmt"
"net/http"
"strings"
"github.com/gorilla/mux"
)
// Route describes one HTTP endpoint: a name for logging, the HTTP method,
// the gorilla/mux path pattern, and the handler to invoke.
type Route struct {
	Name        string
	Method      string
	Pattern     string
	HandlerFunc http.HandlerFunc
}
// Routes is the collection type for the package's routing table.
type Routes []Route
// NewRouter builds a gorilla/mux router from the routes table, wrapping
// every handler with JSON response headers and request logging.
func NewRouter() *mux.Router {
	router := mux.NewRouter().StrictSlash(true)
	for _, route := range routes {
		var handler http.Handler
		handler = route.HandlerFunc
		// Middleware order: Logger runs around the handler, JsonResp
		// around both.
		handler = JsonResp(Logger(handler, route.Name))
		router.
			Methods(route.Method).
			Path(route.Pattern).
			Name(route.Name).
			Handler(handler)
	}
	return router
}
// Index handles the root path by replying with a fixed greeting.
func Index(w http.ResponseWriter, r *http.Request) {
	fmt.Fprint(w, "Hello World!")
}
// routes is the static routing table: one entry per endpoint, mapping an
// HTTP method and mux path pattern to its handler. The Verifications*
// handlers are defined elsewhere in this package.
var routes = Routes{
	Route{
		"Index",
		"GET",
		"/",
		Index,
	},
	Route{
		"VerificationsGet",
		strings.ToUpper("Get"),
		"/verifications",
		VerificationsGet,
	},
	Route{
		"VerificationsPost",
		strings.ToUpper("Post"),
		"/verifications",
		VerificationsPost,
	},
	Route{
		"VerificationsVerificationTitleDelete",
		strings.ToUpper("Delete"),
		"/verifications/{verification_title}",
		VerificationsVerificationTitleDelete,
	},
	Route{
		"VerificationsVerificationTitleGet",
		strings.ToUpper("Get"),
		"/verifications/{verification_title}",
		VerificationsVerificationTitleGet,
	},
	Route{
		"VerificationsVerificationTitlePut",
		strings.ToUpper("Put"),
		"/verifications/{verification_title}",
		VerificationsVerificationTitlePut,
	},
	Route{
		"VerificationsVerificationTitleStartGet",
		strings.ToUpper("Get"),
		"/verifications/{verification_title}/start",
		VerificationsVerificationTitleStartGet,
	},
	Route{
		"VerificationsVerificationTitleLastGet",
		strings.ToUpper("Get"),
		"/verifications/{verification_title}/last",
		VerificationsVerificationTitleLastGet,
	},
}
|
package main
import (
"fmt"
"runtime"
"sync"
)
// init announces the demo on startup.
// NOTE(review): the printed text mentions a mutex, but this program
// synchronizes with WaitGroups only.
func init() {
	fmt.Println("this is init function. I'm learning about mutex")
}
// wgGlobal waits for both worker goroutines to finish.
var wgGlobal sync.WaitGroup

// wgF and wgB are used as ping-pong gates between foo and bar: each
// goroutine waits on its own group and releases the other's.
var wgF sync.WaitGroup
var wgB sync.WaitGroup
// main starts foo and bar, which alternate printing by passing a token
// back and forth through wgF/wgB, then waits for both to finish.
// NOTE(review): reusing WaitGroups as semaphores runs Add concurrently
// with Wait, which the sync docs disallow — verify with `go test -race`.
func main() {
	fmt.Printf("Go Routines:\t %v\n", runtime.NumGoroutine())
	wgGlobal.Add(2)
	wgB.Add(1)
	go foo()
	go bar()
	fmt.Printf("Go Routines:\t %v\n", runtime.NumGoroutine())
	wgGlobal.Wait()
}
// foo counts 0..100: it waits until wgF is free, prints, releases bar via
// wgB.Done, and re-arms its own gate with wgF.Add. Signals wgGlobal when
// finished.
func foo() {
	for i := 0; i <= 100; i++ {
		wgF.Wait()
		fmt.Printf("%v \n", i)
		wgB.Done()
		wgF.Add(1)
	}
	wgGlobal.Done()
}
// bar counts 100..0: it waits until wgB is free, prints, releases foo via
// wgF.Done, and re-arms its own gate with wgB.Add. Signals wgGlobal when
// finished.
func bar() {
	for i := 100; i >= 0; i-- {
		wgB.Wait()
		fmt.Printf("\t%v \n", i)
		wgF.Done()
		wgB.Add(1)
	}
	wgGlobal.Done()
}
|
// Copyright (c) KwanJunWen
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
package estemplate
import "fmt"
// DatatypeMapperMurmur3 (Plugin) Specialised Datatype to compute hashes of values at index-time
// and store them in the index. Typically used within a multi-field, so that both the original value
// and its hash are stored in the index.
//
// See https://www.elastic.co/guide/en/elasticsearch/plugins/7.5/mapper-murmur3.html
// for details.
type DatatypeMapperMurmur3 struct {
	Datatype
	// name is the field key this datatype is registered under.
	name string
	// copyTo lists target fields for Elasticsearch's copy_to feature.
	copyTo []string

	// fields specific to mapper murmur3 datatype
}
// NewDatatypeMapperMurmur3 initializes a new DatatypeMapperMurmur3 with
// the given field name.
func NewDatatypeMapperMurmur3(name string) *DatatypeMapperMurmur3 {
	return &DatatypeMapperMurmur3{
		name: name,
	}
}
// Name returns the field key for the Datatype.
func (m3 *DatatypeMapperMurmur3) Name() string {
	return m3.name
}
// CopyTo appends the field(s) to copy to, which allows the values of
// multiple fields to be queried as a single field. Returns the receiver
// for chaining.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/copy-to.html
// for details.
func (m3 *DatatypeMapperMurmur3) CopyTo(copyTo ...string) *DatatypeMapperMurmur3 {
	m3.copyTo = append(m3.copyTo, copyTo...)
	return m3
}
// Validate validates DatatypeMapperMurmur3, reporting any required field
// that is missing. Name is only required when includeName is true.
func (m3 *DatatypeMapperMurmur3) Validate(includeName bool) error {
	var missing []string
	if includeName && m3.name == "" {
		missing = append(missing, "Name")
	}
	if len(missing) == 0 {
		return nil
	}
	return fmt.Errorf("missing required fields: %v", missing)
}
// Source returns the serializable JSON for the source builder.
//
// Example output:
//
//	{
//	  "test": {
//	    "type": "murmur3",
//	    "copy_to": ["field_1", "field_2"]
//	  }
//	}
func (m3 *DatatypeMapperMurmur3) Source(includeName bool) (interface{}, error) {
	options := make(map[string]interface{})
	options["type"] = "murmur3"
	// A single copy_to target serializes as a bare string, several as an
	// array. The original switch carried redundant `break` statements
	// (Go cases never fall through) and an unreachable default branch.
	if len(m3.copyTo) == 1 {
		options["copy_to"] = m3.copyTo[0]
	} else if len(m3.copyTo) > 1 {
		options["copy_to"] = m3.copyTo
	}
	if !includeName {
		return options, nil
	}
	source := make(map[string]interface{})
	source[m3.name] = options
	return source, nil
}
|
package dynakube
import (
"context"
"net/http"
"os"
"time"
dynatracev1beta1 "github.com/Dynatrace/dynatrace-operator/src/api/v1beta1"
"github.com/Dynatrace/dynatrace-operator/src/controllers/dynakube/activegate"
"github.com/Dynatrace/dynatrace-operator/src/controllers/dynakube/apimonitoring"
"github.com/Dynatrace/dynatrace-operator/src/controllers/dynakube/connectioninfo"
"github.com/Dynatrace/dynatrace-operator/src/controllers/dynakube/deploymentmetadata"
"github.com/Dynatrace/dynatrace-operator/src/controllers/dynakube/dtpullsecret"
"github.com/Dynatrace/dynatrace-operator/src/controllers/dynakube/dynatraceclient"
"github.com/Dynatrace/dynatrace-operator/src/controllers/dynakube/istio"
"github.com/Dynatrace/dynatrace-operator/src/controllers/dynakube/oneagent"
"github.com/Dynatrace/dynatrace-operator/src/controllers/dynakube/status"
"github.com/Dynatrace/dynatrace-operator/src/controllers/dynakube/token"
"github.com/Dynatrace/dynatrace-operator/src/controllers/dynakube/version"
"github.com/Dynatrace/dynatrace-operator/src/dtclient"
dtingestendpoint "github.com/Dynatrace/dynatrace-operator/src/ingestendpoint"
"github.com/Dynatrace/dynatrace-operator/src/initgeneration"
"github.com/Dynatrace/dynatrace-operator/src/kubeobjects"
"github.com/Dynatrace/dynatrace-operator/src/kubesystem"
"github.com/Dynatrace/dynatrace-operator/src/mapper"
"github.com/pkg/errors"
"github.com/spf13/afero"
appsv1 "k8s.io/api/apps/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/rest"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
const (
	// errorUpdateInterval is the requeue delay after a reconcile error.
	errorUpdateInterval = 1 * time.Minute
	// changesUpdateInterval is the requeue delay after the status changed.
	changesUpdateInterval = 5 * time.Minute
	// defaultUpdateInterval is the requeue delay for a quiet reconcile.
	defaultUpdateInterval = 30 * time.Minute
)
// Add reads the kube-system namespace UID (used as the cluster ID) and
// registers a new DynaKube controller with the manager. The second
// parameter is unused.
func Add(mgr manager.Manager, _ string) error {
	kubeSysUID, err := kubesystem.GetUID(mgr.GetAPIReader())
	if err != nil {
		return errors.WithStack(err)
	}
	return NewController(mgr, string(kubeSysUID)).SetupWithManager(mgr)
}
// NewController returns a new ReconcileDynaKube, wiring the manager's
// client, API reader, scheme, and REST config into the controller.
func NewController(mgr manager.Manager, clusterID string) *Controller {
	return NewDynaKubeController(mgr.GetClient(), mgr.GetAPIReader(), mgr.GetScheme(), mgr.GetConfig(), clusterID)
}
// NewDynaKubeController assembles a Controller from its dependencies,
// defaulting the filesystem to the real OS filesystem and reading the
// operator namespace from the POD_NAMESPACE environment variable.
func NewDynaKubeController(kubeClient client.Client, apiReader client.Reader, scheme *runtime.Scheme, config *rest.Config, clusterID string) *Controller { //nolint:revive // maximum number of return results per function exceeded; max 3 but got 4
	return &Controller{
		client:                 kubeClient,
		apiReader:              apiReader,
		scheme:                 scheme,
		fs:                     afero.Afero{Fs: afero.NewOsFs()},
		dynatraceClientBuilder: dynatraceclient.NewBuilder(apiReader),
		config:                 config,
		operatorNamespace:      os.Getenv("POD_NAMESPACE"),
		clusterID:              clusterID,
	}
}
// SetupWithManager registers the controller to watch DynaKube objects and
// the StatefulSets/DaemonSets they own.
func (controller *Controller) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&dynatracev1beta1.DynaKube{}).
		Owns(&appsv1.StatefulSet{}).
		Owns(&appsv1.DaemonSet{}).
		Complete(controller)
}
// Controller reconciles a DynaKube object.
type Controller struct {
	// This client, initialized using mgr.Client() above, is a split client
	// that reads objects from the cache and writes to the api-server
	client client.Client
	// apiReader reads directly from the API server, bypassing the cache.
	apiReader client.Reader
	scheme    *runtime.Scheme
	fs        afero.Afero
	// dynatraceClientBuilder constructs Dynatrace API clients per reconcile.
	dynatraceClientBuilder dynatraceclient.Builder
	config                 *rest.Config
	// operatorNamespace is taken from POD_NAMESPACE at construction time.
	operatorNamespace string
	clusterID         string
}
// Reconcile reads that state of the cluster for a DynaKube object and makes changes based on the state read
// and what is in the DynaKube.Spec
// a Pod as an example
// Note:
// The Controller will requeue the Request to be processed again if the returned error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
func (controller *Controller) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
	log.Info("reconciling DynaKube", "namespace", request.Namespace, "name", request.Name)
	requeueAfter := defaultUpdateInterval
	dynakube, err := controller.getDynakubeOrUnmap(ctx, request.Name, request.Namespace)
	if err != nil {
		return reconcile.Result{}, err
	} else if dynakube == nil {
		// The DynaKube was deleted and its namespaces unmapped; nothing to do.
		return reconcile.Result{}, nil
	}
	// Snapshot the status so we only write it back when it actually changed.
	oldStatus := *dynakube.Status.DeepCopy()
	updated := controller.reconcileIstio(dynakube)
	if updated {
		log.Info("istio: objects updated")
	}
	err = controller.reconcileDynaKube(ctx, dynakube)
	if err != nil {
		requeueAfter = errorUpdateInterval
		var serverErr dtclient.ServerError
		isServerError := errors.As(err, &serverErr)
		if isServerError && serverErr.Code == http.StatusTooManyRequests {
			// should we set the phase to error ?
			// Rate-limited by the Dynatrace API: back off without
			// surfacing the error to the workqueue.
			log.Info("request limit for Dynatrace API reached! Next reconcile in one minute")
			return reconcile.Result{RequeueAfter: requeueAfter}, nil
		}
		dynakube.Status.SetPhase(dynatracev1beta1.Error)
		log.Error(err, "error reconciling DynaKube", "namespace", dynakube.Namespace, "name", dynakube.Name)
	} else {
		dynakube.Status.SetPhase(controller.determineDynaKubePhase(dynakube))
	}
	if isStatusDifferent, err := kubeobjects.IsDifferent(oldStatus, dynakube.Status); err != nil {
		log.Error(err, "failed to generate hash for the status section")
	} else if isStatusDifferent {
		log.Info("status changed, updating DynaKube")
		requeueAfter = changesUpdateInterval
		if errClient := controller.updateDynakubeStatus(ctx, dynakube); errClient != nil {
			return reconcile.Result{}, errors.WithMessagef(errClient, "failed to update DynaKube after failure, original error: %s", err)
		}
	}
	return reconcile.Result{RequeueAfter: requeueAfter}, err
}
// getDynakubeOrUnmap fetches the named DynaKube from the API server. When
// it no longer exists, the mapper removes its markers from affected
// namespaces and (nil, nil) is returned so the caller can stop; any other
// API error is returned wrapped with a stack trace.
func (controller *Controller) getDynakubeOrUnmap(ctx context.Context, dkName, dkNamespace string) (*dynatracev1beta1.DynaKube, error) {
	dynakube := &dynatracev1beta1.DynaKube{
		ObjectMeta: metav1.ObjectMeta{
			Name:      dkName,
			Namespace: dkNamespace,
		},
	}
	err := controller.apiReader.Get(ctx, client.ObjectKey{Name: dynakube.Name, Namespace: dynakube.Namespace}, dynakube)
	if k8serrors.IsNotFound(err) {
		return nil, controller.createDynakubeMapper(ctx, dynakube).UnmapFromDynaKube()
	} else if err != nil {
		return nil, errors.WithStack(err)
	}
	return dynakube, nil
}
// createDynakubeMapper builds the namespace mapper for the given DynaKube,
// scoped to the operator namespace.
func (controller *Controller) createDynakubeMapper(ctx context.Context, dynakube *dynatracev1beta1.DynaKube) *mapper.DynakubeMapper {
	dkMapper := mapper.NewDynakubeMapper(ctx, controller.client, controller.apiReader, controller.operatorNamespace, dynakube)
	return &dkMapper
}
// reconcileIstio reconciles Istio objects when the DynaKube enables Istio,
// returning whether anything was updated. Reconcile errors are logged and
// otherwise ignored so the main reconcile loop can proceed.
func (controller *Controller) reconcileIstio(dynakube *dynatracev1beta1.DynaKube) bool {
	var err error
	updated := false
	if dynakube.Spec.EnableIstio {
		updated, err = istio.NewReconciler(controller.config, controller.scheme).Reconcile(dynakube)
		if err != nil {
			// If there are errors log them, but move on.
			log.Info("istio: failed to reconcile objects", "error", err)
		}
	}
	return updated
}
// reconcileDynaKube runs the ordered reconciliation pipeline for a single
// DynaKube: token reading/verification, Dynatrace API client construction,
// status refresh, then the component reconcilers (pull secret, connection
// info, deployment metadata, versions, ActiveGate, OneAgent, app injection).
// The order matters — later steps use the tokens/client established earlier.
// The first failing step aborts the pipeline and its error is returned.
func (controller *Controller) reconcileDynaKube(ctx context.Context, dynakube *dynatracev1beta1.DynaKube) error {
	// Read the API tokens; record a token condition on failure.
	tokenReader := token.NewReader(controller.apiReader, dynakube)
	tokens, err := tokenReader.ReadTokens(ctx)
	if err != nil {
		controller.setConditionTokenError(dynakube, err)
		return err
	}
	// Build a Dynatrace API client, verifying the tokens in the process.
	dynatraceClientBuilder := controller.dynatraceClientBuilder.
		SetContext(ctx).
		SetDynakube(*dynakube).
		SetTokens(tokens)
	dynatraceClient, err := dynatraceClientBuilder.BuildWithTokenVerification(&dynakube.Status)
	if err != nil {
		controller.setConditionTokenError(dynakube, err)
		return err
	}
	controller.setConditionTokenReady(dynakube)
	// Refresh status fields derived from the Dynatrace environment.
	err = status.SetDynakubeStatus(dynakube, status.Options{
		DtClient:  dynatraceClient,
		ApiReader: controller.apiReader,
	})
	if err != nil {
		log.Info("could not update Dynakube status")
		return err
	}
	// Registry pull secret used to pull component images.
	err = dtpullsecret.
		NewReconciler(ctx, controller.client, controller.apiReader, controller.scheme, dynakube, tokens).
		Reconcile()
	if err != nil {
		log.Info("could not reconcile Dynatrace pull secret")
		return err
	}
	err = connectioninfo.NewReconciler(ctx, controller.client, controller.apiReader, controller.scheme, dynakube, dynatraceClient).Reconcile()
	if err != nil {
		return err
	}
	err = deploymentmetadata.NewReconciler(ctx, controller.client, controller.apiReader, controller.scheme, *dynakube, controller.clusterID).Reconcile()
	if err != nil {
		return err
	}
	// Resolve component image versions before rolling out workloads.
	versionReconciler := version.Reconciler{
		Dynakube:        dynakube,
		ApiReader:       controller.apiReader,
		Fs:              controller.fs,
		VersionProvider: version.GetImageVersion,
		TimeProvider:    kubeobjects.NewTimeProvider(),
	}
	err = versionReconciler.Reconcile(ctx)
	if err != nil {
		log.Info("could not reconcile component versions")
		return err
	}
	err = controller.reconcileActiveGate(ctx, dynakube, dynatraceClient)
	if err != nil {
		log.Info("could not reconcile ActiveGate")
		return err
	}
	err = controller.reconcileOneAgent(ctx, dynakube)
	if err != nil {
		log.Info("could not reconcile OneAgent")
		return err
	}
	err = controller.reconcileAppInjection(ctx, dynakube)
	if err != nil {
		log.Info("could not reconcile app injection")
		return err
	}
	return nil
}
// reconcileAppInjection sets up application injection when the DynaKube
// requires it and tears it down otherwise.
func (controller *Controller) reconcileAppInjection(ctx context.Context, dynakube *dynatracev1beta1.DynaKube) error {
	if !dynakube.NeedAppInjection() {
		return controller.removeAppInjection(ctx, dynakube)
	}
	return controller.setupAppInjection(ctx, dynakube)
}
// setupAppInjection prepares app injection: it maps the DynaKube to its
// target namespaces and generates the init and data-ingest secrets. In
// application-monitoring mode the DynaKube phase is set to Running.
func (controller *Controller) setupAppInjection(ctx context.Context, dynakube *dynatracev1beta1.DynaKube) (err error) {
	endpointSecretGenerator := dtingestendpoint.NewEndpointSecretGenerator(controller.client, controller.apiReader, dynakube.Namespace)
	dkMapper := controller.createDynakubeMapper(ctx, dynakube)
	// Record which namespaces this DynaKube injects into.
	if err = dkMapper.MapFromDynakube(); err != nil {
		log.Info("update of a map of namespaces failed")
		return err
	}
	// Secret consumed by the init containers of injected pods.
	err = initgeneration.NewInitGenerator(controller.client, controller.apiReader, dynakube.Namespace).GenerateForDynakube(ctx, dynakube)
	if err != nil {
		log.Info("failed to generate init secret")
		return err
	}
	// Secret carrying the data-ingest endpoint configuration.
	err = endpointSecretGenerator.GenerateForDynakube(ctx, dynakube)
	if err != nil {
		log.Info("failed to generate data-ingest secret")
		return err
	}
	if dynakube.ApplicationMonitoringMode() {
		dynakube.Status.SetPhase(dynatracev1beta1.Running)
	}
	log.Info("app injection reconciled")
	return nil
}
// removeAppInjection reverts app-injection setup: the DynaKube is unmapped
// from its namespaces and the generated data-ingest endpoint secrets are
// removed.
func (controller *Controller) removeAppInjection(ctx context.Context, dynakube *dynatracev1beta1.DynaKube) (err error) {
	endpointSecretGenerator := dtingestendpoint.NewEndpointSecretGenerator(controller.client, controller.apiReader, dynakube.Namespace)
	dkMapper := controller.createDynakubeMapper(ctx, dynakube)
	if err = dkMapper.UnmapFromDynaKube(); err != nil {
		log.Info("could not unmap dynakube from namespace")
		return err
	}
	if err = endpointSecretGenerator.RemoveEndpointSecrets(ctx, dynakube); err != nil {
		log.Info("could not remove data-ingest secret")
		return err
	}
	return nil
}
// reconcileOneAgent rolls out the OneAgent when required; otherwise any
// previously deployed OneAgent daemonset is deleted.
func (controller *Controller) reconcileOneAgent(ctx context.Context, dynakube *dynatracev1beta1.DynaKube) error {
	if dynakube.NeedsOneAgent() {
		return oneagent.NewOneAgentReconciler(
			controller.client, controller.apiReader, controller.scheme, controller.clusterID,
		).Reconcile(ctx, dynakube)
	}
	return controller.removeOneAgentDaemonSet(ctx, dynakube)
}
// removeOneAgentDaemonSet deletes the DynaKube's OneAgent daemonset, if any.
func (controller *Controller) removeOneAgentDaemonSet(ctx context.Context, dynakube *dynatracev1beta1.DynaKube) error {
	ds := appsv1.DaemonSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:      dynakube.OneAgentDaemonsetName(),
			Namespace: dynakube.Namespace,
		},
	}
	return kubeobjects.Delete(ctx, controller.client, &ds)
}
// reconcileActiveGate reconciles the ActiveGate deployment and, on success,
// configures automatic Kubernetes API monitoring.
func (controller *Controller) reconcileActiveGate(ctx context.Context, dynakube *dynatracev1beta1.DynaKube, dtc dtclient.Client) error {
	if err := activegate.NewReconciler(ctx, controller.client, controller.apiReader, controller.scheme, dynakube, dtc).Reconcile(); err != nil {
		return errors.WithMessage(err, "failed to reconcile ActiveGate")
	}
	controller.setupAutomaticApiMonitoring(dynakube, dtc)
	return nil
}
// setupAutomaticApiMonitoring creates the Kubernetes API monitoring setting
// when the feature is enabled, the kube-system UUID is known, and the
// Kubernetes-monitoring ActiveGate is active. Failures are logged only.
func (controller *Controller) setupAutomaticApiMonitoring(dynakube *dynatracev1beta1.DynaKube, dtc dtclient.Client) {
	if dynakube.Status.KubeSystemUUID == "" ||
		!dynakube.FeatureAutomaticKubernetesApiMonitoring() ||
		!dynakube.IsKubernetesMonitoringActiveGateEnabled() {
		return
	}
	clusterLabel := dynakube.FeatureAutomaticKubernetesApiMonitoringClusterName()
	if clusterLabel == "" {
		// Fall back to the DynaKube name as the cluster label.
		clusterLabel = dynakube.Name
	}
	if err := apimonitoring.NewReconciler(dtc, clusterLabel, dynakube.Status.KubeSystemUUID).Reconcile(); err != nil {
		log.Error(err, "could not create setting")
	}
}
// updateDynakubeStatus stamps the status with the current time and writes it
// back. Update conflicts are treated as non-fatal (another writer won; the
// next reconcile retries) and reported as nil.
func (controller *Controller) updateDynakubeStatus(ctx context.Context, dynakube *dynatracev1beta1.DynaKube) error {
	dynakube.Status.UpdatedTimestamp = metav1.Now()
	err := controller.client.Status().Update(ctx, dynakube)
	if k8serrors.IsConflict(err) {
		log.Info("could not update dynakube due to conflict", "name", dynakube.Name)
		return nil
	}
	return errors.WithStack(err)
}
|
package models
import (
"decept-defense/controllers/comm"
"fmt"
"strings"
)
// Baits is the GORM model for a bait record: who created it, its type,
// optional uploaded file location, and the bait payload itself.
type Baits struct {
	ID         int64  `gorm:"primary_key;AUTO_INCREMENT;not null;unique;column:id" json:"id"`
	CreateTime string `gorm:"not null"`
	Creator    string `gorm:"not null;size:256"`
	BaitType   string `gorm:"not null;size:256" form:"BaitType" binding:"required"`
	UploadPath string `gorm:"size:256"`
	FileName   string `gorm:"size:256"`
	// BUG FIX: the tag was `gorm:"unique"form:"BaitName" gorm:"unique;size:256"` —
	// a duplicate gorm key (reflect.StructTag.Get returns only the first
	// occurrence, so size:256 was silently ignored) with a missing separator.
	BaitName string `gorm:"unique;size:256" form:"BaitName" binding:"required"`
	BaitData string `form:"BaitData"`
}
// CreateBait inserts the bait as a new row and returns any database error.
func (bait *Baits) CreateBait() error {
	return db.Create(bait).Error
}
//func (bait * Baits) GetHistoryBaitsRecord(payload *comm.BaitSelectPayload) (*[]comm.HistoryBaitSelectResultPayload, int64, error){
// var ret []comm.HistoryBaitSelectResultPayload
// baitType := strings.Join([]string{"%", payload.BaitType, "%"}, "")
// baitName := strings.Join([]string{"%", payload.BaitName, "%"}, "")
// var count int64
// if payload.StartTimestamp != 0 && payload.EndTimestamp !=0{
// if err := db.Model(bait).Select("id, bait_type, bait_name, bait_data, create_time, creator").Where("bait_name LIKE ? AND bait_type LIKE ? AND create_time BETWEEN ? AND ?", baitName, baitType, util.Sec2TimeStr(payload.StartTimestamp, ""), util.Sec2TimeStr(payload.EndTimestamp, "")).Count(&count).Error; err != nil{
// return nil, 0, err
// }
// if err := db.Model(bait).Select("id, bait_type, bait_name, bait_data, create_time, creator").Limit(payload.PageSize).Offset((payload.PageNumber - 1) * payload.PageSize).Where("bait_name LIKE ? AND bait_type LIKE ? AND create_time BETWEEN ? AND ?", baitName, baitType, util.Sec2TimeStr(payload.StartTimestamp, ""), util.Sec2TimeStr(payload.EndTimestamp, "")).Scan(&ret).Error; err != nil{
// return nil, 0, err
// }
// }else{
// if err := db.Model(bait).Select("id, bait_type, bait_name, bait_data, create_time, creator").Where("bait_name LIKE ? AND bait_type LIKE ?", baitName, baitType).Count(&count).Error; err != nil{
// return nil, 0, err
// }
// if err := db.Model(bait).Select("id, bait_type, bait_name, bait_data, create_time, creator").Limit(payload.PageSize).Offset((payload.PageNumber - 1) * payload.PageSize).Where("bait_name LIKE ? AND bait_type LIKE ?", baitName, baitType).Scan(&ret).Error; err != nil{
// return nil, 0, err
// }
// }
// return &ret, count, nil
//}
// GetBaitsRecord returns one page of bait records matching the fuzzy search
// payload, together with the total number of matches before paging.
//
// SECURITY FIX: the previous implementation interpolated payload values
// directly into the SQL string with fmt.Sprintf, allowing SQL injection.
// All user-supplied values are now passed as bind parameters.
func (bait *Baits) GetBaitsRecord(payload *comm.BaitSelectPayload) (*[]comm.FileBaitSelectResultPayload, int64, error) {
	var ret []comm.FileBaitSelectResultPayload
	// Fuzzy pattern matched against the concatenated searchable columns.
	pattern := fmt.Sprintf("%%%s%%", payload.Payload)
	sql := "select id, bait_type, bait_name, file_name, bait_data, create_time, creator from baits where CONCAT(bait_type, bait_name, bait_data, file_name, create_time, creator) LIKE ?"
	args := []interface{}{pattern}
	if payload.BaitType != "" {
		sql += " AND bait_type = ?"
		args = append(args, payload.BaitType)
	}
	sql = strings.Join([]string{sql, "order by create_time DESC"}, " ")
	// First query: fetch all matches to determine the total count.
	if err := db.Raw(sql, args...).Scan(&ret).Error; err != nil {
		return nil, 0, err
	}
	count := int64(len(ret))
	// Second query: fetch only the requested page.
	sql = strings.Join([]string{sql, "limit ? offset ?"}, " ")
	args = append(args, payload.PageSize, (payload.PageNumber-1)*payload.PageSize)
	if err := db.Raw(sql, args...).Scan(&ret).Error; err != nil {
		return nil, 0, err
	}
	return &ret, count, nil
}
// GetBaitByID returns the bait row with the given primary key.
func (bait *Baits) GetBaitByID(id int64) (*Baits, error) {
	var found Baits
	err := db.Take(&found, id).Error
	if err != nil {
		return nil, err
	}
	return &found, nil
}
// GetBaitByName returns the bait row whose bait_name equals name.
func (bait *Baits) GetBaitByName(name string) (*Baits, error) {
	var found Baits
	err := db.Where("bait_name = ?", name).Take(&found).Error
	if err != nil {
		return nil, err
	}
	return &found, nil
}
// DeleteBaitByID removes the bait row with the given primary key.
func (bait *Baits) DeleteBaitByID(id int64) error {
	return db.Delete(&Baits{}, id).Error
}
|
// Copyright 2014 Aller Media AS. All rights reserved.
// License: GPL3
package command
import (
"github.com/jwaldrip/odin/cli"
)
// ExampleVersionRun runs the version command with unset build metadata and
// verifies the printed line through the example's expected output.
func ExampleVersionRun() {
	GitTag = "unknown"
	GitCommit = "unknown"
	GitBranch = "unknown"
	var cmd cli.Command
	(&Version{}).Run(cmd)
	// Output:
	// miniETL version unknown (unknown:unknown)
}
|
package _map
import "testing"
// TestMapWithFunValue demonstrates a map whose values are functions and logs
// the result of calling each stored function.
func TestMapWithFunValue(t *testing.T) {
	m := map[int]func(op int) int{
		1: func(op int) int { return op },
		2: func(op int) int { return op * op },
		3: func(op int) int { return op * op * op },
	}
	t.Log(m[1](2), m[2](3), m[3](5))
}
|
package uploadbills
import (
"encoding/json"
"fmt"
"net/http"
"time"
godrej "main.go/godrej"
itc "main.go/itc"
marico "main.go/marico"
rb "main.go/rb"
"main.go/utils"
)
// gcloud config set project dropshop-5cbbf
// gcloud functions deploy UploadCreditNoteAPI --runtime go113 --trigger-http --allow-unauthenticated --timeout 540s

// UploadCreditNoteAPI is the HTTP entry point that dispatches a credit-note
// upload to the brand-specific reader selected by the request's CustomerId.
// It answers CORS preflight, decodes the JSON payload, runs the matching
// ReadFile implementation (which receives &res to report its outcome), and
// replies with a JSON-encoded RestStatus.
func UploadCreditNoteAPI(w http.ResponseWriter, r *http.Request) {
	// CORS preflight: reply with the allowed methods/headers and no body.
	if r.Method == http.MethodOptions {
		w.Header().Set("Access-Control-Allow-Origin", "*")
		w.Header().Set("Access-Control-Allow-Methods", "POST")
		w.Header().Set("Access-Control-Allow-Headers", "Content-Type")
		w.Header().Set("Access-Control-Max-Age", "3600")
		w.WriteHeader(http.StatusNoContent)
		return
	}
	// Set CORS headers for the main request.
	w.Header().Set("Access-Control-Allow-Origin", "*")
	var d utils.InputData
	err := json.NewDecoder(r.Body).Decode(&d)
	res := utils.RestStatus{
		Message: "Success",
		Code:    200,
	}
	if err != nil {
		res = utils.RestStatus{
			Message: "error parsing application/json",
			Code:    500,
		}
		fmt.Fprintf(w, "%+v", res)
		return
	}
	if d.Object == "" {
		fmt.Fprint(w, "File path with name is required!")
		return
	}
	fmt.Println(d)
	startTime := time.Now()
	fmt.Println("Start: ", startTime.String())
	// Dispatch to the brand-specific reader keyed by customer id.
	switch d.CustomerId {
	case "jdEMMQbw29yhULLeRgJd":
		godrej.ReadFile(d, "", &res)
	case "7MI05trvcFT8aez5C1iU":
		marico.ReadFile(d, "", &res)
	case "WnwBuNNbGQLHNRAOStli":
		rb.ReadFile(d, "", &res)
	case "ksBQR6afWnyaP7G9lv22":
		itc.ReadFile(d, "", &res)
	default:
		res = utils.RestStatus{
			Message: "Not supported CUSTOMER / BRAND type ",
			Code:    500,
		}
	}
	endTime := time.Now()
	fmt.Println("End: ", endTime.String())
	fmt.Println("Time taken : ", endTime.Sub(startTime))
	resR, _ := json.Marshal(res)
	w.Header().Set("Content-Type", "application/json")
	// BUG FIX: WriteHeader must run before Write. The old code wrote the body
	// first, which committed the status as 200 and made WriteHeader a no-op,
	// so error responses were always sent with status 200.
	w.WriteHeader(res.Code)
	w.Write(resR)
}
|
package main
import (
"fmt"
"os"
"github.com/LEW21/siren/imagectl"
)
// main dispatches on the first CLI argument: help, version, or a named
// imagectl command (whose exit status becomes the process exit code).
func main() {
	args := os.Args[1:]
	allCommands := []imagectl.CommandGroup{
		{"Image", imagectl.Commands},
	}
	switch {
	case len(args) == 0, args[0] == "-h", args[0] == "--help":
		imagectl.PrintHelp("Image manager for systemd-machined.", allCommands)
	case args[0] == "-v", args[0] == "--version":
		fmt.Println("imagectl version 0")
	default:
		os.Exit(imagectl.RunCommand(args, allCommands))
	}
}
|
package net
import (
"encoding/json"
"fmt"
"log"
"net/http"
"serverskeleton/parser"
"github.com/gorilla/websocket"
)
// WSServer is a websocket RPC server. MethodMap holds the callable methods
// registered via RegisterMethod, keyed by method name.
type WSServer struct {
	MethodMap map[string]*parser.MethodInfo
}
// RegisterMethod registers the callable methods of v into the server's
// MethodMap; discovery and parsing are delegated to the parser package.
func (w *WSServer) RegisterMethod(v interface{}) {
	parser.RegisterMethod(w.MethodMap, v)
}
// WSConnection wraps a single websocket connection together with its
// outbound queue and per-connection RPC state.
type WSConnection struct {
	SendBuf     chan *parser.Response // responses queued for the send() goroutine
	ReadBuf     chan []byte           // NOTE(review): allocated but never read from in this file — confirm usage
	Conn        *websocket.Conn
	messageType int  // frame type of the last message read; replies reuse it
	client      *Client
	// isClosed is set by send() and polled by read().
	// NOTE(review): accessed from two goroutines without synchronization — confirm acceptability.
	isClosed  bool
	MethodMap map[string]*parser.MethodInfo
}

// upgrader performs the HTTP -> websocket upgrade.
var upgrader = websocket.Upgrader{} // use default options
// serve upgrades an HTTP request to a websocket connection and starts the
// connection's reader and sender goroutines. The connection is not tracked
// by the server after this point.
func (ws *WSServer) serve(w http.ResponseWriter, r *http.Request) {
	c, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		log.Print("upgrade:", err)
		return
	}
	conn := &WSConnection{
		Conn:      c,
		SendBuf:   make(chan *parser.Response, 10), // buffered so handlers rarely block
		ReadBuf:   make(chan []byte, 10),
		client:    &Client{},
		MethodMap: ws.MethodMap,
	}
	go conn.read()
	go conn.send()
}
// Run starts an HTTP server on Addr and serves websocket upgrades on /serve.
// It blocks until the server stops.
func (ws *WSServer) Run(Addr string) {
	mux := &http.ServeMux{}
	mux.HandleFunc("/serve", ws.serve)
	server := &http.Server{Addr: Addr, Handler: mux}
	// BUG FIX: the return value of ListenAndServe was silently discarded, so
	// a bind failure (port in use, bad address) went completely unreported.
	// ListenAndServe always returns a non-nil error on exit.
	if err := server.ListenAndServe(); err != nil {
		log.Println("ListenAndServe:", err)
	}
}
// translate decodes a raw frame into a request and dispatches it; frames
// that fail to parse are answered with a JSON-error response instead.
func (ws *WSConnection) translate(data []byte) {
	req, ok := parser.GenRequest(data)
	if ok {
		ws.handle(req)
		return
	}
	ws.write(parser.GenErrRespones(parser.ErrorCode_JsonError))
}
// read pumps incoming frames from the websocket into translate until a read
// fails or the connection is flagged closed; it tears the connection down on
// exit.
func (ws *WSConnection) read() {
	defer ws.close()
	for {
		// isClosed is set by send() on its way out.
		// NOTE(review): read here without synchronization — confirm the race is acceptable.
		if ws.isClosed {
			return
		}
		mt, message, err := ws.Conn.ReadMessage()
		if err != nil {
			log.Println("read:", err)
			return
		}
		// Remember the frame type so replies use the same text/binary type.
		ws.messageType = mt
		ws.translate(message)
	}
}
// handle invokes the requested method and queues its response for sending.
func (ws *WSConnection) handle(req *parser.Request) {
	resp := parser.Invoke(ws.MethodMap, req, ws.client)
	ws.SendBuf <- resp
}
// write queues a response for the send() goroutine.
func (ws *WSConnection) write(resp *parser.Response) {
	ws.SendBuf <- resp
}
// close tears down the connection: both channels are closed and the
// underlying websocket is shut.
// NOTE(review): closing SendBuf here while handle/write may still be sending
// on it can panic with "send on closed channel" — confirm the intended
// shutdown ordering between read(), send(), and the handlers.
func (ws *WSConnection) close() {
	close(ws.ReadBuf)
	close(ws.SendBuf)
	ws.Conn.Close()
}
// send drains SendBuf and writes each response to the websocket using the
// frame type captured by read(). It exits — flagging the connection closed —
// when the channel is closed, a write fails, or an "Error" response is sent.
func (ws *WSConnection) send() {
	defer func() {
		ws.isClosed = true
	}()
	for {
		respon, ok := <-ws.SendBuf
		if !ok {
			// Channel closed by close(): tell the peer we are going away.
			ws.Conn.WriteMessage(websocket.CloseMessage, []byte{})
			return
		}
		// Marshal error is deliberately ignored; a failed marshal sends nil data.
		data, _ := json.Marshal(&respon)
		err := ws.Conn.WriteMessage(ws.messageType, data)
		if nil != err {
			fmt.Println("ws.Conn.WriteMessage:", err)
			return
		}
		// An "Error" response terminates the connection's sender.
		if respon.FuncName == "Error" {
			return
		}
	}
}
|
package main
//
//import (
// "fmt"
// "os"
// "runtime"
// "strconv"
// "time"
//
// "github.com/eatonphil/gosql"
//)
//
//var inserts = 0
//var lastId = 0
//var firstId = 0
//
//func doInsert(mb gosql.Backend) {
// parser := gosql.Parser{}
// for i := 0; i < inserts; i++ {
// lastId = i
// if i == 0 {
// firstId = lastId
// }
// ast, err := parser.Parse(fmt.Sprintf("INSERT INTO users VALUES (%d)", lastId))
// if err != nil {
// panic(err)
// }
//
// err = mb.Insert(ast.Statements[0].InsertStatement)
// if err != nil {
// panic(err)
// }
// }
//}
//
//func doSelect(mb gosql.Backend) {
// parser := gosql.Parser{}
// ast, err := parser.Parse(fmt.Sprintf("SELECT id FROM users WHERE id = %d", lastId))
// if err != nil {
// panic(err)
// }
//
// r, err := mb.Select(ast.Statements[0].SelectStatement)
// if err != nil {
// panic(err)
// }
//
// if len(r.Rows) != 1 {
// panic("Expected 1 row")
// }
//
// if int(*r.Rows[0][0].AsInt()) != inserts-1 {
// panic(fmt.Sprintf("Bad row, got: %d", r.Rows[0][1].AsInt()))
// }
//
// ast, err = parser.Parse(fmt.Sprintf("SELECT id FROM users WHERE id = %d", firstId))
// if err != nil {
// panic(err)
// }
//
// r, err = mb.Select(ast.Statements[0].SelectStatement)
// if err != nil {
// panic(err)
// }
//
// if len(r.Rows) != 1 {
// panic("Expected 1 row")
// }
//
// if int(*r.Rows[0][0].AsInt()) != 0 {
// panic(fmt.Sprintf("Bad row, got: %d", r.Rows[0][1].AsInt()))
// }
//}
//
//func perf(name string, b gosql.Backend, cb func(b gosql.Backend)) {
// start := time.Now()
// fmt.Println("Starting", name)
// cb(b)
// fmt.Printf("Finished %s: %f seconds\n", name, time.Since(start).Seconds())
//
// var m runtime.MemStats
// runtime.ReadMemStats(&m)
// fmt.Printf("Alloc = %d MiB\n\n", m.Alloc/1024/1024)
//}
//
//func main() {
// mb := gosql.NewMemoryBackend()
//
// index := false
// for i, arg := range os.Args {
// if arg == "--with-index" {
// index = true
// }
//
// if arg == "--inserts" {
// inserts, _ = strconv.Atoi(os.Args[i+1])
// }
// }
//
// primaryKey := ""
// if index {
// primaryKey = " PRIMARY KEY"
// }
//
// parser := gosql.Parser{}
// ast, err := parser.Parse(fmt.Sprintf("CREATE TABLE users (id INT%s)", primaryKey))
// if err != nil {
// panic(err)
// }
//
// err = mb.CreateTable(ast.Statements[0].CreateTableStatement)
// if err != nil {
// panic(err)
// }
//
// indexingString := " with indexing enabled"
// if !index {
// indexingString = ""
// }
// fmt.Printf("Inserting %d rows%s\n", inserts, indexingString)
//
// perf("INSERT", mb, doInsert)
//
// perf("SELECT", mb, doSelect)
//}
|
package lesson01
import (
"io"
"github.com/dtamura/opentracing-tutorial-go/lib/log"
opentracing "github.com/opentracing/opentracing-go"
spanLog "github.com/opentracing/opentracing-go/log"
)
// Client runs lesson01; it holds the tracer and logger used by RunE.
type Client struct {
	tracer opentracing.Tracer // tracer used to create the "say-hello" span
	logger log.Factory        // structured logger factory
	closer io.Closer          // NOTE(review): never assigned in this file — confirm it is set elsewhere or remove
}

// ConfigOptions holds the runtime options for the lesson.
type ConfigOptions struct {
	Message string // the message tagged, logged, and printed by RunE
}

// options is the package-level configuration; NewClient overwrites it.
var options = &ConfigOptions{}
// NewClient creates a Client with the given tracer and logger, and stores o
// in the package-level options consumed by RunE.
func NewClient(o *ConfigOptions, tracer opentracing.Tracer, logger log.Factory) *Client {
	// NOTE(review): assigning to package state means concurrent NewClient
	// calls would race — confirm single-client usage.
	options = o
	return &Client{
		tracer: tracer,
		logger: logger,
	}
}
// RunE runs the lesson: it starts a span named "say-hello", tags and logs
// the configured message, prints it, and finishes the span. It returns an
// error (always nil here) to fit an error-returning command contract.
func (c *Client) RunE() error {
	c.logger.Bg().Info("Lesson01 Start")
	span := c.tracer.StartSpan("say-hello") // create a span named "say-hello"
	helloStr := options.Message
	span.SetTag("hello-to", helloStr) // record the target in the "hello-to" tag
	span.LogFields(
		spanLog.String("event", "string-format"),
		spanLog.String("value", helloStr),
	)
	println(helloStr)
	span.LogKV("event", "println")
	span.Finish()
	return nil
}
|
package main
import (
"log"
"os"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
cfsservice "gitlab.com/cfs-service"
"gitlab.com/cfs-service/server"
"gitlab.com/cfs-service/service"
"gitlab.com/cfs-service/store"
)
// init currently does nothing; all initialization happens in main.
func init() {
}
// main wires configuration flags into the cobra root command and runs the
// service: JWT key-service init, MySQL store creation + migration, then the
// HTTP server.
func main() {
	config := &cfsservice.RuntimeConfig{}
	var rootCmd = &cobra.Command{
		Use: "app",
		Run: func(cmd *cobra.Command, args []string) {
			// Init key service (public key used to validate JWT tokens).
			if err := service.InitializeKeyService(config.JWTPublicKeyPath); err != nil {
				logrus.Fatal("Missing public key file", err)
			}
			// init store
			var dbStore store.IStore
			var err error
			if dbStore, err = store.NewMySQLStore(config.DBConnectionString); err != nil {
				logrus.Fatal("DB connection failed", err)
			}
			if err := dbStore.Migrate(); err != nil {
				logrus.Fatal("DB migration failed", err)
			}
			if err := server.Start(config.Port, dbStore); err != nil {
				log.Fatal(err)
			}
		},
	}
	// TODO: Add sub command as cli tool.
	// + Migrate down (migrate up is automatically when start app)
	rootCmd.Flags().Uint64Var(&config.Port, "port", 8080, "Port")
	rootCmd.Flags().StringVar(&config.DBConnectionString, "db-conn", os.Getenv("DB_CONNECTION_STRING"), "DB-Connection string")
	rootCmd.Flags().StringVar(&config.JWTPublicKeyPath, "public-key-path", os.Getenv("PUBLIC_KEY_PATH"), "Public key file path, used to validate JWT token")
	// rootCmd.AddCommand(cmdEcho)
	// BUG FIX: the error from Execute was silently discarded, hiding flag
	// parsing and command failures; surface it and exit non-zero.
	if err := rootCmd.Execute(); err != nil {
		log.Fatal(err)
	}
}
|
package main
import (
"bufio"
"fmt"
"log"
"os"
"sort"
"strings"
)
// nextPermu returns the next permutation of s in lexicographic (byte) order.
// The second result is true when s is already the last permutation, in which
// case s is returned unchanged.
func nextPermu(s string) (string, bool) {
	b := []byte(s)
	// Find the rightmost position k with b[k] < b[k+1] (the pivot).
	pivot := -1
	for i := len(b) - 2; i >= 0; i-- {
		if b[i] < b[i+1] {
			pivot = i
			break
		}
	}
	if pivot == -1 {
		// Sequence is non-increasing: this is the last permutation.
		return s, true
	}
	// Find the rightmost element greater than the pivot and swap them.
	swap := 0
	for i := len(b) - 1; i > 0; i-- {
		if b[i] > b[pivot] {
			swap = i
			break
		}
	}
	b[pivot], b[swap] = b[swap], b[pivot]
	// Reverse the suffix after the pivot to get the smallest successor.
	for lo, hi := pivot+1, len(b)-1; lo < hi; lo, hi = lo+1, hi-1 {
		b[lo], b[hi] = b[hi], b[lo]
	}
	return string(b), false
}
// main reads the file named by the first CLI argument; for each line it
// sorts the characters and prints every permutation in lexicographic order,
// comma-separated, one line of output per line of input.
func main() {
	file, err := os.Open(os.Args[1])
	if err != nil {
		log.Fatal(err)
	}
	defer file.Close()
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		letters := strings.Split(scanner.Text(), "")
		sort.Strings(letters)
		c := strings.Join(letters, "")
		fmt.Print(c)
		for {
			next, done := nextPermu(c)
			if done {
				break
			}
			c = next
			fmt.Printf(",%s", c)
		}
		fmt.Println()
	}
}
|
package main;
import
(
"fmt"
"github.com/rajeshpachar/hellomod/hellotest"
"github.com/rajeshpachar/hellomod/child"
)
// main prints a greeting, then exercises the hellotest and child packages.
func main() {
	fmt.Println("we are inside main func now")
	hellotest.SayHello("my main calling me")
	fmt.Println("now main is running child###")
	child.HelloChild()
}
|
package resource
import (
"fmt"
"io"
"net/url"
"strings"
"github.com/mebyus/ffd/resource/archiveofourown"
"github.com/mebyus/ffd/resource/fanfiction"
"github.com/mebyus/ffd/resource/ficbook"
"github.com/mebyus/ffd/resource/fiction"
"github.com/mebyus/ffd/resource/royalroad"
"github.com/mebyus/ffd/resource/samlib"
"github.com/mebyus/ffd/resource/spacebattles"
"github.com/mebyus/ffd/resource/webnovel"
"github.com/mebyus/ffd/track/fic"
)
// tools is the per-resource toolset: downloading a fic, checking its
// metadata, and parsing an already-downloaded source.
type tools interface {
	Download(target string, saveSource bool) (book *fiction.Book, err error)
	Check(target string) (info *fic.Info, err error)
	Parse(src io.Reader) (book *fiction.Book, err error)
}
// GetLocationForTarget parses target as a URL and maps its hostname to a
// known fic location.
func GetLocationForTarget(target string) (location fic.Location, err error) {
	u, parseErr := url.Parse(target)
	if parseErr != nil {
		return location, parseErr
	}
	return GetLocationForHostname(u.Hostname())
}
// GetLocationForHostname maps a known hostname to its fic location constant;
// unknown hostnames yield an error and the zero location.
func GetLocationForHostname(hostname string) (location fic.Location, err error) {
	switch hostname {
	case ficbook.Hostname:
		return fic.FicBook, nil
	case webnovel.Hostname:
		return fic.WebNovel, nil
	case spacebattles.Hostname:
		return fic.SpaceBattles, nil
	case "forums.sufficientvelocity.com":
		return fic.SufficientVelocity, nil
	case "forum.questionablequesting.com":
		return fic.QuestionableQuesting, nil
	case fanfiction.Hostname:
		return fic.FanFiction, nil
	case archiveofourown.Hostname:
		return fic.ArchiveOfOurOwn, nil
	case royalroad.Hostname:
		return fic.RoyalRoad, nil
	default:
		return location, unknown(hostname)
	}
}
// ChooseByTarget parses target as a URL and selects the tools matching its
// hostname.
func ChooseByTarget(target string) (t tools, err error) {
	u, parseErr := url.Parse(target)
	if parseErr != nil {
		return t, parseErr
	}
	return ChooseByHostname(u.Hostname())
}
// ChooseByID selects tools by resource id: ids longer than three characters
// are treated as hostnames, shorter ones as location codes.
func ChooseByID(resourceID string) (t tools, err error) {
	if len(resourceID) <= 3 {
		// resource id is a location
		return ChooseByLocation(resourceID)
	}
	// resource id is a hostname
	return ChooseByHostname(resourceID)
}
// ChooseByHostname selects the tools implementation for a hostname. Unknown
// hostnames and hostnames without an implementation yield an error.
func ChooseByHostname(hostname string) (t tools, err error) {
	switch hostname {
	case ficbook.Hostname:
		return ficbook.NewTools(), nil
	case webnovel.Hostname:
		return webnovel.NewTools(), nil
	case samlib.Hostname:
		return samlib.NewTools(), nil
	case spacebattles.Hostname:
		return spacebattles.NewTools(), nil
	case "forums.sufficientvelocity.com":
		// SufficientVelocity is handled by the spacebattles tools.
		return spacebattles.NewTools(), nil
	case "forum.questionablequesting.com":
		return t, notImplemented(hostname)
	case fanfiction.Hostname:
		return fanfiction.NewTools(), nil
	case archiveofourown.Hostname:
		return archiveofourown.NewTools(), nil
	case royalroad.Hostname:
		return royalroad.NewTools(), nil
	default:
		return t, unknown(hostname)
	}
}
// ChooseByLocation selects the tools implementation for a location code
// (case-insensitive). Unknown or unimplemented locations yield an error.
func ChooseByLocation(location string) (t tools, err error) {
	loc := strings.ToUpper(location)
	switch fic.Location(loc) {
	case fic.FicBook:
		return ficbook.NewTools(), nil
	case fic.WebNovel:
		return webnovel.NewTools(), nil
	case fic.SpaceBattles:
		return spacebattles.NewTools(), nil
	case fic.SufficientVelocity:
		// SufficientVelocity is handled by the spacebattles tools.
		return spacebattles.NewTools(), nil
	case fic.QuestionableQuesting:
		return t, notImplemented(loc)
	case fic.FanFiction:
		return fanfiction.NewTools(), nil
	case fic.ArchiveOfOurOwn:
		return archiveofourown.NewTools(), nil
	case fic.RoyalRoad:
		return royalroad.NewTools(), nil
	default:
		return t, unknown(loc)
	}
}
// notImplemented reports that a recognized resource has no tooling yet.
func notImplemented(id string) error {
	return fmt.Errorf("resource [ %s ] not implemented", id)
}
// unknown reports that a resource identifier is not recognized at all.
func unknown(id string) error {
	return fmt.Errorf("unknown resource [ %s ]", id)
}
|
// Package client implements a lightweight bandwidth estimation using a modified UDP
// implementation of ping.
//
// Example usage from golang/bin/
// # Runs a basic client *locally* that sends UDP packets of varying size.
// # See white paper for details.
// ./client
//
// # Specify ports to receive and send on and to.
// ./client --server_ip="192.168.2.5" --client_rcv_port=1002 --client_snd_port=1003 --server_rcv_port=2000
//
// # Specify parameters to Pingtest. Run --help to see more.
// ./client --run_search_only=true --start_size=150 --increase_factor=3
//
// # Randomize the UDP padding to mitigate caching.
// ./client --randomize=true
//
// # View all flags and explanations.
// ./client --help
package main
import (
"errors"
"flag"
"log"
"math"
"math/rand"
"net"
"reflect"
"sort"
"strconv"
"sync"
"time"
"github.com/golang/protobuf/proto"
ptpb "ptprotos"
)
// Command-line flags configuring the client: endpoints/ports, padding
// randomization, and the parameters of the exponential-increase and
// binary-search phases (see the package comment for usage examples).
var (
	serverIP            = flag.String("server_ip", "127.0.0.1", "The IP address of the ack server.")
	clientRcvPort       = flag.Int("client_rcv_port", 3141, "The client's port that listens for ACKs")
	clientSndPort       = flag.Int("client_snd_port", 3142, "The client's port that sends large packets.")
	serverRcvPort       = flag.Int("server_rcv_port", 3143, "The server's port that listens for packets.")
	randomize           = flag.Bool("randomize", true, "Randomize the UDP packet padding.")
	runExpIncrease      = flag.Bool("run_exp_increase", true, "Run the exponential increase phase 1 beginning.")
	runSearchOnly       = flag.Bool("run_search_only", false, "Run binary search only.")
	increaseFactor      = flag.Int("increase_factor", 10, "Increase factor during exponential increase.")
	decreaseFactor      = flag.Int("decrease_factor", 2, "Decrease factor during binary search.")
	startSize           = flag.Int("start_size", 30, "The size of the smallest packet, in bytes.")
	maxSize             = flag.Int("max_size", 64000, "The size of the largest packet, in bytes.")
	phaseOneNumPackets  = flag.Int("phase_one_num_packets", 10, "The number of packets to send during the exponential increase phase.")
	phaseTwoNumPackets  = flag.Int("phase_two_num_packets", 10, "The number of packets to send during the binary search phase.")
	lossRate            = flag.Int("loss_rate", 70, "The acceptible rate of packets to receive before increasing the packet size.")
	convergeSize        = flag.Int("converge_size", 200, "When the difference is packet sizes is less than converge_size, terminate the procedure.")
	delayBetweenPackets = flag.Int("delay_between_packets", 1, "The time, in seconds, to wait between sending packets.")
)

// Fixed protocol parameters.
const (
	// maxUDPSize is the maximum size, in bytes, of a single UDP packet.
	maxUDPSize = 65000
	// timeoutMultiplier determines the timeout length for each packet. The timeout length is
	// determined by timeoutMultiplier * max(delay of smallest ping size)
	timeoutMultiplier = 50
	// initialProbeTimeout is the default time to wait for each of the initial packets sent out to
	// determine the average delay.
	// TODO(jvesuna): Change this constant to something smarter.
	initialProbeTimeout = 10000
)
// Client holds meta-parameters to run Pingtest.
type Client struct {
	// The host ip to ping.
	IP string
	// Params configures the search behavior (sizes, factors, loss threshold).
	Params Params
	// RunExpIncrease runs the exponential-increase phase first.
	RunExpIncrease bool
	// RunSearchOnly runs binary search only.
	RunSearchOnly bool
}

// Params holds parameters for Pingtest.
type Params struct {
	// Increase the ping packet size by IncreaseFactor on each run during phase 1.
	IncreaseFactor int
	// Decrease the packet size by 1/DecreaseFactor after failing to ping during phase 2.
	DecreaseFactor int
	// Smallest packet size in bytes.
	StartSize int
	// The largest packet size to send in bytes.
	MaxSize int
	// The number of packets to send for each ping during phase 1.
	PhaseOneNumPackets int
	// The number of packets to send for each ping during phase 2.
	PhaseTwoNumPackets int
	// The minimum packet loss % that is acceptable.
	LossRate int
	// When we test packet sizes that are less than ConvergeSize bytes apart, finish the ping test.
	ConvergeSize int
}

// TestStats holds statistics for an entire Pingtest speedtest experiment.
type TestStats struct {
	// TotalBytesSent is the total bytes that were *attempted* to send.
	TotalBytesSent int
	// TotalBytesDropped is the total bytes that were dropped during this ping.
	TotalBytesDropped int
	// The estimated bandwidth, in megabytes and kilobits respectively.
	EstimatedMB float64
	EstimatedKb float64
}

// pingStats holds statistics for *each* ping that was sent.
// TODO(jvesuna): use median instead of mean.
type pingStats struct {
	// Delay statistics in milliseconds.
	min   float64
	max   float64
	mean  float64
	stdev float64
	// loss is the packet loss percentage.
	loss float64
	// wg is used to wait for the receiver goroutine to finish.
	wg sync.WaitGroup
}
// receivedPacket contains the full proto for each packet received and the sent index of the packet.
type receivedPacket struct {
	message   ptpb.PingtestMessage
	sentIndex int // index assigned by the sender, used to restore send order
}

// BySentIndex implements sort.Interface for []receivedPacket based on the sentIndex field.
type BySentIndex []receivedPacket

func (s BySentIndex) Len() int           { return len(s) }
func (s BySentIndex) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s BySentIndex) Less(i, j int) bool { return s[i].sentIndex < s[j].sentIndex }
// computeDelays returns the min, mean, and max delays (in milliseconds) of a
// sorted list of packets and their receive times. rcvPackets and rcvTimes
// must be parallel slices; an empty input yields zeros. Each delay is
// truncated to whole milliseconds before aggregating.
func computeDelays(rcvPackets []receivedPacket, rcvTimes []int64) (float64, float64, float64, error) {
	if len(rcvPackets) != len(rcvTimes) {
		return 0, 0, 0, errors.New("error: length of rcvPackets != length of rcvTimes")
	}
	if len(rcvPackets) == 0 {
		return 0, 0, 0, nil
	}
	// Per-packet delay: receive time minus the client-side send timestamp,
	// converted from nanoseconds to milliseconds.
	// (Idiom fix: `for i, _ := range` replaced with `for i := range`;
	// the slice is preallocated since its final length is known.)
	delays := make([]int64, 0, len(rcvPackets))
	for i := range rcvPackets {
		timeDelay := float64(rcvTimes[i]-*rcvPackets[i].message.PingtestParams.ClientTimestampNano) /
			float64(1000000)
		delays = append(delays, int64(timeDelay))
	}
	sum := 0
	min, max := delays[0], delays[0]
	for _, delay := range delays {
		sum += int(delay)
		if delay < min {
			min = delay
		}
		if delay > max {
			max = delay
		}
	}
	mean := float64(sum) / float64(len(rcvPackets))
	// Sanity check: a mean outside [min, max] indicates a logic error.
	if float64(min) > mean || mean > float64(max) {
		log.Fatalln("error: mean must be within min and max")
	}
	return float64(min), mean, float64(max), nil
}
// clientReceiver receives ping ACKs from the server and fills ps with ping
// statistics (min/mean/max delay in milliseconds and loss percentage).
// count is the number of packets expected; timeoutLen is the per-packet read
// deadline in milliseconds. The caller must ps.wg.Add(1) before starting it;
// it signals ps.wg when finished.
func (ps *pingStats) clientReceiver(count, timeoutLen int, ServerConn *net.UDPConn) {
	log.Println("Conn is", ServerConn)
	var rcvCount int
	var rcvPackets []receivedPacket
	var rcvTimes []int64
	// TODO(jvesuna): Consider making everything in the for loop a big goroutine.
	for i := 0; i < count; i++ {
		buf := make([]byte, maxUDPSize)
		// Each read gets its own deadline; a missed deadline counts as loss.
		ServerConn.SetReadDeadline(time.Now().Add(time.Duration(timeoutLen) * time.Millisecond))
		numBytesRead, _, err := ServerConn.ReadFromUDP(buf)
		if err != nil {
			// Handle timeout.
			if reflect.TypeOf(err) == reflect.TypeOf((*net.OpError)(nil)) {
				log.Println("Timeout!")
			} else {
				// TODO(jvesuna): Handle.
				log.Fatalln("Error: ", err)
			}
		} else {
			// Packet received.
			// TODO(jvesuna): Handle in goroutine with a shared struct.
			buf = buf[:numBytesRead]
			log.Println("Received packet #", i)
			rcvCount++
			message := &ptpb.PingtestMessage{}
			if err := proto.Unmarshal(buf, message); err != nil {
				log.Fatalln("error reading proto from packet:", err)
			}
			// Keep the sender-assigned index so results can be sorted in send order.
			sentIndex := *message.PingtestParams.PacketIndex
			rcvPackets = append(rcvPackets, receivedPacket{
				message:   *message,
				sentIndex: int(sentIndex),
			})
			rcvTimes = append(rcvTimes, time.Now().UnixNano())
		}
	}
	// Process packets.
	sort.Sort(BySentIndex(rcvPackets))
	percentLoss := 100 - (float64(rcvCount)/float64(count))*100
	minDelay, meanDelay, maxDelay, err := computeDelays(rcvPackets, rcvTimes)
	if err != nil {
		log.Fatalln("error computing delay:", err)
	}
	ps.min = minDelay
	ps.mean = meanDelay
	ps.max = maxDelay
	ps.loss = percentLoss
	log.Println("Min Delay:", ps.min)
	log.Println("Max Delay:", ps.max)
	log.Println("Mean Delay:", ps.mean)
	log.Println("Loss Percent:", ps.loss)
	// NOTE(review): deferring Done on the last line still runs at return, but
	// placing the defer at the top would also document intent better — confirm.
	defer ps.wg.Done()
}
// runPing runs a UDP implementation of the ping util over Conn.
// count is the number of UDP packets to send, size is the target packet size
// in bytes, and timeoutLen is the per-packet receive timeout in milliseconds.
// It blocks until the receiver goroutine has processed (or timed out on)
// every packet and returns the gathered statistics.
func runPing(count, size, timeoutLen int, Conn *net.UDPConn) (pingStats, error) {
	log.Printf("========= starting new ping =========")
	log.Println("Using byte size: ", size)
	// Start the receiver before sending anything so no ACK is missed.
	var ps pingStats
	ps.wg.Add(1)
	go ps.clientReceiver(count, timeoutLen, Conn)
	// Send packets.
	log.Println("sending Conn is", Conn)
	// TODO(jvesuna): Add Channel here.
	for i := 0; i < count; i++ {
		params := &ptpb.PingtestParams{
			PacketIndex:         proto.Int64(int64(i)),
			ClientTimestampNano: proto.Int64(time.Now().UnixNano()),
			PacketSizeBytes:     proto.Int64(int64(size)),
		}
		// paddingLen is the number of bytes we should add to the payload
		// to reach the requested size.
		paddingLen := int(math.Max(0, float64(size-proto.Size(params))))
		var padding []byte
		if *randomize {
			// Randomize the padding. Loop variable renamed so it does not
			// shadow the packet index i.
			for j := 0; j < paddingLen; j++ {
				padding = append(padding, byte(rand.Intn(10)))
			}
		} else {
			padding = make([]byte, paddingLen)
		}
		message := &ptpb.PingtestMessage{
			PingtestParams: params,
			Padding:        padding,
		}
		wireBytes, err := proto.Marshal(message)
		if err != nil {
			log.Println(err)
			return pingStats{}, errors.New("error marshalling message")
		}
		// Send the packet.
		// TODO(jvesuna): Handle 'host down' and network problems.
		// Bug fix: the SetWriteBuffer error was previously discarded.
		if err := Conn.SetWriteBuffer(proto.Size(message)); err != nil {
			log.Println("error setting write buffer:", err)
		}
		if _, err = Conn.Write(wireBytes); err != nil {
			log.Println(err)
			return pingStats{}, errors.New("error writing wireBytes to connection")
		}
		// TODO(jvesuna): Change this delay between packets?
		log.Println("Sent #", i)
		time.Sleep(time.Duration(*delayBetweenPackets) * time.Second)
	}
	// Wait for the receiver to finish or time out on every packet.
	ps.wg.Wait()
	// NOTE(review): returning ps by value copies its sync.WaitGroup field;
	// harmless here because the WaitGroup is settled, but `go vet` flags it —
	// consider returning *pingStats in a follow-up.
	return ps, nil
}
// checkDefaults validates oldClient and returns a copy with every unset
// (zero) parameter replaced by its default value.
// It fails when both mutually-exclusive run modes are requested.
// TODO(jvesuna): Remove this when the caller is specified.
func checkDefaults(oldClient *Client) (Client, error) {
	if oldClient.RunExpIncrease && oldClient.RunSearchOnly {
		return Client{}, errors.New("cannot run exponential increase AND binary search only flags together")
	}
	c := Client{
		IP:             oldClient.IP,
		RunExpIncrease: oldClient.RunExpIncrease,
		RunSearchOnly:  oldClient.RunSearchOnly,
		Params:         oldClient.Params,
	}
	if c.IP == "" {
		c.IP = "127.0.0.1"
	}
	// Alias the params so each default assignment stays short.
	p := &c.Params
	if p.IncreaseFactor == 0 {
		p.IncreaseFactor = 10
	}
	if p.DecreaseFactor == 0 {
		p.DecreaseFactor = 2
	}
	if p.StartSize == 0 {
		p.StartSize = 2500
	}
	if p.MaxSize == 0 {
		p.MaxSize = 2500
	}
	if p.PhaseOneNumPackets == 0 {
		p.PhaseOneNumPackets = 5
	}
	if p.PhaseTwoNumPackets == 0 {
		p.PhaseTwoNumPackets = 10
	}
	if p.LossRate == 0 {
		p.LossRate = 70
	}
	if p.ConvergeSize == 0 {
		p.ConvergeSize = 200
	}
	return c, nil
}
// bandwidth returns the estimated throughput in MBps and Kbps for the given
// packet size (bytes) and minimum RTT (milliseconds).
// One-way latency is taken as half the RTT; the *1000 converts the result
// into megabytes per second. The second value is 8000x the first
// (bytes -> kilobits), matching the EstimatedKb field it feeds.
// TODO(jvesuna): Integrate this into TestStats.
func bandwidth(pktSize int, minRTT float64) (float64, float64) {
	oneWay := (minRTT / float64(2)) * 1000
	mbps := float64(pktSize) / oneWay
	return mbps, 8000 * mbps
}
// expIncrease runs pings with exponentially increasing packet sizes. Returns
// the smallest packet size that was dropped (ceiling), and TestStats. Assumes
// that startSize already works, thus the first packet we send is
// startSize * increaseFactor bytes. Growth is capped at maxSize; reaching
// maxSize without excessive loss means the test passed outright.
func (c *Client) expIncrease(ts TestStats, startSize, maxSize, increaseFactor, timeoutLen int, Conn *net.UDPConn) (int, TestStats, error) {
	pktSize := startSize
	for {
		pktSize *= increaseFactor
		if pktSize > maxSize {
			pktSize = maxSize
		}
		ts.TotalBytesSent += c.Params.PhaseOneNumPackets * pktSize
		// TODO(jvesuna): Fix timeoutLen.
		ps, err := runPing(c.Params.PhaseOneNumPackets, pktSize, timeoutLen, Conn)
		ts.TotalBytesDropped += int(ps.loss * 0.01 * float64(c.Params.PhaseOneNumPackets) *
			float64(pktSize))
		if err != nil {
			// Bug fix: log.Fatalf exited the process here, making the error
			// return unreachable; log and let the caller handle it.
			log.Printf("failed to ping host: %v", err)
			return 0, TestStats{}, err
		}
		if ps.loss > float64(c.Params.LossRate) {
			// This size was dropped too often; it is our ceiling.
			log.Printf("Loss is over %d percent: %f", c.Params.LossRate, ps.loss)
			return pktSize, ts, nil
		}
		if pktSize >= maxSize {
			// Ping test passed. More tests need to be run to determine a higher limit.
			// TODO(jvesuna): Clean up logging, move to caller.
			log.Printf("Passed Ping Test, hit hard limit")
			log.Printf("Total bytes attempted to send: %d", ts.TotalBytesSent)
			log.Printf("Total MB attempted to send: %f", float64(ts.TotalBytesSent)/float64(1000000))
			log.Printf("Total bytes dropped: %d", ts.TotalBytesDropped)
			log.Printf("Net megabytes that made it: %f",
				float64(ts.TotalBytesSent-ts.TotalBytesDropped)/float64(1000000))
			ts.EstimatedMB, ts.EstimatedKb = bandwidth(pktSize, ps.mean)
			return maxSize, ts, nil
		}
	}
}
// binarySearch returns the largest packet size that has a loss rate
// < c.Params.LossRate, and TestStats. Assumes that minBytes works and
// maxBytes is dropped. The search narrows [minBytes, maxBytes] until two
// successive probes differ by less than c.Params.ConvergeSize bytes.
func (c *Client) binarySearch(ts TestStats, minBytes, maxBytes, decreaseFactor int, Conn *net.UDPConn) (int, TestStats, error) {
	log.Printf("Testing range %d to %d bytes sized packets.\n", minBytes, maxBytes)
	// middleGround is the next probe size. Bug fix: the decreaseFactor
	// parameter was accepted but ignored in favor of c.Params.DecreaseFactor;
	// all callers pass that same value, so honoring the parameter is safe.
	// (With the default factor of 2 this is the midpoint of the range.)
	middleGround := int(float64(maxBytes+minBytes) / float64(decreaseFactor))
	// oldGround is the previous middleGround, used to detect convergence.
	var oldGround int
	for {
		ts.TotalBytesSent += c.Params.PhaseTwoNumPackets * middleGround
		// TODO(jvesuna): Fix timeoutLen.
		ps, err := runPing(c.Params.PhaseTwoNumPackets, middleGround, initialProbeTimeout, Conn)
		ts.TotalBytesDropped += int(ps.loss * 0.01 * float64(c.Params.PhaseTwoNumPackets) *
			float64(middleGround))
		if err != nil {
			// Bug fix: log.Fatalf exited the process here, making the error
			// return unreachable; log and let the caller handle it.
			log.Printf("failed to ping host: %v", err)
			return 0, TestStats{}, err
		}
		// Update oldGround to be the previous middleGround.
		oldGround = middleGround
		// Update middleGround toward minBytes on failure or maxBytes on success.
		if ps.loss > float64(c.Params.LossRate) {
			// Decrease middleGround. New ceiling is middleGround.
			log.Printf("Loss is over %d percent: %f", c.Params.LossRate, ps.loss)
			maxBytes = middleGround
			middleGround = int(float64(minBytes+middleGround) / float64(decreaseFactor))
			log.Printf("Updating range: %d to %d\n", minBytes, maxBytes)
		} else {
			// Increase middleGround. New floor is middleGround.
			minBytes = middleGround
			middleGround = int(float64(maxBytes+middleGround) / float64(decreaseFactor))
			log.Printf("Updating range: %d to %d\n", minBytes, maxBytes)
		}
		if math.Abs(float64(middleGround-oldGround)) < float64(c.Params.ConvergeSize) {
			// Finished! Once we have narrowed down the packet size, return.
			log.Printf("Completed Ping Test, found estimate with largest packet size: %d", middleGround)
			log.Printf("Total bytes attempted to send: %d", ts.TotalBytesSent)
			log.Printf("Total MB attempted to send: %f", float64(ts.TotalBytesSent)/float64(1000000))
			log.Printf("Total bytes dropped: %d", ts.TotalBytesDropped)
			log.Printf("Net megabytes that made it: %f",
				float64(ts.TotalBytesSent-ts.TotalBytesDropped)/float64(1000000))
			ts.EstimatedMB, ts.EstimatedKb = bandwidth(middleGround, ps.mean)
			return middleGround, ts, nil
		}
	}
}
// runExpIncrease first runs an exponentially increasing byte size phase (1),
// then a binary search phase (2), accumulating statistics into ts.
func (c *Client) runExpIncrease(ts TestStats, Conn *net.UDPConn) (TestStats, error) {
	// Baseline probe at the starting size to find the smallest RTT.
	// TODO(jvesuna): Add test runtime by taking timestamp here and at return points.
	// TODO(jvesuna): Find a better way of accumulating bytes, maybe in runPing?
	ts.TotalBytesSent += c.Params.PhaseOneNumPackets * c.Params.StartSize
	// Wait 10 seconds per packet initially.
	ps, err := runPing(c.Params.PhaseOneNumPackets, c.Params.StartSize, initialProbeTimeout, Conn)
	ts.TotalBytesDropped += int(ps.loss * 0.01 * float64(c.Params.PhaseOneNumPackets) *
		float64(c.Params.StartSize))
	if err != nil {
		log.Println("failed to ping host")
		return TestStats{}, err
	}
	// TODO(jvesuna): add some sanity checks
	log.Printf("Initial ping results: %v", ps)
	// Derive the per-packet timeout from the measured worst-case delay.
	timeoutLen := int(math.Max(ps.max, 1)) * timeoutMultiplier
	// Phase 1: exponential increase until a size is dropped or the cap is hit.
	ceiling, ts, err := c.expIncrease(ts, c.Params.StartSize, c.Params.MaxSize,
		c.Params.IncreaseFactor, timeoutLen, Conn)
	switch {
	case err != nil:
		return ts, err
	case ceiling == c.Params.MaxSize:
		// Pingtest passed outright; more tests needed for a higher limit.
		return ts, nil
	case ceiling == 0:
		return TestStats{}, errors.New("pingtest: unexpected error in exponential increase")
	}
	// Phase 2: binary search between the last working size and the first
	// failing size.
	lower := ceiling / c.Params.IncreaseFactor
	upper := ceiling
	best, ts, err := c.binarySearch(ts, lower, upper, c.Params.DecreaseFactor, Conn)
	if err != nil {
		return ts, err
	}
	if best == 0 {
		return TestStats{}, errors.New("pingtest: unexpected error in binary search")
	}
	return ts, nil
}
// runSearchOnly only runs binary search on the given parameters: it probes the
// smallest size (to calibrate the timeout), then the largest; if the largest
// passes it returns immediately, otherwise it binary-searches between them.
func (c *Client) runSearchOnly(ts TestStats, Conn *net.UDPConn) (TestStats, error) {
	// First run smallest packet size.
	ts.TotalBytesSent += c.Params.PhaseOneNumPackets * c.Params.StartSize
	// Wait 10 seconds per packet initially.
	ps, err := runPing(c.Params.PhaseOneNumPackets, c.Params.StartSize, initialProbeTimeout, Conn)
	ts.TotalBytesDropped += int(ps.loss * 0.01 * float64(c.Params.PhaseOneNumPackets) *
		float64(c.Params.StartSize))
	if err != nil {
		// Bug fix: this used log.Println with a %v verb; Printf formats it.
		log.Printf("failed to ping host: %v", err)
		return TestStats{}, err
	}
	// TODO(jvesuna): add some sanity checks
	timeoutLen := int(math.Max(ps.max, 1)) * timeoutMultiplier
	// Then run largest packet size.
	ts.TotalBytesSent += c.Params.PhaseOneNumPackets * c.Params.MaxSize
	ps, err = runPing(c.Params.PhaseOneNumPackets, c.Params.MaxSize, timeoutLen, Conn)
	ts.TotalBytesDropped += int(ps.loss * 0.01 * float64(c.Params.PhaseOneNumPackets) *
		float64(c.Params.MaxSize))
	if err != nil {
		// Bug fix: log.Fatalf exited the process here, making the error
		// return unreachable; log and let the caller handle it.
		log.Printf("failed to ping host: %v", err)
		return TestStats{}, err
	}
	if ps.loss < float64(c.Params.LossRate) {
		// Pingtest passed. Need to run more tests.
		log.Printf("Completed Ping Test")
		log.Printf("Total bytes *attempted* to send: %d", ts.TotalBytesSent)
		log.Printf("Total MB *attempted* to send: %f", float64(ts.TotalBytesSent)/float64(1000000))
		log.Printf("Total bytes dropped: %d", ts.TotalBytesDropped)
		log.Printf("Net megabytes that made it: %f", float64(ts.TotalBytesSent-ts.TotalBytesDropped)/
			float64(1000000))
		ts.EstimatedMB, ts.EstimatedKb = bandwidth(c.Params.MaxSize, ps.mean)
		return ts, nil
	}
	// Else if largest packet size fails, then run binary search.
	pktSize, ts, err := c.binarySearch(ts, c.Params.StartSize, c.Params.MaxSize,
		c.Params.DecreaseFactor, Conn)
	if err != nil {
		return ts, err
	}
	if pktSize == 0 {
		return TestStats{}, errors.New("pingtest: unexpected error in binary search")
	}
	return ts, nil
}
// RunPingtest runs binary search to find the largest ping packet size without
// a high drop rate, optionally preceded by an exponential increase phase.
// Returns the test statistics for this test.
func (c *Client) RunPingtest(Conn *net.UDPConn) (TestStats, error) {
	// Validate parameters and fill in defaults first.
	cl, err := checkDefaults(c)
	if err != nil {
		return TestStats{}, err
	}
	// ts accumulates the overall statistics for this test.
	var ts TestStats
	// TODO(jvesuna): Have these functions take a range to probe for, and
	// return a range. Then clean up.
	switch {
	case cl.RunExpIncrease:
		return cl.runExpIncrease(ts, Conn)
	case cl.RunSearchOnly:
		return cl.runSearchOnly(ts, Conn)
	}
	return TestStats{}, errors.New("pingtest failed: ended unexpectedly")
}
// main wires the command-line flags into a Client, dials the server over UDP,
// and runs the ping test, logging the estimated bandwidth on success.
func main() {
	flag.Parse()
	addr, err := net.ResolveUDPAddr("udp", *serverIP+":"+strconv.Itoa(*serverRcvPort))
	if err != nil {
		log.Fatalln("error binding to server port:", err)
	}
	conn, err := net.DialUDP("udp", nil, addr)
	if err != nil {
		log.Fatalln("error connecting UDP:", err)
	}
	defer conn.Close()
	client := Client{
		IP: *serverIP,
		Params: Params{
			IncreaseFactor:     *increaseFactor,
			DecreaseFactor:     *decreaseFactor,
			StartSize:          *startSize,
			MaxSize:            *maxSize,
			PhaseOneNumPackets: *phaseOneNumPackets,
			PhaseTwoNumPackets: *phaseTwoNumPackets,
			LossRate:           *lossRate,
			ConvergeSize:       *convergeSize,
		},
		RunExpIncrease: *runExpIncrease,
		RunSearchOnly:  *runSearchOnly,
	}
	stats, err := client.RunPingtest(conn)
	if err != nil {
		log.Println(err)
		return
	}
	log.Printf("Estimated bandwidth: %f MBps, %f Kbps", stats.EstimatedMB, stats.EstimatedKb)
}
|
package server
import (
"github.com/bugscatcher/cache-service/configs"
"github.com/go-redis/redis"
)
// GRPCHandler bundles the dependencies used by the gRPC server handlers.
type GRPCHandler struct {
	// Redis is the client used to reach the Redis backend.
	Redis *redis.Client
	// Conf holds the service configuration.
	Conf configs.Config
}
|
package main
import (
"math"
"fmt"
)
/*
Given two integers dividend and divisor, divide two integers without using multiplication, division and mod operator.
Return the quotient after dividing dividend by divisor.
The integer division should truncate toward zero.
Example 1:
Input: dividend = 10, divisor = 3
Output: 3
Example 2:
Input: dividend = 7, divisor = -3
Output: -2
Note:
Both dividend and divisor will be 32-bit signed integers.
The divisor will never be 0.
Assume we are dealing with an environment which could only store integers within the 32-bit signed integer range: [−2^31, 2^31 − 1]. For the purpose of this problem, assume that your function returns 2^31 − 1 when the division result overflows.
*/
// main demonstrates the single overflow case: MinInt32 / -1 clamps to MaxInt32.
func main() {
	result := divide(math.MinInt32, -1)
	fmt.Println(result)
}
// divide returns dividend / divisor (truncated toward zero) without using
// multiplication, division or the mod operator.
//
// Approach: plain repeated subtraction converges linearly, so instead we
// double the subtrahend with << until it would exceed the dividend, which
// makes the whole thing O(log n).
//
// Overflow: the only 32-bit overflow case is MinInt32 / -1
// (abs(MinInt32) == MaxInt32 + 1), which clamps to MaxInt32 per the problem
// statement. The divisor is never 0.
func divide(dividend int, divisor int) int {
	if dividend == math.MinInt32 && divisor == -1 {
		// The single special (overflowing) case.
		return math.MaxInt32
	}
	// Bug fix: without this guard, divide(0, negative) reached solve with a
	// negative ds, whose doubling loop never terminates.
	if dividend == 0 {
		return 0
	}
	// Bug fix: this branch was mangled to "divisor>0&÷nd<0" (an HTML
	// entity for "&&divid"), which does not compile.
	if dividend > 0 && divisor < 0 {
		return -1 * solve(dividend, -1*divisor)
	} else if divisor > 0 && dividend < 0 {
		return -1 * solve(-1*dividend, divisor)
	} else if dividend < 0 && divisor < 0 {
		return solve(-1*dividend, -1*divisor)
	}
	return solve(dividend, divisor)
}

// solve computes dd / ds for dd >= 0, ds > 0 by finding the largest ds*2^k
// still below dd, subtracting it, and recursing on the remainder.
func solve(dd, ds int) int {
	if dd < ds {
		return 0
	}
	var x int = 1
	var t int = ds
	for dd > t<<1 {
		t <<= 1
		x <<= 1 // doubling each step keeps this O(log n)
	}
	return x + solve(dd-t, ds)
}
// Business health check (经营体检) API bindings.
package elemeOpenApi
// GetShopDiagnosis queries the business health-check report for one shop.
// shopId_ is the shop ID; date_ is the check date (data is available for at
// most the last 7 days).
func (diagnosis *Diagnosis) GetShopDiagnosis(shopId_ int64, date_ string) (interface{}, error) {
	params := map[string]interface{}{
		"shopId": shopId_,
		"date":   date_,
	}
	return APIInterface(diagnosis.config, "eleme.diagnosis.getShopDiagnosis", params)
}
// GetShopDiagnosisList queries the business health-check reports for several
// shops at once. shopIds_ is the collection of shop IDs; date_ is the check
// date (data is available for at most the last 7 days).
func (diagnosis *Diagnosis) GetShopDiagnosisList(shopIds_ interface{}, date_ string) (interface{}, error) {
	params := map[string]interface{}{
		"shopIds": shopIds_,
		"date":    date_,
	}
	return APIInterface(diagnosis.config, "eleme.diagnosis.getShopDiagnosisList", params)
}
|
package closing_channels
// ExampleClosingChannels runs closingChannels; the Output comment below is
// the exact stdout `go test` verifies verbatim, so it must not be edited.
func ExampleClosingChannels() {
	closingChannels()
	//Output:
	//Sent job 1
	//Received job 1
	//Sent job 2
	//Received job 2
	//Sent job 3
	//Sent all jobs
	//Received job 3
	//Received all jobs
}
|
package main
import (
"fmt"
"math/big"
"reflect"
"strconv"
)
// SMC is the full state of the abstract machine.
// Roles inferred from usage in this file — confirm against the interpreter docs:
// E maps identifiers to environment values (locations/constants/procedures),
// S is the value stack, M maps memory locations to variables, C is the
// control stack of trees still to execute, and T tracks identifiers declared
// in the current block (popped again by the "blk" handler).
type SMC struct {
	E map[string]EnviromentValue
	S GenericStack
	M map[string]Var
	C Stack
	T GenericStack
}
// BtoA converts a boolean into its string form ("true" or "false").
func BtoA(boolValue bool) string {
	// strconv.FormatBool replaces the hand-rolled if/else.
	return strconv.FormatBool(boolValue)
}
// cleanMemory deletes the memory cell bound to ident, but only when the
// environment entry is a Location — constants and procedures own no cell in M.
func cleanMemory(smc SMC, ident string) SMC {
	enviromentValue := smc.E[ident]
	if enviromentValue.WhatIsMyType() != "main.Location" {
		return smc
	}
	delete(smc.M, string(enviromentValue.(Location)))
	return smc
}
// resolveDesigualdade evaluates the comparison named by typeOf ("eq", "ge",
// "le", "gt", "lt") over the first two trees of forest and returns a leaf
// tree holding "true" or "false".
// Operands are popped in reverse order, so forest[0] is the right-hand side
// and forest[1] the left-hand side. A failed Atoi means the operand is an
// identifier and is resolved through the environment instead.
func resolveDesigualdade(typeOf string, smc SMC, forest ...*Tree) *Tree {
	value1, err1 := strconv.Atoi(forest[0].toString())
	value0, err0 := strconv.Atoi(forest[1].toString())
	if err0 != nil {
		value0, _ = strconv.Atoi(findValue(forest[1], smc))
	}
	if err1 != nil {
		value1, _ = strconv.Atoi(findValue(forest[0], smc))
	}
	var boolValue bool
	// Go's switch does not fall through; the redundant breaks were removed.
	switch typeOf {
	case "eq":
		boolValue = (value0 == value1)
	case "ge":
		boolValue = (value0 >= value1)
	case "le":
		boolValue = (value0 <= value1)
	case "gt":
		boolValue = (value0 > value1)
	case "lt":
		boolValue = (value0 < value1)
	}
	return &Tree{Value: BtoA(boolValue), Sons: nil}
}
// dismember maps control-flow node names to handlers that decompose a node's
// children onto the machine stacks (populated by criaMapaDismember).
var dismember map[string]func(SMC, []*Tree) SMC

// evaluate maps operation names to their evaluation step (populated by criaMapa).
var evaluate map[string]func(SMC) SMC
// memFindNext returns the next free memory location key: one past the highest
// numeric key currently in memory ("1" for an empty map).
func memFindNext(memory map[string]Var) string {
	max := 0
	// Idiom fix: `for k := range` replaces `for k, _ := range`.
	for k := range memory {
		// Non-numeric keys parse as 0 and are effectively ignored.
		kv, _ := strconv.Atoi(k)
		if kv >= max {
			max = kv
		}
	}
	return strconv.Itoa(max + 1)
}
// createVariable binds ident to a fresh memory location holding val's value.
// val may be a literal or an identifier; in the latter case it is resolved
// through the environment, and an unresolvable name panics.
func createVariable(ident *Tree, val *Tree, smc SMC) SMC {
	value, missing := toVar(val.toString())
	if missing {
		value, missing = toVar(findValue(val, smc))
		if missing {
			panic(fmt.Sprintf("Erro! Variável %s não delcarada!", val.toString()))
		}
	}
	name := ident.toString()
	location := memFindNext(smc.M)
	smc.E[name] = Location(location)
	smc.M[location] = value
	return smc
}
// createConst binds ident directly in the environment to val's integer value
// (constants live in E, not in memory M). A non-literal val is resolved
// through the environment first.
func createConst(ident *Tree, val *Tree, smc SMC) SMC {
	value, parseErr := strconv.Atoi(val.toString())
	if parseErr != nil {
		value, _ = strconv.Atoi(findValue(val, smc))
	}
	smc.E[ident.toString()] = Constante(strconv.Itoa(value))
	return smc
}
// changeValueInMemory overwrites the memory cell bound to ident with val's
// value. It reports false when ident is unbound or not bound to a Location
// (e.g. a constant), leaving the machine untouched.
func changeValueInMemory(ident *Tree, val *Tree, smc SMC) (SMC, bool) {
	loc, exists := smc.E[ident.toString()]
	if !exists || reflect.TypeOf(loc).String() != "main.Location" {
		return smc, false
	}
	newValue, missing := toVar(val.toString())
	if missing {
		newValue, _ = toVar(findValue(val, smc))
	}
	smc.M[string(loc.(Location))] = newValue
	return smc, true
}
// findValue resolves ident through the environment and returns its value as a
// string: the memory cell's contents for a Location, or the constant's value
// for a Constante. Panics when ident is not declared.
func findValue(ident *Tree, smc SMC) string {
	//fmt.Println(ident.toString())
	valorAmbiente := smc.E[ident.toString()]
	if valorAmbiente.WhatIsMyType() == "main.Location" {
		location := string(valorAmbiente.(Location))
		//fmt.Print(location)
		return smc.M[location].toString()
	} else if valorAmbiente.WhatIsMyType() == "main.Constante" {
		constante := string(valorAmbiente.(Constante))
		return constante
	} else {
		// Bug fix: fmt.Sprint ignores the %s verb; Sprintf formats it.
		panic(fmt.Sprintf("Variável %s não delcarada", ident.toString()))
	}
}
// findVar resolves ident through the environment and returns its Var plus an
// error flag that is true on failure (mirroring toVar's (value, notFound)
// convention — note the negation of `found` below).
func findVar(ident *Tree, smc SMC) (Var, bool) {
	//fmt.Println(ident.toString())
	valorAmbiente := smc.E[ident.toString()]
	if valorAmbiente.WhatIsMyType() == "main.Location" {
		location := string(valorAmbiente.(Location))
		//fmt.Print(location)
		val, found := smc.M[location]
		// Second result is an error flag, hence the negation.
		return val, !found
	} else if valorAmbiente.WhatIsMyType() == "main.Constante" {
		constante := string(valorAmbiente.(Constante))
		return toVar(constante)
	} else {
		//panic(fmt.Sprint("Variável %s não delcarada", ident.toString()))
		return errVar()
	}
}
// getTreeFromValueStack pops the top of the value stack S and returns it as a
// *Tree, panicking if the popped item is not a tree.
func getTreeFromValueStack(smc SMC) (SMC, *Tree) {
	var genericInfo interface{}
	var typeOfGenericInfo string
	smc.S, genericInfo, typeOfGenericInfo = smc.S.pop()
	if typeOfGenericInfo != "*main.Tree" {
		panic("Erro inesperado")
	}
	// Fix: the original allocated a throwaway new(Tree) that was immediately
	// overwritten by the type assertion.
	return smc, genericInfo.(*Tree)
}
// getEnviromentFromValueStack pops the top of the value stack S and returns
// it as an environment map, panicking if the popped item has another type.
func getEnviromentFromValueStack(smc SMC) (SMC, map[string]EnviromentValue) {
	var raw interface{}
	var rawType string
	smc.S, raw, rawType = smc.S.pop()
	if rawType != "map[string]main.EnviromentValue" {
		panic("Erro inesperado")
	}
	env := raw.(map[string]EnviromentValue)
	return smc, env
}
// criaMapa builds the evaluation table: one handler per operation name.
// Each handler pops its operands from the value stack S (and occasionally the
// control stack C or block stack T), performs the operation, and pushes the
// result back. Arithmetic uses math/big so literals of any size work.
func criaMapa() map[string]func(SMC) SMC {
	var evaluate = map[string]func(SMC) SMC{
		// add: pops two operands and pushes their sum.
		"add": func(smc SMC) SMC {
			var num = 2
			var t = new(Tree)
			var sum = big.NewInt(0)
			for i := 0; i < num; i++ {
				smc, t = getTreeFromValueStack(smc)
				//value, err := strconv.Atoi(t.toString())
				value := big.NewInt(0)
				_, err := value.SetString(t.Value, 10)
				if !err {
					// Not a literal: resolve the identifier first.
					value = big.NewInt(0)
					value.SetString(findValue(t, smc), 10)
				}
				sum.Add(sum, value)
			}
			smc.S = smc.S.push(&Tree{Value: sum.String(), Sons: nil})
			return smc
		},
		// sub: pops right then left operand and pushes left - right.
		"sub": func(smc SMC) SMC {
			var t1 *(Tree)
			var t0 *(Tree)
			var value0 = big.NewInt(0)
			var value1 = big.NewInt(0)
			smc, t1 = getTreeFromValueStack(smc)
			smc, t0 = getTreeFromValueStack(smc)
			value0, err0 := value0.SetString(t0.Value, 10)
			if !err0 {
				value0 = big.NewInt(0)
				value0.SetString(findValue(t0, smc), 10)
			}
			value1, err1 := value1.SetString(t1.Value, 10)
			if !err1 {
				value1 = big.NewInt(0)
				value1, err1 = value1.SetString(findValue(t1, smc), 10)
			}
			res := value0.Sub(value0, value1)
			smc.S = smc.S.push(&Tree{Value: res.String(), Sons: nil})
			return smc
		},
		// mul: pops two operands and pushes their product.
		"mul": func(smc SMC) SMC {
			var num = 2
			var t = new(Tree)
			var product = big.NewInt(1)
			for i := 0; i < num; i++ {
				smc, t = getTreeFromValueStack(smc)
				value, err := big.NewInt(0).SetString(t.Value, 10)
				if !err {
					value = big.NewInt(0)
					value, err = value.SetString(findValue(t, smc), 10)
				}
				product.Mul(product, value)
			}
			smc.S = smc.S.push(&Tree{Value: product.String(), Sons: nil})
			return smc
		},
		// div: pops right then left operand and pushes left / right
		// (big.Int Euclidean division).
		"div": func(smc SMC) SMC {
			var t1 *(Tree)
			var t0 *(Tree)
			smc, t1 = getTreeFromValueStack(smc)
			smc, t0 = getTreeFromValueStack(smc)
			value0, err0 := big.NewInt(0).SetString(t0.Value, 10)
			if !err0 {
				value0 = big.NewInt(0)
				value0, err0 = value0.SetString(findValue(t0, smc), 10)
			}
			value1, err1 := big.NewInt(0).SetString(t1.Value, 10)
			if !err1 {
				value1 = big.NewInt(0)
				value1, err1 = value1.SetString(findValue(t1, smc), 10)
			}
			smc.S = smc.S.push(&Tree{Value: value0.Div(value0, value1).String(), Sons: nil})
			return smc
		},
		// and: pops two booleans (literal or from memory) and pushes their conjunction.
		"and": func(smc SMC) SMC {
			var num = 2
			var t = new(Tree)
			var result = true
			var bl = false
			for i := 0; i < num; i++ {
				smc, t = getTreeFromValueStack(smc)
				var str = t.toString()
				value, found := smc.M[str]
				if found {
					bl, _ = value.toBool()
				} else {
					bl = (str == "true")
				}
				//boolValue := (str == "true")
				result = result && bl
			}
			smc.S = smc.S.push(&Tree{Value: BtoA(result), Sons: nil})
			return smc
		},
		// or: pops two booleans and pushes their disjunction.
		"or": func(smc SMC) SMC {
			var num = 2
			var t = new(Tree)
			var result = false
			var bl = false
			for i := 0; i < num; i++ {
				smc, t = getTreeFromValueStack(smc)
				var str = t.toString()
				value, found := smc.M[str]
				if found {
					bl, _ = value.toBool()
				} else {
					bl = (str == "true")
				}
				//boolValue := (str == "true")
				result = result || bl
			}
			smc.S = smc.S.push(&Tree{Value: BtoA(result), Sons: nil})
			return smc
		},
		// neg: pops one boolean and pushes its negation.
		"neg": func(smc SMC) SMC {
			value := new(Tree)
			smc, value = getTreeFromValueStack(smc)
			str := value.toString()
			var bl = true
			boolVal, found := smc.M[str]
			if found {
				bl, _ = boolVal.toBool()
			} else {
				bl = (str == "true")
			}
			boolValue := !bl
			smc.S = smc.S.push(&Tree{Value: BtoA(boolValue), Sons: nil})
			return smc
		},
		// Comparison operators delegate to resolveDesigualdade; operands are
		// popped right-hand side first.
		"eq": func(smc SMC) SMC {
			t1 := new(Tree)
			t2 := new(Tree)
			smc, t1 = getTreeFromValueStack(smc)
			smc, t2 = getTreeFromValueStack(smc)
			smc.S = smc.S.push(resolveDesigualdade("eq", smc, t1, t2))
			return smc
		},
		"gt": func(smc SMC) SMC {
			t1 := new(Tree)
			t2 := new(Tree)
			smc, t1 = getTreeFromValueStack(smc)
			smc, t2 = getTreeFromValueStack(smc)
			smc.S = smc.S.push(resolveDesigualdade("gt", smc, t1, t2))
			return smc
		},
		"ge": func(smc SMC) SMC {
			t1 := new(Tree)
			t2 := new(Tree)
			smc, t1 = getTreeFromValueStack(smc)
			smc, t2 = getTreeFromValueStack(smc)
			smc.S = smc.S.push(resolveDesigualdade("ge", smc, t1, t2))
			return smc
		},
		"lt": func(smc SMC) SMC {
			t1 := new(Tree)
			t2 := new(Tree)
			smc, t1 = getTreeFromValueStack(smc)
			smc, t2 = getTreeFromValueStack(smc)
			smc.S = smc.S.push(resolveDesigualdade("lt", smc, t1, t2))
			return smc
		},
		"le": func(smc SMC) SMC {
			t1 := new(Tree)
			t2 := new(Tree)
			smc, t1 = getTreeFromValueStack(smc)
			smc, t2 = getTreeFromValueStack(smc)
			smc.S = smc.S.push(resolveDesigualdade("le", smc, t1, t2))
			return smc
		},
		// while: pops the condition result, the condition expression and the
		// body; when true, re-pushes the whole while node followed by the
		// body, so the body runs and the loop is re-evaluated afterwards.
		"while": func(smc SMC) SMC {
			var result = new(Tree)
			var holdInterface interface{}
			smc.S, holdInterface, _ = smc.S.pop()
			result = holdInterface.(*Tree)
			var exp = new(Tree)
			var bloco = new(Tree)
			smc, exp = getTreeFromValueStack(smc)
			smc, bloco = getTreeFromValueStack(smc)
			if result.toString() == "true" {
				smc.C = smc.C.push(Tree{Value: "while", Sons: append(append(initSons(), exp), bloco)})
				smc.C = smc.C.push(*bloco)
			}
			return smc
		},
		// if: pops the condition result and both branches, then pushes the
		// chosen branch onto the control stack.
		"if": func(smc SMC) SMC {
			var result = new(Tree)
			smc, result = getTreeFromValueStack(smc)
			var blocoIf = new(Tree)
			var blocoElse = new(Tree)
			smc, blocoIf = getTreeFromValueStack(smc)
			smc, blocoElse = getTreeFromValueStack(smc)
			if result.toString() == "true" {
				smc.C = smc.C.push(*blocoIf)
			} else {
				smc.C = smc.C.push(*blocoElse)
			}
			return smc
		},
		// ass: assignment — pops value then identifier and updates memory;
		// panics when the identifier has no memory cell.
		"ass": func(smc SMC) SMC {
			ident := new(Tree)
			value := new(Tree)
			smc, value = getTreeFromValueStack(smc)
			smc, ident = getTreeFromValueStack(smc)
			var found bool
			smc, found = changeValueInMemory(ident, value, smc)
			if !found {
				panic(fmt.Sprint("Variable %s not declared.", value.Value))
			}
			return smc
		},
		// ref: variable declaration — snapshots the environment (restored by
		// "blk"), records the identifier on T, and allocates the variable.
		"ref": func(smc SMC) SMC {
			value := new(Tree)
			ident := new(Tree)
			smc, value = getTreeFromValueStack(smc)
			smc, ident = getTreeFromValueStack(smc)
			smc.T = smc.T.push(ident.toString())
			copyOfEnviroment := make(map[string]EnviromentValue)
			for key, value := range smc.E {
				copyOfEnviroment[key] = value
			}
			smc.S = smc.S.push(copyOfEnviroment)
			smc = createVariable(ident, value, smc)
			return smc
		},
		// cns: constant declaration — same snapshot dance as "ref" but binds
		// the value directly in the environment.
		"cns": func(smc SMC) SMC {
			value := new(Tree)
			ident := new(Tree)
			smc, value = getTreeFromValueStack(smc)
			smc, ident = getTreeFromValueStack(smc)
			copyOfEnviroment := make(map[string]EnviromentValue)
			for key, value := range smc.E {
				copyOfEnviroment[key] = value
			}
			smc.S = smc.S.push(copyOfEnviroment)
			smc.T = smc.T.push(ident.toString())
			smc = createConst(ident, value, smc)
			return smc
		},
		// seq: sequencing is a no-op; its children were already pushed.
		"seq": func(smc SMC) SMC {
			return smc
		},
		// blk: block exit — frees the block's variable and restores the
		// environment snapshot taken by "ref"/"cns".
		"blk": func(smc SMC) SMC {
			var ident interface{}
			smc.T, ident, _ = smc.T.pop()
			strIdent := ident.(string)
			smc = cleanMemory(smc, strIdent)
			var enviroment map[string]EnviromentValue
			smc, enviroment = getEnviromentFromValueStack(smc)
			smc.E = enviroment
			return smc
		},
		// print: pops one value and prints it, trying in order: a quoted
		// string literal, a boolean literal, an integer literal, a float
		// literal, and finally an identifier looked up in the environment.
		"print": func(smc SMC) SMC {
			smc, value := getTreeFromValueStack(smc)
			str := value.Value
			ln := len(str)
			if ln > 0 && str[0] == '"' {
				// Strip the surrounding quotes before printing.
				str = str[1:]
				str = str[:ln-2]
				fmt.Println(str)
				return smc
			}
			if str == "false" || str == "true" {
				fmt.Println(str)
				return smc
			}
			num, err := big.NewInt(0).SetString(str, 10)
			if err {
				fmt.Println(num)
				return smc
			} /*else {
				num = big.NewInt(0)
				num, err = big.NewInt(0).SetString(findValue(value, smc), 10)
				if err {
					fmt.Println(num)
				} else {
					//panic(fmt.Sprintf("Variable %s not declared", str))
				}
			}*/
			num2, err2 := big.NewFloat(0).SetString(str)
			if err2 {
				fmt.Println(num2)
				return smc
			}
			val, err3 := findVar(value, smc)
			if err3 {
				panic(fmt.Sprintf("Variable %s not declared", str))
			}
			fmt.Println(val.MyString())
			return smc
		},
		// cal: procedure call — looks up the procedure's abstraction in the
		// environment, wraps each formal/actual pair in nested "blk"/"ref"
		// nodes (so parameters are scoped to the call), appends the body,
		// and pushes the resulting tree onto the control stack.
		"cal": func(smc SMC) SMC {
			smc, actuals := getTreeFromValueStack(smc)
			smc, idProc := getTreeFromValueStack(smc)
			abstract, procFound := smc.E[idProc.toString()].(*Tree)
			if !procFound {
				panic(fmt.Sprint("Procedure %s not declared.", idProc.Value))
			}
			initial := Tree{Value: "blk", Sons: initSons()}
			current := &initial
			execution := abstract.Sons[0]
			if len(abstract.Sons) > 1 {
				execution = abstract.Sons[1]
				listaFormals := abstract.Sons[0]
				if len(listaFormals.Sons) != len(actuals.Sons) {
					panic(fmt.Sprint("Procedure %s needs %d parameters but %d was given.", idProc.Value, len(listaFormals.Sons), len(actuals.Sons)))
				}
				for i := 0; i < len(listaFormals.Sons); i++ {
					identifier := listaFormals.Sons[i]
					value := actuals.Sons[i]
					ref := Tree{Value: "ref", Sons: append(append(initSons(), identifier), value)}
					(*current).Sons = append((*current).Sons, &ref)
					if i+1 < len(listaFormals.Sons) {
						blk := Tree{Value: "blk", Sons: initSons()}
						(*current).Sons = append((*current).Sons, &blk)
						current = &blk
					}
				}
			}
			(*current).Sons = append((*current).Sons, execution)
			smc.C = smc.C.push(initial)
			return smc
		},
	}
	return evaluate
}
// iniciaSMC returns a fresh machine state with its environment and memory
// maps initialized. The stacks (S, C, T) rely on their usable zero values.
func iniciaSMC() SMC {
	// Idiom fix: a plain zero-value declaration replaces `*new(SMC)`.
	var smc SMC
	smc.E = make(map[string]EnviromentValue)
	smc.M = make(map[string]Var)
	return smc
}
// En implements the transition (S, M, n·C) => (n·S, M, C): the value on top
// of the control stack C is moved onto the value stack S.
func (smc SMC) En() SMC {
	// Fix: the original allocated a throwaway new(Tree) that pop immediately
	// overwrote.
	var dado *Tree
	smc.C, dado = smc.C.pop()
	smc.S = smc.S.push(dado)
	return smc
}
// Ei pops the operation on top of the control stack C and runs its handler
// from the evaluate table.
func (smc SMC) Ei() SMC {
	// Fix: the original allocated a throwaway (new(Tree)) that pop
	// immediately overwrote.
	var operacao *Tree
	smc.C, operacao = smc.C.pop()
	smc = evaluate[operacao.toString()](smc)
	return smc
}
// printaOperandos prints the root value of each tree in forest, one per line
// (debug helper).
func printaOperandos(forest []*Tree) {
	for _, tree := range forest {
		fmt.Println(tree.Value)
	}
}
// push_tree decomposes tree onto the control stack: control-flow nodes are
// delegated to their dismember handler; everything else pushes the operation
// followed by its children in reverse, so the children evaluate first.
func (smc SMC) push_tree(tree *Tree) SMC {
	value, sons := tree.dismember()
	if handler, isCtrl := dismember[value]; isCtrl {
		return handler(smc, sons)
	}
	smc.C = smc.C.push(Tree{Value: value, Sons: nil})
	for i := len(sons) - 1; i >= 0; i-- {
		smc.C = smc.C.push(*sons[i])
	}
	return smc
}
// criaMapaDismember builds the table of control-flow decomposition handlers:
// nodes whose children must NOT all be evaluated eagerly (loops, branches,
// procedure declarations) get special stack treatment here.
func criaMapaDismember() map[string]func(SMC, []*Tree) SMC {
	var dismember = map[string]func(SMC, []*Tree) SMC{
		// while: evaluate only the condition now; stash the body and the
		// condition on the value stack for the "while" evaluator to reuse.
		"while": func(smc SMC, forest []*Tree) SMC {
			smc.C = smc.C.push(Tree{Value: "while", Sons: nil})
			smc.C = smc.C.push(*forest[0])
			smc.S = smc.S.push(forest[1])
			smc.S = smc.S.push(forest[0])
			return smc
		},
		// if: evaluate only the condition now; stash both branches
		// (else below then) for the "if" evaluator.
		"if": func(smc SMC, forest []*Tree) SMC {
			smc.C = smc.C.push(Tree{Value: "if", Sons: nil})
			smc.C = smc.C.push(*forest[0])
			smc.S = smc.S.push(forest[2])
			smc.S = smc.S.push(forest[1])
			return smc
		},
		// prc: procedure declaration — store an "abs" tree (formals + body,
		// or just the body) in the environment under the procedure's name.
		"prc": func(smc SMC, forest []*Tree) SMC {
			nameOfProc := forest[0].toString()
			var abstract *Tree
			if len(forest) > 2 {
				formals := forest[1]
				block := forest[2]
				abstract = &Tree{Value: "abs", Sons: append(append(initSons(), formals), block)}
			} else {
				block := forest[1]
				abstract = &Tree{Value: "abs", Sons: append(initSons(), block)}
			}
			smc.E[nameOfProc] = abstract
			return smc
		},
	}
	return dismember
}
// dismember splits a tree node into its root value and its children.
func (tree Tree) dismember() (string, []*Tree) {
	return tree.Value, tree.Sons
}
// printMap prints every memory cell as " key:value" (debug helper; map
// iteration order is random).
func printMap(m *map[string]Var) {
	for key, val := range *m {
		fmt.Printf(" %s:%s", key, val.toString())
	}
}
// printAmbiente prints every environment binding as " key:value" (debug
// helper; map iteration order is random).
func printAmbiente(m *map[string]EnviromentValue) {
	for key, val := range *m {
		fmt.Printf(" %s:%s", key, val.converteParaString())
	}
}
// printSmc prints the whole machine state on one line as
// "< E , S , M , C >" followed by a newline (debug helper).
func (smc *SMC) printSmc() {
	fmt.Print("<")
	printAmbiente(&smc.E)
	fmt.Print(", ")
	smc.S.print()
	fmt.Print(",")
	printMap(&smc.M)
	fmt.Print(", ")
	smc.C.print()
	fmt.Println(">")
}
// resolverSMC is the interpreter's driver loop: it installs the evaluation
// and dismember tables into the package-level maps, pushes the program tree t
// onto the control stack, and steps the machine until the control stack is
// empty. When verbose is set, the full machine state is printed after every
// step.
func resolverSMC(smc SMC, t Tree, verbose bool) SMC {
	evaluate = criaMapa()
	dismember = criaMapaDismember()
	smc.C = smc.C.push(t)
	if verbose {
		smc.printSmc()
	}
	//fmt.Println(smc)
	for smc.C.size() > 0 {
		// Peek at the top of C without committing the pop yet.
		_, op := smc.C.pop()
		if op.checkIfNode() || op.toString() == "act" {
			_, isOperation := evaluate[op.toString()]
			if isOperation {
				// Top of the stack is a known operation: evaluate it.
				smc = smc.Ei()
			} else {
				// Top of the stack is an operand: move it to the value stack.
				smc = smc.En()
			}
		} else {
			// Top of the stack is a tree with more than one node:
			// pop it for real and decompose it.
			smc.C, _ = smc.C.pop()
			smc = smc.push_tree(op)
		}
		//fmt.Println(smc)
		if verbose {
			smc.printSmc()
		}
	}
	return smc
}
|
package cmd
import (
"os/exec"
"path/filepath"
"github.com/brainicorn/skelp/generator"
"github.com/spf13/cobra"
)
const (
	// defaultCompletionDir is where bash sources completion scripts from.
	defaultCompletionDir = "/etc/bash_completion.d/"
)

var (
	// completionDir is the install target directory (flag: --output).
	completionDir string
	// noSudo disables the sudo-wrapped copy (flag: --no-sudo).
	noSudo bool
)
// newBashmeCommand builds the "bashme" cobra command, which generates and
// installs a bash completion file for skelp.
func newBashmeCommand() *cobra.Command {
	cmd := &cobra.Command{
		Use:   "bashme",
		Short: "Creates a bash completion file for skelp",
		Long: `Creates a bash completion file for skelp.
By default the completion file is written to /etc/bash_completion.d/ using sudo.`,
		RunE: executeBashme,
	}
	cmd.Flags().BoolVar(&noSudo, "no-sudo", false, "will try to write the completion file without using sudo")
	cmd.Flags().StringVar(&completionDir, "output", defaultCompletionDir, "path to the directory where the completion file will be written")
	return cmd
}
// executeBashme generates skelp's bash completion script under the skelp home
// directory and copies it into completionDir (via sudo unless --no-sudo).
func executeBashme(cmd *cobra.Command, args []string) error {
	opts := generator.DefaultOptions()
	gen := generator.New(opts)
	skelpHome, err := gen.InitSkelpHome()
	if err != nil {
		return err
	}
	completionFilePath := filepath.Join(skelpHome, "completion.sh")
	if err := cmd.Parent().GenBashCompletionFile(completionFilePath); err != nil {
		return err
	}
	// Bug fix: the inner variable was named "cmd", shadowing the
	// *cobra.Command parameter used for Println below.
	copyCmd := "sudo"
	copyArgs := []string{"cp", completionFilePath, completionDir}
	if noSudo {
		copyCmd = "cp"
		copyArgs = []string{completionFilePath, completionDir}
	}
	if err := exec.Command(copyCmd, copyArgs...).Run(); err != nil {
		return err
	}
	cmd.Println("bash completion successfully installed")
	cmd.Println("please close and reopen your terminal")
	return nil
}
|
package main
import "fmt"
// OCP - a type should be open for extension but closed for modification.
// This file demonstrates the Open/Closed Principle with product filters.

// Color enumerates the supported product colors.
type Color int

// iota assigns 0 to the first constant in the block and increments by one
// for each following constant.
const (
	brown Color = iota // brown is of type Color and has value 0
	red // red has value of 0 + 1 = 1
	orange // orange = 2
	yellow // = 3
	green
	blue
	purple
	grep // NOTE(review): likely a typo for "grey" — confirm before renaming
	white
	black
)

// Size enumerates the supported product sizes.
type Size int

// Sizes also use iota: small is 0 and each later constant is one greater.
const (
	small Size = iota // small is type Size with value of 0
	medium // = 1
	large
	xlarge
	xxlarge
)

// Product holds a product's name together with its color and size.
type Product struct {
	name string
	color Color
	size Size
}

// Filter groups the product-filtering methods; it carries no state.
type Filter struct {
	//
}
// FilterByClolor returns pointers to every product in p whose color equals c.
// (The misspelled method name is preserved so existing callers keep working.)
func (f *Filter) FilterByClolor(p []Product, c Color) []*Product {
	matched := []*Product{}
	for idx := range p {
		if p[idx].color != c {
			continue
		}
		matched = append(matched, &p[idx])
	}
	return matched
}
// FilterbySize returns pointers to every product in p whose size equals s.
func (f *Filter) FilterbySize(p []Product, s Size) []*Product {
	matched := []*Product{}
	for idx := range p {
		if p[idx].size != s {
			continue
		}
		matched = append(matched, &p[idx])
	}
	return matched
}
// FilterBySizeColor returns pointers to every product in p matching both the
// given size and color. (Adding one method per attribute combination is the
// OCP violation this example illustrates.)
func (f *Filter) FilterBySizeColor(p []Product, s Size, c Color) []*Product {
	matched := []*Product{}
	for idx := range p {
		if p[idx].size == s && p[idx].color == c {
			matched = append(matched, &p[idx])
		}
	}
	return matched
}
// main builds a small product catalog and prints the result of each filter.
func main() {
	// Sample catalog of products.
	products := []Product{
		{"Apple", green, small},
		{"Tree", green, large},
		{"Car", red, large},
		{"Boat", green, medium},
	}
	fmt.Println(products)

	f := Filter{}

	fmt.Println("Green products (basic):")
	for _, item := range f.FilterByClolor(products, green) {
		fmt.Printf(" - %s green\n", item.name)
	}

	fmt.Println("Medium size products")
	for _, item := range f.FilterbySize(products, medium) {
		fmt.Printf(" - %s medium\n", item.name)
	}

	fmt.Println("Red and Large Products")
	for _, item := range f.FilterBySizeColor(products, large, red) {
		fmt.Printf(" - %s large, red\n", item.name)
	}
}
|
package leetcode
import "testing"
// Test_alienOrder runs table-driven cases against alienOrder, including an
// input with contradictory letter ordering (expected result: empty string).
func Test_alienOrder(t *testing.T) {
	cases := []struct {
		name  string
		words []string
		want  string
	}{
		{name: "test_alienOrder01", words: []string{"wrt", "wrf", "er", "ett", "rftt"}, want: "wertf"},
		{name: "test_alienOrder02", words: []string{"z", "x"}, want: "zx"},
		{name: "test_alienOrder03", words: []string{"z", "x", "z"}, want: ""},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got := alienOrder(tc.words)
			if got != tc.want {
				t.Errorf("alienOrder() = %v, want %v", got, tc.want)
			}
		})
	}
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package policy
import (
"context"
"time"
"chromiumos/tast/common/fixture"
"chromiumos/tast/common/pci"
"chromiumos/tast/common/policy"
"chromiumos/tast/common/policy/fakedms"
"chromiumos/tast/ctxutil"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/browser"
"chromiumos/tast/local/chrome/lacros"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/faillog"
"chromiumos/tast/local/chrome/uiauto/nodewith"
"chromiumos/tast/local/chrome/uiauto/role"
"chromiumos/tast/local/policyutil"
"chromiumos/tast/testing"
)
// init registers the LacrosSecondaryProfilesAllowed test with the Tast
// framework, declaring its fixture (policy-enabled logged-in Chrome),
// software dependencies, attributes, and contact/ownership metadata.
func init() {
	testing.AddTest(&testing.Test{
		Func:         LacrosSecondaryProfilesAllowed,
		Desc:         "Behavior of LacrosSecondaryProfilesAllowed policy",
		LacrosStatus: testing.LacrosVariantExists,
		Contacts: []string{
			"anastasiian@chromium.org", // Test author
			"chromeos-commercial-identity@google.com",
		},
		SoftwareDeps: []string{"chrome", "lacros"},
		Attr:         []string{"group:mainline", "informational"},
		Fixture:      fixture.LacrosPolicyLoggedIn,
		SearchFlags: []*testing.StringPair{
			pci.SearchFlag(&policy.LacrosSecondaryProfilesAllowed{}, pci.VerifiedFunctionalityUI),
		},
	})
}
// LacrosSecondaryProfilesAllowed verifies that the LacrosSecondaryProfilesAllowed
// policy controls whether the "Add" and "Guest" entries are offered in the
// Lacros browser's profile menu. For each policy value (true / false / unset)
// the test resets Chrome, serves the policy via the fake DMS, opens the
// profile menu, and checks the presence of both buttons against expectations.
func LacrosSecondaryProfilesAllowed(ctx context.Context, s *testing.State) {
	cr := s.FixtValue().(chrome.HasChrome).Chrome()
	fdms := s.FixtValue().(fakedms.HasFakeDMS).FakeDMS()

	// Browser controls to open a profile:
	profileToolbarButton := nodewith.ClassName("AvatarToolbarButton").Role(role.Button).Focusable()
	profileMenu := nodewith.NameStartingWith("Accounts and sync").Role(role.Menu)
	otherProfilesLabel := nodewith.Name("Other profiles").Role(role.StaticText).Ancestor(profileMenu)

	// 'Add' and 'Guest' profile buttons.
	addProfileButton := nodewith.Name("Add").Role(role.Button).Focusable().Ancestor(profileMenu)
	guestProfileButton := nodewith.Name("Guest").Role(role.Button).Focusable().Ancestor(profileMenu)

	// Reserve ten seconds for cleanup: cleanupCtx keeps the full deadline
	// while ctx is shortened for the test body.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 10*time.Second)
	defer cancel()

	for _, param := range []struct {
		// name is the subtest name.
		name string
		// value is the policy value.
		value *policy.LacrosSecondaryProfilesAllowed
		// newProfileOrGuestAllowed is the policy result
		newProfileOrGuestAllowed bool
	}{
		{
			name:                     "true",
			value:                    &policy.LacrosSecondaryProfilesAllowed{Val: true},
			newProfileOrGuestAllowed: true,
		},
		{
			name:                     "false",
			value:                    &policy.LacrosSecondaryProfilesAllowed{Val: false},
			newProfileOrGuestAllowed: false,
		},
		{
			// Unset: the test expects the same behavior as "false".
			name:                     "unset",
			value:                    &policy.LacrosSecondaryProfilesAllowed{Stat: policy.StatusUnset},
			newProfileOrGuestAllowed: false,
		},
	} {
		s.Run(ctx, param.name, func(ctx context.Context, s *testing.State) {
			// Perform cleanup so earlier subtests cannot leak state.
			if err := policyutil.ResetChrome(ctx, fdms, cr); err != nil {
				s.Fatal("Failed to clean up: ", err)
			}

			// Update policies.
			if err := policyutil.ServeAndRefresh(ctx, fdms, cr, []policy.Policy{param.value}); err != nil {
				s.Fatal("Failed to update policies: ", err)
			}

			// Setup the browser. Note: cr is intentionally rebound here,
			// shadowing the outer variable within this subtest.
			cr, l, _, err := lacros.Setup(ctx, s.FixtValue(), browser.TypeLacros)
			if err != nil {
				s.Fatal("Failed to initialize test: ", err)
			}
			// Close with cleanupCtx so teardown has time even if ctx expired.
			defer lacros.CloseLacros(cleanupCtx, l)

			// Connect to Test API to use it with the UI library.
			tconn, err := cr.TestAPIConn(ctx)
			if err != nil {
				s.Fatal("Failed to connect Test API: ", err)
			}
			defer faillog.DumpUITreeWithScreenshotOnError(ctx, s.OutDir(), s.HasError, cr, "ui_tree_"+param.name)

			ui := uiauto.New(tconn)
			if err := uiauto.Combine("Open profile toolbar",
				ui.WaitUntilExists(profileToolbarButton),
				ui.LeftClick(profileToolbarButton),
				ui.WaitUntilExists(otherProfilesLabel),
			)(ctx); err != nil {
				s.Fatal("Failed to open profile toolbar: ", err)
			}

			// Test 'Add profile' button: it should exist exactly when the
			// policy allows secondary profiles.
			newProfileEnabled := true
			if err := ui.Exists(addProfileButton)(ctx); err != nil {
				newProfileEnabled = false
			}
			if newProfileEnabled != param.newProfileOrGuestAllowed {
				s.Errorf("Unexpected new profile behavior: got %t; want %t", newProfileEnabled, param.newProfileOrGuestAllowed)
			}

			// Test 'Guest' button.
			// NOTE(review): this check uses s.Fatalf while the parallel 'Add'
			// check above uses s.Errorf — confirm whether the asymmetry is
			// intentional.
			guestProfileEnabled := true
			if err := ui.Exists(guestProfileButton)(ctx); err != nil {
				guestProfileEnabled = false
			}
			if guestProfileEnabled != param.newProfileOrGuestAllowed {
				s.Fatalf("Unexpected guest profile behavior: got %t; want %t", guestProfileEnabled, param.newProfileOrGuestAllowed)
			}
		})
	}
}
|
package parser
import (
"errors"
"strings"
"github.com/DataDrake/cuppa/version"
"github.com/autamus/go-parspack/pkg"
)
// ParseVersion returns the value of a version tuple.
//
// It expects the scanner's current token to be a version declaration,
// consumes tokens until the closing parenthesis, and fills in the version
// value plus any recognized keyword fields (checksum, url, branch,
// submodules, expand, commit, extension, tag).
func (p *Parser) ParseVersion() (result pkg.Version, err error) {
	// Watch for the end of the version (a token ending in ")").
	end := false

	// Look at the current token without consuming it so we can validate it.
	// ("Peak" is the scanner's method name — presumably a misspelling of "peek".)
	token := p.scnr.Peak()
	if !token.IsVersion() {
		return result, errors.New("called ParseVersion without the beginning token being a version declaration")
	}

	// Parse Version Value: strip a leading "version(" from the token. If
	// anything remains, push it back so ParseString reads the value from it;
	// otherwise advance to the next token first.
	noprefix := strings.TrimPrefix(strings.ToLower(token.Data), "version(")
	if noprefix != "" {
		p.scnr.SetToken(noprefix)
	} else {
		p.scnr.Next()
	}
	value, err := p.ParseString()
	if err != nil {
		return result, err
	}
	// A trailing ")" on the first token means the declaration contained only
	// the version value, so the keyword loop below is skipped entirely.
	if strings.HasSuffix(noprefix, ")") {
		end = true
	}
	result.Value = version.NewVersion(value)

	// Check for N/A version value: when cuppa could not parse the string,
	// fall back to the raw value as a single component.
	if result.Value.String() == "N/A" {
		result.Value = []string{value}
	}

	// Consume the remaining keyword arguments until the closing ")".
	for !end {
		token, err = p.scnr.Next()
		if err != nil {
			return result, err
		}
		// Strip the argument terminator: ")" ends the tuple, "," separates
		// arguments. The trimmed token is pushed back for the sub-parsers.
		if strings.HasSuffix(token.Data, ")") {
			end = true
			token.Data = strings.TrimSuffix(token.Data, ")")
		} else {
			token.Data = strings.TrimSuffix(token.Data, ",")
		}
		p.scnr.SetToken(token.Data)

		// Dispatch on the keyword kind; tokens matching none of the cases
		// are silently skipped.
		switch {
		case token.IsChecksum():
			// Parse Checksum (taken verbatim from the token).
			result.Checksum = token.Data
		case token.IsURL():
			result.URL, err = p.ParseURL()
			if err != nil {
				return result, err
			}
		case token.IsBranch():
			result.Branch, err = p.ParseBranch()
			if err != nil {
				return result, err
			}
		case token.IsSubmodule():
			result.Submodules, err = p.ParseSubmodule()
			if err != nil {
				return result, err
			}
		case token.IsExpand():
			result.Expand, err = p.ParseExpand()
			if err != nil {
				return result, err
			}
		case token.IsCommit():
			result.Commit, err = p.ParseCommit()
			if err != nil {
				return result, err
			}
		case token.IsExtension():
			result.Extension, err = p.ParseExtension()
			if err != nil {
				return result, err
			}
		case token.IsTag():
			result.Tag, err = p.ParseTag()
			if err != nil {
				return result, err
			}
		}
	}
	return result, nil
}
|
package exec
import (
"errors"
"fmt"
"github.com/pgavlin/warp/wasm"
)
// ErrDataSegmentDoesNotFit should be returned by Instantiate if a data segment attempts to write outside of
// its target memory's bounds.
var ErrDataSegmentDoesNotFit = errors.New("data segment does not fit")

// ErrElementSegmentDoesNotFit should be returned by Instantiate if an element segment attempts to write outside
// of its target table's bounds.
var ErrElementSegmentDoesNotFit = errors.New("element segment does not fit")

// An InvalidTableIndexError reports a table index that is not valid in the
// table index space; the value is the offending index.
type InvalidTableIndexError uint32

// Error implements the error interface.
func (e InvalidTableIndexError) Error() string {
	return fmt.Sprintf("wasm: Invalid table to table index space: %d", uint32(e))
}
// An ExportNotFoundError is returned by InstantiateModule if an export could not be found.
type ExportNotFoundError struct {
	// ModuleName is the name of the module that was searched.
	ModuleName string
	// FieldName is the name of the export that could not be found.
	FieldName string
}

// A KindMismatchError reports that an import and the export resolved for it
// have different external kinds (e.g. a function import matched to a memory
// export).
type KindMismatchError struct {
	ModuleName string
	FieldName  string
	// Import is the external kind declared by the import.
	Import wasm.External
	// Export is the external kind of the resolved export.
	Export wasm.External
}

// Error implements the error interface.
// NOTE(review): the format prints FieldName before ModuleName ("%s.%s"),
// which reads as field.module — confirm whether module.field was intended
// before changing the message.
func (e *KindMismatchError) Error() string {
	return fmt.Sprintf("wasm: mismatching import and export external kind values for %s.%s (%v, %v)", e.FieldName, e.ModuleName, e.Import, e.Export)
}
// Error implements the error interface, identifying the missing export by
// its field name and the module that was searched.
func (e *ExportNotFoundError) Error() string {
	const format = "wasm: couldn't find export with name %s in module %s"
	return fmt.Sprintf(format, e.FieldName, e.ModuleName)
}
// An ImportResolver resolves import entries to function, memory, table, and global instances.
//
// Each method receives the exporting module's name, the imported field's
// name, and the type declared by the import, and returns the resolved
// instance or an error.
type ImportResolver interface {
	ResolveFunction(moduleName, functionName string, type_ wasm.FunctionSig) (Function, error)
	ResolveMemory(moduleName, memoryName string, type_ wasm.Memory) (*Memory, error)
	ResolveTable(moduleName, tableName string, type_ wasm.Table) (*Table, error)
	ResolveGlobal(moduleName, globalName string, type_ wasm.GlobalVar) (*Global, error)
}

// A ModuleEventHandler responds to module allocations and instantiations.
type ModuleEventHandler interface {
	// ModuleAllocated is called with a newly allocated module.
	ModuleAllocated(m AllocatedModule) error
	// ModuleInstantiated is called with a newly instantiated module.
	ModuleInstantiated(m Module) error
}

// ModuleDefinition represents a WASM module definition.
type ModuleDefinition interface {
	// Allocate creates an allocated, uninitialized module with the given name from this module definition.
	Allocate(name string) (AllocatedModule, error)
}
// NewKindMismatchError creates a new error that reports a mismatch between an
// import and export kind. This function should be used to create the errors
// returned by Module.Get{Function,Table,Memory,Global} if the requested name
// refers to an export of a different kind.
func NewKindMismatchError(exportingModuleName, exportName string, importKind, exportKind wasm.External) error {
	mismatch := &KindMismatchError{
		ModuleName: exportingModuleName,
		FieldName:  exportName,
		Import:     importKind,
		Export:     exportKind,
	}
	return mismatch
}
// An AllocatedModule is an allocated but uninitialized WASM module.
// It embeds Module, so its exports are addressable before instantiation.
type AllocatedModule interface {
	Module

	// Instantiate initializes the allocated module with imports supplied by the given resolver.
	Instantiate(imports ImportResolver) (Module, error)
}

// A Module is an instantiated WASM module. The Get* accessors look up exports
// by name and fail if the name is missing or refers to a different kind.
type Module interface {
	// Name returns the name of this module.
	Name() string

	// GetFunction returns the exported function with the given name. If the function does not exist or the name
	// refers to an export of a different kind, this function returns an error.
	GetFunction(name string) (Function, error)

	// GetTable returns the exported table with the given name. If the table does not exist or the name
	// refers to an export of a different kind, this function returns an error.
	GetTable(name string) (*Table, error)

	// GetMemory returns the exported memory with the given name. If the memory does not exist or the name
	// refers to an export of a different kind, this function returns an error.
	GetMemory(name string) (*Memory, error)

	// GetGlobal returns the exported global with the given name. If the global does not exist or the name
	// refers to an export of a different kind, this function returns an error.
	GetGlobal(name string) (*Global, error)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.