text stringlengths 11 4.05M |
|---|
// Copyright 2020 The VectorSQL Authors.
//
// Code is licensed under Apache License, Version 2.0.
package expressions
import (
"testing"
"datavalues"
"github.com/stretchr/testify/assert"
)
// TestExpressionFor builds expressions from a mix of native Go numeric
// values plus a datavalues value, then walks the resulting expression
// list verifying every node is non-nil.
func TestExpressionFor(t *testing.T) {
	inputs := []interface{}{
		int64(1),
		int32(2),
		int16(2),
		byte(0x01),
		float64(1),
		float32(2),
		datavalues.ToValue(1),
	}

	built := expressionsFor(inputs...)
	assert.NotNil(t, built)

	// Every visited expression must be non-nil; returning true
	// continues the walk.
	visit := func(e IExpression) (bool, error) {
		assert.NotNil(t, e)
		return true, nil
	}
	assert.Nil(t, Walk(visit, built...))
}
|
package types
import (
"fault/ast"
"fmt"
"math"
"strings"
)
// TYPES ranks the language's value types for implicit conversion in
// mixed expressions: when two ranks differ, the smaller rank wins, and
// rank 0 (STRING) does not convert at all.
var TYPES = map[string]int{ //Convertible Types
	"STRING":    0, //Not convertible
	"BOOL":      1,
	"NATURAL":   2,
	"FLOAT":     3,
	"INT":       4,
	"UNCERTAIN": 5,
}

// COMPARE lists the operators whose result type is always BOOL.
var COMPARE = map[string]bool{
	">":  true,
	"<":  true,
	"==": true,
	"!=": true,
	"<=": true,
	">=": true,
	"&&": true,
	"||": true,
	"!":  true, //Prefix
}

// Type is an inferred type: its name (a TYPES key, or STOCK/FLOW for
// composite declarations), a Scope (decimal precision for floats), and
// Parameters (MEAN/SIGMA entries for UNCERTAIN values).
type Type struct {
	Type       string
	Scope      int32
	Parameters []Type
}

// Checker assigns types to every symbol in a spec. SymbolTypes maps a
// symbol name to either a *Type (constants) or a map[string]*Type
// (stock/flow property tables); scope names the stock/flow currently
// being processed.
type Checker struct {
	SymbolTypes map[string]interface{}
	scope       string
}
// Check type-checks the spec in two passes: pass one resolves globals
// and constants, pass two resolves stock/flow properties (which may
// reference the globals from pass one).
func (c *Checker) Check(a *ast.Spec) error {
	c.SymbolTypes = make(map[string]interface{})

	// Pass one, globals and constants
	if err := c.assigntype(a, 1); err != nil {
		return err
	}
	// Pass two, stock/flow properties
	return c.assigntype(a, 2)
}
// assigntype walks the statement tree rooted at exp and records types
// into c.SymbolTypes. pass selects the work done: pass 1 types
// constants/assertions and registers stock/flow scopes; pass 2 types
// the individual stock/flow properties.
func (c *Checker) assigntype(exp interface{}, pass int) error {
	var err error
	switch node := exp.(type) {
	case *ast.Spec:
		// Recurse into every top-level statement. Note only the error
		// from the last statement survives the loop; earlier failures
		// are overwritten by later iterations.
		for _, v := range node.Statements {
			err = c.assigntype(v, pass)
		}
		return err
	case *ast.SpecDeclStatement:
		return nil
	case *ast.ConstantStatement:
		if pass == 1 {
			id := node.Name.String()
			var valtype *Type
			// Literals/identifiers go through infer; compound
			// expressions through inferFunction.
			if c.isValue(node.Value) {
				valtype, err = c.infer(node.Value, make(map[string]ast.Expression))
			} else {
				valtype, err = c.inferFunction(node.Value, make(map[string]ast.Expression))
			}
			c.SymbolTypes[id] = valtype
		}
		return err
	case *ast.DefStatement:
		// Record which stock/flow we are inside so its literal can
		// register itself under that name.
		c.scope = node.Name.String()
		err = c.assigntype(node.Value, pass)
		return err
	case *ast.StockLiteral:
		if pass == 1 {
			// Register the stock's property table with a synthetic
			// "__type" entry marking it as a STOCK.
			newcontext := make(map[string]*Type)
			newcontext["__type"] = &Type{"STOCK", 0, nil}
			c.SymbolTypes[c.scope] = newcontext
		} else {
			// Pass 2: type each property; preparse supplies property
			// expressions so siblings can reference each other.
			properties := c.preparse(node.Pairs)
			for k, v := range node.Pairs {
				id := k.String()
				var valtype *Type
				if c.isValue(v) {
					valtype, err = c.infer(v, properties)
				} else {
					valtype, err = c.inferFunction(v, properties)
				}
				c.SymbolTypes[c.scope].(map[string]*Type)[id] = valtype
			}
		}
		c.scope = ""
		return err
	case *ast.FlowLiteral:
		// Identical to the StockLiteral case except the marker type is
		// FLOW.
		if pass == 1 {
			newcontext := make(map[string]*Type)
			newcontext["__type"] = &Type{"FLOW", 0, nil}
			c.SymbolTypes[c.scope] = newcontext
		} else {
			properties := c.preparse(node.Pairs)
			for k, v := range node.Pairs {
				id := k.String()
				var valtype *Type
				if c.isValue(v) {
					valtype, err = c.infer(v, properties)
				} else {
					valtype, err = c.inferFunction(v, properties)
				}
				c.SymbolTypes[c.scope].(map[string]*Type)[id] = valtype
			}
		}
		c.scope = ""
		return err
	case *ast.AssertionStatement:
		if pass == 1 {
			var valtype *Type
			if c.isValue(node.Expression) {
				valtype, err = c.infer(node.Expression, make(map[string]ast.Expression))
			} else {
				valtype, err = c.inferFunction(node.Expression, make(map[string]ast.Expression))
			}
			// Assertions must evaluate to a Boolean.
			if valtype.Type != "BOOL" {
				return fmt.Errorf("Assert statement not testing a Boolean expression. got=%s", valtype.Type)
			}
		}
		return err
	default:
		return fmt.Errorf("Unimplemented: %T", node)
	}
}
// isValue reports whether exp is a literal or identifier node that can
// be typed directly by infer, as opposed to a compound expression that
// must go through inferFunction.
func (c *Checker) isValue(exp interface{}) bool {
	switch exp.(type) {
	case *ast.IntegerLiteral,
		*ast.Boolean,
		*ast.FloatLiteral,
		*ast.StringLiteral,
		*ast.Identifier,
		*ast.Natural,
		*ast.Uncertain:
		return true
	}
	return false
}
// preparse collects, per property name, the expression that defines it:
// single-statement function bodies are unwrapped and instance
// expressions are reduced to the identifier of the stock they clone.
// Other value kinds are skipped.
func (c *Checker) preparse(pairs map[ast.Expression]ast.Expression) map[string]ast.Expression {
	out := make(map[string]ast.Expression, len(pairs))
	for key, value := range pairs {
		name := key.String()
		switch v := value.(type) {
		case *ast.FunctionLiteral:
			out[name] = c.preparseWalk(v)
		case *ast.InstanceExpression:
			out[name] = v.Stock.(*ast.Identifier)
		}
	}
	return out
}
// preparseWalk unwraps a one-statement function literal into its sole
// expression; multi-statement bodies yield nil.
func (c *Checker) preparseWalk(tree *ast.FunctionLiteral) ast.Expression {
	stmts := tree.Body.Statements
	if len(stmts) != 1 {
		return nil
	}
	return stmts[0].(*ast.ExpressionStatement).Expression
}
// infer returns the Type of a literal or identifier node.
//
// For identifiers, a dotted name ("stock.prop") resolves the property
// inside the named symbol's table; a bare name resolves the symbol
// itself or, failing that, a preparsed property alias from p.
//
// BUG FIX: the original indexed id[1] unconditionally, panicking with
// index-out-of-range on a bare identifier that resolved to a stock/flow
// table, and called .String() on a nil interface when p had no entry
// for the name. Both now fall through to the Unrecognized-type error.
func (c *Checker) infer(exp interface{}, p map[string]ast.Expression) (*Type, error) {
	switch node := exp.(type) {
	case *ast.IntegerLiteral:
		return &Type{"INT", 1, nil}, nil
	case *ast.Boolean:
		return &Type{"BOOL", 0, nil}, nil
	case *ast.FloatLiteral:
		// Scope encodes the decimal precision of the literal.
		scope := c.inferScope(node.Value)
		return &Type{"FLOAT", scope, nil}, nil
	case *ast.StringLiteral:
		return &Type{"STRING", 0, nil}, nil
	case *ast.Natural:
		return &Type{"NATURAL", 1, nil}, nil
	case *ast.Uncertain:
		params := c.inferUncertain(node)
		return &Type{"UNCERTAIN", 0, params}, nil
	case *ast.Identifier:
		id := strings.Split(node.Value, ".")
		if s, ok := c.SymbolTypes[id[0]]; ok {
			if ty, ok := s.(*Type); ok {
				return ty, nil
			}
			// Symbol is a stock/flow property table; it only resolves
			// further when a property segment is present.
			if m, ok := s.(map[string]*Type); ok && len(id) > 1 {
				return m[id[1]], nil
			}
		} else if prop, ok := p[id[0]]; ok {
			// The identifier may name a preparsed property aliasing
			// another stock; follow that alias.
			if s, ok := c.SymbolTypes[prop.String()]; ok {
				if ty, ok := s.(*Type); ok {
					return ty, nil
				}
				if m, ok := s.(map[string]*Type); ok && len(id) > 1 {
					return m[id[1]], nil
				}
			}
		}
		pos := node.Position()
		return nil, fmt.Errorf("Unrecognized type: line %d col %d got=%T", pos[0], pos[1], node)
	default:
		pos := node.(ast.Node).Position()
		return nil, fmt.Errorf("Unrecognized type: line %d col %d got=%T", pos[0], pos[1], node)
	}
}
// inferFunction types a compound expression: function literals,
// instance expressions, and infix/prefix operations.
//
// BUG FIXES relative to the original:
//   - the infix case compared the two *Type pointers with !=, which is
//     true for any two separately-allocated Types — so even
//     STRING-vs-STRING was reported as a mismatch; we now compare the
//     type names;
//   - inference errors were ignored and the (possibly nil) results
//     dereferenced, which could panic; errors now propagate immediately.
func (c *Checker) inferFunction(f ast.Expression, p map[string]ast.Expression) (*Type, error) {
	switch node := f.(type) {
	case *ast.FunctionLiteral:
		body := node.Body.Statements
		// A single-value body is typed directly.
		if len(body) == 1 && c.isValue(body[0].(*ast.ExpressionStatement).Expression) {
			return c.infer(body[0].(*ast.ExpressionStatement).Expression, p)
		}
		// Otherwise the function's type is the type of its final
		// statement; any statement failing to type aborts.
		var valtype *Type
		for i := 0; i < len(body); i++ {
			var err error
			valtype, err = c.inferFunction(body[i].(*ast.ExpressionStatement).Expression, p)
			if err != nil {
				return nil, err
			}
		}
		return valtype, nil
	case *ast.InstanceExpression:
		return &Type{"STOCK", 0, nil}, nil
	case *ast.InfixExpression:
		// Comparison/logical operators always produce BOOL.
		if COMPARE[node.Operator] {
			return &Type{"BOOL", 0, nil}, nil
		}
		left, err := c.inferOperand(node.Left, p)
		if err != nil {
			return nil, err
		}
		right, err := c.inferOperand(node.Right, p)
		if err != nil {
			return nil, err
		}
		if left.Type != right.Type {
			// Rank 0 (STRING) never converts; otherwise the side with
			// the smaller conversion rank wins.
			if TYPES[left.Type] == 0 || TYPES[right.Type] == 0 {
				return nil, fmt.Errorf("type mismatch: got=%s,%s", left.Type, right.Type)
			}
			if TYPES[left.Type] > TYPES[right.Type] {
				return right, nil
			}
			return left, nil
		}
		return left, nil
	case *ast.PrefixExpression:
		if COMPARE[node.Operator] {
			return &Type{"BOOL", 0, nil}, nil
		}
		return c.inferOperand(node.Right, p)
	}
	return nil, nil
}

// inferOperand types a single operand, dispatching to infer for plain
// values and to inferFunction for compound expressions.
func (c *Checker) inferOperand(e ast.Expression, p map[string]ast.Expression) (*Type, error) {
	if c.isValue(e) {
		return c.infer(e, p)
	}
	return c.inferFunction(e, p)
}
// inferScope derives the scope of a float literal from the position of
// the last significant digit in its "%f" (six-decimal) rendering.
func (c *Checker) inferScope(fl float64) int32 {
	parts := strings.Split(fmt.Sprintf("%f", fl), ".")
	return c.calculateBase(parts[1])
}
// inferUncertain builds the MEAN/SIGMA parameter types of an uncertain
// value, each scoped by its own decimal precision.
func (c *Checker) inferUncertain(node *ast.Uncertain) []Type {
	mean := Type{"MEAN", c.inferScope(node.Mean), nil}
	sigma := Type{"SIGMA", c.inferScope(node.Sigma), nil}
	return []Type{mean, sigma}
}
// calculateBase returns a power of ten reflecting the position of the
// last non-zero digit in the fractional-digit string s, or 1 when every
// digit is zero.
func (c *Checker) calculateBase(s string) int32 {
	digits := []rune(s)
	for i := len(digits) - 1; i >= 0; i-- {
		if digits[i] != '0' {
			return int32(math.Pow10(i + 1))
		}
	}
	return 1
}
|
// Copyright 2018 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package session
import (
"context"
"github.com/godbus/dbus/v5"
"chromiumos/tast/local/cryptohome"
"chromiumos/tast/local/session"
"chromiumos/tast/local/upstart"
"chromiumos/tast/testing"
)
// init registers the RejectDuplicate test with the Tast framework.
func init() {
	testing.AddTest(&testing.Test{
		Func:         RejectDuplicate,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Ensures that the session_manager won't start the same session twice",
		Contacts: []string{
			"hidehiko@chromium.org",
		},
		SoftwareDeps: []string{"chrome"},
		Attr:         []string{"group:mainline"},
	})
}
// RejectDuplicate restarts the UI job, creates a fresh cryptohome vault
// for a test user, starts a session for that user, and verifies that a
// second StartSession for the same user fails with the SessionExists
// D-Bus error.
func RejectDuplicate(ctx context.Context, s *testing.State) {
	// Restart session_manager so no session is active at test start.
	if err := upstart.RestartJob(ctx, "ui"); err != nil {
		s.Fatal("Failed to restart session_manager: ", err)
	}

	const user = "first_user@nowhere.com"

	// Create clean vault.
	if err := cryptohome.RemoveVault(ctx, user); err != nil {
		s.Fatalf("Failed to remove the vault for %s: %v", user, err)
	}
	if err := cryptohome.CreateVault(ctx, user, ""); err != nil {
		s.Fatalf("Failed to create a vault for %s: %v", user, err)
	}
	// Best-effort cleanup; removal errors at teardown are ignored.
	defer cryptohome.RemoveVault(ctx, user)

	// Start the first session.
	sm, err := session.NewSessionManager(ctx)
	if err != nil {
		s.Fatal("Failed to create session_manager binding: ", err)
	}
	if err := session.PrepareChromeForPolicyTesting(ctx, sm); err != nil {
		s.Fatal("Failed to prepare Chrome for testing: ", err)
	}
	if err = sm.StartSession(ctx, user, ""); err != nil {
		s.Fatalf("Failed to start new session for %s: %v", user, err)
	}

	// Second StartSession() should fail with SessionExists.
	// NOTE(review): this asserts the concrete error is a dbus.Error
	// value (not *dbus.Error) — confirm that matches what StartSession
	// actually returns.
	if err = sm.StartSession(ctx, user, ""); err == nil {
		s.Fatalf("Unexpectedly succeeded to start session for %s twice", user)
	} else if e, ok := err.(dbus.Error); !ok || e.Name != "org.chromium.SessionManagerInterface.SessionExists" {
		s.Error("Unexpected error: ", err)
	}
}
|
package main
import (
"crypto/sha512"
"encoding/hex"
"fmt"
"net/http"
"os/exec"
"strconv"
"strings"
"time"
)
// initialPassword is a cheap first-stage shared password checked before
// any hashing work is done.
// SECURITY(review): both this password and secretKey are hard-coded in
// source; move them to environment variables or a secrets store and
// rotate them.
var initialPassword = "hellop#firstsec"

// secretKey is mixed into the SHA-512 request signature.
var secretKey string = "z2Xm3m4Dr:/Rm2Gv5WdpCpDLdYVrqCgpcftYqMiqSXLu3esqzwfgpwxKqyDm765UnJttuw2CtxV2bunpTwmqvLeFTfrzdkA3Q6pNNGPwvrTDCBHFN4jPyWAj7X7wPrX7feiKxRni2PZc6go3Ksd7HETh6HbGRZgiZKtSdQohfwK9qNYWGF5975ePgGLTgykGGpFik3AmhxKRWN7NxzUVdotWkdzdkVCzLkGcVzi8C9BqDt9vekshWZoCvVNo8zFnph7ZvCN6n9ZrHpyhfaNNddPAPxCsDzWxRVbK7tHkrbvdPUxmM5D87LcmQBwDfJybvspvy23ZbCcufKER6xSXizMBxG3m6gZjj4nopRrRHVSieB4YEf2pCAeXH3GghMEJ3bEtFuGCeacQ8y3PgDZaoyZD92Lq6t6raRjdSxzYrHq4h7VGTRrBNzorsXD3VffkWusQCVigwgr6difcSxdUK7qVd4rX5VdJv"

// Accepted client-clock skew: timestamps more than minTime seconds in
// the past or maxTime seconds in the future are rejected.
var maxTime int32 = 60
var minTime int32 = 10

// maxDuration caps how long a temporary rule may live.
var maxDuration int = 3600 * 24 // max 1 day. for infinite use 0
// removeRule sleeps for duration seconds, then deletes the given ufw
// rule by re-running it with "delete" prepended. It blocks, so callers
// that must not wait should run it in a goroutine (as OpenExec does).
func removeRule(rule []string, duration int) {
	args := append([]string{"delete"}, rule...)
	time.Sleep(time.Duration(duration) * time.Second)
	// BUG FIX: the original discarded the error from ufw, so a failed
	// delete (binary missing, rule already gone, permissions) was
	// reported as success.
	if _, err := exec.Command("ufw", args...).Output(); err != nil {
		fmt.Printf("Failed to remove rule: %v\n", err)
		return
	}
	fmt.Printf("Removed rule\n")
	// send some notification
}
// getsha512 returns the lowercase hex-encoded SHA-512 digest of
// inputString.
func getsha512(inputString string) (sha string) {
	digest := sha512.Sum512([]byte(inputString))
	sha = hex.EncodeToString(digest[:])
	return
}
// myIP echo ip
func myIP(w http.ResponseWriter, r *http.Request) {
theIP := r.Header.Get("X-Real-IP")
_, wantJSON := r.URL.Query()["json"]
if wantJSON {
fmt.Fprintf(w, "{\"ip\":\"%s\"}", theIP)
} else {
fmt.Fprintf(w, "%s", theIP)
}
}
// OpenExec is the HTTP handler that executes a signed ufw command.
//
// Required query parameters: command, sign, time, duration, password.
// A request is accepted when (a) password matches initialPassword,
// (b) the client timestamp is within [now-minTime, now+maxTime], and
// (c) sign equals SHA-512("secretKey:command:time:duration:password").
//
// SECURITY(review): the signed command string is handed verbatim to
// ufw, so any holder of the secret can run arbitrary ufw subcommands;
// the signature comparison below is not constant-time; and a captured
// request can be replayed within the accepted timestamp window.
func OpenExec(w http.ResponseWriter, r *http.Request) {
	command, okc := r.URL.Query()["command"]
	sign, oks := r.URL.Query()["sign"]
	Now, okn := r.URL.Query()["time"]
	duration, okd := r.URL.Query()["duration"]
	passedPassword, okp := r.URL.Query()["password"]
	// All five parameters are mandatory.
	if !okc || !oks || !okn || !okd || !okp {
		fmt.Fprintf(w, "request error.")
		return
	}
	// Cheap password gate before doing any hashing work.
	if passedPassword[0] != initialPassword {
		fmt.Fprintf(w, "invalid first security password.") // to prevent possibile hashing dos
		return
	}
	// Reject stale or far-future timestamps to limit replay.
	now := int32(time.Now().Unix())
	remoteNow64, _ := strconv.Atoi(Now[0])
	remoteNow := int32(remoteNow64)
	if remoteNow < (now-minTime) || remoteNow > (now+maxTime) {
		fmt.Fprintf(w, "timestamp error %d %d.", remoteNow64, now)
		return
	}
	// Recompute the signature over the secret plus all request fields.
	concatValues := secretKey + ":" + command[0] + ":" + Now[0] + ":" + duration[0] + ":" + passedPassword[0]
	myHash := getsha512(concatValues)
	if myHash != sign[0] {
		fmt.Fprintf(w, "not authorized to do that.")
		return
	}
	// Cap how long a temporary rule may live.
	Duration, err := strconv.Atoi(duration[0])
	if err != nil || Duration > maxDuration {
		fmt.Fprintf(w, "duration error. maybe longer than %d seconds?", maxDuration)
		return
	}
	// Split the signed command into argv and run it under ufw, echoing
	// the command output back to the caller.
	cmds := strings.Split(command[0], " ")
	out, err := exec.Command("ufw", cmds...).Output()
	if err != nil {
		fmt.Fprintf(w, "error: \n%s", err)
		return
	}
	fmt.Fprintf(w, "%s", out[:])
	// Duration == 0 means the rule is permanent; otherwise schedule its
	// removal in the background.
	if Duration > 0 {
		go removeRule(cmds, Duration)
	}
	// c, _ := redis.Dial("tcp", ":6379")
	// defer c.Close()
	// n, _ := redis.Int(c.Do("INCR", "k1"))
	// fmt.Fprintf(w, "%s -> %s %s %s now is %d Hello %s, %d!", concatValues, myHash, command, sign, now, getsha512("ciao"), n)
}
// main wires the two HTTP routes and serves on :8082.
func main() {
	http.HandleFunc("/", OpenExec)
	http.HandleFunc("/ip", myIP)
	// BUG FIX: the original discarded ListenAndServe's error, so a bind
	// failure (port in use, insufficient permissions) exited silently
	// with status 0.
	if err := http.ListenAndServe(":8082", nil); err != nil {
		fmt.Printf("server error: %v\n", err)
	}
}
|
package main
import (
"context"
"flag"
"fmt"
"io"
"log"
"os"
"strconv"
"github.com/golang/protobuf/ptypes/empty"
v1 "github.com/onuryartasi/scaler/pkg/api/v1"
"google.golang.org/grpc"
)
// container embeds the generated protobuf Container message so local
// helpers can be attached to it.
type container struct {
	*v1.Container
}

// ANSI escape sequences used to colorize log output; each is a format
// string taking one %s argument.
var (
	InfoColor    = "\033[1;34m%s\033[0m"
	NoticeColor  = "\033[1;36m%s\033[0m"
	WarningColor = "\033[1;33m%s\033[0m"
	ErrorColor   = "\033[1;31m%s\033[0m"
	DebugColor   = "\033[0;36m%s\033[0m"
)

// usageStr is the help text printed by usage().
var usageStr = `
Usage: scaler [options]
Options:
--image <image-url> Container's image for scale
--min <min-value> Minimum container to run (default is 1)
--max <max-value> Maximum container to run (0 is unlimited, default is 3)
`
// usage prints the CLI usage text in the info color and terminates the
// process via log.Fatalf.
func usage() {
	log.Fatalf(InfoColor, usageStr)
}
// connect dials the scaler gRPC server on :4444 without TLS and returns
// a container-service client; it panics when the connection cannot be
// created.
func connect() v1.ContainerServiceClient {
	conn, err := grpc.Dial(":4444", grpc.WithInsecure())
	if err != nil {
		panic(err)
	}
	return v1.NewContainerServiceClient(conn)
}
// main dispatches the scaler CLI subcommands: list, create, remove,
// start, stop and stat.
func main() {
	var image string
	var minValue string
	var maxValue string
	var name string
	var cpuValue string
	flag.NewFlagSet("list", flag.ExitOnError)
	flag.NewFlagSet("stop", flag.ExitOnError)
	create := flag.NewFlagSet("create", flag.ExitOnError)
	create.StringVar(&image, "image", "", "Container's image for scale")
	create.StringVar(&name, "name", "", "Project Name")
	create.StringVar(&minValue, "min", "1", "Minimum container to run (default is 1)")
	create.StringVar(&maxValue, "max", "3", "Maximum container to run (0 is unlimited, default is 3)")
	create.StringVar(&cpuValue, "cpu", "1", "Container cpu limit")
	log.SetFlags(0)
	flag.Usage = usage
	if len(os.Args) < 2 {
		usage()
	}
	switch os.Args[1] {
	case "list":
		client := connect()
		resp, err := client.ContainerList(context.Background(), &empty.Empty{})
		if err != nil {
			log.Fatalf("Container List Error : %s", err)
		}
		for _, container := range resp.GetContainer() {
			fmt.Println(container.Id, container.Names, container.Image)
		}
	case "create":
		create.Parse(os.Args[2:])
		if len(image) < 1 {
			log.Printf(ErrorColor, "Error: An image must be specified.")
			usage()
		}
		if len(name) < 1 {
			log.Printf(ErrorColor, "Error: A project name must be specified.")
			usage()
		}
		// Conversion errors fall back to zero values (min=0, max=0,
		// cpu=0), matching the original behavior.
		min, _ := strconv.Atoi(minValue)
		max, _ := strconv.Atoi(maxValue)
		cpu, _ := strconv.ParseFloat(cpuValue, 32)
		client := connect()
		resp, err := client.CreateProject(context.Background(), &v1.Project{Cpu: float32(cpu), Image: image, Min: int32(min), Max: int32(max), Name: name})
		// BUG FIX: the original logged the error and then dereferenced
		// resp.ContainerId anyway, panicking on a failed call.
		if err != nil {
			log.Fatalf("Create Project Error : %s", err)
		}
		log.Printf("Containers created : %+v", resp.ContainerId)
		//resp,err := client.ContainerCreate(context.Background(),&v1.ContainerConfig{Image:image})
		//if err != nil{
		// log.Printf(ErrorColor,"Error: Contaner Create error")
		//}
		//fmt.Println(resp.GetId())
	case "remove":
		client := connect()
		containerID := requiredArg()
		resp, err := client.ContainerRemove(context.Background(), &v1.ContainerId{ContainerId: containerID})
		if err != nil {
			log.Printf(ErrorColor, "Error: Container Remove Error: %v", err)
		}
		fmt.Println(resp.GetContainerId())
	case "start":
		client := connect()
		containerID := requiredArg()
		resp, err := client.ContainerStart(context.Background(), &v1.ContainerId{ContainerId: containerID})
		if err != nil {
			log.Printf(ErrorColor, "Error: Container start error %v", err)
		}
		fmt.Println(resp)
	case "stop":
		client := connect()
		projectName := requiredArg()
		resp, err := client.StopProject(context.Background(), &v1.StopProjectRequest{ProjectName: projectName})
		if err != nil {
			log.Printf(ErrorColor, "Error: Contaner Stop error")
		}
		fmt.Println(resp)
	case "stat":
		client := connect()
		containerID := requiredArg()
		stream, err := client.ContainerStatStream(context.Background(), &v1.ContainerId{ContainerId: containerID})
		if err != nil {
			panic(err)
		}
		// Print stat records until the server closes the stream.
		for {
			data, err := stream.Recv()
			if err == io.EOF {
				break
			}
			if err != nil {
				panic(err)
			}
			log.Println(data)
		}
	default:
		flag.PrintDefaults()
		os.Exit(1)
	}
}

// requiredArg returns the subcommand argument (os.Args[2]).
// BUG FIX: the original indexed os.Args[2] directly, panicking with
// index-out-of-range when e.g. "scaler remove" was run without an
// argument; now usage() is shown instead.
func requiredArg() string {
	if len(os.Args) < 3 {
		usage()
	}
	return os.Args[2]
}
|
package services
import "time"
// SessionDuration is the idle TTL applied to every cache entry; callers
// must set it before using a Cache (the zero value expires entries
// immediately).
var SessionDuration time.Duration

// Cache is an expiring key/value store: every Set/Get (re)arms a timer
// that removes the entry after SessionDuration.
//
// NOTE(review): the maps are mutated both by callers and by timer
// goroutines without any locking — concurrent use is a data race.
// Confirm callers serialize access, or add a mutex.
type Cache struct {
	values map[string]interface{}
	timers map[string]*time.Timer
}
// NewCache returns an empty, ready-to-use Cache.
func NewCache() Cache {
	return Cache{
		values: make(map[string]interface{}),
		timers: make(map[string]*time.Timer),
	}
}
// Set stores object under key and (re)arms the entry's expiry timer.
// NOTE(review): unsynchronized map write — not safe for concurrent use.
func (cache Cache) Set(key string, object interface{}) {
	cache.values[key] = object
	scheduleDeleting(cache, key)
}
// Get returns the value stored under key (nil when absent) and resets
// the entry's expiry timer, sliding-expiration style.
// NOTE(review): a Get on a missing key still schedules a deletion timer
// for that key — presumably harmless, but confirm this is intended.
func (cache Cache) Get(key string) interface{} {
	scheduleDeleting(cache, key)
	return cache.values[key]
}
// Delete removes the cached value for key and cancels its expiry timer.
func (cache Cache) Delete(key string) {
	delete(cache.values, key)
	// BUG FIX: the original called cache.timers[key].Stop() without a
	// presence check, panicking with a nil-pointer dereference when no
	// timer had ever been scheduled for key.
	if timer, ok := cache.timers[key]; ok {
		timer.Stop()
		delete(cache.timers, key)
	}
}
func scheduleDeleting(cache Cache, key string) {
timer, success := cache.timers[key]
if success {
timer.Stop()
}
timer = time.NewTimer(SessionDuration)
cache.timers[key] = timer
go func(cache Cache) {
<-timer.C
delete(cache.values, key)
}(cache)
} |
package services
import (
"github.com/ham357/tsundoku/api/domain/users"
"github.com/ham357/tsundoku/api/utils/errors"
)
// CreateUser persists the given user and returns a pointer to it, or
// the save error when persistence fails.
func CreateUser(user users.User) (*users.User, *errors.ApiErr) {
	err := user.Save()
	if err != nil {
		return nil, err
	}
	return &user, nil
}
|
// comma inserts commas in a non-negative decimal integer string
package main
import (
"fmt"
)
// main prints comma-formatted versions of a few sample integer strings.
func main() {
	samples := []string{"1", "12", "213", "3451", "23451", "234543", "1345678"}
	for _, s := range samples {
		fmt.Println(comma(s))
	}
}
// comma inserts a comma every three digits (from the right) into a
// non-negative decimal integer string, e.g. "1345678" -> "1,345,678".
func comma(s string) string {
	// Iteratively splice commas in from the rightmost group leftwards;
	// strings of three or fewer digits are returned unchanged.
	for i := len(s) - 3; i > 0; i -= 3 {
		s = s[:i] + "," + s[i:]
	}
	return s
}
|
package view
import (
"bufio"
"github.com/xiaozefeng/go-web-crawler/engine"
"github.com/xiaozefeng/go-web-crawler/frontend/model"
"github.com/xiaozefeng/go-web-crawler/model/zhenai"
"html/template"
"os"
"testing"
)
// TestTemplate renders ten copies of a fixed zhenai profile item
// through ./index.tmpl and writes the result to ./index.html.
// NOTE(review): this "test" asserts nothing about the output — it
// panics on template/file errors and otherwise just produces HTML for
// manual inspection.
func TestTemplate(t *testing.T) {
	var item = engine.Item{
		Id:   "123",
		Url:  "https://album.zhenai.com/u/1755238721",
		Type: "zhenai",
		Payload: zhenai.Profile{
			Name:          "非诚勿扰",
			Gender:        "男",
			Age:           "12",
			Height:        "170cm",
			Weight:        "150kg",
			Income:        "20000-30000",
			Marriage:      "未婚",
			Education:     "大专",
			Occupation:    "工程师",
			Hukou:         "上海",
			Constellation: "双鱼",
			House:         "已购房",
			Car:           "已购车",
			Avatar:        "https://photo.zastatic.com/images/photo/438810/1755238721/1729715236718796.jpg?scrop=1&crop=1&cpos=north&w=200&h=200",
		},
	}
	// Must panics if the template fails to parse.
	temp := template.Must(template.ParseFiles("./index.tmpl"))
	file, err := os.Create("./index.html")
	if err != nil {
		panic(err)
	}
	defer file.Close()
	w := bufio.NewWriter(file)
	page := model.SearchResult{
		Hits:  10,
		Start: 0,
	}
	// Repeat the same item ten times to fill the page.
	for i := 0; i < 10; i++ {
		page.Items = append(page.Items, item)
	}
	err = temp.Execute(w, page)
	// Deferred after Execute so the buffer is flushed even on error;
	// the Flush error itself is ignored.
	defer w.Flush()
	if err != nil {
		panic(err)
	}
}
|
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package camera
import (
"context"
"chromiumos/tast/common/media/caps"
"chromiumos/tast/local/camera/cca"
"chromiumos/tast/testing"
)
// init registers CCAUIRefresh with the Tast framework. The test runs
// against an already-launched Chrome Camera App ("ccaLaunched" fixture)
// on devices with a built-in or vivid camera.
func init() {
	testing.AddTest(&testing.Test{
		Func:         CCAUIRefresh,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Test for checking Chrome Camera App still works after refreshing",
		Contacts:     []string{"wtlee@chromium.org", "chromeos-camera-eng@google.com"},
		Attr:         []string{"group:mainline", "informational", "group:camera-libcamera"},
		SoftwareDeps: []string{"camera_app", "chrome", caps.BuiltinOrVividCamera},
		Fixture:      "ccaLaunched",
	})
}
// CCAUIRefresh refreshes the Chrome Camera App and verifies the preview
// becomes active again afterwards.
func CCAUIRefresh(ctx context.Context, s *testing.State) {
	fixture := s.FixtValue().(cca.FixtureData)
	app := fixture.App()
	bridge := fixture.TestBridge()

	s.Log("Refreshing CCA")
	if err := app.Refresh(ctx, bridge); err != nil {
		s.Fatal("Failed to complete refresh: ", err)
	}
	if err := app.WaitForVideoActive(ctx); err != nil {
		s.Fatal("Preview is not shown after refreshing: ", err)
	}
}
|
package enums
const (
	// RedisFinishedGameCount: total participants who completed the
	// game; players who quit mid-game are not counted.
	RedisFinishedGameCount = "RedisFinishedGameCount"
	// RedisUnfinishedGameCount: players who quit before finishing.
	RedisUnfinishedGameCount = "RedisUnfinishedGameCount"
	// RedisRepeatUserCount: players who completed the full game more
	// than once.
	RedisRepeatUserCount = "RedisRepeatUserCount"
	// RedisNotRepeatUserCount: distinct players who completed the game.
	// NOTE(review): the original comment here duplicated the "repeat"
	// description; presumably this is the unique-player counter —
	// confirm against the code that writes it.
	RedisNotRepeatUserCount = "RedisNotRepeatUserCount"
)
|
package gomodulesdeptwo
import (
"github.com/tarekbadrshalaan/gomodulesdepone/v2/subpkg"
)
// GetDatadepone : get data from depone.
// Thin wrapper forwarding to subpkg.GetExtraData from the v2 module of
// gomodulesdepone.
func GetDatadepone() string {
	return subpkg.GetExtraData()
}
|
package list
import (
"fmt"
"testing"
)
// TestAppend exercises Append, InsertInFront, Size, Find, PrintAll and
// Delete on a fresh linked list, mixing int and string elements.
func TestAppend(t *testing.T) {
	linkedList := NewLinkedList()
	linkedList.Append(1)
	linkedList.InsertInFront("22")
	// One appended plus one inserted in front.
	if linkedList.Size != 2 {
		t.Error("should be 2 but it is:", linkedList.Size)
	}
	linkedList.PrintAll()
	fmt.Println("-------")
	if v := linkedList.Find("22"); v == nil {
		t.Error("it should be able to find 22")
	} else {
		fmt.Println("found record", v.Element)
	}
	fmt.Println("--------")
	linkedList.Append(3)
	linkedList.PrintAll()
	fmt.Println("after delete 22")
	fmt.Println(linkedList.Delete("22"))
	linkedList.Delete("333") // nothing should happen
	linkedList.PrintAll()
}
// TestFindMth builds the list 1,2,3 and prints the 2nd element from the
// end (no assertion — output is for manual inspection).
func TestFindMth(t *testing.T) {
	fmt.Println("In TestFindMth")
	list := NewLinkedList()
	for _, v := range []int{1, 2, 3} {
		list.Append(v)
	}
	fmt.Println(list.FindMthToLast(2))
}
|
package runtime
import (
"github.com/juanibiapina/marco/lang"
"reflect"
"testing"
)
// TestRunString checks that running the source "1" yields the number 1.
func TestRunString(t *testing.T) {
	rt := New()
	got := rt.Run("1")
	want := lang.MakeNumber(1)
	if !reflect.DeepEqual(got, want) {
		t.Errorf("Wrong result, expected '%v', got '%v'", want, got)
	}
}
|
package concurrent
import (
"math/rand"
"strconv"
"testing"
)
// TestStripedMutex_GetLock looks up locks for 100 random keys across 64
// stripes, verifying GetLock handles arbitrary keys without panicking.
func TestStripedMutex_GetLock(t *testing.T) {
	stripes := NewStripedMutex(64)
	for n := 0; n < 100; n++ {
		key := strconv.Itoa(rand.Int())
		stripes.GetLock(key)
	}
}
|
package cmqapi
import (
"fmt"
"github.com/friendlyhank/foundation/str"
)
// CmqQueue wraps a single CMQ queue: its name, the shared client used
// for API calls, and whether message bodies are base64-encoded.
type CmqQueue struct {
	QueueName string     // queue name
	CmqClient *CmqClient // underlying API client
	Encoding  bool       // base64-encode message bodies
}

// NewCmqQueue builds a CmqQueue bound to queuename on cmqclient.
func NewCmqQueue(queuename string, cmqclient *CmqClient, encoding bool) *CmqQueue {
	return &CmqQueue{QueueName: queuename, CmqClient: cmqclient, Encoding: encoding}
}
//===============================================queue operation===============================================
//===============================================queue operation===============================================

/*
Create provisions this queue on CMQ with the attributes in queuemeta.

@type queuemeta: QueueMeta struct
@return err
*/
func (q *CmqQueue) Create(queuemeta *QueueMeta) (err error) {
	// maxMsgHeapNum is only sent when explicitly positive so the server
	// default applies otherwise.
	params := map[string]string{"queueName": q.QueueName}
	params["pollingWaitSeconds"] = str.Int642str(queuemeta.PollingWaitSeconds)
	params["visibilityTimeout"] = str.Int642str(queuemeta.VisibilityTimeout)
	params["maxMsgSize"] = str.Int642str(queuemeta.MaxMsgSize)
	params["msgRetentionSeconds"] = str.Int642str(queuemeta.MsgRetentionSeconds)
	params["rewindSeconds"] = str.Int642str(queuemeta.RewindSeconds)
	if queuemeta.MaxMsgHeapNum > 0 {
		params["maxMsgHeapNum"] = str.Int642str(queuemeta.MaxMsgHeapNum)
	}
	_, err = q.CmqClient.CreateQueue(params)
	return
}
/*
CreateByName creates a queue named queueName using the default
attributes from SetDefaultQueueMeta.

@type queueName: queueName string
@return err
*/
func (q *CmqQueue) CreateByName(queueName string) (err error) {
	return q.Create(q.SetDefaultQueueMeta(queueName))
}
/*
RewindQueue rewinds consumption of the queue so that messages enqueued
at or after backTrackingTime (unix seconds) become consumable again.

@type backTrackingTime: int64
@return err
*/
func (q *CmqQueue) RewindQueue(backTrackingTime int64) (err error) {
	_, err = q.CmqClient.RewindQueue(map[string]string{
		"queueName":        q.QueueName,
		"startConsumeTime": str.Int642str(backTrackingTime),
	})
	return
}
/*
Delete removes this queue from CMQ.

@return err
*/
func (q *CmqQueue) Delete() (err error) {
	_, err = q.CmqClient.DeleteQueue(map[string]string{
		"queueName": q.QueueName,
	})
	return
}
//===============================================QueueMeta operation===============================================

/*
QueueMeta holds the tunable attributes of a queue.

	MaxMsgHeapNum:       maximum number of messages allowed to accumulate
	PollingWaitSeconds:  long-poll wait for receives, 0-30, in seconds
	VisibilityTimeout:   message visibility timeout, in seconds
	MaxMsgSize:          maximum message length, in bytes
	MsgRetentionSeconds: message retention period, in seconds
	RewindSeconds:       maximum rewind window (0 disables rewind), in seconds
*/
type QueueMeta struct {
	MaxMsgHeapNum       int64
	PollingWaitSeconds  int64
	VisibilityTimeout   int64
	MaxMsgSize          int64
	MsgRetentionSeconds int64
	RewindSeconds       int64
	Queuename           string
}
/*
SetDefaultQueueMeta builds a QueueMeta for queuename populated with the
library defaults: 15-day retention and rewind window, 1 MiB messages,
30-second polling and visibility timeout.

@type queuename: string
@return *QueueMeta
*/
func (q *CmqQueue) SetDefaultQueueMeta(queuename string) *QueueMeta {
	meta := new(QueueMeta)
	meta.Queuename = queuename
	meta.MaxMsgHeapNum = 10000000
	meta.PollingWaitSeconds = 30
	meta.VisibilityTimeout = 30
	meta.MaxMsgSize = 1024 * 1024
	meta.MsgRetentionSeconds = 1296000 // keep messages for 15 days
	meta.RewindSeconds = 1296000
	return meta
}
/*
SetAttributes pushes the attributes in queuemeta to the queue on CMQ.

@type queuemeta: QueueMeta struct
@return err
*/
func (q *CmqQueue) SetAttributes(queuemeta *QueueMeta) (err error) {
	// maxMsgHeapNum is only sent when explicitly positive.
	params := map[string]string{"queueName": q.QueueName}
	params["pollingWaitSeconds"] = str.Int642str(queuemeta.PollingWaitSeconds)
	params["visibilityTimeout"] = str.Int642str(queuemeta.VisibilityTimeout)
	params["maxMsgSize"] = str.Int642str(queuemeta.MaxMsgSize)
	params["msgRetentionSeconds"] = str.Int642str(queuemeta.MsgRetentionSeconds)
	params["rewindSeconds"] = str.Int642str(queuemeta.RewindSeconds)
	if queuemeta.MaxMsgHeapNum > 0 {
		params["maxMsgHeapNum"] = str.Int642str(queuemeta.MaxMsgHeapNum)
	}
	_, err = q.CmqClient.SetQueueAttributes(params)
	return
}
/*
GetAttributes fetches the queue's current attributes from CMQ.

@return queuemeta: QueueMeta struct
@return err
*/
func (q *CmqQueue) GetAttributes() (queuemeta *QueueMeta, err error) {
	params := map[string]string{
		"queueName": q.QueueName,
	}
	// BUG FIX: the original ignored the error and dereferenced the
	// (possibly nil) response, panicking on any client failure.
	res, err := q.CmqClient.GetQueueAttributes(params)
	if err != nil {
		return nil, err
	}
	queuemeta = &QueueMeta{
		Queuename:           q.QueueName,
		PollingWaitSeconds:  res.PollingWaitSeconds,
		VisibilityTimeout:   res.VisibilityTimeout,
		MaxMsgSize:          res.MaxMsgSize,
		MsgRetentionSeconds: res.MsgRetentionSeconds,
		RewindSeconds:       res.RewindSeconds,
	}
	return queuemeta, nil
}
//===============================================message operation===============================================

// Message is a single CMQ message together with its consumption
// metadata.
type Message struct {
	MsgBody          string // message payload
	MsgID            string // unique message id
	ReceiptHandle    string // per-consumption handle; only the handle from the latest consumption can delete the message
	EnqueueTime      int64  // unix time the message entered the queue
	FirstDequeueTime int64  // unix time of the first consumption
	NextVisibleTime  int64  // unix time the message becomes visible again
	DequeueCount     int64  // number of times the message has been consumed
}
/*
SendMessage publishes message.MsgBody to the queue, delayed by
delaytime seconds, and returns the server-assigned message id.

@type message: *Message
@type delaytime: int64
@return msgid, err
*/
func (q *CmqQueue) SendMessage(message *Message, delaytime int64) (msgid string, err error) {
	// TODO: message encryption
	return q.CmqClient.SendMessage(map[string]string{
		"queueName":    q.QueueName,
		"msgBody":      message.MsgBody,
		"delaySeconds": str.Int642str(delaytime),
	})
}
/*
BatchSendMessage publishes several messages at once, each delayed by
delaytime seconds, and returns the server-assigned ids in order.

@type messages: []*Message
@type delaytime: int64
@return msgids, err
*/
func (q *CmqQueue) BatchSendMessage(messages []*Message, delaytime int64) (msgids []string, err error) {
	params := map[string]string{
		"queueName":    q.QueueName,
		"delaySeconds": str.Int642str(delaytime),
	}
	// TODO: message encryption
	// Bodies are passed as indexed msgBody.N parameters.
	for k, message := range messages {
		var key = fmt.Sprintf("msgBody.%v", k)
		params[key] = message.MsgBody
	}
	// BUG FIX: the original ignored the error and ranged over the
	// (possibly nil) response, panicking on any client failure.
	res, err := q.CmqClient.BatchSendMessage(params)
	if err != nil {
		return nil, err
	}
	for _, item := range res.MsgList {
		msgids = append(msgids, item.MsgID)
	}
	return msgids, nil
}
/*
ReceiveMessage consumes a single message from the queue.

@type pollingwaitseconds: int64 — long-poll wait for this request in
seconds; 0 selects the 30-second default.
@return msg: the received message (basic attributes plus a temporary
receipt handle)
@return err
*/
func (q *CmqQueue) ReceiveMessage(pollingwaitseconds int64) (msg *Message, err error) {
	params := map[string]string{
		"queueName": q.QueueName,
	}
	if 0 != pollingwaitseconds {
		params["UserpollingWaitSeconds"] = str.Int642str(pollingwaitseconds)
		params["pollingWaitSeconds"] = str.Int642str(pollingwaitseconds)
	} else {
		params["UserpollingWaitSeconds"] = str.Int642str(30)
	}
	// BUG FIX: the original ignored the error and copied fields out of
	// the (possibly nil) response, panicking on any client failure. It
	// also pre-allocated a ReceiveMessageRes that was immediately
	// overwritten.
	resp, err := q.CmqClient.ReceiveMessage(params)
	if err != nil {
		return nil, err
	}
	msg = &Message{
		MsgID:            resp.MsgID,
		ReceiptHandle:    resp.ReceiptHandle,
		EnqueueTime:      resp.EnqueueTime,
		NextVisibleTime:  resp.NextVisibleTime,
		DequeueCount:     resp.DequeueCount,
		FirstDequeueTime: resp.FirstDequeueTime,
	}
	return msg, nil
}
/*
BatchReceiveMessage consumes up to numOfMsg messages in one request.

@type numOfMsg: int64 — number of messages to consume
@type pollingWaitSeconds: int64 — long-poll wait for this request in
seconds; 0 selects the 30-second default.
@return messages, err
*/
func (q *CmqQueue) BatchReceiveMessage(numOfMsg int64, pollingWaitSeconds int64) (messages []*Message, err error) {
	params := map[string]string{
		"queueName": q.QueueName,
		"numOfMsg":  str.Int642str(numOfMsg),
	}
	if pollingWaitSeconds != 0 {
		params["UserpollingWaitSeconds"] = str.Int642str(pollingWaitSeconds)
		params["pollingWaitSeconds"] = str.Int642str(pollingWaitSeconds)
	} else {
		// Consistency fix: every other call site converts with
		// str.Int642str; the original used str.Int2str only here.
		params["UserpollingWaitSeconds"] = str.Int642str(30)
	}
	// BUG FIX: the original ignored the error and ranged over the
	// (possibly nil) response, panicking on any client failure.
	res, err := q.CmqClient.BatchReceiveMessage(params)
	if err != nil {
		return nil, err
	}
	for _, info := range res.MsgInfoList {
		messages = append(messages, &Message{
			MsgBody:          info.MsgBody,
			MsgID:            info.MsgID,
			ReceiptHandle:    info.ReceiptHandle,
			EnqueueTime:      str.Str2int(info.EnqueueTime),
			FirstDequeueTime: str.Str2int(info.FirstDequeueTime),
			NextVisibleTime:  str.Str2int(info.NextVisibleTime),
			DequeueCount:     str.Str2int(info.DequeueCount),
		})
	}
	return messages, nil
}
/*
DeleteMessage acknowledges (deletes) a consumed message using the
receipt handle returned by the most recent consumption.

@type receipthandle: string
@return err
*/
func (q *CmqQueue) DeleteMessage(receipthandle string) (err error) {
	_, err = q.CmqClient.DeleteMessage(map[string]string{
		"queueName":     q.QueueName,
		"receiptHandle": receipthandle,
	})
	return
}
/*
BatchDeleteMessage deletes several consumed messages in one request;
each receipt handle is passed as an indexed receiptHandle.N parameter.

@type receipthandlelist: []string — handles from the last consumption
@return err
*/
func (q *CmqQueue) BatchDeleteMessage(receipthandlelist []string) (err error) {
	params := map[string]string{"queueName": q.QueueName}
	for i, handle := range receipthandlelist {
		params[fmt.Sprintf("receiptHandle.%v", i)] = handle
	}
	_, err = q.CmqClient.BatchDeleteMessage(params)
	return
}
|
package views
import (
"io/ioutil"
"path/filepath"
"reflect"
"testing"
"github.com/jenkins-x/octant-jx/pkg/common/viewhelpers"
"github.com/stretchr/testify/require"
"sigs.k8s.io/yaml"
"github.com/vmware-tanzu/octant/pkg/view/component"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
// Test_toHealthTableRow loads kuberhealthy YAML fixtures from
// test_data/ and checks that toHealthTableRow converts each into the
// expected octant table row, field by field.
func Test_toHealthTableRow(t *testing.T) {
	tests := []struct {
		// name is both the subtest name and the fixture file name
		// under test_data/.
		name    string
		want    *component.TableRow
		wantErr bool
	}{
		{
			name: "kuberhealthy1.yaml",
			want: &component.TableRow{
				"Name":      viewhelpers.NewMarkdownText(`<a href="https://github.com/Comcast/kuberhealthy/blob/230c4f1/cmd/dns-resolution-check/README.md" target="docs">dns-status-internal</a>`),
				"Namespace": component.NewText("kuberhealthy"),
				"Errors":    component.NewText(""),
				"Healthy":   viewhelpers.NewMarkdownText(`<clr-icon shape="check-circle" class="is-solid is-success" title="True"></clr-icon> True`),
			},
			wantErr: false,
		},
		{
			// Same resource but with error messages present.
			name: "kuberhealthy2.yaml",
			want: &component.TableRow{
				"Name":      viewhelpers.NewMarkdownText(`<a href="https://github.com/Comcast/kuberhealthy/blob/230c4f1/cmd/dns-resolution-check/README.md" target="docs">dns-status-internal</a>`),
				"Namespace": component.NewText("kuberhealthy"),
				"Errors":    component.NewText("foo\nbar\n"),
				"Healthy":   viewhelpers.NewMarkdownText(`<clr-icon shape="check-circle" class="is-solid is-success" title="True"></clr-icon> True`),
			},
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Load the fixture into an unstructured Kubernetes object.
			fileName := filepath.Join("test_data", tt.name)
			data, err := ioutil.ReadFile(fileName)
			require.NoError(t, err, "failed to load %s", fileName)
			u := &unstructured.Unstructured{}
			err = yaml.Unmarshal(data, u)
			require.NoError(t, err, "failed to unmarshal YAML %s", fileName)
			got, err := toHealthTableRow(u)
			if (err != nil) != tt.wantErr {
				t.Errorf("toHealthTableRow() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("toHealthTableRow() got = %v, want %v", got, tt.want)
			}
		})
	}
}
|
package extjson
// Naming-style codes for (de)serializing field names.
// NOTE(review): values start at 1001, presumably to avoid colliding with
// other option codes — confirm against the consumers of these constants.
const (
	NamedStyleLowerCamelCase = 1001 // e.g. "fieldName"
	NamedStyleUpperCamelCase = 1002 // e.g. "FieldName"
	NamedStyleUnderScoreCase = 1003 // e.g. "field_name"
)
|
package Tieba
import (
	"strconv"

	"github.com/PuerkitoBio/goquery"
)
// Article represents a Tieba forum thread.
type Article struct {
	url        string // thread URL
	totalPage  int    // number of reply pages
	author     string // thread author name
	lastUpdate int    // last-update marker — TODO confirm units (timestamp?)
}
func (article Article) NewArticle(url string) (article *Article){
doc, _ := goquery.NewDocument(url)
&article.totalPage = doc.Find(".l_reply_num .red").Eq(1).Text()
return article
} |
package main
import (
"fmt"
"testing"
)
// TestFindMove plays a short opening sequence on a fresh board and asks the
// engine for a best move for each side.
// NOTE(review): the test only prints the results and never fails on an
// unexpected move or error — consider asserting on the returned values.
func TestFindMove(t *testing.T) {
	board := new(Board)
	board.NewGame()
	game := new(Game)
	game.board = board
	blackPlayer := &Player{BLACK}
	redPlayer := &Player{RED}
	// A few opening moves; the last black move spans two squares
	// (presumably a capture — confirm against DoMove's rules).
	game.DoMove(&Square{2, 1}, &Square{3, 2}, blackPlayer)
	game.DoMove(&Square{5, 0}, &Square{4, 1}, redPlayer)
	game.DoMove(&Square{2, 3}, &Square{3, 4}, blackPlayer)
	game.DoMove(&Square{5, 6}, &Square{4, 5}, redPlayer)
	game.DoMove(&Square{3, 2}, &Square{5, 0}, blackPlayer)
	blackMove, err := FindMove(game, BLACK)
	if err == nil {
		fmt.Printf("Best move for black: %v\n", blackMove)
	} else {
		fmt.Println("No moves for black.")
	}
	redMove, err := FindMove(game, RED)
	if err == nil {
		fmt.Printf("Best move for red: %v\n", redMove)
	} else {
		fmt.Println("No moves for red.")
	}
}
|
package moxings
// Yinpinwenjians is the GORM model for audio files ("yinpin wenjian" is
// pinyin for "audio file"). Field comments are best-effort pinyin
// translations — confirm with the schema owner.
type Yinpinwenjians struct {
	Id       int
	Xuliehao string `gorm:"not null;DEFAULT:0"` // serial number
	Lujing   string `gorm:"not null;DEFAULT:0"` // file path
	Leixing  string `gorm:"not null;DEFAULT:0"` // type
	Mima     string `gorm:"not null;DEFAULT:0"` // password
	Md5mima  string `gorm:"not null;DEFAULT:0"` // MD5 of the password
}
// TableName overrides GORM's default naming so the model maps to the
// "Yinpinwenjians" table.
func (Yinpinwenjians) TableName() string {
	return "Yinpinwenjians"
}
|
package sphinx
import "github.com/decred/slog"
// sphxLog is a logger that is initialized with no output filters. This
// means the package will not perform any logging by default until the caller
// requests it.
// The default amount of logging is none.
var sphxLog = slog.Disabled

// UseLogger uses a specified Logger to output package logging info.
// This should be used in preference to SetLogWriter if the caller is also
// using slog. All subsequent package log calls go through the given logger.
func UseLogger(logger slog.Logger) {
	sphxLog = logger
}
|
package client_generator
import (
"fmt"
"github.com/go-openapi/spec"
"github.com/morlay/gin-swagger/codegen"
"sort"
)
// getFieldsFromSchema renders the properties of schema as Go struct field
// declarations and collects the package dependencies of their types.
// Properties are emitted in sorted name order for deterministic output.
func getFieldsFromSchema(schema spec.Schema) (fields []string, deps []string) {
	var propNames = []string{}
	for name := range schema.Properties {
		propNames = append(propNames, name)
	}
	sort.Strings(propNames)
	for _, name := range propNames {
		propSchema := schema.Properties[name]
		fieldName := codegen.ToUpperCamelCase(name)
		// "x-go-name" overrides the generated field name.
		if propSchema.Extensions["x-go-name"] != nil {
			fieldName = fmt.Sprint(propSchema.Extensions["x-go-name"])
		}
		goType, subDeps := GetTypeFromSchema(propSchema)
		deps = append(deps, subDeps...)
		var tags []string
		var jsonTag = name
		// A non-enum property declared "string" in the spec but mapped to a
		// non-string Go type is (de)serialized through the ",string" option.
		if propSchema.Enum == nil && propSchema.Type.Contains("string") && goType != "string" {
			jsonTag = codegen.JoinWithComma(jsonTag, "string")
		}
		tags = append(tags, codegen.DeclTag("json", jsonTag))
		// BUG FIX: the default tag must come from the property's own schema
		// (propSchema), not the enclosing object's schema — the original
		// stamped the parent object's default onto every field.
		if fmt.Sprint(propSchema.Default) != "<nil>" {
			tags = append(tags, codegen.DeclTag("default", fmt.Sprint(propSchema.Default)))
		}
		if propSchema.Extensions["x-go-validate"] != nil {
			tags = append(tags, codegen.DeclTag("validate", fmt.Sprint(propSchema.Extensions["x-go-validate"])))
		}
		fields = append(fields, codegen.DeclField(
			fieldName,
			goType,
			tags,
			propSchema.Description,
		))
	}
	return
}
// GetTypeFromSchema maps an OpenAPI schema to a Go type expression and the
// list of package dependencies that expression needs. Resolution order:
// $ref, the "x-go-named" extension, allOf composition, object, array, then
// primitive formats/types (defaulting to string).
func GetTypeFromSchema(schema spec.Schema) (tpe string, deps []string) {
	// A $ref points at a named definition; use its name directly.
	if schema.Ref.String() != "" {
		tpe = getRefName(schema.Ref.String())
		return
	}
	// "x-go-named" pins the schema to an existing Go type in another package.
	if schema.Extensions["x-go-named"] != nil {
		tpeName := fmt.Sprint(schema.Extensions["x-go-named"])
		tpe = getRefName(tpeName)
		deps = append(deps, getPackageNameFromPath(tpeName))
		return
	}
	// allOf becomes an anonymous struct embedding each referenced type and
	// inlining any literal properties.
	if len(schema.AllOf) > 0 {
		var fields []string
		for _, subSchema := range schema.AllOf {
			if subSchema.Ref.String() != "" {
				gType := getRefName(subSchema.Ref.String())
				fields = append(fields, codegen.DeclField(
					"",
					gType,
					[]string{""},
					"",
				))
			}
			if subSchema.Properties != nil {
				otherFields, subDeps := getFieldsFromSchema(subSchema)
				fields = append(fields, otherFields...)
				deps = append(deps, subDeps...)
			}
		}
		tpe = codegen.DeclStruct(fields)
		return
	}
	if schema.Type.Contains("object") {
		// additionalProperties -> map[string]T; fixed properties -> struct.
		if schema.AdditionalProperties != nil {
			goType, subDeps := GetTypeFromSchema(*schema.AdditionalProperties.Schema)
			deps = append(deps, subDeps...)
			tpe = codegen.DeclMap("string", goType)
			return
		}
		if schema.Properties != nil {
			fields, subDeps := getFieldsFromSchema(schema)
			deps = append(deps, subDeps...)
			tpe = codegen.DeclStruct(fields)
			return
		}
	}
	if schema.Type.Contains("array") {
		if schema.Items != nil {
			goType, subDeps := GetTypeFromSchema(*schema.Items.Schema)
			deps = append(deps, subDeps...)
			tpe = codegen.DeclSlice(goType)
			return
		}
	}
	// Primitive: prefer an explicit Go-sized format, then fall back on the
	// JSON type (boolean -> bool, everything else -> string).
	schemaType := schema.Type[0]
	format := schema.Format
	switch format {
	case "byte", "int", "int8", "int16", "int32", "int64", "rune", "uint", "uint8", "uint16", "uint32", "uint64", "uintptr", "float32", "float64":
		tpe = format
	case "float":
		tpe = "float32"
	case "double":
		tpe = "float64"
	default:
		switch schemaType {
		case "boolean":
			tpe = "bool"
		default:
			tpe = "string"
		}
	}
	return
}
// ToGoType renders a named Go type declaration for schema and returns it
// together with the type's package dependencies.
func ToGoType(name string, schema spec.Schema) (string, []string) {
	goType, deps := GetTypeFromSchema(schema)
	return codegen.DeclType(name, goType), deps
}
// ToTypes generates a complete Go source file: package clause, imports and
// one type declaration per swagger definition, in sorted name order.
func ToTypes(pkgName string, swagger spec.Swagger) string {
	p := codegen.NewPrinter().Input(codegen.DeclPackage(pkgName)).NewLine()
	var types = []string{}
	var deps = []string{}
	var definitionNames = []string{}
	for name := range swagger.Definitions {
		definitionNames = append(definitionNames, name)
	}
	// Sorted so output is deterministic across runs.
	sort.Strings(definitionNames)
	for _, name := range definitionNames {
		goType, subDeps := ToGoType(name, swagger.Definitions[name])
		types = append(types, goType)
		deps = append(deps, subDeps...)
	}
	p.Input(codegen.DeclImports(deps...)).NewLine()
	p.Input(codegen.JoinWithLineBreak(types...))
	return p.String()
}
|
package main
import (
"github.com/p4vlowVl4d/purchase-tracker_gui/gui"
"log"
)
// main opens a 640x480 example window and shows it.
func main() {
	log.Println("Starting")
	win := gui.NewWindow(640, 480, "example")
	win.Show()
}
|
package processor
import (
"context"
"fmt"
)
// TileInternalPipeline holds one tile-processing pipeline's configuration:
// the context governing its lifetime, a shared error channel, and the
// addresses of the gRPC tiler and the index API it talks to.
type TileInternalPipeline struct {
	Context    context.Context
	Error      chan error
	RPCAddress string
	APIAddress string
}
// NewTileInternalPipeline bundles the context, service addresses and error
// channel into a TileInternalPipeline ready for Process.
func NewTileInternalPipeline(ctx context.Context, apiAddr string, rpcAddr string, errChan chan error) *TileInternalPipeline {
	return &TileInternalPipeline{
		Context:    ctx,
		Error:      errChan,
		RPCAddress: rpcAddr,
		APIAddress: apiAddr,
	}
}
// Process wires up the tile pipeline (indexer -> gRPC tiler -> merger ->
// scaler) for geoReq and returns the channel on which scaled rasters are
// delivered. Errors are reported on dp.Error; nil is returned when the gRPC
// tiler cannot be created.
func (dp *TileInternalPipeline) Process(geoReq *GeoTileRequest) chan *ByteRaster {
	grpcTiler := NewRasterGRPC(dp.Context, dp.RPCAddress, dp.Error)
	if grpcTiler == nil {
		// BUG FIX: the original message ended with a literal "/n" (typo for
		// the "\n" escape); error strings should carry no trailing newline.
		dp.Error <- fmt.Errorf("couldn't instantiate RPCTiler %s", dp.RPCAddress)
		return nil
	}
	i := NewTileIndexer(dp.Context, dp.APIAddress, dp.Error)
	// Feed the single request to the indexer and close its input so the
	// downstream stages terminate once processing completes.
	go func() {
		i.In <- geoReq
		close(i.In)
	}()
	m := NewRasterMerger(dp.Error)
	sc := NewRasterScaler(dp.Error)
	grpcTiler.In = i.Out
	m.In = grpcTiler.Out
	sc.In = m.Out
	go i.Run()
	go grpcTiler.Run()
	go m.Run()
	go sc.Run()
	return sc.Out
}
|
package service
import (
"github.com/talesmud/talesmud/pkg/db"
"github.com/talesmud/talesmud/pkg/repository"
"github.com/talesmud/talesmud/pkg/scripts"
)
// Facade aggregates access to all domain services and the script runner.
type Facade interface {
	CharactersService() CharactersService
	PartiesService() PartiesService
	UsersService() UsersService
	RoomsService() RoomsService
	ScriptsService() ScriptsService
	ItemsService() ItemsService
	Runner() scripts.ScriptRunner
}
// facade is the default Facade implementation; it holds one instance of
// each service.
type facade struct {
	css CharactersService
	ps  PartiesService
	us  UsersService
	rs  RoomsService
	is  ItemsService
	ss  ScriptsService
	sr  scripts.ScriptRunner
	db  *db.Client // NOTE(review): never assigned by NewFacade — confirm whether it is needed
}
// NewFacade creates a new service facade, wiring each service to its MongoDB
// repository backed by the given client. The items service additionally
// receives the scripts service and runner.
func NewFacade(db *db.Client, runner scripts.ScriptRunner) Facade {
	charactersRepo := repository.NewMongoDBcharactersRepository(db)
	partiesRepo := repository.NewMongoDBPartiesRepository(db)
	usersRepo := repository.NewMongoDBUsersRepository(db)
	roomsRepo := repository.NewMongoDBRoomsRepository(db)
	scriptsRepo := repository.NewMongoDBScriptRepository(db)
	ss := NewScriptsService(scriptsRepo)
	itemsRepo := repository.NewMongoDBItemsRepository(db)
	itemTemplatesRepo := repository.NewMongoDBItemTemplatesRepository(db)
	is := NewItemsService(itemsRepo, itemTemplatesRepo, ss, runner)
	// NOTE(review): the facade's db field is intentionally (?) left unset
	// here — confirm whether it should be stored.
	return &facade{
		css: NewCharactersService(charactersRepo),
		ps:  NewPartiesService(partiesRepo),
		us:  NewUsersService(usersRepo),
		rs:  NewRoomsService(roomsRepo),
		ss:  ss,
		is:  is,
		sr:  runner,
	}
}
// RoomsService returns the rooms service.
func (f *facade) RoomsService() RoomsService {
	return f.rs
}

// CharactersService returns the characters service.
func (f *facade) CharactersService() CharactersService {
	return f.css
}

// ItemsService returns the items service.
func (f *facade) ItemsService() ItemsService {
	return f.is
}

// ScriptsService returns the scripts service.
func (f *facade) ScriptsService() ScriptsService {
	return f.ss
}

// PartiesService returns the parties service.
func (f *facade) PartiesService() PartiesService {
	return f.ps
}

// UsersService returns the users service.
func (f *facade) UsersService() UsersService {
	return f.us
}

// Runner returns the script runner.
func (f *facade) Runner() scripts.ScriptRunner {
	return f.sr
}
|
package main
import (
"github.com/astaxie/beego"
_ "github.com/go-sql-driver/mysql"
_ "my_blog/routers"
)
// main starts the beego HTTP server; routes and the MySQL driver are wired
// in via the blank imports above.
func main() {
	//beego.AutoRender
	beego.Run()
}
|
package crybsy
import (
"crypto/sha256"
"errors"
"fmt"
"log"
"os"
"os/user"
"path/filepath"
"regexp"
"sync"
"time"
)
// scanner carries the state shared by a recursive file-system walk: the root
// being scanned, output channels for files and errors, the WaitGroup that
// tracks the walk, and compiled ignore patterns.
type scanner struct {
	Root      *Root
	Files     chan File
	Errors    chan error
	WaitGroup *sync.WaitGroup
	Filter    []*regexp.Regexp
}
// NewRoot creates a new CryBSy Root for the directory at path. The path must
// exist and be a directory; it is stored in absolute form. Hostname and the
// current user are recorded best-effort and feed into the root ID.
func NewRoot(path string) (*Root, error) {
	if len(path) == 0 {
		return nil, errors.New("empty path is not valid")
	}
	info, err := os.Stat(path)
	if err != nil {
		return nil, err
	}
	if !info.IsDir() {
		return nil, errors.New("path is not a directory")
	}
	absPath, err := filepath.Abs(path)
	if err != nil {
		return nil, err
	}
	root := new(Root)
	root.Path = absPath
	root.Host, err = os.Hostname()
	if err != nil {
		root.Host = "unknown"
	}
	// Renamed from "user" so the local no longer shadows the os/user package.
	usr, err := user.Current()
	if err == nil {
		root.User.Name = usr.Name
		root.User.UID = usr.Uid
		root.User.GID = usr.Gid
	}
	root.ID = calculateRootID(root)
	return root, nil
}
// SetDefaultFilter installs the built-in ignore patterns on root: git
// metadata, macOS .DS_* files, and CryBSy's own state files.
func SetDefaultFilter(root *Root) {
	if root.Filter == nil {
		root.Filter = make([]string, 0)
	}
	defaults := []string{"[.]git.*", "[.]DS.*", "[.]crybsy.*"}
	root.Filter = append(root.Filter, defaults...)
}
// calculateRootID derives a stable hex identifier for root by hashing its
// path, host and owning user (name, GID, UID — in that order).
func calculateRootID(root *Root) string {
	h := sha256.New()
	for _, part := range []string{
		root.Path,
		root.Host,
		root.User.Name,
		root.User.GID,
		root.User.UID,
	} {
		h.Write([]byte(part))
	}
	return fmt.Sprintf("%x", h.Sum(nil))
}
// Scan walks the root tree asynchronously and streams discovered files. It
// returns the file channel, an error channel, and the WaitGroup that
// completes once the walk has finished. Invalid filter patterns are reported
// on the error channel and skipped.
func Scan(root *Root) (chan File, chan error, *sync.WaitGroup) {
	var wg sync.WaitGroup
	// Renamed from "errors"/"regexp" so the locals no longer shadow the
	// errors and regexp standard-library packages.
	errs := make(chan error, 10000)
	patterns := make([]*regexp.Regexp, 0)
	if root.Filter != nil {
		for _, f := range root.Filter {
			re, err := regexp.Compile(f)
			if err != nil {
				errs <- err
			} else {
				patterns = append(patterns, re)
			}
		}
	}
	scan := scanner{
		Root:      root,
		Files:     make(chan File, 100),
		Errors:    errs,
		WaitGroup: &wg,
		Filter:    patterns,
	}
	wg.Add(1)
	go scanRecursive(root.Path, scan)
	return scan.Files, scan.Errors, scan.WaitGroup
}
// scanRecursive walks the tree rooted at path, forwarding every regular file
// to handleFile and every error to the scan's error channel. It marks the
// scan's WaitGroup done when the walk completes.
func scanRecursive(path string, scan scanner) {
	log.Println("Scan folder", path)
	defer scan.WaitGroup.Done()
	callback := func(filePath string, file os.FileInfo, err error) error {
		if err != nil {
			// Returning the error aborts the remainder of the walk.
			scan.Errors <- err
			return err
		}
		if !file.IsDir() {
			handleFile(filePath, file, scan)
		}
		return err
	}
	err := filepath.Walk(path, callback)
	if err != nil {
		scan.Errors <- err
	}
}
// filterFile reports whether path matches any of the configured filter
// patterns and should therefore be skipped.
func filterFile(path string, scan scanner) bool {
	for _, exp := range scan.Filter {
		// MatchString avoids the []byte conversion the original performed
		// for every pattern check.
		if exp.MatchString(path) {
			return true
		}
	}
	return false
}
// handleFile converts a walked file into a File record — path relative to
// the scan root, base name, root ID and mtime (unix seconds) — and emits it
// on the scan's Files channel. Filtered paths are dropped silently.
func handleFile(path string, file os.FileInfo, scan scanner) {
	if filterFile(path, scan) {
		return
	}
	absPath, err := filepath.Abs(path)
	if err != nil {
		scan.Errors <- err
		return
	}
	relPath, err := filepath.Rel(scan.Root.Path, absPath)
	if err != nil {
		scan.Errors <- err
		return
	}
	modified := file.ModTime().Unix()
	_, name := filepath.Split(path)
	f := File{
		Path:     relPath,
		Name:     name,
		RootID:   scan.Root.ID,
		Modified: modified,
	}
	scan.Files <- f
}
// UpdateFiles merges the result of a fresh scan (files/errors/wg, as
// returned by Scan) with the previously known oldFiles and returns the
// updated list: unchanged files keep their entry, changed files gain a
// Version record, new files are hashed and added.
func UpdateFiles(oldFiles []File, root *Root, files chan File, errors chan error, wg *sync.WaitGroup) []File {
	log.Println("Update file list...")
	start := time.Now().UnixNano()
	fileMap := ByPath(oldFiles)
	end := time.Now().UnixNano()
	delta := end - start
	// BUG FIX: log message read "Mop old files"; it reports mapping oldFiles.
	log.Println("Map old files:", (delta / 1000000), "ms")
	start = time.Now().UnixNano()
	updatedFiles := make(chan File, 100)
	var wg2 sync.WaitGroup
	wg2.Add(1)
	go logErrors(errors, &wg2)
	// Eight workers reconcile scanned files against the previous state.
	for i := 0; i < 8; i++ {
		wg2.Add(1)
		go updateFile(files, root, fileMap, updatedFiles, &wg2)
	}
	fileList := make(chan []File, 1)
	go collectFiles(updatedFiles, fileList)
	// Wait for the disk walk, then close its channels so the workers and
	// the error logger drain and exit.
	wg.Wait()
	close(files)
	close(errors)
	end = time.Now().UnixNano()
	delta = end - start
	log.Println("Disk files scanned:", (delta / 1000000), "ms")
	start = time.Now().UnixNano()
	wg2.Wait()
	close(updatedFiles)
	end = time.Now().UnixNano()
	delta = end - start
	log.Println("Process files:", (delta / 1000000), "ms")
	return <-fileList
}
// collectFiles drains the files channel into a slice and delivers the slice
// on res once the channel is closed.
func collectFiles(files chan File, res chan []File) {
	collected := make([]File, 0)
	for f := range files {
		collected = append(collected, f)
	}
	res <- collected
}
// logErrors drains the error channel, logging each entry, until the channel
// is closed; it then marks wg done.
func logErrors(errors chan error, wg *sync.WaitGroup) {
	defer wg.Done()
	for err := range errors {
		log.Println("scan file error", err)
	}
}
// updateFile is a worker: it consumes scanned files and reconciles each
// against the previous scan (fileMap, keyed by root-relative path), emitting
// the up-to-date entry on updateFiles.
func updateFile(files chan File, root *Root, fileMap map[string]File, updateFiles chan File, wg *sync.WaitGroup) {
	defer wg.Done()
	for f := range files {
		start := time.Now().UnixNano()
		of, ok := fileMap[f.Path]
		if !ok {
			// New file found.
			// BUG FIX: f.Path is relative to the root (see handleFile), so
			// it must be joined with root.Path before hashing — the
			// changed-file branch below already did this.
			hash, err := Hash(filepath.Join(root.Path, f.Path))
			if err != nil {
				log.Println("file hash error", err)
			} else {
				f.FileID = hash
				f.Hash = hash
				updateFiles <- f
			}
		} else {
			// Known file: keep the old entry if the mtime is unchanged,
			// otherwise re-hash and record the previous state as a version.
			if of.Modified == f.Modified {
				updateFiles <- of
			} else {
				hash, err := Hash(filepath.Join(root.Path, f.Path))
				if err != nil {
					log.Println("file hash error", err)
					updateFiles <- of
				} else {
					v := Version{
						Modified: of.Modified,
						Hash:     of.Hash,
					}
					of.Versions = append(of.Versions, v)
					of.Hash = hash
					// NOTE(review): of.Modified is not refreshed to
					// f.Modified here, so the next scan will re-hash this
					// file again — confirm whether that is intended.
					updateFiles <- of
				}
			}
		}
		end := time.Now().UnixNano()
		delta := end - start
		log.Println("Update file", f.Path, "Time:", (delta / 1000000), "ms")
	}
}
|
/*
* Copyright 2021 Vitali Baumtrok.
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*/
package displays
import (
"strconv"
"testing"
)
// TestAll checks that at least one display is reported and sanity-checks the
// first display's geometry.
// NOTE(review): the checks compare Width-X and Height-Y; if Width/Height are
// sizes rather than right/bottom edges this invariant is unusual — confirm
// the intended meaning of the fields.
func TestAll(t *testing.T) {
	displays := All()
	if len(displays) == 0 {
		t.Error("length zero")
	} else if displays[0].Width-displays[0].X <= 0 {
		t.Error("x:" + strconv.Itoa(displays[0].X) + " width:" + strconv.Itoa(displays[0].Width))
	} else if displays[0].Height-displays[0].Y <= 0 {
		t.Error("y:" + strconv.Itoa(displays[0].Y) + " height:" + strconv.Itoa(displays[0].Height))
	}
}
// TestDefault checks that a default display is reported.
func TestDefault(t *testing.T) {
	display := Default()
	if display == nil {
		t.Error("nil returned")
	}
}
// TestIndex checks that the default display carries a non-negative index.
func TestIndex(t *testing.T) {
	display := Default()
	if display == nil {
		t.Error("nil returned")
	} else if display.Index < 0 {
		t.Error("wrong index")
	}
}
|
package main
import "fmt"
/*
https://www.hackerearth.com/practice/data-structures/arrays/1-d/tutorial/
*/
// gsg5 reads a count n followed by n integers from stdin and prints them in
// reverse order, one per line.
func gsg5() {
	var n int
	fmt.Scanf("%d", &n)
	values := make([]int, n)
	for i := range values {
		fmt.Scanf("%d", &values[i])
	}
	for i := n - 1; i >= 0; i-- {
		fmt.Println(values[i])
	}
}
|
/*
Copyright 2020 The Skaffold Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package docker
import (
"context"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/docker"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/schema/latest"
)
// Builder is an artifact builder that uses docker.
type Builder struct {
	localDocker        docker.LocalDaemon
	cfg                docker.Config
	pushImages         bool  // push built images after building — confirm in build path
	useCLI             bool  // presumably shells out to the docker CLI — confirm in build path
	useBuildKit        *bool // nil means "not specified"
	artifacts          ArtifactResolver
	sourceDependencies TransitiveSourceDependenciesResolver
}
// ArtifactResolver provides an interface to resolve built artifact tags by image name.
type ArtifactResolver interface {
	// GetImageTag returns the tag for imageName and whether it was found.
	GetImageTag(imageName string) (string, bool)
}
// TransitiveSourceDependenciesResolver provides an interface to evaluate the
// source dependencies for artifacts.
type TransitiveSourceDependenciesResolver interface {
	// TransitiveArtifactDependencies returns the transitive source
	// dependencies of artifact a.
	TransitiveArtifactDependencies(ctx context.Context, a *latest.Artifact) ([]string, error)
}
// NewArtifactBuilder returns a new instance of a docker builder.
// (Comment fixed: it previously referred to a non-existent "NewBuilder".)
func NewArtifactBuilder(localDocker docker.LocalDaemon, cfg docker.Config, useCLI bool, useBuildKit *bool, pushImages bool, ar ArtifactResolver, dr TransitiveSourceDependenciesResolver) *Builder {
	return &Builder{
		localDocker:        localDocker,
		pushImages:         pushImages,
		cfg:                cfg,
		useCLI:             useCLI,
		useBuildKit:        useBuildKit,
		artifacts:          ar,
		sourceDependencies: dr,
	}
}
|
package main
import (
"context"
"fmt"
)
// t21 demonstrates storing and retrieving a request-scoped value on a
// context, then printing it. A locally-declared key type is used because
// go vet warns against built-in string keys for context.WithValue (they can
// collide across packages).
func t21() {
	type ctxKey string
	const key ctxKey = "key"
	ctx := context.WithValue(context.Background(), key, "value2222")
	fmt.Println(ctx.Value(key).(string))
}
// main runs the context demo.
func main() {
	t21()
}
|
package renderings
// HistoryItem is a single historical rate sample — presumably an exchange
// rate; confirm with the producing service.
type HistoryItem struct {
	Time string  `json:"time"`
	Rate float32 `json:"rate"`
}
// HistoryResponse is the API envelope carrying a list of history items plus
// a status message and code.
type HistoryResponse struct {
	Message string        `json:"message"`
	Code    int           `json:"code"`
	Payload []HistoryItem `json:"payload"`
}
|
package top
import (
. "github.com/trapped/gomaild2/pop3/structs"
)
// Arguments:
// a message-number (required) which may NOT refer to to a
// message marked as deleted, and a non-negative number
// of lines (required)
// Restrictions:
// may only be given in the TRANSACTION state
// Process handles the POP3 TOP command for client c. It currently only
// validates that the session is in the TRANSACTION state, replying ERR
// otherwise and an empty OK when the state is valid.
// NOTE(review): the message-number and line-count arguments described above
// are not parsed and no message content is returned — confirm whether that
// is implemented elsewhere or still TODO.
func Process(c *Client, cmd Command) Reply {
	res := OK
	msg := ""
	if c.State != Transaction {
		res = ERR
		msg = "invalid state"
	}
	return Reply{Result: res, Message: msg}
}
|
package main
import "fmt"
// main demonstrates assigning an anonymous function to a variable and
// invoking it — the only way to define a function inside another function
// in Go.
func main() {
	sayHello := func() {
		fmt.Println("Hello, world")
	}
	sayHello()
}
|
package dal
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"os/user"
"github.com/anurakhan/go-mongo-lb-driver/models"
"github.com/anurakhan/go-mongo-lb-driver/server"
"gopkg.in/mgo.v2/bson"
"encoding/hex"
)
// PhotosRepository stores photo files on local disk and their metadata in
// MongoDB via DbInteractor; Server supplies the configured storage path.
type PhotosRepository struct {
	Server       *server.Server
	DbInteractor *MongoInteractor
}
// PostPhoto writes the uploaded photo in buf to disk under the repository's
// storage path (named "<id>.<fileExt>"), records its metadata in MongoDB,
// and returns the hex-encoded id. Panics on I/O failure, matching the
// package's existing error style.
func (repo *PhotosRepository) PostPhoto(buf *bytes.Buffer, fileName string, fileExt string, id []byte) string {
	path := initPath(repo)
	file, err := os.Create(path + "/" + string(id) + "." + fileExt)
	if err != nil {
		panic(err)
	}
	// BUG FIX: the file handle was leaked and the copy error ignored.
	defer file.Close()
	if _, err := io.Copy(file, buf); err != nil {
		panic(err)
	}
	interactor := repo.DbInteractor
	interactor.StartConn()
	interactor.InsertFileInfo(&models.FileModel{
		Id:       bson.ObjectId(string(id)),
		FileName: fileName,
		FileExt:  fileExt})
	interactor.CloseConn()
	return hex.EncodeToString(id)
}
// GetPhotoById loads the photo metadata for id from MongoDB, reads the
// corresponding file from disk and returns both as a FileRetModel. Panics on
// I/O failure, matching the package's existing error style.
func (repo *PhotosRepository) GetPhotoById(id string) *models.FileRetModel {
	path := initPath(repo)
	fmt.Println(path)
	interactor := repo.DbInteractor
	interactor.StartConn()
	fileModel := interactor.GetFileById(bson.ObjectId(id))
	interactor.CloseConn()
	filePath := path + "/" + id + "." + fileModel.FileExt
	fmt.Println(filePath)
	// BUG FIX: the error from os.Open was silently overwritten by ReadAll
	// (panicking on a nil file instead of the real cause) and the handle was
	// never closed; ReadFile handles both.
	data, err := ioutil.ReadFile(filePath)
	if err != nil {
		panic(err)
	}
	return &models.FileRetModel{
		Id:       id,
		FileName: fileModel.FileName,
		FileExt:  fileModel.FileExt,
		Data:     data}
}
// initPath resolves the repository's storage directory under the user's
// go-mongo-lb-driver file-system root, creating it if missing, and returns
// the resolved path.
func initPath(repo *PhotosRepository) string {
	path := repo.Server.FilePath
	path = fromFileSystemDir(path)
	fmt.Println(path)
	if _, err := os.Stat(path); os.IsNotExist(err) {
		// NOTE(review): the Mkdir error is ignored; a failure here only
		// surfaces later as an os.Create error — consider handling it.
		os.Mkdir(path, os.ModePerm)
	}
	return path
}
// fromFileSystemDir prefixes path with the current user's
// "~/go-mongo-lb-driver-file-system/" directory; it panics if the current
// user cannot be determined.
func fromFileSystemDir(path string) string {
	me, err := user.Current()
	if err != nil {
		panic(err)
	}
	return me.HomeDir + "/go-mongo-lb-driver-file-system/" + path
}
|
/*
* Created on Fri Feb 01 2019 9:10:2
* Author: WuLC
* EMail: liangchaowu5@gmail.com
*/
// mincostTickets solves LeetCode 983 "Minimum Cost For Tickets" by dynamic
// programming: dp[i] is the minimum cost to cover the first i travel days.
// days must be strictly increasing; costs is {1-day, 7-day, 30-day}.
// O(30*n) time, O(n) space.
func mincostTickets(days []int, costs []int) int {
	dp := []int{0}
	for i := 0; i < len(days); i++ {
		// Option 1: a single-day ticket for days[i].
		best := dp[i] + costs[0]
		// Options 2/3: a 7- or 30-day pass whose window covers days[i].
		// BUG FIX: the scan now starts at j == i so that a pass bought to
		// cover only days[i] itself is considered — the original started at
		// i-1 and returned the wrong answer whenever a pass is cheaper than
		// a single-day ticket (e.g. days=[1], costs=[10,2,25]).
		for j := i; j >= 0 && j >= i-30; j-- {
			if days[j] > days[i]-7 && dp[j]+costs[1] < best {
				best = dp[j] + costs[1]
			}
			if days[j] > days[i]-30 {
				if dp[j]+costs[2] < best {
					best = dp[j] + costs[2]
				}
			} else {
				// days is sorted, so earlier entries are outside the
				// 30-day window too.
				break
			}
		}
		dp = append(dp, best)
	}
	return dp[len(dp)-1]
}
// min returns the smaller of its two int arguments.
func min(A, B int) int {
	if B < A {
		return B
	}
	return A
}
// Copyright 2020 The Tekton Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pipelinerun
import (
"reflect"
"sigs.k8s.io/controller-runtime/pkg/client"
"testing"
"github.com/jenkins-x/go-scm/scm"
fakescm "github.com/jenkins-x/go-scm/scm/driver/fake"
tb "github.com/tektoncd/experimental/commit-status-tracker/test/builder"
pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
ttesting "github.com/tektoncd/pipeline/pkg/reconciler/testing"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/scheme"
"knative.dev/pkg/apis"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
// Shared fixtures for the reconciler tests below.
var (
	testNamespace   = "test-namespace"
	pipelineRunName = "test-pipeline-run"
	testToken       = "abcdefghijklmnopqrstuvwxyz12345678901234" // fake auth token
	testRepoURL     = "https://github.com/tektoncd/triggers"
)

// Compile-time check that PipelinerunReconciler implements reconcile.Reconciler.
var _ reconcile.Reconciler = &PipelinerunReconciler{}
// TestPipelineRunControllerPendingState runs the reconciler against a fake
// client that tracks PipelineRun objects and verifies that a "pending"
// commit status is sent while the run's Succeeded condition is Unknown.
func TestPipelineRunControllerPendingState(t *testing.T) {
	pipelineRun := makePipelineRunWithResources(
		makeGitResourceBinding(testRepoURL, "master"))
	applyOpts(
		pipelineRun,
		tb.PipelineRunAnnotation(notifiableName, "true"),
		tb.PipelineRunAnnotation(statusContextName, "test-context"),
		tb.PipelineRunAnnotation(statusDescriptionName, "testing"),
		tb.PipelineRunStatus(tb.PipelineRunStatusCondition(
			apis.Condition{Type: apis.ConditionSucceeded, Status: corev1.ConditionUnknown})))
	objs := []client.Object{
		pipelineRun,
		makeSecret(defaultSecretName, map[string][]byte{"token": []byte(testToken)}),
	}
	r, data := makeReconciler(t, testRepoURL, pipelineRun, objs...)
	req := reconcile.Request{
		NamespacedName: types.NamespacedName{
			Name:      pipelineRunName,
			Namespace: testNamespace,
		},
	}
	ctx, _ := ttesting.SetupFakeContext(t)
	res, err := r.Reconcile(ctx, req)
	fatalIfError(t, err, "reconcile: (%v)", err)
	if res.Requeue {
		t.Fatal("reconcile requeued request")
	}
	wanted := &scm.Status{State: scm.StatePending, Label: "test-context", Desc: "testing", Target: ""}
	status := data.Statuses["master"][0]
	if !reflect.DeepEqual(status, wanted) {
		t.Fatalf("commit-status notification got %#v, wanted %#v\n", status, wanted)
	}
}
// TestPipelineRunControllerWithGitRepoAndRevisionViaAnnotation verifies that
// the repo URL and revision can be supplied via annotations instead of a git
// resource binding, and that a "pending" commit status is still sent.
func TestPipelineRunControllerWithGitRepoAndRevisionViaAnnotation(t *testing.T) {
	pipelineRun := makePipelineRunWithResources()
	applyOpts(
		pipelineRun,
		tb.PipelineRunAnnotation(notifiableName, "true"),
		tb.PipelineRunAnnotation(statusContextName, "test-context"),
		tb.PipelineRunAnnotation(statusDescriptionName, "testing"),
		tb.PipelineRunAnnotation(gitRepoToReportTo, testRepoURL),
		tb.PipelineRunAnnotation(gitRevision, "master"),
		tb.PipelineRunStatus(tb.PipelineRunStatusCondition(
			apis.Condition{Type: apis.ConditionSucceeded, Status: corev1.ConditionUnknown})))
	objs := []client.Object{
		pipelineRun,
		makeSecret(defaultSecretName, map[string][]byte{"token": []byte(testToken)}),
	}
	r, data := makeReconciler(t, testRepoURL, pipelineRun, objs...)
	req := reconcile.Request{
		NamespacedName: types.NamespacedName{
			Name:      pipelineRunName,
			Namespace: testNamespace,
		},
	}
	ctx, _ := ttesting.SetupFakeContext(t)
	res, err := r.Reconcile(ctx, req)
	fatalIfError(t, err, "reconcile: (%v)", err)
	if res.Requeue {
		t.Fatal("reconcile requeued request")
	}
	wanted := &scm.Status{State: scm.StatePending, Label: "test-context", Desc: "testing", Target: ""}
	status := data.Statuses["master"][0]
	if !reflect.DeepEqual(status, wanted) {
		t.Fatalf("commit-status notification got %#v, wanted %#v\n", status, wanted)
	}
}
// TestPipelineRunReconcileWithPreviousPending tests a PipelineRun for which
// we've already sent a pending notification: a second reconcile must not
// send another one.
func TestPipelineRunReconcileWithPreviousPending(t *testing.T) {
	pipelineRun := makePipelineRunWithResources(
		makeGitResourceBinding(testRepoURL, "master"))
	applyOpts(
		pipelineRun,
		tb.PipelineRunAnnotation(notifiableName, "true"),
		tb.PipelineRunAnnotation(statusContextName, "test-context"),
		tb.PipelineRunAnnotation(statusDescriptionName, "testing"),
		tb.PipelineRunStatus(tb.PipelineRunStatusCondition(
			apis.Condition{Type: apis.ConditionSucceeded, Status: corev1.ConditionUnknown})))
	objs := []client.Object{
		pipelineRun,
		makeSecret(defaultSecretName, map[string][]byte{"token": []byte(testToken)}),
	}
	r, data := makeReconciler(t, testRepoURL, pipelineRun, objs...)
	req := reconcile.Request{
		NamespacedName: types.NamespacedName{
			Name:      pipelineRunName,
			Namespace: testNamespace,
		},
	}
	// This runs Reconcile twice.
	ctx, _ := ttesting.SetupFakeContext(t)
	res, err := r.Reconcile(ctx, req)
	fatalIfError(t, err, "reconcile: (%v)", err)
	if res.Requeue {
		t.Fatal("reconcile requeued request")
	}
	// This cleans out the existing state for the data, because the fake scm
	// client updates in-place, so there's no way to know if it received
	// multiple pending notifications.
	delete(data.Statuses, "master")
	res, err = r.Reconcile(ctx, req)
	fatalIfError(t, err, "reconcile: (%v)", err)
	if res.Requeue {
		t.Fatal("reconcile requeued request")
	}
	// There should be no recorded statuses, because the state is still
	// pending and the fake client's state was deleted above.
	assertNoStatusesRecorded(t, data)
}
// TestPipelineRunControllerSuccessState verifies that a "success" commit
// status is sent once the run's Succeeded condition is True.
func TestPipelineRunControllerSuccessState(t *testing.T) {
	pipelineRun := makePipelineRunWithResources(
		makeGitResourceBinding(testRepoURL, "master"))
	applyOpts(
		pipelineRun,
		tb.PipelineRunAnnotation(notifiableName, "true"),
		tb.PipelineRunAnnotation(statusContextName, "test-context"),
		tb.PipelineRunAnnotation(statusDescriptionName, "testing"),
		tb.PipelineRunStatus(tb.PipelineRunStatusCondition(
			apis.Condition{Type: apis.ConditionSucceeded, Status: corev1.ConditionTrue})))
	objs := []client.Object{
		pipelineRun,
		makeSecret(defaultSecretName, map[string][]byte{"token": []byte(testToken)}),
	}
	r, data := makeReconciler(t, testRepoURL, pipelineRun, objs...)
	req := reconcile.Request{
		NamespacedName: types.NamespacedName{
			Name:      pipelineRunName,
			Namespace: testNamespace,
		},
	}
	ctx, _ := ttesting.SetupFakeContext(t)
	res, err := r.Reconcile(ctx, req)
	fatalIfError(t, err, "reconcile: (%v)", err)
	if res.Requeue {
		t.Fatal("reconcile requeued request")
	}
	wanted := &scm.Status{State: scm.StateSuccess, Label: "test-context", Desc: "testing", Target: ""}
	status := data.Statuses["master"][0]
	if !reflect.DeepEqual(status, wanted) {
		t.Fatalf("commit-status notification got %#v, wanted %#v\n", status, wanted)
	}
}
// TestPipelineRunControllerFailedState verifies that a "failure" commit
// status is sent once the run's Succeeded condition is False.
func TestPipelineRunControllerFailedState(t *testing.T) {
	pipelineRun := makePipelineRunWithResources(
		makeGitResourceBinding(testRepoURL, "master"))
	applyOpts(
		pipelineRun,
		tb.PipelineRunAnnotation(notifiableName, "true"),
		tb.PipelineRunAnnotation(statusContextName, "test-context"),
		tb.PipelineRunAnnotation(statusDescriptionName, "testing"),
		tb.PipelineRunStatus(tb.PipelineRunStatusCondition(
			apis.Condition{Type: apis.ConditionSucceeded, Status: corev1.ConditionFalse})))
	objs := []client.Object{
		pipelineRun,
		makeSecret(defaultSecretName, map[string][]byte{"token": []byte(testToken)}),
	}
	r, data := makeReconciler(t, testRepoURL, pipelineRun, objs...)
	req := reconcile.Request{
		NamespacedName: types.NamespacedName{
			Name:      pipelineRunName,
			Namespace: testNamespace,
		},
	}
	ctx, _ := ttesting.SetupFakeContext(t)
	res, err := r.Reconcile(ctx, req)
	fatalIfError(t, err, "reconcile: (%v)", err)
	if res.Requeue {
		t.Fatal("reconcile requeued request")
	}
	wanted := &scm.Status{State: scm.StateFailure, Label: "test-context", Desc: "testing", Target: ""}
	status := data.Statuses["master"][0]
	if !reflect.DeepEqual(status, wanted) {
		t.Fatalf("commit-status notification got %#v, wanted %#v\n", status, wanted)
	}
}
// TestPipelineRunReconcileNonNotifiable tests a PipelineRun that is not
// marked notifiable: no commit status must be recorded.
// (Comment fixed: it previously named an unrelated test.)
func TestPipelineRunReconcileNonNotifiable(t *testing.T) {
	pipelineRun := makePipelineRunWithResources(
		makeGitResourceBinding(testRepoURL, "master"))
	applyOpts(
		pipelineRun,
		tb.PipelineRunStatus(tb.PipelineRunStatusCondition(
			apis.Condition{Type: apis.ConditionSucceeded, Status: corev1.ConditionUnknown})))
	objs := []client.Object{
		pipelineRun,
		makeSecret(defaultSecretName, map[string][]byte{"token": []byte(testToken)}),
	}
	r, data := makeReconciler(t, testRepoURL, pipelineRun, objs...)
	req := reconcile.Request{
		NamespacedName: types.NamespacedName{
			Name:      pipelineRunName,
			Namespace: testNamespace,
		},
	}
	ctx, _ := ttesting.SetupFakeContext(t)
	res, err := r.Reconcile(ctx, req)
	fatalIfError(t, err, "reconcile: (%v)", err)
	if res.Requeue {
		t.Fatal("reconcile requeued request")
	}
	assertNoStatusesRecorded(t, data)
}
// TestPipelineRunReconcileWithNoGitRepository tests a notifiable PipelineRun
// with no "git" resource: no commit status must be recorded.
// (Comment fixed: it previously named an unrelated test.)
func TestPipelineRunReconcileWithNoGitRepository(t *testing.T) {
	pipelineRun := makePipelineRunWithResources()
	applyOpts(
		pipelineRun,
		tb.PipelineRunAnnotation(notifiableName, "true"),
		tb.PipelineRunAnnotation(statusContextName, "test-context"),
		tb.PipelineRunAnnotation(statusDescriptionName, "testing"),
		tb.PipelineRunStatus(tb.PipelineRunStatusCondition(
			apis.Condition{Type: apis.ConditionSucceeded, Status: corev1.ConditionUnknown})))
	objs := []client.Object{
		pipelineRun,
		makeSecret(defaultSecretName, map[string][]byte{"token": []byte(testToken)}),
	}
	r, data := makeReconciler(t, "", pipelineRun, objs...)
	req := reconcile.Request{
		NamespacedName: types.NamespacedName{
			Name:      pipelineRunName,
			Namespace: testNamespace,
		},
	}
	ctx, _ := ttesting.SetupFakeContext(t)
	res, err := r.Reconcile(ctx, req)
	fatalIfError(t, err, "reconcile: (%v)", err)
	if res.Requeue {
		t.Fatal("reconcile requeued request")
	}
	assertNoStatusesRecorded(t, data)
}
// TestPipelineRunReconcileWithGitRepoAnnotationButGitRevisionMissing tests a
// notifiable PipelineRun with the "tekton.dev/git-repo" annotation but no
// "tekton.dev/git-revision" annotation: no commit status must be recorded.
func TestPipelineRunReconcileWithGitRepoAnnotationButGitRevisionMissing(t *testing.T) {
	pipelineRun := makePipelineRunWithResources()
	applyOpts(
		pipelineRun,
		tb.PipelineRunAnnotation(notifiableName, "true"),
		tb.PipelineRunAnnotation(statusContextName, "test-context"),
		tb.PipelineRunAnnotation(statusDescriptionName, "testing"),
		tb.PipelineRunAnnotation(gitRepoToReportTo, testRepoURL),
		tb.PipelineRunStatus(tb.PipelineRunStatusCondition(
			apis.Condition{Type: apis.ConditionSucceeded, Status: corev1.ConditionUnknown})))
	objs := []client.Object{
		pipelineRun,
		makeSecret(defaultSecretName, map[string][]byte{"token": []byte(testToken)}),
	}
	r, data := makeReconciler(t, "", pipelineRun, objs...)
	req := reconcile.Request{
		NamespacedName: types.NamespacedName{
			Name:      pipelineRunName,
			Namespace: testNamespace,
		},
	}
	ctx, _ := ttesting.SetupFakeContext(t)
	res, err := r.Reconcile(ctx, req)
	fatalIfError(t, err, "reconcile: (%v)", err)
	if res.Requeue {
		t.Fatal("reconcile requeued request")
	}
	assertNoStatusesRecorded(t, data)
}
// TestPipelineRunReconcileWithGitRevisionAnnotationButGitRepoMissing tests a
// notifiable PipelineRun with the "tekton.dev/git-revision" annotation but
// no "tekton.dev/git-repo" annotation: no commit status must be recorded.
func TestPipelineRunReconcileWithGitRevisionAnnotationButGitRepoMissing(t *testing.T) {
	pipelineRun := makePipelineRunWithResources()
	applyOpts(
		pipelineRun,
		tb.PipelineRunAnnotation(notifiableName, "true"),
		tb.PipelineRunAnnotation(statusContextName, "test-context"),
		tb.PipelineRunAnnotation(statusDescriptionName, "testing"),
		tb.PipelineRunAnnotation(gitRevision, "master"),
		tb.PipelineRunStatus(tb.PipelineRunStatusCondition(
			apis.Condition{Type: apis.ConditionSucceeded, Status: corev1.ConditionUnknown})))
	objs := []client.Object{
		pipelineRun,
		makeSecret(defaultSecretName, map[string][]byte{"token": []byte(testToken)}),
	}
	r, data := makeReconciler(t, "", pipelineRun, objs...)
	req := reconcile.Request{
		NamespacedName: types.NamespacedName{
			Name:      pipelineRunName,
			Namespace: testNamespace,
		},
	}
	ctx, _ := ttesting.SetupFakeContext(t)
	res, err := r.Reconcile(ctx, req)
	fatalIfError(t, err, "reconcile: (%v)", err)
	if res.Requeue {
		t.Fatal("reconcile requeued request")
	}
	assertNoStatusesRecorded(t, data)
}
// TestPipelineRunReconcileWithGitRepositories tests a notifiable PipelineRun
// with multiple "git" resources: with more than one repository the target is
// ambiguous, so no commit status should be recorded.
func TestPipelineRunReconcileWithGitRepositories(t *testing.T) {
	pipelineRun := makePipelineRunWithResources(
		makeGitResourceBinding(testRepoURL, "master"),
		makeGitResourceBinding("https://github.com/tektoncd/pipeline", "master"))
	applyOpts(
		pipelineRun,
		tb.PipelineRunAnnotation(notifiableName, "true"),
		tb.PipelineRunAnnotation(statusContextName, "test-context"),
		tb.PipelineRunAnnotation(statusDescriptionName, "testing"),
		tb.PipelineRunStatus(tb.PipelineRunStatusCondition(
			apis.Condition{Type: apis.ConditionSucceeded, Status: corev1.ConditionUnknown})))
	objs := []client.Object{
		pipelineRun,
		makeSecret(defaultSecretName, map[string][]byte{"token": []byte(testToken)}),
	}
	r, data := makeReconciler(t, "", pipelineRun, objs...)
	req := reconcile.Request{
		NamespacedName: types.NamespacedName{
			Name:      pipelineRunName,
			Namespace: testNamespace,
		},
	}
	ctx, _ := ttesting.SetupFakeContext(t)
	res, err := r.Reconcile(ctx, req)
	fatalIfError(t, err, "reconcile: (%v)", err)
	if res.Requeue {
		t.Fatal("reconcile requeued request")
	}
	assertNoStatusesRecorded(t, data)
}
// TestPipelineRunReconcileWithNoGitCredentials tests a notifiable PipelineRun
// with "git" resources but no Git credentials secret seeded into the cluster:
// no commit status should be recorded.
func TestPipelineRunReconcileWithNoGitCredentials(t *testing.T) {
	pipelineRun := makePipelineRunWithResources(
		makeGitResourceBinding(testRepoURL, "master"),
		makeGitResourceBinding("https://github.com/tektoncd/pipeline", "master"))
	applyOpts(
		pipelineRun,
		tb.PipelineRunAnnotation(notifiableName, "true"),
		tb.PipelineRunAnnotation(statusContextName, "test-context"),
		tb.PipelineRunAnnotation(statusDescriptionName, "testing"),
		tb.PipelineRunStatus(tb.PipelineRunStatusCondition(
			apis.Condition{Type: apis.ConditionSucceeded, Status: corev1.ConditionUnknown})))
	// Note: unlike the other cases, no secret is created here.
	objs := []client.Object{pipelineRun}
	r, data := makeReconciler(t, "", pipelineRun, objs...)
	req := reconcile.Request{
		NamespacedName: types.NamespacedName{
			Name:      pipelineRunName,
			Namespace: testNamespace,
		},
	}
	ctx, _ := ttesting.SetupFakeContext(t)
	res, err := r.Reconcile(ctx, req)
	fatalIfError(t, err, "reconcile: (%v)", err)
	if res.Requeue {
		t.Fatal("reconcile requeued request")
	}
	assertNoStatusesRecorded(t, data)
}
// TestKeyForCommit checks the mapping of a repo/SHA pair to its status key.
func TestKeyForCommit(t *testing.T) {
	cases := []struct {
		repo string
		sha  string
		want string
	}{
		{"tekton/triggers", "e1466db56110fa1b813277c1647e20283d3370c3",
			"7b2841ab8791fece7acdc0b3bb6e398c7a184273"},
	}
	for _, tc := range cases {
		got := keyForCommit(tc.repo, tc.sha)
		if got != tc.want {
			t.Errorf("keyForCommit(%#v, %#v) got %#v, want %#v", tc.repo, tc.sha, got, tc.want)
		}
	}
}
// applyOpts applies each PipelineRun option to pr, in order.
func applyOpts(pr *pipelinev1.PipelineRun, opts ...tb.PipelineRunOp) {
	for i := range opts {
		opts[i](pr)
	}
}
// makeReconciler builds a PipelinerunReconciler backed by a fake Kubernetes
// client seeded with objs and a fake SCM client. The SCM factory fails the
// test if asked for any repository other than wantRepoURL. The returned
// fakescm.Data records commit statuses created through the fake SCM client.
func makeReconciler(t *testing.T, wantRepoURL string, pr *pipelinev1.PipelineRun, objs ...client.Object) (*PipelinerunReconciler, *fakescm.Data) {
	t.Helper()
	s := scheme.Scheme
	s.AddKnownTypes(pipelinev1.SchemeGroupVersion, pr)
	cl := fake.NewClientBuilder().WithObjects(objs...).Build()
	gitClient, data := fakescm.NewDefault()
	fakeClientFactory := func(repoURL, token string) (*scm.Client, error) {
		if wantRepoURL != repoURL {
			t.Fatalf("repository url mismatch: got %q, want %q", repoURL, wantRepoURL)
		}
		return gitClient, nil
	}
	return &PipelinerunReconciler{
		Client:       cl,
		Scheme:       s,
		scmFactory:   fakeClientFactory,
		pipelineRuns: make(pipelineRunTracker),
	}, data
}
// fatalIfError fails the test immediately with the formatted message when
// err is non-nil; it is a no-op otherwise.
func fatalIfError(t *testing.T, err error, format string, a ...interface{}) {
	t.Helper()
	if err == nil {
		return
	}
	t.Fatalf(format, a...)
}
// assertNoStatusesRecorded fails the test when any commit status was
// recorded for the "master" ref in the fake SCM data.
func assertNoStatusesRecorded(t *testing.T, d *fakescm.Data) {
	if l := len(d.Statuses["master"]); l != 0 {
		t.Fatalf("too many statuses recorded, got %v, wanted 0", l)
	}
}
|
package server
import (
"encoding/json"
"fmt"
"github.com/loft-sh/devspace/pkg/devspace/dependency/registry"
"github.com/loft-sh/devspace/pkg/devspace/pipeline/types"
"net/http"
)
// ping verifies the request targets this server's run: it decodes the
// payload and, when the run IDs differ, answers 409 Conflict with this
// server's run ID as the body. A matching run ID yields an empty 200.
func (h *handler) ping(w http.ResponseWriter, req *http.Request) {
	var payload registry.PingPayload
	if err := json.NewDecoder(req.Body).Decode(&payload); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	if payload.RunID != h.ctx.RunID() {
		http.Error(w, h.ctx.RunID(), http.StatusConflict)
	}
}
// excludeDependency stops (closes) a named dependency of the running
// pipeline. Requests carrying a stale RunID, or arriving when no pipeline is
// set, are silently ignored; a pipeline may not exclude itself (403); an
// unknown dependency name is a no-op.
func (h *handler) excludeDependency(w http.ResponseWriter, req *http.Request) {
	decoder := json.NewDecoder(req.Body)
	var t registry.ExcludePayload
	err := decoder.Decode(&t)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	if h.pipeline == nil || t.RunID != h.ctx.RunID() {
		// we allow this here as apparently the request targeted a wrong server
		return
	}
	// we don't allow killing ourselves
	if h.pipeline.Name() == t.DependencyName {
		w.WriteHeader(http.StatusForbidden)
		return
	}
	// try to find the dependency name and kill it
	dep := findDependency(h.pipeline, t.DependencyName)
	if dep != nil {
		h.ctx.Log().Debugf("stopping dependency %v", t.DependencyName)
		err = dep.Close()
		if err != nil {
			http.Error(w, fmt.Sprintf("error stopping dependency: %v", err), http.StatusInternalServerError)
			return
		}
	}
}
// findDependency walks pipe's dependency tree depth-first and returns the
// pipeline whose name matches dependencyName, or nil when none matches.
func findDependency(pipe types.Pipeline, dependencyName string) types.Pipeline {
	for _, child := range pipe.Dependencies() {
		if child.Name() == dependencyName {
			return child
		}
		if found := findDependency(child, dependencyName); found != nil {
			return found
		}
	}
	return nil
}
|
package requests
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/url"
"strings"
"github.com/atomicjolt/canvasapi"
"github.com/atomicjolt/canvasapi/models"
)
// GetUploadedMediaFolderForUserCourses returns the details for a designated upload folder that the user has rights to
// upload to, and creates it if it doesn't exist.
//
// If the current user does not have the permissions to manage files
// in the course or group, the folder will belong to the current user directly.
// https://canvas.instructure.com/doc/api/files.html
//
// Path Parameters:
// # Path.CourseID (Required) ID
//
type GetUploadedMediaFolderForUserCourses struct {
	Path struct {
		// Canvas course identifier, substituted into the request path.
		CourseID string `json:"course_id" url:"course_id,omitempty"` // (Required)
	} `json:"path"`
}
// GetMethod returns the HTTP method used for this request.
func (t *GetUploadedMediaFolderForUserCourses) GetMethod() string {
	return "GET"
}
// GetURLPath builds the request path with the course ID substituted in.
func (t *GetUploadedMediaFolderForUserCourses) GetURLPath() string {
	return strings.ReplaceAll(
		"courses/{course_id}/folders/media",
		"{course_id}",
		fmt.Sprintf("%v", t.Path.CourseID),
	)
}
// GetQuery returns the URL query string; this endpoint takes no query parameters.
func (t *GetUploadedMediaFolderForUserCourses) GetQuery() (string, error) {
	return "", nil
}
// GetBody returns the form body; this endpoint sends none.
func (t *GetUploadedMediaFolderForUserCourses) GetBody() (url.Values, error) {
	return nil, nil
}
// GetJSON returns the JSON body; this endpoint sends none.
func (t *GetUploadedMediaFolderForUserCourses) GetJSON() ([]byte, error) {
	return nil, nil
}
// HasErrors validates the request's required parameters, returning a single
// error that joins every validation failure, or nil when the request is valid.
func (t *GetUploadedMediaFolderForUserCourses) HasErrors() error {
	errs := []string{}
	if t.Path.CourseID == "" {
		errs = append(errs, "'Path.CourseID' is required")
	}
	if len(errs) > 0 {
		// Use an explicit %s verb: passing the joined string directly as the
		// format would treat any '%' in it as a directive (go vet printf).
		return fmt.Errorf("%s", strings.Join(errs, ", "))
	}
	return nil
}
// Do executes the request against the Canvas API and decodes the JSON
// response into a models.Folder.
func (t *GetUploadedMediaFolderForUserCourses) Do(c *canvasapi.Canvas) (*models.Folder, error) {
	response, err := c.SendRequest(t)
	if err != nil {
		return nil, err
	}
	// Read the whole body, then close immediately (not deferred) before
	// checking the read error.
	body, err := ioutil.ReadAll(response.Body)
	response.Body.Close()
	if err != nil {
		return nil, err
	}
	ret := models.Folder{}
	err = json.Unmarshal(body, &ret)
	if err != nil {
		return nil, err
	}
	return &ret, nil
}
|
// Copyright 2018, Irfan Sharif.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package log
import (
"sync"
"sync/atomic"
)
// tracePointMap is a set of enabled tracepoints, keyed by "fname.go:linenumber".
type tracePointMap map[string]struct{}

// fileModeMap maps a file name to its per-file log mode.
type fileModeMap map[string]Mode
// gstateT holds the package-global logging state. The maps are read through
// atomic.Value so readers are lock-free; writers clone-and-replace the maps
// (copy-on-write, see SetTracePoint et al.) and serialize among themselves
// with the embedded Mutex.
type gstateT struct {
	gmode atomic.Value // type: Mode; the global log mode
	tracePointMu struct {
		sync.Mutex
		m atomic.Value // type: tracePointMap
	}
	fileModeMu struct {
		sync.Mutex
		m atomic.Value // type: fileModeMap
	}
}
// gstate is the single package-wide logging state instance.
var gstate gstateT

// Need to initialize the atomics; to be used once during init time.
func init() {
	gstate.gmode.Store(DefaultMode)
	gstate.tracePointMu.m.Store(make(tracePointMap))
	gstate.fileModeMu.m.Store(make(fileModeMap))
}
// SetGlobalLogMode sets the global log mode to the one specified. Logging
// outside what's included in the mode is thereby suppressed. Safe for
// concurrent use (the mode is stored atomically).
func SetGlobalLogMode(m Mode) {
	gstate.gmode.Store(m)
}
// GetGlobalLogMode gets the currently set global log mode. Safe for
// concurrent use (the mode is loaded atomically).
func GetGlobalLogMode() Mode {
	return gstate.gmode.Load().(Mode)
}
// SetTracePoint enables the provided tracepoint. A tracepoint is of the form
// filename.go:line-number (compiles to [\w]+.go:[\d]+) corresponding to the
// position of a logging statement that once enabled, emits a backtrace when
// the logging statement is executed. The specified tracepoint is agnostic to
// the mode, i.e. Logger.{Info|Warn|Error|Fatal|Debug}{,f}, used at the line.
func SetTracePoint(tp string) {
	// Writers serialize on the mutex; readers stay lock-free.
	gstate.tracePointMu.Lock()
	defer gstate.tracePointMu.Unlock()

	// Copy-on-write: clone the current set, apply the update, then publish
	// the clone atomically. Readers holding the old map are unaffected, and
	// the old map is garbage collected once they are done with it.
	cur := gstate.tracePointMu.m.Load().(tracePointMap)
	next := make(tracePointMap, len(cur)+1)
	for k := range cur {
		next[k] = struct{}{}
	}
	next[tp] = struct{}{}
	gstate.tracePointMu.m.Store(next)
}
// ResetTracePoint resets the provided tracepoint so that backtraces are no
// longer emitted when the specified logging statement is executed. See the
// comment for SetTracePoint for what a tracepoint is.
func ResetTracePoint(tp string) {
	// Writers serialize on the mutex; readers stay lock-free.
	gstate.tracePointMu.Lock()
	defer gstate.tracePointMu.Unlock()

	// Copy-on-write: clone the current set minus tp and publish the clone
	// atomically; concurrent readers keep using the old map until done.
	cur := gstate.tracePointMu.m.Load().(tracePointMap)
	next := make(tracePointMap, len(cur))
	for k := range cur {
		next[k] = struct{}{}
	}
	delete(next, tp)
	gstate.tracePointMu.m.Store(next)
}
// GetTracePoint checks if the corresponding tracepoint is enabled.
// Lock-free: it only performs an atomic load of the tracepoint set.
func GetTracePoint(tp string) (tpenabled bool) {
	tpmap := gstate.tracePointMu.m.Load().(tracePointMap)
	_, ok := tpmap[tp]
	return ok
}
// SetFileLogMode sets the log mode for the provided filename. Subsequent
// logging statements within the file get filtered accordingly.
func SetFileLogMode(fname string, m Mode) {
	// Writers serialize on the mutex; readers stay lock-free.
	gstate.fileModeMu.Lock()
	defer gstate.fileModeMu.Unlock()

	// Copy-on-write: clone the current map, apply the update, then publish
	// the clone atomically; the old map lives until its readers finish.
	cur := gstate.fileModeMu.m.Load().(fileModeMap)
	next := make(fileModeMap, len(cur)+1)
	for f, mode := range cur {
		next[f] = mode
	}
	next[fname] = m
	gstate.fileModeMu.m.Store(next)
}
// GetFileLogMode gets the log mode for the specified file. ok reports
// whether a per-file mode was set. Lock-free: a single atomic load.
func GetFileLogMode(fname string) (m Mode, ok bool) {
	fmmap := gstate.fileModeMu.m.Load().(fileModeMap)
	m, ok = fmmap[fname]
	return m, ok
}
// ResetFileLogMode resets the log mode for the provided filename. Subsequent
// logging statements within the file get filtered as per the global log mode.
func ResetFileLogMode(fname string) {
	// Writers serialize on the mutex; readers stay lock-free.
	gstate.fileModeMu.Lock()
	defer gstate.fileModeMu.Unlock()

	// Copy-on-write: clone the current map without fname and publish the
	// clone atomically; the old map lives until its readers finish.
	cur := gstate.fileModeMu.m.Load().(fileModeMap)
	next := make(fileModeMap, len(cur))
	for f, mode := range cur {
		next[f] = mode
	}
	delete(next, fname)
	gstate.fileModeMu.m.Store(next)
}
|
package odoo
import (
"fmt"
)
// ResPartner represents res.partner model.
//
// NOTE(review): every tag option below reads "omptempty" rather than the
// standard "omitempty". This looks like a generator typo applied
// consistently across the file; whether the xmlrpc encoder honors either
// spelling needs confirming before changing the tags, since "fixing" them
// could alter which empty fields are serialized.
type ResPartner struct {
	LastUpdate                    *Time      `xmlrpc:"__last_update,omptempty"`
	Active                        *Bool      `xmlrpc:"active,omptempty"`
	ActivityDateDeadline          *Time      `xmlrpc:"activity_date_deadline,omptempty"`
	ActivityIds                   *Relation  `xmlrpc:"activity_ids,omptempty"`
	ActivityState                 *Selection `xmlrpc:"activity_state,omptempty"`
	ActivitySummary               *String    `xmlrpc:"activity_summary,omptempty"`
	ActivityTypeId                *Many2One  `xmlrpc:"activity_type_id,omptempty"`
	ActivityUserId                *Many2One  `xmlrpc:"activity_user_id,omptempty"`
	BankAccountCount              *Int       `xmlrpc:"bank_account_count,omptempty"`
	BankIds                       *Relation  `xmlrpc:"bank_ids,omptempty"`
	Barcode                       *String    `xmlrpc:"barcode,omptempty"`
	CalendarLastNotifAck          *Time      `xmlrpc:"calendar_last_notif_ack,omptempty"`
	CategoryId                    *Relation  `xmlrpc:"category_id,omptempty"`
	ChannelIds                    *Relation  `xmlrpc:"channel_ids,omptempty"`
	ChildIds                      *Relation  `xmlrpc:"child_ids,omptempty"`
	City                          *String    `xmlrpc:"city,omptempty"`
	Color                         *Int       `xmlrpc:"color,omptempty"`
	Comment                       *String    `xmlrpc:"comment,omptempty"`
	CommercialCompanyName         *String    `xmlrpc:"commercial_company_name,omptempty"`
	CommercialPartnerCountryId    *Many2One  `xmlrpc:"commercial_partner_country_id,omptempty"`
	CommercialPartnerId           *Many2One  `xmlrpc:"commercial_partner_id,omptempty"`
	CompanyId                     *Many2One  `xmlrpc:"company_id,omptempty"`
	CompanyName                   *String    `xmlrpc:"company_name,omptempty"`
	CompanyType                   *Selection `xmlrpc:"company_type,omptempty"`
	ContactAddress                *String    `xmlrpc:"contact_address,omptempty"`
	ContractIds                   *Relation  `xmlrpc:"contract_ids,omptempty"`
	ContractsCount                *Int       `xmlrpc:"contracts_count,omptempty"`
	CountryId                     *Many2One  `xmlrpc:"country_id,omptempty"`
	CreateDate                    *Time      `xmlrpc:"create_date,omptempty"`
	CreateUid                     *Many2One  `xmlrpc:"create_uid,omptempty"`
	Credit                        *Float     `xmlrpc:"credit,omptempty"`
	CreditLimit                   *Float     `xmlrpc:"credit_limit,omptempty"`
	CurrencyId                    *Many2One  `xmlrpc:"currency_id,omptempty"`
	Customer                      *Bool      `xmlrpc:"customer,omptempty"`
	Date                          *Time      `xmlrpc:"date,omptempty"`
	Debit                         *Float     `xmlrpc:"debit,omptempty"`
	DebitLimit                    *Float     `xmlrpc:"debit_limit,omptempty"`
	DisplayName                   *String    `xmlrpc:"display_name,omptempty"`
	Email                         *String    `xmlrpc:"email,omptempty"`
	EmailFormatted                *String    `xmlrpc:"email_formatted,omptempty"`
	Employee                      *Bool      `xmlrpc:"employee,omptempty"`
	Function                      *String    `xmlrpc:"function,omptempty"`
	HasUnreconciledEntries        *Bool      `xmlrpc:"has_unreconciled_entries,omptempty"`
	Id                            *Int       `xmlrpc:"id,omptempty"`
	ImStatus                      *String    `xmlrpc:"im_status,omptempty"`
	Image                         *String    `xmlrpc:"image,omptempty"`
	ImageMedium                   *String    `xmlrpc:"image_medium,omptempty"`
	ImageSmall                    *String    `xmlrpc:"image_small,omptempty"`
	IndustryId                    *Many2One  `xmlrpc:"industry_id,omptempty"`
	InvoiceIds                    *Relation  `xmlrpc:"invoice_ids,omptempty"`
	InvoiceWarn                   *Selection `xmlrpc:"invoice_warn,omptempty"`
	InvoiceWarnMsg                *String    `xmlrpc:"invoice_warn_msg,omptempty"`
	IsCompany                     *Bool      `xmlrpc:"is_company,omptempty"`
	JournalItemCount              *Int       `xmlrpc:"journal_item_count,omptempty"`
	Lang                          *Selection `xmlrpc:"lang,omptempty"`
	LastTimeEntriesChecked        *Time      `xmlrpc:"last_time_entries_checked,omptempty"`
	MachineOrganizationName       *String    `xmlrpc:"machine_organization_name,omptempty"`
	MeetingCount                  *Int       `xmlrpc:"meeting_count,omptempty"`
	MeetingIds                    *Relation  `xmlrpc:"meeting_ids,omptempty"`
	MessageBounce                 *Int       `xmlrpc:"message_bounce,omptempty"`
	MessageChannelIds             *Relation  `xmlrpc:"message_channel_ids,omptempty"`
	MessageFollowerIds            *Relation  `xmlrpc:"message_follower_ids,omptempty"`
	MessageIds                    *Relation  `xmlrpc:"message_ids,omptempty"`
	MessageIsFollower             *Bool      `xmlrpc:"message_is_follower,omptempty"`
	MessageLastPost               *Time      `xmlrpc:"message_last_post,omptempty"`
	MessageNeedaction             *Bool      `xmlrpc:"message_needaction,omptempty"`
	MessageNeedactionCounter      *Int       `xmlrpc:"message_needaction_counter,omptempty"`
	MessagePartnerIds             *Relation  `xmlrpc:"message_partner_ids,omptempty"`
	MessageUnread                 *Bool      `xmlrpc:"message_unread,omptempty"`
	MessageUnreadCounter          *Int       `xmlrpc:"message_unread_counter,omptempty"`
	Mobile                        *String    `xmlrpc:"mobile,omptempty"`
	Name                          *String    `xmlrpc:"name,omptempty"`
	OpportunityCount              *Int       `xmlrpc:"opportunity_count,omptempty"`
	OpportunityIds                *Relation  `xmlrpc:"opportunity_ids,omptempty"`
	OptOut                        *Bool      `xmlrpc:"opt_out,omptempty"`
	ParentId                      *Many2One  `xmlrpc:"parent_id,omptempty"`
	ParentName                    *String    `xmlrpc:"parent_name,omptempty"`
	PartnerShare                  *Bool      `xmlrpc:"partner_share,omptempty"`
	PaymentTokenCount             *Int       `xmlrpc:"payment_token_count,omptempty"`
	PaymentTokenIds               *Relation  `xmlrpc:"payment_token_ids,omptempty"`
	Phone                         *String    `xmlrpc:"phone,omptempty"`
	PickingWarn                   *Selection `xmlrpc:"picking_warn,omptempty"`
	PickingWarnMsg                *String    `xmlrpc:"picking_warn_msg,omptempty"`
	PropertyAccountPayableId      *Many2One  `xmlrpc:"property_account_payable_id,omptempty"`
	PropertyAccountPositionId     *Many2One  `xmlrpc:"property_account_position_id,omptempty"`
	PropertyAccountReceivableId   *Many2One  `xmlrpc:"property_account_receivable_id,omptempty"`
	PropertyAutosalesConfig       *Many2One  `xmlrpc:"property_autosales_config,omptempty"`
	PropertyPaymentTermId         *Many2One  `xmlrpc:"property_payment_term_id,omptempty"`
	PropertyProductPricelist      *Many2One  `xmlrpc:"property_product_pricelist,omptempty"`
	PropertyPurchaseCurrencyId    *Many2One  `xmlrpc:"property_purchase_currency_id,omptempty"`
	PropertyStockCustomer         *Many2One  `xmlrpc:"property_stock_customer,omptempty"`
	PropertyStockSupplier         *Many2One  `xmlrpc:"property_stock_supplier,omptempty"`
	PropertySupplierPaymentTermId *Many2One  `xmlrpc:"property_supplier_payment_term_id,omptempty"`
	PurchaseOrderCount            *Int       `xmlrpc:"purchase_order_count,omptempty"`
	PurchaseWarn                  *Selection `xmlrpc:"purchase_warn,omptempty"`
	PurchaseWarnMsg               *String    `xmlrpc:"purchase_warn_msg,omptempty"`
	Ref                           *String    `xmlrpc:"ref,omptempty"`
	RefCompanyIds                 *Relation  `xmlrpc:"ref_company_ids,omptempty"`
	SaleOrderCount                *Int       `xmlrpc:"sale_order_count,omptempty"`
	SaleOrderIds                  *Relation  `xmlrpc:"sale_order_ids,omptempty"`
	SaleWarn                      *Selection `xmlrpc:"sale_warn,omptempty"`
	SaleWarnMsg                   *String    `xmlrpc:"sale_warn_msg,omptempty"`
	Self                          *Many2One  `xmlrpc:"self,omptempty"`
	SignupExpiration              *Time      `xmlrpc:"signup_expiration,omptempty"`
	SignupToken                   *String    `xmlrpc:"signup_token,omptempty"`
	SignupType                    *String    `xmlrpc:"signup_type,omptempty"`
	SignupUrl                     *String    `xmlrpc:"signup_url,omptempty"`
	SignupValid                   *Bool      `xmlrpc:"signup_valid,omptempty"`
	Siret                         *String    `xmlrpc:"siret,omptempty"`
	StateId                       *Many2One  `xmlrpc:"state_id,omptempty"`
	Street                        *String    `xmlrpc:"street,omptempty"`
	Street2                       *String    `xmlrpc:"street2,omptempty"`
	Supplier                      *Bool      `xmlrpc:"supplier,omptempty"`
	SupplierInvoiceCount          *Int       `xmlrpc:"supplier_invoice_count,omptempty"`
	TaskCount                     *Int       `xmlrpc:"task_count,omptempty"`
	TaskIds                       *Relation  `xmlrpc:"task_ids,omptempty"`
	TeamId                        *Many2One  `xmlrpc:"team_id,omptempty"`
	Title                         *Many2One  `xmlrpc:"title,omptempty"`
	TotalInvoiced                 *Float     `xmlrpc:"total_invoiced,omptempty"`
	Trust                         *Selection `xmlrpc:"trust,omptempty"`
	Type                          *Selection `xmlrpc:"type,omptempty"`
	Tz                            *Selection `xmlrpc:"tz,omptempty"`
	TzOffset                      *String    `xmlrpc:"tz_offset,omptempty"`
	UserId                        *Many2One  `xmlrpc:"user_id,omptempty"`
	UserIds                       *Relation  `xmlrpc:"user_ids,omptempty"`
	Vat                           *String    `xmlrpc:"vat,omptempty"`
	Website                       *String    `xmlrpc:"website,omptempty"`
	WebsiteMessageIds             *Relation  `xmlrpc:"website_message_ids,omptempty"`
	WriteDate                     *Time      `xmlrpc:"write_date,omptempty"`
	WriteUid                      *Many2One  `xmlrpc:"write_uid,omptempty"`
	Zip                           *String    `xmlrpc:"zip,omptempty"`
}
// ResPartners represents a slice of res.partner records.
type ResPartners []ResPartner

// ResPartnerModel is the odoo model name.
const ResPartnerModel = "res.partner"
// Many2One converts ResPartner to a *Many2One referencing this record by ID.
// NOTE(review): reads rp.Id — behavior when rp.Id is nil depends on
// (*Int).Get's nil handling; confirm before calling on unsaved records.
func (rp *ResPartner) Many2One() *Many2One {
	return NewMany2One(rp.Id.Get(), "")
}
// CreateResPartner creates a new res.partner model and returns its id.
func (c *Client) CreateResPartner(rp *ResPartner) (int64, error) {
	ids, err := c.CreateResPartners([]*ResPartner{rp})
	switch {
	case err != nil:
		return -1, err
	case len(ids) == 0:
		return -1, nil
	default:
		return ids[0], nil
	}
}
// CreateResPartners creates new res.partner models and returns their ids.
func (c *Client) CreateResPartners(rps []*ResPartner) ([]int64, error) {
	var vv []interface{}
	for _, v := range rps {
		vv = append(vv, v)
	}
	return c.Create(ResPartnerModel, vv)
}
// UpdateResPartner updates an existing res.partner record, identified by
// rp.Id (which must therefore be set).
func (c *Client) UpdateResPartner(rp *ResPartner) error {
	return c.UpdateResPartners([]int64{rp.Id.Get()}, rp)
}
// UpdateResPartners updates existing res.partner records.
// All records (represented by ids) will be updated by rp values.
func (c *Client) UpdateResPartners(ids []int64, rp *ResPartner) error {
	return c.Update(ResPartnerModel, ids, rp)
}
// DeleteResPartner deletes an existing res.partner record by id.
func (c *Client) DeleteResPartner(id int64) error {
	return c.DeleteResPartners([]int64{id})
}
// DeleteResPartners deletes existing res.partner records by ids.
func (c *Client) DeleteResPartners(ids []int64) error {
	return c.Delete(ResPartnerModel, ids)
}
// GetResPartner gets an existing res.partner record by id; it returns an
// error when the id does not exist.
func (c *Client) GetResPartner(id int64) (*ResPartner, error) {
	partners, err := c.GetResPartners([]int64{id})
	if err != nil {
		return nil, err
	}
	if partners != nil && len(*partners) > 0 {
		first := &(*partners)[0]
		return first, nil
	}
	return nil, fmt.Errorf("id %v of res.partner not found", id)
}
// GetResPartners gets existing res.partner records by ids.
func (c *Client) GetResPartners(ids []int64) (*ResPartners, error) {
	rps := &ResPartners{}
	if err := c.Read(ResPartnerModel, ids, nil, rps); err != nil {
		return nil, err
	}
	return rps, nil
}
// FindResPartner finds the first res.partner record matching criteria
// (query limited to one result); it errors when nothing matches.
func (c *Client) FindResPartner(criteria *Criteria) (*ResPartner, error) {
	rps := &ResPartners{}
	if err := c.SearchRead(ResPartnerModel, criteria, NewOptions().Limit(1), rps); err != nil {
		return nil, err
	}
	if rps != nil && len(*rps) > 0 {
		return &((*rps)[0]), nil
	}
	return nil, fmt.Errorf("res.partner was not found with criteria %v", criteria)
}
// FindResPartners finds res.partner records by querying it
// and filtering it with criteria and options.
func (c *Client) FindResPartners(criteria *Criteria, options *Options) (*ResPartners, error) {
	rps := &ResPartners{}
	if err := c.SearchRead(ResPartnerModel, criteria, options, rps); err != nil {
		return nil, err
	}
	return rps, nil
}
// FindResPartnerIds finds matching record ids, filtered with criteria and
// options. On error it returns an empty (non-nil) slice alongside the error.
func (c *Client) FindResPartnerIds(criteria *Criteria, options *Options) ([]int64, error) {
	ids, err := c.Search(ResPartnerModel, criteria, options)
	if err != nil {
		return []int64{}, err
	}
	return ids, nil
}
// FindResPartnerId finds the first matching record id; -1 with an error when
// the search fails or nothing matches.
func (c *Client) FindResPartnerId(criteria *Criteria, options *Options) (int64, error) {
	ids, err := c.Search(ResPartnerModel, criteria, options)
	if err != nil {
		return -1, err
	}
	if len(ids) > 0 {
		return ids[0], nil
	}
	return -1, fmt.Errorf("res.partner was not found with criteria %v and options %v", criteria, options)
}
|
package main
/*
#cgo CFLAGS: -I.
#cgo LDFLAGS: -L. -ldemo
#include "demo.h"
*/
import "C"
import "fmt"
// main demonstrates calling the C function sum (linked from libdemo via the
// cgo preamble above) and prints its result followed by a marker line.
func main() {
	// gofmt: argument list spaced as "1, 2" (was "1,2").
	fmt.Println(C.sum(1, 2))
	fmt.Println("111111")
}
|
package jumphelper
import (
"fmt"
"io/ioutil"
"log"
"net/http"
"strings"
)
import (
"github.com/bwesterb/go-pow"
)
// Client is a HTTP client that makes jumphelper requests
type Client struct {
	host    string       // server host (defaulted in NewClientFromOptions)
	port    string       // server port (defaulted in NewClientFromOptions)
	verbose bool         // when true, Log prints each message
	client  *http.Client // underlying HTTP client; configured not to follow redirects
}
// Log prints s via log.Println when the client is verbose, and always
// returns s unchanged so it can be used inline.
func (c *Client) Log(s string) string {
	if !c.verbose {
		return s
	}
	log.Println(s)
	return s
}
// address builds the jumphelper URL for s, optionally prefixed with a method
// segment (the first element of m), and logs the result in verbose mode.
func (c *Client) address(s string, m ...string) string {
	base := "http://" + c.host + ":" + c.port + "/"
	if len(m) > 0 {
		return c.Log(base + m[0] + "/" + s + "/")
	}
	return c.Log(base + s + "/")
}
// Check writes a request for a true-false answer to a jumphelper server.
// A response body beginning with "TRUE" (after trimming whitespace) means
// true; anything else means false.
func (c *Client) Check(s string) (bool, error) {
	resp, err := c.client.Get(c.address(s, "check"))
	if err != nil {
		return false, err
	}
	defer resp.Body.Close()
	bytes, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return false, err
	}
	sbytes := strings.TrimSpace(string(bytes))
	c.Log("Log: " + sbytes)
	if strings.HasPrefix(sbytes, "TRUE") {
		return true, nil
	}
	return false, nil
}
// Request writes a request for a base32 answer to a jumphelper server and
// returns the raw response body.
func (c *Client) Request(s string) (string, error) {
	resp, err := c.client.Get(c.address(s, "request"))
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	bytes, err := ioutil.ReadAll(resp.Body)
	// Log any Location header; redirects are not followed (see the
	// CheckRedirect configured in NewClientFromOptions).
	c.Log(resp.Header.Get("Location"))
	if err != nil {
		return "", err
	}
	return string(bytes), nil
}
// Jump writes a request for a base64 address to a jumphelper server and
// returns the raw response body.
func (c *Client) Jump(s string) (string, error) {
	resp, err := c.client.Get(c.address(s, "jump"))
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	bytes, err := ioutil.ReadAll(resp.Body)
	// Log any Location header; redirects are not followed (see the
	// CheckRedirect configured in NewClientFromOptions).
	c.Log(resp.Header.Get("Location"))
	if err != nil {
		return "", err
	}
	return string(bytes), nil
}
// Signup requests a new account for a domain from a jumphelper server.
// It only issues the account-creation ("acct") request when Check reports
// the domain absent; an existing account yields an error.
func (c *Client) Signup(domain, base64 string) (string, error) {
	// Note the flow: when Check fails, b is false, so this branch is entered
	// and the Check error is returned before any creation request is made.
	if b, err := c.Check(domain); !b {
		if err != nil {
			return "", err
		}
		resp, err := c.client.Get(c.address(domain+","+base64, "acct"))
		if err != nil {
			return "", err
		}
		defer resp.Body.Close()
		bytes, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return "", err
		}
		return string(bytes), nil
	}
	return "", fmt.Errorf("Account exists, use update instead")
}
// Register solves the proof-of-work challenge in input for the given domain
// and submits it, together with the domain and base64 payload, as an account
// ("acct") request; it returns the raw response body.
func (c *Client) Register(input, domain, base64 string) (string, error) {
	proof, err := pow.Fulfil(input, []byte(domain))
	if err != nil {
		return "", err
	}
	resp, err := c.client.Get(
		c.address(input+","+proof+","+domain+","+base64, "acct"),
	)
	// Bug fix: the error must be checked before touching resp — the original
	// deferred resp.Body.Close() unconditionally, dereferencing a nil resp
	// whenever the GET failed.
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	bytes, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	return string(bytes), nil
}
// NewClient creates a new jumphelper client for the given host and port,
// delegating to NewClientFromOptions with the matching option setters.
func NewClient(Host, Port string, verbose bool) (*Client, error) {
	return NewClientFromOptions(SetClientHost(Host), SetClientPort(Port), SetClientVerbose(verbose))
}
// NewClientFromOptions creates a new jumphelper client from functional
// options. Defaults (host 127.0.0.1, port 7854, quiet) are applied first and
// may be overridden by opts; the HTTP client never follows redirects.
func NewClientFromOptions(opts ...func(*Client) error) (*Client, error) {
	c := Client{
		host:    "127.0.0.1",
		port:    "7854",
		verbose: false,
		client: &http.Client{
			CheckRedirect: func(req *http.Request, via []*http.Request) error {
				return http.ErrUseLastResponse
			},
		},
	}
	for _, opt := range opts {
		if err := opt(&c); err != nil {
			return nil, fmt.Errorf("Client configuration error: %s", err)
		}
	}
	return &c, nil
}
|
package security
import (
"bytes"
"testing"
)
// TestPKCS5Padding checks padding to a block size of 8: a 5-byte input gains
// three pad bytes of value 3, and an already-aligned 8-byte input gains a
// full extra block of eight bytes of value 8.
func TestPKCS5Padding(t *testing.T) {
	actual, _ := PKCS5Padding([]byte{'1', '2', '3', '4', '5'}, 8)
	expect := []byte{'1', '2', '3', '4', '5', '3', '3', '3'}
	if !bytes.Equal(actual, expect) {
		t.Errorf("TestPKCS5Padding: expect->%q, actual->%q", expect, actual)
	}
	actual, _ = PKCS5Padding([]byte{'1', '2', '3', '4', '5', '6', '7', '8'}, 8)
	expect = []byte{'1', '2', '3', '4', '5', '6', '7', '8', '8', '8', '8', '8', '8', '8', '8', '8'}
	if !bytes.Equal(actual, expect) {
		t.Errorf("TestPKCS5Padding: expect->%q, actual->%q", expect, actual)
	}
}
// TestPKCS5UnPadding checks the inverse of TestPKCS5Padding: stripping the
// pad bytes restores the original 5- and 8-byte inputs.
func TestPKCS5UnPadding(t *testing.T) {
	actual, _ := PKCS5UnPadding([]byte{'1', '2', '3', '4', '5', '3', '3', '3'})
	expect := []byte{'1', '2', '3', '4', '5'}
	if !bytes.Equal(actual, expect) {
		t.Errorf("TestPKCS5UnPadding: expect->%q, actual->%q", expect, actual)
	}
	actual, _ = PKCS5UnPadding([]byte{'1', '2', '3', '4', '5', '6', '7', '8', '8', '8', '8', '8', '8', '8', '8', '8'})
	expect = []byte{'1', '2', '3', '4', '5', '6', '7', '8'}
	if !bytes.Equal(actual, expect) {
		t.Errorf("TestPKCS5UnPadding: expect->%q, actual->%q", expect, actual)
	}
}
|
package atTheCrossroads
// knapsackLight returns the maximum total value obtainable by carrying a
// subset of two items (value1/weight1 and value2/weight2) whose combined
// weight does not exceed maxW.
func knapsackLight(value1 int, weight1 int, value2 int, weight2 int, maxW int) int {
	// Best case: both items fit together.
	if weight1+weight2 <= maxW {
		return value1 + value2
	}
	fits1 := weight1 <= maxW
	fits2 := weight2 <= maxW
	switch {
	case fits1 && fits2:
		// Only one can be carried; take the more valuable.
		if value1 > value2 {
			return value1
		}
		return value2
	case fits1:
		return value1
	case fits2:
		return value2
	default:
		// Neither item fits on its own.
		return 0
	}
}
|
// In the early 60's G.M. Adelson-Velsky and E.M. Landis
// invented the first self-balancing binary search tree
// data structure, calling it AVL Tree.
//
// An AVL tree is a binary search tree in which the heights
// of the left and right subtrees differ by at most one.
//
// The AVL balance condition, also known as the node balance
// factor, represents an additional piece of information stored
// for each node. The balance factor is defined as an integer in
// the range [-1, 1], where -1 indicates an extra node on the right
// subtree, and 1 an extra node on the left subtree, accounting
// for an odd number of nodes.
package avl
import (
_ "fmt"
)
// Tree is an AVL tree; root points at the topmost node (nil for an empty tree).
type Tree struct {
	root *Node
}
// 7.1 Tree Rotations
//
// algorithm LeftRotation(node)
//   Pre: node.Right != nil
//   Post: node.Right is the new root of the subtree,
//   node has become node.Right's left child and,
//   BST properties are preserved
//   RightNode = node.Right
//   node.Right = RightNode.Left
//   RightNode.Left = node
//
// algorithm RightRotation(node)
//   Pre: node.Left != nil
//   Post: node.Left is the new root of the subtree,
//   node has become node.Left's right child and,
//   BST properties preserved
//   LeftNode = node.Left
//   node.Left = LeftNode.Right
//   LeftNode.Right = node
// A                      B
//  \                    / \
//   B        =>        A   C
//  / \                  \
// X   C                  X
// leftRotation rotates the subtree rooted at node to the left: node.Right
// becomes the subtree root. No-op when node or node.Right is nil.
func (t *Tree) leftRotation(node *Node) {
	if node == nil || node.Right == nil {
		return
	}
	pivot := node.Right
	// pivot's left subtree becomes node's right subtree.
	node.Right = pivot.Left
	if pivot.Left != nil {
		pivot.Left.Parent = node
	}
	pivot.Left = node
	// Hook pivot into node's former parent, or make it the tree root.
	if node.Parent != nil {
		if node.Parent.Right == node {
			node.Parent.Right = pivot
		} else {
			node.Parent.Left = pivot
		}
	} else {
		t.root = pivot
	}
	pivot.Parent = node.Parent
	node.Parent = pivot
}
//     C              B
//    /              / \
//   B      =>      A   C
//  / \                /
// A   X              X
// rightRotation rotates the subtree rooted at node to the right: node.Left
// becomes the subtree root. No-op when node or node.Left is nil.
func (t *Tree) rightRotation(node *Node) {
	if node == nil || node.Left == nil {
		return
	}
	pivot := node.Left
	// pivot's right subtree becomes node's left subtree.
	node.Left = pivot.Right
	if pivot.Right != nil {
		pivot.Right.Parent = node
	}
	pivot.Right = node
	// Hook pivot into node's former parent, or make it the tree root.
	if node.Parent != nil {
		if node.Parent.Left == node {
			node.Parent.Left = pivot
		} else {
			node.Parent.Right = pivot
		}
	} else {
		t.root = pivot
	}
	pivot.Parent = node.Parent
	node.Parent = pivot
}
// C      C        B
//  /      /        / \
// A   => B   =>   A   C
//  \    / \        \  /
//   B  A   X        X X
//  / \  \
// X   X  X
//
// leftRightRotation resolves the left-right ("zig-zag") imbalance in
// one combined operation: node's left child is first rotated left,
// then node is rotated right, so node.Left.Right ends up as the new
// subtree root. It is a no-op unless node, node.Left and
// node.Left.Right all exist. Parent links and t.root are updated.
func (t *Tree) leftRightRotation(node *Node) {
	if node == nil ||
		node.Left == nil ||
		node.Left.Right == nil {
		return
	}
	pivot := node.Left.Right // B, the new subtree root
	// left rotation: lift B above A (node.Left).
	node.Left.Right = pivot.Left
	if pivot.Left != nil {
		pivot.Left.Parent = node.Left
	}
	pivot.Left = node.Left
	node.Left.Parent = pivot
	// right rotation: lift B above C (node).
	node.Left = pivot.Right
	if pivot.Right != nil {
		pivot.Right.Parent = node
	}
	pivot.Right = node
	// Reattach B in node's former position (or as the tree root).
	if node.Parent != nil {
		if node.Parent.Right == node {
			node.Parent.Right = pivot
		} else {
			node.Parent.Left = pivot
		}
	} else {
		t.root = pivot
	}
	pivot.Parent = node.Parent
	node.Parent = pivot
}
// A      A        B
//  \      \      / \
//   C  =>  B  =>  A   C
//  /      / \      \  /
// B      X   C      X X
//  / \        \
// X   X        X
//
// rightLeftRotation resolves the right-left ("zig-zag") imbalance in
// one combined operation: node's right child is first rotated right,
// then node is rotated left, so node.Right.Left ends up as the new
// subtree root. It is a no-op unless node, node.Right and
// node.Right.Left all exist. Parent links and t.root are updated.
func (t *Tree) rightLeftRotation(node *Node) {
	if node == nil ||
		node.Right == nil ||
		node.Right.Left == nil {
		return
	}
	pivot := node.Right.Left // B, the new subtree root
	// right rotation: lift B above C (node.Right).
	node.Right.Left = pivot.Right
	if pivot.Right != nil {
		pivot.Right.Parent = node.Right
	}
	pivot.Right = node.Right
	node.Right.Parent = pivot
	// left rotation: lift B above A (node).
	node.Right = pivot.Left
	if pivot.Left != nil {
		pivot.Left.Parent = node
	}
	pivot.Left = node
	// Reattach B in node's former position (or as the tree root).
	if node.Parent != nil {
		if node.Parent.Left == node {
			node.Parent.Left = pivot
		} else {
			node.Parent.Right = pivot
		}
	} else {
		t.root = pivot
	}
	pivot.Parent = node.Parent
	node.Parent = pivot
}
// 7.2 Tree Rebalancing
//
// algorithm CheckBalance(current)
// Pre: current is the node to start from balancing
// Post: current's height has been examined and the tree balance, if
//       violated, restored through rotations
// if current.Left = nil and current.Right = nil
// current.Height = -1
// else
// current.Height = Max(Height(current.Left), Height(current.Right)) + 1
// if Height(current.Left) - Height(current.Right) > 1
// if Height(current.Left.Left) - Height(current.Left.Right) > 0
// RightRotation(current)
// else
// LeftAndRightRotation(current)
// else if Height(current.Left) - Height(current.Right) < -1
// if Height(current.Right.Left) - Height(current.Right.Right) < 0
// LeftRotation(current)
// else
// RightAndLeftRotation(current)
// CheckBalance restores the AVL balance condition at node, applying a
// single or double rotation when its subtree heights differ by more
// than one.
//
// A nil node is ignored — this guard matches the ones in the rotation
// helpers and prevents a nil dereference when callers rebalance along
// a search path that may contain nil entries. When bf > 1 the left
// child must be non-nil (its height is at least 1), so probing its
// children is safe; the same holds mirrored for bf < -1. This assumes
// (*Node).Height tolerates a nil receiver — TODO confirm.
func (t *Tree) CheckBalance(node *Node) {
	if node == nil {
		return
	}
	bf := node.Left.Height() - node.Right.Height()
	if bf > 1 {
		// Left-heavy: single right rotation when the extra weight is
		// on the outer (left-left) side, double rotation otherwise.
		if node.Left.Left.Height()-node.Left.Right.Height() > 0 {
			t.rightRotation(node)
		} else {
			t.leftRightRotation(node)
		}
	} else if bf < -1 {
		// Right-heavy: mirror image of the case above.
		if node.Right.Left.Height()-node.Right.Right.Height() < 0 {
			t.leftRotation(node)
		} else {
			t.rightLeftRotation(node)
		}
	}
}
// 7.3 Insertion
//
// algorithm Insert(value)
// Pre: value has passed custom type checks for type T
// Post: value has been placed in the correct location in the tree
// if root = nil
// root = node(value)
// else
// InsertNode(root, value)
//
// algorithm InsertNode(current, value)
// Pre: current is the node to start from
// Post: value has been placed in the correct location in the tree while
// preserving tree balance
// if value < current.Value
// if current.Left = nil
// current.Left = node(value)
// else
// InsertNode(current.Left, value)
// else
// if current.Right = nil
// current.Right = node(value)
// else
// InsertNode(current.Right, value)
// CheckBalance(current)
// Add inserts v into the tree, creating the root when the tree is
// empty. Duplicates are allowed and descend to the right.
func (t *Tree) Add(v int) {
	if t.root != nil {
		t.add(t.root, v)
		return
	}
	t.root = &Node{Value: v}
}
// add descends from node following BST order, attaches v as a new
// leaf (values >= node.Value go right), then rebalances each visited
// node on the way back up the recursion.
func (t *Tree) add(node *Node, v int) {
	child := &node.Right
	if v < node.Value {
		child = &node.Left
	}
	if *child == nil {
		*child = &Node{Value: v, Parent: node}
	} else {
		t.add(*child, v)
	}
	t.CheckBalance(node)
}
// 7.4 Deletion
//
// algorithm Remove(value)
// Pre: value is the value of the node to remove, root is the root node
// of the Avl
// Post: the node with value is removed and the tree rebalanced if found,
//       in which case yields true, otherwise false
// nodeToRemove = root
// parent = nil
// Stack path = root
// while nodeToRemove != nil && nodeToRemove.Value != value
// parent = nodeToRemove
// if value < nodeToRemove.Value
// nodeToRemove = nodeToRemove.Left
// else
//     nodeToRemove = nodeToRemove.Right
// path.Push(nodeToRemove)
// if nodeToRemove == nil
// return false // value not in the Avl
// parent = FindParent(value)
// if count = 1 // count keeps the # of nodes in the Avl
// root = nil
// else if nodeToRemove.Left = nil and nodeToRemove.Right = nil
// if nodeToRemove.Value < parent.Value
// parent.Left = nil
// else
// parent.Right = nil
// else if nodeToRemove.Left = nil and nodeToRemove.Right != nil
// if nodeToRemove.Value < parent.Value
// parent.Left = nodeToRemove.Right
// else
// parent.Right = nodeToRemove.Right
// else if nodeToRemove.Left != nil and nodeToRemove.Right = nil
// if nodeToRemove.Value < parent.Value
// parent.Left = nodeToRemove.Left
// else
// parent.Right = nodeToRemove.Left
// else
// largestValue = nodeToRemove.Left
// while largestValue.Right != nil
// largestValue = largestValue.Right
// FindParent(largest.Right.Value).Right = nil
// nodeToRemove.Value = largestValue.Right.Value
// while path.Count > 0
//   CheckBalance(path.Pop()) // we track back to the root node, checking balance
// count = count - 1
// return true
// Remove deletes the node holding v, rebalancing every node on the
// search path, and reports whether a node was removed.
//
// Fixes over the previous version: a search miss no longer
// dereferences a nil node (and nil is never pushed onto the rebalance
// stack), removing the root works even though it has no parent, and
// when the in-order predecessor has a left subtree that subtree is
// reattached instead of being dropped.
func (t *Tree) Remove(v int) bool {
	if t.root == nil {
		return false
	}
	path := &Stack{}
	n := t.root
	for n != nil && n.Value != v {
		if v < n.Value {
			n = n.Left
		} else {
			n = n.Right
		}
		if n != nil {
			path.Push(n)
		}
	}
	if n == nil {
		return false // v is not in the tree
	}
	// replace splices child into n's position, updating t.root when n
	// is the root.
	replace := func(child *Node) {
		switch {
		case n.Parent == nil:
			t.root = child
		case n.Parent.Left == n:
			n.Parent.Left = child
		default:
			n.Parent.Right = child
		}
		if child != nil {
			child.Parent = n.Parent
		}
	}
	switch {
	case n.Left == nil && n.Right == nil:
		replace(nil)
	case n.Left == nil:
		replace(n.Right)
	case n.Right == nil:
		replace(n.Left)
	default:
		// Two children: copy the in-order predecessor (max of the
		// left subtree) into n, then unlink the predecessor, keeping
		// its left subtree attached to its parent.
		maxL := n.Left
		for maxL.Right != nil {
			maxL = maxL.Right
		}
		if maxL.Parent.Left == maxL {
			maxL.Parent.Left = maxL.Left
		} else {
			maxL.Parent.Right = maxL.Left
		}
		if maxL.Left != nil {
			maxL.Left.Parent = maxL.Parent
		}
		n.Value = maxL.Value
	}
	// Walk back toward the root, rebalancing each visited node.
	for {
		node, ok := path.Pop()
		if !ok {
			break
		}
		t.CheckBalance(node)
	}
	return true
}
// Preorder returns a channel that yields the tree's values in
// pre-order. The channel is closed once traversal finishes.
func (t *Tree) Preorder() <-chan int {
	out := make(chan int)
	go func() {
		defer close(out)
		t.root.Preorder(out)
	}()
	return out
}
// Inorder returns a channel that yields the tree's values in sorted
// (in-order) order. The channel is closed once traversal finishes.
func (t *Tree) Inorder() <-chan int {
	out := make(chan int)
	go func() {
		defer close(out)
		t.root.Inorder(out)
	}()
	return out
}
|
package configuration
import (
"database/sql"
"fmt"
"log"
"testing"
_ "github.com/lib/pq"
)
// failure is a test error that pairs an explanatory prefix with the
// expected and actual values of a failed comparison. A nil
// Expected/Actual suppresses the corresponding line in Error().
type failure struct {
	Prefix string
	Expected interface{}
	Actual interface{}
}
// SetupDB opens the test database, verifies the connection, and
// clears all tables so tests start from a known-empty state.
// Any setup failure aborts the test binary.
func SetupDB() *sql.DB {
	db, err := sql.Open("postgres", "user=tenable password=insecure dbname=apitest")
	if err != nil {
		log.Fatal(err)
	}
	// sql.Open only validates the DSN; Ping forces a real connection
	// so a misconfigured database fails fast here rather than inside
	// the first test query.
	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
	ResetDB(db)
	return db
}
// ResetDB empties every table the tests touch. The previous version
// discarded Exec errors entirely; failures are now logged (but not
// fatal, keeping cleanup best-effort).
func ResetDB(db *sql.DB) {
	for _, table := range []string{"users", "configurations", "sessions"} {
		// Table names come from this fixed list, not user input, so
		// string concatenation is safe here.
		if _, err := db.Exec("DELETE FROM " + table); err != nil {
			log.Printf("resetting table %s: %v", table, err)
		}
	}
}
// Error renders the failure as its prefix followed by optional
// "Expected"/"Actual" lines, satisfying the error interface.
func (f failure) Error() string {
	msg := f.Prefix
	if f.Expected != nil {
		msg = fmt.Sprintf("%s\n Expected: %v", msg, f.Expected)
	}
	if f.Actual != nil {
		msg = fmt.Sprintf("%s\n Actual: %v", msg, f.Actual)
	}
	return msg
}
// baseExpected is the canonical fixture of nine configurations that
// most table tests insert and then expect to read back unchanged.
var baseExpected = []Configuration{
	{Name: "Config1", HostName: "Config.1", Port: 1, Username: "user1"},
	{Name: "Config2", HostName: "Config.2", Port: 2, Username: "user2"},
	{Name: "Config3", HostName: "Config.3", Port: 3, Username: "user3"},
	{Name: "Config4", HostName: "Config.4", Port: 4, Username: "user4"},
	{Name: "Config5", HostName: "Config.5", Port: 5, Username: "user5"},
	{Name: "Config6", HostName: "Config.6", Port: 6, Username: "user6"},
	{Name: "Config7", HostName: "Config.7", Port: 7, Username: "user7"},
	{Name: "Config8", HostName: "Config.8", Port: 8, Username: "user8"},
	{Name: "Config9", HostName: "Config.9", Port: 9, Username: "user9"},
}
// tests maps each scenario name to the test body to run and the
// fixture configurations handed to it. Each body receives a
// controller backed by a database that TestConfiguration resets
// between entries.
var tests = map[string]struct {
	test func(*ConfigurationController, []Configuration) error
	expected []Configuration
}{
	"TestEmptyConfigs": {
		test: func(cc *ConfigurationController, expected []Configuration) error {
			configs, err := cc.GetAll()
			if len(configs) != len(expected) {
				return failure{"Configs length does not match", len(expected), len(configs)}
			}
			return err
		},
		expected: []Configuration{},
	},
	"TestGetAll": {
		test: func(cc *ConfigurationController, expected []Configuration) error {
			// Seed rows directly via SQL so GetAll is exercised in isolation.
			for _, config := range expected {
				_, err := cc.DB.Exec(`
			INSERT INTO configurations(config_name, host_name, port, username) VALUES ($1, $2, $3, $4)`,
					config.Name, config.HostName, config.Port, config.Username)
				if err != nil {
					return err
				}
			}
			configs, err := cc.GetAll()
			if err != nil {
				return err
			}
			if len(configs) != len(expected) {
				return failure{"Configs length does not match", len(expected), len(configs)}
			}
			if !Equals(configs, expected) {
				return failure{Expected: expected, Actual: configs}
			}
			return nil
		},
		expected: baseExpected,
	},
	"TestAddOne": {
		test: func(cc *ConfigurationController, expected []Configuration) error {
			configs, err := cc.Add(expected...)
			names := make([]string, 0, len(expected))
			if err != nil {
				return err
			}
			for index := range configs {
				if configs[index].Name != expected[index].Name {
					// Report the returned name. The previous version read
					// names[index] before appending to names, which panicked
					// with an out-of-range index on any mismatch.
					return failure{"Names do not match", expected[index].Name, configs[index].Name}
				}
				names = append(names, expected[index].Name)
			}
			configs, err = cc.Get(names...)
			if err != nil {
				return err
			}
			if !Equals(configs, expected) {
				return failure{Expected: expected, Actual: configs}
			}
			return nil
		},
		expected: baseExpected[:1],
	},
	"TestAddMultiple": {
		test: func(cc *ConfigurationController, expected []Configuration) error {
			configs, err := cc.Add(expected...)
			names := make([]string, 0, len(expected))
			if err != nil {
				return err
			}
			for index := range configs {
				if configs[index].Name != expected[index].Name {
					// Same out-of-range fix as TestAddOne: report the
					// returned name rather than indexing names early.
					return failure{"Names do not match", expected[index].Name, configs[index].Name}
				}
				names = append(names, expected[index].Name)
			}
			configs, err = cc.Get(names...)
			if err != nil {
				return err
			}
			if !Equals(configs, expected) {
				return failure{Expected: expected, Actual: configs}
			}
			return nil
		},
		expected: baseExpected,
	},
	"TestAddCollision": {
		test: func(cc *ConfigurationController, data []Configuration) error {
			// The last fixture entry reuses the name "Config1", so Add must
			// fail with DuplicateConfigErr and leave the table untouched.
			_, err := cc.Add(data...)
			if err, ok := err.(Error); !ok || err.Err != DuplicateConfigErr {
				return failure{"Errors do not match",
					Error{
						DuplicateConfigErr,
						data[8],
					},
					err}
			}
			count := -1
			err = cc.DB.QueryRow("SELECT COUNT(id) from configurations").Scan(&count)
			if err != nil {
				return err
			}
			if count != 0 {
				return failure{"Too many configurations in DB", 0, count}
			}
			return nil
		},
		expected: []Configuration{
			{Name: "Config1", HostName: "Config.1", Port: 1, Username: "user1"},
			{Name: "Config2", HostName: "Config.2", Port: 2, Username: "user2"},
			{Name: "Config3", HostName: "Config.3", Port: 3, Username: "user3"},
			{Name: "Config4", HostName: "Config.4", Port: 4, Username: "user4"},
			{Name: "Config5", HostName: "Config.5", Port: 5, Username: "user5"},
			{Name: "Config6", HostName: "Config.6", Port: 6, Username: "user6"},
			{Name: "Config7", HostName: "Config.7", Port: 7, Username: "user7"},
			{Name: "Config8", HostName: "Config.8", Port: 8, Username: "user8"},
			{Name: "Config1", HostName: "Config.9", Port: 9, Username: "user9"},
		},
	},
	"Delete": {
		test: func(cc *ConfigurationController, data []Configuration) error {
			// Split the fixture: delete entries 4-6 and 8, keep 0-3 and 7.
			expected := make([]Configuration, 0, len(data))
			toDelete := make([]Configuration, 0, 4)
			expected = append(append(expected, data[0:4]...), data[7])
			toDelete = append(append(toDelete, data[4:7]...), data[8:]...)
			namesToDelete := make([]string, 0, len(toDelete))
			for _, config := range toDelete {
				namesToDelete = append(namesToDelete, config.Name)
			}
			if _, err := cc.Add(data...); err != nil {
				return err
			}
			if err := cc.Delete(namesToDelete...); err != nil {
				return err
			}
			actual, err := cc.GetAll()
			if err != nil {
				return err
			}
			if !Equals(actual, expected) {
				return failure{Expected: expected, Actual: actual}
			}
			return nil
		},
		expected: baseExpected,
	},
	"DeleteNonexisting": {
		test: func(cc *ConfigurationController, data []Configuration) error {
			if _, err := cc.Add(data...); err != nil {
				return err
			}
			// Deleting an unknown name must be a silent no-op.
			if err := cc.Delete("THIS DOES NOT EXIST"); err != nil {
				return failure{"Unexpected Error", nil, err}
			}
			return nil
		},
		expected: baseExpected,
	},
	"TestModify": {
		test: func(cc *ConfigurationController, data []Configuration) error {
			_, err := cc.Add(data...)
			if err != nil {
				return err
			}
			// Rename only; all other fields must carry over from data[0].
			expectedConfig := data[0]
			expectedConfig.Name = "Hello"
			newConfig := Configuration{Name: "Hello"}
			if newConfig, err = cc.Modify(data[0].Name, newConfig); err != nil {
				return err
			}
			if !EqualConfigurations(expectedConfig, newConfig) {
				return failure{Expected: expectedConfig, Actual: newConfig}
			}
			newConfigs, err := cc.Get(expectedConfig.Name)
			if err != nil {
				return err
			}
			if len(newConfigs) != 1 || !EqualConfigurations(expectedConfig, newConfigs[0]) {
				return failure{Expected: expectedConfig, Actual: newConfigs}
			}
			return nil
		},
		expected: baseExpected[:1],
	},
	"TestModifyAllFields": {
		test: func(cc *ConfigurationController, data []Configuration) error {
			_, err := cc.Add(data...)
			expectedConfig := Configuration{
				Name: "Something else",
				HostName: "Other.stuff",
				Username: "NewUserName",
				Port: 9090,
			}
			if err != nil {
				return err
			}
			newConfig := expectedConfig
			if newConfig, err = cc.Modify(data[0].Name, newConfig); err != nil {
				return err
			}
			if !EqualConfigurations(expectedConfig, newConfig) {
				// Report the value actually compared against (previously
				// this reported data[0], which is not what was checked).
				return failure{Expected: expectedConfig, Actual: newConfig}
			}
			newConfigs, err := cc.Get(expectedConfig.Name)
			if err != nil {
				return err
			}
			if len(newConfigs) != 1 || !EqualConfigurations(expectedConfig, newConfigs[0]) {
				return failure{Expected: expectedConfig, Actual: newConfigs}
			}
			return nil
		},
		expected: baseExpected[:1],
	},
	"TestModifyNonExisting": {
		test: func(cc *ConfigurationController, data []Configuration) error {
			_, err := cc.Add(data...)
			expectedConfig := Configuration{
				Name: "Something else",
				HostName: "Other.stuff",
				Username: "NewUserName",
				Port: 9090,
			}
			if err != nil {
				return err
			}
			newConfig := expectedConfig
			if newConfig, err = cc.Modify("NOT A REAL NAME", newConfig); err != DoesNotExistErr {
				return failure{Prefix: "Modify modified non-existing config", Expected: DoesNotExistErr, Actual: err}
			}
			if newConfigs, err := cc.Get(expectedConfig.Name); err != DoesNotExistErr || len(newConfigs) != 0 {
				return failure{Prefix: "Config should not have been found:", Expected: DoesNotExistErr, Actual: err}
			}
			return nil
		},
		expected: baseExpected[:1],
	},
}
// TestConfiguration runs every entry of the tests table as its own
// subtest against a freshly reset database, so failures are reported
// per-scenario and the connection is closed when done.
func TestConfiguration(t *testing.T) {
	cc := &ConfigurationController{SetupDB()}
	defer cc.DB.Close()
	for name, test := range tests {
		name, test := name, test // capture loop variables for the closure
		t.Run(name, func(t *testing.T) {
			// Reset even when the scenario fails, so later subtests
			// start from a clean table.
			defer ResetDB(cc.DB)
			if err := test.test(cc, test.expected); err != nil {
				t.Errorf("%s Failed: %s", name, err.Error())
			}
		})
	}
}
|
package cloudid // import "yunion.io/x/onecloud/pkg/apis/cloudid"
|
package services
import (
	"bytes"
	"encoding/gob"
	"errors"
	"fmt"
	"sort"

	"github.com/boltdb/bolt"
	"github.com/johnnyeven/chain/blockchain"
	"github.com/johnnyeven/chain/global"
	"github.com/johnnyeven/chain/messages"
	"github.com/johnnyeven/chain/network"
	"github.com/johnnyeven/terra/dht"
	"github.com/sirupsen/logrus"
)
// Compile-time assertion that *BlockChainService implements Service.
// (The previous anonymous interface{ Service } wrapper was an
// equivalent but roundabout way of writing the same check.)
var _ Service = (*BlockChainService)(nil)
var blockChainService *BlockChainService
// blockInTransport holds blocks received from peers that cannot be
// connected to the chain yet (verifyAndAddBlock parks blocks whose
// input transactions are still unknown), indexed by hash and by height.
type blockInTransport struct {
	// blocks maps string(block hash) -> *blockchain.Block.
	blocks *dht.SyncedMap
	// heightIndexedBlockHashed maps a block height to the hashes of
	// pending blocks at that height.
	heightIndexedBlockHashed map[uint64][][]byte
	// sortedHeight lists the heights present in the index, in the
	// order Iterator should visit them.
	sortedHeight []uint64
}
// Get returns the pending block for hash and whether it exists.
//
// The previous version type-asserted the map value unconditionally,
// which panics when the hash is absent (val is a nil interface); the
// comma-ok form makes a miss return (nil, false) instead.
func (b *blockInTransport) Get(hash []byte) (*blockchain.Block, bool) {
	val, ok := b.blocks.Get(string(hash))
	if !ok {
		return nil, false
	}
	block, ok := val.(*blockchain.Block)
	return block, ok
}
// Has reports whether a pending block with the given hash is stored.
func (b *blockInTransport) Has(hash []byte) bool {
	return b.blocks.Has(string(hash))
}
// Set stores block and records its hash under its height.
//
// The previous version only appended new heights to sortedHeight, so
// despite its name the slice was never sorted and Iterator visited
// heights in arrival order; new heights are now inserted at their
// sorted position so traversal is in ascending height order.
func (b *blockInTransport) Set(block *blockchain.Block) {
	b.blocks.Set(string(block.Header.Hash), block)
	height := block.Header.Height
	if _, ok := b.heightIndexedBlockHashed[height]; !ok {
		i := sort.Search(len(b.sortedHeight), func(j int) bool {
			return b.sortedHeight[j] >= height
		})
		b.sortedHeight = append(b.sortedHeight, 0)
		copy(b.sortedHeight[i+1:], b.sortedHeight[i:])
		b.sortedHeight[i] = height
	}
	b.heightIndexedBlockHashed[height] = append(b.heightIndexedBlockHashed[height], block.Header.Hash)
}
// Delete removes the pending block with the given hash from both the
// block map and the height index. Unknown hashes are a no-op.
//
// Fixes over the previous version: looking up a missing hash no
// longer panics (the old code went through Get, whose unconditional
// type assertion failed on a nil value), and the height bucket is
// rebuilt with a filter instead of being spliced while it is being
// ranged over, which skipped the element following a removal.
func (b *blockInTransport) Delete(hash []byte) {
	val, ok := b.blocks.Get(string(hash))
	if !ok {
		return
	}
	block, ok := val.(*blockchain.Block)
	if !ok || block == nil {
		return
	}
	height := block.Header.Height
	if hashes, exists := b.heightIndexedBlockHashed[height]; exists {
		kept := hashes[:0] // filter in place, reusing the backing array
		for _, h := range hashes {
			if !bytes.Equal(h, hash) {
				kept = append(kept, h)
			}
		}
		b.heightIndexedBlockHashed[height] = kept
	}
	b.blocks.Delete(string(hash))
}
// DeleteMulti removes every listed hash from the transport set.
func (b *blockInTransport) DeleteMulti(hashes [][]byte) {
	for i := range hashes {
		b.Delete(hashes[i])
	}
}
// Clear drops all pending blocks.
//
// The previous version emptied only the hash map, leaving stale
// entries in the height index and the height list; those are reset
// here as well so Iterator does not revisit cleared heights.
func (b *blockInTransport) Clear() {
	b.blocks.Clear()
	b.heightIndexedBlockHashed = make(map[uint64][][]byte)
	b.sortedHeight = b.sortedHeight[:0]
}
// Iterator invokes iterator for every pending block, visiting heights
// in sortedHeight order. When iterator returns an error, traversal
// either skips to the next block (errorContinue true) or stops
// entirely (errorContinue false).
func (b *blockInTransport) Iterator(iterator func(block *blockchain.Block) error, errorContinue bool) {
	for _, height := range b.sortedHeight {
		for _, hash := range b.heightIndexedBlockHashed[height] {
			block, ok := b.Get(hash)
			if !ok {
				continue
			}
			if err := iterator(block); err != nil && !errorContinue {
				return
			}
		}
	}
}
// Len returns the number of pending blocks currently held.
func (b *blockInTransport) Len() int {
	return b.blocks.Len()
}
// newBlockInTransport builds an empty pending-block set.
func newBlockInTransport() *blockInTransport {
	t := &blockInTransport{}
	t.blocks = dht.NewSyncedMap()
	t.heightIndexedBlockHashed = map[uint64][][]byte{}
	t.sortedHeight = []uint64{}
	return t
}
// BlockChainService owns the local chain, parks blocks that cannot be
// connected yet, and drives height sync and transaction broadcast
// through its signal channels (see Start).
type BlockChainService struct {
	c *blockchain.BlockChain
	// blockInTransport holds blocks whose input transactions are not
	// yet known locally.
	blockInTransport *blockInTransport
	// signalQuit stops the Start event loop.
	signalQuit chan struct{}
	// signalRequestHeight triggers a height-sync round with peers.
	signalRequestHeight chan struct{}
	// signalSendTransaction queues a transaction for broadcast.
	signalSendTransaction chan *blockchain.Transaction
}
// NewBlockChainService returns the process-wide blockchain service,
// creating it — and reindexing the chain state — on first call.
//
// NOTE(review): the nil check is not synchronized; concurrent first
// calls could race and construct two instances. Confirm all callers
// initialize on a single goroutine during startup.
func NewBlockChainService() Service {
	if blockChainService == nil {
		blockChainService = &BlockChainService{
			c: blockchain.NewBlockChain(blockchain.Config{
				NewGenesisBlockFunc: blockchain.NewGenesisBlock,
			}),
			blockInTransport: newBlockInTransport(),
			signalQuit: make(chan struct{}),
			signalRequestHeight: make(chan struct{}),
			signalSendTransaction: make(chan *blockchain.Transaction),
		}
		// Rebuild the UTXO/chain-state index from the stored chain.
		chainState := blockchain.ChainState{BlockChain: blockChainService.c}
		chainState.Reindex()
	}
	return blockChainService
}
// GetBlockChainService returns the singleton, creating it on first use.
func GetBlockChainService() *BlockChainService {
	if blockChainService != nil {
		return blockChainService
	}
	NewBlockChainService()
	return blockChainService
}
// GetTransChannel returns the send-only channel used to hand
// transactions to the event loop for broadcast (see Start).
func (s *BlockChainService) GetTransChannel() chan<- *blockchain.Transaction {
	return s.signalSendTransaction
}
// Messages declares the network message types this service handles
// and the handler invoked for each.
func (s *BlockChainService) Messages() []messages.MessageHandler {
	return []messages.MessageHandler{
		{
			Type: global.MESSAGE_TYPE__REQUEST_HEIGHT,
			Runner: s.RunRequestHeight,
		},
		{
			Type: global.MESSAGE_TYPE__BLOCKS_HASH,
			Runner: s.RunBlocksHash,
		},
		{
			Type: global.MESSAGE_TYPE__GET_BLOCK,
			Runner: s.RunGetBlock,
		},
		{
			Type: global.MESSAGE_TYPE__GET_BLOCK_ACK,
			Runner: s.RunGetBlockAck,
		},
		{
			Type: global.MESSAGE_TYPE__NEW_TRANSACTION,
			Runner: s.RunNewTransaction,
		},
	}
}
// Start launches the service's event loop on its own goroutine. The
// loop runs until Stop signals quit: height-sync requests are fanned
// out to peers and queued transactions are broadcast.
func (s *BlockChainService) Start() error {
	go func() {
	Run:
		for {
			select {
			case <-s.signalQuit:
				break Run
			case <-s.signalRequestHeight:
				// Run the sync round on its own goroutine; 0 means
				// "use the chain's current best height".
				go RequestHeight(s.c, 0)
			case tran := <-s.signalSendTransaction:
				BroadcastTran(tran)
			}
		}
	}()
	return nil
}
// Stop signals the event loop started by Start to exit, then closes
// the quit channel.
//
// NOTE(review): the send blocks until the loop receives it; if Start
// was never called, or Stop is called twice, this will hang or panic
// on the double close — confirm Stop is called exactly once after
// Start.
func (s *BlockChainService) Stop() error {
	s.signalQuit <- struct{}{}
	close(s.signalQuit)
	return nil
}
// RunRequestHeight handles a peer's height announcement. If the peer
// is ahead we answer with our own height (prompting it to send block
// hashes); if we are ahead we send the peer the hashes of the blocks
// it is missing.
func (s *BlockChainService) RunRequestHeight(t *dht.Transport, msg *messages.Message) error {
	payload := &messages.RequestHeight{}
	err := payload.DecodeFromSource(msg.Payload)
	if err != nil {
		return err
	}
	peer := t.GetClient().(*network.ChainProtobufClient).GetPeer()
	currentHeight := s.c.GetBestHeight()
	if payload.Height > currentHeight {
		// The peer's chain is ahead of ours: reply with our height so
		// it will send us the blocks we are missing.
		message := &messages.RequestHeight{
			Height: currentHeight,
			Version: global.Config.Version,
		}
		request := t.MakeResponse(peer.Guid, peer.Node.Addr, msg.MessageID, message)
		t.Request(request)
	} else if payload.Height < currentHeight {
		// Our chain is ahead: send the peer the hashes of the blocks
		// it is missing.
		blockHashes := make([][]byte, 0)
		it := s.c.Iterator()
		// TODO: optimize this so we do not have to walk the entire chain.
		for {
			block := it.Next()
			if block == nil {
				break
			}
			if block.Header.Height >= payload.Height {
				blockHashes = append(blockHashes, block.Header.Hash)
			}
			// Genesis block reached (no predecessor).
			if block.Header.PrevBlockHash == nil || len(block.Header.PrevBlockHash) == 0 {
				break
			}
		}
		message := &messages.BlocksHash{
			Hashes: blockHashes,
		}
		request := t.MakeResponse(peer.Guid, peer.Node.Addr, msg.MessageID, message)
		t.Request(request)
	}
	return nil
}
// RunBlocksHash handles a peer's list of block hashes: for each hash
// we do not already have on the chain, a GetBlock request is sent
// back to that peer.
func (s *BlockChainService) RunBlocksHash(t *dht.Transport, msg *messages.Message) error {
	payload := &messages.BlocksHash{}
	if err := payload.DecodeFromSource(msg.Payload); err != nil {
		return err
	}
	peer := t.GetClient().(*network.ChainProtobufClient).GetPeer()
	for _, hash := range payload.Hashes {
		if s.c.GetBlock(hash) != nil {
			// Already on our chain; nothing to fetch.
			continue
		}
		fetch := &messages.GetBlock{Hash: hash}
		t.Request(t.MakeRequest(peer.Guid, peer.Node.Addr, "", fetch))
	}
	return nil
}
// RunGetBlock serves a peer's request for a single block by hash.
//
// The previous version called Serialize on the result of GetBlock
// without checking it, panicking when the requested hash was not on
// our chain; an unknown hash now returns an error instead.
func (s *BlockChainService) RunGetBlock(t *dht.Transport, msg *messages.Message) error {
	payload := &messages.GetBlock{}
	err := payload.DecodeFromSource(msg.Payload)
	if err != nil {
		logrus.Errorf("[RunGetBlock] payload.DecodeFromSource err: %v", err)
		return err
	}
	block := s.c.GetBlock(payload.Hash)
	if block == nil {
		return fmt.Errorf("[RunGetBlock] block %x not found", payload.Hash)
	}
	message := &messages.GetBlockAck{
		Block: block.Serialize(),
	}
	peer := t.GetClient().(*network.ChainProtobufClient).GetPeer()
	request := t.MakeResponse(peer.Guid, peer.Node.Addr, msg.MessageID, message)
	t.Request(request)
	return nil
}
// RunGetBlockAck handles a block delivered by a peer: the block is
// verified and, when its inputs are known, added to the chain; then
// every block parked in blockInTransport is retried, since the new
// block may have supplied the transactions they were waiting for.
func (s *BlockChainService) RunGetBlockAck(t *dht.Transport, msg *messages.Message) error {
	payload := &messages.GetBlockAck{}
	err := payload.DecodeFromSource(msg.Payload)
	if err != nil {
		return err
	}
	block := blockchain.DeserializeBlock(payload.Block)
	logrus.Debugf("received a new block: %x", block.Header.Hash)
	s.verifyAndAddBlock(block, msg)
	s.blockInTransport.Iterator(func(b *blockchain.Block) error {
		// Pointer comparison: if verifyAndAddBlock parked this very
		// block, it is the same instance — skip reprocessing it.
		if b == block {
			return nil
		}
		s.verifyAndAddBlock(b, msg)
		return nil
	}, true)
	return nil
}
// verifyAndAddBlock checks that every non-coinbase input of block
// refers to a transaction known to the chain state. If an input is
// unknown the block is parked in blockInTransport for a later retry;
// otherwise the block is added to the chain, the chain state is
// updated, and the block is announced to peers.
//
// The previous version panicked (logrus.Panicf) when the block body
// failed to decode — data that arrives from untrusted peers, making
// the panic remotely triggerable. A decode failure now rejects the
// block via the normal error path instead.
func (s *BlockChainService) verifyAndAddBlock(block *blockchain.Block, msg *messages.Message) {
	err := s.c.DB.View(func(tx *bolt.Tx) error {
		bucket := tx.Bucket([]byte(global.ChainStateBucketIdentity))
		decoder := gob.NewDecoder(bytes.NewReader(block.Body.Data))
		trans := make(blockchain.TransactionContainer, 0)
		err := decoder.Decode(&trans)
		if err != nil {
			return fmt.Errorf("RunGetBlockAck error: block data cant be decoded: %v", err)
		}
		for _, tran := range trans {
			if tran.IsCoinBase() {
				// Coinbase transactions have no real inputs to verify.
				continue
			}
			for _, input := range tran.Inputs {
				serializedOutputs := bucket.Get(input.TransactionID)
				if serializedOutputs == nil || len(serializedOutputs) == 0 {
					// Referenced transaction unknown: park the block
					// until the missing ancestor arrives.
					if !s.blockInTransport.Has(block.Header.Hash) {
						s.blockInTransport.Set(block)
					}
					return errors.New(fmt.Sprintf("%x block's trans not found, set into map", block.Header.Hash))
				}
			}
		}
		return nil
	})
	if err != nil {
		logrus.Warningf("RunGetBlockAck error: %v", err)
		return
	}
	// Verified: the block is no longer "in transport".
	if s.blockInTransport.Has(block.Header.Hash) {
		s.blockInTransport.Delete(block.Header.Hash)
	}
	ok := s.c.AddBlock(block)
	if ok {
		chainState := blockchain.ChainState{BlockChain: s.c}
		chainState.Update(block)
		BroadcastBlock(block, msg)
	}
}
// RunNewTransaction handles a transaction announced by a peer: on a
// background goroutine it is bundled with a fresh coinbase
// transaction, verified, packaged into a new block, and the block is
// broadcast to peers.
//
// NOTE(review): transactions that fail VerifyTransaction are only
// logged and are still packaged into the block — confirm whether they
// should be dropped instead.
func (s *BlockChainService) RunNewTransaction(t *dht.Transport, msg *messages.Message) error {
	payload := &messages.NewTransaction{}
	err := payload.DecodeFromSource(msg.Payload)
	if err != nil {
		return err
	}
	tran := blockchain.DeserializeTransaction(payload.Transaction)
	go func() {
		trans := make(blockchain.TransactionContainer, 0)
		trans = append(trans, tran)
		trans = append(trans, blockchain.NewCoinbaseTransaction(global.Config.ReceiveAddress, ""))
		for _, tran := range trans {
			if !blockchain.VerifyTransaction(s.c, &tran) {
				logrus.Warningf("invalid transaction: %s", tran.ID)
			}
		}
		block := s.c.PackageBlock(trans.Serialize())
		BroadcastBlock(block, msg)
	}()
	return nil
}
// BroadcastBlock announces block's hash to every known peer so each
// can request the full block if it is missing it.
func BroadcastBlock(block *blockchain.Block, msg *messages.Message) {
	network.P2P.GetPeerManager().Iterator(func(peer *network.Peer) error {
		announce := &messages.BlocksHash{
			Hashes: [][]byte{block.Header.Hash},
		}
		transport := peer.GetTransport()
		transport.Request(transport.MakeResponse(peer.Guid, peer.Node.Addr, msg.MessageID, announce))
		return nil
	}, true)
}
// BroadcastTran sends the serialized transaction to every known peer.
func BroadcastTran(tran *blockchain.Transaction) {
	network.P2P.GetPeerManager().Iterator(func(peer *network.Peer) error {
		announce := &messages.NewTransaction{
			Transaction: tran.Serialize(),
		}
		transport := peer.GetTransport()
		transport.Request(transport.MakeRequest(peer.Guid, peer.Node.Addr, "", announce))
		return nil
	}, true)
}
// RequestHeight announces a chain height to every peer so whichever
// side is behind can catch up. A height of 0 means "use c's current
// best height".
//
// The previous version resolved the default height and rebuilt the
// message inside the per-peer callback; both are loop-invariant, so
// they are hoisted out and done once.
func RequestHeight(c *blockchain.BlockChain, height uint64) {
	if height == 0 {
		height = c.GetBestHeight()
	}
	message := &messages.RequestHeight{
		Height: height,
		Version: global.Config.Version,
	}
	network.P2P.GetPeerManager().Iterator(func(peer *network.Peer) error {
		t := peer.GetTransport()
		t.Request(t.MakeRequest(peer.Guid, peer.Node.Addr, "", message))
		return nil
	}, true)
}
// RequestHeightTask nudges the blockchain service's event loop to run
// a height-sync round with peers.
func RequestHeightTask() {
	if svc := GetBlockChainService(); svc != nil {
		svc.signalRequestHeight <- struct{}{}
	}
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package scanapp
import (
"context"
"path/filepath"
"time"
"chromiumos/tast/ctxutil"
"chromiumos/tast/local/bundles/cros/scanapp/scanning"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/ash"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/faillog"
"chromiumos/tast/local/chrome/uiauto/scanapp"
"chromiumos/tast/local/cryptohome"
"chromiumos/tast/local/printing/document"
"chromiumos/tast/testing"
)
// init registers the MultiPageScan test, including the source image
// and golden PDFs it needs, with the tast framework.
func init() {
	testing.AddTest(&testing.Test{
		Func: MultiPageScan,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc: "Tests that the Scan app can be used to perform multi-page flatbed PDF scans",
		Contacts: []string{
			"cros-peripherals@google.com",
			"project-bolton@google.com",
		},
		Attr: []string{
			"group:mainline",
			"informational",
			"group:paper-io",
			"paper-io_scanning",
		},
		SoftwareDeps: []string{"chrome", "virtual_usb_printer"},
		Fixture: "virtualUsbPrinterModulesLoaded",
		Data: []string{
			scanning.SourceImage,
			singlePagePdfGoldenFile,
			twoPagePdfGoldenFile,
		},
	})
}
// Golden PDFs the saved multi-page scans are compared against.
const (
	singlePagePdfGoldenFile = "multi_page_flatbed_single_page.pdf"
	twoPagePdfGoldenFile = "multi_page_flatbed_two_page.pdf"
)
// multiPageScanTests enumerates the multi-page scan scenarios: a
// plain two-page scan, one where a page is removed before saving, and
// one where a page is rescanned. goldenFile is the PDF the saved scan
// must match.
var multiPageScanTests = []struct {
	name string
	removePage bool
	rescanPage bool
	goldenFile string
}{{
	name: "multi_page_base",
	removePage: false,
	rescanPage: false,
	goldenFile: twoPagePdfGoldenFile,
}, {
	name: "multi_page_remove_page",
	removePage: true,
	rescanPage: false,
	goldenFile: singlePagePdfGoldenFile,
}, {
	name: "multi_page_rescan_page",
	removePage: false,
	rescanPage: true,
	goldenFile: twoPagePdfGoldenFile,
},
}
// MultiPageScan exercises the Scan app's multi-page flatbed PDF flow:
// it scans two pages, optionally removes or rescans a page, saves the
// PDF, and compares the result against a golden file. (Also fixes the
// duplicated word in the "save scan" failure message.)
func MultiPageScan(ctx context.Context, s *testing.State) {
	// Use cleanupCtx for any deferred cleanups in case of timeouts or
	// cancellations on the shortened context.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 5*time.Second)
	defer cancel()
	crWithFeature, err := chrome.New(ctx, chrome.EnableFeatures("ScanAppMultiPageScan"))
	if err != nil {
		s.Fatal("Failed to start Chrome: ", err)
	}
	defer crWithFeature.Close(cleanupCtx) // Close our own chrome instance
	cr := crWithFeature
	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		s.Fatal("Failed to connect Test API: ", err)
	}
	defer faillog.DumpUITreeOnError(cleanupCtx, s.OutDir(), s.HasError, tconn)
	printer, err := scanapp.StartPrinter(ctx, tconn)
	if err != nil {
		s.Fatal("Failed to attach virtual printer: ", err)
	}
	defer func(ctx context.Context) {
		if err := printer.Stop(ctx); err != nil {
			s.Error("Failed to stop printer: ", err)
		}
	}(cleanupCtx)
	// Launch the Scan app, configure the settings, and perform scans.
	app, err := scanapp.Launch(ctx, tconn)
	if err != nil {
		s.Fatal("Failed to launch app: ", err)
	}
	if err := app.ClickMoreSettings()(ctx); err != nil {
		s.Fatal("Failed to expand More settings: ", err)
	}
	if err := uiauto.Combine("set scan settings",
		app.SetScanSettings(scanapp.ScanSettings{
			Scanner: printer.VisibleName,
			Source: scanapp.SourceFlatbed,
			FileType: scanapp.FileTypePDF,
			ColorMode: scanapp.ColorModeColor,
			PageSize: scanapp.PageSizeLetter,
			Resolution: scanapp.Resolution300DPI,
		}),
		app.ClickMultiPageScanCheckbox(),
	)(ctx); err != nil {
		s.Fatal("Failed to set scan settings: ", err)
	}
	myFilesPath, err := cryptohome.MyFilesPath(ctx, cr.NormalizedUser())
	if err != nil {
		s.Fatal("Failed to retrieve users MyFiles path: ", err)
	}
	defaultScanPattern := filepath.Join(myFilesPath, scanapp.DefaultScanFilePattern)
	for _, test := range multiPageScanTests {
		s.Run(ctx, test.name, func(ctx context.Context, s *testing.State) {
			defer faillog.DumpUITreeWithScreenshotOnError(cleanupCtx, s.OutDir(), s.HasError, cr, "ui_tree_multi_page_scan")
			defer func() {
				if err := scanapp.RemoveScans(defaultScanPattern); err != nil {
					s.Error("Failed to remove scans: ", err)
				}
			}()
			// Make sure printer connected notifications don't cover the Scan button.
			if err := ash.CloseNotifications(ctx, tconn); err != nil {
				s.Fatal("Failed to close notifications: ", err)
			}
			// Start a multi-page scan session and scan 2 pages.
			if err := uiauto.Combine("multi-page scan",
				app.MultiPageScan( /*PageNumber=*/ 1),
				app.MultiPageScan( /*PageNumber=*/ 2),
			)(ctx); err != nil {
				s.Fatal("Failed to perform multi-page scan: ", err)
			}
			if test.removePage {
				if err := app.RemovePage()(ctx); err != nil {
					s.Fatal("Failed to remove page from scan: ", err)
				}
			}
			if test.rescanPage {
				if err := app.RescanPage()(ctx); err != nil {
					s.Fatal("Failed to rescan page in scan: ", err)
				}
			}
			// Click save to create the final PDF and compare it to the golden file.
			if err := uiauto.Combine("save scan",
				app.ClickSave(),
				app.ClickDone(),
			)(ctx); err != nil {
				s.Fatal("Failed to save scan: ", err)
			}
			scan, err := scanapp.GetScan(defaultScanPattern)
			if err != nil {
				s.Fatal("Failed to find scan: ", err)
			}
			diffPath := filepath.Join(s.OutDir(), "multi_page_scan_diff.txt")
			if err := document.CompareFiles(ctx, scan, s.DataPath(test.goldenFile), diffPath); err != nil {
				s.Error("Scan differs from golden file: ", err)
			}
		})
	}
}
|
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package parser
import (
"github.com/google/gapid/core/text/parse/cst"
"github.com/google/gapid/gapil/ast"
)
// { annotation } 'class' identifier '{' { field } '}'
//
// class parses a class declaration, returning nil (without consuming
// input) when the next token is not the class keyword. Annotations
// collected before the declaration are transferred onto the class.
func (p *parser) class(b *cst.Branch, a *ast.Annotations) *ast.Class {
	if !p.peekKeyword(ast.KeywordClass) {
		return nil
	}
	c := &ast.Class{}
	consumeAnnotations(&c.Annotations, a)
	p.ParseBranch(b, func(b *cst.Branch) {
		p.mappings.Add(c, b)
		p.requireKeyword(ast.KeywordClass, b)
		c.Name = p.requireIdentifier(b)
		p.requireOperator(ast.OpBlockStart, b)
		// Fields until the closing brace.
		for !p.operator(ast.OpBlockEnd, b) {
			c.Fields = append(c.Fields, p.requireField(b, nil))
		}
	})
	return c
}
// { annotation } type identifier [ '=' expression ] [ ',' ]
//
// requireField parses one class field: its type, name, optional
// default expression, and optional trailing list separator. Collected
// annotations (plus any parsed in place) are attached to the field.
func (p *parser) requireField(b *cst.Branch, a *ast.Annotations) *ast.Field {
	f := &ast.Field{}
	consumeAnnotations(&f.Annotations, a)
	p.ParseBranch(b, func(b *cst.Branch) {
		p.mappings.Add(f, b)
		p.parseAnnotations(&f.Annotations, b)
		f.Type = p.requireTypeRef(b)
		f.Name = p.requireIdentifier(b)
		if p.operator(ast.OpAssign, b) {
			f.Default = p.requireExpression(b)
		}
		p.operator(ast.OpListSeparator, b)
	})
	return f
}
// api_index number
//
// apiIndex parses an api_index declaration, returning nil when the
// next token is not the api_index keyword. Annotations are not
// allowed on api_index; if any were collected, an error is reported
// at the first annotation and nil is returned.
func (p *parser) apiIndex(b *cst.Branch, a *ast.Annotations) *ast.Number {
	if !p.peekKeyword(ast.KeywordApiIndex) {
		return nil
	}
	if len(*a) != 0 {
		// NOTE(review): this local is named `cst`, shadowing the
		// imported cst package within this if-block — consider renaming.
		cst := p.mappings.CST((*a)[0])
		p.ErrorAt(cst, "Annotation on api_index not allowed")
		return nil
	}
	var f *ast.Number
	p.ParseBranch(b, func(b *cst.Branch) {
		p.requireKeyword(ast.KeywordApiIndex, b)
		f = p.requireNumber(b)
	})
	return f
}
// { annotation } 'define' identifier expression
//
// definition parses a define declaration (a named expression),
// returning nil (without consuming input) when the next token is not
// the define keyword. Collected annotations are transferred onto the
// definition.
func (p *parser) definition(b *cst.Branch, a *ast.Annotations) *ast.Definition {
	if !p.peekKeyword(ast.KeywordDefine) {
		return nil
	}
	d := &ast.Definition{}
	consumeAnnotations(&d.Annotations, a)
	p.ParseBranch(b, func(b *cst.Branch) {
		p.mappings.Add(d, b)
		p.requireKeyword(ast.KeywordDefine, b)
		d.Name = p.requireIdentifier(b)
		d.Expression = p.requireExpression(b)
	})
	return d
}
// { annotation } ( 'enum' | 'bitfield' ) name [ : type ] '{' { identifier '=' expression [ ',' ] } '}'
//
// enum parses an enum or bitfield declaration, returning nil (without
// consuming input) when the next token is neither keyword. Bitfields
// reuse the Enum AST node with IsBitfield set. An optional "extends"
// clause gives the underlying number type.
func (p *parser) enum(b *cst.Branch, a *ast.Annotations) *ast.Enum {
	if !p.peekKeyword(ast.KeywordEnum) && !p.peekKeyword(ast.KeywordBitfield) {
		return nil
	}
	s := &ast.Enum{}
	consumeAnnotations(&s.Annotations, a)
	p.ParseBranch(b, func(b *cst.Branch) {
		p.mappings.Add(s, b)
		// Exactly one of the two keywords must follow.
		if p.keyword(ast.KeywordEnum, b) == nil {
			p.requireKeyword(ast.KeywordBitfield, b)
			s.IsBitfield = true
		}
		s.Name = p.requireIdentifier(b)
		if p.operator(ast.OpExtends, b) {
			s.NumberType = p.requireTypeRef(b)
		}
		p.requireOperator(ast.OpBlockStart, b)
		// name '=' number entries until the closing brace.
		for !p.operator(ast.OpBlockEnd, b) {
			p.ParseBranch(b, func(b *cst.Branch) {
				entry := &ast.EnumEntry{}
				p.mappings.Add(entry, b)
				entry.Name = p.requireIdentifier(b)
				p.requireOperator(ast.OpAssign, b)
				entry.Value = p.requireNumber(b)
				p.operator(ast.OpListSeparator, b)
				s.Entries = append(s.Entries, entry)
			})
		}
	})
	return s
}
// { annotation } 'type' type identifier
//
// pseudonym parses a type alias declaration ('To' is the aliased type,
// 'Name' the new name). Returns nil when the pseudonym keyword is not next.
func (p *parser) pseudonym(b *cst.Branch, a *ast.Annotations) *ast.Pseudonym {
	if !p.peekKeyword(ast.KeywordPseudonym) {
		return nil
	}
	s := &ast.Pseudonym{}
	consumeAnnotations(&s.Annotations, a)
	p.ParseBranch(b, func(b *cst.Branch) {
		p.mappings.Add(s, b)
		p.requireKeyword(ast.KeywordPseudonym, b)
		s.To = p.requireTypeRef(b)
		s.Name = p.requireIdentifier(b)
	})
	return s
}
// [const] generic [.name] { extend_type }
//
// typeRef parses a full type reference: an optional leading 'const' (which
// binds to the pointer formed from the base type), a base type, and any
// number of pointer/array suffixes. Returns nil when no type is present.
func (p *parser) typeRef(b *cst.Branch) ast.Node {
	var ref ast.Node
	if p.peekKeyword(ast.KeywordConst) {
		// 'const T*' — build the PreConst wrapping a pointer to the base.
		c := &ast.PreConst{}
		p.ParseBranch(b, func(b *cst.Branch) {
			p.mappings.Add(c, b)
			p.requireKeyword(ast.KeywordConst, b)
			el := p.requireTypeBase(b)
			ptr := &ast.PointerType{To: el}
			// Extend the element's CST node with the '*' operator.
			p.Extend(p.mappings.CST(el), func(b *cst.Branch) {
				p.mappings.Add(ptr, b)
				p.requireOperator(ast.OpPointer, b)
			})
			c.Type = ptr
		})
		ref = c
	} else {
		ref = p.typeBase(b)
	}
	if ref == nil {
		return nil
	}
	// Greedily apply pointer / indexed-type suffixes until none match.
	for {
		if t := p.extendTypeRef(ref); t != nil {
			ref = t
		} else {
			break
		}
	}
	return ref
}
// generic [.name]
//
// typeBase parses a generic type, optionally qualified by a member access
// ('module.Type'), in which case an Imported node is produced. Returns nil
// when no generic is present.
func (p *parser) typeBase(b *cst.Branch) ast.Node {
	g := p.generic(b)
	if g == nil {
		return nil
	}
	if p.peekOperator(ast.OpMember) {
		// Qualified form: the generic's name is the import source.
		t := &ast.Imported{From: g.Name}
		p.Extend(p.mappings.CST(g), func(b *cst.Branch) {
			p.mappings.Add(t, b)
			p.requireOperator(ast.OpMember, b)
			t.Name = p.requireIdentifier(b)
		})
		return t
	}
	return g
}
// requireTypeBase parses a mandatory base type. When none is present an
// "Expected type" error is reported and ast.InvalidGeneric is substituted
// so parsing can continue.
func (p *parser) requireTypeBase(b *cst.Branch) ast.Node {
	if base := p.typeBase(b); base != nil {
		return base
	}
	p.Expected("type")
	return ast.InvalidGeneric
}
// ref ( pointer_type | static_array_type )
//
// extendTypeRef tries to apply one type suffix (pointer or indexed/array)
// to ref, returning the extended node or nil when neither suffix is next.
func (p *parser) extendTypeRef(ref ast.Node) ast.Node {
	if e := p.extendPointerType(ref); e != nil {
		return e
	}
	if s := p.indexedType(ref); s != nil {
		return s
	}
	return nil
}
// requireTypeRef parses a mandatory type reference. When none is present a
// "type reference" error is reported and ast.InvalidGeneric is substituted
// so parsing can continue.
func (p *parser) requireTypeRef(b *cst.Branch) ast.Node {
	if ref := p.typeRef(b); ref != nil {
		return ref
	}
	p.Expected("type reference")
	return ast.InvalidGeneric
}
// lhs_type ['const'] '*'
//
// extendPointerType wraps ref in a PointerType when a '*' (optionally
// preceded by 'const') follows. Returns nil when neither token is next.
func (p *parser) extendPointerType(ref ast.Node) *ast.PointerType {
	if !p.peekOperator(ast.OpPointer) && !p.peekKeyword(ast.KeywordConst) {
		return nil
	}
	t := &ast.PointerType{To: ref}
	p.Extend(p.mappings.CST(ref), func(b *cst.Branch) {
		p.mappings.Add(t, b)
		// A trailing 'const' marks the pointer itself const.
		t.Const = p.keyword(ast.KeywordConst, b) != nil
		p.requireOperator(ast.OpPointer, b)
	})
	return t
}
// lhs_type '[' [ expression ] ']'
//
// indexedType wraps ref in an IndexedType when a '[' follows. The index
// expression is optional ('T[]' leaves Index nil). Returns nil when no '['
// is next.
func (p *parser) indexedType(ref ast.Node) *ast.IndexedType {
	if !p.peekOperator(ast.OpIndexStart) {
		return nil
	}
	t := &ast.IndexedType{ValueType: ref}
	p.Extend(p.mappings.CST(ref), func(b *cst.Branch) {
		p.mappings.Add(t, b)
		p.requireOperator(ast.OpIndexStart, b)
		if !p.peekOperator(ast.OpIndexEnd) {
			t.Index = p.requireExpression(b)
		}
		p.requireOperator(ast.OpIndexEnd, b)
	})
	return t
}
|
package virus_handler
import (
"decept-defense/controllers/comm"
"decept-defense/models"
"decept-defense/pkg/app"
"decept-defense/pkg/util"
"github.com/gin-gonic/gin"
"net/http"
)
// CreateVirusRecord is a gin handler that decodes a VirusRecord from the
// request body, stamps it with the current time and persists it.
// NOTE(review): all responses use HTTP 200; success/failure is signalled via
// the app-level code (InvalidParams / ErrorCreateVirusRecord / SUCCESS).
func CreateVirusRecord(c *gin.Context) {
	appG := app.Gin{C: c}
	var record models.VirusRecord
	err := c.ShouldBindJSON(&record)
	if err != nil{
		appG.Response(http.StatusOK, app.InvalidParams, nil)
		return
	}
	record.CreateTime = util.GetCurrentTime()
	err = record.CreateVirusRecord()
	if err != nil{
		appG.Response(http.StatusOK, app.ErrorCreateVirusRecord, nil)
		return
	}
	appG.Response(http.StatusOK, app.SUCCESS, nil)
}
// SelectVirusRecord is a gin handler that decodes a SelectVirusPayload from
// the request body and returns the matching virus records.
// NOTE(review): all responses use HTTP 200; success/failure is signalled via
// the app-level code (InvalidParams / ErrorSelectVirusRecord / SUCCESS).
func SelectVirusRecord(c *gin.Context) {
	appG := app.Gin{C: c}
	var payload comm.SelectVirusPayload
	var record models.VirusRecord
	err := c.ShouldBindJSON(&payload)
	if err != nil{
		appG.Response(http.StatusOK, app.InvalidParams, nil)
		return
	}
	data, err := record.GetVirusRecord(&payload)
	if err != nil{
		appG.Response(http.StatusOK, app.ErrorSelectVirusRecord, nil)
		return
	}
	appG.Response(http.StatusOK, app.SUCCESS, data)
}
package logr
import (
"testing"
"github.com/go-logr/logr"
)
// TestLogger verifies that the logger returned by New satisfies the
// logr.Logger interface via a runtime type assertion.
func TestLogger(t *testing.T) {
	var logger interface{} = New(nil)
	if _, ok := logger.(logr.Logger); !ok {
		t.Error("Logger does not implement the logr.Logger interface")
	}
}
|
package eval
import (
"bytes"
"context"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"sort"
"strconv"
"github.com/hashicorp/hcl/v2"
"github.com/hashicorp/hcl/v2/hclsyntax"
"github.com/zclconf/go-cty/cty"
"github.com/zclconf/go-cty/cty/function"
"github.com/zclconf/go-cty/cty/function/stdlib"
ac "github.com/avenga/couper/accesscontrol"
"github.com/avenga/couper/config/request"
"github.com/avenga/couper/eval/lib"
"github.com/avenga/couper/internal/seetie"
)
// Roundtrip is the minimal request surface the evaluation layer needs:
// access to the request context and its cookies.
type Roundtrip interface {
	Context() context.Context
	Cookies() []*http.Cookie
}
// ContextMap is a mutable set of named cty values used to build HCL
// evaluation variables.
type ContextMap map[string]cty.Value

// Merge copies all entries of other into m (overwriting duplicates) and
// returns m to allow chaining. m itself is mutated.
func (m ContextMap) Merge(other ContextMap) ContextMap {
	for k, v := range other {
		m[k] = v
	}
	return m
}
// NewENVContext builds the base HCL evaluation context: an "env" variable
// populated with the environment values referenced in src, plus the
// standard function table.
func NewENVContext(src []byte) *hcl.EvalContext {
	envKeys := decodeEnvironmentRefs(src)
	variables := make(map[string]cty.Value)
	variables["env"] = newCtyEnvMap(envKeys)
	return &hcl.EvalContext{
		Variables: variables,
		Functions: newFunctionsMap(),
	}
}
// NewHTTPContext extends baseCtx with the request/backend variables used in
// HCL expressions: "req" (client request), and — when a backend response is
// present — "bereq" (backend request) and "beresp" (backend response).
// With a nil client request the base context is returned unchanged.
func NewHTTPContext(baseCtx *hcl.EvalContext, req, bereq *http.Request, beresp *http.Response) *hcl.EvalContext {
	if req == nil {
		return baseCtx
	}
	// Work on a copy so the shared base context is never mutated.
	evalCtx := cloneContext(baseCtx)
	httpCtx := req.Context()
	reqCtxMap := ContextMap{}
	if endpoint, ok := httpCtx.Value(request.Endpoint).(string); ok {
		reqCtxMap["endpoint"] = cty.StringVal(endpoint)
	}
	var id string
	if uid, ok := httpCtx.Value(request.UID).(string); ok {
		id = uid
	}
	evalCtx.Variables["req"] = cty.ObjectVal(reqCtxMap.Merge(ContextMap{
		"id":     cty.StringVal(id),
		"method": cty.StringVal(req.Method),
		"path":   cty.StringVal(req.URL.Path),
		"url":    cty.StringVal(newRawURL(req.URL).String()),
		"query":  seetie.ValuesMapToValue(req.URL.Query()),
		"post":   seetie.ValuesMapToValue(parseForm(req).PostForm),
	}.Merge(newVariable(httpCtx, req.Cookies(), req.Header))))
	if beresp != nil {
		evalCtx.Variables["bereq"] = cty.ObjectVal(ContextMap{
			"method": cty.StringVal(bereq.Method),
			"path":   cty.StringVal(bereq.URL.Path),
			"url":    cty.StringVal(newRawURL(bereq.URL).String()),
			"query":  seetie.ValuesMapToValue(bereq.URL.Query()),
			"post":   seetie.ValuesMapToValue(parseForm(bereq).PostForm),
		}.Merge(newVariable(httpCtx, bereq.Cookies(), bereq.Header)))
		// Status is exposed as a string for uniform HCL comparisons.
		evalCtx.Variables["beresp"] = cty.ObjectVal(ContextMap{
			"status": cty.StringVal(strconv.Itoa(beresp.StatusCode)),
		}.Merge(newVariable(httpCtx, beresp.Cookies(), beresp.Header)))
	}
	return evalCtx
}
// defaultMaxMemory bounds the in-memory portion of multipart form parsing.
const defaultMaxMemory = 32 << 20 // 32 MB

// readCloser pairs an arbitrary reader with a separate closer, so a buffered
// copy of a body can still close the original stream.
type readCloser struct {
	io.Reader
	closer io.Closer
}

// newReadCloser combines r (data source) with c (what Close releases).
func newReadCloser(r io.Reader, c io.Closer) *readCloser {
	return &readCloser{Reader: r, closer: c}
}

// Close closes the wrapped closer, not the reader.
func (rc readCloser) Close() error {
	return rc.closer.Close()
}
// parseForm populates the request PostForm field.
// As Proxy we should not consume the request body.
// Create a copy, buffer and reset via GetBody method.
//
// Only body-carrying methods (PUT/PATCH/POST) are parsed; other requests are
// returned untouched. Panics if the body cannot be buffered.
func parseForm(r *http.Request) *http.Request {
	if r.Body == nil {
		return r
	}
	switch r.Method {
	case http.MethodPut, http.MethodPatch, http.MethodPost:
		if r.GetBody == nil {
			// Buffer the whole body once so it can be re-read later.
			bodyBytes, err := ioutil.ReadAll(r.Body)
			if err != nil {
				panic(err)
			}
			// GetBody replays the buffered bytes; Close still closes the
			// original body.
			r.GetBody = func() (io.ReadCloser, error) {
				return newReadCloser(bytes.NewBuffer(bodyBytes), r.Body), nil
			}
		}
		// Parse from a fresh copy, then reset Body again so downstream
		// consumers (the proxied backend) still see the full payload.
		r.Body, _ = r.GetBody()
		_ = r.ParseMultipartForm(defaultMaxMemory)
		r.Body, _ = r.GetBody()
	}
	return r
}
// newRawURL returns a copy of u with the query string and fragment removed,
// leaving scheme, host and path intact. The input URL is not modified.
func newRawURL(u *url.URL) *url.URL {
	stripped := *u
	stripped.RawQuery, stripped.Fragment = "", ""
	return &stripped
}
// cloneContext returns a shallow copy of ctx: fresh Variables and Functions
// maps holding the same entries, so callers can add variables without
// mutating the shared base context.
func cloneContext(ctx *hcl.EvalContext) *hcl.EvalContext {
	c := &hcl.EvalContext{
		Variables: make(map[string]cty.Value),
		Functions: make(map[string]function.Function),
	}
	for key, val := range ctx.Variables {
		c.Variables[key] = val
	}
	for key, val := range ctx.Functions {
		c.Functions[key] = val
	}
	return c
}
// newVariable builds the common per-message variables: "ctx" (access-control
// claims found in the request context), "cookies" and "headers".
func newVariable(ctx context.Context, cookies []*http.Cookie, headers http.Header) ContextMap {
	jwtClaims, _ := ctx.Value(ac.ContextAccessControlKey).(map[string]interface{})
	ctxAcMap := make(map[string]cty.Value)
	for name, data := range jwtClaims {
		// Only well-typed claim sets are exposed; anything else is skipped.
		dataMap, ok := data.(ac.Claims)
		if !ok {
			continue
		}
		ctxAcMap[name] = seetie.MapToValue(dataMap)
	}
	var ctxAcMapValue cty.Value
	if len(ctxAcMap) > 0 {
		ctxAcMapValue = cty.MapVal(ctxAcMap)
	} else {
		// cty.MapVal panics on empty maps; use a typed empty map instead.
		ctxAcMapValue = cty.MapValEmpty(cty.String)
	}
	return map[string]cty.Value{
		"ctx":     ctxAcMapValue,
		"cookies": seetie.CookiesToMapValue(cookies),
		"headers": seetie.HeaderToMapValue(headers),
	}
}
// newCtyEnvMap resolves the given environment variable names via os.Getenv
// and returns them as a cty string map (typed empty map when no keys).
func newCtyEnvMap(envKeys []string) cty.Value {
	if len(envKeys) == 0 {
		return cty.MapValEmpty(cty.String)
	}
	ctyMap := make(map[string]cty.Value)
	for _, key := range envKeys {
		if _, ok := ctyMap[key]; !ok {
			ctyMap[key] = cty.StringVal(os.Getenv(key))
		}
	}
	return cty.MapVal(ctyMap)
}
// Functions

// newFunctionsMap returns the function table available to HCL expressions.
func newFunctionsMap() map[string]function.Function {
	return map[string]function.Function{
		"base64_decode": lib.Base64DecodeFunc,
		"base64_encode": lib.Base64EncodeFunc,
		"to_upper":      stdlib.UpperFunc,
		"to_lower":      stdlib.LowerFunc,
	}
}
// decodeEnvironmentRefs lexes the given HCL source and collects the names
// referenced through the `env` variable (e.g. `env.MY_VAR` yields "MY_VAR").
// The returned slice is sorted and free of duplicates.
//
// Panics on lexer errors; src is configuration already read from disk, so a
// lex failure is treated as fatal.
func decodeEnvironmentRefs(src []byte) []string {
	tokens, diags := hclsyntax.LexConfig(src, "tmp.hcl", hcl.InitialPos)
	if diags.HasErrors() {
		panic(diags)
	}
	needle := []byte("env")
	var keys []string
	for i, token := range tokens {
		if token.Type == hclsyntax.TokenIdent &&
			bytes.Equal(token.Bytes, needle) &&
			i+2 < len(tokens) {
			// tokens[i+1] is the '.' operator; tokens[i+2] holds the key.
			value := string(tokens[i+2].Bytes)
			// Insert in sorted order, skipping duplicates. The previous code
			// ran sort.SearchStrings over an unsorted slice, so the duplicate
			// check was unreliable and duplicates could slip through.
			idx := sort.SearchStrings(keys, value)
			if idx == len(keys) || keys[idx] != value {
				keys = append(keys, "")
				copy(keys[idx+1:], keys[idx:])
				keys[idx] = value
			}
		}
	}
	return keys
}
|
package entry
import (
"shared/common"
"shared/utility/errors"
"shared/utility/transfer"
"sync"
)
// PatchCfg describes one client patch entry: which channel/app version it
// applies to and where the resources of which version can be downloaded.
type PatchCfg struct {
	Channel string `json:"channel"`
	AppVersion string `json:"app_version"`
	ResourceUrl []string `json:"resource_url"`
	ResourceVersion string `json:"resource_version"`
}
// Patches holds the patch table indexed by channel, then app version.
// The embedded RWMutex guards Patches across Reload and readers.
type Patches struct {
	sync.RWMutex
	Patches map[string]map[string]*PatchCfg
}
// NewPatches returns an empty patch table; the inner map stays nil until
// the first Reload (lookups on the nil map simply miss).
func NewPatches() *Patches {
	return &Patches{}
}
// Reload rebuilds the whole patch table from the given config under the
// write lock. On any transfer error the existing table is left untouched.
func (p *Patches) Reload(config *Config) error {
	p.Lock()
	defer p.Unlock()
	// Build into a fresh map and swap it in only on full success.
	patches := map[string]map[string]*PatchCfg{}
	for _, patchCSV := range config.PatchListConfig.GetAllData() {
		patchCfg := &PatchCfg{}
		err := transfer.Transfer(patchCSV, patchCfg)
		if err != nil {
			return errors.WrapTrace(err)
		}
		channel := patchCfg.Channel
		channelPatches, ok := patches[channel]
		if !ok {
			channelPatches = map[string]*PatchCfg{}
			patches[channel] = channelPatches
		}
		channelPatches[patchCfg.AppVersion] = patchCfg
	}
	p.Patches = patches
	return nil
}
// GetPatchCfg returns the patch configuration for the given channel and app
// version, or common.ErrPatchCfgNotFound when either level is missing.
func (p *Patches) GetPatchCfg(channel, appVersion string) (*PatchCfg, error) {
	// Reload replaces p.Patches under the write lock; take the read lock
	// here so concurrent lookups don't race with a reload. The previous
	// version read the map unlocked.
	p.RLock()
	defer p.RUnlock()
	channelPatches, ok := p.Patches[channel]
	if !ok {
		return nil, common.ErrPatchCfgNotFound
	}
	patchCfg, ok := channelPatches[appVersion]
	if !ok {
		return nil, common.ErrPatchCfgNotFound
	}
	return patchCfg, nil
}
|
package utils
import (
"bufio"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"time"
)
// GetExecutablePath returns the directory containing the currently running
// executable, or an error when the executable path cannot be determined.
func GetExecutablePath() (string, error) {
	exePath, err := os.Executable()
	if err != nil {
		return "", err
	}
	return filepath.Dir(exePath), nil
}
// GetImageFromFilePath reads the file at filePath and returns its entire
// content as bytes. On any error it is printed and nil is returned,
// preserving the original best-effort contract.
func GetImageFromFilePath(filePath string) []byte {
	// ioutil.ReadFile handles open/stat/read/close in one call and, unlike
	// the previous single bufio.Reader.Read (which may return fewer bytes
	// than requested and whose error was ignored), is guaranteed to return
	// the whole file.
	content, err := ioutil.ReadFile(filePath)
	if err != nil {
		fmt.Println(err)
		return nil
	}
	return content
}
// SafeJoinPaths joins `path1` and `path2` and normalises the result to the
// platform's path separator.
func SafeJoinPaths(path1, path2 string) string {
	joined := filepath.Join(path1, path2)
	return filepath.FromSlash(joined)
}
// StringToInt converts str to an int, falling back to defaultValue when the
// string does not parse as a base-10 integer.
func StringToInt(str string, defaultValue int) int {
	if parsed, err := strconv.Atoi(str); err == nil {
		return parsed
	}
	return defaultValue
}
// IntToString converts an int to its base-10 string representation.
func IntToString(value int) string {
	// strconv.Itoa is the idiomatic shorthand for FormatInt(int64(v), 10).
	return strconv.Itoa(value)
}
// ConvertJSONToStruct unmarshals the JSON in text into model.
// model must be a pointer to the value to populate.
func ConvertJSONToStruct(text []byte, model interface{}) error {
	// Pass model through directly: callers already hand in a pointer, and
	// taking &model (a *interface{}) only adds an indirection that
	// json.Unmarshal has to unwrap. Misuse (a non-pointer model) now also
	// surfaces as an explicit error instead of being silently ignored.
	return json.Unmarshal(text, model)
}
// LoadJSONFromFile reads the file at `path` and unmarshals its JSON content
// into the given `model`.
func LoadJSONFromFile(path string, model interface{}) error {
	content, err := ioutil.ReadFile(path)
	if err != nil {
		return err
	}
	return json.Unmarshal(content, &model)
}
// BytesToString converts a byte slice to a string (copying the bytes).
func BytesToString(data []byte) string {
	return string(data)
}
// WriteJSONToFile writes the given `model` as JSON to the file at `path`,
// creating the file if needed and replacing any previous content.
func WriteJSONToFile(path string, model interface{}) error {
	// O_TRUNC replaces the manual Truncate(0)+Seek(0,0) dance of the
	// previous implementation with a single, simpler open flag.
	file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer file.Close()
	return json.NewEncoder(file).Encode(model)
}
// ParseConfig reads a `config.json` file on the same directory as the executable and parses its JSON to the given model.
func ParseConfig(model interface{}) error {
path := SafeJoinPaths("./", "config.json")
return LoadJSONFromFile(path, model)
}
// FolderOrFileExists reports whether a file or directory exists at path.
// Stat errors other than "not exist" (e.g. permission errors) count as
// existing, matching the original behavior.
func FolderOrFileExists(path string) bool {
	_, err := os.Stat(path)
	return !os.IsNotExist(err)
}
// LogError appends text, timestamped with RFC3339, to a per-day log file in
// a "logs" directory under the current working directory, creating the
// directory and file as needed. Failures are printed to stdout and returned.
func LogError(text string) error {
	if !FolderOrFileExists("./logs") {
		// Create the logs directory on first use.
		if err := os.Mkdir("logs", 0755); err != nil {
			fmt.Println(err.Error())
			return err
		}
	}
	path := "logs/" + "logs-" + time.Now().UTC().Format("2006-01-02") + ".txt"
	f, err := os.OpenFile(path, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0600)
	if err != nil {
		// The original checked this error twice in a row; once is enough.
		fmt.Println(err.Error())
		return err
	}
	defer f.Close()
	if _, err := f.WriteString(time.Now().Format(time.RFC3339) + " - " + text + "\n"); err != nil {
		fmt.Println(err.Error())
		return err
	}
	return nil
}
|
package employee
// position enumerates the job levels an employee can be created with.
type position int

const (
	Developer position = iota
	Manager
	Boss
)
// NewEmployee constructs an employee preconfigured with the title and annual
// income for the given position, or returns nil for an unknown position.
func NewEmployee(position position) *employee {
	var title string
	var income int
	switch position {
	case Developer:
		title, income = "developer", 60000
	case Manager:
		title, income = "manager", 80000
	case Boss:
		title, income = "boss", 100000
	default:
		return nil
	}
	return &employee{position: title, annualIncome: income}
}
// employee is the concrete type built by NewEmployee; it is unexported and
// only reachable through its accessor methods.
type employee struct {
	name, position string
	annualIncome int
}
// SetName records the employee's name.
func (e *employee) SetName(name string) {
	e.name = name
}
// Name returns the employee's name.
func (e *employee) Name() string {
	return e.name
}
// Position returns the employee's role title.
func (e *employee) Position() string {
	return e.position
}
|
package main
import (
"encoding/json"
"github.com/prometheus/client_golang/prometheus"
log "github.com/sirupsen/logrus"
"io/ioutil"
"net/http"
"time"
)
// terminationCollector is a Prometheus collector that scrapes the EC2
// instance metadata service for spot termination notices.
type terminationCollector struct {
	metadataEndpoint string
	terminationIndicator *prometheus.Desc
	terminationTime *prometheus.Desc
}
// InstanceAction mirrors the JSON served by the spot/instance-action
// metadata endpoint: the action and when it will happen.
type InstanceAction struct {
	Action string `json:"action"`
	Time time.Time `json:"time"`
}
var (
	// labels are the base Prometheus label names shared by all metrics
	// emitted by this collector.
	labels = []string {
		"availability_zone",
		"hostname",
		"instance_id",
		"instance_type",
	}
)
// getMetadata fetches one value from the instance metadata service at
// metadataEndpoint+path, with a 1s timeout. A 404 yields ("", nil) so
// callers can treat missing endpoints (e.g. spot/instance-action) as
// "not present" rather than an error.
func (c *terminationCollector) getMetadata(path string) (string, error) {
	client := http.Client{
		Timeout: time.Second,
	}
	url := c.metadataEndpoint + path
	idResp, err := client.Get(url)
	if err != nil {
		log.Errorf("error request metadata from %s: %s", url, err.Error())
		return "", err
	}
	// Close the body on every path; the previous version returned early on
	// 404 before the defer was registered and leaked the response body.
	defer idResp.Body.Close()
	if idResp.StatusCode == 404 {
		log.Errorf("endpoint %s not found", url)
		return "", nil
	}
	// Surface read errors instead of silently returning a partial value.
	value, err := ioutil.ReadAll(idResp.Body)
	if err != nil {
		return "", err
	}
	return string(value), nil
}
// NewTerminationCollector builds a collector scraping the metadata service
// at me. The indicator metric carries an extra "instance_action" label.
func NewTerminationCollector(me string) *terminationCollector {
	return &terminationCollector{
		metadataEndpoint: me,
		terminationIndicator: prometheus.NewDesc("aws_instance_termination_imminent", "Instance is about to be terminated", append(labels, "instance_action"), nil),
		terminationTime: prometheus.NewDesc("aws_instance_termination_in", "Instance will be terminated in", labels, nil),
	}
}
// Describe sends the static metric descriptors to ch, implementing the
// prometheus.Collector interface.
func (c *terminationCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.terminationIndicator
	ch <- c.terminationTime
}
// Collect queries the metadata service and emits the termination metrics:
// the indicator gauge is 0 while no termination is scheduled or the notice
// is unparseable, 1 when a termination action is pending; the remaining
// time is emitted only while it is still in the future.
func (c *terminationCollector) Collect(ch chan<- prometheus.Metric) {
	log.Info("Fetching termination data from metadata-service")
	// Label values are best-effort; lookup errors leave them empty.
	az, _ := c.getMetadata("placement/availability-zone")
	hostname, _ := c.getMetadata("hostname")
	instanceId, _ := c.getMetadata("instance-id")
	instanceType, _ := c.getMetadata("instance-type")
	action, err := c.getMetadata("spot/instance-action")
	if err != nil {
		log.Errorf("Failed to fetch data from metadata service: %s", err)
		return
	}
	if action == "" {
		// 404 from the endpoint: no termination is scheduled.
		log.Debug("instance-action endpoint not found")
		ch <- prometheus.MustNewConstMetric(c.terminationIndicator, prometheus.GaugeValue, 0, az, hostname, instanceId, instanceType, action)
		return
	}
	var ia = InstanceAction{}
	if err = json.Unmarshal([]byte(action), &ia); err != nil {
		// value may be present but not be a time according to AWS docs,
		// so parse error is not fatal
		log.Errorf("Couldn't parse instance-action metadata: %s", err)
		ch <- prometheus.MustNewConstMetric(c.terminationIndicator, prometheus.GaugeValue, 0, az, hostname, instanceId, instanceType, "")
		return
	}
	log.Infof("instance-action endpoint available, termination time: %v", ia.Time)
	ch <- prometheus.MustNewConstMetric(c.terminationIndicator, prometheus.GaugeValue, 1, az, hostname, instanceId, instanceType, ia.Action)
	// time.Until is the idiomatic form of ia.Time.Sub(time.Now()).
	if delta := time.Until(ia.Time); delta.Seconds() > 0 {
		ch <- prometheus.MustNewConstMetric(c.terminationTime, prometheus.GaugeValue, delta.Seconds(), az, hostname, instanceId, instanceType)
	}
}
|
package oci8
/*
#include "oci8.go.h"
#cgo !noPkgConfig pkg-config: oci8
*/
import "C"
import (
"bytes"
"database/sql/driver"
"errors"
"fmt"
"unsafe"
)
// noPkgConfig is a Go tag for disabling using pkg-config and using environmental settings like CGO_CFLAGS and CGO_LDFLAGS instead
// freeBoundParameters releases the C memory attached to bound parameters.
// LOB/timestamp/interval buffers hold OCI descriptors and are released via
// OCIDescriptorFree; everything else was malloc'd and is free'd directly.
func freeBoundParameters(boundParameters []oci8bind) {
	// Iterate by index: a range-by-value loop copies each element, so the
	// `pbuf = nil` below would be applied to the copy and the slice would
	// keep dangling C pointers after free (as the previous version did).
	for i := range boundParameters {
		col := &boundParameters[i]
		if col.pbuf == nil {
			continue
		}
		switch col.kind {
		case C.SQLT_CLOB, C.SQLT_BLOB:
			freeDecriptor(col.pbuf, C.OCI_DTYPE_LOB)
		case C.SQLT_TIMESTAMP:
			freeDecriptor(col.pbuf, C.OCI_DTYPE_TIMESTAMP)
		case C.SQLT_TIMESTAMP_TZ:
			freeDecriptor(col.pbuf, C.OCI_DTYPE_TIMESTAMP_TZ)
		case C.SQLT_TIMESTAMP_LTZ:
			freeDecriptor(col.pbuf, C.OCI_DTYPE_TIMESTAMP_LTZ)
		case C.SQLT_INTERVAL_DS:
			freeDecriptor(col.pbuf, C.OCI_DTYPE_INTERVAL_DS)
		case C.SQLT_INTERVAL_YM:
			freeDecriptor(col.pbuf, C.OCI_DTYPE_INTERVAL_YM)
		default:
			C.free(col.pbuf)
		}
		col.pbuf = nil
	}
}
// getInt64 reads a signed 8-byte OCI integer (sb8) from p.
func getInt64(p unsafe.Pointer) int64 {
	return int64(*(*C.sb8)(p))
}
// getUint64 reads an 8-byte OCI integer from p as unsigned.
// NOTE(review): the load goes through the signed sb8 type before the uint64
// conversion — confirm values with the top bit set round-trip as intended.
func getUint64(p unsafe.Pointer) uint64 {
	return uint64(*(*C.sb8)(p))
}
// freeDecriptor calls C OCIDescriptorFree on the descriptor whose handle is
// stored at p (p points at the descriptor pointer, which is dereferenced
// before being passed to OCI). dtype selects the descriptor type.
func freeDecriptor(p unsafe.Pointer, dtype C.ub4) {
	tptr := *(*unsafe.Pointer)(p)
	C.OCIDescriptorFree(unsafe.Pointer(tptr), dtype)
}
// getError gets error from return value (sword) or OCIError.
// For OCI_ERROR the OCIError handle is queried and connection-fatal ORA-
// codes are mapped to driver.ErrBadConn so database/sql can retry on a
// fresh connection. Calling this with OCI_SUCCESS is a programmer error
// and panics.
func getError(rv C.sword, err *C.OCIError) error {
	switch rv {
	case C.OCI_INVALID_HANDLE:
		return errors.New("OCI_INVALID_HANDLE")
	case C.OCI_SUCCESS_WITH_INFO:
		return errors.New("OCI_SUCCESS_WITH_INFO")
	case C.OCI_RESERVED_FOR_INT_USE:
		return errors.New("OCI_RESERVED_FOR_INT_USE")
	case C.OCI_NO_DATA:
		return errors.New("OCI_NO_DATA")
	case C.OCI_NEED_DATA:
		return errors.New("OCI_NEED_DATA")
	case C.OCI_STILL_EXECUTING:
		return errors.New("OCI_STILL_EXECUTING")
	case C.OCI_SUCCESS:
		panic("ociGetError called with no error")
	case C.OCI_ERROR:
		errorCode, err := ociGetError(err)
		switch errorCode {
		/*
			bad connection errors:
			ORA-00028: your session has been killed
			ORA-01012: Not logged on
			ORA-01033: ORACLE initialization or shutdown in progress
			ORA-01034: ORACLE not available
			ORA-01089: immediate shutdown in progress - no operations are permitted
			ORA-03113: end-of-file on communication channel
			ORA-03114: Not Connected to Oracle
			ORA-03135: connection lost contact
			ORA-12528: TNS:listener: all appropriate instances are blocking new connections
			ORA-12537: TNS:connection closed
		*/
		case 28, 1012, 1033, 1034, 1089, 3113, 3114, 3135, 12528, 12537:
			return driver.ErrBadConn
		}
		return err
	}
	return fmt.Errorf("oracle return error code %d", rv)
}
// ociGetError extracts the most recent error code and message text from an
// OCIError handle.
func ociGetError(err *C.OCIError) (int, error) {
	var errorCode C.sb4
	errorText := make([]byte, 1024)
	C.OCIErrorGet(
		unsafe.Pointer(err), // error handle
		1, // status record number, starts from 1
		nil, // sqlstate, not supported in release 8.x or later
		&errorCode, // error code
		(*C.OraText)(&errorText[0]), // error message text
		1024, // size of the buffer provided in number of bytes
		C.OCI_HTYPE_ERROR, // type of the handle (OCI_HTYPE_ERR or OCI_HTYPE_ENV)
	)
	// Guard against a message that fills the whole buffer without a NUL
	// terminator: IndexByte returns -1 in that case and the previous code
	// would panic slicing with it.
	index := bytes.IndexByte(errorText, 0)
	if index < 0 {
		index = len(errorText)
	}
	return int(errorCode), errors.New(string(errorText[:index]))
}
// CByte copies a Go byte slice into freshly malloc'd C memory and returns it
// as a *C.char. The caller owns the memory and must C.free it.
func CByte(b []byte) *C.char {
	p := C.malloc(C.size_t(len(b)))
	// View the C allocation as a huge Go array so copy() can be used.
	pp := (*[1 << 30]byte)(p)
	copy(pp[:], b)
	return (*C.char)(p)
}
|
package snowflake
import (
"context"
"errors"
"fmt"
"sync"
"sync/atomic"
"time"
"github.com/derry6/gleafd/pkg/log"
)
// Service hands out snowflake IDs using a machine ID persisted via Storage,
// with a background goroutine heartbeating the current timestamp.
type Service struct {
	md Metadata
	stor Storage
	fs chan Factory // channel used in place of a mutex to serialize factory access
	logger log.Logger
	wg sync.WaitGroup
	closed int32 // exit flag, set to 1 once Close has run
	closeC chan struct{}
}
// start persists the timestamp once up front, then launches the periodic
// updater goroutine (tracked by wg so Close can wait for it).
func (s *Service) start() error {
	// First update before the background loop starts.
	if err := s.update(); err != nil {
		return err
	}
	s.wg.Add(1)
	go func() {
		defer s.wg.Done()
		// TODO(review): run's error (returned on shutdown or tick failure)
		// is silently discarded here — consider logging it.
		if err := s.run(); err != nil {
		}
	}()
	return nil
}
// isValidMachineID reports whether id fits the snowflake machine-ID range.
// NOTE(review): the upper bound excludes 1023; a 10-bit machine ID normally
// allows 0..1023 inclusive — confirm whether `< 1023` is intentional.
func (s *Service) isValidMachineID(id int) bool {
	return id >= 0 && id < 1023
}
// init loads (or allocates) this node's machine ID from storage, validates
// it and the stored timestamp, then starts the background updater.
func (s *Service) init() error {
	// Read the persisted metadata for this service instance.
	md, err := s.stor.GetOrNew(context.Background(), s.md.Name, s.md.Addr)
	if err != nil {
		return err
	}
	if !s.isValidMachineID(md.MachineID) {
		// Report the value actually read from storage (the previous code
		// printed s.md.MachineID, which had not been assigned yet).
		return fmt.Errorf("invalid machine id: %v", md.MachineID)
	}
	s.md.MachineID = md.MachineID
	// A stored timestamp ahead of the local clock means the clock moved
	// backwards since the last run; refuse to start to avoid duplicate IDs.
	if md.Timestamp > s.nowMs() {
		return fmt.Errorf("last update time greater than current time")
	}
	return s.start()
}
// run persists the current timestamp every 3 seconds until the service is
// closed, at which point it returns a "service closed" error.
func (s *Service) run() error {
	timer := time.NewTicker(3 * time.Second)
	// Release the ticker on exit; the previous version leaked it.
	defer timer.Stop()
	for {
		select {
		case <-s.closeC:
			return errors.New("service closed")
		case <-timer.C:
			// Heartbeat failures are non-fatal; the next tick retries.
			if err := s.update(); err != nil {
				continue
			}
		}
	}
}
// nowMs returns the current wall-clock time in milliseconds since the epoch.
func (s *Service) nowMs() int64 {
	return time.Now().UnixNano() / int64(time.Millisecond)
}
// update persists the current timestamp to storage. When the stored
// timestamp is already ahead of the clock (clock went backwards) the write
// is skipped so the stored high-water mark is never lowered.
func (s *Service) update() error {
	now := s.nowMs()
	if s.md.Timestamp > now {
		return nil
	}
	s.md.Timestamp = now
	return s.stor.Update(context.Background(), s.md)
}
// Close shuts the service down exactly once (guarded by the CAS on closed)
// and waits for the background updater to finish.
// NOTE(review): closing s.fs while a concurrent Get is returning a factory
// would panic (send on closed channel); Get checks `closed` first but the
// window is not fully race-free — verify under -race.
func (s *Service) Close() error {
	if atomic.CompareAndSwapInt32(&s.closed, 0, 1) {
		close(s.closeC)
		close(s.fs)
		s.wg.Wait()
	}
	return nil
}
// Get returns count snowflake IDs. The single Factory travels through the
// fs channel, which serializes access (acting as a mutex); ctx cancellation
// is honored both while borrowing and while returning the factory.
// NOTE(review): biztag is currently unused by the generation path.
func (s *Service) Get(ctx context.Context, biztag string, count int) (ids []int64, err error) {
	if atomic.LoadInt32(&s.closed) == 1 {
		return nil, errors.New("service closed")
	}
	gen := func() (id int64, er error) {
		// Borrow the factory (or give up on cancellation).
		var f Factory
		select {
		case f = <-s.fs:
		case <-ctx.Done():
			return 0, ctx.Err()
		}
		// Return the factory on the way out; a cancellation here overrides
		// the generated id with the context error.
		defer func() {
			select {
			case s.fs <- f:
			case <-ctx.Done():
				id = 0
				er = ctx.Err()
				return
			}
		}()
		return f.Next()
	}
	for i := 0; i < count; i++ {
		id, err := gen()
		if err != nil {
			return nil, err
		}
		ids = append(ids, id)
	}
	return ids, nil
}
// NewService builds and initializes a snowflake Service for the given node
// name/address. Initialization failure is fatal (logger.Fatalw). On success
// the single ID factory is seeded into the fs channel.
func NewService(name, addr string, storage Storage, logger log.Logger) *Service {
	s := &Service{
		md: Metadata{
			Name: name,
			Addr: addr,
		},
		stor: storage,
		closeC: make(chan struct{}),
		fs: make(chan Factory, 1),
	logger: logger,
	}
	if err := s.init(); err != nil {
		logger.Fatalw("New snowflake service", "err", err)
	}
	// NewFactory error is ignored: the machine ID was validated in init.
	f, _ := NewFactory(s.md.MachineID)
	s.fs <- f
	return s
}
|
package structs
import (
"sync"
)
// Item is an interface for the nodes to be stored in a queue.
type Item interface {
	DeepCopy() Item
	Equals(Item) bool
	Priority() int
}

// node is a doubly-linked-list element holding one Item.
type node struct {
	item Item
	prev *node
	next *node
}

// newNode wraps a deep copy of item, isolating the queue from later
// mutation of the caller's value.
func newNode(item Item) *node {
	return &node{item: item.DeepCopy()}
}

// CompareFunc is a function to sort nodes in a queue. It reports whether a
// should be placed before b.
type CompareFunc func(a Item, b Item) bool

// SortedUniqueList is a kind of priority queue whose nodes are kept in the
// order defined by compareFunc. Uniqueness of items (per Item.Equals) is
// guaranteed.
type SortedUniqueList struct {
	nodes       *node // head of the doubly linked list
	lock        sync.Mutex
	compareFunc CompareFunc
}

// Add inserts item at its sorted position. If an equal item (Item.Equals)
// is already present anywhere in the list the call is a no-op.
//
// The previous implementation compared only against the two nodes adjacent
// to the insertion point, so equal items that were not neighbors (or any
// non-head duplicate when compareFunc is nil) could be inserted twice,
// violating the documented uniqueness guarantee. The whole list is now
// checked.
func (q *SortedUniqueList) Add(item Item) {
	q.lock.Lock()
	defer q.lock.Unlock()
	// Guarantee uniqueness across the entire list.
	for n := q.nodes; n != nil; n = n.next {
		if item.Equals(n.item) {
			return
		}
	}
	// Locate the insertion point. Without a compareFunc the item goes to
	// the front, matching the original behavior.
	var prev *node
	next := q.nodes
	if q.compareFunc != nil {
		for next != nil && !q.compareFunc(item, next.item) {
			prev = next
			next = next.next
		}
	}
	n := newNode(item)
	n.prev = prev
	n.next = next
	if next != nil {
		next.prev = n
	}
	if prev != nil {
		prev.next = n
	} else {
		q.nodes = n
	}
}

// First retrieves the first item in the queue, or nil when it is empty.
func (q *SortedUniqueList) First() Item {
	q.lock.Lock()
	defer q.lock.Unlock()
	if q.nodes == nil {
		return nil
	}
	return q.nodes.item
}

// IteratorFunc is a function to be used for each item in the queue.
type IteratorFunc func(Item)

// ForEach runs iteratorFunc for each item front to back, holding the queue
// lock for the whole traversal.
func (q *SortedUniqueList) ForEach(iteratorFunc IteratorFunc) {
	q.lock.Lock()
	defer q.lock.Unlock()
	for n := q.nodes; n != nil; n = n.next {
		iteratorFunc(n.item)
	}
}

// Delete removes the first node whose item equals i, if any.
func (q *SortedUniqueList) Delete(i Item) {
	q.lock.Lock()
	defer q.lock.Unlock()
	for n := q.nodes; n != nil; n = n.next {
		if !n.item.Equals(i) {
			continue
		}
		if n.next != nil {
			n.next.prev = n.prev
		}
		if n.prev != nil {
			n.prev.next = n.next
		} else {
			q.nodes = n.next
		}
		return
	}
}

// Len returns the number of items in the queue.
func (q *SortedUniqueList) Len() int {
	count := 0
	q.ForEach(func(_ Item) {
		count++
	})
	return count
}

// NewSortedUniqueQueue is a constructor for the SortedUniqueList.
func NewSortedUniqueQueue(compareFunc CompareFunc) *SortedUniqueList {
	return &SortedUniqueList{compareFunc: compareFunc}
}
|
package main
import (
"encoding/json"
"fmt"
"io"
"github.com/bloveless/tweetgo"
)
// statusesUpdate posts a tweet with the given status text and prints the
// API response. Any API error is fatal (panic) in this demo.
func statusesUpdate(c config, status string) {
	tc := getTwitterClient(c)
	input := tweetgo.StatusesUpdateInput{
		Status: tweetgo.String(status),
	}
	output, err := tc.StatusesUpdatePost(input)
	if err != nil {
		panic(err)
	}
	fmt.Printf("\n\n%+v\n", output)
}
// listsList fetches the authenticated user's lists and prints the response.
// Any API error is fatal (panic) in this demo.
func listsList(c config) {
	tc := getTwitterClient(c)
	input := tweetgo.ListsListInput{}
	output, err := tc.ListsListGet(input)
	if err != nil {
		panic(err)
	}
	fmt.Printf("\n\n%+v\n", output)
}
// listsMembers fetches the members of a hard-coded demo list (first page,
// statuses omitted) and prints the response. API errors panic.
func listsMembers(c config) {
	tc := getTwitterClient(c)
	input := tweetgo.ListsMembersInput{
		ListID: tweetgo.Int64(1130185227375038465),
		Cursor: tweetgo.Int(-1), // -1 requests the first page
		SkipStatus: tweetgo.Bool(true),
	}
	output, err := tc.ListsMembersGet(input)
	if err != nil {
		panic(err)
	}
	fmt.Printf("\n\n%+v\n", output)
}
// listsMembersShow checks whether the "twitterdev" account is a member of
// the hard-coded demo list and prints the response. API errors panic.
func listsMembersShow(c config) {
	tc := getTwitterClient(c)
	input := tweetgo.ListsMembersShowInput{
		ListID: tweetgo.Int64(1130185227375038465),
		SkipStatus: tweetgo.Bool(true),
		ScreenName: tweetgo.String("twitterdev"),
	}
	output, err := tc.ListsMembersShowGet(input)
	if err != nil {
		panic(err)
	}
	fmt.Printf("\n\n%+v\n", output)
}
// streamTweets opens Twitter's statuses/filter stream for the given hashtag
// and prints each decoded tweet until the stream ends. Decode errors other
// than EOF panic (demo behavior).
func streamTweets(c config, hashtag string) {
	fmt.Printf("Beginning to stream #%s tweets\n", hashtag)
	tc := getTwitterClient(c)
	input := tweetgo.StatusesFilterInput{
		Track: tweetgo.String(hashtag),
	}
	output, err := tc.StatusesFilterPostRaw(input)
	if err != nil {
		panic(err)
	}
	// Reuse one decoder for the whole stream: the previous code built a new
	// json.Decoder each iteration, discarding whatever the old decoder had
	// already buffered from the body and corrupting the stream.
	decoder := json.NewDecoder(output.Body)
	for {
		tweet := tweetgo.StatusesFilterOutput{}
		err := decoder.Decode(&tweet)
		if err == io.EOF {
			// End of stream: stop cleanly. The original printed this message
			// and then fell through to panic(io.EOF).
			fmt.Println("End of file")
			return
		}
		if err != nil {
			panic(err)
		}
		fmt.Printf("%#v\n\n", tweet)
	}
}
|
package main
import (
"context"
"errors"
"fmt"
"github.com/afex/hystrix-go/hystrix"
"github.com/gin-gonic/gin"
"github.com/micro/go-micro/v2"
"github.com/micro/go-micro/v2/registry"
"github.com/micro/go-micro/v2/web"
"github.com/micro/go-plugins/registry/consul/v2"
proto "go-micro-demos/circuitbreaker/hystrixdo/proto/house"
"log"
"net/http"
)
// main wires a consul registry, the house RPC client and a gin-backed web
// service listening on :8001.
func main() {
	consulReg := consul.NewRegistry(registry.Addrs("127.0.0.1:8500"))
	// new service
	service := micro.NewService(
		micro.Name("go.micro.service.client.house"),
		micro.Registry(consulReg),
	)
	// houseService is the RPC client used by the HTTP handlers below; it
	// bridges HTTP to RPC:
	// http param -> gin webRouter() -> gin getHouse() -> houseService.GetHouse()
	// The hystrix circuit breaker is applied around GetHouse(), i.e. around
	// the service call itself.
	houseService := proto.NewHouseService("go.micro.service.house", service.Client())
	// The web service that actually runs.
	webServer := web.NewService(
		web.Name("go.micro.web.house"),
		web.Address(":8001"),
		web.Handler(webRouter(houseService)),
		web.Registry(consulReg),
	)
	webServer.Init()
	if err := webServer.Run(); err != nil{
		log.Fatal(err)
	}
}
// webRouter builds the gin engine: a middleware stores houseService in the
// request context so handlers can retrieve it, and the /v1/house routes are
// registered.
func webRouter(houseService proto.HouseService) *gin.Engine {
	router := gin.Default()
	router.Use(func(ctx *gin.Context) {
		ctx.Set("houseService", houseService)
	})
	v1 := router.Group("/v1/house")
	{
		v1.GET("/get", getHouse)
		v1.POST("/create", buildHouse)
	}
	return router
}
// getHouse handles GET /v1/house/get. The RPC call to GetHouse is wrapped
// in a hystrix command so slow/failing backends trip the circuit breaker.
func getHouse(ctx *gin.Context) {
	req := new(proto.RequestData)
	if err := ctx.BindJSON(req); err != nil {// bind the client payload; report a 500 with the error on failure
		log.Println("get house param error: ", err.Error())
		ctx.JSON(http.StatusInternalServerError, gin.H{
			"message": err.Error(),
		})
		return
	}
	// Configure a command timeout (milliseconds).
	configOne := hystrix.CommandConfig{
		Timeout: 3000,
	}
	// Register the command configuration.
	// NOTE(review): this runs on every request; configuring once at startup
	// would suffice.
	hystrix.ConfigureCommand("getHouse", configOne)
	var houseService proto.HouseService
	if hservice, ok := ctx.Get("houseService"); ok {
		houseService = hservice.(proto.HouseService)
	}
	// Execute via hystrix.Do: the first func is the guarded call, the
	// second is the fallback invoked when the call fails or the circuit
	// is open.
	var respMsg *proto.ResponseMsg
	err := hystrix.Do(
		"getHouse",
		func() error {
			var err error
			respMsg, err = houseService.GetHouse(context.Background(), req)
			return err
		},
		func(err error) error {
			errstr := fmt.Sprintf("%s, %v","hystrix error msg: ", err.Error())
			return errors.New(errstr)
			//return nil
		},
	)
	if err != nil {
		ctx.JSON(http.StatusInternalServerError, gin.H{
			"message": err.Error(),
		})
	} else {
		ctx.JSON(http.StatusOK, gin.H{
			"data": respMsg,
			"message" : "success",
		})
	}
}
// buildHouse handles POST /v1/house/create: binds the payload, invokes the
// Build RPC and answers with a canned success message.
func buildHouse(ctx *gin.Context) {
	req := new(proto.RequestData)
	if err := ctx.BindJSON(req); err != nil {
		log.Println("request param: ", err)
		ctx.JSON(http.StatusInternalServerError, gin.H{
			"message": err.Error(),
		})
		return
	}
	// Call the HouseService method generated from the proto definition.
	// This is a simulated call in this demo, not a real build.
	var houseService proto.HouseService
	if hservice, ok := ctx.Get("houseService"); ok {
		houseService = hservice.(proto.HouseService)
	}
	// NOTE(review): Build's result and error are discarded; the response
	// below reports success regardless of the RPC outcome.
	houseService.Build(context.Background(), req)
	resp := proto.ResponseMsg{Msg: "build one house 1"}
	ctx.JSON(http.StatusOK, gin.H{
		"message": resp.Msg,
	})
}
|
package parser
import (
"github.com/stephens2424/php/ast"
"github.com/stephens2424/php/lexer"
"github.com/stephens2424/php/token"
)
// parseFunctionStmt parses a function declaration: its definition followed
// by a body block. Unless inMethod is set, the function is registered in the
// current namespace's function table.
func (p *Parser) parseFunctionStmt(inMethod bool) *ast.FunctionStmt {
	stmt := &ast.FunctionStmt{}
	stmt.FunctionDefinition = p.parseFunctionDefinition()
	if !inMethod {
		p.namespace.Functions[stmt.Name] = stmt
	}
	// The body runs in its own scope, restored after parsing.
	p.scope = ast.NewScope(p.scope, p.FileSet.GlobalScope, p.FileSet.SuperGlobalScope)
	stmt.Body = p.parseBlock()
	p.scope = p.scope.EnclosingScope
	return stmt
}
// parseFunctionDefinition parses a function header: optional by-reference
// marker, name, parenthesized argument list and optional return type.
func (p *Parser) parseFunctionDefinition() *ast.FunctionDefinition {
	def := &ast.FunctionDefinition{}
	if p.peek().Typ == token.AmpersandOperator {
		// This is a function returning a reference ... ignore this for now
		p.next()
	}
	if !p.accept(token.Identifier) {
		// PHP allows some keywords as function names; anything else is an
		// error.
		p.next()
		if !lexer.IsKeyword(p.current.Typ, p.current.Val) {
			p.errorf("bad function name: %s", p.current.Val)
		}
	}
	def.Name = p.current.Val
	def.Arguments = make([]*ast.FunctionArgument, 0)
	p.expect(token.OpenParen)
	if p.peek().Typ == token.CloseParen {
		// Empty argument list.
		p.expect(token.CloseParen)
		def.Type = p.parseFunctionType()
		return def
	}
	def.Arguments = append(def.Arguments, p.parseFunctionArgument())
	// Remaining arguments are comma-separated until the closing paren.
	for {
		switch p.peek().Typ {
		case token.Comma:
			p.expect(token.Comma)
			def.Arguments = append(def.Arguments, p.parseFunctionArgument())
		case token.CloseParen:
			p.expect(token.CloseParen)
			def.Type = p.parseFunctionType()
			return def
		default:
			p.errorf("unexpected argument separator: %s", p.current)
			return def
		}
	}
}
// parseFunctionType parses an optional return-type declaration
// (": Type") following a parameter list. It returns the declared type
// name, or the empty string when none is present.
func (p *Parser) parseFunctionType() string {
	if p.peek().Typ == token.TernaryOperator2 {
		// Skip the ":" and advance onto the type token itself.
		p.next()
		p.next()
		return p.current.Val
	}
	return ""
}
// parseFunctionArgument parses a single parameter declaration: an
// optional type hint, an optional by-reference "&", the "$name"
// variable, and an optional "= default" expression.
func (p *Parser) parseFunctionArgument() *ast.FunctionArgument {
	arg := &ast.FunctionArgument{}
	switch p.peek().Typ {
	case token.Identifier, token.Array, token.Self:
		// Type hint: a class name, "array", or "self".
		p.next()
		arg.TypeHint = p.current.Val
	}
	if p.peek().Typ == token.AmpersandOperator {
		// By-reference marker; the AST does not record it.
		p.next()
	}
	p.expect(token.VariableOperator)
	p.next()
	arg.Variable = ast.NewVariable(p.current.Val)
	if p.peek().Typ == token.AssignmentOperator {
		p.expect(token.AssignmentOperator)
		p.next()
		arg.Default = p.parseExpression()
	}
	return arg
}
// parseFunctionCall builds a call expression for callable and parses
// its parenthesized argument list.
func (p *Parser) parseFunctionCall(callable ast.Expr) *ast.FunctionCallExpr {
	call := &ast.FunctionCallExpr{FunctionName: callable}
	return p.parseFunctionArguments(call)
}
// parseFunctionArguments parses a parenthesized, comma-separated list
// of call arguments into expr.Arguments and returns expr.
func (p *Parser) parseFunctionArguments(expr *ast.FunctionCallExpr) *ast.FunctionCallExpr {
	expr.Arguments = make([]ast.Expr, 0)
	p.expect(token.OpenParen)
	if p.peek().Typ == token.CloseParen {
		// No arguments.
		p.expect(token.CloseParen)
		return expr
	}
	expr.Arguments = append(expr.Arguments, p.parseNextExpression())
	for p.peek().Typ != token.CloseParen {
		p.expect(token.Comma)
		arg := p.parseNextExpression()
		if arg == nil {
			// Bail out on a malformed argument so we cannot loop forever.
			break
		}
		expr.Arguments = append(expr.Arguments, arg)
	}
	p.expect(token.CloseParen)
	return expr
}
// parseAnonymousFunction parses a closure expression: the parameter
// list, an optional "use (...)" closure-variable list, and the body.
// The body is parsed in a fresh scope chained to the global and
// superglobal scopes.
func (p *Parser) parseAnonymousFunction() ast.Expr {
	f := &ast.AnonymousFunction{}
	f.Arguments = make([]*ast.FunctionArgument, 0)
	f.ClosureVariables = make([]*ast.FunctionArgument, 0)
	p.expect(token.OpenParen)
	if p.peek().Typ != token.CloseParen {
		f.Arguments = append(f.Arguments, p.parseFunctionArgument())
	}
Loop:
	for {
		switch p.peek().Typ {
		case token.Comma:
			p.expect(token.Comma)
			f.Arguments = append(f.Arguments, p.parseFunctionArgument())
		case token.CloseParen:
			break Loop
		default:
			p.errorf("unexpected argument separator: %s", p.current)
			return f
		}
	}
	p.expect(token.CloseParen)
	// Closure variables
	if p.peek().Typ == token.Use {
		// NOTE(review): this assumes at least one variable inside
		// "use (...)"; an empty list would mis-parse — confirm against
		// the grammar.
		p.expect(token.Use)
		p.expect(token.OpenParen)
		f.ClosureVariables = append(f.ClosureVariables, p.parseFunctionArgument())
	ClosureLoop:
		for {
			switch p.peek().Typ {
			case token.Comma:
				p.expect(token.Comma)
				f.ClosureVariables = append(f.ClosureVariables, p.parseFunctionArgument())
			case token.CloseParen:
				break ClosureLoop
			default:
				p.errorf("unexpected argument separator: %s", p.current)
				return f
			}
		}
		p.expect(token.CloseParen)
	}
	p.scope = ast.NewScope(p.scope, p.FileSet.GlobalScope, p.FileSet.SuperGlobalScope)
	f.Body = p.parseBlock()
	p.scope = p.scope.EnclosingScope
	return f
}
|
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
package orchestratorexplorer
import (
"fmt"
"strconv"
"github.com/DataDog/datadog-operator/controllers/datadogagent/object/configmap"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// buildOrchestratorExplorerConfigMap returns the ConfigMap for the
// orchestrator explorer check, or (nil, nil) when the user already
// references an existing ConfigMap. User-supplied ConfigData takes
// precedence over the generated default configuration.
func (f *orchestratorExplorerFeature) buildOrchestratorExplorerConfigMap() (*corev1.ConfigMap, error) {
	switch {
	case f.customConfig != nil && f.customConfig.ConfigMap != nil:
		// An existing ConfigMap is referenced; nothing to create.
		return nil, nil
	case f.customConfig != nil && f.customConfig.ConfigData != nil:
		return configmap.BuildConfigMapConfigData(f.owner.GetNamespace(), f.customConfig.ConfigData, f.configConfigMapName, orchestratorExplorerConfFileName)
	default:
		return buildDefaultConfigMap(f.owner.GetNamespace(), f.configConfigMapName, orchestratorExplorerCheckConfig(f.runInClusterChecksRunner, f.customResources)), nil
	}
}
// buildDefaultConfigMap wraps content into a ConfigMap named cmName in
// the given namespace, keyed by the orchestrator explorer conf file name.
func buildDefaultConfigMap(namespace, cmName string, content string) *corev1.ConfigMap {
	return &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      cmName,
			Namespace: namespace,
		},
		Data: map[string]string{
			orchestratorExplorerConfFileName: content,
		},
	}
}
// orchestratorExplorerCheckConfig renders the orchestrator check
// configuration as YAML. The same boolean drives both cluster_check and
// skip_leader_election; crs, when non-empty, is appended as the
// crd_collectors list.
func orchestratorExplorerCheckConfig(clusterCheckRunners bool, crs []string) string {
	flag := strconv.FormatBool(clusterCheckRunners)
	config := fmt.Sprintf(`---
cluster_check: %s
ad_identifiers:
- _kube_orchestrator
init_config:
instances:
- skip_leader_election: %s
`, flag, flag)
	for i, cr := range crs {
		if i == 0 {
			config += " crd_collectors:\n"
		}
		config += fmt.Sprintf(" - %s\n", cr)
	}
	return config
}
|
package leetcode
import (
"testing"
"github.com/stretchr/testify/assert"
)
/**
139. Word Break
Given a string s and a dictionary of strings wordDict, return true if s can be segmented
into a space-separated sequence of one or more dictionary words.
Note that the same word in the dictionary may be reused multiple times in the segmentation.
Example 1:
Input: s = "leetcode", wordDict = ["leet","code"]
Output: true
Explanation: Return true because "leetcode" can be segmented as "leet code".
Example 2:
Input: s = "applepenapple", wordDict = ["apple","pen"]
Output: true
Explanation: Return true because "applepenapple" can be segmented as "apple pen apple".
Note that you are allowed to reuse a dictionary word.
Example 3:
Input: s = "catsandog", wordDict = ["cats","dog","sand","and","cat"]
Output: false
Constraints:
1 <= s.length <= 300
1 <= wordDict.length <= 1000
1 <= wordDict[i].length <= 20
s and wordDict[i] consist of only lowercase English letters.
All the strings of wordDict are unique.
*/
// wordBreak reports whether s can be segmented into a space-free
// concatenation of words from wordDict (words may be reused). It builds
// a lookup set from the dictionary and delegates to the memoized
// backtracking search.
func wordBreak(s string, wordDict []string) bool {
	dict := make(map[string]bool, len(wordDict))
	for _, word := range wordDict {
		dict[word] = true
	}
	return backtrack(s, dict, map[int]bool{}, 0)
}
// backtrack reports whether s[start:] can be segmented into words from
// set. memo caches the answer per start index so each suffix is solved
// at most once, bounding the search at O(n^2) substring checks.
func backtrack(s string, set map[string]bool, memo map[int]bool, start int) bool {
	if known, ok := memo[start]; ok {
		return known
	}
	// An exhausted string is a successful segmentation.
	if start == len(s) {
		return true
	}
	result := false
	for end := start + 1; end <= len(s); end++ {
		// Reading the bool map directly is idiomatic: a missing key
		// yields false, so the previous `_, ok` two-step was redundant.
		if set[s[start:end]] && backtrack(s, set, memo, end) {
			result = true
			break
		}
	}
	memo[start] = result
	return result
}
func TestWordBreak(t *testing.T) {
assert.True(t, wordBreak("applepenapple", []string{"apple","pen"}))
assert.False(t, wordBreak("catsandog", []string{"cats","dog","sand","and","cat"}))
assert.True(t, wordBreak("apple pen apple", []string{"apple","pen", " "}))
assert.True(t, wordBreak("catsanddog", []string{"cats","dog","sand","and","cat"}))
assert.True(t, wordBreak("cats and dog", []string{"cats","dog","sand","and","cat", " "}))
assert.True(t, wordBreak("cat sand dog", []string{"cats","dog","sand","and","cat", " "}))
} |
package main
import (
"github.com/nsf/termbox-go"
"time"
)
// draw repaints the terminal: it derives the clock layout from the
// current terminal size, renders HH:MM, and flushes the back buffer.
func draw() {
	termbox.Clear(termbox.ColorDefault, termbox.ColorDefault)
	width, height := termbox.Size()
	boxW := width * 15 / 100
	boxH := height / 3
	top := (height - boxH) / 2
	margin := width * 1 / 10
	// Column X offsets: hour tens, hour ones, colon, minute tens, minute ones.
	columns := [5]int{
		margin,
		boxW + margin,
		boxW*2 + margin,
		boxW*3 + margin,
		boxW*4 + margin,
	}
	cTime(boxW, boxH, columns, top)
	termbox.Flush()
}
// cTime gets the current time and parses each digit to the printing functions.
func cTime(bWidth int, bHeight int, place [5]int, offY int) {
now := time.Now()
hour, minute, _ := now.Clock()
one := (hour / 10) % 10
two := hour % 10
three := (minute / 10) % 10
four := minute % 10
order := [5]int{one, two, -1, three, four}
for x := 0; x < 5; x++ {
switch order[x] {
case -1:
colon(bWidth, bHeight, place[x], offY)
case 0:
zero(bWidth, bHeight, place[x], offY)
case 1:
ones(bWidth, bHeight, place[x], offY)
case 2:
twos(bWidth, bHeight, place[x], offY)
case 3:
threes(bWidth, bHeight, place[x], offY)
case 4:
fours(bWidth, bHeight, place[x], offY)
case 5:
five(bWidth, bHeight, place[x], offY)
case 6:
six(bWidth, bHeight, place[x], offY)
case 7:
seven(bWidth, bHeight, place[x], offY)
case 8:
eight(bWidth, bHeight, place[x], offY)
case 9:
nine(bWidth, bHeight, place[x], offY)
}
}
}
// colon draws a colon in a WxH size box: two square dots centered
// horizontally, one near the top and one near the bottom.
func colon(w int, h int, offX int, offY int) {
	for _, dotY := range []int{h * 1 / 8, h * 6 / 8} {
		for x := 0; x <= w/8; x++ {
			for y := 0; y <= h*1/8; y++ {
				termbox.SetCell(x+offX+w/2-w/8, y+offY+dotY, ' ', termbox.ColorDefault,
					termbox.ColorWhite)
			}
		}
	}
}
// zero draws a zero in a WxH size box (all segments except the middle bar).
func zero(w int, h int, offX int, offY int) {
	for _, seg := range []func(int, int, int, int){
		topcross, leftupperdown, rightupperdown,
		rightlowerdown, leftlowerdown, botcross,
	} {
		seg(w, h, offX, offY)
	}
}
// ones draws a one in a WxH size box (the two right vertical segments).
func ones(w int, h int, offX int, offY int) {
	for _, seg := range []func(int, int, int, int){rightupperdown, rightlowerdown} {
		seg(w, h, offX, offY)
	}
}
// twos draws a two in a WxH size box.
func twos(w int, h int, offX int, offY int) {
	for _, seg := range []func(int, int, int, int){
		topcross, rightupperdown, midcross, leftlowerdown, botcross,
	} {
		seg(w, h, offX, offY)
	}
}
// threes draws a three in a WxH size box.
func threes(w int, h int, offX int, offY int) {
	for _, seg := range []func(int, int, int, int){
		topcross, rightupperdown, midcross, rightlowerdown, botcross,
	} {
		seg(w, h, offX, offY)
	}
}
// fours draws a four in a WxH size box.
func fours(w int, h int, offX int, offY int) {
	for _, seg := range []func(int, int, int, int){
		leftupperdown, rightupperdown, midcross, rightlowerdown,
	} {
		seg(w, h, offX, offY)
	}
}
// five draws a five in a WxH size box.
func five(w int, h int, offX int, offY int) {
	for _, seg := range []func(int, int, int, int){
		topcross, leftupperdown, midcross, rightlowerdown, botcross,
	} {
		seg(w, h, offX, offY)
	}
}
// six draws a six in a WxH size box.
func six(w int, h int, offX int, offY int) {
	for _, seg := range []func(int, int, int, int){
		topcross, leftupperdown, midcross,
		rightlowerdown, leftlowerdown, botcross,
	} {
		seg(w, h, offX, offY)
	}
}
// seven draws a seven in a WxH size box.
func seven(w int, h int, offX int, offY int) {
	for _, seg := range []func(int, int, int, int){
		topcross, rightupperdown, rightlowerdown,
	} {
		seg(w, h, offX, offY)
	}
}
// eight draws an eight in a WxH size box (all seven segments).
func eight(w int, h int, offX int, offY int) {
	for _, seg := range []func(int, int, int, int){
		topcross, leftupperdown, rightupperdown, midcross,
		rightlowerdown, leftlowerdown, botcross,
	} {
		seg(w, h, offX, offY)
	}
}
// nine draws a nine in a WxH size box.
func nine(w int, h int, offX int, offY int) {
	for _, seg := range []func(int, int, int, int){
		topcross, leftupperdown, rightupperdown, midcross, rightlowerdown,
	} {
		seg(w, h, offX, offY)
	}
}
// topcross helper function that draws a top horizontal bar.
func topcross(w int, h int, offX int, offY int) {
	maxX, maxY := w*7/8, h/10
	for y := 0; y <= maxY; y++ {
		for x := 0; x <= maxX; x++ {
			termbox.SetCell(offX+x, offY+y, ' ', termbox.ColorDefault,
				termbox.ColorWhite)
		}
	}
}
// midcross helper function that draws a mid horizontal bar.
func midcross(w int, h int, offX int, offY int) {
	maxX, maxY := w*7/8, h/10
	for y := 0; y <= maxY; y++ {
		for x := 0; x <= maxX; x++ {
			termbox.SetCell(offX+x, offY+h/2+y, ' ', termbox.ColorDefault,
				termbox.ColorWhite)
		}
	}
}
// botcross helper function that draws a bottom horizontal bar.
func botcross(w int, h int, offX int, offY int) {
	maxX, maxY := w*7/8, h/10
	for y := 0; y <= maxY; y++ {
		for x := 0; x <= maxX; x++ {
			termbox.SetCell(offX+x, offY+h-h/10+y, ' ', termbox.ColorDefault,
				termbox.ColorWhite)
		}
	}
}
// leftupperdown helper function that draws a left upper vertical bar.
func leftupperdown(w int, h int, offX int, offY int) {
	maxX, maxY := w/6, h/2
	for y := 0; y <= maxY; y++ {
		for x := 0; x <= maxX; x++ {
			termbox.SetCell(offX+x, offY+y, ' ', termbox.ColorDefault,
				termbox.ColorWhite)
		}
	}
}
// leftlowerdown helper function that draws a left lower vertical bar.
func leftlowerdown(w int, h int, offX int, offY int) {
	maxX, maxY := w/6, h/2
	for y := 0; y <= maxY; y++ {
		for x := 0; x <= maxX; x++ {
			termbox.SetCell(offX+x, offY+h/2+y, ' ', termbox.ColorDefault,
				termbox.ColorWhite)
		}
	}
}
// rightupperdown helper function that draws a right upper vertical bar.
func rightupperdown(w int, h int, offX int, offY int) {
	maxX, maxY := w/6, h/2
	for y := 0; y <= maxY; y++ {
		for x := 0; x <= maxX; x++ {
			termbox.SetCell(offX+w*7/8-w/6+x, offY+y, ' ', termbox.ColorDefault,
				termbox.ColorWhite)
		}
	}
}
// rightlowerdown helper function that draws a right lower vertical bar.
func rightlowerdown(w int, h int, offX int, offY int) {
	maxX, maxY := w/6, h/2
	for y := 0; y <= maxY; y++ {
		for x := 0; x <= maxX; x++ {
			termbox.SetCell(offX+w*7/8-w/6+x, offY+h/2+y, ' ', termbox.ColorDefault,
				termbox.ColorWhite)
		}
	}
}
// main initializes termbox, forwards terminal events through a channel,
// and redraws the clock every 10ms until Esc is pressed.
func main() {
	if err := termbox.Init(); err != nil {
		panic(err)
	}
	defer termbox.Close()
	events := make(chan termbox.Event)
	go func() {
		for {
			events <- termbox.PollEvent()
		}
	}()
	draw()
	for {
		select {
		case ev := <-events:
			if ev.Type == termbox.EventKey && ev.Key == termbox.KeyEsc {
				// Returning runs the deferred termbox.Close.
				return
			}
		default:
			draw()
			time.Sleep(10 * time.Millisecond)
		}
	}
}
|
package models
import (
"time"
//"github.com/jinzhu/gorm"
//"github.com/spf13/viper"
)
// Assignment maps a row of the dept_emp join table: the period during
// which an employee was assigned to a department.
type Assignment struct {
	EmployeeNumber   uint      `gorm:"column:emp_no"`
	DepartmentNumber string    `gorm:"column:dept_no"`
	StartDate        time.Time `gorm:"column:from_date"`
	EndDate          time.Time `gorm:"column:to_date"`
}
// TableName tells gorm which table backs the Assignment model.
func (Assignment) TableName() string {
	return "dept_emp"
}
// Employee looks up the employee referenced by this assignment's
// employee number. The zero-value Employee is returned when no row
// matches. (Receiver renamed from "this": Go convention forbids
// this/self receiver names.)
func (a Assignment) Employee() Employee {
	db := GetDb()
	var employee Employee
	db.Where(&Employee{Number: a.EmployeeNumber}).Find(&employee)
	return employee
}
// Department looks up the department referenced by this assignment's
// department number. The zero-value Department is returned when no row
// matches. (Receiver renamed from "this": Go convention forbids
// this/self receiver names.)
func (a Assignment) Department() Department {
	db := GetDb()
	var department Department
	db.Where(&Department{Number: a.DepartmentNumber}).Find(&department)
	return department
}
|
package commands
import (
"fmt"
"image"
"path/filepath"
"strings"
"github.com/spf13/cobra"
"gocv.io/x/gocv"
)
var (
	// cropCmd detects faces in a photo and writes each one out as a
	// cropped image file.
	cropCmd = &cobra.Command{
		Use:   "crop",
		Short: "Crop a photo file",
		Long:  "Crop a photo file",
		Run:   cropCommand,
	}
)
// init registers the crop subcommand and its cascade-file flag on the
// root command.
func init() {
	cropCmd.PersistentFlags().StringVarP(&cascadeFile, "cascadeFile", "c", "", "custom cascade file path")
	RootCmd.AddCommand(cropCmd)
}
// cropCommand reads an image, detects faces with a cascade classifier,
// and writes each face as a 200x200 crop into the output directory.
func cropCommand(cmd *cobra.Command, args []string) {
	if len(args) < 1 {
		Exit(fmt.Errorf("How to run:\n\tshowimage [imgfile]"), 1)
	}
	fn := args[0]
	base := filepath.Base(fn)
	// Derive name/extension safely: the old strings.Split(f, ".")[1]
	// panicked on file names without an extension.
	ext := strings.TrimPrefix(filepath.Ext(base), ".")
	name := strings.TrimSuffix(base, filepath.Ext(base))
	img := gocv.IMRead(fn, gocv.IMReadColor)
	defer img.Close()
	if img.Empty() {
		fmt.Printf("Error reading image from: %s\n", fn)
		return
	}
	cf, err := loadCascadeClassifier(cascadeFile)
	if err != nil {
		Exit(err, 1)
	}
	defer cf.Close()
	d, err := getOutputDir()
	if err != nil {
		Exit(err, 1)
	}
	// detect faces
	rects := cf.DetectMultiScale(img)
	// Crop each detected face. Mats wrap native memory, so region and
	// result must be closed each iteration — the old loop leaked both.
	for i, r := range rects {
		region := img.Region(r)
		result := gocv.NewMatWithSize(200, 200, gocv.MatTypeCV8U)
		gocv.Resize(region, &result, image.Pt(result.Rows(), result.Cols()), 0, 0, gocv.InterpolationCubic)
		gocv.IMWrite(fmt.Sprintf("%s/%s.%d.%s", d, name, i, ext), result)
		result.Close()
		region.Close()
	}
}
|
package kinesumer
import (
"context"
"os"
"sync"
"time"
"github.com/daangn/kinesumer/pkg/xrand"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/kinesis"
"github.com/pkg/errors"
"github.com/rs/zerolog/log"
)
const (
	// jitter skews the sync interval/timeout in opposite directions so
	// the timeout always expires before the next sync tick.
	jitter       = 50 * time.Millisecond
	syncInterval = 5*time.Second + jitter
	syncTimeout  = 5*time.Second - jitter
	// checkPointTimeout bounds a single checkpoint write.
	checkPointTimeout = 2 * time.Second
	// Scanning defaults; overridable via Config.ScanLimit/ScanTimeout.
	defaultScanLimit   int64 = 2000
	defaultScanTimeout       = 2 * time.Second
	// defaultTimeBuffer is slept between scans to limit API pressure.
	defaultTimeBuffer = 10 * time.Millisecond
	// recordsChanBuffer is the capacity of the outgoing records channel.
	recordsChanBuffer = 20
)
// Config defines configs for the Kinesumer client.
type Config struct {
	App string // Application name.
	// Kinesis configs.
	KinesisRegion   string
	KinesisEndpoint string // Only for local server.
	// If you want to consume messages from Kinesis in a different account,
	// you need to set up the IAM role to access to target account, and pass the role arn here.
	// Reference: https://docs.aws.amazon.com/kinesisanalytics/latest/java/examples-cross.html.
	RoleARN string
	// State store configs.
	DynamoDBRegion   string
	DynamoDBTable    string
	DynamoDBEndpoint string // Only for local server.
	// ScanLimit and ScanTimeout override the scanning defaults when > 0.
	ScanLimit   int64
	ScanTimeout time.Duration
}
// Record represents kinesis.Record with stream name.
type Record struct {
	// Stream is the name of the stream this record was read from.
	Stream string
	*kinesis.Record
}
// Shard holds shard id and a flag of "CLOSED" state.
type Shard struct {
	ID string
	// Closed is true when the shard no longer accepts writes (its
	// sequence-number range has an ending sequence number).
	Closed bool
}
// Shards is a collection of Shard pointers, typically all shards of one
// stream.
type Shards []*Shard
// ids returns the shard IDs in order; it yields nil for an empty
// collection.
func (s Shards) ids() []string {
	var out []string
	for i := range s {
		out = append(out, s[i].ID)
	}
	return out
}
// Kinesumer implements auto re-balancing consumer group for Kinesis.
// TODO(mingrammer): export prometheus metrics.
type Kinesumer struct {
	// Unique identity of a consumer group client.
	id     string
	client *kinesis.Kinesis
	// A flag that identifies if the client is a leader.
	leader bool
	// Streams to consume; set by Consume and Refresh.
	streams []string
	// A stream where consumed records will have flowed.
	records chan *Record
	// Consuming errors are surfaced here; see Errors().
	errors chan error
	// A distributed key-value store for managing states.
	stateStore *stateStore
	// Shard information per stream.
	// List of all shards as cache. For only leader node.
	shardCaches map[string][]string
	// A list of shards a node is currently in charge of.
	shards map[string]Shards
	// To cache the last sequence numbers for each shard.
	checkPoints map[string]*sync.Map
	// To manage the next shard iterators for each shard.
	nextIters map[string]*sync.Map
	// Maximum count of records to scan.
	scanLimit int64
	// Records scanning maximum timeout.
	scanTimeout time.Duration
	// To wait the running consumer loops when stopping.
	wait sync.WaitGroup
	// stop pauses consume loops (re-created by start); close shuts them
	// down permanently.
	stop  chan struct{}
	close chan struct{}
}
// NewKinesumer initializes and returns a new Kinesumer client. It
// validates the config, builds the state store and Kinesis client
// (optionally assuming a cross-account role), and runs init to register
// the client and start the background sync loop.
func NewKinesumer(cfg *Config) (*Kinesumer, error) {
	if cfg.App == "" {
		return nil, errors.WithStack(
			errors.New("you must pass the app name"),
		)
	}
	// Make unique client id.
	// NOTE(review): hostname + 6 random chars; collisions across
	// identically named hosts are unlikely but not impossible.
	id, err := os.Hostname()
	if err != nil {
		return nil, errors.WithStack(err)
	}
	id += xrand.StringN(6) // Add suffix.
	// Initialize the state store.
	stateStore, err := newStateStore(cfg)
	if err != nil {
		return nil, errors.WithStack(err)
	}
	// Initialize the AWS session to build Kinesis client.
	awsCfg := aws.NewConfig()
	awsCfg.WithRegion(cfg.KinesisRegion)
	if cfg.KinesisEndpoint != "" {
		awsCfg.WithEndpoint(cfg.KinesisEndpoint)
	}
	sess, err := session.NewSession(awsCfg)
	if err != nil {
		return nil, errors.WithStack(err)
	}
	// When RoleARN is set, wrap the session credentials with STS
	// assume-role so a stream in another account can be read.
	var cfgs []*aws.Config
	if cfg.RoleARN != "" {
		cfgs = append(cfgs,
			aws.NewConfig().WithCredentials(
				stscreds.NewCredentials(
					sess, cfg.RoleARN,
				),
			),
		)
	}
	buffer := recordsChanBuffer
	kinesumer := &Kinesumer{
		id:          id,
		client:      kinesis.New(sess, cfgs...),
		stateStore:  stateStore,
		shardCaches: make(map[string][]string),
		shards:      make(map[string]Shards),
		checkPoints: make(map[string]*sync.Map),
		nextIters:   make(map[string]*sync.Map),
		scanLimit:   defaultScanLimit,
		scanTimeout: defaultScanTimeout,
		records:     make(chan *Record, buffer),
		errors:      make(chan error, 1),
		wait:        sync.WaitGroup{},
		stop:        make(chan struct{}),
		close:       make(chan struct{}),
	}
	// Apply user overrides of the scanning defaults.
	if cfg.ScanLimit > 0 {
		kinesumer.scanLimit = cfg.ScanLimit
	}
	if cfg.ScanTimeout > 0 {
		kinesumer.scanTimeout = cfg.ScanTimeout
	}
	if err := kinesumer.init(); err != nil {
		return nil, errors.WithStack(err)
	}
	return kinesumer, nil
}
// init registers this client in the state store and performs the first
// shard-info sync (during which leadership is determined), then starts
// the periodic client-sync loop in the background.
func (k *Kinesumer) init() error {
	ctx := context.Background()
	ctx, cancel := context.WithTimeout(ctx, syncTimeout)
	defer cancel()
	// Register itself to state store.
	if err := k.stateStore.RegisterClient(ctx, k.id); err != nil {
		return errors.WithStack(err)
	}
	// The leader will be determined during the initial sync job.
	if err := k.syncShardInfo(ctx); err != nil {
		return errors.WithStack(err)
	}
	go k.loopSyncClient()
	return nil
}
// listShards returns every shard of the given stream, following
// ListShards pagination. A shard is marked Closed when its
// sequence-number range has an ending sequence number (it no longer
// accepts writes).
func (k *Kinesumer) listShards(stream string) (Shards, error) {
	// The previous implementation read only the first page of results,
	// silently dropping shards on streams with more shards than one
	// ListShards call returns.
	input := &kinesis.ListShardsInput{
		StreamName: aws.String(stream),
	}
	var shards Shards
	for {
		output, err := k.client.ListShards(input)
		if err != nil {
			return nil, errors.WithStack(err)
		}
		for _, shard := range output.Shards {
			shards = append(shards, &Shard{
				ID:     *shard.ShardId,
				Closed: shard.SequenceNumberRange.EndingSequenceNumber != nil,
			})
		}
		if output.NextToken == nil {
			return shards, nil
		}
		// Subsequent pages must pass NextToken and omit StreamName
		// (the API treats them as mutually exclusive).
		input = &kinesis.ListShardsInput{NextToken: output.NextToken}
	}
}
// Consume consumes messages from Kinesis.
// It records the target streams and forces a shard-info re-sync;
// presumably syncShardInfo (re)starts the per-shard consume loops —
// confirm in the sync implementation. The returned channel carries
// records from every shard this client owns.
func (k *Kinesumer) Consume(
	streams []string) (<-chan *Record, error) {
	k.streams = streams
	ctx := context.Background()
	ctx, cancel := context.WithTimeout(ctx, syncTimeout)
	defer cancel()
	if err := k.syncShardInfo(ctx); err != nil {
		return nil, errors.WithStack(err)
	}
	return k.records, nil
}
// start re-arms the stop channel and spawns one consume goroutine per
// shard this client currently owns.
func (k *Kinesumer) start() {
	k.stop = make(chan struct{})
	for stream, shardsPerStream := range k.shards {
		for _, shard := range shardsPerStream {
			k.wait.Add(1)
			go k.consume(stream, shard)
		}
	}
}
// pause signals all consume goroutines to stop and blocks until they
// have exited; start may be called afterwards to resume.
func (k *Kinesumer) pause() {
	close(k.stop)
	k.wait.Wait()
}
// consume repeatedly scans a single shard until it is paused (stop),
// shut down (close), or the shard is CLOSED with no remaining data.
func (k *Kinesumer) consume(stream string, shard *Shard) {
	defer k.wait.Done()
	for {
		select {
		case <-k.stop:
			return
		case <-k.close:
			return
		default:
			time.Sleep(defaultTimeBuffer) // Time buffer to prevent high stress.
			if closed := k.consumeOnce(stream, shard); closed {
				return // Close consume loop if shard is CLOSED and has no data.
			}
		}
	}
}
// consumeOnce performs one GetRecords scan on a shard and reports
// whether the shard is CLOSED with no remaining data (so the consume
// loop may terminate). Records are pushed to k.records and the last
// sequence number is checkpointed.
func (k *Kinesumer) consumeOnce(stream string, shard *Shard) bool {
	ctx := context.Background()
	ctx, cancel := context.WithTimeout(ctx, k.scanTimeout)
	defer cancel()
	shardIter, err := k.getNextShardIterator(ctx, stream, shard.ID)
	if err != nil {
		k.errors <- errors.WithStack(err)
		// A resource-in-use error is treated as terminal for this loop.
		var riue *kinesis.ResourceInUseException
		if errors.As(err, &riue) {
			return true
		}
		return false
	}
	output, err := k.client.GetRecordsWithContext(ctx, &kinesis.GetRecordsInput{
		Limit:         aws.Int64(k.scanLimit),
		ShardIterator: shardIter,
	})
	if err != nil {
		k.errors <- errors.WithStack(err)
		var riue *kinesis.ResourceInUseException
		if errors.As(err, &riue) {
			return true
		}
		var eie *kinesis.ExpiredIteratorException
		if errors.As(err, &eie) {
			k.nextIters[stream].Delete(shard.ID) // Delete expired next iterator cache.
		}
		return false
	}
	defer k.nextIters[stream].Store(shard.ID, output.NextShardIterator) // Update iter.
	n := len(output.Records)
	// We no longer care about shards that have no records left and are in the "CLOSED" state.
	if n == 0 {
		return shard.Closed
	}
	var lastSequence string
	for i, record := range output.Records {
		k.records <- &Record{
			Stream: stream,
			Record: record,
		}
		if i == n-1 {
			lastSequence = *record.SequenceNumber
		}
	}
	// Check point the sequence number.
	ctx = context.Background()
	ctx, cancel = context.WithTimeout(ctx, checkPointTimeout)
	defer cancel()
	if err := k.stateStore.UpdateCheckPoint(ctx, stream, shard.ID, lastSequence); err != nil {
		// Checkpoint failure is logged, not fatal: the in-memory
		// checkpoint below still advances, at the cost of possible
		// re-delivery after a restart.
		log.Err(err).
			Str("stream", stream).
			Str("shard id", shard.ID).
			Str("missed sequence number", lastSequence).
			Msg("kinesumer: failed to UpdateCheckPoint")
	}
	k.checkPoints[stream].Store(shard.ID, lastSequence)
	return false
}
// getNextShardIterator returns the iterator for the next GetRecords
// call on a shard: the cached one when present, otherwise a freshly
// requested iterator (AFTER_SEQUENCE_NUMBER when a checkpoint exists,
// LATEST for brand-new shards). Fresh iterators are cached.
func (k *Kinesumer) getNextShardIterator(
	ctx context.Context, stream, shardID string) (*string, error) {
	if iter, ok := k.nextIters[stream].Load(shardID); ok {
		return iter.(*string), nil
	}
	input := &kinesis.GetShardIteratorInput{
		StreamName: aws.String(stream),
		ShardId:    aws.String(shardID),
	}
	if seq, ok := k.checkPoints[stream].Load(shardID); ok {
		input.SetShardIteratorType(kinesis.ShardIteratorTypeAfterSequenceNumber)
		input.SetStartingSequenceNumber(seq.(string))
	} else {
		input.SetShardIteratorType(kinesis.ShardIteratorTypeLatest)
	}
	output, err := k.client.GetShardIteratorWithContext(ctx, input)
	if err != nil {
		return nil, err
	}
	k.nextIters[stream].Store(shardID, output.ShardIterator)
	return output.ShardIterator, nil
}
// Refresh refreshes the consuming streams.
// NOTE(review): this write is not synchronized with background readers
// of k.streams — confirm there is no data race.
func (k *Kinesumer) Refresh(streams []string) {
	k.streams = streams
}
// Errors returns the channel on which consuming errors are delivered.
// Callers should drain it: the channel's capacity is 1.
func (k *Kinesumer) Errors() <-chan error {
	return k.errors
}
// Close stops the consuming and sync jobs.
// The order matters: close(k.close) ends the consume loops, wait.Wait()
// lets them finish, and only then are the records and errors channels
// closed. The final sleep gives in-flight sync jobs time to complete.
func (k *Kinesumer) Close() {
	log.Info().
		Msg("kinesumer: closing the kinesumer")
	close(k.close)
	k.wait.Wait()
	// Client should drain the remaining records.
	close(k.records)
	// Drain the remaining errors.
	close(k.errors)
	for range k.errors {
		// Do nothing with errors.
	}
	// Wait last sync jobs.
	time.Sleep(syncTimeout)
	log.Info().
		Msg("kinesumer: shutdown successfully")
}
|
package ontap
import (
"bytes"
"encoding/xml"
"fmt"
"github.com/go-xmlfmt/xmlfmt"
"net/http"
"regexp"
"strconv"
"strings"
)
// AggrInfo holds capacity, state, and storage-efficiency attributes of
// a single ONTAP aggregate. Size values are kept as the raw strings
// returned by the API.
type AggrInfo struct {
	Name            string
	SizeTotal       string
	SizeUsed        string
	SizeAvailable   string
	SizeUsedPercent string
	State           string
	Cluster         string
	// Data Compaction
	DataCompactionSpaceSaved        string
	DataCompactionSpaceSavedPercent string
	// Sis
	SisSpaceSaved        string
	SisSpaceSavedPercent string
}
// GetAggrInfo queries the ONTAP API for aggregate attributes and
// returns up to limit aggregates with capacity and efficiency data.
func (c *Client) GetAggrInfo(limit int) ([]AggrInfo, error) {
	ixml := &AggrInfoRequest{}
	ixml.Version = apiVersion
	ixml.Xmlns = XMLns
	ixml.AggrGetIter.MaxRecords = strconv.Itoa(limit)
	// The marshal error was previously discarded (and then shadowed by
	// the http.NewRequest assignment); check it explicitly.
	output, err := xml.MarshalIndent(ixml, "", "\t")
	if err != nil {
		return nil, err
	}
	payload := bytes.NewReader(output)
	req, err := http.NewRequest("POST", c.Url, payload)
	if err != nil {
		return nil, err
	}
	response, err := c.doRequest(req)
	if err != nil {
		return nil, err
	}
	if c.Debug {
		x := xmlfmt.FormatXML(string(response), "\t", " ")
		println(x)
	}
	var result AggrInfoResponse
	if err := xml.Unmarshal(response, &result); err != nil {
		return nil, err
	}
	if strings.Compare(result.Results.Status, "passed") != 0 {
		return nil, fmt.Errorf("%s", xmlError)
	}
	var ail []AggrInfo
	for _, v := range result.Results.AttributesList.AggrAttributes {
		ai := AggrInfo{
			Name:                            v.AggregateName,
			SizeTotal:                       v.AggrSpaceAttributes.SizeTotal,
			SizeUsed:                        v.AggrSpaceAttributes.SizeUsed,
			SizeAvailable:                   v.AggrSpaceAttributes.SizeAvailable,
			SizeUsedPercent:                 v.AggrSpaceAttributes.PercentUsedCapacity,
			Cluster:                         v.AggrOwnershipAttributes.Cluster,
			DataCompactionSpaceSaved:        v.AggrSpaceAttributes.DataCompactionSpaceSaved,
			DataCompactionSpaceSavedPercent: v.AggrSpaceAttributes.DataCompactionSpaceSavedPercent,
			SisSpaceSaved:                   v.AggrSpaceAttributes.SisSpaceSaved,
			SisSpaceSavedPercent:            v.AggrSpaceAttributes.SisSpaceSavedPercent,
		}
		ail = append(ail, ai)
	}
	return ail, nil
}
// GetAggrPerf collects per-aggregate performance counters for every
// non-root aggregate known to the cluster.
func (c *Client) GetAggrPerf() ([]PerfCounter, error) {
	// Performance counters of interest.
	counters := []string{
		"user_reads",        // Number of user reads per second to the aggregate. per_sec
		"read_data",         // Amount of data read per second from the aggregate. b_per_sec
		"user_read_latency", // Average latency per block in microseconds for user read operations. microsec
		"user_writes",
		"user_write_latency",
		"write_data",
		"latency",
		"aggr_throughput", // Total amount of CP read data, user read data, and user write data per second. b_per_sec
	}
	agi, err := c.GetAggrInfo(100)
	if err != nil {
		return nil, err
	}
	// Create instance list and exclude internal aggr.
	// MustCompile replaces regexp.Compile whose error was silently
	// ignored; the pattern is a constant, so a panic can only mean a
	// programmer error.
	rootAggr := regexp.MustCompile("^root_")
	var inst []string
	for _, v := range agi {
		if !rootAggr.MatchString(v.Name) {
			inst = append(inst, v.Name)
		}
	}
	pc, err := c.getPerformanceData("aggregate", counters, inst)
	if err != nil {
		return nil, err
	}
	return pc, nil
}
|
package cmd
import (
"context"
"testing"
"github.com/stretchr/testify/require"
"github.com/tharsis/token/app"
)
// TODO fix invalid mnemonic test case
// Error: DeployToken err: " --- at github.com/tharsis/ethermint/app/ante/eth.go:217 (EthNonceVerificationDecorator.AnteHandle) ---\nCaused by: invalid nonce; got 11, expected 10: invalid sequence"
//
// Test_runDeployCmd executes the deploy command against a client built
// from the bundled mnemonic, injecting the client through the command
// context the way the real CLI entry point does.
func Test_runDeployCmd(t *testing.T) {
	var (
		err    error
		client *app.Client
	)
	tt := []struct {
		name    string
		pretest func()
		expErr  bool
	}{
		{
			"success",
			func() {
				client, err = app.NewClient(app.Mnemonic)
				// how to check for an error
			},
			false,
		},
		/*
			{
				"invalid mnemonic",
				func() {
					invalidMnemonic := strings.Replace(app.Mnemonic, "sight", "seen", -1)
					client, err = app.NewClient(invalidMnemonic)
					// how to check for an error
				},
				true,
			},
		*/
	}
	for _, tc := range tt {
		t.Run(tc.name, func(t *testing.T) {
			cmd := deployContractCommand()
			tc.pretest()
			// Inject the client into the context consumed by the command.
			clientCtx := app.Context{}.WithClient(client)
			ctx := context.WithValue(context.Background(), app.ClientContextKey, &clientCtx)
			if tc.expErr {
				require.Error(t, cmd.ExecuteContext(ctx))
				require.Error(t, err)
			} else {
				require.NoError(t, err)
				require.NoError(t, cmd.ExecuteContext(ctx))
			}
		})
	}
}
|
package auth
import (
"log"
"net/http"
"net/http/httptest"
"os"
"testing"
"github.com/spatiumsocialis/infra/pkg/common"
"github.com/stretchr/testify/assert"
)
// TestMain loads shared environment configuration before running the
// auth test suite; the run aborts if the environment cannot be loaded.
func TestMain(m *testing.M) {
	if err := common.LoadEnv(); err != nil {
		log.Fatalln(err)
	}
	os.Exit(m.Run())
}
func addTokenToRequest(r *http.Request, token string) {
r.Header.Add("Authorization", "Bearer "+token)
}
// testMiddlewareHelper generates a valid ID token for TestUID, passes
// it through testToken (which may corrupt or clear it), sends the
// resulting request through the auth Middleware with a no-op terminal
// handler, and asserts the expected status: 200 when shouldSucceed,
// 401 otherwise.
func testMiddlewareHelper(t *testing.T, shouldSucceed bool, testToken func(validToken string) string) {
	// api key retrieved from https://console.firebase.google.com/u/0/project/spatiumsocialis-e4683/settings/general
	token, err := GenerateToken(TestUID)
	assert.Nil(t, err)
	// Create a test Request with the ID token and a ResponseWriter
	r, err := http.NewRequest("", "", nil)
	assert.Nil(t, err)
	addTokenToRequest(r, testToken(token))
	w := httptest.NewRecorder()
	handler := Middleware(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}))
	// Call AuthenticateRequest with the test data
	handler.ServeHTTP(w, r)
	var correctStatus int
	if shouldSucceed {
		correctStatus = http.StatusOK
	} else {
		correctStatus = http.StatusUnauthorized
	}
	if w.Code != correctStatus {
		t.Fatalf("AuthorizationMiddleware test failed: response code was %v, should have been %v", w.Code, correctStatus)
	}
}
// TestAuthenticationMiddlewareValidToken tests the AuthenticateMiddleware with a valid ID Token and a dummy terminal handler
func TestAuthenticationMiddlewareValidToken(t *testing.T) {
	identity := func(validToken string) string { return validToken }
	testMiddlewareHelper(t, true, identity)
}
// TestAuthenticationMiddleWareNoToken verifies that a request carrying
// an empty token is rejected.
func TestAuthenticationMiddleWareNoToken(t *testing.T) {
	testMiddlewareHelper(t, false, func(string) string { return "" })
}
// TestAuthenticationMiddleWareInvalidToken verifies that a request
// carrying a malformed token is rejected.
func TestAuthenticationMiddleWareInvalidToken(t *testing.T) {
	testMiddlewareHelper(t, false, func(string) string { return "INVALID_TOKEN" })
}
|
package main
import (
"fmt"
"os"
s "strings"
"unicode"
)
// main demonstrates the strings package: case conversion, comparison,
// prefix/suffix tests, searching, trimming, splitting/joining, and
// strings.Reader. The printed output order is the program's behavior.
func main() {
	// Shorthand for fmt.Printf; its (n, err) results are ignored, which
	// is acceptable for a stdout demo.
	var f = fmt.Printf
	f("to upper %s\n", s.ToUpper("Hello world"))
	f("to lower %s\n", s.ToLower("Hello world"))
	// NOTE(review): strings.Title is deprecated in modern Go; kept here
	// for demonstration.
	f("%s\n", s.Title("hello world"))
	f("%v\n", s.EqualFold("hello World", "HELLO WORld"))
	f("%v\n", s.EqualFold("hello World", "HELLo worl"))
	f("Prefix: %v\n", s.HasPrefix("Mihalis", "Mi"))
	f("Prefix: %v\n", s.HasPrefix("Mihalis", "mi"))
	f("Suffix: %v\n", s.HasSuffix("Mihalis", "is"))
	f("Suffix: %v\n", s.HasSuffix("Mihalis", "IS"))
	f("Index: %v\n", s.Index("Mihalis", "ha"))
	f("Index: %v\n", s.Index("Mihalis", "Ha"))
	f("Count: %v\n", s.Count("Mihalis", "i"))
	f("Count: %v\n", s.Count("Mihalis", "I"))
	f("Repeat: %s\n", s.Repeat("ab", 5))
	f("TrimSpace: %s\n", s.TrimSpace(" \tThis is a line. \n"))
	f("TrimLeft: %s", s.TrimLeft(" \tThis is a\t line. \n", "\n\t "))
	f("TrimRight: %s\n", s.TrimRight(" \tThis is a\t line. \n", "\n\t "))
	f("Compare: %v\n", s.Compare("Mihalis", "MIHALIS"))
	f("Compare: %v\n", s.Compare("Mihalis", "Mihalis"))
	f("Compare: %v\n", s.Compare("MIHALIS", "MIHalis"))
	f("Fields: %v\n", s.Fields("This is a string!"))
	f("Fields: %v\n", s.Fields("Thisis\na\tstring!"))
	f("%s\n", s.Split("abcd efg", ""))
	f("%s\n", s.Split("abcd efg", " "))
	f("%s\n", s.Replace("abcd efg", "", "_", -1))
	f("%s\n", s.Replace("abcd efg", "", "_", 4))
	f("%s\n", s.Replace("abcd efg", "", "_", 2))
	f("%s\n", s.Replace("a b c d e f g", " ", "_", -1))
	lines := []string{"Line 1", "Line 2", "Line 3"}
	f("Join: %s\n", s.Join(lines, "+++"))
	f("SplitAfter: %s\n", s.SplitAfter("123++432++", "++"))
	// Trim away every rune that is not a letter.
	trimFunction := func(c rune) bool {
		return !unicode.IsLetter(c)
	}
	f("TrimFunc: %s\n", s.TrimFunc("123 abc ABC \t .", trimFunction))
	// strings.Reader: Len reports unread bytes; WriteTo streams them to
	// stdout.
	str := s.NewReader("this is an error!!")
	fmt.Println("str length: ", str.Len())
	n, err := str.WriteTo(os.Stdout)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("\nwrote %d bytes to Stdout\n", n)
}
|
package circular
import (
"fmt"
)
// CircularlyLinkedList is a doubly linked circular list: the head's previous
// pointer refers to the tail and the tail's next pointer refers to the head.
// The zero value is an empty, ready-to-use list.
type CircularlyLinkedList struct {
	head   *Node
	tail   *Node
	length int
}

// Node is a single element of the list, holding arbitrary data and links to
// both neighbours.
type Node struct {
	data     interface{}
	next     *Node
	previous *Node
}

// InsertFront adds nodeData at the head of the list, maintaining the circular
// links in both directions.
func (cl *CircularlyLinkedList) InsertFront(nodeData interface{}) {
	fmt.Printf("inserting %v to front of list...\n", nodeData)
	n := &Node{
		data: nodeData,
	}
	if cl.head == nil {
		// First element: it is its own neighbour in both directions.
		n.previous = n
		n.next = n
		cl.head = n
		cl.tail = n
	} else {
		n.next = cl.head
		n.previous = cl.tail
		cl.head.previous = n
		cl.head = n
		cl.tail.next = n
	}
	cl.length++
}

// RemoveFront removes the head node, if any.
//
// Bug fix: in a circular list head.next is never nil (a single node points to
// itself), so the previous `head.next == nil` check could never fire and
// removing the last element left it in the list. We now detect the
// single-element case with head == tail and empty the list properly.
func (cl *CircularlyLinkedList) RemoveFront() {
	fmt.Printf("removing head from list...\n")
	if cl.head == nil {
		return
	}
	if cl.head == cl.tail {
		// Single element: the list becomes empty.
		cl.head = nil
		cl.tail = nil
		cl.length--
		return
	}
	cl.head = cl.head.next
	cl.tail.next = cl.head
	cl.head.previous = cl.tail
	cl.length--
}

// Traverse walks the list once from head to tail, printing each node's data
// along with its neighbours' data.
func (cl *CircularlyLinkedList) Traverse() {
	if cl.head == nil {
		fmt.Printf("no head on list\n")
		return
	}
	fmt.Printf("traversing list...\n")
	currentNode := cl.head
	for {
		fmt.Printf("node...\n")
		if currentNode.previous != nil {
			fmt.Printf("previous node data: %v\n", currentNode.previous.data)
		}
		fmt.Printf("current node data: %v\n", currentNode.data)
		if currentNode.next != nil {
			fmt.Printf("next node data %v\n", currentNode.next.data)
		}
		if currentNode == cl.tail {
			break
		}
		currentNode = currentNode.next
	}
}

// Size returns the number of elements currently in the list.
func (cl *CircularlyLinkedList) Size() int {
	return cl.length
}

// GetHead returns the data stored at the head, or nil if the list is empty
// (previously this dereferenced a nil head and panicked).
func (cl *CircularlyLinkedList) GetHead() interface{} {
	if cl.head == nil {
		return nil
	}
	return cl.head.data
}

// GetTail returns the data stored at the tail, or nil if the list is empty
// (previously this dereferenced a nil tail and panicked).
func (cl *CircularlyLinkedList) GetTail() interface{} {
	if cl.tail == nil {
		return nil
	}
	return cl.tail.data
}
|
package main
import (
"github.com/robfig/cron"
"go-admin-starter/models"
"log"
"time"
)
// main schedules two cron jobs that periodically purge soft-deleted tags and
// articles, then blocks forever while the scheduler runs.
func main() {
	log.Println("Starting...")
	c := cron.New()

	var tag models.Tag
	// Previously the AddFunc errors were silently discarded; an invalid spec
	// would leave the job silently unscheduled.
	if err := c.AddFunc("* * * * * *", func() {
		log.Println("Run tag.CleanAll...")
		tag.CleanAll()
	}); err != nil {
		log.Fatalln(err)
	}

	var article models.Article
	if err := c.AddFunc("* * * * * *", func() {
		log.Println("Run article.CleanAll...")
		article.CleanAll()
	}); err != nil {
		log.Fatalln(err)
	}

	c.Start()
	// Park the main goroutine; the scheduler runs on its own goroutine. The
	// previous timer+select loop woke up every ten seconds just to rearm the
	// timer — a plain sleep loop is equivalent and simpler.
	for {
		time.Sleep(10 * time.Second)
	}
}
|
package snailframe
// Strip returns s_ with every leading and trailing rune that appears in
// chars_ removed (an empty chars_ strips nothing). It operates on runes, so
// multi-byte characters in either argument are handled correctly.
func Strip(s_ string, chars_ string) string {
	runes := []rune(s_)
	// Build a set of the runes to cut, for O(1) membership tests.
	cut := make(map[rune]bool, len(chars_))
	for _, c := range chars_ {
		cut[c] = true
	}
	// Advance lo past the leading cut runes.
	lo := 0
	for lo < len(runes) && cut[runes[lo]] {
		lo++
	}
	// Every rune was cut (or the input was empty): nothing remains.
	if lo == len(runes) {
		return ""
	}
	// Retreat hi past the trailing cut runes; runes[lo] is known to survive,
	// so hi cannot cross lo.
	hi := len(runes) - 1
	for hi >= 0 && cut[runes[hi]] {
		hi--
	}
	return string(runes[lo : hi+1])
}
package main
import "fmt"
// RPCError is an error that carries an RPC status code alongside a message.
type RPCError struct {
	Code    int64
	Message string
}

// Error implements the error interface, rendering as "<message>,code=<code>".
func (e *RPCError) Error() string {
	return fmt.Sprintf("%s,code=%d", e.Message, e.Code)
}

// NewRpcError builds an *RPCError from a status code and a message, returned
// as a plain error value.
func NewRpcError(code int64, msg string) error {
	return &RPCError{
		Code:    code,
		Message: msg,
	}
}

// Compile-time check that *RPCError satisfies the error interface.
var _ error = (*RPCError)(nil)

// AsErr demonstrates that an *RPCError flows through plain error values
// unchanged.
func AsErr(err error) error {
	return err
}

func main() {
	// Fixed typo in the demo message: "unknow" -> "unknown".
	var rpcErr error = NewRpcError(400, "unknown")
	err := AsErr(rpcErr)
	// Use fmt.Println instead of the builtin println, which writes to stderr
	// and is not guaranteed to stay in the language.
	fmt.Println(err.Error())
}
|
package bungo
import (
"errors"
"fmt"
)
// InventoryBucketLocation names an inventory bucket location.
type InventoryBucketLocation string

// ErrUnknownKey is wrapped into the error returned by
// InventoryBucketLocationsE for keys with no known location; detect it with
// errors.Is.
var ErrUnknownKey = errors.New("unknown key")

// InventoryBucketLocations maps key to its bucket location, returning the
// zero value ("") for unknown keys.
func InventoryBucketLocations(key int) InventoryBucketLocation {
	out, _ := InventoryBucketLocationsE(key)
	return out
}

// InventoryBucketLocationsE maps key to its bucket location, or returns an
// error wrapping ErrUnknownKey for unknown keys.
func InventoryBucketLocationsE(key int) (InventoryBucketLocation, error) {
	switch key {
	case 1:
		return "Inventory", nil
	default:
		// fmt.Errorf replaces errors.New(fmt.Sprintf(...)), which go vet /
		// staticcheck (S1028) flag; %w additionally makes the sentinel
		// detectable. The rendered message is unchanged: "unknown key: <n>".
		return "", fmt.Errorf("%w: %d", ErrUnknownKey, key)
	}
}
|
// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package host
import (
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
"gvisor.dev/gvisor/pkg/errors/linuxerr"
"gvisor.dev/gvisor/pkg/hostarch"
"gvisor.dev/gvisor/pkg/marshal/primitive"
"gvisor.dev/gvisor/pkg/sentry/arch"
"gvisor.dev/gvisor/pkg/sentry/kernel"
"gvisor.dev/gvisor/pkg/sentry/unimpl"
"gvisor.dev/gvisor/pkg/sentry/vfs"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/usermem"
)
// TTYFileDescription implements vfs.FileDescriptionImpl for a host file
// descriptor that wraps a TTY FD.
//
// +stateify savable
type TTYFileDescription struct {
	fileDescription
	// mu protects the fields below.
	mu sync.Mutex `state:"nosave"`
	// session is the session attached to this TTYFileDescription.
	session *kernel.Session
	// fgProcessGroup is the foreground process group that is currently
	// connected to this TTY. It is nil until InitForegroundProcessGroup is
	// called and is cleared again by Release.
	fgProcessGroup *kernel.ProcessGroup
	// termios contains the terminal attributes for this TTY. It is a cached
	// copy, refreshed after each successful TCSETS/TCSETSW/TCSETSF ioctl.
	termios linux.KernelTermios
}
// InitForegroundProcessGroup sets the foreground process group and session for
// the TTY. This should only be called once, after the foreground process group
// has been created, but before it has started running.
//
// It panics if a foreground process group has already been set.
func (t *TTYFileDescription) InitForegroundProcessGroup(pg *kernel.ProcessGroup) {
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.fgProcessGroup != nil {
		panic("foreground process group is already set")
	}
	t.fgProcessGroup = pg
	t.session = pg.Session()
}
// ForegroundProcessGroup returns the foreground process for the TTY.
//
// It may return nil if InitForegroundProcessGroup has not been called yet or
// if the file has already been released.
func (t *TTYFileDescription) ForegroundProcessGroup() *kernel.ProcessGroup {
	t.mu.Lock()
	defer t.mu.Unlock()
	return t.fgProcessGroup
}
// Release implements fs.FileOperations.Release.
//
// The foreground process group reference is dropped before the underlying
// host file description is released.
func (t *TTYFileDescription) Release(ctx context.Context) {
	t.mu.Lock()
	t.fgProcessGroup = nil
	t.mu.Unlock()
	t.fileDescription.Release(ctx)
}
// PRead implements vfs.FileDescriptionImpl.PRead.
//
// Reading from a TTY is only allowed for foreground process groups. Background
// process groups will either get EIO or a SIGTTIN.
func (t *TTYFileDescription) PRead(ctx context.Context, dst usermem.IOSequence, offset int64, opts vfs.ReadOptions) (int64, error) {
	t.mu.Lock()
	defer t.mu.Unlock()
	// Are we allowed to do the read?
	// drivers/tty/n_tty.c:n_tty_read()=>job_control()=>tty_check_change().
	if err := t.checkChange(ctx, linux.SIGTTIN); err != nil {
		return 0, err
	}
	// Do the read. t.mu is held (via the defer) across the underlying read,
	// so the job-control check and the read are atomic with respect to
	// foreground-group changes.
	return t.fileDescription.PRead(ctx, dst, offset, opts)
}
// Read implements vfs.FileDescriptionImpl.Read.
//
// Reading from a TTY is only allowed for foreground process groups. Background
// process groups will either get EIO or a SIGTTIN.
func (t *TTYFileDescription) Read(ctx context.Context, dst usermem.IOSequence, opts vfs.ReadOptions) (int64, error) {
	t.mu.Lock()
	defer t.mu.Unlock()
	// Are we allowed to do the read?
	// drivers/tty/n_tty.c:n_tty_read()=>job_control()=>tty_check_change().
	if err := t.checkChange(ctx, linux.SIGTTIN); err != nil {
		return 0, err
	}
	// Do the read. t.mu is held (via the defer) across the underlying read,
	// so the job-control check and the read are atomic with respect to
	// foreground-group changes.
	return t.fileDescription.Read(ctx, dst, opts)
}
// PWrite implements vfs.FileDescriptionImpl.PWrite.
//
// Background process groups are only stopped (SIGTTOU) when the TOSTOP local
// flag is set; otherwise background writes are allowed.
func (t *TTYFileDescription) PWrite(ctx context.Context, src usermem.IOSequence, offset int64, opts vfs.WriteOptions) (int64, error) {
	t.mu.Lock()
	defer t.mu.Unlock()
	// Check whether TOSTOP is enabled. This corresponds to the check in
	// drivers/tty/n_tty.c:n_tty_write().
	if t.termios.LEnabled(linux.TOSTOP) {
		if err := t.checkChange(ctx, linux.SIGTTOU); err != nil {
			return 0, err
		}
	}
	return t.fileDescription.PWrite(ctx, src, offset, opts)
}
// Write implements vfs.FileDescriptionImpl.Write.
//
// Background process groups are only stopped (SIGTTOU) when the TOSTOP local
// flag is set; otherwise background writes are allowed.
func (t *TTYFileDescription) Write(ctx context.Context, src usermem.IOSequence, opts vfs.WriteOptions) (int64, error) {
	t.mu.Lock()
	defer t.mu.Unlock()
	// Check whether TOSTOP is enabled. This corresponds to the check in
	// drivers/tty/n_tty.c:n_tty_write().
	if t.termios.LEnabled(linux.TOSTOP) {
		if err := t.checkChange(ctx, linux.SIGTTOU); err != nil {
			return 0, err
		}
	}
	return t.fileDescription.Write(ctx, src, opts)
}
// Ioctl implements vfs.FileDescriptionImpl.Ioctl.
//
// Terminal control requests (termios, foreground process group, window size,
// FIONREAD) are handled here; all other requests return ENOTTY.
func (t *TTYFileDescription) Ioctl(ctx context.Context, io usermem.IO, sysno uintptr, args arch.SyscallArguments) (uintptr, error) {
	task := kernel.TaskFromContext(ctx)
	if task == nil {
		return 0, linuxerr.ENOTTY
	}
	// Ignore arg[0]. This is the real FD:
	fd := t.inode.hostFD
	ioctl := args[1].Uint64()
	switch ioctl {
	case linux.FIONREAD:
		// Args: int *argp
		// Report the number of bytes available to read on the host FD.
		v, err := ioctlFionread(fd)
		if err != nil {
			return 0, err
		}
		var buf [4]byte
		hostarch.ByteOrder.PutUint32(buf[:], v)
		_, err = io.CopyOut(ctx, args[2].Pointer(), buf[:], usermem.IOOpts{})
		return 0, err
	case linux.TCGETS:
		// Args: struct termios *argp
		// Copy the host terminal attributes out to userspace.
		termios, err := ioctlGetTermios(fd)
		if err != nil {
			return 0, err
		}
		_, err = termios.CopyOut(task, args[2].Pointer())
		return 0, err
	case linux.TCSETS, linux.TCSETSW, linux.TCSETSF:
		// Args: const struct termios *argp
		// Background process groups get SIGTTOU (or EIO) before the
		// attributes may change.
		t.mu.Lock()
		defer t.mu.Unlock()
		if err := t.checkChange(ctx, linux.SIGTTOU); err != nil {
			return 0, err
		}
		var termios linux.Termios
		if _, err := termios.CopyIn(task, args[2].Pointer()); err != nil {
			return 0, err
		}
		err := ioctlSetTermios(fd, ioctl, &termios)
		if err == nil {
			// Keep the cached attributes in sync with the host.
			t.termios.FromTermios(termios)
		}
		return 0, err
	case linux.TIOCGPGRP:
		// Args: pid_t *argp
		// When successful, equivalent to *argp = tcgetpgrp(fd).
		// Get the process group ID of the foreground process group on this
		// terminal.
		pidns := kernel.PIDNamespaceFromContext(ctx)
		if pidns == nil {
			return 0, linuxerr.ENOTTY
		}
		t.mu.Lock()
		defer t.mu.Unlock()
		// Map the ProcessGroup into a ProcessGroupID in the task's PID namespace.
		pgID := primitive.Int32(pidns.IDOfProcessGroup(t.fgProcessGroup))
		_, err := pgID.CopyOut(task, args[2].Pointer())
		return 0, err
	case linux.TIOCSPGRP:
		// Args: const pid_t *argp
		// Equivalent to tcsetpgrp(fd, *argp).
		// Set the foreground process group ID of this terminal.
		t.mu.Lock()
		defer t.mu.Unlock()
		// Check that we are allowed to set the process group.
		if err := t.checkChange(ctx, linux.SIGTTOU); err != nil {
			// drivers/tty/tty_io.c:tiocspgrp() converts -EIO from tty_check_change()
			// to -ENOTTY.
			if linuxerr.Equals(linuxerr.EIO, err) {
				return 0, linuxerr.ENOTTY
			}
			return 0, err
		}
		// Check that calling task's process group is in the TTY session.
		if task.ThreadGroup().Session() != t.session {
			return 0, linuxerr.ENOTTY
		}
		var pgIDP primitive.Int32
		if _, err := pgIDP.CopyIn(task, args[2].Pointer()); err != nil {
			return 0, err
		}
		pgID := kernel.ProcessGroupID(pgIDP)
		// pgID must be non-negative.
		if pgID < 0 {
			return 0, linuxerr.EINVAL
		}
		// Process group with pgID must exist in this PID namespace.
		pidns := task.PIDNamespace()
		pg := pidns.ProcessGroupWithID(pgID)
		if pg == nil {
			return 0, linuxerr.ESRCH
		}
		// Check that new process group is in the TTY session.
		if pg.Session() != t.session {
			return 0, linuxerr.EPERM
		}
		t.fgProcessGroup = pg
		return 0, nil
	case linux.TIOCGWINSZ:
		// Args: struct winsize *argp
		// Get window size.
		winsize, err := ioctlGetWinsize(fd)
		if err != nil {
			return 0, err
		}
		_, err = winsize.CopyOut(task, args[2].Pointer())
		return 0, err
	case linux.TIOCSWINSZ:
		// Args: const struct winsize *argp
		// Set window size.
		// Unlike setting the termios, any process group (even background ones) can
		// set the winsize.
		var winsize linux.Winsize
		if _, err := winsize.CopyIn(task, args[2].Pointer()); err != nil {
			return 0, err
		}
		err := ioctlSetWinsize(fd, &winsize)
		return 0, err
	// Unimplemented commands: recorded via the unimplemented-event channel
	// before falling through to ENOTTY.
	case linux.TIOCSETD,
		linux.TIOCSBRK,
		linux.TIOCCBRK,
		linux.TCSBRK,
		linux.TCSBRKP,
		linux.TIOCSTI,
		linux.TIOCCONS,
		linux.FIONBIO,
		linux.TIOCEXCL,
		linux.TIOCNXCL,
		linux.TIOCGEXCL,
		linux.TIOCNOTTY,
		linux.TIOCSCTTY,
		linux.TIOCGSID,
		linux.TIOCGETD,
		linux.TIOCVHANGUP,
		linux.TIOCGDEV,
		linux.TIOCMGET,
		linux.TIOCMSET,
		linux.TIOCMBIC,
		linux.TIOCMBIS,
		linux.TIOCGICOUNT,
		linux.TCFLSH,
		linux.TIOCSSERIAL,
		linux.TIOCGPTPEER:
		unimpl.EmitUnimplementedEvent(ctx, sysno)
		fallthrough
	default:
		return 0, linuxerr.ENOTTY
	}
}
// checkChange checks that the process group is allowed to read, write, or
// change the state of the TTY.
//
// sig is the job-control signal corresponding to the attempted operation:
// SIGTTIN for reads, SIGTTOU for writes and terminal-state changes (see the
// callers in Read/PRead, Write/PWrite and Ioctl).
//
// This corresponds to Linux drivers/tty/tty_io.c:tty_check_change(). The logic
// is a bit convoluted, but documented inline.
//
// NOTE(review): t.fgProcessGroup is dereferenced without a nil check, so this
// assumes InitForegroundProcessGroup has run and Release has not — confirm
// that callers guarantee this ordering.
//
// Preconditions: t.mu must be held.
func (t *TTYFileDescription) checkChange(ctx context.Context, sig linux.Signal) error {
	task := kernel.TaskFromContext(ctx)
	if task == nil {
		// No task? Linux does not have an analog for this case, but
		// tty_check_change only blocks specific cases and is
		// surprisingly permissive. Allowing the change seems
		// appropriate.
		return nil
	}
	tg := task.ThreadGroup()
	pg := tg.ProcessGroup()
	// If the session for the task is different than the session for the
	// controlling TTY, then the change is allowed. Seems like a bad idea,
	// but that's exactly what linux does.
	if tg.Session() != t.fgProcessGroup.Session() {
		return nil
	}
	// If we are the foreground process group, then the change is allowed.
	if pg == t.fgProcessGroup {
		return nil
	}
	// We are not the foreground process group.
	// Is the provided signal blocked or ignored?
	if (task.SignalMask()&linux.SignalSetOf(sig) != 0) || tg.SignalHandlers().IsIgnored(sig) {
		// If the signal is SIGTTIN, then we are attempting to read
		// from the TTY. Don't send the signal and return EIO.
		if sig == linux.SIGTTIN {
			return linuxerr.EIO
		}
		// Otherwise, we are writing or changing terminal state. This is allowed.
		return nil
	}
	// If the process group is an orphan, return EIO.
	if pg.IsOrphan() {
		return linuxerr.EIO
	}
	// Otherwise, send the signal to the process group and return ERESTARTSYS.
	//
	// Note that Linux also unconditionally sets TIF_SIGPENDING on current,
	// but this isn't necessary in gVisor because the rationale given in
	// 040b6362d58f "tty: fix leakage of -ERESTARTSYS to userland" doesn't
	// apply: the sentry will handle -ERESTARTSYS in
	// kernel.runApp.execute() even if the kernel.Task isn't interrupted.
	//
	// Linux ignores the result of kill_pgrp().
	_ = pg.SendSignal(kernel.SignalInfoPriv(sig))
	return linuxerr.ERESTARTSYS
}
|
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"bytes"
"os"
"os/exec"
"path/filepath"
"text/template"
)
var (
	// tmpl renders a complete `protoc` invocation from a protocCmd value:
	// the -I include paths (joined with the platform's list separator), an
	// optional --descriptor_set_out, the backend's _out flag with its plugin
	// list, import path and proto-to-Go package (M) mappings, the output
	// directory, and finally the input proto files. The string is later
	// word-split by the shell, so no quoting is performed here.
	tmpl = template.Must(template.New("protoc").Parse(`protoc -I
{{- range $index, $include := .Includes -}}
{{if $index}}` + string(filepath.ListSeparator) + `{{end -}}
{{.}}
{{- end -}}
{{- if .Descriptors}} --include_imports --descriptor_set_out={{.Descriptors}}{{- end }} --
{{- .Name -}}_out={{if .Plugins}}plugins={{- range $index, $plugin := .Plugins -}}
{{- if $index}}+{{end}}
{{- $plugin}}
{{- end -}}
,{{- end -}}import_path={{.ImportPath}}
{{- range $proto, $gopkg := .PackageMap -}},M
{{- $proto}}={{$gopkg -}}
{{- end -}}
:{{- .OutputDir }}
{{- range .Files}} {{.}}{{end -}}
`))
)
// protocCmd defines inputs to a protoc command string, rendered by the tmpl
// template. (The comment previously referred to a nonexistent "protocParams".)
type protocCmd struct {
	Name        string            // backend name (the protoc --<Name>_out backend)
	Includes    []string          // include paths passed via -I
	Plugins     []string          // plugins joined with '+' inside the _out flag
	Descriptors string            // if non-empty, --descriptor_set_out target path
	ImportPath  string            // import_path passed to the backend
	PackageMap  map[string]string // proto file -> Go package overrides (M mappings)
	Files       []string          // input .proto files
	OutputDir   string            // directory receiving the generated code
}
// mkcmd renders the full protoc command line for p using the package
// template, returning it as a single string.
func (p *protocCmd) mkcmd() (string, error) {
	out := &bytes.Buffer{}
	if err := tmpl.Execute(out, p); err != nil {
		return "", err
	}
	return out.String(), nil
}
// run renders the protoc command for p and executes it through the shell,
// inheriting this process's stdout and stderr.
func (p *protocCmd) run() error {
	arg, err := p.mkcmd()
	if err != nil {
		return err
	}
	// Hand the whole string to `sh -c` so we don't need to re-split it here.
	cmd := exec.Command(shCmd, shArg, arg)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Run()
}
|
package testutil
import (
config_v2 "github.com/cyberark/secretless-broker/pkg/secretless/config/v2"
)
// GenerateConfigurations returns a Secretless Config along with a comprehensive
// list of LiveConfigurations for use in tests. It enumerates the full
// cartesian product of eight configuration dimensions (TLS setting, socket
// type, SSL mode/host, cert/key statuses, credential validity); each
// combination becomes one service (on its own port, starting at 3307) plus
// one matching LiveConfiguration.
// TODO: consider parametrising ConnectPort generator
func GenerateConfigurations() (config_v2.Config, LiveConfigurations) {
	// initialised with health-check listener and handler
	secretlessConfig := config_v2.Config{
		Services: []*config_v2.Service{
			{
				Debug:           true,
				Connector:       "mysql",
				ConnectorConfig: nil,
				Credentials: []*config_v2.Credential{
					{
						Name: "host",
						From: "literal",
						Get:  sampleDbConfig.HostWithTLS,
					},
					{
						Name: "username",
						From: "literal",
						Get:  sampleDbConfig.User,
					},
					{
						Name: "password",
						From: "literal",
						Get:  sampleDbConfig.Password,
					},
				},
				ListenOn: "unix:///sock/mysql.sock",
				Name:     "health-check",
			},
			{
				Debug:           true,
				Connector:       "pg",
				ConnectorConfig: nil,
				Credentials: []*config_v2.Credential{
					{
						Name: "host",
						From: "literal",
						Get:  "health-check",
					},
					{
						Name: "port",
						From: "literal",
						Get:  "3306",
					},
					{
						Name: "username",
						From: "literal",
						Get:  "health-check",
					},
					{
						Name: "password",
						From: "literal",
						Get:  "health-check",
					},
				},
				ListenOn: "tcp://0.0.0.0:5432",
				Name:     "pg-bench",
			},
		},
	}
	liveConfigurations := make(LiveConfigurations, 0)
	// TODO: Create a utility xprod function similar to the one here:
	// https://stackoverflow.com/questions/29002724/implement-ruby-style-cartesian-product-in-go
	// so we can avoid the nested for loops
	//
	// TODO: Remove "Value" suffixes -- no need for them, the lower case first letter
	// distinguishes them from the type itself, so it only degrades readability.
	portNumber := 3307
	// One loop per configuration dimension; the innermost body runs once per
	// combination and advances portNumber so every service is unique.
	for _, serverTLSSetting := range AllTLSSettings() {
		for _, socketType := range AllSocketTypes() {
			for _, sslMode := range AllSSLModes() {
				for _, sslHost := range AllSSLHosts() {
					for _, publicCertStatus := range AllPublicCertStatuses() {
						for _, privateKeyStatus := range AllPrivateKeyStatuses() {
							for _, rootCertStatus := range AllRootCertStatuses() {
								for _, areAuthCredentialsInvalid := range AllAuthCredentialsInvalidity() {
									connectionPort := ConnectionPort{
										// TODO: perhaps resolve this duplication of listener type
										SocketType: socketType,
										Port:       portNumber,
									}
									name := "test_service_" + connectionPort.ToPortString()
									credentials := areAuthCredentialsInvalid.toSecrets()
									liveConfiguration := LiveConfiguration{
										AbstractConfiguration: AbstractConfiguration{
											SocketType:               socketType,
											TLSSetting:               serverTLSSetting,
											SSLHost:                  sslHost,
											SSLMode:                  sslMode,
											RootCertStatus:           rootCertStatus,
											PrivateKeyStatus:         privateKeyStatus,
											PublicCertStatus:         publicCertStatus,
											AuthCredentialInvalidity: areAuthCredentialsInvalid,
										},
										ConnectionPort: connectionPort,
									}
									// Fold every dimension's secrets into the
									// service's credential list.
									credentials = append(
										credentials,
										// rootCertStatus
										rootCertStatus.toSecret(),
										//sslMode
										sslMode.toSecret(),
										//sslHost
										sslHost.toSecret(),
										//sslPrivateKeyTypeValue
										privateKeyStatus.toSecret(),
										//sslPublicCertTypeValue
										publicCertStatus.toSecret(),
									)
									// serverTLSSetting
									credentials = append(
										credentials,
										serverTLSSetting.toSecrets(sampleDbConfig)...,
									)
									// socketType determines the listen address scheme.
									address := ""
									switch socketType {
									case TCP:
										address = "tcp://0.0.0.0:" + connectionPort.ToPortString()
									case Socket:
										address = "unix://" + connectionPort.ToSocketPath()
									}
									svc := &config_v2.Service{
										Debug: true,
										// TODO: grab value from envvar for flexibility
										Connector:       sampleDbConfig.Protocol,
										ConnectorConfig: nil,
										Credentials:     credentials,
										ListenOn:        config_v2.NetworkAddress(address),
										Name:            name,
									}
									secretlessConfig.Services = append(
										secretlessConfig.Services,
										svc)
									liveConfigurations = append(liveConfigurations, liveConfiguration)
									portNumber++
								}
							}
						}
					}
				}
			}
		}
	}
	return secretlessConfig, liveConfigurations
}
|
// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package video
import (
"context"
"time"
"chromiumos/tast/local/bundles/cros/video/playback"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/ash"
"chromiumos/tast/local/chrome/browser"
"chromiumos/tast/local/chrome/display"
"chromiumos/tast/local/chrome/lacros"
"chromiumos/tast/local/tracing"
"chromiumos/tast/testing"
"chromiumos/tast/testing/hwdep"
)
// playbackPerfParams describes a single playback-performance test case.
type playbackPerfParams struct {
	// fileName is the video to play, relative to the test data directory.
	fileName string
	// decoderType selects the decoder implementation; the generated cases
	// below pass 0 for the *_hw variants and 1 for the *_sw variants.
	decoderType playback.DecoderType
	// browserType selects which browser (e.g. Ash or Lacros) runs the playback.
	browserType browser.Type
	// Creates a layout of |gridWidth| x |gridHeight| videos for playback. Values
	// less than 1 are clamped to a grid of 1x1.
	gridWidth  int
	gridHeight int
	// If set, trace system events using perfetto during playback.
	// (Fixed comment typo: "evens" -> "events".)
	perfTracing bool
	// If set, uses a longer video sequence which allows for measuring Media
	// Devtools "playback roughness".
	measureRoughness bool
}
func init() {
testing.AddTest(&testing.Test{
Func: PlaybackPerf,
LacrosStatus: testing.LacrosVariantExists,
Desc: "Measures video playback performance in Chrome browser with/without HW acceleration",
Contacts: []string{
"mcasas@chromium.org",
"hiroh@chromium.org",
"chromeos-gfx-video@google.com",
},
Attr: []string{"group:graphics", "graphics_video", "graphics_perbuild"},
SoftwareDeps: []string{"chrome"},
Data: []string{"video.html", "playback.js",
tracing.TraceProcessorAmd64, tracing.TraceProcessorArm, tracing.TraceProcessorArm64,
playback.TraceConfigFile, playback.GPUThreadSchedSQLFile},
// Default timeout (i.e. 2 minutes) is not enough for low-end devices.
Timeout: 5 * time.Minute,
Params: []testing.Param{
// Parameters generated by playback_perf_test.go. DO NOT EDIT.
{
Name: "h264_720p_30fps_hw",
Val: playbackPerfParams{
fileName: "perf/h264/720p_30fps_300frames.h264.mp4",
decoderType: 0,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"proprietary_codecs", "autotest-capability:hw_dec_h264_1080_30"},
ExtraData: []string{"perf/h264/720p_30fps_300frames.h264.mp4"},
Fixture: "chromeVideo",
},
{
Name: "h264_720p_30fps_sw",
Val: playbackPerfParams{
fileName: "perf/h264/720p_30fps_300frames.h264.mp4",
decoderType: 1,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"proprietary_codecs"},
ExtraData: []string{"perf/h264/720p_30fps_300frames.h264.mp4"},
Fixture: "chromeVideoWithSWDecoding",
},
{
Name: "h264_1080p_30fps_hw",
Val: playbackPerfParams{
fileName: "perf/h264/1080p_30fps_300frames.h264.mp4",
decoderType: 0,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"proprietary_codecs", "autotest-capability:hw_dec_h264_1080_30"},
ExtraData: []string{"perf/h264/1080p_30fps_300frames.h264.mp4"},
Fixture: "chromeVideo",
},
{
Name: "h264_1080p_30fps_sw",
Val: playbackPerfParams{
fileName: "perf/h264/1080p_30fps_300frames.h264.mp4",
decoderType: 1,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"proprietary_codecs"},
ExtraData: []string{"perf/h264/1080p_30fps_300frames.h264.mp4"},
Fixture: "chromeVideoWithSWDecoding",
},
{
Name: "h264_1080p_60fps_hw",
Val: playbackPerfParams{
fileName: "perf/h264/1080p_60fps_600frames.h264.mp4",
decoderType: 0,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"proprietary_codecs", "autotest-capability:hw_dec_h264_1080_60"},
ExtraData: []string{"perf/h264/1080p_60fps_600frames.h264.mp4"},
Fixture: "chromeVideo",
},
{
Name: "h264_1080p_60fps_sw",
Val: playbackPerfParams{
fileName: "perf/h264/1080p_60fps_600frames.h264.mp4",
decoderType: 1,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"proprietary_codecs"},
ExtraData: []string{"perf/h264/1080p_60fps_600frames.h264.mp4"},
Fixture: "chromeVideoWithSWDecoding",
},
{
Name: "h264_2160p_30fps_hw",
Val: playbackPerfParams{
fileName: "perf/h264/2160p_30fps_300frames.h264.mp4",
decoderType: 0,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"proprietary_codecs", "autotest-capability:hw_dec_h264_2160_30"},
ExtraData: []string{"perf/h264/2160p_30fps_300frames.h264.mp4"},
Fixture: "chromeVideo",
},
{
Name: "h264_2160p_30fps_sw",
Val: playbackPerfParams{
fileName: "perf/h264/2160p_30fps_300frames.h264.mp4",
decoderType: 1,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"proprietary_codecs"},
ExtraData: []string{"perf/h264/2160p_30fps_300frames.h264.mp4"},
Fixture: "chromeVideoWithSWDecoding",
},
{
Name: "h264_2160p_60fps_hw",
Val: playbackPerfParams{
fileName: "perf/h264/2160p_60fps_600frames.h264.mp4",
decoderType: 0,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"proprietary_codecs", "autotest-capability:hw_dec_h264_2160_60"},
ExtraData: []string{"perf/h264/2160p_60fps_600frames.h264.mp4"},
Fixture: "chromeVideo",
},
{
Name: "h264_2160p_60fps_sw",
Val: playbackPerfParams{
fileName: "perf/h264/2160p_60fps_600frames.h264.mp4",
decoderType: 1,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"proprietary_codecs"},
ExtraData: []string{"perf/h264/2160p_60fps_600frames.h264.mp4"},
Fixture: "chromeVideoWithSWDecoding",
},
{
Name: "vp8_720p_30fps_hw",
Val: playbackPerfParams{
fileName: "perf/vp8/720p_30fps_300frames.vp8.webm",
decoderType: 0,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"autotest-capability:hw_dec_vp8_1080_30"},
ExtraData: []string{"perf/vp8/720p_30fps_300frames.vp8.webm"},
Fixture: "chromeVideo",
},
{
Name: "vp8_720p_30fps_sw",
Val: playbackPerfParams{
fileName: "perf/vp8/720p_30fps_300frames.vp8.webm",
decoderType: 1,
browserType: browser.TypeAsh,
},
ExtraData: []string{"perf/vp8/720p_30fps_300frames.vp8.webm"},
Fixture: "chromeVideoWithSWDecoding",
},
{
Name: "vp8_1080p_30fps_hw",
Val: playbackPerfParams{
fileName: "perf/vp8/1080p_30fps_300frames.vp8.webm",
decoderType: 0,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"autotest-capability:hw_dec_vp8_1080_30"},
ExtraData: []string{"perf/vp8/1080p_30fps_300frames.vp8.webm"},
Fixture: "chromeVideo",
},
{
Name: "vp8_1080p_30fps_sw",
Val: playbackPerfParams{
fileName: "perf/vp8/1080p_30fps_300frames.vp8.webm",
decoderType: 1,
browserType: browser.TypeAsh,
},
ExtraData: []string{"perf/vp8/1080p_30fps_300frames.vp8.webm"},
Fixture: "chromeVideoWithSWDecoding",
},
{
Name: "vp8_1080p_60fps_hw",
Val: playbackPerfParams{
fileName: "perf/vp8/1080p_60fps_600frames.vp8.webm",
decoderType: 0,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"autotest-capability:hw_dec_vp8_1080_60"},
ExtraData: []string{"perf/vp8/1080p_60fps_600frames.vp8.webm"},
Fixture: "chromeVideo",
},
{
Name: "vp8_1080p_60fps_sw",
Val: playbackPerfParams{
fileName: "perf/vp8/1080p_60fps_600frames.vp8.webm",
decoderType: 1,
browserType: browser.TypeAsh,
},
ExtraData: []string{"perf/vp8/1080p_60fps_600frames.vp8.webm"},
Fixture: "chromeVideoWithSWDecoding",
},
{
Name: "vp8_2160p_30fps_hw",
Val: playbackPerfParams{
fileName: "perf/vp8/2160p_30fps_300frames.vp8.webm",
decoderType: 0,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"autotest-capability:hw_dec_vp8_2160_30"},
ExtraData: []string{"perf/vp8/2160p_30fps_300frames.vp8.webm"},
Fixture: "chromeVideo",
},
{
Name: "vp8_2160p_30fps_sw",
Val: playbackPerfParams{
fileName: "perf/vp8/2160p_30fps_300frames.vp8.webm",
decoderType: 1,
browserType: browser.TypeAsh,
},
ExtraData: []string{"perf/vp8/2160p_30fps_300frames.vp8.webm"},
Fixture: "chromeVideoWithSWDecoding",
},
{
Name: "vp8_2160p_60fps_hw",
Val: playbackPerfParams{
fileName: "perf/vp8/2160p_60fps_600frames.vp8.webm",
decoderType: 0,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"autotest-capability:hw_dec_vp8_2160_60"},
ExtraData: []string{"perf/vp8/2160p_60fps_600frames.vp8.webm"},
Fixture: "chromeVideo",
},
{
Name: "vp8_2160p_60fps_sw",
Val: playbackPerfParams{
fileName: "perf/vp8/2160p_60fps_600frames.vp8.webm",
decoderType: 1,
browserType: browser.TypeAsh,
},
ExtraData: []string{"perf/vp8/2160p_60fps_600frames.vp8.webm"},
Fixture: "chromeVideoWithSWDecoding",
},
{
Name: "vp9_720p_30fps_hw",
Val: playbackPerfParams{
fileName: "perf/vp9/720p_30fps_300frames.vp9.webm",
decoderType: 0,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"autotest-capability:hw_dec_vp9_1080_30"},
ExtraData: []string{"perf/vp9/720p_30fps_300frames.vp9.webm"},
Fixture: "chromeVideo",
},
{
Name: "vp9_720p_30fps_sw",
Val: playbackPerfParams{
fileName: "perf/vp9/720p_30fps_300frames.vp9.webm",
decoderType: 1,
browserType: browser.TypeAsh,
},
ExtraData: []string{"perf/vp9/720p_30fps_300frames.vp9.webm"},
Fixture: "chromeVideoWithSWDecoding",
},
{
Name: "vp9_1080p_30fps_hw",
Val: playbackPerfParams{
fileName: "perf/vp9/1080p_30fps_300frames.vp9.webm",
decoderType: 0,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"autotest-capability:hw_dec_vp9_1080_30"},
ExtraData: []string{"perf/vp9/1080p_30fps_300frames.vp9.webm"},
Fixture: "chromeVideo",
},
{
Name: "vp9_1080p_30fps_sw",
Val: playbackPerfParams{
fileName: "perf/vp9/1080p_30fps_300frames.vp9.webm",
decoderType: 1,
browserType: browser.TypeAsh,
},
ExtraData: []string{"perf/vp9/1080p_30fps_300frames.vp9.webm"},
Fixture: "chromeVideoWithSWDecoding",
},
{
Name: "vp9_1080p_60fps_hw",
Val: playbackPerfParams{
fileName: "perf/vp9/1080p_60fps_600frames.vp9.webm",
decoderType: 0,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"autotest-capability:hw_dec_vp9_1080_60"},
ExtraData: []string{"perf/vp9/1080p_60fps_600frames.vp9.webm"},
Fixture: "chromeVideo",
},
{
Name: "vp9_1080p_60fps_sw",
Val: playbackPerfParams{
fileName: "perf/vp9/1080p_60fps_600frames.vp9.webm",
decoderType: 1,
browserType: browser.TypeAsh,
},
ExtraData: []string{"perf/vp9/1080p_60fps_600frames.vp9.webm"},
Fixture: "chromeVideoWithSWDecoding",
},
{
Name: "vp9_2160p_30fps_hw",
Val: playbackPerfParams{
fileName: "perf/vp9/2160p_30fps_300frames.vp9.webm",
decoderType: 0,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"autotest-capability:hw_dec_vp9_2160_30"},
ExtraData: []string{"perf/vp9/2160p_30fps_300frames.vp9.webm"},
Fixture: "chromeVideo",
},
{
Name: "vp9_2160p_30fps_sw",
Val: playbackPerfParams{
fileName: "perf/vp9/2160p_30fps_300frames.vp9.webm",
decoderType: 1,
browserType: browser.TypeAsh,
},
ExtraData: []string{"perf/vp9/2160p_30fps_300frames.vp9.webm"},
Fixture: "chromeVideoWithSWDecoding",
},
{
Name: "vp9_2160p_60fps_hw",
Val: playbackPerfParams{
fileName: "perf/vp9/2160p_60fps_600frames.vp9.webm",
decoderType: 0,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"autotest-capability:hw_dec_vp9_2160_60"},
ExtraData: []string{"perf/vp9/2160p_60fps_600frames.vp9.webm"},
Fixture: "chromeVideo",
},
{
Name: "vp9_2160p_60fps_sw",
Val: playbackPerfParams{
fileName: "perf/vp9/2160p_60fps_600frames.vp9.webm",
decoderType: 1,
browserType: browser.TypeAsh,
},
ExtraData: []string{"perf/vp9/2160p_60fps_600frames.vp9.webm"},
Fixture: "chromeVideoWithSWDecoding",
},
{
Name: "vp9_4320p_30fps_hw",
Val: playbackPerfParams{
fileName: "perf/vp9/4320p_30fps_300frames.vp9.webm",
decoderType: 0,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"autotest-capability:hw_dec_vp9_4320_30"},
ExtraData: []string{"perf/vp9/4320p_30fps_300frames.vp9.webm"},
Fixture: "chromeVideo",
},
{
Name: "vp9_4320p_60fps_hw",
Val: playbackPerfParams{
fileName: "perf/vp9/4320p_60fps_600frames.vp9.webm",
decoderType: 0,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"autotest-capability:hw_dec_vp9_4320_60"},
ExtraData: []string{"perf/vp9/4320p_60fps_600frames.vp9.webm"},
Fixture: "chromeVideo",
},
{
Name: "av1_720p_30fps_hw",
Val: playbackPerfParams{
fileName: "perf/av1/720p_30fps_300frames.av1.mp4",
decoderType: 0,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"autotest-capability:hw_dec_av1_1080_30"},
ExtraData: []string{"perf/av1/720p_30fps_300frames.av1.mp4"},
Fixture: "chromeVideoWithHWAV1Decoding",
},
{
Name: "av1_720p_30fps_sw",
Val: playbackPerfParams{
fileName: "perf/av1/720p_30fps_300frames.av1.mp4",
decoderType: 1,
browserType: browser.TypeAsh,
},
ExtraData: []string{"perf/av1/720p_30fps_300frames.av1.mp4"},
Fixture: "chromeVideoWithSWDecoding",
},
{
Name: "av1_1080p_30fps_hw",
Val: playbackPerfParams{
fileName: "perf/av1/1080p_30fps_300frames.av1.mp4",
decoderType: 0,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"autotest-capability:hw_dec_av1_1080_30"},
ExtraData: []string{"perf/av1/1080p_30fps_300frames.av1.mp4"},
Fixture: "chromeVideoWithHWAV1Decoding",
},
{
Name: "av1_1080p_30fps_sw",
Val: playbackPerfParams{
fileName: "perf/av1/1080p_30fps_300frames.av1.mp4",
decoderType: 1,
browserType: browser.TypeAsh,
},
ExtraData: []string{"perf/av1/1080p_30fps_300frames.av1.mp4"},
Fixture: "chromeVideoWithSWDecoding",
},
{
Name: "av1_1080p_60fps_hw",
Val: playbackPerfParams{
fileName: "perf/av1/1080p_60fps_600frames.av1.mp4",
decoderType: 0,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"autotest-capability:hw_dec_av1_1080_60"},
ExtraData: []string{"perf/av1/1080p_60fps_600frames.av1.mp4"},
Fixture: "chromeVideoWithHWAV1Decoding",
},
{
Name: "av1_1080p_60fps_sw",
Val: playbackPerfParams{
fileName: "perf/av1/1080p_60fps_600frames.av1.mp4",
decoderType: 1,
browserType: browser.TypeAsh,
},
ExtraData: []string{"perf/av1/1080p_60fps_600frames.av1.mp4"},
Fixture: "chromeVideoWithSWDecoding",
},
{
Name: "av1_2160p_30fps_hw",
Val: playbackPerfParams{
fileName: "perf/av1/2160p_30fps_300frames.av1.mp4",
decoderType: 0,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"autotest-capability:hw_dec_av1_2160_30"},
ExtraData: []string{"perf/av1/2160p_30fps_300frames.av1.mp4"},
Fixture: "chromeVideoWithHWAV1Decoding",
},
{
Name: "av1_2160p_30fps_sw",
Val: playbackPerfParams{
fileName: "perf/av1/2160p_30fps_300frames.av1.mp4",
decoderType: 1,
browserType: browser.TypeAsh,
},
ExtraData: []string{"perf/av1/2160p_30fps_300frames.av1.mp4"},
Fixture: "chromeVideoWithSWDecoding",
},
{
Name: "av1_2160p_60fps_hw",
Val: playbackPerfParams{
fileName: "perf/av1/2160p_60fps_600frames.av1.mp4",
decoderType: 0,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"autotest-capability:hw_dec_av1_2160_60"},
ExtraData: []string{"perf/av1/2160p_60fps_600frames.av1.mp4"},
Fixture: "chromeVideoWithHWAV1Decoding",
},
{
Name: "av1_2160p_60fps_sw",
Val: playbackPerfParams{
fileName: "perf/av1/2160p_60fps_600frames.av1.mp4",
decoderType: 1,
browserType: browser.TypeAsh,
},
ExtraData: []string{"perf/av1/2160p_60fps_600frames.av1.mp4"},
Fixture: "chromeVideoWithSWDecoding",
},
{
Name: "hevc_720p_30fps_hw",
Val: playbackPerfParams{
fileName: "perf/hevc/720p_30fps_300frames.hevc.mp4",
decoderType: 0,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"proprietary_codecs", "autotest-capability:hw_dec_hevc_1080_30"},
ExtraData: []string{"perf/hevc/720p_30fps_300frames.hevc.mp4"},
Fixture: "chromeVideo",
},
{
Name: "hevc_1080p_30fps_hw",
Val: playbackPerfParams{
fileName: "perf/hevc/1080p_30fps_300frames.hevc.mp4",
decoderType: 0,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"proprietary_codecs", "autotest-capability:hw_dec_hevc_1080_30"},
ExtraData: []string{"perf/hevc/1080p_30fps_300frames.hevc.mp4"},
Fixture: "chromeVideo",
},
{
Name: "hevc_1080p_60fps_hw",
Val: playbackPerfParams{
fileName: "perf/hevc/1080p_60fps_600frames.hevc.mp4",
decoderType: 0,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"proprietary_codecs", "autotest-capability:hw_dec_hevc_1080_60"},
ExtraData: []string{"perf/hevc/1080p_60fps_600frames.hevc.mp4"},
Fixture: "chromeVideo",
},
{
Name: "hevc_2160p_30fps_hw",
Val: playbackPerfParams{
fileName: "perf/hevc/2160p_30fps_300frames.hevc.mp4",
decoderType: 0,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"proprietary_codecs", "autotest-capability:hw_dec_hevc_2160_30"},
ExtraData: []string{"perf/hevc/2160p_30fps_300frames.hevc.mp4"},
Fixture: "chromeVideo",
},
{
Name: "hevc_2160p_60fps_hw",
Val: playbackPerfParams{
fileName: "perf/hevc/2160p_60fps_600frames.hevc.mp4",
decoderType: 0,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"proprietary_codecs", "autotest-capability:hw_dec_hevc_2160_60"},
ExtraData: []string{"perf/hevc/2160p_60fps_600frames.hevc.mp4"},
Fixture: "chromeVideo",
},
{
Name: "hevc_4320p_30fps_hw",
Val: playbackPerfParams{
fileName: "perf/hevc/4320p_30fps_300frames.hevc.mp4",
decoderType: 0,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"proprietary_codecs", "autotest-capability:hw_dec_hevc_4320_30"},
ExtraData: []string{"perf/hevc/4320p_30fps_300frames.hevc.mp4"},
Fixture: "chromeVideo",
},
{
Name: "hevc_4320p_60fps_hw",
Val: playbackPerfParams{
fileName: "perf/hevc/4320p_60fps_600frames.hevc.mp4",
decoderType: 0,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"proprietary_codecs", "autotest-capability:hw_dec_hevc_4320_60"},
ExtraData: []string{"perf/hevc/4320p_60fps_600frames.hevc.mp4"},
Fixture: "chromeVideo",
},
{
Name: "hevc10_2160p_30fps_hw",
Val: playbackPerfParams{
fileName: "perf/hevc10/2160p_30fps_300frames.hevc10.mp4",
decoderType: 0,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"proprietary_codecs", "autotest-capability:hw_dec_hevc_2160_30_10bpp"},
ExtraData: []string{"perf/hevc10/2160p_30fps_300frames.hevc10.mp4"},
Fixture: "chromeVideo",
},
{
Name: "hevc10_4320p_30fps_hw",
Val: playbackPerfParams{
fileName: "perf/hevc10/4320p_30fps_300frames.hevc10.mp4",
decoderType: 0,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"proprietary_codecs", "autotest-capability:hw_dec_hevc_4320_30_10bpp"},
ExtraData: []string{"perf/hevc10/4320p_30fps_300frames.hevc10.mp4"},
Fixture: "chromeVideo",
},
{
Name: "hevc10_2160p_60fps_hw",
Val: playbackPerfParams{
fileName: "perf/hevc10/2160p_60fps_600frames.hevc10.mp4",
decoderType: 0,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"proprietary_codecs", "autotest-capability:hw_dec_hevc_2160_60_10bpp"},
ExtraData: []string{"perf/hevc10/2160p_60fps_600frames.hevc10.mp4"},
Fixture: "chromeVideo",
},
{
Name: "hevc10_4320p_60fps_hw",
Val: playbackPerfParams{
fileName: "perf/hevc10/4320p_60fps_600frames.hevc10.mp4",
decoderType: 0,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"proprietary_codecs", "autotest-capability:hw_dec_hevc_4320_60_10bpp"},
ExtraData: []string{"perf/hevc10/4320p_60fps_600frames.hevc10.mp4"},
Fixture: "chromeVideo",
},
{
Name: "h264_1080p_60fps_hw_alt",
Val: playbackPerfParams{
fileName: "perf/h264/1080p_60fps_600frames.h264.mp4",
decoderType: 0,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"proprietary_codecs", "autotest-capability:hw_dec_h264_1080_60", "video_decoder_legacy_supported"},
ExtraData: []string{"perf/h264/1080p_60fps_600frames.h264.mp4"},
Fixture: "chromeAlternateVideoDecoder",
},
{
Name: "vp8_1080p_60fps_hw_alt",
Val: playbackPerfParams{
fileName: "perf/vp8/1080p_60fps_600frames.vp8.webm",
decoderType: 0,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"autotest-capability:hw_dec_vp8_1080_60", "video_decoder_legacy_supported"},
ExtraData: []string{"perf/vp8/1080p_60fps_600frames.vp8.webm"},
Fixture: "chromeAlternateVideoDecoder",
},
{
Name: "vp9_1080p_60fps_hw_alt",
Val: playbackPerfParams{
fileName: "perf/vp9/1080p_60fps_600frames.vp9.webm",
decoderType: 0,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"autotest-capability:hw_dec_vp9_1080_60", "video_decoder_legacy_supported"},
ExtraData: []string{"perf/vp9/1080p_60fps_600frames.vp9.webm"},
Fixture: "chromeAlternateVideoDecoder",
},
{
Name: "vp9_2160p_60fps_hw_alt",
Val: playbackPerfParams{
fileName: "perf/vp9/2160p_60fps_600frames.vp9.webm",
decoderType: 0,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"autotest-capability:hw_dec_vp9_2160_60", "video_decoder_legacy_supported"},
ExtraData: []string{"perf/vp9/2160p_60fps_600frames.vp9.webm"},
Fixture: "chromeAlternateVideoDecoder",
},
{
Name: "h264_1080p_30fps_hw_long",
Val: playbackPerfParams{
fileName: "crosvideo/1080.mp4",
decoderType: 0,
browserType: browser.TypeAsh,
measureRoughness: true,
},
ExtraHardwareDeps: hwdep.D(hwdep.SkipOnModel("hana", "elm"), hwdep.InternalDisplay()),
ExtraSoftwareDeps: []string{"proprietary_codecs", "autotest-capability:hw_dec_h264_1080_30", "drm_atomic"},
ExtraData: []string{"crosvideo/1080.mp4"},
Fixture: "chromeVideo",
},
{
Name: "h264_1080p_30fps_sw_long",
Val: playbackPerfParams{
fileName: "crosvideo/1080.mp4",
decoderType: 1,
browserType: browser.TypeAsh,
measureRoughness: true,
},
ExtraHardwareDeps: hwdep.D(hwdep.SkipOnModel("hana", "elm"), hwdep.InternalDisplay()),
ExtraSoftwareDeps: []string{"proprietary_codecs", "drm_atomic"},
ExtraData: []string{"crosvideo/1080.mp4"},
Fixture: "chromeVideoWithSWDecoding",
},
{
Name: "vp8_1080p_30fps_hw_long",
Val: playbackPerfParams{
fileName: "crosvideo/1080_vp8.webm",
decoderType: 0,
browserType: browser.TypeAsh,
measureRoughness: true,
},
ExtraHardwareDeps: hwdep.D(hwdep.SkipOnModel("hana", "elm"), hwdep.InternalDisplay()),
ExtraSoftwareDeps: []string{"autotest-capability:hw_dec_vp8_1080_30", "drm_atomic"},
ExtraData: []string{"crosvideo/1080_vp8.webm"},
Fixture: "chromeVideo",
},
{
Name: "vp8_1080p_30fps_sw_long",
Val: playbackPerfParams{
fileName: "crosvideo/1080_vp8.webm",
decoderType: 1,
browserType: browser.TypeAsh,
measureRoughness: true,
},
ExtraHardwareDeps: hwdep.D(hwdep.SkipOnModel("hana", "elm"), hwdep.InternalDisplay()),
ExtraSoftwareDeps: []string{"drm_atomic"},
ExtraData: []string{"crosvideo/1080_vp8.webm"},
Fixture: "chromeVideoWithSWDecoding",
},
{
Name: "vp9_1080p_30fps_hw_long",
Val: playbackPerfParams{
fileName: "crosvideo/1080.webm",
decoderType: 0,
browserType: browser.TypeAsh,
measureRoughness: true,
},
ExtraHardwareDeps: hwdep.D(hwdep.SkipOnModel("hana", "elm"), hwdep.InternalDisplay()),
ExtraSoftwareDeps: []string{"autotest-capability:hw_dec_vp9_1080_30", "drm_atomic"},
ExtraData: []string{"crosvideo/1080.webm"},
Fixture: "chromeVideo",
},
{
Name: "vp9_1080p_30fps_sw_long",
Val: playbackPerfParams{
fileName: "crosvideo/1080.webm",
decoderType: 1,
browserType: browser.TypeAsh,
measureRoughness: true,
},
ExtraHardwareDeps: hwdep.D(hwdep.SkipOnModel("hana", "elm"), hwdep.InternalDisplay()),
ExtraSoftwareDeps: []string{"drm_atomic"},
ExtraData: []string{"crosvideo/1080.webm"},
Fixture: "chromeVideoWithSWDecoding",
},
{
Name: "av1_1080p_30fps_hw_long",
Val: playbackPerfParams{
fileName: "crosvideo/av1_1080p_30fps.mp4",
decoderType: 0,
browserType: browser.TypeAsh,
measureRoughness: true,
},
ExtraHardwareDeps: hwdep.D(hwdep.SkipOnModel("hana", "elm"), hwdep.InternalDisplay()),
ExtraSoftwareDeps: []string{"autotest-capability:hw_dec_av1_1080_30", "drm_atomic"},
ExtraData: []string{"crosvideo/av1_1080p_30fps.mp4"},
Fixture: "chromeVideoWithHWAV1Decoding",
},
{
Name: "av1_1080p_30fps_sw_long",
Val: playbackPerfParams{
fileName: "crosvideo/av1_1080p_30fps.mp4",
decoderType: 1,
browserType: browser.TypeAsh,
measureRoughness: true,
},
ExtraHardwareDeps: hwdep.D(hwdep.SkipOnModel("hana", "elm"), hwdep.InternalDisplay()),
ExtraSoftwareDeps: []string{"drm_atomic"},
ExtraData: []string{"crosvideo/av1_1080p_30fps.mp4"},
Fixture: "chromeVideoWithSWDecoding",
},
{
Name: "h264_720p_30fps_hw_oopvd",
Val: playbackPerfParams{
fileName: "perf/h264/720p_30fps_300frames.h264.mp4",
decoderType: 0,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"proprietary_codecs", "autotest-capability:hw_dec_h264_1080_30"},
ExtraData: []string{"perf/h264/720p_30fps_300frames.h264.mp4"},
Fixture: "chromeVideoOOPVD",
},
{
Name: "h264_1080p_30fps_hw_oopvd",
Val: playbackPerfParams{
fileName: "perf/h264/1080p_30fps_300frames.h264.mp4",
decoderType: 0,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"proprietary_codecs", "autotest-capability:hw_dec_h264_1080_30"},
ExtraData: []string{"perf/h264/1080p_30fps_300frames.h264.mp4"},
Fixture: "chromeVideoOOPVD",
},
{
Name: "h264_1080p_60fps_hw_oopvd",
Val: playbackPerfParams{
fileName: "perf/h264/1080p_60fps_600frames.h264.mp4",
decoderType: 0,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"proprietary_codecs", "autotest-capability:hw_dec_h264_1080_60"},
ExtraData: []string{"perf/h264/1080p_60fps_600frames.h264.mp4"},
Fixture: "chromeVideoOOPVD",
},
{
Name: "h264_2160p_30fps_hw_oopvd",
Val: playbackPerfParams{
fileName: "perf/h264/2160p_30fps_300frames.h264.mp4",
decoderType: 0,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"proprietary_codecs", "autotest-capability:hw_dec_h264_2160_30"},
ExtraData: []string{"perf/h264/2160p_30fps_300frames.h264.mp4"},
Fixture: "chromeVideoOOPVD",
},
{
Name: "h264_2160p_60fps_hw_oopvd",
Val: playbackPerfParams{
fileName: "perf/h264/2160p_60fps_600frames.h264.mp4",
decoderType: 0,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"proprietary_codecs", "autotest-capability:hw_dec_h264_2160_60"},
ExtraData: []string{"perf/h264/2160p_60fps_600frames.h264.mp4"},
Fixture: "chromeVideoOOPVD",
},
{
Name: "h264_720p_30fps_hw_lacros_oopvd",
Val: playbackPerfParams{
fileName: "perf/h264/720p_30fps_300frames.h264.mp4",
decoderType: 0,
browserType: browser.TypeLacros,
},
ExtraSoftwareDeps: []string{"proprietary_codecs", "autotest-capability:hw_dec_h264_1080_30", "lacros"},
ExtraData: []string{"perf/h264/720p_30fps_300frames.h264.mp4"},
Fixture: "chromeVideoLacrosOOPVD",
},
{
Name: "h264_1080p_30fps_hw_lacros_oopvd",
Val: playbackPerfParams{
fileName: "perf/h264/1080p_30fps_300frames.h264.mp4",
decoderType: 0,
browserType: browser.TypeLacros,
},
ExtraSoftwareDeps: []string{"proprietary_codecs", "autotest-capability:hw_dec_h264_1080_30", "lacros"},
ExtraData: []string{"perf/h264/1080p_30fps_300frames.h264.mp4"},
Fixture: "chromeVideoLacrosOOPVD",
},
{
Name: "h264_1080p_60fps_hw_lacros_oopvd",
Val: playbackPerfParams{
fileName: "perf/h264/1080p_60fps_600frames.h264.mp4",
decoderType: 0,
browserType: browser.TypeLacros,
},
ExtraSoftwareDeps: []string{"proprietary_codecs", "autotest-capability:hw_dec_h264_1080_60", "lacros"},
ExtraData: []string{"perf/h264/1080p_60fps_600frames.h264.mp4"},
Fixture: "chromeVideoLacrosOOPVD",
},
{
Name: "h264_2160p_30fps_hw_lacros_oopvd",
Val: playbackPerfParams{
fileName: "perf/h264/2160p_30fps_300frames.h264.mp4",
decoderType: 0,
browserType: browser.TypeLacros,
},
ExtraSoftwareDeps: []string{"proprietary_codecs", "autotest-capability:hw_dec_h264_2160_30", "lacros"},
ExtraData: []string{"perf/h264/2160p_30fps_300frames.h264.mp4"},
Fixture: "chromeVideoLacrosOOPVD",
},
{
Name: "h264_2160p_60fps_hw_lacros_oopvd",
Val: playbackPerfParams{
fileName: "perf/h264/2160p_60fps_600frames.h264.mp4",
decoderType: 0,
browserType: browser.TypeLacros,
},
ExtraSoftwareDeps: []string{"proprietary_codecs", "autotest-capability:hw_dec_h264_2160_60", "lacros"},
ExtraData: []string{"perf/h264/2160p_60fps_600frames.h264.mp4"},
Fixture: "chromeVideoLacrosOOPVD",
},
{
Name: "h264_720p_30fps_hw_3x3",
Val: playbackPerfParams{
fileName: "perf/h264/720p_30fps_300frames.h264.mp4",
decoderType: 0,
browserType: browser.TypeAsh,
gridWidth: 3,
gridHeight: 3,
},
ExtraSoftwareDeps: []string{"proprietary_codecs", "autotest-capability:hw_dec_h264_1080_30"},
ExtraData: []string{"perf/h264/720p_30fps_300frames.h264.mp4"},
Fixture: "chromeVideo",
},
{
Name: "vp8_720p_30fps_hw_3x3",
Val: playbackPerfParams{
fileName: "perf/vp8/720p_30fps_300frames.vp8.webm",
decoderType: 0,
browserType: browser.TypeAsh,
gridWidth: 3,
gridHeight: 3,
},
ExtraSoftwareDeps: []string{"autotest-capability:hw_dec_vp8_1080_30"},
ExtraData: []string{"perf/vp8/720p_30fps_300frames.vp8.webm"},
Fixture: "chromeVideo",
},
{
Name: "vp9_720p_30fps_hw_3x3",
Val: playbackPerfParams{
fileName: "perf/vp9/720p_30fps_300frames.vp9.webm",
decoderType: 0,
browserType: browser.TypeAsh,
gridWidth: 3,
gridHeight: 3,
},
ExtraSoftwareDeps: []string{"autotest-capability:hw_dec_vp9_1080_30"},
ExtraData: []string{"perf/vp9/720p_30fps_300frames.vp9.webm"},
Fixture: "chromeVideo",
},
{
Name: "av1_720p_30fps_hw_3x3",
Val: playbackPerfParams{
fileName: "perf/av1/720p_30fps_300frames.av1.mp4",
decoderType: 0,
browserType: browser.TypeAsh,
gridWidth: 3,
gridHeight: 3,
},
ExtraSoftwareDeps: []string{"autotest-capability:hw_dec_av1_1080_30"},
ExtraData: []string{"perf/av1/720p_30fps_300frames.av1.mp4"},
Fixture: "chromeVideoWithHWAV1Decoding",
},
{
Name: "h264_720p_30fps_hw_lacros",
Val: playbackPerfParams{
fileName: "perf/h264/720p_30fps_300frames.h264.mp4",
decoderType: 0,
browserType: browser.TypeLacros,
},
ExtraSoftwareDeps: []string{"proprietary_codecs", "autotest-capability:hw_dec_h264_1080_30", "lacros"},
ExtraData: []string{"perf/h264/720p_30fps_300frames.h264.mp4"},
Fixture: "chromeVideoLacros",
},
{
Name: "vp9_720p_30fps_hw_lacros",
Val: playbackPerfParams{
fileName: "perf/vp9/720p_30fps_300frames.vp9.webm",
decoderType: 0,
browserType: browser.TypeLacros,
},
ExtraSoftwareDeps: []string{"autotest-capability:hw_dec_vp9_1080_30", "lacros"},
ExtraData: []string{"perf/vp9/720p_30fps_300frames.vp9.webm"},
Fixture: "chromeVideoLacros",
},
{
Name: "av1_720p_30fps_sw_gav1",
Val: playbackPerfParams{
fileName: "perf/av1/720p_30fps_300frames.av1.mp4",
decoderType: 2,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"arm"},
ExtraData: []string{"perf/av1/720p_30fps_300frames.av1.mp4"},
Fixture: "chromeVideoWithSWDecodingAndLibGAV1",
},
{
Name: "av1_720p_60fps_sw_gav1",
Val: playbackPerfParams{
fileName: "perf/av1/720p_60fps_600frames.av1.mp4",
decoderType: 2,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"arm"},
ExtraData: []string{"perf/av1/720p_60fps_600frames.av1.mp4"},
Fixture: "chromeVideoWithSWDecodingAndLibGAV1",
},
{
Name: "av1_1080p_30fps_sw_gav1",
Val: playbackPerfParams{
fileName: "perf/av1/1080p_30fps_300frames.av1.mp4",
decoderType: 2,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"arm"},
ExtraData: []string{"perf/av1/1080p_30fps_300frames.av1.mp4"},
Fixture: "chromeVideoWithSWDecodingAndLibGAV1",
},
{
Name: "av1_1080p_60fps_sw_gav1",
Val: playbackPerfParams{
fileName: "perf/av1/1080p_60fps_600frames.av1.mp4",
decoderType: 2,
browserType: browser.TypeAsh,
},
ExtraSoftwareDeps: []string{"arm"},
ExtraData: []string{"perf/av1/1080p_60fps_600frames.av1.mp4"},
Fixture: "chromeVideoWithSWDecodingAndLibGAV1",
},
{
Name: "h264_1080p_30fps_hw_x2_1threads",
Val: playbackPerfParams{
fileName: "perf/h264/1080p_30fps_300frames.h264.mp4",
decoderType: 0,
browserType: browser.TypeAsh,
gridWidth: 2,
gridHeight: 1,
perfTracing: true,
},
ExtraSoftwareDeps: []string{"proprietary_codecs", "autotest-capability:hw_dec_h264_1080_30", "thread_safe_libva_backend"},
ExtraData: []string{"perf/h264/1080p_30fps_300frames.h264.mp4"},
ExtraAttr: []string{"group:graphics", "graphics_video", "graphics_nightly"},
Fixture: "chromeVideoWith1DecoderThreadsAndGlobalVaapiLockDisabled",
},
{
Name: "h264_1080p_30fps_hw_x2_2threads",
Val: playbackPerfParams{
fileName: "perf/h264/1080p_30fps_300frames.h264.mp4",
decoderType: 0,
browserType: browser.TypeAsh,
gridWidth: 2,
gridHeight: 1,
perfTracing: true,
},
ExtraSoftwareDeps: []string{"proprietary_codecs", "autotest-capability:hw_dec_h264_1080_30", "thread_safe_libva_backend"},
ExtraData: []string{"perf/h264/1080p_30fps_300frames.h264.mp4"},
ExtraAttr: []string{"group:graphics", "graphics_video", "graphics_nightly"},
Fixture: "chromeVideoWith2DecoderThreadsAndGlobalVaapiLockDisabled",
},
{
Name: "h264_480p_30fps_hw_x9_1threads",
Val: playbackPerfParams{
fileName: "perf/h264/480p_30fps_300frames.h264.mp4",
decoderType: 0,
browserType: browser.TypeAsh,
gridWidth: 3,
gridHeight: 3,
perfTracing: true,
},
ExtraSoftwareDeps: []string{"proprietary_codecs", "autotest-capability:hw_dec_h264_1080_30", "thread_safe_libva_backend"},
ExtraData: []string{"perf/h264/480p_30fps_300frames.h264.mp4"},
ExtraAttr: []string{"group:graphics", "graphics_video", "graphics_nightly"},
Fixture: "chromeVideoWith1DecoderThreadsAndGlobalVaapiLockDisabled",
},
{
Name: "h264_480p_30fps_hw_x9_2threads",
Val: playbackPerfParams{
fileName: "perf/h264/480p_30fps_300frames.h264.mp4",
decoderType: 0,
browserType: browser.TypeAsh,
gridWidth: 3,
gridHeight: 3,
perfTracing: true,
},
ExtraSoftwareDeps: []string{"proprietary_codecs", "autotest-capability:hw_dec_h264_1080_30", "thread_safe_libva_backend"},
ExtraData: []string{"perf/h264/480p_30fps_300frames.h264.mp4"},
ExtraAttr: []string{"group:graphics", "graphics_video", "graphics_nightly"},
Fixture: "chromeVideoWith2DecoderThreadsAndGlobalVaapiLockDisabled",
},
{
Name: "h264_480p_30fps_hw_x9_4threads",
Val: playbackPerfParams{
fileName: "perf/h264/480p_30fps_300frames.h264.mp4",
decoderType: 0,
browserType: browser.TypeAsh,
gridWidth: 3,
gridHeight: 3,
perfTracing: true,
},
ExtraSoftwareDeps: []string{"proprietary_codecs", "autotest-capability:hw_dec_h264_1080_30", "thread_safe_libva_backend"},
ExtraData: []string{"perf/h264/480p_30fps_300frames.h264.mp4"},
ExtraAttr: []string{"group:graphics", "graphics_video", "graphics_nightly"},
Fixture: "chromeVideoWith4DecoderThreadsAndGlobalVaapiLockDisabled",
},
{
Name: "h264_480p_30fps_hw_x9_9threads",
Val: playbackPerfParams{
fileName: "perf/h264/480p_30fps_300frames.h264.mp4",
decoderType: 0,
browserType: browser.TypeAsh,
gridWidth: 3,
gridHeight: 3,
perfTracing: true,
},
ExtraSoftwareDeps: []string{"proprietary_codecs", "autotest-capability:hw_dec_h264_1080_30", "thread_safe_libva_backend"},
ExtraData: []string{"perf/h264/480p_30fps_300frames.h264.mp4"},
ExtraAttr: []string{"group:graphics", "graphics_video", "graphics_nightly"},
Fixture: "chromeVideoWith9DecoderThreadsAndGlobalVaapiLockDisabled",
},
{
Name: "h264_360p_30fps_hw_x16_1threads",
Val: playbackPerfParams{
fileName: "perf/h264/360p_30fps_300frames.h264.mp4",
decoderType: 0,
browserType: browser.TypeAsh,
gridWidth: 4,
gridHeight: 4,
perfTracing: true,
},
ExtraSoftwareDeps: []string{"proprietary_codecs", "autotest-capability:hw_dec_h264_1080_30", "thread_safe_libva_backend"},
ExtraData: []string{"perf/h264/360p_30fps_300frames.h264.mp4"},
ExtraAttr: []string{"group:graphics", "graphics_video", "graphics_nightly"},
Fixture: "chromeVideoWith1DecoderThreadsAndGlobalVaapiLockDisabled",
},
{
Name: "h264_360p_30fps_hw_x16_2threads",
Val: playbackPerfParams{
fileName: "perf/h264/360p_30fps_300frames.h264.mp4",
decoderType: 0,
browserType: browser.TypeAsh,
gridWidth: 4,
gridHeight: 4,
perfTracing: true,
},
ExtraSoftwareDeps: []string{"proprietary_codecs", "autotest-capability:hw_dec_h264_1080_30", "thread_safe_libva_backend"},
ExtraData: []string{"perf/h264/360p_30fps_300frames.h264.mp4"},
ExtraAttr: []string{"group:graphics", "graphics_video", "graphics_nightly"},
Fixture: "chromeVideoWith2DecoderThreadsAndGlobalVaapiLockDisabled",
},
{
Name: "h264_360p_30fps_hw_x16_4threads",
Val: playbackPerfParams{
fileName: "perf/h264/360p_30fps_300frames.h264.mp4",
decoderType: 0,
browserType: browser.TypeAsh,
gridWidth: 4,
gridHeight: 4,
perfTracing: true,
},
ExtraSoftwareDeps: []string{"proprietary_codecs", "autotest-capability:hw_dec_h264_1080_30", "thread_safe_libva_backend"},
ExtraData: []string{"perf/h264/360p_30fps_300frames.h264.mp4"},
ExtraAttr: []string{"group:graphics", "graphics_video", "graphics_nightly"},
Fixture: "chromeVideoWith4DecoderThreadsAndGlobalVaapiLockDisabled",
},
{
Name: "h264_360p_30fps_hw_x16_9threads",
Val: playbackPerfParams{
fileName: "perf/h264/360p_30fps_300frames.h264.mp4",
decoderType: 0,
browserType: browser.TypeAsh,
gridWidth: 4,
gridHeight: 4,
perfTracing: true,
},
ExtraSoftwareDeps: []string{"proprietary_codecs", "autotest-capability:hw_dec_h264_1080_30", "thread_safe_libva_backend"},
ExtraData: []string{"perf/h264/360p_30fps_300frames.h264.mp4"},
ExtraAttr: []string{"group:graphics", "graphics_video", "graphics_nightly"},
Fixture: "chromeVideoWith9DecoderThreadsAndGlobalVaapiLockDisabled",
},
{
Name: "h264_360p_30fps_hw_x16_16threads",
Val: playbackPerfParams{
fileName: "perf/h264/360p_30fps_300frames.h264.mp4",
decoderType: 0,
browserType: browser.TypeAsh,
gridWidth: 4,
gridHeight: 4,
perfTracing: true,
},
ExtraSoftwareDeps: []string{"proprietary_codecs", "autotest-capability:hw_dec_h264_1080_30", "thread_safe_libva_backend"},
ExtraData: []string{"perf/h264/360p_30fps_300frames.h264.mp4"},
ExtraAttr: []string{"group:graphics", "graphics_video", "graphics_nightly"},
Fixture: "chromeVideoWith16DecoderThreadsAndGlobalVaapiLockDisabled",
},
{
Name: "vp9_1080p_30fps_hw_x2_1threads",
Val: playbackPerfParams{
fileName: "perf/vp9/1080p_30fps_300frames.vp9.webm",
decoderType: 0,
browserType: browser.TypeAsh,
gridWidth: 2,
gridHeight: 1,
perfTracing: true,
},
ExtraSoftwareDeps: []string{"autotest-capability:hw_dec_vp9_1080_30", "thread_safe_libva_backend"},
ExtraData: []string{"perf/vp9/1080p_30fps_300frames.vp9.webm"},
ExtraAttr: []string{"group:graphics", "graphics_video", "graphics_nightly"},
Fixture: "chromeVideoWith1DecoderThreadsAndGlobalVaapiLockDisabled",
},
{
Name: "vp9_1080p_30fps_hw_x2_2threads",
Val: playbackPerfParams{
fileName: "perf/vp9/1080p_30fps_300frames.vp9.webm",
decoderType: 0,
browserType: browser.TypeAsh,
gridWidth: 2,
gridHeight: 1,
perfTracing: true,
},
ExtraSoftwareDeps: []string{"autotest-capability:hw_dec_vp9_1080_30", "thread_safe_libva_backend"},
ExtraData: []string{"perf/vp9/1080p_30fps_300frames.vp9.webm"},
ExtraAttr: []string{"group:graphics", "graphics_video", "graphics_nightly"},
Fixture: "chromeVideoWith2DecoderThreadsAndGlobalVaapiLockDisabled",
},
{
Name: "vp9_480p_30fps_hw_x9_1threads",
Val: playbackPerfParams{
fileName: "perf/vp9/480p_30fps_300frames.vp9.webm",
decoderType: 0,
browserType: browser.TypeAsh,
gridWidth: 3,
gridHeight: 3,
perfTracing: true,
},
ExtraSoftwareDeps: []string{"autotest-capability:hw_dec_vp9_1080_30", "thread_safe_libva_backend"},
ExtraData: []string{"perf/vp9/480p_30fps_300frames.vp9.webm"},
ExtraAttr: []string{"group:graphics", "graphics_video", "graphics_nightly"},
Fixture: "chromeVideoWith1DecoderThreadsAndGlobalVaapiLockDisabled",
},
{
Name: "vp9_480p_30fps_hw_x9_2threads",
Val: playbackPerfParams{
fileName: "perf/vp9/480p_30fps_300frames.vp9.webm",
decoderType: 0,
browserType: browser.TypeAsh,
gridWidth: 3,
gridHeight: 3,
perfTracing: true,
},
ExtraSoftwareDeps: []string{"autotest-capability:hw_dec_vp9_1080_30", "thread_safe_libva_backend"},
ExtraData: []string{"perf/vp9/480p_30fps_300frames.vp9.webm"},
ExtraAttr: []string{"group:graphics", "graphics_video", "graphics_nightly"},
Fixture: "chromeVideoWith2DecoderThreadsAndGlobalVaapiLockDisabled",
},
{
Name: "vp9_480p_30fps_hw_x9_4threads",
Val: playbackPerfParams{
fileName: "perf/vp9/480p_30fps_300frames.vp9.webm",
decoderType: 0,
browserType: browser.TypeAsh,
gridWidth: 3,
gridHeight: 3,
perfTracing: true,
},
ExtraSoftwareDeps: []string{"autotest-capability:hw_dec_vp9_1080_30", "thread_safe_libva_backend"},
ExtraData: []string{"perf/vp9/480p_30fps_300frames.vp9.webm"},
ExtraAttr: []string{"group:graphics", "graphics_video", "graphics_nightly"},
Fixture: "chromeVideoWith4DecoderThreadsAndGlobalVaapiLockDisabled",
},
{
Name: "vp9_480p_30fps_hw_x9_9threads",
Val: playbackPerfParams{
fileName: "perf/vp9/480p_30fps_300frames.vp9.webm",
decoderType: 0,
browserType: browser.TypeAsh,
gridWidth: 3,
gridHeight: 3,
perfTracing: true,
},
ExtraSoftwareDeps: []string{"autotest-capability:hw_dec_vp9_1080_30", "thread_safe_libva_backend"},
ExtraData: []string{"perf/vp9/480p_30fps_300frames.vp9.webm"},
ExtraAttr: []string{"group:graphics", "graphics_video", "graphics_nightly"},
Fixture: "chromeVideoWith9DecoderThreadsAndGlobalVaapiLockDisabled",
},
{
Name: "vp9_360p_30fps_hw_x16_1threads",
Val: playbackPerfParams{
fileName: "perf/vp9/360p_30fps_300frames.vp9.webm",
decoderType: 0,
browserType: browser.TypeAsh,
gridWidth: 4,
gridHeight: 4,
perfTracing: true,
},
ExtraSoftwareDeps: []string{"autotest-capability:hw_dec_vp9_1080_30", "thread_safe_libva_backend"},
ExtraData: []string{"perf/vp9/360p_30fps_300frames.vp9.webm"},
ExtraAttr: []string{"group:graphics", "graphics_video", "graphics_nightly"},
Fixture: "chromeVideoWith1DecoderThreadsAndGlobalVaapiLockDisabled",
},
{
Name: "vp9_360p_30fps_hw_x16_2threads",
Val: playbackPerfParams{
fileName: "perf/vp9/360p_30fps_300frames.vp9.webm",
decoderType: 0,
browserType: browser.TypeAsh,
gridWidth: 4,
gridHeight: 4,
perfTracing: true,
},
ExtraSoftwareDeps: []string{"autotest-capability:hw_dec_vp9_1080_30", "thread_safe_libva_backend"},
ExtraData: []string{"perf/vp9/360p_30fps_300frames.vp9.webm"},
ExtraAttr: []string{"group:graphics", "graphics_video", "graphics_nightly"},
Fixture: "chromeVideoWith2DecoderThreadsAndGlobalVaapiLockDisabled",
},
{
Name: "vp9_360p_30fps_hw_x16_4threads",
Val: playbackPerfParams{
fileName: "perf/vp9/360p_30fps_300frames.vp9.webm",
decoderType: 0,
browserType: browser.TypeAsh,
gridWidth: 4,
gridHeight: 4,
perfTracing: true,
},
ExtraSoftwareDeps: []string{"autotest-capability:hw_dec_vp9_1080_30", "thread_safe_libva_backend"},
ExtraData: []string{"perf/vp9/360p_30fps_300frames.vp9.webm"},
ExtraAttr: []string{"group:graphics", "graphics_video", "graphics_nightly"},
Fixture: "chromeVideoWith4DecoderThreadsAndGlobalVaapiLockDisabled",
},
{
Name: "vp9_360p_30fps_hw_x16_9threads",
Val: playbackPerfParams{
fileName: "perf/vp9/360p_30fps_300frames.vp9.webm",
decoderType: 0,
browserType: browser.TypeAsh,
gridWidth: 4,
gridHeight: 4,
perfTracing: true,
},
ExtraSoftwareDeps: []string{"autotest-capability:hw_dec_vp9_1080_30", "thread_safe_libva_backend"},
ExtraData: []string{"perf/vp9/360p_30fps_300frames.vp9.webm"},
ExtraAttr: []string{"group:graphics", "graphics_video", "graphics_nightly"},
Fixture: "chromeVideoWith9DecoderThreadsAndGlobalVaapiLockDisabled",
},
{
Name: "vp9_360p_30fps_hw_x16_16threads",
Val: playbackPerfParams{
fileName: "perf/vp9/360p_30fps_300frames.vp9.webm",
decoderType: 0,
browserType: browser.TypeAsh,
gridWidth: 4,
gridHeight: 4,
perfTracing: true,
},
ExtraSoftwareDeps: []string{"autotest-capability:hw_dec_vp9_1080_30", "thread_safe_libva_backend"},
ExtraData: []string{"perf/vp9/360p_30fps_300frames.vp9.webm"},
ExtraAttr: []string{"group:graphics", "graphics_video", "graphics_nightly"},
Fixture: "chromeVideoWith16DecoderThreadsAndGlobalVaapiLockDisabled",
},
},
})
}
// PlaybackPerf plays a video in the Chrome browser and measures the performance with or without
// HW decode acceleration as per DecoderType. The values are reported to the performance dashboard.
func PlaybackPerf(ctx context.Context, s *testing.State) {
	testOpt := s.Param().(playbackPerfParams)

	// Bring up the requested browser (ash or lacros). cs is the connection
	// source used by the playback helper to open test pages.
	_, l, cs, err := lacros.Setup(ctx, s.FixtValue(), testOpt.browserType)
	if err != nil {
		s.Fatal("Failed to initialize test: ", err)
	}
	defer lacros.CloseLacros(ctx, l)

	cr := s.FixtValue().(chrome.HasChrome).Chrome()
	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		s.Fatal("Failed to create Test API connection: ", err)
	}

	dispInfo, err := display.GetPrimaryInfo(ctx, tconn)
	if err != nil {
		s.Fatal("Failed to get primary display info: ", err)
	}

	// Auto-hide the shelf for the duration of the test and restore the
	// user's original setting on exit.
	origShelfBehavior, err := ash.GetShelfBehavior(ctx, tconn, dispInfo.ID)
	if err != nil {
		s.Fatal("Failed to get shelf behavior: ", err)
	}
	// NOTE: the previous message said "Never Auto Hide", contradicting the
	// ShelfBehaviorAlwaysAutoHide value actually being set; fixed here.
	if err := ash.SetShelfBehavior(ctx, tconn, dispInfo.ID, ash.ShelfBehaviorAlwaysAutoHide); err != nil {
		s.Fatal("Failed to set shelf behavior to Always Auto Hide: ", err)
	}
	// Best-effort restore; the test result does not depend on it.
	defer ash.SetShelfBehavior(ctx, tconn, dispInfo.ID, origShelfBehavior)

	playback.RunTest(ctx, s, cs, cr, testOpt.fileName, testOpt.decoderType,
		testOpt.gridWidth, testOpt.gridHeight, testOpt.perfTracing, testOpt.measureRoughness)
}
|
package acl
import (
"encoding/json"
"errors"
"fmt"
"github.com/xuperchain/xupercore/kernel/permission/acl/base"
actx "github.com/xuperchain/xupercore/kernel/permission/acl/context"
"github.com/xuperchain/xupercore/kernel/permission/acl/utils"
pb "github.com/xuperchain/xupercore/protos"
)
// Manager manages all ACL related data, providing read/write interface for ACL table
type Manager struct {
	// Ctx carries the chain name, ledger and contract registry the manager operates on.
	Ctx *actx.AclCtx
}
// NewACLManager create instance of ACLManager
func NewACLManager(ctx *actx.AclCtx) (base.AclManager, error) {
	// All collaborators are required; refuse to build a half-wired manager.
	if ctx == nil || ctx.Ledger == nil || ctx.Contract == nil || ctx.BcName == "" {
		return nil, fmt.Errorf("acl ctx set error")
	}

	newAccountGas, err := ctx.Ledger.GetNewAccountGas()
	if err != nil {
		return nil, fmt.Errorf("get create account gas failed.err:%v", err)
	}

	// Register the kernel contract methods and their shortcuts on this chain.
	method := NewKernContractMethod(ctx.BcName, newAccountGas)
	registry := ctx.Contract.GetKernRegistry()
	registry.RegisterKernMethod(utils.SubModName, "NewAccount", method.NewAccount)
	registry.RegisterKernMethod(utils.SubModName, "SetAccountAcl", method.SetAccountACL)
	registry.RegisterKernMethod(utils.SubModName, "SetMethodAcl", method.SetMethodACL)
	registry.RegisterShortcut("NewAccount", utils.SubModName, "NewAccount")
	registry.RegisterShortcut("SetAccountAcl", utils.SubModName, "SetAccountAcl")
	registry.RegisterShortcut("SetMethodAcl", utils.SubModName, "SetMethodAcl")

	return &Manager{Ctx: ctx}, nil
}
// GetAccountACL get acl of an account
func (mgr *Manager) GetAccountACL(accountName string) (*pb.Acl, error) {
	raw, err := mgr.GetObjectBySnapshot(utils.GetAccountBucket(), []byte(accountName))
	if err != nil {
		return nil, fmt.Errorf("query account acl failed.err:%v", err)
	}
	// No stored ACL for this account: "not found" is reported as (nil, nil).
	if len(raw) == 0 {
		return nil, nil
	}
	result := &pb.Acl{}
	if err := json.Unmarshal(raw, result); err != nil {
		return nil, fmt.Errorf("json unmarshal acl failed.acl:%s,err:%v", string(raw), err)
	}
	return result, nil
}
// GetContractMethodACL get acl of contract method
func (mgr *Manager) GetContractMethodACL(contractName, methodName string) (*pb.Acl, error) {
	key := utils.MakeContractMethodKey(contractName, methodName)
	raw, err := mgr.GetObjectBySnapshot(utils.GetContractBucket(), []byte(key))
	if err != nil {
		return nil, fmt.Errorf("query contract method acl failed.err:%v", err)
	}
	// No stored ACL for this method: "not found" is reported as (nil, nil).
	if len(raw) == 0 {
		return nil, nil
	}
	result := &pb.Acl{}
	if err := json.Unmarshal(raw, result); err != nil {
		return nil, fmt.Errorf("json unmarshal acl failed.acl:%s,err:%v", string(raw), err)
	}
	return result, nil
}
// GetAccountAddresses get the addresses belongs to contract account.
// A nil ACL (account not found) is passed through to getAddressesByACL.
func (mgr *Manager) GetAccountAddresses(accountName string) ([]string, error) {
	acl, err := mgr.GetAccountACL(accountName)
	if err != nil {
		return nil, err
	}
	return mgr.getAddressesByACL(acl)
}
// GetObjectBySnapshot reads the value stored under bucket/object from a
// snapshot of the ledger state, so the read is consistent.
func (mgr *Manager) GetObjectBySnapshot(bucket string, object []byte) ([]byte, error) {
	// Create a snapshot reader based on the tip block id.
	reader, err := mgr.Ctx.Ledger.GetTipXMSnapshotReader()
	if err != nil {
		return nil, err
	}
	return reader.Get(bucket, object)
}
// getAddressesByACL flattens an ACL into the list of addresses (AKs) it
// covers: the weighted AKs for threshold ACLs, or every AK of every set for
// AK-set ACLs. Unknown permission rules are rejected.
func (mgr *Manager) getAddressesByACL(acl *pb.Acl) ([]string, error) {
	addresses := make([]string, 0)
	switch acl.GetPm().GetRule() {
	case pb.PermissionRule_SIGN_THRESHOLD:
		for ak := range acl.GetAksWeight() {
			addresses = append(addresses, ak)
		}
	case pb.PermissionRule_SIGN_AKSET:
		for _, set := range acl.GetAkSets().GetSets() {
			addresses = append(addresses, set.GetAks()...)
		}
	default:
		// Lowercase per Go convention for error strings.
		return nil, errors.New("unknown permission rule")
	}
	return addresses, nil
}
|
package twoDimensionalSliceUnwinder
import (
"fmt"
"math/rand"
"strconv"
)
// Unwind builds a matrix from the given arguments and returns its elements
// "unwound" in clockwise spiral order.
func Unwind(args []string) []int {
	matrix := composeTwoDimensionalSlice(args)
	var result []int
	_, result = unwindTwoDimensionalSlice(matrix, result)
	return result
}
// unwindTwoDimensionalSlice appends the elements of twoDimensional to unwind
// in clockwise spiral order. It works recursively: consume the first row,
// then rotate the remainder (transpose + reverse rows) so its right column
// becomes the next "first row", and recurse. It returns the last processed
// matrix together with the accumulated spiral.
func unwindTwoDimensionalSlice(twoDimensional [][]int, unwind []int) ([][]int, []int) {
	head, tail := twoDimensional[0], twoDimensional[1:]
	unwind = append(unwind, head...)
	// Guard: a single-row matrix used to panic on tail[0] below.
	if len(tail) == 0 {
		return twoDimensional, unwind
	}
	// Transpose the remaining rows: rotated[col][row] = tail[row][col].
	rotated := make([][]int, len(tail[0]))
	for col := range rotated {
		rotated[col] = make([]int, len(tail))
		for row := range tail {
			rotated[col][row] = tail[row][col]
		}
	}
	// Reverse the transposed rows to complete a counter-clockwise rotation.
	for i, j := 0, len(rotated)-1; i < j; i, j = i+1, j-1 {
		rotated[i], rotated[j] = rotated[j], rotated[i]
	}
	// Base case: a single remaining row is appended directly.
	if len(rotated) == 1 {
		unwind = append(unwind, rotated[0]...)
		return rotated, unwind
	}
	return unwindTwoDimensionalSlice(rotated, unwind)
}
// composeTwoDimensionalSlice builds an n x n matrix of random digits, where n
// is parsed from the first argument. When no argument is given, the argument
// is not a number, or n < 2 (the spiral unwinder needs at least two rows),
// a fixed default matrix is returned instead.
func composeTwoDimensionalSlice(args []string) [][]int {
	if len(args) == 0 {
		return createDefaultTwoDimensionalSlice()
	}
	n, err := strconv.Atoi(args[0])
	// Reject n < 2 as well: n == 0 or negative would build an empty matrix
	// that panics downstream. Print the raw argument, not the parsed n,
	// which is just Atoi's zero value on a parse error.
	if err != nil || n < 2 {
		fmt.Printf("The input arg %v is incorrect", args[0])
		fmt.Println()
		return createDefaultTwoDimensionalSlice()
	}
	matrix := make([][]int, n)
	for i := range matrix {
		row := make([]int, n)
		for j := range row {
			row[j] = rand.Intn(10)
		}
		matrix[i] = row
	}
	fmt.Printf("The matrix %v was created", matrix)
	fmt.Println()
	return matrix
}
// createDefaultTwoDimensionalSlice returns a fixed 4x4 matrix used whenever
// the caller supplied no usable size argument, and logs it to stdout.
func createDefaultTwoDimensionalSlice() [][]int {
	defaultSlice := [][]int{
		{1, 2, 3, 1},
		{4, 5, 6, 4},
		{7, 8, 9, 7},
		{7, 8, 9, 7},
	}
	fmt.Println("Default 2D slice was created:")
	fmt.Println()
	fmt.Printf("%v", defaultSlice)
	fmt.Println()
	return defaultSlice
}
|
package emitter
import (
"github.com/olebedev/emitter"
)
// e is the package-level default emitter that every top-level helper in this
// package delegates to; New replaces it.
var e = &emitter.Emitter{}

// Re-export the emitter types so callers need not import the library directly.
type (
	Event = emitter.Event
	Group = emitter.Group
)
// New creates an emitter with the given listener channel capacity and
// installs it as the package-level default used by all other functions here.
func New(capacity uint) *emitter.Emitter {
	e = emitter.New(capacity)
	return e
}
// The functions below are thin wrappers delegating to the package-level
// default emitter e.

// Use registers middlewares for the pattern.
func Use(pattern string, middlewares ...func(*Event)) {
	e.Use(pattern, middlewares...)
}

// On returns a channel that will receive events. As optional second
// argument it takes middlewares.
func On(topic string, middlewares ...func(*Event)) <-chan Event {
	return e.On(topic, middlewares...)
}

// Once works exactly like On (see above) but with `Once` as the first middleware.
func Once(topic string, middlewares ...func(*Event)) <-chan Event {
	return e.Once(topic, middlewares...)
}

// Off unsubscribes all listeners which were covered by
// topic; it can be a pattern as well.
func Off(topic string, channels ...<-chan Event) {
	e.Off(topic, channels...)
}

// Listeners returns a slice of listeners which were covered by
// topic (it can be a pattern) and an error if the pattern is invalid.
func Listeners(topic string) []<-chan Event {
	return e.Listeners(topic)
}

// Topics returns all existing topics.
func Topics() []string {
	return e.Topics()
}

// Emit emits an event with the rest arguments to all
// listeners which were covered by topic (it can be a pattern).
func Emit(topic string, args ...interface{}) chan struct{} {
	return e.Emit(topic, args...)
}
// ResetFlag middleware resets an event's flags to their default state.
func ResetFlag(evt *Event) { evt.Flags = emitter.FlagReset }

// FlagOnce middleware sets the FlagOnce flag for an event.
func FlagOnce(evt *Event) { evt.Flags = evt.Flags | emitter.FlagOnce }

// FlagVoid middleware sets the FlagVoid flag for an event.
func FlagVoid(evt *Event) { evt.Flags = evt.Flags | emitter.FlagVoid }

// FlagSkip middleware sets the FlagSkip flag for an event.
func FlagSkip(evt *Event) { evt.Flags = evt.Flags | emitter.FlagSkip }

// FlagClose middleware sets the FlagClose flag for an event.
func FlagClose(evt *Event) { evt.Flags = evt.Flags | emitter.FlagClose }

// FlagSync middleware sets the FlagSync flag for an event.
func FlagSync(evt *Event) { evt.Flags = evt.Flags | emitter.FlagSync }
|
package commander
import (
"errors"
"fmt"
"net/http"
"strings"
"github.com/gempir/gempbot/internal/config"
"github.com/gempir/gempbot/internal/dto"
"github.com/gempir/gempbot/internal/helixclient"
"github.com/gempir/gempbot/internal/humanize"
"github.com/gempir/gempbot/internal/log"
"github.com/gempir/gempbot/internal/store"
"github.com/gempir/go-twitch-irc/v4"
"github.com/nicklaw5/helix/v2"
)
// Handler processes chat commands for managing Twitch predictions.
type Handler struct {
	cfg         *config.Config
	db          *store.Database
	helixClient helixclient.Client
	// chatSay sends a message to the given chat channel.
	chatSay func(channel, message string)
}
// NewHandler wires up a command Handler with its configuration, Helix client,
// database and chat output function.
func NewHandler(cfg *config.Config, helixClient helixclient.Client, db *store.Database, chatSay func(channel, message string)) *Handler {
	return &Handler{
		cfg:         cfg,
		db:          db,
		helixClient: helixClient,
		chatSay:     chatSay,
	}
}
// !prediction Will nymn win this game?;yes;no;3m --> yes;no;3m
// !prediction Will he win --> yes;no;1m
// !prediction Will he win;maybe --> maybe;no;1m

// HandleCommand dispatches an incoming command payload to the matching
// handler; unknown command names are silently ignored.
func (h *Handler) HandleCommand(payload dto.CommandPayload) {
	switch payload.Name {
	case dto.CmdNameOutcome:
		h.setOutcomeForPrediction(payload)
	case dto.CmdNamePrediction:
		h.handlePrediction(payload)
	}
}
// handlePrediction routes the !prediction command: "lock" and "cancel" act on
// the currently running prediction, anything else starts a new one.
func (h *Handler) handlePrediction(payload dto.CommandPayload) {
	switch strings.ToLower(payload.Query) {
	case "lock":
		h.lockOrCancelPrediction(payload, dto.PredictionStatusLocked)
	case "cancel":
		h.lockOrCancelPrediction(payload, dto.PredictionStatusCanceled)
	default:
		h.startPrediction(payload)
	}
}
// lockOrCancelPrediction ends the broadcaster's most recent prediction with
// the given status (locked or canceled). It temporarily uses the
// broadcaster's stored access token for the Helix call and reports any
// failure back to chat.
func (h *Handler) lockOrCancelPrediction(payload dto.CommandPayload, status string) {
	resp, err := h.helixClient.GetPredictions(&helix.PredictionsParams{BroadcasterID: payload.Msg.RoomID})
	if err != nil {
		log.Error(err)
		h.handleError(payload.Msg, err)
		return
	}
	// Guard: indexing Predictions[0] on an empty result used to panic.
	if len(resp.Data.Predictions) == 0 {
		h.handleError(payload.Msg, errors.New("no prediction found"))
		return
	}
	prediction := resp.Data.Predictions[0]

	token, err := h.db.GetUserAccessToken(payload.Msg.RoomID)
	if err != nil {
		h.handleError(payload.Msg, errors.New("no api token, broadcaster needs to login again in dashboard"))
		return
	}

	// Act as the broadcaster only for this one call, then drop the token.
	h.helixClient.SetUserAccessToken(token.AccessToken)
	resp, err = h.helixClient.EndPrediction(&helix.EndPredictionParams{BroadcasterID: payload.Msg.RoomID, ID: prediction.ID, Status: status})
	h.helixClient.SetUserAccessToken("")
	if err != nil {
		log.Error(err)
		h.handleError(payload.Msg, errors.New("bad twitch api response"))
		return
	}

	log.Infof("[helix] %d CancelOrLockPrediction %s", resp.StatusCode, payload.Msg.RoomID)
	if resp.StatusCode >= http.StatusBadRequest {
		h.handleError(payload.Msg, fmt.Errorf("bad twitch api response %s", resp.ErrorMessage))
		return
	}
}
// setOutcomeForPrediction resolves the broadcaster's most recent prediction.
// The winning outcome is matched by title (case-insensitive) or by 1-based
// index given in the command query.
func (h *Handler) setOutcomeForPrediction(payload dto.CommandPayload) {
	var winningOutcome helix.Outcomes
	resp, err := h.helixClient.GetPredictions(&helix.PredictionsParams{BroadcasterID: payload.Msg.RoomID})
	if err != nil {
		log.Error(err)
		h.handleError(payload.Msg, err)
		return
	}
	// Guard: indexing Predictions[0] on an empty result used to panic.
	if len(resp.Data.Predictions) == 0 {
		h.handleError(payload.Msg, errors.New("no prediction found"))
		return
	}
	prediction := resp.Data.Predictions[0]
	for index, outcome := range prediction.Outcomes {
		if strings.EqualFold(outcome.Title, payload.Query) || fmt.Sprintf("%d", index+1) == payload.Query {
			winningOutcome = outcome
			break
		}
	}
	if winningOutcome.ID == "" {
		h.handleError(payload.Msg, errors.New("outcome not found"))
		return
	}
	_, err = h.helixClient.EndPrediction(&helix.EndPredictionParams{BroadcasterID: payload.Msg.RoomID, ID: prediction.ID, Status: dto.PredictionStatusResolved, WinningOutcomeID: winningOutcome.ID})
	if err != nil {
		log.Error(err)
		h.handleError(payload.Msg, errors.New("bad twitch api response"))
		return
	}
}
// startPrediction parses "title;window;outcome;outcome..." from the command
// query and creates a new Twitch prediction. Missing parts get defaults:
// a 60 second window and yes/no outcomes.
func (h *Handler) startPrediction(payload dto.CommandPayload) {
	split := strings.Split(payload.Query, ";")

	// strings.Split always yields at least one element, so the old
	// len(split) < 1 check was dead code; check the trimmed title instead.
	title := strings.TrimSpace(split[0])
	if title == "" {
		h.handleError(payload.Msg, errors.New("no title given"))
		return
	}

	predictionWindow := 60
	if len(split) >= 2 {
		var err error
		predictionWindow, err = humanize.StringToSeconds(strings.TrimSpace(split[1]))
		if err != nil {
			log.Error(err)
			h.handleError(payload.Msg, errors.New("failed to parse time"))
			return
		}
	}

	outcomes := []helix.PredictionChoiceParam{}
	if len(split) >= 3 {
		for _, outcome := range split[2:] {
			outcomes = append(outcomes, helix.PredictionChoiceParam{
				Title: outcome,
			})
		}
	}
	// Pad to the two outcomes a prediction needs at minimum.
	if len(outcomes) == 0 {
		outcomes = append(outcomes, helix.PredictionChoiceParam{
			Title: "yes",
		})
	}
	if len(outcomes) == 1 {
		outcomes = append(outcomes, helix.PredictionChoiceParam{
			Title: "no",
		})
	}

	prediction := &helix.CreatePredictionParams{
		BroadcasterID:    payload.Msg.RoomID,
		Title:            title,
		Outcomes:         outcomes,
		PredictionWindow: predictionWindow,
	}
	_, err := h.helixClient.CreatePrediction(prediction)
	if err != nil {
		log.Error(err)
		// Pass the error through instead of errors.New(err.Error()),
		// which stripped the concrete type.
		h.handleError(payload.Msg, err)
		return
	}
}
// handleError reports err to the user in chat, mentioning them by display name.
func (h *Handler) handleError(msg twitch.PrivateMessage, err error) {
	h.chatSay(msg.Channel, fmt.Sprintf("@%s %s", msg.User.DisplayName, err))
}
|
package main
import (
"bytes"
"flag"
"goout"
"net"
)
// addr is the server bind address, set from the -addr flag in main.
var addr string
// handleTCP serves one proxy client connection. The client drives the proxy
// with HTTP-shaped requests: "/conn" dials the target host given in the
// body, "/send" forwards the body to the target, and "/" answers a health
// check. Data coming back from the target is streamed to the client as HTTP
// responses by a dedicated goroutine.
func handleTCP(tcp *net.TCPConn) {
	var ioBuffer bytes.Buffer
	var tcpWithTarget *net.TCPConn
	for {
		req, ok := goout.ParseHttpRequest(tcp, &ioBuffer)
		if !ok {
			tcp.Close()
			if tcpWithTarget != nil {
				tcpWithTarget.Close()
			}
			return
		}
		path := req.Url
		if path == "/conn" {
			targetHost := string(req.Body)
			tcpAddr, err := net.ResolveTCPAddr("tcp4", targetHost)
			if err != nil {
				return
			}
			// A repeated /conn on the same client connection is a protocol
			// violation: drop the existing target and give up.
			if tcpWithTarget != nil {
				tcpWithTarget.Close()
				return
			}
			tcpWithTarget, err = net.DialTCP("tcp4", nil, tcpAddr)
			if err != nil {
				return
			}
			_, err = goout.WriteHttpResponse(tcp, []byte("Done"))
			if err != nil {
				return
			}
			// Pump data coming back from the target to the proxy client.
			go func(target *net.TCPConn, proxyClient *net.TCPConn) {
				for {
					var buff [10485]byte
					//target.SetReadDeadline(time.Now().Add(time.Second * 300))
					n, err := target.Read(buff[:])
					if err != nil {
						target.Close()
						proxyClient.Close()
						return
					}
					_, err = goout.WriteHttpResponse(proxyClient, buff[:n])
					if err != nil {
						target.Close()
						proxyClient.Close()
						return
					}
				}
			}(tcpWithTarget, tcp)
		} else if path == "/send" {
			// Guard: /send before a successful /conn used to dereference a
			// nil target connection and panic.
			if tcpWithTarget == nil {
				tcp.Close()
				return
			}
			_, err := tcpWithTarget.Write(req.Body)
			if err != nil {
				tcpWithTarget.Close()
				tcp.Close()
				return
			}
		} else if path == "/" {
			goout.LogInfo(tcp.RemoteAddr().String() + "-" + tcp.LocalAddr().String())
			_, err := goout.WriteHttpResponseWithCt(tcp, []byte("Hello,GFW"), "text/plain; charset=utf-8")
			// Guard the nil case: a health check arrives before any /conn.
			if err != nil && tcpWithTarget != nil {
				tcpWithTarget.Close()
			}
			return
		}
	}
}
// startServer listens on addr and spawns one handler goroutine per accepted
// client connection. It never returns; setup failures panic.
func startServer() {
	// The resolve error was previously discarded, which crashed later with a
	// nil address on a malformed -addr value.
	ta, err := net.ResolveTCPAddr("tcp4", addr)
	if err != nil {
		goout.LogError(err)
		panic(err)
	}
	tc, err := net.ListenTCP("tcp4", ta)
	if err != nil {
		goout.LogError(err)
		panic(err)
	}
	for {
		client, err := tc.AcceptTCP()
		if err == nil && client != nil {
			go handleTCP(client)
		} else if client != nil {
			client.Close()
		}
	}
}
// main parses the -addr flag (default ":80") and runs the proxy server.
func main() {
	flag.StringVar(&addr, "addr", ":80", "server bind address")
	flag.Parse()
	startServer()
}
|
package levenshteinsearch
import "testing"
// TestSearch verifies SearchAll returns only words within the requested
// Levenshtein distance of the query.
func TestSearch(t *testing.T) {
	dict := CreateDictionary()
	dict.Put("banana")
	dict.Put("orange")
	dict.Put("monkey")

	// Exact word: only "banana" is within distance 1.
	result := dict.SearchAll("banana", 1)
	for word := range result {
		if word != "banana" {
			t.Errorf("searching 'banana' with distance 1: unexpected word %q", word)
		}
	}

	// One deletion away: still only "banana".
	result = dict.SearchAll("banan", 1)
	for word := range result {
		if word != "banana" {
			t.Errorf("searching 'banan' with distance 1: unexpected word %q", word)
		}
	}

	result = dict.SearchAll("a", 5)
	if len(result) != 2 {
		t.Errorf("searching 'a' with distance 5: expected 'banana' and 'orange', got %d results", len(result))
	}

	result = dict.SearchAll("a", 6)
	if len(result) != 3 {
		t.Errorf("searching 'a' with distance 6: expected all three words, got %d results", len(result))
	}
}
|
package indexer
import (
"fmt"
"sort"
log "github.com/sirupsen/logrus"
)
// MultipleDefinitionLoader aggregates several DefinitionLoaders and queries
// them all, preferring the freshest definition when more than one matches.
type MultipleDefinitionLoader []DefinitionLoader

// defaultMultiLoader returns the standard loader chain: the filesystem
// loader first, then the definitions embedded in the binary.
func defaultMultiLoader() *MultipleDefinitionLoader {
	return &MultipleDefinitionLoader{
		defaultFsLoader(),
		embeddedLoader(),
		// escLoader{http.Dir("")},
	}
}
// contains reports whether e is present in s.
func contains(s []string, e string) bool {
	for i := range s {
		if s[i] == e {
			return true
		}
	}
	return false
}
// List returns the sorted, de-duplicated union of the names every loader in
// the chain reports for the selector. Any loader error aborts the listing.
func (ml MultipleDefinitionLoader) List(selector *Selector) ([]string, error) {
	seen := map[string]struct{}{}
	for _, loader := range ml {
		names, err := loader.List(selector)
		if err != nil {
			return nil, err
		}
		for _, name := range names {
			seen[name] = struct{}{}
		}
	}
	results := make([]string, 0, len(seen))
	for name := range seen {
		results = append(results, name)
	}
	sort.Strings(results)
	log.WithFields(log.Fields{"results": results, "loader": ml}).
		Debug("Multiple definitions loader listed indexesCollection")
	return results, nil
}
// String renders the chain as "loaders[a, b, ...]" for use in log messages.
func (ml MultipleDefinitionLoader) String() string {
	str := ""
	for ix, loader := range ml {
		if ix > 0 {
			str += ", "
		}
		str += fmt.Sprintf("%s", loader)
	}
	return "loaders[" + str + "]"
}
// Load an indexer with the matching name.
// Every loader in the chain is consulted; when several can load the key, the
// definition with the newest modification time wins. ErrUnknownIndexer is
// returned when no loader succeeds.
func (ml MultipleDefinitionLoader) Load(key string) (*Definition, error) {
	var def *Definition
	// Go over each loader, until we reach the one that contains the definition for the indexer.
	for _, loader := range ml {
		if loader == nil {
			continue
		}
		loaded, err := loader.Load(key)
		if err != nil {
			// A per-loader failure is not fatal; keep trying the others.
			log.Debugf("Couldn't load the Index `%s` using %s. Error : %s\n", key, loader, err)
			continue
		}
		// If no definition is loaded so far, or the new one is newer, keep it.
		if def == nil || loaded.Stats().ModTime.After(def.Stats().ModTime) {
			def = loaded
		}
	}
	if def == nil {
		log.Infof("No loaders managed to load Index `%s` from any of these locations: \n", key)
		for _, ldr := range ml {
			log.Infof("%s\n", ldr)
		}
		return nil, ErrUnknownIndexer
	}
	return def, nil
}
|
package main
import "errors"
// Subscription modes and actions. FilterMode delivers only events matching
// the supplied conditions; AllMode applies to every subscription and is only
// valid for unsubscribe (see ValidateBasic).
const (
	EmptyMode  = ""
	FilterMode = "filter"
	AllMode    = "all"
	//action
	SubscribeAction   = "subscribe"
	UnsubscribeAction = "unsubscribe"
)
// Canned conditions and subscription requests for the supported topics.
var (
	// SpecificHeight matches transactions at block height 5.
	SpecificHeight = Condition{
		Key:       "tx.height",
		Operation: "Equal",
		Value:     5,
	}
	// SpecificHash matches the transaction with the given hash.
	SpecificHash = Condition{
		Key:       "tx.hash",
		Operation: "Equal",
		Value:     "4u2nd2",
	}
	// QueryAllTx subscribes to every transaction (no conditions).
	QueryAllTx = ActionMode{
		Mode:    "filter",
		Action:  "subscribe",
		Topic:   "Tx",
		Feature: nil,
	}
	QueryTxSpecificHeight = ActionMode{
		Mode:    "filter",
		Action:  "subscribe",
		Topic:   "Tx",
		Feature: []Condition{SpecificHeight},
	}
	QueryTxSpecificHash = ActionMode{
		Mode:    "filter",
		Action:  "subscribe",
		Topic:   "Tx",
		Feature: []Condition{SpecificHash},
	}
	QueryBlock = ActionMode{
		Mode:    "filter",
		Action:  "subscribe",
		Topic:   "NewBlock",
		Feature: nil,
	}
	QueryCompleteProposal = ActionMode{
		Mode:    "filter",
		Action:  "subscribe",
		Topic:   "CompleteProposal",
		Feature: nil,
	}
	// UnsubscribeAll drops every subscription (mode "all", empty topic).
	UnsubscribeAll = ActionMode{
		Mode:    "all",
		Action:  "unsubscribe",
		Topic:   "",
		Feature: nil,
	}
)
// Condition is a single filter predicate applied to an event attribute,
// e.g. key "tx.height" Operation "Equal" Value 5.
type Condition struct {
	Key       string      `json:"key"`
	Operation string      `json:"operation"`
	Value     interface{} `json:"value"`
}

// ActionMode is a subscription request: subscribe to or unsubscribe from a
// topic, optionally narrowed by filter conditions in Feature.
type ActionMode struct {
	Mode    string      `json:"mode"`
	Action  string      `json:"action"`
	Topic   string      `json:"topic"`
	Feature []Condition `json:"feature"`
}
// ValidateBasic checks the mode/action combination and defaults the mode of
// a subscribe request to FilterMode when left empty. It mutates am in the
// defaulting case.
func (am *ActionMode) ValidateBasic() (bool, error) {
	switch {
	case am.Action == SubscribeAction && am.Mode == AllMode:
		return false, errors.New("the mode of subscribe action can't be all")
	case am.Action == SubscribeAction && am.Mode == EmptyMode:
		am.Mode = FilterMode
	case am.Action == UnsubscribeAction && am.Mode == EmptyMode:
		return false, errors.New("the mode of unsubscribe action can't be empty")
	}
	return true, nil
}
|
package main
import (
"sort"
"strings"
"testing"
)
// TestHierarchy checks hierarchy against known parent-child pair inputs and
// their expected bracketed tree rendering.
func TestHierarchy(t *testing.T) {
	for k, v := range map[string]string{
		"ab | ae | bc": "a [b [c], e]",
		"ab | bc | cd | ae | cx | xz": "a [b [c [d, x [z]]], e]"} {
		if r := hierarchy(k); r != v {
			t.Errorf("failed: hierarchy %s is %s, got %s",
				k, v, r)
		}
	}
}
// uint8s implements sort.Interface for a slice of bytes.
type uint8s []uint8

func (s uint8s) Len() int { return len(s) }
func (s uint8s) Less(i, j int) bool {
	return s[i] < s[j]
}
func (s uint8s) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}
// contains reports whether b occurs in a.
func contains(a []uint8, b uint8) bool {
	for _, v := range a {
		if v == b {
			return true
		}
	}
	return false
}
// hierarchy renders a set of two-letter parent-child pairs (e.g. "ab" means
// a is b's parent) separated by " | " as a bracketed tree such as
// "a [b [c], e]". It assumes each letter appears as at most one node.
func hierarchy(q string) string {
	var root uint8
	// m maps a parent letter to its (later sorted) child letters.
	m := make(map[uint8][]uint8)
	parents, children := []uint8{}, []uint8{}
	s := strings.Split(q, " | ")
	for _, i := range s {
		m[i[0]] = append(m[i[0]], i[1])
		if !contains(parents, i[0]) {
			parents = append(parents, i[0])
		}
		if !contains(children, i[1]) {
			children = append(children, i[1])
		}
	}
	sort.Sort(uint8s(parents))
	sort.Sort(uint8s(children))
	for k := range m {
		sort.Sort(uint8s(m[k]))
	}
	// The root is the parent that never appears as anyone's child.
	for _, i := range parents {
		if !contains(children, i) {
			root = i
			break
		}
	}
	// Repeatedly expand the current node c inside the rendered string r by
	// replacing its letter with "letter [children...]", until every parent
	// recorded in m has been expanded.
	r, c := string(root), root
	children = []uint8{}
	for len(m) > 0 {
		t := string(c)
		if len(m[c]) >= 1 {
			children = append(children, m[c]...)
			s = make([]string, len(m[c]))
			for ix, i := range m[c] {
				s[ix] = string(i)
			}
			t += " [" + strings.Join(s, ", ") + "]"
		}
		r = strings.Replace(r, string(c), t, -1)
		delete(m, c)
		// Pick the next already-seen child that still has children of its own.
		for _, i := range children {
			if len(m[i]) > 0 {
				c = i
				break
			}
		}
	}
	return r
}
|
//
// Copyright (c) SAS Institute Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package signxap
import "github.com/sassoftware/relic/v7/lib/authenticode"
const (
	// trailerMagic marks the signature trailer of a signed XAP ("XapS" in ASCII).
	trailerMagic = 0x53706158 // XapS
)

var (
	// SpcUUIDSipInfoXap is the SIP (subject interface package) UUID used for
	// XAP packages in the Authenticode SpcSipInfo structure below.
	SpcUUIDSipInfoXap = []byte{0x6F, 0xA6, 0x08, 0xBA, 0x3B, 0x11, 0x58, 0x4D, 0x93, 0x29, 0xA1, 0xB3, 0x7A, 0xF3, 0x0F, 0x0E}
	xapSipInfo        = authenticode.SpcSipInfo{A: 1, UUID: SpcUUIDSipInfoXap}
)

// xapTrailer is the fixed-size trailer record of a signed XAP.
// NOTE(review): Unknown1's meaning is not established by this file — confirm
// against the format before relying on it.
type xapTrailer struct {
	Magic       uint32
	Unknown1    uint16
	TrailerSize uint32
}

// xapHeader precedes the signature blob and records its size.
// NOTE(review): Unknown1/Unknown2 semantics are not established by this file.
type xapHeader struct {
	Unknown1, Unknown2 uint16
	SignatureSize      uint32
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package gio contains functions and structs used for testing the gaming input overlay.
package gio
import (
"context"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"chromiumos/tast/common/action"
"chromiumos/tast/common/android/ui"
"chromiumos/tast/ctxutil"
"chromiumos/tast/errors"
"chromiumos/tast/local/arc"
"chromiumos/tast/local/bundles/cros/arc/inputlatency"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/ash"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/coords"
"chromiumos/tast/local/cpu"
"chromiumos/tast/local/cryptohome"
"chromiumos/tast/local/input"
"chromiumos/tast/local/screenshot"
"chromiumos/tast/testing"
)
const (
	apk = "ArcInputOverlayTest.apk"
	pkg = "org.chromium.arc.testapp.inputoverlay"
	cls = "org.chromium.arc.testapp.inputoverlay.MainActivity"
	// inputOverlayFilename is the name of the directory (under the user's
	// cryptohome) where input overlay files are stored.
	inputOverlayFilename = "google_gio"
	// cleanupOnErrorTime reserves time for cleanup in case of an error.
	cleanupOnErrorTime = time.Second * 30
	// errorMargin denotes the allowable +/- difference from the calculated x and
	// y coordinate.
	errorMargin = 3
	// WaitForActiveInputTime reserves time between and for hold-release controls
	// to ensure stability.
	WaitForActiveInputTime = time.Second
	// tapMode is the number of expected logcat lines from a tap event.
	tapMode mode = 2
	// moveMode is the number of expected logcat lines from a press-release event.
	moveMode mode = 3
)
var (
	// TopTap denotes the heuristics for the top tap input overlay mapping.
	TopTap = ButtonHeuristics{0.5, 0.5}
	// BotTap denotes the heuristic for the bottom tap input overlay mapping.
	BotTap = ButtonHeuristics{0.9, 0.9}
	// emptyHeuristic is used to pass in an empty ButtonHeuristics.
	emptyHeuristic = ButtonHeuristics{}
)

// ButtonHeuristics contains heuristics regarding the percentages on the ARC
// phone window where input mappings are located.
type ButtonHeuristics struct {
	// xHeuristic is the horizontal position as a fraction (0..1) of the window width.
	xHeuristic float64
	// yHeuristic is the vertical position as a fraction (0..1) of the window height.
	yHeuristic float64
}

// TestParams stores data common to the tests run in this package.
type TestParams struct {
	TestConn          *chrome.TestConn
	Arc               *arc.ARC
	Device            *ui.Device
	Activity          *arc.Activity
	ActivityStartTime time.Time
	// windowContentSize is the activity surface size (width, height) in pixels.
	windowContentSize coords.Point
	// lastTimestamp is the newest logcat timestamp already consumed; used to
	// reject stale log lines when polling for input feedback.
	lastTimestamp string
}

// mode specifies the type of tap event via the number of expected logcat lines.
type mode int

// coolDownConfig returns the config to wait for the machine to cooldown for game performance tests.
// This overrides the default config timeout (5 minutes) and temperature threshold (46 C)
// settings to reduce test flakes on low-end devices.
func coolDownConfig() cpu.CoolDownConfig {
	cdConfig := cpu.DefaultCoolDownConfig(cpu.CoolDownPreserveUI)
	cdConfig.PollTimeout = 7 * time.Minute
	cdConfig.TemperatureThreshold = 61000
	return cdConfig
}

// PerformTestFunc allows callers to run their desired test after a provided activity has been launched.
type PerformTestFunc func(params TestParams) (err error)
// SetupTestApp installs the input overlay test application, starts the activity, and defers to the caller to perform a test.
// It fatals the test state on any setup failure, takes screenshots on
// failure/exit, and cleans up overlay files and the activity afterwards.
func SetupTestApp(ctx context.Context, s *testing.State, testFunc PerformTestFunc) {
	// Shorten the test context so that even if the test times out
	// there will be time to clean up.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, cleanupOnErrorTime)
	defer cancel()
	// Pull out the common values.
	cr := s.FixtValue().(*arc.PreData).Chrome
	a := s.FixtValue().(*arc.PreData).ARC
	d, err := a.NewUIDevice(ctx)
	if err != nil {
		s.Fatal("Failed initializing UI Automator: ", err)
	}
	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		s.Fatal("Could not open Test API connection: ", err)
	}
	// Make sure the device is clamshell mode.
	tabletModeEnabled, err := ash.TabletModeEnabled(ctx, tconn)
	if err != nil {
		s.Fatal("Failed to get tablet mode: ", err)
	}
	if tabletModeEnabled {
		// Be nice and restore tablet mode to its original state on exit.
		defer ash.SetTabletModeEnabled(ctx, tconn, tabletModeEnabled)
		if err := ash.SetTabletModeEnabled(ctx, tconn, false); err != nil {
			s.Fatal("Failed to set tablet mode disabled: ", err)
		}
		// TODO(b/187788935): Wait for "tablet mode animation is finished" in a reliable way.
		// If an activity is launched while the tablet mode animation is active, the activity
		// will be launched in an undefined state, making the test flaky.
		if err := testing.Sleep(ctx, 5*time.Second); err != nil {
			s.Fatal("Failed to wait until tablet-mode animation finished: ", err)
		}
	}
	// Install the gaming input overlay test application.
	if err := a.Install(ctx, arc.APKPath(apk)); err != nil {
		s.Fatal("Failed installing ArcInputOverlayTest: ", err)
	}
	// Wait for the CPU to idle before performing the test.
	if _, err := cpu.WaitUntilCoolDown(ctx, coolDownConfig()); err != nil {
		s.Fatal("Failed to wait until CPU is cooled down: ", err)
	}
	// Take screenshot on failure.
	defer func(ctx context.Context) {
		if s.HasError() {
			captureScreenshot(ctx, s, cr, "failed-launch-test.png")
		}
	}(cleanupCtx)
	// Clear input overlay files.
	userPath, err := cryptohome.UserPath(ctx, cr.NormalizedUser())
	if err != nil {
		s.Fatal("Failed to get user's home directory path: ", err)
	}
	defer os.RemoveAll(filepath.Join(userPath, inputOverlayFilename))
	act, err := arc.NewActivity(a, pkg, cls)
	if err != nil {
		s.Fatal("Failed to create a new ArcInputOverlayTest activity: ", err)
	}
	defer act.Close()
	// Start timing and launch the activity.
	startTime := time.Now()
	if err := act.Start(ctx, tconn, arc.WithWindowingMode(arc.WindowingModeFreeform), arc.WithWaitForLaunch()); err != nil {
		s.Fatal("Failed to start ArcInputOverlayTest: ", err)
	}
	defer act.Stop(ctx, tconn)
	// Obtain window surface bounds.
	loc, err := act.SurfaceBounds(ctx)
	if err != nil {
		s.Error("Failed to obtain activity window bounds: ", err)
	}
	appWidth := loc.BottomRight().X - loc.TopLeft().X
	appHeight := loc.BottomRight().Y - loc.TopLeft().Y
	// Always take a screenshot of the final state for debugging purposes.
	// This is done with the cleanup context so the main flow is not interrupted.
	defer captureScreenshot(cleanupCtx, s, cr, "final-state.png")
	// Defer to the caller to determine when the game is launched.
	if err := testFunc(TestParams{
		TestConn:          tconn,
		Arc:               a,
		Device:            d,
		Activity:          act,
		ActivityStartTime: startTime,
		windowContentSize: coords.NewPoint(appWidth, appHeight),
		lastTimestamp:     "00:00:00.000",
	}); err != nil {
		s.Fatal("Failed to perform test: ", err)
	}
}
// CloseAndRelaunchActivity closes and reopens the test application again.
// The Activity pointed to by params is overwritten in place so callers keep
// using the same pointer afterwards.
func CloseAndRelaunchActivity(ctx context.Context, params *TestParams) error {
	// Close current test application instance.
	// NOTE(review): Stop's error is ignored here — presumably the activity may
	// already be stopped; confirm this is intentional.
	params.Activity.Stop(ctx, params.TestConn)
	// Relaunch another test application instance.
	act, err := arc.NewActivity(params.Arc, pkg, cls)
	if err != nil {
		return errors.Wrap(err, "failed to create a new ArcInputOverlayTest activity")
	}
	if err := act.StartWithDefaultOptions(ctx, params.TestConn); err != nil {
		return errors.Wrap(err, "failed to restart ArcInputOverlayTest")
	}
	// Reassign "Activity" field in params.
	*params.Activity = *act
	return nil
}
// MoveOverlayButton returns a function that takes in the given character corresponding
// to a move keystroke and returns an error if tapping the keystroke did not result in
// the correct feedback.
func MoveOverlayButton(kb *input.KeyboardEventWriter, key string, params *TestParams) action.Action {
	return func(ctx context.Context) error {
		// Hold and release given key, which is associated to an overlay button.
		if err := uiauto.Combine("Tap overlay keys and ensure proper behavior",
			kb.AccelPressAction(key),
			// Add a sleep for one second to simulate user behavior on a key press.
			action.Sleep(WaitForActiveInputTime),
			kb.AccelReleaseAction(key),
		)(ctx); err != nil {
			return errors.Wrapf(err, "hold and release key %s failed", key)
		}
		// Poll for move action pressed; return error if feedback not received correctly;
		// here, we do not check for correct tap location (hence emptyHeuristic).
		if err := pollTouchedCorrectly(ctx, params, moveMode, emptyHeuristic); err != nil {
			return errors.Wrapf(err, "failed to check key %s", key)
		}
		return nil
	}
}

// TapOverlayButton returns a function that takes in the given character corresponding
// to a tap keystroke and returns an error if tapping the keystroke did not result in
// the correct feedback at the location described by heuristic.
func TapOverlayButton(kb *input.KeyboardEventWriter, key string, params *TestParams, heuristic ButtonHeuristics) action.Action {
	return func(ctx context.Context) error {
		// Tap given key, which is associated to an overlay button.
		if err := kb.Type(ctx, key); err != nil {
			return errors.Wrap(err, "failed to type key")
		}
		// Poll for tap action pressed; return error if feedback not received correctly.
		if err := pollTouchedCorrectly(ctx, params, tapMode, heuristic); err != nil {
			return errors.Wrapf(err, "failed to check key %s", key)
		}
		return nil
	}
}
// PopulateReceivedTimes populates the given array of events with event
// timestamps, as presented in logcat. It reads the last numLines matching
// "InputOverlayPerf" lines and extracts the trailing nanosecond timestamp of
// each; EventTimeNS is left zero.
func PopulateReceivedTimes(ctx context.Context, params TestParams, numLines int) ([]inputlatency.InputEvent, error) {
	out, err := params.Arc.OutputLogcatGrep(ctx, "InputOverlayPerf")
	if err != nil {
		return nil, errors.Wrap(err, "failed to execute logcat command")
	}
	lines := strings.Split(strings.Replace(string(out), ",", "", -1), "\n")
	// Last line can be empty.
	if len(lines) > 0 && lines[len(lines)-1] == "" {
		lines = lines[:len(lines)-1]
	}
	// Guard before formatting: referencing lines[0] when logcat returned
	// nothing would panic.
	if len(lines) == 0 {
		return nil, errors.New("no matching lines returned by logcat")
	}
	// Make sure that the length of the array is at least as long as expected.
	if len(lines) < numLines {
		return nil, errors.Errorf("only %v lines returned by logcat: %s", len(lines), lines[0])
	}
	lines = lines[len(lines)-numLines:]
	/*
		An example line is shown below:
		"09-19 13:01:22.298 4049 4049 V InputOverlayPerf: ACTION_UP 4146633898335"
		For this test, all we do is to extract the timestamp shown at the end.
	*/
	events := make([]inputlatency.InputEvent, 0, numLines)
	for _, line := range lines {
		lineSplit := strings.Split(line, " ")
		timestamp, err := strconv.ParseInt(lineSplit[len(lineSplit)-1], 10, 64)
		if err != nil {
			return nil, errors.Wrap(err, "could not parse timestamp")
		}
		events = append(events, inputlatency.InputEvent{EventTimeNS: 0, RecvTimeNS: timestamp})
	}
	return events, nil
}
// pollTouchedCorrectly makes sure the feedback for a tap touch injection is correct.
// The mode parameter specifies whether three lines (move action) or two lines (tap action)
// should be checked for in logcat, while the x and y heuristic are percentages that
// denote the approximate location of an input overlay button on the screen, in
// the context of the Android phone window.
func pollTouchedCorrectly(ctx context.Context, params *TestParams, m mode, heuristic ButtonHeuristics) error {
	// Pull the app's "InputOverlayTest" motion-event lines from logcat.
	out, err := params.Arc.OutputLogcatGrep(ctx, "InputOverlayTest")
	if err != nil {
		return errors.Wrap(err, "failed to execute logcat command")
	}
	// Commas are stripped so tokens such as "x[0]=362.0," parse cleanly below.
	lines := strings.Split(strings.Replace(string(out), ",", "", -1), "\n")
	// Last line can be empty.
	if len(lines) > 0 && lines[len(lines)-1] == "" {
		lines = lines[:len(lines)-1]
	}
	// Make sure that the length of the array is at least as long as expected.
	if len(lines) < int(m) {
		return errors.Errorf("only %v lines returned by logcat: %s", len(lines), lines[0])
	}
	// Keep only the int(m) most recent lines, i.e. the events of the latest injection.
	lines = lines[len(lines)-int(m):]
	firstLine := strings.Split(lines[0], " ")
	/*
		An example line is shown below:
		"05-03 09:03:45.233 2634 2634 V InputOverlayTest: MotionEvent { action=ACTION_UP,
		actionButton=0, id[0]=0, x[0]=362.0, y[0]=642.0, toolType[0]=TOOL_TYPE_FINGER,
		buttonState=0, classification=NONE, metaState=META_NUM_LOCK_ON, flags=0x1,
		edgeFlags=0x0, pointerCount=1, historySize=0, eventTime=39410, downTime=39359,
		deviceId=1, source=0x1002, displayId=0 }"
		For this test, first of all, we care about the freshness of the log, which we
		can extract from the second item (e.g. "09:03:45.233"). We also want to make note
		of the action to verify that the correct sequence of actions took place. Finally,
		for the first line, we care about the coordinates of the tap location, which are
		given by the "x[0]=" and "y[0]=" elements. We parse this line to look for or
		obtain all the above information.
	*/
	// Check for fresh timestamp. NOTE(review): this compares "HH:MM:SS.mmm" strings
	// lexicographically, which only orders correctly within a single day — confirm acceptable.
	if firstLine[1] < params.lastTimestamp {
		return errors.New("action timestamp not fresh")
	}
	params.lastTimestamp = firstLine[1]
	// Check that the first line has "ACTION_DOWN" and the last line has "ACTION_UP".
	// (The previous comment had the two actions swapped; the code below is authoritative.)
	if !strings.Contains(lines[0], "ACTION_DOWN") {
		return errors.Errorf("ACTION_DOWN not found: %s", lines[0])
	}
	// Press-release buttons will also have an "ACTION_MOVE" motion event.
	if m == moveMode {
		if !strings.Contains(lines[1], "ACTION_MOVE") {
			return errors.Errorf("ACTION_MOVE not found: %s", lines[1])
		}
	}
	if !strings.Contains(lines[int(m)-1], "ACTION_UP") {
		return errors.Errorf("ACTION_UP not found: %s", lines[int(m)-1])
	}
	// No need to check positioning for joystick controls.
	if m == moveMode {
		return nil
	}
	// Get coordinate of tap reported in logcat for relative positioning.
	newPoint, err := parsePoint(firstLine)
	if err != nil {
		return errors.Wrapf(err, "failed to parse for a coordinate: %s", lines[0])
	}
	// Check that the tapped location is close enough.
	if err := confirmApproximateLocation(newPoint, params, heuristic); err != nil {
		return errors.Wrap(err, "failed to confirm approximate location")
	}
	return nil
}
// parsePoint returns the x and y coordinate contained within a logcat output line.
// The line is expected to be pre-split into space-separated tokens containing
// "x[0]=<val>" and "y[0]=<val>" entries.
func parsePoint(line []string) (coords.Point, error) {
	empty := coords.Point{}
	xIdx, yIdx := -1, -1
	for i, str := range line {
		switch {
		case strings.HasPrefix(str, "x[0]="):
			xIdx = i
		case strings.HasPrefix(str, "y[0]="):
			yIdx = i
		}
		// Previously the loop broke as soon as "y[0]=" was seen, which would have
		// missed an x token appearing after y; now stop only once both are found.
		if xIdx >= 0 && yIdx >= 0 {
			break
		}
	}
	if xIdx < 0 {
		return empty, errors.New("x coordinate for tap not found")
	}
	if yIdx < 0 {
		return empty, errors.New("y coordinate for tap not found")
	}
	// Both strings "x[0]=" and "y[0]=" are 5 characters long, and we exclude them
	// to extract the coordinate.
	x, err := strconv.ParseFloat(line[xIdx][5:], 32)
	if err != nil {
		return empty, errors.Wrap(err, "failed to parse x coordinate")
	}
	y, err := strconv.ParseFloat(line[yIdx][5:], 32)
	if err != nil {
		// Bug fix: this error previously said "x coordinate" for a y-parse failure.
		return empty, errors.Wrap(err, "failed to parse y coordinate")
	}
	return coords.NewPoint(int(x), int(y)), nil
}
// confirmApproximateLocation returns an error if the given point does not fall within the
// approximate location in the activity given by the heuristic parameters.
func confirmApproximateLocation(point coords.Point, params *TestParams, heuristic ButtonHeuristics) error {
	expectedX := int(float64(params.windowContentSize.X) * heuristic.xHeuristic)
	expectedY := int(float64(params.windowContentSize.Y) * heuristic.yHeuristic)
	if point.X < expectedX-errorMargin || point.X > expectedX+errorMargin {
		return errors.Errorf("x coordinate of tap (%d) not close enough to UI element on screen (%d)", point.X, expectedX)
	}
	if point.Y < expectedY-errorMargin || point.Y > expectedY+errorMargin {
		return errors.Errorf("y coordinate of tap (%d) not close enough to UI element on screen (%d)", point.Y, expectedY)
	}
	return nil
}
// captureScreenshot takes a screenshot and saves it with the provided filename.
// Screenshots aid debugging but are not essential to the test flow, so failures
// are logged rather than bubbled up.
func captureScreenshot(ctx context.Context, s *testing.State, cr *chrome.Chrome, filename string) {
	dest := filepath.Join(s.OutDir(), filename)
	err := screenshot.CaptureChrome(ctx, cr, dest)
	if err != nil {
		testing.ContextLog(ctx, "Failed to capture screenshot, info: ", err)
		return
	}
	testing.ContextLogf(ctx, "Saved screenshot to %s", filename)
}
|
/*
Copyright 2018 The Doctl Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package commands
import (
"fmt"
"strings"
"github.com/digitalocean/doctl"
"github.com/digitalocean/doctl/commands/displayers"
"github.com/digitalocean/doctl/do"
"github.com/digitalocean/godo"
"github.com/spf13/cobra"
)
// Defaults applied to `doctl databases create` flags when not supplied on the command line.
const (
	defaultDatabaseNodeSize  = "db-s-1vcpu-1gb" // smallest node size slug
	defaultDatabaseNodeCount = 1
	defaultDatabaseRegion    = "nyc1"
	defaultDatabaseEngine    = "pg" // PostgreSQL
)
// Databases creates the databases command and wires up all cluster-level
// subcommands (list/get/create/delete/connection/backups/resize/migrate) plus
// the replica/maintenance/user/db/pool/sql-mode command groups.
func Databases() *Command {
	cmd := &Command{
		Command: &cobra.Command{
			Use:     "databases",
			Aliases: []string{"db", "dbs", "d", "database"},
			Short:   "database commands",
			Long:    "database is used to access managed databases commands",
		},
	}

	CmdBuilder(cmd, RunDatabaseList, "list", "list database clusters", Writer, aliasOpt("ls"), displayerType(&displayers.Databases{}))
	CmdBuilder(cmd, RunDatabaseGet, "get <database-id>", "get a database cluster", Writer, aliasOpt("g"), displayerType(&displayers.Databases{}))

	createLongDesc := `create a database cluster
When creating a new database cluster, use the '--engine' flag to specify the
type. Use 'pg' for PostgreSQL, 'mysql' for MySQL, or 'redis' for Redis.
`
	cmdDatabaseCreate := CmdBuilder(cmd, RunDatabaseCreate, "create <name>", createLongDesc, Writer,
		aliasOpt("c"))
	AddIntFlag(cmdDatabaseCreate, doctl.ArgDatabaseNumNodes, "", defaultDatabaseNodeCount, "number of nodes in database cluster")
	AddStringFlag(cmdDatabaseCreate, doctl.ArgRegionSlug, "", defaultDatabaseRegion, "database region")
	AddStringFlag(cmdDatabaseCreate, doctl.ArgSizeSlug, "", defaultDatabaseNodeSize, "database size")
	AddStringFlag(cmdDatabaseCreate, doctl.ArgDatabaseEngine, "", defaultDatabaseEngine, "database engine")
	AddStringFlag(cmdDatabaseCreate, doctl.ArgVersion, "", "", "database engine version")
	AddStringFlag(cmdDatabaseCreate, doctl.ArgPrivateNetworkUUID, "", "", "private network uuid")

	cmdDatabaseDelete := CmdBuilder(cmd, RunDatabaseDelete, "delete <database-id>", "delete database cluster", Writer,
		aliasOpt("rm"))
	AddBoolFlag(cmdDatabaseDelete, doctl.ArgForce, doctl.ArgShortForce, false, "force database delete")

	CmdBuilder(cmd, RunDatabaseConnectionGet, "connection <database-id>", "get database cluster connection info", Writer,
		aliasOpt("conn"), displayerType(&displayers.DatabaseConnection{}))
	CmdBuilder(cmd, RunDatabaseBackupsList, "backups <database-id>", "list database cluster backups", Writer,
		aliasOpt("bu"), displayerType(&displayers.DatabaseBackups{}))

	cmdDatabaseResize := CmdBuilder(cmd, RunDatabaseResize, "resize <database-id>", "resize a database cluster", Writer,
		aliasOpt("rs"))
	AddIntFlag(cmdDatabaseResize, doctl.ArgDatabaseNumNodes, "", 0, "number of nodes in database cluster", requiredOpt())
	AddStringFlag(cmdDatabaseResize, doctl.ArgSizeSlug, "", "", "database size", requiredOpt())

	// Bug fix: usage string previously read "migrate <database-id" (missing '>').
	cmdDatabaseMigrate := CmdBuilder(cmd, RunDatabaseMigrate, "migrate <database-id>", "migrate a database cluster", Writer,
		aliasOpt("m"))
	AddStringFlag(cmdDatabaseMigrate, doctl.ArgRegionSlug, "", "", "new database region", requiredOpt())
	AddStringFlag(cmdDatabaseMigrate, doctl.ArgPrivateNetworkUUID, "", "", "private network uuid")

	cmd.AddCommand(databaseReplica())
	cmd.AddCommand(databaseMaintenanceWindow())
	cmd.AddCommand(databaseUser())
	cmd.AddCommand(databaseDB())
	cmd.AddCommand(databasePool())
	cmd.AddCommand(sqlMode())

	return cmd
}
// Clusters

// RunDatabaseList returns a list of database clusters.
func RunDatabaseList(c *CmdConfig) error {
	clusters, err := c.Databases().List()
	if err != nil {
		return err
	}
	return displayDatabases(c, true, clusters...)
}
// RunDatabaseGet returns an individual database cluster
func RunDatabaseGet(c *CmdConfig) error {
	if len(c.Args) < 1 {
		return doctl.NewMissingArgsErr(c.NS)
	}
	cluster, err := c.Databases().Get(c.Args[0])
	if err != nil {
		return err
	}
	return displayDatabases(c, false, *cluster)
}
// RunDatabaseCreate creates a database cluster
func RunDatabaseCreate(c *CmdConfig) error {
	if len(c.Args) < 1 {
		return doctl.NewMissingArgsErr(c.NS)
	}
	req, err := buildDatabaseCreateRequestFromArgs(c)
	if err != nil {
		return err
	}
	cluster, err := c.Databases().Create(req)
	if err != nil {
		return err
	}
	return displayDatabases(c, false, *cluster)
}
// buildDatabaseCreateRequestFromArgs assembles a cluster create request from
// the CLI flags; the cluster name comes from the first positional argument.
func buildDatabaseCreateRequestFromArgs(c *CmdConfig) (*godo.DatabaseCreateRequest, error) {
	region, err := c.Doit.GetString(c.NS, doctl.ArgRegionSlug)
	if err != nil {
		return nil, err
	}
	numNodes, err := c.Doit.GetInt(c.NS, doctl.ArgDatabaseNumNodes)
	if err != nil {
		return nil, err
	}
	size, err := c.Doit.GetString(c.NS, doctl.ArgSizeSlug)
	if err != nil {
		return nil, err
	}
	engine, err := c.Doit.GetString(c.NS, doctl.ArgDatabaseEngine)
	if err != nil {
		return nil, err
	}
	version, err := c.Doit.GetString(c.NS, doctl.ArgVersion)
	if err != nil {
		return nil, err
	}
	networkUUID, err := c.Doit.GetString(c.NS, doctl.ArgPrivateNetworkUUID)
	if err != nil {
		return nil, err
	}
	return &godo.DatabaseCreateRequest{
		Name:               c.Args[0],
		Region:             region,
		NumNodes:           numNodes,
		SizeSlug:           size,
		EngineSlug:         engine,
		Version:            version,
		PrivateNetworkUUID: networkUUID,
	}, nil
}
// RunDatabaseDelete deletes a database cluster
func RunDatabaseDelete(c *CmdConfig) error {
	if len(c.Args) < 1 {
		return doctl.NewMissingArgsErr(c.NS)
	}
	force, err := c.Doit.GetBool(c.NS, doctl.ArgForce)
	if err != nil {
		return err
	}
	// Ask for confirmation unless --force was given.
	if !force && AskForConfirm("delete this database cluster") != nil {
		return fmt.Errorf("operation aborted")
	}
	return c.Databases().Delete(c.Args[0])
}
// displayDatabases renders one or more database clusters with the configured displayer.
func displayDatabases(c *CmdConfig, short bool, dbs ...do.Database) error {
	return c.Display(&displayers.Databases{
		Databases: do.Databases(dbs),
		Short:     short,
	})
}
// RunDatabaseConnectionGet gets database connection info
func RunDatabaseConnectionGet(c *CmdConfig) error {
	if len(c.Args) < 1 {
		return doctl.NewMissingArgsErr(c.NS)
	}
	conn, err := c.Databases().GetConnection(c.Args[0])
	if err != nil {
		return err
	}
	return displayDatabaseConnection(c, *conn)
}
// displayDatabaseConnection renders connection details with the configured displayer.
func displayDatabaseConnection(c *CmdConfig, conn do.DatabaseConnection) error {
	return c.Display(&displayers.DatabaseConnection{DatabaseConnection: conn})
}
// RunDatabaseBackupsList lists all the backups for a database cluster
func RunDatabaseBackupsList(c *CmdConfig) error {
	if len(c.Args) < 1 {
		return doctl.NewMissingArgsErr(c.NS)
	}
	backups, err := c.Databases().ListBackups(c.Args[0])
	if err != nil {
		return err
	}
	return displayDatabaseBackups(c, backups)
}
// displayDatabaseBackups renders cluster backups with the configured displayer.
func displayDatabaseBackups(c *CmdConfig, bu do.DatabaseBackups) error {
	return c.Display(&displayers.DatabaseBackups{DatabaseBackups: bu})
}
// RunDatabaseResize resizes a database cluster
func RunDatabaseResize(c *CmdConfig) error {
	if len(c.Args) < 1 {
		return doctl.NewMissingArgsErr(c.NS)
	}
	req, err := buildDatabaseResizeRequestFromArgs(c)
	if err != nil {
		return err
	}
	return c.Databases().Resize(c.Args[0], req)
}
// buildDatabaseResizeRequestFromArgs assembles a resize request from the CLI flags.
func buildDatabaseResizeRequestFromArgs(c *CmdConfig) (*godo.DatabaseResizeRequest, error) {
	nodes, err := c.Doit.GetInt(c.NS, doctl.ArgDatabaseNumNodes)
	if err != nil {
		return nil, err
	}
	sizeSlug, err := c.Doit.GetString(c.NS, doctl.ArgSizeSlug)
	if err != nil {
		return nil, err
	}
	return &godo.DatabaseResizeRequest{
		NumNodes: nodes,
		SizeSlug: sizeSlug,
	}, nil
}
// RunDatabaseMigrate migrates a database cluster to a new region
func RunDatabaseMigrate(c *CmdConfig) error {
	if len(c.Args) < 1 {
		return doctl.NewMissingArgsErr(c.NS)
	}
	req, err := buildDatabaseMigrateRequestFromArgs(c)
	if err != nil {
		return err
	}
	return c.Databases().Migrate(c.Args[0], req)
}
// buildDatabaseMigrateRequestFromArgs assembles a migrate request from the CLI flags.
func buildDatabaseMigrateRequestFromArgs(c *CmdConfig) (*godo.DatabaseMigrateRequest, error) {
	region, err := c.Doit.GetString(c.NS, doctl.ArgRegionSlug)
	if err != nil {
		return nil, err
	}
	networkUUID, err := c.Doit.GetString(c.NS, doctl.ArgPrivateNetworkUUID)
	if err != nil {
		return nil, err
	}
	return &godo.DatabaseMigrateRequest{
		Region:             region,
		PrivateNetworkUUID: networkUUID,
	}, nil
}
// databaseMaintenanceWindow creates the maintenance-window command group
// (get/update) for a database cluster.
func databaseMaintenanceWindow() *Command {
	cmd := &Command{
		Command: &cobra.Command{
			Use:     "maintenance-window",
			Aliases: []string{"maintenance", "mw", "main"},
			Short:   "maintenance window commands",
			Long:    "maintenance is used to access maintenance window commands for a database cluster",
		},
	}

	CmdBuilder(cmd, RunDatabaseMaintenanceGet, "get <database-id>",
		"get maintenance window info", Writer, aliasOpt("g"),
		displayerType(&displayers.DatabaseMaintenanceWindow{}))

	// Renamed local (was cmdDatabaseCreate): this builder configures the
	// maintenance *update* subcommand, and the old name was misleading.
	cmdMaintenanceUpdate := CmdBuilder(cmd, RunDatabaseMaintenanceUpdate,
		"update <database-id>", "update maintenance window", Writer, aliasOpt("u"))
	AddStringFlag(cmdMaintenanceUpdate, doctl.ArgDatabaseMaintenanceDay, "", "",
		"new maintenance window day", requiredOpt())
	AddStringFlag(cmdMaintenanceUpdate, doctl.ArgDatabaseMaintenanceHour, "", "",
		"new maintenance window hour", requiredOpt())

	return cmd
}
// Database Maintenance Window

// RunDatabaseMaintenanceGet retrieves the maintenance window info for a database cluster
func RunDatabaseMaintenanceGet(c *CmdConfig) error {
	if len(c.Args) < 1 {
		return doctl.NewMissingArgsErr(c.NS)
	}
	window, err := c.Databases().GetMaintenance(c.Args[0])
	if err != nil {
		return err
	}
	return displayDatabaseMaintenanceWindow(c, *window)
}
// displayDatabaseMaintenanceWindow renders a maintenance window with the configured displayer.
func displayDatabaseMaintenanceWindow(c *CmdConfig, mw do.DatabaseMaintenanceWindow) error {
	return c.Display(&displayers.DatabaseMaintenanceWindow{DatabaseMaintenanceWindow: mw})
}
// RunDatabaseMaintenanceUpdate updates the maintenance window info for a database cluster
func RunDatabaseMaintenanceUpdate(c *CmdConfig) error {
	if len(c.Args) < 1 {
		return doctl.NewMissingArgsErr(c.NS)
	}
	req, err := buildDatabaseUpdateMaintenanceRequestFromArgs(c)
	if err != nil {
		return err
	}
	return c.Databases().UpdateMaintenance(c.Args[0], req)
}
// buildDatabaseUpdateMaintenanceRequestFromArgs assembles a maintenance-window
// update request from the CLI flags; the day is normalized to lowercase.
func buildDatabaseUpdateMaintenanceRequestFromArgs(c *CmdConfig) (*godo.DatabaseUpdateMaintenanceRequest, error) {
	day, err := c.Doit.GetString(c.NS, doctl.ArgDatabaseMaintenanceDay)
	if err != nil {
		return nil, err
	}
	hour, err := c.Doit.GetString(c.NS, doctl.ArgDatabaseMaintenanceHour)
	if err != nil {
		return nil, err
	}
	return &godo.DatabaseUpdateMaintenanceRequest{
		Day:  strings.ToLower(day),
		Hour: hour,
	}, nil
}
// databaseUser creates the user command group (list/get/create/delete)
// for a database cluster.
func databaseUser() *Command {
	cmd := &Command{
		Command: &cobra.Command{
			Use:     "user",
			Aliases: []string{"u"},
			Short:   "database user commands",
			Long:    "database is used to access database user commands",
		},
	}

	CmdBuilder(cmd, RunDatabaseUserList, "list <database-id>", "list database users",
		Writer, aliasOpt("ls"), displayerType(&displayers.DatabaseUsers{}))
	CmdBuilder(cmd, RunDatabaseUserGet, "get <database-id> <user-id>",
		"get a database user", Writer, aliasOpt("g"),
		displayerType(&displayers.DatabaseUsers{}))

	cmdDatabaseUserCreate := CmdBuilder(cmd, RunDatabaseUserCreate,
		"create <database-id> <user-name>", "create a database user", Writer, aliasOpt("c"))
	AddStringFlag(cmdDatabaseUserCreate, doctl.ArgDatabaseUserMySQLAuthPlugin, "", "",
		"set auth mode for MySQL users")

	// Bug fix: the delete subcommand was described as "delete database cluster".
	cmdDatabaseUserDelete := CmdBuilder(cmd, RunDatabaseUserDelete,
		"delete <database-id> <user-id>", "delete database user",
		Writer, aliasOpt("rm"))
	AddBoolFlag(cmdDatabaseUserDelete, doctl.ArgForce, doctl.ArgShortForce, false, "force database user delete")

	return cmd
}
// Database Users

// RunDatabaseUserList retrieves a list of users for specific database cluster
func RunDatabaseUserList(c *CmdConfig) error {
	if len(c.Args) < 1 {
		return doctl.NewMissingArgsErr(c.NS)
	}
	users, err := c.Databases().ListUsers(c.Args[0])
	if err != nil {
		return err
	}
	return displayDatabaseUsers(c, users...)
}
// RunDatabaseUserGet retrieves a database user for a specific database cluster
func RunDatabaseUserGet(c *CmdConfig) error {
	if len(c.Args) < 2 {
		return doctl.NewMissingArgsErr(c.NS)
	}
	clusterID, userID := c.Args[0], c.Args[1]
	user, err := c.Databases().GetUser(clusterID, userID)
	if err != nil {
		return err
	}
	return displayDatabaseUsers(c, *user)
}
// RunDatabaseUserCreate creates a database user for a database cluster
func RunDatabaseUserCreate(c *CmdConfig) error {
	if len(c.Args) < 2 {
		return doctl.NewMissingArgsErr(c.NS)
	}
	clusterID, userName := c.Args[0], c.Args[1]

	req := &godo.DatabaseCreateUserRequest{Name: userName}
	// MySQL clusters may specify an auth plugin for the new user.
	authMode, err := c.Doit.GetString(c.NS, doctl.ArgDatabaseUserMySQLAuthPlugin)
	if err != nil {
		return err
	}
	if authMode != "" {
		req.MySQLSettings = &godo.DatabaseMySQLUserSettings{AuthPlugin: authMode}
	}

	user, err := c.Databases().CreateUser(clusterID, req)
	if err != nil {
		return err
	}
	return displayDatabaseUsers(c, *user)
}
// RunDatabaseUserDelete deletes a database user
func RunDatabaseUserDelete(c *CmdConfig) error {
	if len(c.Args) < 2 {
		return doctl.NewMissingArgsErr(c.NS)
	}
	force, err := c.Doit.GetBool(c.NS, doctl.ArgForce)
	if err != nil {
		return err
	}
	// Ask for confirmation unless --force was given.
	if !force && AskForConfirm("delete this database user") != nil {
		return fmt.Errorf("operation aborted")
	}
	return c.Databases().DeleteUser(c.Args[0], c.Args[1])
}
// displayDatabaseUsers renders one or more database users with the configured displayer.
func displayDatabaseUsers(c *CmdConfig, users ...do.DatabaseUser) error {
	return c.Display(&displayers.DatabaseUsers{DatabaseUsers: users})
}
// databasePool creates the connection-pool command group (list/get/create/delete)
// for a database cluster.
func databasePool() *Command {
	cmd := &Command{
		Command: &cobra.Command{
			Use:     "pool",
			Aliases: []string{"p"},
			Short:   "database pool commands",
			Long:    "database is used to access database pool commands",
		},
	}

	CmdBuilder(cmd, RunDatabasePoolList, "list <database-id>", "list database pools",
		Writer, aliasOpt("ls"), displayerType(&displayers.DatabasePools{}))
	CmdBuilder(cmd, RunDatabasePoolGet, "get <database-id> <pool-name>",
		"get a database pool", Writer, aliasOpt("g"),
		displayerType(&displayers.DatabasePools{}))

	cmdDatabasePoolCreate := CmdBuilder(cmd, RunDatabasePoolCreate,
		"create <database-id> <pool-name>", "create a database pool", Writer,
		aliasOpt("c"))
	AddStringFlag(cmdDatabasePoolCreate, doctl.ArgDatabasePoolMode, "",
		"transaction", "pool mode")
	// Bug fix: this flag was registered under doctl.ArgSizeSlug, but
	// buildDatabaseCreatePoolRequestFromArgs reads doctl.ArgDatabasePoolSize;
	// register it under the same constant the builder looks up.
	AddIntFlag(cmdDatabasePoolCreate, doctl.ArgDatabasePoolSize, "", 0, "pool size",
		requiredOpt())
	AddStringFlag(cmdDatabasePoolCreate, doctl.ArgDatabasePoolUserName, "", "",
		"database user name", requiredOpt())
	AddStringFlag(cmdDatabasePoolCreate, doctl.ArgDatabasePoolDBName, "", "",
		"database db name", requiredOpt())

	// Bug fix: the delete subcommand was described as "delete database cluster".
	cmdDatabasePoolDelete := CmdBuilder(cmd, RunDatabasePoolDelete,
		"delete <database-id> <pool-name>", "delete database pool", Writer,
		aliasOpt("rm"))
	AddBoolFlag(cmdDatabasePoolDelete, doctl.ArgForce, doctl.ArgShortForce,
		false, "force database pool delete")

	return cmd
}
// Database Pools

// RunDatabasePoolList retrieves a list of pools for specific database cluster
func RunDatabasePoolList(c *CmdConfig) error {
	if len(c.Args) < 1 {
		return doctl.NewMissingArgsErr(c.NS)
	}
	pools, err := c.Databases().ListPools(c.Args[0])
	if err != nil {
		return err
	}
	return displayDatabasePools(c, pools...)
}
// RunDatabasePoolGet retrieves a database pool for a specific database cluster
func RunDatabasePoolGet(c *CmdConfig) error {
	if len(c.Args) < 2 {
		return doctl.NewMissingArgsErr(c.NS)
	}
	clusterID, poolName := c.Args[0], c.Args[1]
	pool, err := c.Databases().GetPool(clusterID, poolName)
	if err != nil {
		return err
	}
	return displayDatabasePools(c, *pool)
}
// RunDatabasePoolCreate creates a database pool for a database cluster
func RunDatabasePoolCreate(c *CmdConfig) error {
	if len(c.Args) < 2 {
		return doctl.NewMissingArgsErr(c.NS)
	}
	req, err := buildDatabaseCreatePoolRequestFromArgs(c)
	if err != nil {
		return err
	}
	pool, err := c.Databases().CreatePool(c.Args[0], req)
	if err != nil {
		return err
	}
	return displayDatabasePools(c, *pool)
}
// buildDatabaseCreatePoolRequestFromArgs assembles a pool create request from
// the CLI flags; the pool name comes from the second positional argument.
func buildDatabaseCreatePoolRequestFromArgs(c *CmdConfig) (*godo.DatabaseCreatePoolRequest, error) {
	mode, err := c.Doit.GetString(c.NS, doctl.ArgDatabasePoolMode)
	if err != nil {
		return nil, err
	}
	size, err := c.Doit.GetInt(c.NS, doctl.ArgDatabasePoolSize)
	if err != nil {
		return nil, err
	}
	dbName, err := c.Doit.GetString(c.NS, doctl.ArgDatabasePoolDBName)
	if err != nil {
		return nil, err
	}
	userName, err := c.Doit.GetString(c.NS, doctl.ArgDatabasePoolUserName)
	if err != nil {
		return nil, err
	}
	return &godo.DatabaseCreatePoolRequest{
		Name:     c.Args[1],
		Mode:     mode,
		Size:     size,
		Database: dbName,
		User:     userName,
	}, nil
}
// RunDatabasePoolDelete deletes a database pool
func RunDatabasePoolDelete(c *CmdConfig) error {
	if len(c.Args) < 2 {
		return doctl.NewMissingArgsErr(c.NS)
	}
	force, err := c.Doit.GetBool(c.NS, doctl.ArgForce)
	if err != nil {
		return err
	}
	// Ask for confirmation unless --force was given.
	if !force && AskForConfirm("delete this database pool") != nil {
		return fmt.Errorf("operation aborted")
	}
	return c.Databases().DeletePool(c.Args[0], c.Args[1])
}
// displayDatabasePools renders one or more database pools with the configured displayer.
func displayDatabasePools(c *CmdConfig, pools ...do.DatabasePool) error {
	return c.Display(&displayers.DatabasePools{DatabasePools: pools})
}
// databaseDB creates the db command group (list/get/create/delete) for the
// individual databases inside a cluster.
func databaseDB() *Command {
	cmd := &Command{
		Command: &cobra.Command{
			Use:   "db",
			Short: "database db commands",
			Long:  "database is used to access database db commands",
		},
	}

	CmdBuilder(cmd, RunDatabaseDBList, "list <database-id>", "list dbs", Writer,
		aliasOpt("ls"), displayerType(&displayers.DatabaseDBs{}))
	CmdBuilder(cmd, RunDatabaseDBGet, "get <database-id> <db-name>", "get a db",
		Writer, aliasOpt("g"), displayerType(&displayers.DatabaseDBs{}))
	CmdBuilder(cmd, RunDatabaseDBCreate, "create <database-id> <db-name>",
		"create a db", Writer, aliasOpt("c"))

	deleteCmd := CmdBuilder(cmd, RunDatabaseDBDelete,
		"delete <database-id> <db-name>", "delete db", Writer, aliasOpt("rm"))
	AddBoolFlag(deleteCmd, doctl.ArgForce, doctl.ArgShortForce,
		false, "force database delete")

	return cmd
}
// Database DBs

// RunDatabaseDBList retrieves a list of databases for specific database cluster
func RunDatabaseDBList(c *CmdConfig) error {
	if len(c.Args) < 1 {
		return doctl.NewMissingArgsErr(c.NS)
	}
	databases, err := c.Databases().ListDBs(c.Args[0])
	if err != nil {
		return err
	}
	return displayDatabaseDBs(c, databases...)
}
// RunDatabaseDBGet retrieves a database for a specific database cluster
func RunDatabaseDBGet(c *CmdConfig) error {
	if len(c.Args) < 2 {
		return doctl.NewMissingArgsErr(c.NS)
	}
	clusterID, dbName := c.Args[0], c.Args[1]
	db, err := c.Databases().GetDB(clusterID, dbName)
	if err != nil {
		return err
	}
	return displayDatabaseDBs(c, *db)
}
// RunDatabaseDBCreate creates a database for a database cluster
func RunDatabaseDBCreate(c *CmdConfig) error {
	if len(c.Args) < 2 {
		return doctl.NewMissingArgsErr(c.NS)
	}
	clusterID := c.Args[0]
	db, err := c.Databases().CreateDB(clusterID, &godo.DatabaseCreateDBRequest{Name: c.Args[1]})
	if err != nil {
		return err
	}
	return displayDatabaseDBs(c, *db)
}
// RunDatabaseDBDelete deletes a database
func RunDatabaseDBDelete(c *CmdConfig) error {
	if len(c.Args) < 2 {
		return doctl.NewMissingArgsErr(c.NS)
	}
	force, err := c.Doit.GetBool(c.NS, doctl.ArgForce)
	if err != nil {
		return err
	}
	// Ask for confirmation unless --force was given.
	if !force && AskForConfirm("delete this database db") != nil {
		return fmt.Errorf("operation aborted")
	}
	return c.Databases().DeleteDB(c.Args[0], c.Args[1])
}
// displayDatabaseDBs renders one or more dbs with the configured displayer.
func displayDatabaseDBs(c *CmdConfig, dbs ...do.DatabaseDB) error {
	return c.Display(&displayers.DatabaseDBs{DatabaseDBs: dbs})
}
// databaseReplica creates the read-only replica command group
// (list/get/create/delete/connection) for a database cluster.
func databaseReplica() *Command {
	cmd := &Command{
		Command: &cobra.Command{
			Use:     "replica",
			Aliases: []string{"rep", "r"},
			Short:   "database replica commands",
			Long:    "database is used to access database replica commands",
		},
	}

	CmdBuilder(cmd, RunDatabaseReplicaList, "list <database-id>",
		"list database replicas", Writer, aliasOpt("ls"),
		displayerType(&displayers.DatabaseReplicas{}))
	CmdBuilder(cmd, RunDatabaseReplicaGet, "get <database-id> <replica-name>",
		"get a database replica", Writer, aliasOpt("g"),
		displayerType(&displayers.DatabaseReplicas{}))

	createCmd := CmdBuilder(cmd, RunDatabaseReplicaCreate,
		"create <database-id> <replica-name>", "create a database replica",
		Writer, aliasOpt("c"))
	AddStringFlag(createCmd, doctl.ArgRegionSlug, "",
		defaultDatabaseRegion, "database replica region")
	AddStringFlag(createCmd, doctl.ArgSizeSlug, "",
		defaultDatabaseNodeSize, "database replica size")
	AddStringFlag(createCmd, doctl.ArgPrivateNetworkUUID, "",
		"", "private network uuid")

	deleteCmd := CmdBuilder(cmd, RunDatabaseReplicaDelete,
		"delete <database-id> <replica-name>", "delete database replica",
		Writer, aliasOpt("rm"))
	AddBoolFlag(deleteCmd, doctl.ArgForce, doctl.ArgShortForce,
		false, "force database delete")

	CmdBuilder(cmd, RunDatabaseReplicaConnectionGet,
		"connection <database-id> <replica-name>",
		"get database replica connection info", Writer, aliasOpt("conn"))

	return cmd
}
// Database Replicas

// RunDatabaseReplicaList retrieves a list of replicas for specific database cluster
func RunDatabaseReplicaList(c *CmdConfig) error {
	if len(c.Args) < 1 {
		return doctl.NewMissingArgsErr(c.NS)
	}
	replicas, err := c.Databases().ListReplicas(c.Args[0])
	if err != nil {
		return err
	}
	return displayDatabaseReplicas(c, true, replicas...)
}
// RunDatabaseReplicaGet retrieves a read-only replica for a specific database cluster
func RunDatabaseReplicaGet(c *CmdConfig) error {
	if len(c.Args) < 2 {
		return doctl.NewMissingArgsErr(c.NS)
	}
	clusterID, replicaName := c.Args[0], c.Args[1]
	replica, err := c.Databases().GetReplica(clusterID, replicaName)
	if err != nil {
		return err
	}
	return displayDatabaseReplicas(c, false, *replica)
}
// RunDatabaseReplicaCreate creates a read-only replica for a database cluster
func RunDatabaseReplicaCreate(c *CmdConfig) error {
	if len(c.Args) < 2 {
		return doctl.NewMissingArgsErr(c.NS)
	}
	req, err := buildDatabaseCreateReplicaRequestFromArgs(c)
	if err != nil {
		return err
	}
	replica, err := c.Databases().CreateReplica(c.Args[0], req)
	if err != nil {
		return err
	}
	return displayDatabaseReplicas(c, false, *replica)
}
// buildDatabaseCreateReplicaRequestFromArgs assembles a replica create request
// from the CLI flags; the replica name comes from the second positional argument.
func buildDatabaseCreateReplicaRequestFromArgs(c *CmdConfig) (*godo.DatabaseCreateReplicaRequest, error) {
	size, err := c.Doit.GetString(c.NS, doctl.ArgSizeSlug)
	if err != nil {
		return nil, err
	}
	region, err := c.Doit.GetString(c.NS, doctl.ArgRegionSlug)
	if err != nil {
		return nil, err
	}
	networkUUID, err := c.Doit.GetString(c.NS, doctl.ArgPrivateNetworkUUID)
	if err != nil {
		return nil, err
	}
	return &godo.DatabaseCreateReplicaRequest{
		Name:               c.Args[1],
		Size:               size,
		Region:             region,
		PrivateNetworkUUID: networkUUID,
	}, nil
}
// RunDatabaseReplicaDelete deletes a read-only replica
func RunDatabaseReplicaDelete(c *CmdConfig) error {
	if len(c.Args) < 2 {
		return doctl.NewMissingArgsErr(c.NS)
	}
	force, err := c.Doit.GetBool(c.NS, doctl.ArgForce)
	if err != nil {
		return err
	}
	// Ask for confirmation unless --force was given.
	if !force && AskForConfirm("delete this database replica") != nil {
		return fmt.Errorf("operation aborted")
	}
	return c.Databases().DeleteReplica(c.Args[0], c.Args[1])
}
// displayDatabaseReplicas renders one or more replicas with the configured displayer.
func displayDatabaseReplicas(c *CmdConfig, short bool, replicas ...do.DatabaseReplica) error {
	return c.Display(&displayers.DatabaseReplicas{
		DatabaseReplicas: replicas,
		Short:            short,
	})
}
// RunDatabaseReplicaConnectionGet gets read-only replica connection info
func RunDatabaseReplicaConnectionGet(c *CmdConfig) error {
	// Bug fix: this command needs both a database ID and a replica name; the
	// previous check (len(c.Args) == 0) let a single argument through and then
	// panicked with index-out-of-range on c.Args[1].
	if len(c.Args) < 2 {
		return doctl.NewMissingArgsErr(c.NS)
	}
	databaseID := c.Args[0]
	replicaID := c.Args[1]
	connInfo, err := c.Databases().GetReplicaConnection(databaseID, replicaID)
	if err != nil {
		return err
	}
	return displayDatabaseReplicaConnection(c, *connInfo)
}
// displayDatabaseReplicaConnection renders replica connection details with the configured displayer.
func displayDatabaseReplicaConnection(c *CmdConfig, conn do.DatabaseConnection) error {
	return c.Display(&displayers.DatabaseConnection{DatabaseConnection: conn})
}
// sqlMode creates the sql-mode command group (get/set) for a database cluster.
func sqlMode() *Command {
	command := &Command{
		Command: &cobra.Command{
			Use:     "sql-mode",
			Aliases: []string{"sm"},
			Short:   "database sql-mode commands",
			Long:    "sql-mode is used to access database sql-mode commands",
		},
	}

	CmdBuilder(command, RunDatabaseGetSQLModes, "get <database-id>",
		"get sql modes", Writer,
		displayerType(&displayers.DatabaseSQLModes{}), aliasOpt("g"))
	CmdBuilder(command, RunDatabaseSetSQLModes, "set <database-id> <sql-mode-1> ... <sql-mode-n>",
		"set sql modes", Writer, aliasOpt("s"))

	return command
}
// RunDatabaseGetSQLModes gets the sql modes set on the database
func RunDatabaseGetSQLModes(c *CmdConfig) error {
	if len(c.Args) != 1 {
		return doctl.NewMissingArgsErr(c.NS)
	}
	modes, err := c.Databases().GetSQLMode(c.Args[0])
	if err != nil {
		return err
	}
	return displaySQLModes(c, modes)
}
// displaySQLModes renders SQL modes with the configured displayer.
func displaySQLModes(c *CmdConfig, sqlModes []string) error {
	item := &displayers.DatabaseSQLModes{DatabaseSQLModes: sqlModes}
	return c.Display(item)
}
// RunDatabaseSetSQLModes sets the sql modes on the database
func RunDatabaseSetSQLModes(c *CmdConfig) error {
	if len(c.Args) < 2 {
		return doctl.NewMissingArgsErr(c.NS)
	}
	return c.Databases().SetSQLMode(c.Args[0], c.Args[1:]...)
}
|
package main
import (
"net/http"
"path/filepath"
"strconv"
"github.com/sirupsen/logrus"
"gopkg.in/alecthomas/kingpin.v2"
"net"
"os"
"strings"
)
var (
	// CLI definition: `serve [<directory>]` with --port/-p and --verbose/-v flags.
	app       = kingpin.New("serve", "Serve is a simple utility to serve a directory via HTTP")
	port      = app.Flag("port", "The port of the HTTP server.").Default("3000").Short('p').Int()
	directory = app.Arg("directory", "The directory to serve").Default(".").ExistingDir()
	// NOTE(review): verbose defaults to "true", so debug logging is on unless
	// --no-verbose is passed — confirm this is intended.
	verbose = app.Flag("verbose", "Enable verbose output").Default("true").Short('v').Bool()
)
func main() {
app.Parse(os.Args[1:])
if *verbose {
logrus.SetLevel(logrus.DebugLevel)
} else {
logrus.SetLevel(logrus.InfoLevel)
}
// Resolve the absolute path from the given directory
absDir, err := filepath.Abs(*directory)
// Produce a relative directory ($HOME -> ~)
relDir := strings.Replace(absDir, os.Getenv("HOME"), "~", 1) + "/"
if err != nil {
logrus.Fatal("Failed to get directory: ", err)
}
logrus.Infof("Serving %s on http://%s:%s \n", relDir, "0.0.0.0", strconv.Itoa(*port))
// Create HTTP server
server := &http.Server{Addr: ":" + strconv.Itoa(*port), Handler: http.FileServer(http.Dir(absDir)), ConnState: func(conn net.Conn, state http.ConnState) {
if state == http.StateActive {
logrus.Debug("Serving client %s", conn.RemoteAddr())
}
}}
if err := server.ListenAndServe(); err != nil {
logrus.Fatal("Failed to start server: ", err)
}
}
|
package Week_01
import "sort"
// merge: copy nums2 into the tail of nums1 and sort the whole slice.
// Assumes the LeetCode contract len(nums1) == m+n (the original's append
// relies on the same capacity guarantee).
func merge(nums1 []int, m int, nums2 []int, n int) {
	copy(nums1[m:], nums2[:n])
	sort.Ints(nums1[:m+n])
}
// merge2: two pointers walking backwards, writing the larger value into
// the free space at the end of nums1 (largest values land last). O(m+n).
func merge2(nums1 []int, m int, nums2 []int, n int) {
	i, j := m-1, n-1
	// Fill nums1 from the back. Once nums2 is exhausted (j < 0) the
	// remaining nums1 prefix is already in its final position, so the
	// loop only needs to run while nums2 still has elements.
	for k := m + n - 1; j >= 0; k-- {
		if i >= 0 && nums1[i] > nums2[j] {
			nums1[k] = nums1[i]
			i--
		} else {
			nums1[k] = nums2[j]
			j--
		}
	}
}
|
package fs
import (
"github.com/fsnotify/fsnotify"
"os"
)
// Enriched wraps a raw fsnotify event together with metadata derived
// from it by StartEnrich.
type Enriched struct {
	InstanceID string // identifier of the watcher instance that produced the event
	Event *fsnotify.Event // the underlying filesystem notification
	FullPath string // path of the affected entry (copied from Event.Name)
	IsDirectory bool // true when FullPath existed and was a directory at enrich time
	Directory string // NOTE(review): never populated by StartEnrich — confirm intended use
}
// Read returns a channel of Frames for the file at FullPath, delegating
// to the package-level readSlowly helper (defined elsewhere in this
// package).
func (e *Enriched) Read() chan *Frame {
	return readSlowly(e.InstanceID, e.FullPath)
}
// StartEnrich consumes raw fsnotify events from in and emits Enriched
// events on the returned channel. The goroutine exits (and closes out)
// when in is closed; the original looped on `<-in` forever, so a closed
// input channel yielded nil events and a nil-pointer panic on i.Name.
func StartEnrich(instanceID, directory string, in chan *fsnotify.Event) (out chan *Enriched) {
	out = make(chan *Enriched)
	go func() {
		defer close(out)
		for ev := range in {
			e := &Enriched{
				Event:      ev,
				FullPath:   ev.Name,
				InstanceID: instanceID,
			}
			e.IsDirectory = isDir(e.FullPath)
			// NOTE(review): the Directory field is never populated here —
			// confirm whether consumers expect it to be set.
			out <- e
		}
	}()
	return
}
func isDir(path string) bool {
if info, err := os.Stat(path); err == nil && info.IsDir() {
return true
}
return false
}
|
package store
import (
"testing"
"github.com/skoltai/limithandling/domain"
"github.com/stretchr/testify/assert"
)
// TestAppCollection verifies that a created App round-trips through get
// and that looking up an unknown ID fails.
func TestAppCollection(t *testing.T) {
	coll := newAppCollection()
	a := App{
		OwnerID:        1,
		SubscriptionID: 2,
		App:            domain.App{Name: "testapp"},
	}
	a.ID = coll.create(a)

	fetched, err := coll.get(a.ID)
	assert.NoError(t, err)
	assert.Equal(t, a, fetched)

	// ID 0 is never assigned, so fetching it must error.
	_, err = coll.get(0)
	assert.Error(t, err)
}
// TestAppUpdate verifies update semantics: unknown IDs are rejected and
// a successful update becomes visible through get.
func TestAppUpdate(t *testing.T) {
	coll := newAppCollection()

	// Updating IDs that were never created must fail.
	assert.False(t, coll.update(App{ID: 0}))
	assert.False(t, coll.update(App{ID: 1}))

	original := App{
		OwnerID:        1,
		SubscriptionID: 1,
		App:            domain.App{Name: "test1"},
	}
	original.ID = coll.create(original)
	stored, _ := coll.get(original.ID)
	assert.Equal(t, original, stored)

	// Change the subscription and confirm the new value round-trips.
	updated := App{
		ID:             original.ID,
		OwnerID:        1,
		SubscriptionID: 2,
		App:            domain.App{Name: "test1"},
	}
	assert.True(t, coll.update(updated))
	stored, _ = coll.get(updated.ID)
	assert.Equal(t, updated, stored)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.