text stringlengths 11 4.05M |
|---|
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package hwsec
import (
"context"
"strings"
"time"
"github.com/golang/protobuf/proto"
apb "chromiumos/system_api/attestation_proto"
"chromiumos/tast/common/hwsec"
"chromiumos/tast/common/testexec"
hwseclocal "chromiumos/tast/local/hwsec"
"chromiumos/tast/local/upstart"
"chromiumos/tast/testing"
)
// init registers the attestation test with the tast framework at package load.
func init() {
	testing.AddTest(&testing.Test{
		Func:         AttestationNoExternalServer,
		Desc:         "Verifies attestation-related functionality with the locally PCA and VA response",
		Contacts:     []string{"cylai@chromium.org", "cros-hwsec@google.com"},
		Attr:         []string{"group:mainline", "informational"},
		SoftwareDeps: []string{"tpm"},
		Timeout:      4 * time.Minute,
	})
}
// isTPM2 checks if the DUT has a TPM2.0 implementation. In case of any error, |false| is returned.
func isTPM2(ctx context.Context) bool {
	version, err := testexec.CommandContext(ctx, "tpmc", "tpmversion").Output()
	if err != nil {
		// If tpmc is not available, assume it's TPM-less.
		return false
	}
	return strings.TrimSpace(string(version)) == "2.0"
}
// AttestationNoExternalServer runs through the attestation flow, including enrollment, cert, sign challenge.
// Also, it verifies the the key access functionality. All the external dependencies are replaced with the locally generated server responses.
func AttestationNoExternalServer(ctx context.Context, s *testing.State) {
s.Log("Restarting ui job")
if err := upstart.RestartJob(ctx, "ui"); err != nil {
s.Fatal("Failed to restart ui job: ", err)
}
r := hwseclocal.NewCmdRunner()
helper, err := hwseclocal.NewFullHelper(ctx, r)
if err != nil {
s.Fatal("Helper creation error: ", err)
}
attestation := helper.AttestationClient()
cryptohome := helper.CryptohomeClient()
mountInfo := hwsec.NewCryptohomeMountInfo(r, cryptohome)
const username = "test@crashwsec.bigr.name"
s.Log("Resetting vault in case the cryptohome status is contaminated")
// Okay to call it even if the vault doesn't exist.
if _, err := cryptohome.RemoveVault(ctx, username); err != nil {
s.Fatal("Failed to cleanup: ", err)
}
if err := helper.EnsureTPMIsReady(ctx, hwsec.DefaultTakingOwnershipTimeout); err != nil {
s.Fatal("Failed to ensure tpm readiness: ", err)
}
ali := hwseclocal.NewAttestationLocalInfra(helper.DaemonController())
if err := ali.Enable(ctx); err != nil {
s.Fatal("Failed to enable local test infra feature: ", err)
}
defer func(ctx context.Context) {
if err := ali.Disable(ctx); err != nil {
s.Error("Failed to disable local test infra feature: ", err)
}
}(ctx)
s.Log("TPM is ensured to be ready")
if err := helper.EnsureIsPreparedForEnrollment(ctx, hwsec.DefaultPreparationForEnrolmentTimeout); err != nil {
s.Fatal("Failed to prepare for enrollment: ", err)
}
at := hwsec.NewAttestationTestWith(attestation, hwsec.DefaultPCA, hwseclocal.NewPCAAgentClient(), hwseclocal.NewLocalVA())
ac, err := hwseclocal.NewAttestationDBus(ctx)
if err != nil {
s.Fatal("Failed to create attestation client: ", err)
}
enrollReply, err := ac.Enroll(ctx, &apb.EnrollRequest{Forced: proto.Bool(true)})
if err != nil {
s.Fatal("Failed to call Enroll D-Bus API: ", err)
}
if *enrollReply.Status != apb.AttestationStatus_STATUS_SUCCESS {
s.Fatal("Failed to enroll: ", enrollReply.Status.String())
}
if err := cryptohome.MountVault(ctx, "fake_label", hwsec.NewPassAuthConfig(username, "testpass"), true /* create */, hwsec.NewVaultConfig()); err != nil {
s.Fatal("Failed to create user vault: ", err)
}
defer func(ctx context.Context) {
s.Log("Resetting vault after use")
if err := mountInfo.CleanUpMount(ctx, username); err != nil {
s.Error("Failed to cleanup: ", err)
}
}(ctx)
for _, param := range []struct {
name string
username string
keyType apb.KeyType
}{
{
name: "system_cert",
username: "",
},
{
name: "user_cert_rsa",
username: username,
keyType: apb.KeyType_KEY_TYPE_RSA,
},
{
name: "user_cert_ecc",
username: username,
keyType: apb.KeyType_KEY_TYPE_ECC,
},
} {
s.Run(ctx, param.name, func(ctx context.Context, s *testing.State) {
if !isTPM2(ctx) && param.keyType == apb.KeyType_KEY_TYPE_ECC {
s.Log("Skipping unsupported key type item: ", param.name)
return
}
username := param.username
certReply, err := ac.GetCertificate(ctx, &apb.GetCertificateRequest{Username: proto.String(username), KeyLabel: proto.String(hwsec.DefaultCertLabel), KeyType: ¶m.keyType})
if err != nil {
s.Fatal("Failed to call D-Bus API to get certificate: ", err)
}
if *certReply.Status != apb.AttestationStatus_STATUS_SUCCESS {
s.Fatal("Failed to get certificate: ", certReply.Status.String())
}
// TODO(b/165426637): Enable it after we inject the fake device policy with customer ID.
if username != "" {
if err := at.SignEnterpriseChallenge(ctx, username, hwsec.DefaultCertLabel); err != nil {
s.Fatal("Failed to sign enterprise challenge: ", err)
}
}
if err := at.SignSimpleChallenge(ctx, username, hwsec.DefaultCertLabel); err != nil {
s.Fatal("Failed to sign simple challenge: ", err)
}
s.Log("Start key payload closed-loop testing")
s.Log("Setting key payload")
expectedPayload := hwsec.DefaultKeyPayload
_, err = attestation.SetKeyPayload(ctx, username, hwsec.DefaultCertLabel, expectedPayload)
if err != nil {
s.Fatal("Failed to set key payload: ", err)
}
s.Log("Getting key payload")
resultPayload, err := attestation.GetKeyPayload(ctx, username, hwsec.DefaultCertLabel)
if err != nil {
s.Fatal("Failed to get key payload: ", err)
}
if resultPayload != expectedPayload {
s.Fatalf("Inconsistent paylaod -- result: %s / expected: %s", resultPayload, expectedPayload)
}
s.Log("Start key payload closed-loop done")
s.Log("Start verifying key registration")
isSuccessful, err := attestation.RegisterKeyWithChapsToken(ctx, username, hwsec.DefaultCertLabel)
if err != nil {
s.Fatal("Failed to register key with chaps token due to error: ", err)
}
if !isSuccessful {
s.Fatal("Failed to register key with chaps token")
}
// Now the key has been registered and remove from the key store
_, err = attestation.GetPublicKey(ctx, username, hwsec.DefaultCertLabel)
if err == nil {
s.Fatal("unsidered successful operation -- key should be removed after registration")
}
// Well, actually we need more on system key so the key registration is validated.
s.Log("Key registration verified")
s.Log("Verifying deletion of keys by prefix")
for _, label := range []string{"label1", "label2", "label3"} {
certReply, err := ac.GetCertificate(ctx, &apb.GetCertificateRequest{Username: proto.String(username), KeyLabel: proto.String(label)})
if err != nil {
s.Fatalf("Failed to create certificate request for label %q: %v", label, err)
}
if *certReply.Status != apb.AttestationStatus_STATUS_SUCCESS {
s.Fatalf("Failed to get certificate for label %q: %v", label, certReply.Status.String())
}
_, err = attestation.GetPublicKey(ctx, username, label)
if err != nil {
s.Fatalf("Failed to get public key for label %q: %v", label, err)
}
}
s.Log("Deleting keys just created")
if err := attestation.DeleteKeys(ctx, username, "label"); err != nil {
s.Fatal("Failed to remove the key group: ", err)
}
for _, label := range []string{"label1", "label2", "label3"} {
if _, err := attestation.GetPublicKey(ctx, username, label); err == nil {
s.Fatalf("key with label %q still found: %v", label, err)
}
}
s.Log("Deletion of keys by prefix verified")
})
}
}
|
package mr
import (
"log"
"net"
"net/http"
"net/rpc"
"os"
"sync"
"time"
)
// State tracks the lifecycle of a single map or reduce task.
type State int
// Task records the scheduling state of one unit of work. Filename is
// only meaningful for map tasks.
type Task struct {
	State State
	Filename string
}
const (
	TaskStateIdle State = iota // not yet handed to any worker
	TaskStateClaimed           // handed to a worker, awaiting completion
	TaskStateCompleted         // reported done by a worker
)
// Master coordinates map and reduce tasks handed out to workers over RPC.
// All fields are guarded by mu.
type Master struct {
	// Your definitions here.
	mu sync.Mutex
	mapTasks map[int]*Task
	curMapTaskID int
	nReduce int
	reduceTasks map[int]*Task
	numMapComplete int
}
// Your code here -- RPC handlers for the worker to call.
// GetTask API for worker to get task. Map tasks are handed out until the
// whole map phase finishes; only then are reduce tasks assigned.
func (m *Master) GetTask(request *GetTaskRequest, response *GetTaskResponse) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.numMapComplete < len(m.mapTasks) {
		// Map phase: hand out any idle map task.
		for id, t := range m.mapTasks {
			if t.State != TaskStateIdle {
				continue
			}
			response.TaskType = TaskTypeMap
			response.TaskID = id
			response.Filename = t.Filename
			response.NReduce = m.nReduce
			t.State = TaskStateClaimed
			go m.checkWorkerCrash(id, TaskTypeMap)
			return nil
		}
	} else {
		// Reduce phase: the worker needs every map task ID to locate
		// the intermediate files.
		var mapTaskIDs []int
		for id := range m.mapTasks {
			mapTaskIDs = append(mapTaskIDs, id)
		}
		for id, t := range m.reduceTasks {
			if t.State != TaskStateIdle {
				continue
			}
			response.TaskType = TaskTypeReduce
			response.TaskID = id
			response.MapTaskIDs = mapTaskIDs
			t.State = TaskStateClaimed
			go m.checkWorkerCrash(id, TaskTypeReduce)
			return nil
		}
	}
	// Nothing idle right now; the worker should retry later.
	response.TaskType = TaskTypeNoTask
	return nil
}
// checkWorkerCrash waits 10 seconds after a task is claimed; if the task
// is still claimed by then, the worker is presumed dead and the work is
// made available again.
func (m *Master) checkWorkerCrash(taskID int, taskType TaskType) {
	time.Sleep(10 * time.Second)
	m.mu.Lock()
	defer m.mu.Unlock()
	switch taskType {
	case TaskTypeMap:
		if m.mapTasks[taskID].State != TaskStateClaimed {
			return
		}
		// Re-issue the map task under a fresh ID — presumably so output
		// from the stale worker is never picked up; TODO confirm against
		// the worker's file-naming scheme.
		m.mapTasks[m.curMapTaskID] = &Task{
			State:    TaskStateIdle,
			Filename: m.mapTasks[taskID].Filename,
		}
		m.curMapTaskID++
		delete(m.mapTasks, taskID)
	case TaskTypeReduce:
		if m.reduceTasks[taskID].State == TaskStateClaimed {
			m.reduceTasks[taskID].State = TaskStateIdle
		}
	}
}
// CompleteTask API for complete task. Marks the reported task completed.
// Reports for map tasks that were re-issued after a presumed crash (the
// old ID no longer exists) are silently ignored, and duplicate reports
// for the same task no longer inflate the completion counter.
func (m *Master) CompleteTask(request *CompleteTaskRequest, response *CompleteTaskResponse) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	taskID := request.TaskID
	taskType := request.TaskType
	if taskType == TaskTypeMap {
		task, ok := m.mapTasks[taskID]
		if !ok {
			// Stale report for a re-issued task; nothing to do.
			return nil
		}
		// Only count the first completion: the original unconditionally
		// incremented numMapComplete, so a duplicate report could push
		// the counter past len(mapTasks) and start reduce prematurely.
		if task.State != TaskStateCompleted {
			task.State = TaskStateCompleted
			m.numMapComplete++
		}
	} else if taskType == TaskTypeReduce {
		if task, ok := m.reduceTasks[taskID]; ok {
			task.State = TaskStateCompleted
		}
	}
	return nil
}
//
// an example RPC handler.
//
// the RPC argument and reply types are defined in rpc.go.
//
// Example replies with X+1, demonstrating a full RPC round trip.
func (m *Master) Example(args *ExampleArgs, reply *ExampleReply) error {
	reply.Y = args.X + 1
	return nil
}
//
// start a thread that listens for RPCs from worker.go
//
func (m *Master) server() {
	rpc.Register(m)
	rpc.HandleHTTP()
	//l, e := net.Listen("tcp", ":1234")
	// Workers connect over a Unix domain socket; remove any stale socket
	// file left by a previous run before listening.
	sockname := masterSock()
	os.Remove(sockname)
	l, e := net.Listen("unix", sockname)
	if e != nil {
		log.Fatal("listen error:", e)
	}
	// Serve RPCs in the background for the lifetime of the process.
	go http.Serve(l, nil)
}
//
// main/mrmaster.go calls Done() periodically to find out
// if the entire job has finished.
//
// The job is finished once every reduce task is completed (reduce tasks
// are only assigned after all map tasks complete).
func (m *Master) Done() bool {
	// Your code here.
	m.mu.Lock()
	defer m.mu.Unlock()
	for _, task := range m.reduceTasks {
		if task.State != TaskStateCompleted {
			return false
		}
	}
	return true
}
//
// create a Master.
// main/mrmaster.go calls this function.
// nReduce is the number of reduce tasks to use.
//
func MakeMaster(files []string, nReduce int) *Master {
	m := Master{}
	// Your code here.
	// No locking is needed during construction: m is not visible to any
	// other goroutine until server() below starts accepting RPCs. (The
	// original locked m.mu for the whole constructor, which only delayed
	// early RPCs for no benefit.)
	m.curMapTaskID = 0
	// Pre-size both maps since the final counts are known.
	m.mapTasks = make(map[int]*Task, len(files))
	m.nReduce = nReduce
	// One map task per input file.
	for _, file := range files {
		m.mapTasks[m.curMapTaskID] = &Task{
			State:    TaskStateIdle,
			Filename: file,
		}
		m.curMapTaskID++
	}
	m.reduceTasks = make(map[int]*Task, nReduce)
	for i := 0; i < nReduce; i++ {
		m.reduceTasks[i] = &Task{
			State: TaskStateIdle,
		}
	}
	m.numMapComplete = 0
	m.server()
	return &m
}
|
package config
import (
"fmt"
"path/filepath"
"runtime"
"github.com/spf13/viper"
)
var (
	// b is the absolute path of this source file, captured at init time.
	_, b, _, _ = runtime.Caller(0)
	// basepath is the directory containing this package's source, used as
	// a config search path regardless of the working directory.
	basepath = filepath.Dir(b)
)
// ReadConfig locates and loads the YAML configuration file named "config"
// from ./config/, this package's directory, or the working directory (in
// that order). It panics if no config file can be read.
func ReadConfig() {
	viper.SetConfigName("config")
	viper.SetConfigType("yaml")
	viper.AddConfigPath("./config/")
	viper.AddConfigPath(basepath)
	viper.AddConfigPath(".")
	if err := viper.ReadInConfig(); err != nil {
		// %w (not %s) so callers recovering the panic can unwrap the
		// underlying viper error; message follows Go conventions
		// (lowercase, no trailing newline).
		panic(fmt.Errorf("fatal error reading config file: %w", err))
	}
}
// GetString returns the configuration value registered under key as a
// string, via the global viper instance. ReadConfig must have been
// called first.
func GetString(key string) string {
	return viper.GetString(key)
}
|
package solutions
func wordBreak(s string, wordDict []string) bool {
result := make([]bool, len(s) + 1)
result[0] = true
for i := 0 ; i < len(s); i++ {
if result[i] == false {
continue
}
for _, word := range wordDict {
j := i + len(word)
if j <= len(s) && s[i: j] == word {
result[j] = true
}
}
}
return result[len(s)]
}
|
package autoconfig
import "go-gateway/pkg/httpgateway"
// GateWayC is an example of a standard (Spring-Cloud-style) gateway
// configuration file, embedded as a JSON string and parsed by Init.
var GateWayC = `{
"spring": {
"application": {
"name": "xxxx"
},
"cloud": {
"consul": {
"host": "localhost",
"port": 8500,
"discovery": {
"enabled": true,
"instance-id": "",
"service-name": "xxxx",
"prefer-ip-address": true
}
},
"gateway": {
"discovery": {
"locator": {
"enabled": true
}
},
"routes": [{
"id": "activity-route",
"uri": "http://www.baidu.com",
"predicates": [{
"name": "Weight",
"args": {
"group": "s1",
"value": "30"
}
},{
"name": "Path",
"args": {
"value": "/index.php"
}
}],
"filters": [{
"name": "AddRequestHeader",
"args": {
"name": "'foo'",
"value": "'bar'"
}
},
{
"name": "RewritePath",
"args": {
"regexp": "'/' + serviceId + '/(?<remaining>.*)'",
"replacement": "'/${remaining}'"
}
}
]
}, {
"id": "activity-route-2",
"uri": "http://www.baidu.com",
"predicates": [{
"name": "Weight",
"args": {
"group": "s1",
"value": "20"
}
},{
"name": "Path",
"args": {
"value": "/index.php"
}
}],
"filters": [{
"name": "AddRequestHeader",
"args": {
"name": "'foo'",
"value": "'bar'"
}
}]
}]
}
}
}
}`
// Parsed base configuration, converted and loaded into memory by Init.
var (
	GateWaycfgInstance = &GateWayCfg{}
	// PredicateConfig maps each route ID to its parsed predicate configs.
	PredicateConfig map[routeId][]httpgateway.PredicateConfig
	// BizRoutes maps each route ID to its parsed business route.
	BizRoutes map[routeId]BizRoute
)
// GateWayCfg carries no state of its own; it exists to hang the Init and
// Watch lifecycle methods on.
type GateWayCfg struct {
}
// Init parses the embedded gateway configuration GateWayC and populates
// the package-level PredicateConfig and BizRoutes tables, panicking if
// the configuration cannot be parsed.
func (cfg *GateWayCfg) Init() {
	parsed := ConfigParse(GateWayC)
	var err error
	PredicateConfig, BizRoutes, err = PredicatesParse(parsed)
	if err != nil {
		panic(err.Error())
	}
}
// Watch listens for configuration changes. Currently a no-op placeholder.
func (cfg *GateWayCfg) Watch() {
}
|
// Copyright (c) 2020 Target Brands, Inc. All rights reserved.
//
// Use of this source code is governed by the LICENSE file in this repository.
package native
import (
"flag"
"io/ioutil"
"net/http"
"net/http/httptest"
"reflect"
"testing"
"github.com/go-vela/types/pipeline"
"github.com/gin-gonic/gin"
"github.com/urfave/cli/v2"
)
// TestNative_Compile_StagesPipeline compiles testdata/stages_pipeline.yml
// and verifies the resulting stage-based pipeline. Setup failures now use
// t.Fatalf instead of t.Errorf: the original continued past a failed
// ReadFile/New/Compile, dereferencing nil values in the steps that follow.
func TestNative_Compile_StagesPipeline(t *testing.T) {
	// setup types
	set := flag.NewFlagSet("test", 0)
	c := cli.NewContext(nil, set, nil)
	installEnv := environment(nil, nil, nil, nil)
	installEnv["GRADLE_OPTS"] = "-Dorg.gradle.daemon=false -Dorg.gradle.workers.max=1 -Dorg.gradle.parallel=false"
	installEnv["GRADLE_USER_HOME"] = ".gradle"
	installEnv["HOME"] = "/root"
	installEnv["SHELL"] = "/bin/sh"
	installEnv["VELA_BUILD_SCRIPT"] = generateScriptPosix([]string{"./gradlew downloadDependencies"})
	testEnv := environment(nil, nil, nil, nil)
	testEnv["GRADLE_OPTS"] = "-Dorg.gradle.daemon=false -Dorg.gradle.workers.max=1 -Dorg.gradle.parallel=false"
	testEnv["GRADLE_USER_HOME"] = ".gradle"
	testEnv["HOME"] = "/root"
	testEnv["SHELL"] = "/bin/sh"
	testEnv["VELA_BUILD_SCRIPT"] = generateScriptPosix([]string{"./gradlew check"})
	buildEnv := environment(nil, nil, nil, nil)
	buildEnv["GRADLE_OPTS"] = "-Dorg.gradle.daemon=false -Dorg.gradle.workers.max=1 -Dorg.gradle.parallel=false"
	buildEnv["GRADLE_USER_HOME"] = ".gradle"
	buildEnv["HOME"] = "/root"
	buildEnv["SHELL"] = "/bin/sh"
	buildEnv["VELA_BUILD_SCRIPT"] = generateScriptPosix([]string{"./gradlew build"})
	dockerEnv := environment(nil, nil, nil, nil)
	dockerEnv["PARAMETER_REGISTRY"] = "index.docker.io"
	dockerEnv["PARAMETER_REPO"] = "github/octocat"
	dockerEnv["PARAMETER_TAGS"] = "latest,dev"
	want := &pipeline.Build{
		Version: "1",
		ID:      "__0",
		Metadata: pipeline.Metadata{
			Template: false,
		},
		Stages: pipeline.StageSlice{
			&pipeline.Stage{
				Name: "init",
				Steps: pipeline.ContainerSlice{
					&pipeline.Container{
						ID:          "__0_init_init",
						Directory:   "/home//",
						Environment: environment(nil, nil, nil, nil),
						Image:       "#init",
						Name:        "init",
						Number:      1,
						Pull:        true,
					},
				},
			},
			&pipeline.Stage{
				Name: "clone",
				Steps: pipeline.ContainerSlice{
					&pipeline.Container{
						ID:          "__0_clone_clone",
						Directory:   "/home//",
						Environment: environment(nil, nil, nil, nil),
						Image:       "target/vela-git:v0.3.0",
						Name:        "clone",
						Number:      2,
						Pull:        true,
					},
				},
			},
			&pipeline.Stage{
				Name:  "install",
				Needs: []string{"clone"},
				Steps: pipeline.ContainerSlice{
					&pipeline.Container{
						ID:          "__0_install_install",
						Commands:    []string{"echo $VELA_BUILD_SCRIPT | base64 -d | /bin/sh -e"},
						Directory:   "/home//",
						Entrypoint:  []string{"/bin/sh", "-c"},
						Environment: installEnv,
						Image:       "openjdk:latest",
						Name:        "install",
						Number:      3,
						Pull:        true,
					},
				},
			},
			&pipeline.Stage{
				Name:  "test",
				Needs: []string{"install"},
				Steps: pipeline.ContainerSlice{
					&pipeline.Container{
						ID:          "__0_test_test",
						Commands:    []string{"echo $VELA_BUILD_SCRIPT | base64 -d | /bin/sh -e"},
						Directory:   "/home//",
						Entrypoint:  []string{"/bin/sh", "-c"},
						Environment: testEnv,
						Image:       "openjdk:latest",
						Name:        "test",
						Number:      4,
						Pull:        true,
					},
				},
			},
			&pipeline.Stage{
				Name:  "build",
				Needs: []string{"install"},
				Steps: pipeline.ContainerSlice{
					&pipeline.Container{
						ID:          "__0_build_build",
						Commands:    []string{"echo $VELA_BUILD_SCRIPT | base64 -d | /bin/sh -e"},
						Directory:   "/home//",
						Entrypoint:  []string{"/bin/sh", "-c"},
						Environment: buildEnv,
						Image:       "openjdk:latest",
						Name:        "build",
						Number:      5,
						Pull:        true,
					},
				},
			},
			&pipeline.Stage{
				Name:  "publish",
				Needs: []string{"build"},
				Steps: pipeline.ContainerSlice{
					&pipeline.Container{
						ID:          "__0_publish_publish",
						Directory:   "/home//",
						Image:       "plugins/docker:18.09",
						Environment: dockerEnv,
						Name:        "publish",
						Number:      6,
						Pull:        true,
						Secrets: pipeline.StepSecretSlice{
							&pipeline.StepSecret{
								Source: "docker_username",
								Target: "registry_username",
							},
							&pipeline.StepSecret{
								Source: "docker_password",
								Target: "registry_password",
							},
						},
					},
				},
			},
		},
		Secrets: pipeline.SecretSlice{
			&pipeline.Secret{
				Name:   "docker_username",
				Key:    "org/repo/docker/username",
				Engine: "native",
				Type:   "repo",
			},
			&pipeline.Secret{
				Name:   "docker_password",
				Key:    "org/repo/docker/password",
				Engine: "vault",
				Type:   "repo",
			},
		},
	}
	// run test
	yaml, err := ioutil.ReadFile("testdata/stages_pipeline.yml")
	if err != nil {
		t.Fatalf("Reading yaml file return err: %v", err)
	}
	compiler, err := New(c)
	if err != nil {
		t.Fatalf("Creating compiler returned err: %v", err)
	}
	got, err := compiler.Compile(yaml)
	if err != nil {
		t.Fatalf("Compile returned err: %v", err)
	}
	// WARNING: hack to compare stages
	//
	// Channel values can only be compared for equality.
	// Two channel values are considered equal if they
	// originated from the same make call meaning they
	// refer to the same channel value in memory.
	for i, stage := range got.Stages {
		tmp := want.Stages
		tmp[i].Done = stage.Done
	}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("Compile is %v, want %v", got, want)
	}
}
// TestNative_Compile_StepsPipeline compiles testdata/steps_pipeline.yml
// and verifies the resulting step-based pipeline. Setup failures now use
// t.Fatalf instead of t.Errorf so the test does not continue with nil
// yaml, a nil compiler, or a nil compile result.
func TestNative_Compile_StepsPipeline(t *testing.T) {
	// setup types
	set := flag.NewFlagSet("test", 0)
	c := cli.NewContext(nil, set, nil)
	installEnv := environment(nil, nil, nil, nil)
	installEnv["GRADLE_OPTS"] = "-Dorg.gradle.daemon=false -Dorg.gradle.workers.max=1 -Dorg.gradle.parallel=false"
	installEnv["GRADLE_USER_HOME"] = ".gradle"
	installEnv["HOME"] = "/root"
	installEnv["SHELL"] = "/bin/sh"
	installEnv["VELA_BUILD_SCRIPT"] = generateScriptPosix([]string{"./gradlew downloadDependencies"})
	testEnv := environment(nil, nil, nil, nil)
	testEnv["GRADLE_OPTS"] = "-Dorg.gradle.daemon=false -Dorg.gradle.workers.max=1 -Dorg.gradle.parallel=false"
	testEnv["GRADLE_USER_HOME"] = ".gradle"
	testEnv["HOME"] = "/root"
	testEnv["SHELL"] = "/bin/sh"
	testEnv["VELA_BUILD_SCRIPT"] = generateScriptPosix([]string{"./gradlew check"})
	buildEnv := environment(nil, nil, nil, nil)
	buildEnv["GRADLE_OPTS"] = "-Dorg.gradle.daemon=false -Dorg.gradle.workers.max=1 -Dorg.gradle.parallel=false"
	buildEnv["GRADLE_USER_HOME"] = ".gradle"
	buildEnv["HOME"] = "/root"
	buildEnv["SHELL"] = "/bin/sh"
	buildEnv["VELA_BUILD_SCRIPT"] = generateScriptPosix([]string{"./gradlew build"})
	dockerEnv := environment(nil, nil, nil, nil)
	dockerEnv["PARAMETER_REGISTRY"] = "index.docker.io"
	dockerEnv["PARAMETER_REPO"] = "github/octocat"
	dockerEnv["PARAMETER_TAGS"] = "latest,dev"
	want := &pipeline.Build{
		Version: "1",
		ID:      "__0",
		Metadata: pipeline.Metadata{
			Template: false,
		},
		Steps: pipeline.ContainerSlice{
			&pipeline.Container{
				ID:          "step___0_init",
				Directory:   "/home//",
				Environment: environment(nil, nil, nil, nil),
				Image:       "#init",
				Name:        "init",
				Number:      1,
				Pull:        true,
			},
			&pipeline.Container{
				ID:          "step___0_clone",
				Directory:   "/home//",
				Environment: environment(nil, nil, nil, nil),
				Image:       "target/vela-git:v0.3.0",
				Name:        "clone",
				Number:      2,
				Pull:        true,
			},
			&pipeline.Container{
				ID:          "step___0_install",
				Commands:    []string{"echo $VELA_BUILD_SCRIPT | base64 -d | /bin/sh -e"},
				Directory:   "/home//",
				Entrypoint:  []string{"/bin/sh", "-c"},
				Environment: installEnv,
				Image:       "openjdk:latest",
				Name:        "install",
				Number:      3,
				Pull:        true,
			},
			&pipeline.Container{
				ID:          "step___0_test",
				Commands:    []string{"echo $VELA_BUILD_SCRIPT | base64 -d | /bin/sh -e"},
				Directory:   "/home//",
				Entrypoint:  []string{"/bin/sh", "-c"},
				Environment: testEnv,
				Image:       "openjdk:latest",
				Name:        "test",
				Number:      4,
				Pull:        true,
			},
			&pipeline.Container{
				ID:          "step___0_build",
				Commands:    []string{"echo $VELA_BUILD_SCRIPT | base64 -d | /bin/sh -e"},
				Directory:   "/home//",
				Entrypoint:  []string{"/bin/sh", "-c"},
				Environment: buildEnv,
				Image:       "openjdk:latest",
				Name:        "build",
				Number:      5,
				Pull:        true,
			},
			&pipeline.Container{
				ID:          "step___0_publish",
				Directory:   "/home//",
				Image:       "plugins/docker:18.09",
				Environment: dockerEnv,
				Name:        "publish",
				Number:      6,
				Pull:        true,
				Secrets: pipeline.StepSecretSlice{
					&pipeline.StepSecret{
						Source: "docker_username",
						Target: "registry_username",
					},
					&pipeline.StepSecret{
						Source: "docker_password",
						Target: "registry_password",
					},
				},
			},
		},
		Secrets: pipeline.SecretSlice{
			&pipeline.Secret{
				Name:   "docker_username",
				Key:    "org/repo/docker/username",
				Engine: "native",
				Type:   "repo",
			},
			&pipeline.Secret{
				Name:   "docker_password",
				Key:    "org/repo/docker/password",
				Engine: "vault",
				Type:   "repo",
			},
		},
	}
	// run test
	yaml, err := ioutil.ReadFile("testdata/steps_pipeline.yml")
	if err != nil {
		t.Fatalf("Reading yaml file return err: %v", err)
	}
	compiler, err := New(c)
	if err != nil {
		t.Fatalf("Creating compiler returned err: %v", err)
	}
	got, err := compiler.Compile(yaml)
	if err != nil {
		t.Fatalf("Compile returned err: %v", err)
	}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("Compile is %v, want %v", got, want)
	}
}
// TestNative_Compile_StagesPipelineTemplate compiles a stage pipeline that
// expands a template fetched from a mocked GitHub contents API. Setup
// failures now use t.Fatalf instead of t.Errorf so the test does not
// continue with nil yaml, a nil compiler, or a nil compile result.
func TestNative_Compile_StagesPipelineTemplate(t *testing.T) {
	// setup context
	gin.SetMode(gin.TestMode)
	resp := httptest.NewRecorder()
	_, engine := gin.CreateTestContext(resp)
	// setup mock server
	engine.GET("/api/v3/repos/:org/:name/contents/:path", func(c *gin.Context) {
		c.Header("Content-Type", "application/json")
		c.Status(http.StatusOK)
		c.File("testdata/template.json")
	})
	s := httptest.NewServer(engine)
	defer s.Close()
	// setup types
	set := flag.NewFlagSet("test", 0)
	set.Bool("github-driver", true, "doc")
	set.String("github-url", s.URL, "doc")
	set.String("github-token", "", "doc")
	c := cli.NewContext(nil, set, nil)
	installEnv := environment(nil, nil, nil, nil)
	installEnv["GRADLE_OPTS"] = "-Dorg.gradle.daemon=false -Dorg.gradle.workers.max=1 -Dorg.gradle.parallel=false"
	installEnv["GRADLE_USER_HOME"] = ".gradle"
	installEnv["HOME"] = "/root"
	installEnv["SHELL"] = "/bin/sh"
	installEnv["VELA_BUILD_SCRIPT"] = generateScriptPosix([]string{"./gradlew downloadDependencies"})
	testEnv := environment(nil, nil, nil, nil)
	testEnv["GRADLE_OPTS"] = "-Dorg.gradle.daemon=false -Dorg.gradle.workers.max=1 -Dorg.gradle.parallel=false"
	testEnv["GRADLE_USER_HOME"] = ".gradle"
	testEnv["HOME"] = "/root"
	testEnv["SHELL"] = "/bin/sh"
	testEnv["VELA_BUILD_SCRIPT"] = generateScriptPosix([]string{"./gradlew check"})
	buildEnv := environment(nil, nil, nil, nil)
	buildEnv["GRADLE_OPTS"] = "-Dorg.gradle.daemon=false -Dorg.gradle.workers.max=1 -Dorg.gradle.parallel=false"
	buildEnv["GRADLE_USER_HOME"] = ".gradle"
	buildEnv["HOME"] = "/root"
	buildEnv["SHELL"] = "/bin/sh"
	buildEnv["VELA_BUILD_SCRIPT"] = generateScriptPosix([]string{"./gradlew build"})
	dockerEnv := environment(nil, nil, nil, nil)
	dockerEnv["PARAMETER_REGISTRY"] = "index.docker.io"
	dockerEnv["PARAMETER_REPO"] = "github/octocat"
	dockerEnv["PARAMETER_TAGS"] = "latest,dev"
	want := &pipeline.Build{
		Version: "1",
		ID:      "__0",
		Metadata: pipeline.Metadata{
			Template: false,
		},
		Stages: pipeline.StageSlice{
			&pipeline.Stage{
				Name: "init",
				Steps: pipeline.ContainerSlice{
					&pipeline.Container{
						ID:          "__0_init_init",
						Directory:   "/home//",
						Environment: environment(nil, nil, nil, nil),
						Image:       "#init",
						Name:        "init",
						Number:      1,
						Pull:        true,
					},
				},
			},
			&pipeline.Stage{
				Name: "clone",
				Steps: pipeline.ContainerSlice{
					&pipeline.Container{
						ID:          "__0_clone_clone",
						Directory:   "/home//",
						Environment: environment(nil, nil, nil, nil),
						Image:       "target/vela-git:v0.3.0",
						Name:        "clone",
						Number:      2,
						Pull:        true,
					},
				},
			},
			&pipeline.Stage{
				Name:  "gradle",
				Needs: []string{"clone"},
				Steps: pipeline.ContainerSlice{
					&pipeline.Container{
						ID:          "__0_gradle_sample_install",
						Commands:    []string{"echo $VELA_BUILD_SCRIPT | base64 -d | /bin/sh -e"},
						Directory:   "/home//",
						Entrypoint:  []string{"/bin/sh", "-c"},
						Environment: installEnv,
						Image:       "openjdk:latest",
						Name:        "sample_install",
						Number:      3,
						Pull:        true,
					},
					&pipeline.Container{
						ID:          "__0_gradle_sample_test",
						Commands:    []string{"echo $VELA_BUILD_SCRIPT | base64 -d | /bin/sh -e"},
						Directory:   "/home//",
						Entrypoint:  []string{"/bin/sh", "-c"},
						Environment: testEnv,
						Image:       "openjdk:latest",
						Name:        "sample_test",
						Number:      4,
						Pull:        true,
					},
					&pipeline.Container{
						ID:          "__0_gradle_sample_build",
						Commands:    []string{"echo $VELA_BUILD_SCRIPT | base64 -d | /bin/sh -e"},
						Directory:   "/home//",
						Entrypoint:  []string{"/bin/sh", "-c"},
						Environment: buildEnv,
						Image:       "openjdk:latest",
						Name:        "sample_build",
						Number:      5,
						Pull:        true,
					},
				},
			},
			&pipeline.Stage{
				Name:  "publish",
				Needs: []string{"gradle"},
				Steps: pipeline.ContainerSlice{
					&pipeline.Container{
						ID:          "__0_publish_publish",
						Directory:   "/home//",
						Image:       "plugins/docker:18.09",
						Environment: dockerEnv,
						Name:        "publish",
						Number:      6,
						Pull:        true,
						Secrets: pipeline.StepSecretSlice{
							&pipeline.StepSecret{
								Source: "docker_username",
								Target: "registry_username",
							},
							&pipeline.StepSecret{
								Source: "docker_password",
								Target: "registry_password",
							},
						},
					},
				},
			},
		},
		Secrets: pipeline.SecretSlice{
			&pipeline.Secret{
				Name:   "docker_username",
				Key:    "org/repo/docker/username",
				Engine: "native",
				Type:   "repo",
			},
			&pipeline.Secret{
				Name:   "docker_password",
				Key:    "org/repo/docker/password",
				Engine: "vault",
				Type:   "repo",
			},
		},
	}
	// run test
	yaml, err := ioutil.ReadFile("testdata/stages_pipeline_template.yml")
	if err != nil {
		t.Fatalf("Reading yaml file return err: %v", err)
	}
	compiler, err := New(c)
	if err != nil {
		t.Fatalf("Creating compiler returned err: %v", err)
	}
	got, err := compiler.Compile(yaml)
	if err != nil {
		t.Fatalf("Compile returned err: %v", err)
	}
	// WARNING: hack to compare stages
	//
	// Channel values can only be compared for equality.
	// Two channel values are considered equal if they
	// originated from the same make call meaning they
	// refer to the same channel value in memory.
	for i, stage := range got.Stages {
		tmp := want.Stages
		tmp[i].Done = stage.Done
	}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("Compile is %v, want %v", got, want)
	}
}
// TestNative_Compile_StepsPipelineTemplate compiles a step pipeline that
// expands a template fetched from a mocked GitHub contents API. Setup
// failures now use t.Fatalf instead of t.Errorf so the test does not
// continue with nil yaml, a nil compiler, or a nil compile result.
func TestNative_Compile_StepsPipelineTemplate(t *testing.T) {
	// setup context
	gin.SetMode(gin.TestMode)
	resp := httptest.NewRecorder()
	_, engine := gin.CreateTestContext(resp)
	// setup mock server
	engine.GET("/api/v3/repos/foo/bar/contents/:path", func(c *gin.Context) {
		c.Header("Content-Type", "application/json")
		c.Status(http.StatusOK)
		c.File("testdata/template.json")
	})
	s := httptest.NewServer(engine)
	defer s.Close()
	// setup types
	set := flag.NewFlagSet("test", 0)
	set.Bool("github-driver", true, "doc")
	set.String("github-url", s.URL, "doc")
	set.String("github-token", "", "doc")
	c := cli.NewContext(nil, set, nil)
	installEnv := environment(nil, nil, nil, nil)
	installEnv["GRADLE_OPTS"] = "-Dorg.gradle.daemon=false -Dorg.gradle.workers.max=1 -Dorg.gradle.parallel=false"
	installEnv["GRADLE_USER_HOME"] = ".gradle"
	installEnv["HOME"] = "/root"
	installEnv["SHELL"] = "/bin/sh"
	installEnv["VELA_BUILD_SCRIPT"] = generateScriptPosix([]string{"./gradlew downloadDependencies"})
	testEnv := environment(nil, nil, nil, nil)
	testEnv["GRADLE_OPTS"] = "-Dorg.gradle.daemon=false -Dorg.gradle.workers.max=1 -Dorg.gradle.parallel=false"
	testEnv["GRADLE_USER_HOME"] = ".gradle"
	testEnv["HOME"] = "/root"
	testEnv["SHELL"] = "/bin/sh"
	testEnv["VELA_BUILD_SCRIPT"] = generateScriptPosix([]string{"./gradlew check"})
	buildEnv := environment(nil, nil, nil, nil)
	buildEnv["GRADLE_OPTS"] = "-Dorg.gradle.daemon=false -Dorg.gradle.workers.max=1 -Dorg.gradle.parallel=false"
	buildEnv["GRADLE_USER_HOME"] = ".gradle"
	buildEnv["HOME"] = "/root"
	buildEnv["SHELL"] = "/bin/sh"
	buildEnv["VELA_BUILD_SCRIPT"] = generateScriptPosix([]string{"./gradlew build"})
	dockerEnv := environment(nil, nil, nil, nil)
	dockerEnv["PARAMETER_REGISTRY"] = "index.docker.io"
	dockerEnv["PARAMETER_REPO"] = "github/octocat"
	dockerEnv["PARAMETER_TAGS"] = "latest,dev"
	want := &pipeline.Build{
		Version: "1",
		ID:      "__0",
		Metadata: pipeline.Metadata{
			Template: false,
		},
		Steps: pipeline.ContainerSlice{
			&pipeline.Container{
				ID:          "step___0_init",
				Directory:   "/home//",
				Environment: environment(nil, nil, nil, nil),
				Image:       "#init",
				Name:        "init",
				Number:      1,
				Pull:        true,
			},
			&pipeline.Container{
				ID:          "step___0_clone",
				Directory:   "/home//",
				Environment: environment(nil, nil, nil, nil),
				Image:       "target/vela-git:v0.3.0",
				Name:        "clone",
				Number:      2,
				Pull:        true,
			},
			&pipeline.Container{
				ID:          "step___0_sample_install",
				Directory:   "/home//",
				Commands:    []string{"echo $VELA_BUILD_SCRIPT | base64 -d | /bin/sh -e"},
				Entrypoint:  []string{"/bin/sh", "-c"},
				Environment: installEnv,
				Image:       "openjdk:latest",
				Name:        "sample_install",
				Number:      3,
				Pull:        true,
			},
			&pipeline.Container{
				ID:          "step___0_sample_test",
				Directory:   "/home//",
				Commands:    []string{"echo $VELA_BUILD_SCRIPT | base64 -d | /bin/sh -e"},
				Entrypoint:  []string{"/bin/sh", "-c"},
				Environment: testEnv,
				Image:       "openjdk:latest",
				Name:        "sample_test",
				Number:      4,
				Pull:        true,
			},
			&pipeline.Container{
				ID:          "step___0_sample_build",
				Directory:   "/home//",
				Commands:    []string{"echo $VELA_BUILD_SCRIPT | base64 -d | /bin/sh -e"},
				Entrypoint:  []string{"/bin/sh", "-c"},
				Environment: buildEnv,
				Image:       "openjdk:latest",
				Name:        "sample_build",
				Number:      5,
				Pull:        true,
			},
			&pipeline.Container{
				ID:          "step___0_docker",
				Directory:   "/home//",
				Image:       "plugins/docker:18.09",
				Environment: dockerEnv,
				Name:        "docker",
				Number:      6,
				Pull:        true,
				Secrets: pipeline.StepSecretSlice{
					&pipeline.StepSecret{
						Source: "docker_username",
						Target: "registry_username",
					},
					&pipeline.StepSecret{
						Source: "docker_password",
						Target: "registry_password",
					},
				},
			},
		},
		Secrets: pipeline.SecretSlice{
			&pipeline.Secret{
				Name:   "docker_username",
				Key:    "org/repo/docker/username",
				Engine: "native",
				Type:   "repo",
			},
			&pipeline.Secret{
				Name:   "docker_password",
				Key:    "org/repo/docker/password",
				Engine: "vault",
				Type:   "repo",
			},
		},
	}
	// run test
	yaml, err := ioutil.ReadFile("testdata/steps_pipeline_template.yml")
	if err != nil {
		t.Fatalf("Reading yaml file return err: %v", err)
	}
	compiler, err := New(c)
	if err != nil {
		t.Fatalf("Creating compiler returned err: %v", err)
	}
	got, err := compiler.Compile(yaml)
	if err != nil {
		t.Fatalf("Compile returned err: %v", err)
	}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("Compile is %v, want %v", got, want)
	}
}
// TestNative_Compile_InvalidType verifies that a pipeline whose steps use an
// unknown type still compiles: the unknown steps are dropped, leaving only
// the implicit init/clone steps and the docker plugin step.
//
// The unused gradleEnv fixture from a copy-pasted sibling test has been
// removed; it was assigned but never read by any assertion.
func TestNative_Compile_InvalidType(t *testing.T) {
	// setup context
	gin.SetMode(gin.TestMode)

	resp := httptest.NewRecorder()
	_, engine := gin.CreateTestContext(resp)

	// setup mock server serving the template referenced by the pipeline
	engine.GET("/api/v3/repos/foo/bar/contents/:path", func(c *gin.Context) {
		c.Header("Content-Type", "application/json")
		c.Status(http.StatusOK)
		c.File("testdata/template.json")
	})

	s := httptest.NewServer(engine)
	defer s.Close()

	// setup types
	set := flag.NewFlagSet("test", 0)
	set.Bool("github-driver", true, "doc")
	set.String("github-url", s.URL, "doc")
	set.String("github-token", "", "doc")
	c := cli.NewContext(nil, set, nil)

	dockerEnv := environment(nil, nil, nil, nil)
	dockerEnv["PARAMETER_REGISTRY"] = "index.docker.io"
	dockerEnv["PARAMETER_REPO"] = "github/octocat"
	dockerEnv["PARAMETER_TAGS"] = "latest,dev"

	want := &pipeline.Build{
		Version: "1",
		ID:      "__0",
		Metadata: pipeline.Metadata{
			Template: false,
		},
		Steps: pipeline.ContainerSlice{
			&pipeline.Container{
				ID:          "step___0_init",
				Directory:   "/home//",
				Environment: environment(nil, nil, nil, nil),
				Image:       "#init",
				Name:        "init",
				Number:      1,
				Pull:        true,
			},
			&pipeline.Container{
				ID:          "step___0_clone",
				Directory:   "/home//",
				Environment: environment(nil, nil, nil, nil),
				Image:       "target/vela-git:v0.3.0",
				Name:        "clone",
				Number:      2,
				Pull:        true,
			},
			&pipeline.Container{
				ID:          "step___0_docker",
				Directory:   "/home//",
				Image:       "plugins/docker:18.09",
				Environment: dockerEnv,
				Name:        "docker",
				Number:      3,
				Pull:        true,
				Secrets: pipeline.StepSecretSlice{
					&pipeline.StepSecret{
						Source: "docker_username",
						Target: "registry_username",
					},
					&pipeline.StepSecret{
						Source: "docker_password",
						Target: "registry_password",
					},
				},
			},
		},
		Secrets: pipeline.SecretSlice{
			&pipeline.Secret{
				Name:   "docker_username",
				Key:    "org/repo/docker/username",
				Engine: "native",
				Type:   "repo",
			},
			&pipeline.Secret{
				Name:   "docker_password",
				Key:    "org/repo/docker/password",
				Engine: "vault",
				Type:   "repo",
			},
		},
	}

	// run test
	yaml, err := ioutil.ReadFile("testdata/invalid_type.yml")
	if err != nil {
		t.Errorf("Reading yaml file return err: %v", err)
	}

	compiler, err := New(c)
	if err != nil {
		t.Errorf("Creating compiler returned err: %v", err)
	}

	got, err := compiler.Compile(yaml)
	if err != nil {
		t.Errorf("Compile returned err: %v", err)
	}

	if !reflect.DeepEqual(got, want) {
		t.Errorf("Compile is %v, want %v", got, want)
	}
}
// TestNative_Compile_NoStepsorStages confirms that a pipeline defining
// neither steps nor stages fails to compile and yields a nil build.
func TestNative_Compile_NoStepsorStages(t *testing.T) {
	// setup types
	set := flag.NewFlagSet("test", 0)
	c := cli.NewContext(nil, set, nil)

	// run test
	data, err := ioutil.ReadFile("testdata/metadata.yml")
	if err != nil {
		t.Errorf("Reading yaml file return err: %v", err)
	}

	compiler, err := New(c)
	if err != nil {
		t.Errorf("Creating compiler returned err: %v", err)
	}

	build, err := compiler.Compile(data)
	if err == nil {
		t.Errorf("Compile should have returned err")
	}
	if build != nil {
		t.Errorf("Compile is %v, want %v", build, nil)
	}
}
// TestNative_Compile_StepsandStages confirms that a pipeline defining both
// steps and stages is rejected with an error and a nil build.
func TestNative_Compile_StepsandStages(t *testing.T) {
	// setup types
	set := flag.NewFlagSet("test", 0)
	c := cli.NewContext(nil, set, nil)

	// run test
	data, err := ioutil.ReadFile("testdata/steps_and_stages.yml")
	if err != nil {
		t.Errorf("Reading yaml file return err: %v", err)
	}

	compiler, err := New(c)
	if err != nil {
		t.Errorf("Creating compiler returned err: %v", err)
	}

	build, err := compiler.Compile(data)
	if err == nil {
		t.Errorf("Compile should have returned err")
	}
	if build != nil {
		t.Errorf("Compile is %v, want %v", build, nil)
	}
}
|
package goheif
import (
"image"
"image/draw"
"io"
"io/ioutil"
"os"
)
// EncodeOptions bundles the tunables for HEIF encoding.
// Encode defaults these to quality 100, CompressionHEVC,
// LosslessModeEnabled and LoggingLevelBasic.
type EncodeOptions struct {
	Quality      int
	Compression  Compression
	LosslessMode LosslessMode
	LoggingLevel LoggingLevel
}

// EncodeOption mutates an EncodeOptions value; see the WithEncode*
// constructors below.
type EncodeOption func(opts *EncodeOptions)
// WithEncodeQuality sets the encoder quality.
func WithEncodeQuality(quality int) EncodeOption {
	return func(opts *EncodeOptions) {
		opts.Quality = quality
	}
}

// WithEncodeCompression selects the compression codec.
func WithEncodeCompression(compression Compression) EncodeOption {
	return func(opts *EncodeOptions) {
		opts.Compression = compression
	}
}

// WithEncodeLosslessMode sets the lossless encoding mode.
func WithEncodeLosslessMode(mode LosslessMode) EncodeOption {
	return func(opts *EncodeOptions) {
		opts.LosslessMode = mode
	}
}

// WithEncodeLoggingLevel sets the encoder's logging verbosity.
func WithEncodeLoggingLevel(level LoggingLevel) EncodeOption {
	return func(opts *EncodeOptions) {
		opts.LoggingLevel = level
	}
}
// Encode encodes img as HEIF and writes the result to w.
//
// Options default to quality 100, HEVC compression, lossless mode enabled
// and basic logging; override via the EncodeOption arguments. Images that
// are not *image.RGBA, *image.RGBA64 or *image.Gray are converted to RGBA
// first, since only those formats are supported by the underlying encoder.
func Encode(w io.Writer, img image.Image, opt ...EncodeOption) error {
	opts := EncodeOptions{
		Quality:      100,
		Compression:  CompressionHEVC,
		LosslessMode: LosslessModeEnabled,
		LoggingLevel: LoggingLevelBasic,
	}
	for _, o := range opt {
		o(&opts)
	}

	switch im := img.(type) {
	case *image.RGBA, *image.RGBA64, *image.Gray /*, *image.YCbCr*/ :
		// Natively supported by the underlying library.
	default:
		// Everything else goes through an RGBA conversion.
		img = toRGBA(im)
	}

	ctx, err := EncodeFromImage(img, opts.Compression, opts.Quality, opts.LosslessMode, opts.LoggingLevel)
	if err != nil {
		return err
	}

	// TODO: investigate avoiding the temp file (in-memory writer or mmap)
	// if the underlying library ever supports it.
	tempFile, err := ioutil.TempFile("", "*.heif")
	if err != nil {
		return err
	}
	tmpFilename := tempFile.Name()
	// Close our handle immediately: WriteToFile writes through its own
	// handle, and the previous code leaked this descriptor.
	tempFile.Close()
	defer os.Remove(tmpFilename)

	if err := ctx.WriteToFile(tmpFilename); err != nil {
		return err
	}

	// Reopen the file to read what WriteToFile produced; reusing the
	// original handle could observe stale content if the library replaced
	// the file rather than writing through the same inode.
	f, err := os.Open(tmpFilename)
	if err != nil {
		return err
	}
	defer f.Close()

	_, err = io.Copy(w, f)
	return err
}
func toRGBA(im image.Image) *image.RGBA {
b := im.Bounds()
m := image.NewRGBA(image.Rect(0, 0, b.Dx(), b.Dy()))
draw.Draw(m, m.Bounds(), im, b.Min, draw.Src)
return m
}
|
package net
import (
"net"
"os"
)
// InIpv4 returns the host's non-loopback IPv4 addresses, skipping any
// address whose first octet is 172. When no qualifying address exists it
// returns os.ErrNotExist.
func InIpv4() ([]string, error) {
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		return nil, err
	}

	var ips []string
	for _, addr := range addrs {
		ipnet, ok := addr.(*net.IPNet)
		if !ok {
			continue
		}
		ip4 := ipnet.IP.To4()
		// Skip non-IPv4, loopback, and 172.x.x.x addresses.
		if ip4 == nil || ipnet.IP.IsLoopback() || ip4[0] == 172 {
			continue
		}
		ips = append(ips, ip4.String())
	}
	if len(ips) == 0 {
		return nil, os.ErrNotExist
	}
	return ips, nil
}
|
package risserver
import (
"context"
"fmt"
"github.com/bio-routing/bio-rd/net"
"github.com/bio-routing/bio-rd/protocols/bgp/server"
"github.com/bio-routing/bio-rd/route"
"github.com/bio-routing/bio-rd/routingtable"
"github.com/bio-routing/bio-rd/routingtable/filter"
"github.com/bio-routing/bio-rd/routingtable/locRIB"
"github.com/prometheus/client_golang/prometheus"
pb "github.com/bio-routing/bio-rd/cmd/ris/api"
bnet "github.com/bio-routing/bio-rd/net"
netapi "github.com/bio-routing/bio-rd/net/api"
routeapi "github.com/bio-routing/bio-rd/route/api"
)
var (
	// risObserveFIBClients counts active ObserveRIB streams, labelled by
	// router, VRF and AFI/SAFI.
	risObserveFIBClients *prometheus.GaugeVec
)

// init registers the ObserveRIB client gauge with the default Prometheus
// registry.
func init() {
	risObserveFIBClients = prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Namespace: "bio",
			Subsystem: "ris",
			Name:      "observe_fib_clients",
			Help:      "number of observe FIB clients per router/vrf/afisafi",
		},
		[]string{
			"router",
			"vrf",
			"afisafi",
		},
	)
	prometheus.MustRegister(risObserveFIBClients)
}
// Server represents a RoutingInformationService server. It answers RPC
// queries from the RIBs maintained by the wrapped BMP server.
type Server struct {
	bmp *server.BMPServer
}

// NewServer creates a new Server backed by the given BMP server.
func NewServer(b *server.BMPServer) *Server {
	return &Server{
		bmp: b,
	}
}
// getRIB resolves the loc-RIB for the given router, VRF and address
// family. It fails when the router or VRF is unknown, the AFI is
// unsupported, or the VRF has no RIB for that family.
//
// Receiver changed to *Server for consistency with every other method on
// the type (avoids an accidental value copy per call).
func (s *Server) getRIB(rtr string, vrfID uint64, ipVersion netapi.IP_Version) (*locRIB.LocRIB, error) {
	r := s.bmp.GetRouter(rtr)
	if r == nil {
		return nil, fmt.Errorf("Unable to get router %q", rtr)
	}

	v := r.GetVRF(vrfID)
	if v == nil {
		return nil, fmt.Errorf("Unable to get VRF %d", vrfID)
	}

	var rib *locRIB.LocRIB
	switch ipVersion {
	case netapi.IP_IPv4:
		rib = v.IPv4UnicastRIB()
	case netapi.IP_IPv6:
		rib = v.IPv6UnicastRIB()
	default:
		return nil, fmt.Errorf("Unknown afi")
	}
	if rib == nil {
		return nil, fmt.Errorf("Unable to get RIB")
	}

	return rib, nil
}
// LPM provides a longest-prefix-match service: it returns every route in
// the requested RIB that covers the given prefix.
func (s *Server) LPM(ctx context.Context, req *pb.LPMRequest) (*pb.LPMResponse, error) {
	rib, err := s.getRIB(req.Router, req.VrfId, req.Pfx.Address.Version)
	if err != nil {
		return nil, err
	}

	matches := rib.LPM(bnet.NewPrefixFromProtoPrefix(req.Pfx))
	out := make([]*routeapi.Route, 0, len(matches))
	for _, m := range matches {
		out = append(out, m.ToProto())
	}

	return &pb.LPMResponse{Routes: out}, nil
}
// Get returns the route exactly matching the requested prefix. When the
// prefix is absent the response carries an empty (non-nil) route list.
func (s *Server) Get(ctx context.Context, req *pb.GetRequest) (*pb.GetResponse, error) {
	rib, err := s.getRIB(req.Router, req.VrfId, req.Pfx.Address.Version)
	if err != nil {
		return nil, err
	}

	r := rib.Get(bnet.NewPrefixFromProtoPrefix(req.Pfx))
	if r == nil {
		return &pb.GetResponse{Routes: []*routeapi.Route{}}, nil
	}

	return &pb.GetResponse{Routes: []*routeapi.Route{r.ToProto()}}, nil
}
// GetLonger returns all more-specific routes of the requested prefix.
func (s *Server) GetLonger(ctx context.Context, req *pb.GetLongerRequest) (*pb.GetLongerResponse, error) {
	rib, err := s.getRIB(req.Router, req.VrfId, req.Pfx.Address.Version)
	if err != nil {
		return nil, err
	}

	specifics := rib.GetLonger(bnet.NewPrefixFromProtoPrefix(req.Pfx))
	out := make([]*routeapi.Route, 0, len(specifics))
	for _, r := range specifics {
		out = append(out, r.ToProto())
	}

	return &pb.GetLongerResponse{Routes: out}, nil
}
// ObserveRIB implements the ObserveRIB RPC. It registers a client on the
// requested RIB and streams every subsequent update to the caller until
// stream.Send fails.
func (s *Server) ObserveRIB(req *pb.ObserveRIBRequest, stream pb.RoutingInformationService_ObserveRIBServer) error {
	ipVersion := netapi.IP_IPv4
	switch req.Afisafi {
	case pb.ObserveRIBRequest_IPv4Unicast:
		ipVersion = netapi.IP_IPv4
	case pb.ObserveRIBRequest_IPv6Unicast:
		ipVersion = netapi.IP_IPv6
	default:
		return fmt.Errorf("Unknown AFI/SAFI")
	}

	rib, err := s.getRIB(req.Router, req.VrfId, ipVersion)
	if err != nil {
		return err
	}

	// Track concurrent observers; decremented when the stream ends.
	risObserveFIBClients.WithLabelValues(req.Router, fmt.Sprintf("%d", req.VrfId), fmt.Sprintf("%d", req.Afisafi)).Inc()
	defer risObserveFIBClients.WithLabelValues(req.Router, fmt.Sprintf("%d", req.VrfId), fmt.Sprintf("%d", req.Afisafi)).Dec()

	fifo := newUpdateFIFO()
	rc := newRIBClient(fifo)
	ret := make(chan error)

	// Forwarder goroutine: drains the FIFO and pushes updates onto the
	// gRPC stream.
	// NOTE(review): this goroutine only exits when stream.Send returns an
	// error; if fifo.dequeue() blocks after Unregister, it may linger —
	// confirm dequeue's behavior when the RIB client is unregistered.
	go func(fifo *updateFIFO) {
		var err error
		for {
			for _, toSend := range fifo.dequeue() {
				err = stream.Send(toSend)
				if err != nil {
					ret <- err
					return
				}
			}
		}
	}(fifo)

	rib.RegisterWithOptions(rc, routingtable.ClientOptions{
		MaxPaths: 100,
	})
	defer rib.Unregister(rc)

	// Block until the forwarder reports a send failure.
	err = <-ret
	if err != nil {
		return fmt.Errorf("Stream ended: %v", err)
	}

	return nil
}
// DumpRIB implements the DumpRIB RPC. It streams a snapshot of every route
// in the requested RIB, one reply per route.
//
// The previous implementation pre-populated a single reply with a dummy
// Paths slice that was overwritten before the first Send, and reused that
// one message across Send calls; a fresh reply is now built per route.
func (s *Server) DumpRIB(req *pb.DumpRIBRequest, stream pb.RoutingInformationService_DumpRIBServer) error {
	ipVersion := netapi.IP_IPv4
	switch req.Afisafi {
	case pb.DumpRIBRequest_IPv4Unicast:
		ipVersion = netapi.IP_IPv4
	case pb.DumpRIBRequest_IPv6Unicast:
		ipVersion = netapi.IP_IPv6
	default:
		return fmt.Errorf("Unknown AFI/SAFI")
	}

	rib, err := s.getRIB(req.Router, req.VrfId, ipVersion)
	if err != nil {
		return err
	}

	for _, r := range rib.Dump() {
		if err := stream.Send(&pb.DumpRIBReply{Route: r.ToProto()}); err != nil {
			return err
		}
	}

	return nil
}
// GetRouters implements the GetRouters RPC: it lists every router known to
// the BMP server together with its address and VRF identifiers.
func (s *Server) GetRouters(c context.Context, request *pb.GetRoutersRequest) (*pb.GetRoutersResponse, error) {
	resp := &pb.GetRoutersResponse{}
	for _, rtr := range s.bmp.GetRouters() {
		vrfs := rtr.GetVRFs()
		ids := make([]uint64, 0, len(vrfs))
		for _, vrf := range vrfs {
			ids = append(ids, vrf.RD())
		}
		resp.Routers = append(resp.Routers, &pb.Router{
			SysName: rtr.Name(),
			VrfIds:  ids,
			Address: rtr.Address().String(),
		})
	}
	return resp, nil
}
// update describes one RIB change: an advertisement or withdrawal of a
// path for a prefix.
type update struct {
	advertisement bool
	prefix        net.Prefix
	path          *route.Path
}

// ribClient adapts RIB callbacks into RIBUpdate messages queued on a FIFO.
type ribClient struct {
	fifo *updateFIFO
}

// newRIBClient returns a ribClient that queues updates on fifo.
func newRIBClient(fifo *updateFIFO) *ribClient {
	return &ribClient{
		fifo: fifo,
	}
}
// AddPath queues an advertisement for pfx/path on the client's FIFO.
func (r *ribClient) AddPath(pfx *net.Prefix, path *route.Path) error {
	r.fifo.queue(&pb.RIBUpdate{
		Advertisement: true,
		Route: &routeapi.Route{
			Pfx: pfx.ToProto(),
			Paths: []*routeapi.Path{
				path.ToProto(),
			},
		},
	})
	return nil
}
// RemovePath queues a withdrawal for pfx/path on the client's FIFO.
// NOTE(review): it always returns false — presumably "path not owned by
// this client" in the RouteTableClient contract; confirm against the
// interface documentation.
func (r *ribClient) RemovePath(pfx *net.Prefix, path *route.Path) bool {
	r.fifo.queue(&pb.RIBUpdate{
		Advertisement: false,
		Route: &routeapi.Route{
			Pfx: pfx.ToProto(),
			Paths: []*routeapi.Path{
				path.ToProto(),
			},
		},
	})
	return false
}
// The methods below are no-op stubs that exist only to satisfy the
// routingtable client interface; ribClient streams updates via its FIFO
// and maintains no table of its own.

func (r *ribClient) UpdateNewClient(routingtable.RouteTableClient) error {
	return nil
}

func (r *ribClient) Register(routingtable.RouteTableClient) {
}

func (r *ribClient) RegisterWithOptions(routingtable.RouteTableClient, routingtable.ClientOptions) {
}

func (r *ribClient) Unregister(routingtable.RouteTableClient) {
}

// RouteCount returns -1: this client does not track a route count.
func (r *ribClient) RouteCount() int64 {
	return -1
}

func (r *ribClient) ClientCount() uint64 {
	return 0
}

func (r *ribClient) Dump() []*route.Route {
	return nil
}

func (r *ribClient) RefreshRoute(*net.Prefix, []*route.Path) {}

func (r *ribClient) ReplaceFilterChain(filter.Chain) {}

// ReplacePath is here to fulfill an interface
func (r *ribClient) ReplacePath(*net.Prefix, *route.Path, *route.Path) {
}
|
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package database
import (
"crypto/rand"
"encoding/base64"
"fmt"
"github.com/jinzhu/gorm"
)
// APIUserType distinguishes the privilege level bound to an API key.
type APIUserType int

const (
	apiKeyBytes = 64 // 64 bytes is 86 characters in non-padded base64.

	APIUserTypeDevice APIUserType = 0
	APIUserTypeAdmin  APIUserType = 1
)
// AuthorizedApp represents an application that is authorized to verify
// verification codes and perform token exchanges.
// This is controlled via a generated API key.
//
// Admin keys are able to issue diagnosis keys and are not able to
// participate in the verification protocol.
type AuthorizedApp struct {
	gorm.Model
	Name       string      `gorm:"type:varchar(100);unique_index"`
	APIKey     string      `gorm:"type:varchar(100);unique_index"`
	APIKeyType APIUserType `gorm:"default:0"`
}
// IsAdminType reports whether this API key is an admin key.
func (a *AuthorizedApp) IsAdminType() bool {
	return a.APIKeyType == APIUserTypeAdmin
}

// IsDeviceType reports whether this API key is a device key.
func (a *AuthorizedApp) IsDeviceType() bool {
	return a.APIKeyType == APIUserTypeDevice
}

// TODO(mikehelmick): Implement revoke API key functionality.

// TableName definition for the authorized apps relation.
func (AuthorizedApp) TableName() string {
	return "authorized_apps"
}
// ListAuthorizedApps retrieves all of the configured authorized apps,
// ordered by name. Done without pagination, as the expected number of
// authorized apps is in the low single digits. When includeDeleted is
// true, soft-deleted rows are included (gorm Unscoped).
func (db *Database) ListAuthorizedApps(includeDeleted bool) ([]*AuthorizedApp, error) {
	var apps []*AuthorizedApp

	scope := db.db
	if includeDeleted {
		scope = db.db.Unscoped()
	}
	if err := scope.Order("name ASC").Find(&apps).Error; err != nil {
		return nil, fmt.Errorf("query authorized apps: %w", err)
	}
	return apps, nil
}
// CreateAuthorizedApp generates a new API key and persists it under the
// specified name. Only APIUserTypeAdmin and APIUserTypeDevice are
// accepted; any other value is rejected before touching the database.
func (db *Database) CreateAuthorizedApp(name string, apiUserType APIUserType) (*AuthorizedApp, error) {
	if apiUserType != APIUserTypeAdmin && apiUserType != APIUserTypeDevice {
		return nil, fmt.Errorf("invalid API Key user type requested: %v", apiUserType)
	}

	buffer := make([]byte, apiKeyBytes)
	if _, err := rand.Read(buffer); err != nil {
		// Wrap with %w (consistent with the other errors in this file) so
		// callers can unwrap the underlying failure.
		return nil, fmt.Errorf("rand.Read: %w", err)
	}

	app := AuthorizedApp{
		Name:       name,
		APIKey:     base64.RawStdEncoding.EncodeToString(buffer),
		APIKeyType: apiUserType,
	}

	if err := db.db.Create(&app).Error; err != nil {
		return nil, fmt.Errorf("unable to save authorized app: %w", err)
	}
	return &app, nil
}
// FindAuthorizedAppByAPIKey locates an authorized app by its API key.
// Returns gorm's record-not-found error when no app matches.
func (db *Database) FindAuthorizedAppByAPIKey(apiKey string) (*AuthorizedApp, error) {
	var app AuthorizedApp
	if err := db.db.Where("api_key = ?", apiKey).First(&app).Error; err != nil {
		return nil, err
	}
	return &app, nil
}
|
package main
import (
"bufio"
"bytes"
"fmt"
"io"
"os"
"strings"
)
// readWriter combines an arbitrary Reader and Writer into a single value
// so one argument can serve both roles.
type readWriter struct {
	io.Reader
	io.Writer
}

// newReadWriter pairs r and w into a readWriter.
func newReadWriter(r io.Reader, w io.Writer) *readWriter {
	return &readWriter{r, w}
}
// pipe returns a writer: everything written to it becomes readable on the
// readWriter's Reader side inside f, and whatever f writes goes to w.
// f runs in its own goroutine for the life of the pipe.
func pipe(w io.Writer, f func(*readWriter)) io.Writer {
	r, wOut := io.Pipe()
	rw := newReadWriter(r, w)
	go f(rw)
	return wOut
}
// annotate reads lines from rw (using the custom scanLines splitter),
// prefixes each one with prefix(), and writes the result back to rw's
// Writer side. Returns the scanner's terminal error, if any.
func annotate(rw *readWriter, prefix func() string) error {
	s := bufio.NewScanner(rw)
	s.Split(scanLines)
	for s.Scan() {
		printAnnotated(rw, prefix, s.Text())
	}
	return s.Err()
}
// scanLines is a bufio.SplitFunc that returns each line of text, stripped
// of any trailing end-of-line marker. The returned line may be empty. The
// end-of-line marker is either one carriage return not followed by a
// newline or one optional carriage return followed by one mandatory
// newline. In regular expression notation, it is `\r[^\n]|\r?\n`.
// The last non-empty line of input will be returned even if it has no
// newline.
//
// Fix: a \r as the final byte of a non-EOF buffer used to be consumed as a
// bare-\r terminator even though the next read might start with \n,
// producing a spurious empty line; we now request more data in that case.
func scanLines(data []byte, atEOF bool) (advance int, token []byte, err error) {
	if atEOF && len(data) == 0 {
		return 0, nil, nil
	}
	r := bytes.IndexByte(data, '\r')
	n := bytes.IndexByte(data, '\n')
	// We have a full \r\n-terminated line.
	if r >= 0 && n == r+1 {
		return r + 2, data[0:r], nil
	}
	// A trailing \r with no newline in sight may be half of a \r\n pair
	// split across reads; wait for more data before deciding.
	if r >= 0 && r == len(data)-1 && n < 0 && !atEOF {
		return 0, nil, nil
	}
	// We have a full \r-terminated line.
	if r >= 0 && (n < 0 || r < n) {
		return r + 1, data[0:r], nil
	}
	// We have a full \n-terminated line.
	if n >= 0 && (r < 0 || n < r) {
		return n + 1, data[0:n], nil
	}
	// If we're at EOF, we have a final, non-terminated line. Return it.
	if atEOF {
		return len(data), data, nil
	}
	// Request more data.
	return 0, nil, nil
}
// printArguments echoes the command-line arguments to stdout, annotated
// with the current prefix.
func printArguments(args []string, prefix func() string) {
	printAnnotated(os.Stdout, prefix, strings.Join(args, " "))
}

// printSeparator prints sep repeated 80 times to stdout, annotated with
// the current prefix.
func printSeparator(sep string, prefix func() string) {
	printAnnotated(os.Stdout, prefix, strings.Repeat(sep, 80))
}
func printAnnotated(w io.Writer, prefix func() string, s string) {
fmt.Fprintf(w, "%s%s\n", prefix(), s)
}
|
package main
import (
"os"
"bufio"
"strconv"
"strings"
"sort"
"fmt"
)
// main reads the Code Jam style input file, solves each test case, prints
// the combined result and writes it to the output file.
func main() {
	inputSize := "large"
	input, err := os.Open("A-" + inputSize + "-practice.in")
	check(err)
	defer input.Close()
	scanner := bufio.NewScanner(input)
	// First line: number of test cases.
	scanner.Scan()
	T, _ := strconv.Atoi(scanner.Text())
	result := ""
	for i := 1; i <= T; i++ {
		// Per case, line one: N (file count) and MAX (capacity).
		scanner.Scan()
		tmp := strings.Split(string(scanner.Text()), " ")
		N, _ := strconv.Atoi(tmp[0])
		MAX, _ := strconv.Atoi(tmp[1])
		// Line two: the N file sizes.
		scanner.Scan()
		tmp = strings.Split(string(scanner.Text()), " ")
		var files = []int{}
		for _, file := range tmp {
			j, _ := strconv.Atoi(file)
			files = append(files, j)
		}
		// solve expects the sizes in ascending order.
		sort.Ints(files)
		result += "Case #" + strconv.Itoa(i) + ": " + solve(N, MAX, files) + "\n"
	}
	fmt.Printf(result)
	output, err := os.Create("A-" + inputSize + "-output.txt")
	check(err)
	defer output.Close()
	output.WriteString(result)
	output.Sync()
}
/* greedily iterate over sorted array and remove first fitting */

// solve returns (as a string) the minimum number of groups needed to pack
// the ascending-sorted sizes in files, pairing at most two per group with
// total size at most MAX: it repeatedly takes the smallest remaining size
// and pairs it with the largest remaining size that still fits.
// N is the declared element count and is unused (len(files) carries it).
//
// Fix: the function used to shift elements through the caller's backing
// array; it now works on a private copy so the input slice is untouched.
func solve(N, MAX int, files []int) string {
	rem := append([]int(nil), files...)
	needed := 0
	for len(rem) > 0 {
		head := rem[0]
		rem = rem[1:]
		// Scan from the largest remaining size down for the first partner
		// that fits alongside head.
		for i := len(rem) - 1; i >= 0; i-- {
			if head+rem[i] <= MAX {
				rem = append(rem[:i], rem[i+1:]...)
				break
			}
		}
		needed++
	}
	return strconv.Itoa(needed)
}
func check(e error) {
if e != nil {
panic(e)
}
} |
package handlers_test
import (
"bytes"
"context"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/raba-jp/primus/pkg/cli/ui"
"github.com/raba-jp/primus/pkg/exec"
fakeexec "github.com/raba-jp/primus/pkg/exec/testing"
"github.com/raba-jp/primus/pkg/operations/packages/handlers"
"golang.org/x/xerrors"
)
// TestArchLinux_CheckInstall runs CheckInstall against a fake exec whose
// scripted command returns no error, expecting true.
func TestArchLinux_CheckInstall(t *testing.T) {
	tests := []struct {
		name     string
		mockExec exec.Interface
		want     bool
	}{
		{
			name: "success",
			mockExec: &fakeexec.FakeExec{
				CommandScript: []fakeexec.FakeCommandAction{
					func(cmd string, args ...string) exec.Cmd {
						fake := &fakeexec.FakeCmd{
							Stdout: new(bytes.Buffer),
							Stderr: new(bytes.Buffer),
							RunScript: []fakeexec.FakeAction{
								func() ([]byte, []byte, error) {
									// Scripted command succeeds.
									return []byte{}, []byte{}, nil
								},
							},
						}
						return fakeexec.InitFakeCmd(fake, cmd, args...)
					},
				},
			},
			want: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			handler := &handlers.ArchLinux{Exec: tt.mockExec}
			if res := handler.CheckInstall(context.Background(), "base-devel"); res != tt.want {
				t.Fatal("Fail")
			}
		})
	}
}
// TestArchLinux_Install runs Install (dryrun=false) against fake execs:
// one whose scripted command succeeds and one that fails, asserting an
// error is only surfaced in the success case if unexpected.
func TestArchLinux_Install(t *testing.T) {
	tests := []struct {
		name     string
		mockExec exec.Interface
		hasErr   bool
	}{
		{
			name: "success",
			mockExec: &fakeexec.FakeExec{
				CommandScript: []fakeexec.FakeCommandAction{
					func(cmd string, args ...string) exec.Cmd {
						fake := &fakeexec.FakeCmd{
							Stdout: new(bytes.Buffer),
							Stderr: new(bytes.Buffer),
							RunScript: []fakeexec.FakeAction{
								func() ([]byte, []byte, error) {
									return []byte{}, []byte{}, nil
								},
							},
						}
						return fakeexec.InitFakeCmd(fake, cmd, args...)
					},
				},
			},
			hasErr: false,
		},
		{
			name: "error: error occurred",
			mockExec: &fakeexec.FakeExec{
				CommandScript: []fakeexec.FakeCommandAction{
					func(cmd string, args ...string) exec.Cmd {
						fake := &fakeexec.FakeCmd{
							Stdout: new(bytes.Buffer),
							Stderr: new(bytes.Buffer),
							RunScript: []fakeexec.FakeAction{
								func() ([]byte, []byte, error) {
									// Scripted command fails.
									return []byte{}, []byte{}, xerrors.New("dummy")
								},
							},
						}
						return fakeexec.InitFakeCmd(fake, cmd, args...)
					},
				},
			},
			hasErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			handler := &handlers.ArchLinux{Exec: tt.mockExec}
			if err := handler.Install(context.Background(), false, &handlers.InstallParams{Name: "base-devel", Option: "option"}); !tt.hasErr && err != nil {
				t.Fatalf("%v", err)
			}
		})
	}
}
// TestArchLinux_Install__dryrun verifies that Install in dry-run mode
// prints the pacman command it would execute instead of running it.
//
// Fix: t.Fatalf(diff) passed a non-constant format string (go vet printf
// warning and a %-escape hazard); use an explicit "%s" verb.
func TestArchLinux_Install__dryrun(t *testing.T) {
	tests := []struct {
		name   string
		params *handlers.InstallParams
		want   string
	}{
		{
			name: "success",
			params: &handlers.InstallParams{
				Name:   "pkg",
				Option: "option",
			},
			want: "pacman -S --noconfirm option pkg\n",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			buf := new(bytes.Buffer)
			ui.SetDefaultUI(&ui.CommandLine{Out: buf, Errout: buf})
			handler := &handlers.ArchLinux{}
			err := handler.Install(context.Background(), true, tt.params)
			if err != nil {
				t.Fatalf("%v", err)
			}
			if diff := cmp.Diff(tt.want, buf.String()); diff != "" {
				t.Fatalf("%s", diff)
			}
		})
	}
}
// TestArchLinux_Uninstall runs Uninstall (dryrun=false) against fake
// execs: one whose scripted command succeeds and one that fails.
func TestArchLinux_Uninstall(t *testing.T) {
	tests := []struct {
		name     string
		mockExec exec.Interface
		hasErr   bool
	}{
		{
			name: "success",
			mockExec: &fakeexec.FakeExec{
				CommandScript: []fakeexec.FakeCommandAction{
					func(cmd string, args ...string) exec.Cmd {
						fake := &fakeexec.FakeCmd{
							Stdout: new(bytes.Buffer),
							Stderr: new(bytes.Buffer),
							RunScript: []fakeexec.FakeAction{
								func() ([]byte, []byte, error) {
									return []byte{}, []byte{}, nil
								},
							},
						}
						return fakeexec.InitFakeCmd(fake, cmd, args...)
					},
				},
			},
			hasErr: false,
		},
		{
			name: "error: error occurred",
			mockExec: &fakeexec.FakeExec{
				CommandScript: []fakeexec.FakeCommandAction{
					func(cmd string, args ...string) exec.Cmd {
						fake := &fakeexec.FakeCmd{
							Stdout: new(bytes.Buffer),
							Stderr: new(bytes.Buffer),
							RunScript: []fakeexec.FakeAction{
								func() ([]byte, []byte, error) {
									// Scripted command fails.
									return []byte{}, []byte{}, xerrors.New("dummy")
								},
							},
						}
						return fakeexec.InitFakeCmd(fake, cmd, args...)
					},
				},
			},
			hasErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			handler := &handlers.ArchLinux{Exec: tt.mockExec}
			if err := handler.Uninstall(context.Background(), false, &handlers.UninstallParams{Name: "base-devel"}); !tt.hasErr && err != nil {
				t.Fatalf("%v", err)
			}
		})
	}
}
// TestArchLinux_Uninstall__dryrun verifies that Uninstall in dry-run mode
// prints the pacman command it would execute instead of running it.
//
// Fix: t.Fatalf(diff) passed a non-constant format string (go vet printf
// warning and a %-escape hazard); use an explicit "%s" verb.
func TestArchLinux_Uninstall__dryrun(t *testing.T) {
	tests := []struct {
		name   string
		params *handlers.UninstallParams
		want   string
	}{
		{
			name: "success",
			params: &handlers.UninstallParams{
				Name: "pkg",
			},
			want: "pacman -R --noconfirm pkg\n",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			buf := new(bytes.Buffer)
			ui.SetDefaultUI(&ui.CommandLine{Out: buf, Errout: buf})
			handler := &handlers.ArchLinux{}
			err := handler.Uninstall(context.Background(), true, tt.params)
			if err != nil {
				t.Fatalf("%v", err)
			}
			if diff := cmp.Diff(tt.want, buf.String()); diff != "" {
				t.Fatalf("%s", diff)
			}
		})
	}
}
|
package main
import (
"fmt"
"log"
"os"
"github.com/ushmodin/criscross/game"
)
// main wires up storage, the game and the HTTP server, then serves on
// :8080.
//
// NOTE(review): log.Fatal exits without running deferred calls, so
// StorageClose is skipped on every fatal path — confirm that is
// acceptable. The result of ListenAndServe (if any) is also ignored.
func main() {
	err := criscross.StorageConnect(os.Getenv("MONGODB"))
	if err != nil {
		log.Fatal(err)
	}
	game, err := criscross.NewCrisCrossGame()
	if err != nil {
		log.Fatal(err)
	}
	defer criscross.StorageClose()
	srv, err := criscross.NewCrisCrossServer(game)
	if err != nil {
		log.Fatal(err)
	}
	srv.ListenAndServe(":8080")
	fmt.Println("End")
}
|
package main
import (
"bytes"
"fmt"
"github.com/adiabat/btcd/btcec"
"github.com/adiabat/btcd/chaincfg"
"github.com/adiabat/btcd/txscript"
"github.com/adiabat/btcd/wire"
"github.com/adiabat/btcutil"
"github.com/mit-dci/lit/portxo"
)
// move signs over the input portxo (-in) to the destination address
// (-dest) and emits the serialized transaction as a hex string via
// g.output.
//
// Fix: the hex formatting previously round-tripped the bytes through a
// string (`string(buf.Bytes())`) before applying %x; %x hex-encodes a
// []byte directly and identically, so the extra copy is dropped.
func (g *GDsession) move() error {
	if *g.inFileName == "" {
		return fmt.Errorf("move requires input file portxo to move (-in)")
	}
	if *g.destAdr == "" {
		return fmt.Errorf("move requires a destination address (-dest)")
	}

	portxBytes, err := g.inputHex()
	if err != nil {
		return err
	}
	txo, err := portxo.PorTxoFromBytes(portxBytes)
	if err != nil {
		return err
	}

	adr, err := btcutil.DecodeAddress(*g.destAdr, g.NetParams)
	if err != nil {
		return err
	}

	tx, err := SendOne(*txo, adr, *g.fee, g.NetParams, *g.bchArg)
	if err != nil {
		return err
	}

	var buf bytes.Buffer
	if err = tx.Serialize(&buf); err != nil {
		return err
	}

	outString := fmt.Sprintf("%x", buf.Bytes())
	return g.output(outString)
}
// SendOne moves one utxo to a new address, returning the signed
// transaction. The fee is estimated as 200 bytes * feeRate; bch selects
// Bitcoin-Cash-style signature hashing.
//
// NOTE(review): sendAmt = u.Value - fee is not checked for being
// non-positive; a high feeRate or tiny utxo would produce a negative
// output value — confirm callers guard against this.
func SendOne(u portxo.PorTxo, adr btcutil.Address,
	feeRate int64, param *chaincfg.Params, bch bool) (*wire.MsgTx, error) {

	// estimate tx size at 200 bytes
	fee := 200 * feeRate
	sendAmt := u.Value - fee
	tx := wire.NewMsgTx() // make new tx

	// add single output
	outAdrScript, err := txscript.PayToAddrScript(adr)
	if err != nil {
		return nil, err
	}
	// make user specified txout and add to tx
	txout := wire.NewTxOut(sendAmt, outAdrScript)
	tx.AddTxOut(txout)
	tx.AddTxIn(wire.NewTxIn(&u.Op, nil, nil))
	// tx.AddTxIn(wire.NewTxIn(&u.Op, nil))

	var sigScript []byte
	var empty [32]byte
	// var wit [][]byte

	if u.PrivKey == empty {
		return nil, fmt.Errorf("error: porTxo has empty private key field")
	}
	priv, _ := btcec.PrivKeyFromBytes(btcec.S256(), u.PrivKey[:])
	if priv == nil {
		return nil, fmt.Errorf("SendCoins: privkey error")
	}

	// Rebuild the previous output's P2PKH script from our own pubkey;
	// signing needs the script being spent.
	prevAdr, err := btcutil.NewAddressPubKeyHash(
		btcutil.Hash160(priv.PubKey().SerializeCompressed()), param)
	if err != nil {
		return nil, err
	}
	prevScript, err := txscript.PayToAddrScript(prevAdr)
	if err != nil {
		return nil, err
	}

	// check if BCH sigs needed
	if bch {
		// make hash cache for this tx
		hCache := txscript.NewTxSigHashes(tx)
		// generate sig.
		// NOTE(review): the BCH path signs against u.PkScript while the
		// non-BCH path signs against the reconstructed prevScript —
		// confirm this asymmetry is intended.
		sigScript, err = txscript.BCHSignatureScript(
			tx, hCache, 0, u.Value, u.PkScript, priv, true)
		if err != nil {
			return nil, err
		}
	} else {
		sigScript, err = txscript.SignatureScript(
			tx, 0, prevScript, txscript.SigHashAll, priv, true)
		if err != nil {
			return nil, err
		}
	}

	// swap sigs into sigScripts in txins
	if sigScript != nil {
		tx.TxIn[0].SignatureScript = sigScript
	}
	// if wit != nil {
	// 	tx.TxIn[0].Witness = wit
	// 	tx.TxIn[0].SignatureScript = nil
	// }
	return tx, nil
}
|
package utils
import (
"fmt"
"net"
"encoding/json"
"go_code/restudy/netstudy/netstudy01/common/message"
)
var users map[string]net.Conn = make(map[string]net.Conn, 1)
// Process reads one request from the client connection, dispatches it by
// message type, and writes the response. The connection is closed on
// return.
//
// Fixes: the function previously continued after a failed conn.Read
// (processing an undefined buffer) and after a failed inner Unmarshal
// (logging in with a zero-value message); both paths now return early.
func Process(conn net.Conn) {
	defer conn.Close()
	buf := make([]byte, 1096)
	n, err := conn.Read(buf)
	if err != nil {
		fmt.Println("读取数据失败", err)
		// Nothing usable was read; stop here.
		return
	}
	fmt.Printf("读取的数据长度为%d, 内容为%v", n, string(buf[:n]))
	var mes message.Message
	err = json.Unmarshal(buf[:n], &mes)
	if err != nil {
		fmt.Println("获取消息失败", err)
		return
	}
	switch mes.MesType {
	case message.LoginType:
		var loginMes message.LoginMessage
		err = json.Unmarshal([]byte(mes.Data), &loginMes)
		if err != nil {
			fmt.Println("反序列化消息失败", err)
			// Do not attempt a login with a zero-value message.
			return
		}
		loginMesRes := Login(&loginMes, conn)
		buf, err = json.Marshal(loginMesRes)
		if err != nil {
			fmt.Println("序列化登录返回消息失败", err)
			return
		}
		// NOTE(review): the write error is still ignored, matching the
		// original behavior.
		conn.Write(buf)
	}
}
// Login validates the credentials in loginMes against the two hard-coded
// accounts. On success it records the connection in the users map and
// notifies the other online users; the returned message carries the
// serialized result (code 200 on success, 500 otherwise).
func Login(loginMes *message.LoginMessage, conn net.Conn) (mes message.Message) {
	mes.MesType = message.LoginResType
	var loginResMes message.LoginMessageRes
	// Hard-coded test accounts: zhangsan/123456 and lisi/123456.
	if (loginMes.UserId == "zhangsan" && loginMes.Pwd == "123456") || (loginMes.UserId == "lisi" && loginMes.Pwd == "123456") {
		loginResMes.Code = 200
		loginResMes.Desc = "登录成功"
		users[loginMes.UserId] = conn
		sendOnlineToOthers(loginMes.UserId)
	} else {
		loginResMes.Code = 500
		loginResMes.Desc = "用户名或密码错误"
	}
	buf, err := json.Marshal(loginResMes)
	if err != nil {
		fmt.Println("序列化登录返回消息失败", err)
	}
	mes.Data = string(buf)
	return
}
// sendOnlineToOthers broadcasts an online notification for userId to every
// other connected user in the users map.
// NOTE(review): the users map is read here and written in Login without
// synchronization — confirm single-goroutine access or add a mutex.
func sendOnlineToOthers(userId string) {
	for k, v := range users {
		// Skip the user who just came online.
		if k == userId {
			continue
		}
		var mes message.Message
		var onlineMessage message.OnlineMessage
		mes.MesType = message.OnlineMessageType
		onlineMessage.UserId = userId
		msg, err := json.Marshal(onlineMessage)
		fmt.Println("序列化用户上线子信息")
		if err != nil {
			fmt.Println("序列化用户上线子信息失败", err)
			return
		}
		mes.Data = string(msg)
		msg, err = json.Marshal(mes)
		fmt.Println("序列化用户上线主信息")
		if err != nil {
			fmt.Println("序列化用户上线主信息失败", err)
			return
		}
		fmt.Println(string(msg))
		// Write errors are ignored; a dead peer is simply skipped.
		v.Write(msg)
	}
	return
}
// Copyright (c) 2020 - for information on the respective copyright owner
// see the NOTICE file and/or the repository at
// https://github.com/hyperledger-labs/perun-node
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package payment_test
import (
"context"
"errors"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/hyperledger-labs/perun-node"
"github.com/hyperledger-labs/perun-node/app/payment"
"github.com/hyperledger-labs/perun-node/currency"
"github.com/hyperledger-labs/perun-node/internal/mocks"
)
// Test_SendPayChUpdate covers payment.SendPayChUpdate: sending and
// requesting payments, invalid amount/payee validation, and propagation of
// SendChUpdate errors. Fixtures (parts, updatedChInfo,
// wantUpdatedPayChInfo, peerAlias, amountToSend) are defined elsewhere in
// this package.
func Test_SendPayChUpdate(t *testing.T) {
	// Returns a mock with API calls set up for currency and parts.
	newChAPIMock := func() *mocks.ChAPI {
		chAPI := &mocks.ChAPI{}
		chAPI.On("Currency").Return(currency.ETH)
		chAPI.On("Parts").Return(parts)
		return chAPI
	}
	t.Run("happy_sendPayment", func(t *testing.T) {
		var updater perun.StateUpdater
		chAPI := newChAPIMock()
		// Capture the updater passed to SendChUpdate for later inspection.
		chAPI.On("SendChUpdate", context.Background(), mock.MatchedBy(func(gotUpdater perun.StateUpdater) bool {
			updater = gotUpdater
			return true
		})).Return(updatedChInfo, nil)

		gotPayChInfo, gotErr := payment.SendPayChUpdate(context.Background(), chAPI, peerAlias, amountToSend)
		require.NoError(t, gotErr)
		assert.Equal(t, wantUpdatedPayChInfo, gotPayChInfo)
		require.NotNil(t, updater)
		// TODO: Now that State is not available, how to test the updater function ?
		// stateCopy := chInfo.State.Clone()
		// updater(stateCopy)
		// assert.Equal(t, chUpdateNotif.ProposedChInfo.State.Balances, stateCopy.Allocation.Balances)
	})
	t.Run("happy_requestPayment", func(t *testing.T) {
		chAPI := newChAPIMock()
		chAPI.On("SendChUpdate", context.Background(), mock.Anything).Return(updatedChInfo, nil)

		gotPayChInfo, gotErr := payment.SendPayChUpdate(context.Background(), chAPI, perun.OwnAlias, amountToSend)
		require.NoError(t, gotErr)
		require.Equal(t, wantUpdatedPayChInfo, gotPayChInfo)
	})
	t.Run("error_InvalidAmount", func(t *testing.T) {
		chAPI := newChAPIMock()
		chAPI.On("SendChUpdate", context.Background(), mock.Anything).Return(perun.ChInfo{}, nil)

		invalidAmount := "abc"
		_, gotErr := payment.SendPayChUpdate(context.Background(), chAPI, peerAlias, invalidAmount)
		require.True(t, errors.Is(gotErr, perun.ErrInvalidAmount))
	})
	t.Run("error_InvalidPayee", func(t *testing.T) {
		chAPI := newChAPIMock()
		chAPI.On("SendChUpdate", context.Background(), mock.Anything).Return(perun.ChInfo{}, nil)

		invalidPayee := "invalid-payee"
		_, gotErr := payment.SendPayChUpdate(context.Background(), chAPI, invalidPayee, amountToSend)
		require.True(t, errors.Is(gotErr, perun.ErrInvalidPayee))
	})
	t.Run("error_SendChUpdate", func(t *testing.T) {
		chAPI := newChAPIMock()
		chAPI.On("SendChUpdate", context.Background(), mock.Anything).Return(perun.ChInfo{}, assert.AnError)

		_, gotErr := payment.SendPayChUpdate(context.Background(), chAPI, peerAlias, amountToSend)
		require.Error(t, gotErr)
		t.Log(gotErr)
	})
}
// Test_GetPayChInfo verifies that GetPayChInfo translates the channel info
// reported by the channel API into its payment-app representation, for both
// a freshly opened and an already updated channel.
func Test_GetPayChInfo(t *testing.T) {
	t.Run("happy1", func(t *testing.T) {
		api := &mocks.ChAPI{}
		api.On("GetChInfo").Return(openedChInfo)
		assert.Equal(t, wantOpenedPayChInfo, payment.GetPayChInfo(api))
	})
	t.Run("happy2", func(t *testing.T) {
		api := &mocks.ChAPI{}
		api.On("GetChInfo").Return(updatedChInfo)
		assert.Equal(t, wantUpdatedPayChInfo, payment.GetPayChInfo(api))
	})
}
// Test_SubPayChUpdates verifies that SubPayChUpdates subscribes on the
// channel API, and that the notifier it registers translates each incoming
// perun.ChUpdateNotif (open / final / closed, with and without error) into
// the payment-app notification type.
func Test_SubPayChUpdates(t *testing.T) {
	t.Run("happy", func(t *testing.T) {
		var notifier perun.ChUpdateNotifier
		var notif payment.PayChUpdateNotif
		// dummyNotifier captures the translated notification so the subtests
		// below can assert on it.
		dummyNotifier := func(gotNotif payment.PayChUpdateNotif) {
			notif = gotNotif
		}
		chAPI := &mocks.ChAPI{}
		// Capture the notifier that SubPayChUpdates registers with the channel
		// API; the subtests invoke it directly with crafted notifications.
		chAPI.On("SubChUpdates", mock.MatchedBy(func(gotNotifier perun.ChUpdateNotifier) bool {
			notifier = gotNotifier
			return true
		})).Return(nil)
		gotErr := payment.SubPayChUpdates(chAPI, dummyNotifier)
		assert.NoError(t, gotErr)
		require.NotNil(t, notifier)
		// Test the notifier function, that interprets the notification for payment app.
		t.Run("notifier_typeOpen", func(t *testing.T) {
			notifier(chUpdateNotif)
			require.Equal(t, wantPayChUpdateNotif, notif)
		})
		t.Run("notifier_typeFinal", func(t *testing.T) {
			chUpdateNotifFinal := chUpdateNotif
			chUpdateNotifFinal.Type = perun.ChUpdateTypeFinal
			wantPayChUpdateNotifFinal := wantPayChUpdateNotif
			wantPayChUpdateNotifFinal.Type = perun.ChUpdateTypeFinal
			notifier(chUpdateNotifFinal)
			require.Equal(t, wantPayChUpdateNotifFinal, notif)
		})
		t.Run("notifier_typeClosed", func(t *testing.T) {
			chUpdateNotifClosed := chUpdateNotif
			chUpdateNotifClosed.Type = perun.ChUpdateTypeClosed
			// For closed notifications the current channel info carries the
			// final state, hence CurrChInfo is set to the proposed info.
			chUpdateNotifClosed.CurrChInfo = chUpdateNotif.ProposedChInfo
			wantPayChUpdateNotifClosed := wantPayChUpdateNotif
			wantPayChUpdateNotifClosed.Type = perun.ChUpdateTypeClosed
			notifier(chUpdateNotifClosed)
			require.Equal(t, wantPayChUpdateNotifClosed, notif)
		})
		t.Run("notifier_typeClosedWithError", func(t *testing.T) {
			chUpdateNotifClosed := chUpdateNotif
			chUpdateNotifClosed.Type = perun.ChUpdateTypeClosed
			chUpdateNotifClosed.CurrChInfo = chUpdateNotif.ProposedChInfo
			// The error string on a closed notification must be passed through.
			chUpdateNotifClosed.Error = assert.AnError.Error()
			wantPayChUpdateNotifClosed := wantPayChUpdateNotif
			wantPayChUpdateNotifClosed.Type = perun.ChUpdateTypeClosed
			wantPayChUpdateNotifClosed.Error = assert.AnError.Error()
			notifier(chUpdateNotifClosed)
			require.Equal(t, wantPayChUpdateNotifClosed, notif)
		})
	})
	t.Run("error", func(t *testing.T) {
		chAPI := &mocks.ChAPI{}
		chAPI.On("SubChUpdates", mock.Anything).Return(assert.AnError)
		dummyNotifier := func(notif payment.PayChUpdateNotif) {}
		gotErr := payment.SubPayChUpdates(chAPI, dummyNotifier)
		assert.Error(t, gotErr)
		t.Log(gotErr)
	})
}
// Test_UnsubPayChUpdates verifies that UnsubPayChUpdates forwards the
// unsubscribe call to the channel API and propagates its result.
func Test_UnsubPayChUpdates(t *testing.T) {
	t.Run("happy", func(t *testing.T) {
		api := &mocks.ChAPI{}
		api.On("UnsubChUpdates").Return(nil)
		assert.NoError(t, payment.UnsubPayChUpdates(api))
	})
	t.Run("error", func(t *testing.T) {
		api := &mocks.ChAPI{}
		api.On("UnsubChUpdates").Return(assert.AnError)
		assert.Error(t, payment.UnsubPayChUpdates(api))
	})
}
// nolint: dupl // not duplicate of Test_RespondPayChProposal.
// Test_RespondPayChUpdate verifies that RespondPayChUpdate forwards the
// accept/reject decision to the channel API, translates the returned channel
// info on acceptance, and propagates errors from the API.
func Test_RespondPayChUpdate(t *testing.T) {
	updateID := "update-id-1"
	t.Run("happy_accept", func(t *testing.T) {
		accept := true
		chAPI := &mocks.ChAPI{}
		chAPI.On("RespondChUpdate", context.Background(), updateID, accept).Return(updatedChInfo, nil)
		gotUpdatedPayChInfo, gotErr := payment.RespondPayChUpdate(context.Background(), chAPI, updateID, accept)
		assert.NoError(t, gotErr)
		assert.Equal(t, wantUpdatedPayChInfo, gotUpdatedPayChInfo)
	})
	t.Run("happy_reject", func(t *testing.T) {
		accept := false
		chAPI := &mocks.ChAPI{}
		chAPI.On("RespondChUpdate", context.Background(), updateID, accept).Return(perun.ChInfo{}, nil)
		// Rejecting is still a success; the returned info is not asserted on.
		_, gotErr := payment.RespondPayChUpdate(context.Background(), chAPI, updateID, accept)
		assert.NoError(t, gotErr)
	})
	t.Run("error", func(t *testing.T) {
		accept := true
		chAPI := &mocks.ChAPI{}
		chAPI.On("RespondChUpdate", context.Background(), updateID, accept).Return(perun.ChInfo{}, assert.AnError)
		_, gotErr := payment.RespondPayChUpdate(context.Background(), chAPI, updateID, accept)
		assert.Error(t, gotErr)
		t.Log(gotErr)
	})
}
// Test_ClosePayCh verifies that ClosePayCh closes the channel via the channel
// API, translates the final channel info, and propagates any error.
func Test_ClosePayCh(t *testing.T) {
	t.Run("happy", func(t *testing.T) {
		api := &mocks.ChAPI{}
		api.On("Close", context.Background()).Return(updatedChInfo, nil)
		info, err := payment.ClosePayCh(context.Background(), api)
		require.NoError(t, err)
		assert.Equal(t, wantUpdatedPayChInfo, info)
	})
	t.Run("error", func(t *testing.T) {
		api := &mocks.ChAPI{}
		api.On("Close", context.Background()).Return(updatedChInfo, assert.AnError)
		_, err := payment.ClosePayCh(context.Background(), api)
		require.Error(t, err)
		t.Log(err)
	})
}
|
package frame256x288
import (
"bytes"
"encoding/base64"
"fmt"
"image/png"
"strings"
)
// String returns frame serialized in “IMAGE:<base64-encoded-png>” format.
//
// The usefulness of this serialized format is, if you just output that on
// the Go Playground — https://play.golang.org/ — then it will display it
// as an image.
func (receiver Slice) String() string {
	// Render the frame as PNG first, so an encoding failure can be reported
	// without producing a partial "IMAGE:" string.
	var pngBuffer bytes.Buffer
	if err := png.Encode(&pngBuffer, receiver); err != nil {
		return fmt.Sprintf("ERROR:%s", err)
	}

	var builder strings.Builder
	builder.WriteString("IMAGE:")
	builder.WriteString(base64.StdEncoding.EncodeToString(pngBuffer.Bytes()))
	return builder.String()
}
|
package token
import (
"net/http"
"os"
"time"
"app/database"
"app/models"
"app/utils/response"
jwt "github.com/dgrijalva/jwt-go"
)
// Get issues a new HS256-signed JWT, persists it in the token collection,
// and writes it back to the client via response.Ok.
func Get(writer http.ResponseWriter, request *http.Request) {
	jwtToken := jwt.New(jwt.SigningMethodHS256)
	// Assumes jwt.New returns a token carrying jwt.MapClaims (jwt-go's
	// default) — the assertion panics otherwise. TODO confirm.
	claims := jwtToken.Claims.(jwt.MapClaims)
	claims["authorized"] = true
	// NOTE(review): "expired_at" is not the standard "exp" claim, so jwt-go's
	// built-in expiry validation will NOT enforce this timestamp — confirm
	// the verifying side checks it explicitly.
	claims["expired_at"] = time.Now().Add(time.Minute * 30).Unix()
	// Sign with the shared secret from the environment. An unset JWT_TOKEN
	// still "succeeds" here, yielding tokens signed with an empty key.
	tokenString, err := jwtToken.SignedString([]byte(os.Getenv("JWT_TOKEN")))
	if err != nil {
		response.InternalServerError(writer)
		return
	}
	token := &models.Token{
		Token: tokenString,
	}
	// Persist the token; the session is closed via defer to release the
	// database connection on every return path.
	session, collection := database.GetCollection(models.TableToken)
	defer session.Close()
	if err := collection.Insert(token); err != nil {
		response.InternalServerError(writer)
		return
	}
	response.Ok(writer, token)
}
|
package common
import (
"fmt"
"os"
)
// FileExist reports whether the named file or directory exists.
// Any Stat error other than "does not exist" (e.g. permission denied) is
// treated as "exists", matching the original behavior.
func FileExist(name string) bool {
	if _, err := os.Stat(name); err != nil && os.IsNotExist(err) {
		return false
	}
	return true
}
// CheckFileExists reports whether the file at path exists.
// Any Stat error other than "does not exist" (e.g. permission denied) is
// treated as "exists", consistent with FileExist.
func CheckFileExists(path string) bool {
	// NOTE(review): debug leftover; kept so the package's `fmt` import and
	// the function's observable output stay unchanged — consider removing.
	fmt.Println(path)
	if _, err := os.Stat(path); err != nil {
		// BUG fix: the original tested os.IsExist, which never matches a
		// Stat "not found" error, so missing files were reported as present.
		if os.IsNotExist(err) {
			return false
		}
	}
	return true
}
|
package gen
import (
"fmt"
"errors"
"gopkg.in/yaml.v2"
"path/filepath"
)
// AppTemplateFileMode is the permission mode applied to every generated
// directory and file (0755: rwxr-xr-x).
const AppTemplateFileMode = 0755

var (
	// ProjectPath is the root directory of the generated project,
	// "<path>/<name>" taken from the template's project section.
	ProjectPath string
	// TemplatePath is the directory holding the generator's code templates,
	// resolved to <cwd>/gen/tml by CreateProject.
	TemplatePath string
)

// transactions accumulates the ordered file-system operations; they are
// executed in order (0..n) by ExecTransactions at the end of ParseTemplate.
var transactions AppTransactionStack
// CreateProject queues the transaction that creates the project's root
// directory, derived from the template's required project section.
//
// As a side effect it initializes the package-level TemplatePath (the
// directory holding the code templates, <cwd>/gen/tml) and ProjectPath
// ("<path>/<name>" from the project section) used by the Render* steps.
// It returns an error when the template has no project section or when the
// working directory cannot be resolved.
func CreateProject(template AppTemplate) error {
	// The project section is required; bail out early when missing
	// (idiomatic `!ok` instead of the original `ok == true` comparison).
	project, ok := template[KeyWordProject]
	if !ok {
		return errors.New("template has no project section")
	}

	base, err := filepath.Abs("")
	if err != nil {
		return err
	}
	TemplatePath = base + "/gen/tml"

	data := project.(AppTemplate)
	ProjectPath = fmt.Sprintf("%s/%s", data[KeyWordPath], data[KeyWordName])
	transactions = append(transactions, &AppTransactionCreateDir{Path: ProjectPath, Mode: AppTemplateFileMode})
	return nil
}
// RenderConfig queues the transactions that generate the project's config
// package: one YAML file per environment under config/yaml, a Go settings
// struct rendered from the union of all environments, and an environment.go
// rendered from the generator's template. The environment section is
// optional; when absent this function is a no-op.
func RenderConfig(template AppTemplate) error {
	// environment section is not required
	environment, ok := template[KeyWordEnvironment]
	if !ok {
		return nil
	}

	configPath := ProjectPath + "/config"
	yamlConfigPath := configPath + "/yaml"
	// Create config dirs.
	transactions = append(transactions, &AppTransactionCreateDir{Path: configPath, Mode: AppTemplateFileMode})
	transactions = append(transactions, &AppTransactionCreateDir{Path: yamlConfigPath, Mode: AppTemplateFileMode})

	// Merge every environment into one template so the generated settings
	// struct covers the keys of all environments.
	var wholeTemplate = make(AppTemplate)
	// Create config files.
	for key, conf := range environment.(AppTemplate) {
		// BUG fix: skip nil environment entries BEFORE type-asserting and
		// merging them — the original called conf.(AppTemplate) first,
		// which panics when an environment entry is nil.
		if conf == nil {
			continue
		}
		wholeTemplate.Merge(conf.(AppTemplate))
		filePath := yamlConfigPath + "/" + key.(string) + ".yaml"
		transactions = append(transactions, &AppTransactionCreateFile{Path: filePath})
		data, err := yaml.Marshal(conf)
		if err != nil {
			return err
		}
		transactions = append(transactions, &AppTransactionAppendFile{Path: filePath, Data: data})
	}

	// Render map[interface{}]interface{} to a Go type declaration string.
	str := CreateTypeStructure(wholeTemplate, KeyWordSettings, 0)
	configFilePath := configPath + "/" + KeyWordConfig + ".go"
	transactions = append(transactions, &AppTransactionCreateFile{Path: configFilePath})
	str = fmt.Sprintf("package %s\n\n%s", KeyWordConfig, str)
	transactions = append(transactions, &AppTransactionAppendFile{Path: configFilePath, Data: []byte(str)})

	// environment.go is rendered from the template shipped with the generator.
	envPath := configPath + "/environment.go"
	envTemplatePath := TemplatePath + "/config/environment.tml"
	templateData := struct {
		Package    string
		ConfigType string
	}{Package: "config", ConfigType: "settings"}
	transactions = append(transactions, &AppTransactionCreateFileFromTemplate{
		Path:         envPath,
		TemplatePath: envTemplatePath,
		Data:         templateData,
	})
	return nil
}
// CreateMainFile is a placeholder for generating the project's main file.
// TODO: not implemented yet — it queues no transactions and always succeeds.
func CreateMainFile(template AppTemplate) error {
	return nil
}
// RenderLogger queues the transactions that generate the helper/logging
// package: the helper and helper/logging directories plus a logger.go
// rendered from the generator's logger template with a single fixed "Query"
// logger. The template argument is currently unused.
func RenderLogger(template AppTemplate) error {
	helperDir := ProjectPath + "/helper"
	loggingDir := helperDir + "/logging"
	transactions = append(transactions,
		&AppTransactionCreateDir{Path: helperDir, Mode: AppTemplateFileMode},
		&AppTransactionCreateDir{Path: loggingDir, Mode: AppTemplateFileMode},
	)

	templateData := struct {
		Loggers []string
	}{Loggers: []string{"Query"}}
	transactions = append(transactions, &AppTransactionCreateFileFromTemplate{
		Path:         loggingDir + "/logger.go",
		TemplatePath: TemplatePath + "/helper/logging/logger.tml",
		Data:         templateData,
	})
	return nil
}
// FormatProject queues a transaction that formats all generated sources
// under the project root (the "/..." suffix covers the whole tree).
func FormatProject() error {
	transactions = append(transactions, &AppTransactionFormatProject{Path: ProjectPath + "/..."})
	return nil
}
// ParseTemplate drives the full code-generation pipeline: it populates the
// transaction stack by running the generation steps in order (project,
// config, logger, main file, formatting) and then executes every queued
// transaction from 0 to n.
func ParseTemplate(template AppTemplate) error {
	// Steps that consume the template, in their required order.
	steps := []func(AppTemplate) error{
		CreateProject,
		RenderConfig,
		RenderLogger,
		CreateMainFile,
	}
	for _, step := range steps {
		if err := step(template); err != nil {
			return err
		}
	}
	// Queue the final formatting pass over the generated tree.
	if err := FormatProject(); err != nil {
		return err
	}
	// Execute every queued transaction.
	return ExecTransactions(transactions)
}
|
// Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package batcheval_test
import (
"bytes"
"context"
"os"
"regexp"
"sort"
"strings"
"testing"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/batcheval"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/storage"
"github.com/cockroachdb/cockroach/pkg/storage/enginepb"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
"github.com/cockroachdb/errors"
"github.com/kr/pretty"
)
// createTestPebbleEngine returns a new in-memory Pebble storage engine.
// The 1<<20 (1 MiB) argument is presumably the in-memory store's size
// bound — see storage.NewInMemForTesting for its exact semantics.
func createTestPebbleEngine() storage.Engine {
	return storage.NewInMemForTesting(context.Background(), roachpb.Attributes{}, 1<<20)
}
// engineImpls lists the storage engine implementations the tests below run
// against; currently only Pebble.
var engineImpls = []struct {
	name string
	create func() storage.Engine
}{
	{"pebble", createTestPebbleEngine},
}
// singleKVSSTable builds an SST containing exactly one key/value pair and
// returns its raw bytes.
func singleKVSSTable(key storage.MVCCKey, value []byte) ([]byte, error) {
	file := &storage.MemFile{}
	writer := storage.MakeBackupSSTWriter(file)
	defer writer.Close()
	if err := writer.Put(key, value); err != nil {
		return nil, err
	}
	if err := writer.Finish(); err != nil {
		return nil, err
	}
	return file.Data(), nil
}
// TestDBAddSSTable runs the AddSSTable client-API scenarios against both an
// in-memory and an on-disk store. Only the on-disk variant passes a store to
// runTestDBAddSSTable, enabling the file-system ingestion checks there.
func TestDBAddSSTable(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	t.Run("store=in-memory", func(t *testing.T) {
		s, _, db := serverutils.StartServer(t, base.TestServerArgs{Insecure: true})
		ctx := context.Background()
		defer s.Stopper().Stop(ctx)
		tr := s.ClusterSettings().Tracer
		// nil store: skip the on-disk ingestion checks.
		runTestDBAddSSTable(ctx, t, db, tr, nil)
	})
	t.Run("store=on-disk", func(t *testing.T) {
		dir, dirCleanupFn := testutils.TempDir(t)
		defer dirCleanupFn()
		// Use an on-disk store spec so ingested SSTs reach the file system.
		storeSpec := base.DefaultTestStoreSpec
		storeSpec.InMemory = false
		storeSpec.Path = dir
		s, _, db := serverutils.StartServer(t, base.TestServerArgs{
			Insecure: true,
			StoreSpecs: []base.StoreSpec{storeSpec},
		})
		ctx := context.Background()
		defer s.Stopper().Stop(ctx)
		store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID())
		if err != nil {
			t.Fatal(err)
		}
		tr := s.ClusterSettings().Tracer
		runTestDBAddSSTable(ctx, t, db, tr, store)
	})
}
// runTestDBAddSSTable drives AddSSTable through the client API: request-span
// validation, a traced initial ingest, MVCC shadowing by an older timestamp,
// idempotent re-ingestion, ingestion via a regular write batch, and
// key/value checksum validation.
// if store != nil, assume it is on-disk and check ingestion semantics.
func runTestDBAddSSTable(
	ctx context.Context, t *testing.T, db *kv.DB, tr *tracing.Tracer, store *kvserver.Store,
) {
	tr.TestingRecordAsyncSpans() // we assert on async span traces in this test
	// Initial ingest: reject out-of-range request spans, then ingest
	// "bb"@2 => "1" and verify the trace and (on-disk only) the SST file.
	{
		key := storage.MVCCKey{Key: []byte("bb"), Timestamp: hlc.Timestamp{WallTime: 2}}
		data, err := singleKVSSTable(key, roachpb.MakeValueFromString("1").RawBytes)
		if err != nil {
			t.Fatalf("%+v", err)
		}
		// Key is before the range in the request span.
		if err := db.AddSSTable(
			ctx, "d", "e", data, false /* disallowShadowing */, nil /* stats */, false, /* ingestAsWrites */
		); !testutils.IsError(err, "not in request range") {
			t.Fatalf("expected request range error got: %+v", err)
		}
		// Key is after the range in the request span.
		if err := db.AddSSTable(
			ctx, "a", "b", data, false /* disallowShadowing */, nil /* stats */, false, /* ingestAsWrites */
		); !testutils.IsError(err, "not in request range") {
			t.Fatalf("expected request range error got: %+v", err)
		}
		// Do an initial ingest.
		ingestCtx, collect, cancel := tracing.ContextWithRecordingSpan(ctx, tr, "test-recording")
		defer cancel()
		if err := db.AddSSTable(
			ingestCtx, "b", "c", data, false /* disallowShadowing */, nil /* stats */, false, /* ingestAsWrites */
		); err != nil {
			t.Fatalf("%+v", err)
		}
		// The recorded trace must show the sideloaded-ingestion path.
		formatted := collect().String()
		if err := testutils.MatchEach(formatted,
			"evaluating AddSSTable",
			"sideloadable proposal detected",
			"ingested SSTable at index",
		); err != nil {
			t.Fatal(err)
		}
		if store != nil {
			// Look for the ingested path and verify it still exists.
			re := regexp.MustCompile(`ingested SSTable at index \d+, term \d+: (\S+)`)
			match := re.FindStringSubmatch(formatted)
			if len(match) != 2 {
				t.Fatalf("failed to extract ingested path from message %q,\n got: %v", formatted, match)
			}
			// The on-disk paths have `.ingested` appended unlike in-memory.
			suffix := ".ingested"
			if _, err := os.Stat(strings.TrimSuffix(match[1], suffix)); err != nil {
				t.Fatalf("%q file missing after ingest: %+v", match[1], err)
			}
		}
		if r, err := db.Get(ctx, "bb"); err != nil {
			t.Fatalf("%+v", err)
		} else if expected := []byte("1"); !bytes.Equal(expected, r.ValueBytes()) {
			t.Errorf("expected %q, got %q", expected, r.ValueBytes())
		}
	}
	// Check that ingesting a key with an earlier mvcc timestamp doesn't affect
	// the value returned by Get.
	{
		key := storage.MVCCKey{Key: []byte("bb"), Timestamp: hlc.Timestamp{WallTime: 1}}
		data, err := singleKVSSTable(key, roachpb.MakeValueFromString("2").RawBytes)
		if err != nil {
			t.Fatalf("%+v", err)
		}
		if err := db.AddSSTable(
			ctx, "b", "c", data, false /* disallowShadowing */, nil /* stats */, false, /* ingestAsWrites */
		); err != nil {
			t.Fatalf("%+v", err)
		}
		// "bb"@2 => "1" from the first ingest still wins over "bb"@1 => "2".
		if r, err := db.Get(ctx, "bb"); err != nil {
			t.Fatalf("%+v", err)
		} else if expected := []byte("1"); !bytes.Equal(expected, r.ValueBytes()) {
			t.Errorf("expected %q, got %q", expected, r.ValueBytes())
		}
		if store != nil {
			metrics := store.Metrics()
			if expected, got := int64(2), metrics.AddSSTableApplications.Count(); expected != got {
				t.Fatalf("expected %d sst ingestions, got %d", expected, got)
			}
		}
	}
	// Key range in request span is not empty. First time through a different
	// key is present. Second time through checks the idempotency.
	{
		key := storage.MVCCKey{Key: []byte("bc"), Timestamp: hlc.Timestamp{WallTime: 1}}
		data, err := singleKVSSTable(key, roachpb.MakeValueFromString("3").RawBytes)
		if err != nil {
			t.Fatalf("%+v", err)
		}
		var metrics *kvserver.StoreMetrics
		var before int64
		if store != nil {
			metrics = store.Metrics()
			before = metrics.AddSSTableApplicationCopies.Count()
		}
		for i := 0; i < 2; i++ {
			ingestCtx, collect, cancel := tracing.ContextWithRecordingSpan(ctx, tr, "test-recording")
			defer cancel()
			if err := db.AddSSTable(
				ingestCtx, "b", "c", data, false /* disallowShadowing */, nil /* stats */, false, /* ingestAsWrites */
			); err != nil {
				t.Fatalf("%+v", err)
			}
			if err := testutils.MatchEach(collect().String(),
				"evaluating AddSSTable",
				"sideloadable proposal detected",
				"ingested SSTable at index",
			); err != nil {
				t.Fatal(err)
			}
			if r, err := db.Get(ctx, "bb"); err != nil {
				t.Fatalf("%+v", err)
			} else if expected := []byte("1"); !bytes.Equal(expected, r.ValueBytes()) {
				t.Errorf("expected %q, got %q", expected, r.ValueBytes())
			}
			if r, err := db.Get(ctx, "bc"); err != nil {
				t.Fatalf("%+v", err)
			} else if expected := []byte("3"); !bytes.Equal(expected, r.ValueBytes()) {
				t.Errorf("expected %q, got %q", expected, r.ValueBytes())
			}
		}
		if store != nil {
			if expected, got := int64(4), metrics.AddSSTableApplications.Count(); expected != got {
				t.Fatalf("expected %d sst ingestions, got %d", expected, got)
			}
			// The second time though we had to make a copy of the SST since rocks saw
			// existing data (from the first time), and rejected the no-modification
			// attempt.
			if after := metrics.AddSSTableApplicationCopies.Count(); before != after {
				t.Fatalf("expected sst copies not to increase, %d before %d after", before, after)
			}
		}
	}
	// ... and doing the same thing but via write-batch works the same.
	{
		key := storage.MVCCKey{Key: []byte("bd"), Timestamp: hlc.Timestamp{WallTime: 1}}
		data, err := singleKVSSTable(key, roachpb.MakeValueFromString("3").RawBytes)
		if err != nil {
			t.Fatalf("%+v", err)
		}
		var metrics *kvserver.StoreMetrics
		var before int64
		if store != nil {
			metrics = store.Metrics()
			before = metrics.AddSSTableApplications.Count()
		}
		for i := 0; i < 2; i++ {
			ingestCtx, collect, cancel := tracing.ContextWithRecordingSpan(ctx, tr, "test-recording")
			defer cancel()
			// ingestAsWrites=true: the SST contents are applied as a regular
			// write batch, so no sideloaded ingestion appears in the trace.
			if err := db.AddSSTable(
				ingestCtx, "b", "c", data, false /* disallowShadowing */, nil /* stats */, true, /* ingestAsWrites */
			); err != nil {
				t.Fatalf("%+v", err)
			}
			if err := testutils.MatchEach(collect().String(),
				"evaluating AddSSTable",
				"via regular write batch",
			); err != nil {
				t.Fatal(err)
			}
			if r, err := db.Get(ctx, "bb"); err != nil {
				t.Fatalf("%+v", err)
			} else if expected := []byte("1"); !bytes.Equal(expected, r.ValueBytes()) {
				t.Errorf("expected %q, got %q", expected, r.ValueBytes())
			}
			if r, err := db.Get(ctx, "bd"); err != nil {
				t.Fatalf("%+v", err)
			} else if expected := []byte("3"); !bytes.Equal(expected, r.ValueBytes()) {
				t.Errorf("expected %q, got %q", expected, r.ValueBytes())
			}
		}
		if store != nil {
			// Write-batch application must not bump the SST ingestion counter.
			if expected, got := before, metrics.AddSSTableApplications.Count(); expected != got {
				t.Fatalf("expected %d sst ingestions, got %d", expected, got)
			}
		}
	}
	// Invalid key/value entry checksum.
	{
		key := storage.MVCCKey{Key: []byte("bb"), Timestamp: hlc.Timestamp{WallTime: 1}}
		value := roachpb.MakeValueFromString("1")
		// Computing the checksum over the wrong key makes the entry invalid.
		value.InitChecksum([]byte("foo"))
		data, err := singleKVSSTable(key, value.RawBytes)
		if err != nil {
			t.Fatalf("%+v", err)
		}
		if err := db.AddSSTable(
			ctx, "b", "c", data, false /* disallowShadowing */, nil /* stats */, false, /* ingestAsWrites */
		); !testutils.IsError(err, "invalid checksum") {
			t.Fatalf("expected 'invalid checksum' error got: %+v", err)
		}
	}
}
// strKv is a compact notation for an MVCC key/value in these tests:
// key string, wall-time timestamp, and value (empty string => nil Value,
// i.e. a deletion tombstone).
type strKv struct {
	k string
	ts int64
	v string
}
// mvccKVsFromStrs converts the compact strKv test notation into
// storage.MVCCKeyValue entries sorted in MVCC key order. An empty value
// string produces a nil Value (a deletion tombstone in these tests).
func mvccKVsFromStrs(in []strKv) []storage.MVCCKeyValue {
	out := make([]storage.MVCCKeyValue, 0, len(in))
	for _, s := range in {
		kv := storage.MVCCKeyValue{
			Key: storage.MVCCKey{
				Key:       []byte(s.k),
				Timestamp: hlc.Timestamp{WallTime: s.ts},
			},
		}
		if s.v != "" {
			kv.Value = roachpb.MakeValueFromBytes([]byte(s.v)).RawBytes
		}
		out = append(out, kv)
	}
	sort.Slice(out, func(i, j int) bool { return out[i].Key.Less(out[j].Key) })
	return out
}
// TestAddSSTableMVCCStats checks that the MVCC stats delta produced by
// EvalAddSSTable, applied to stats computed before ingestion, matches stats
// recomputed from scratch after the SST is actually ingested (modulo a
// hand-computed correction for shadowed/replaced entries). It also checks
// that a client-supplied MVCCStats is taken verbatim, with
// ContainsEstimates set.
func TestAddSSTableMVCCStats(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	ctx := context.Background()
	for _, engineImpl := range engineImpls {
		t.Run(engineImpl.name, func(t *testing.T) {
			e := engineImpl.create()
			defer e.Close()
			// Seed the engine with existing data, including deletion
			// tombstones (empty values).
			for _, kv := range mvccKVsFromStrs([]strKv{
				{"A", 1, "A"},
				{"a", 1, "a"},
				{"a", 6, ""},
				{"b", 5, "bb"},
				{"c", 6, "ccccccccccccccccccccccccccccccccccccccccccccc"}, // key 4b, 50b, live 64b
				{"d", 1, "d"},
				{"d", 2, ""},
				{"e", 1, "e"},
				{"z", 2, "zzzzzz"},
			}) {
				if err := e.PutMVCC(kv.Key, kv.Value); err != nil {
					t.Fatalf("%+v", err)
				}
			}
			sstKVs := mvccKVsFromStrs([]strKv{
				{"a", 2, "aa"},    // mvcc-shadowed within SST.
				{"a", 4, "aaaaaa"}, // mvcc-shadowed by existing delete.
				{"c", 6, "ccc"},   // same TS as existing, LSM-shadows existing.
				{"d", 4, "dddd"},  // mvcc-shadow existing deleted d.
				{"e", 4, "eeee"},  // mvcc-shadow existing 1b.
				{"j", 2, "jj"},    // no collision – via MVCC or LSM – with existing.
			})
			// delta corrects for what EvalAddSSTable over-counts: it assumes
			// all SST entries are new, but several shadow or replace existing
			// entries.
			var delta enginepb.MVCCStats
			// the sst will think it added 4 keys here, but a, c, and e shadow or are shadowed.
			delta.LiveCount = -3
			delta.LiveBytes = -109
			// the sst will think it added 5 keys, but only j is new so 4 are over-counted.
			delta.KeyCount = -4
			delta.KeyBytes = -20
			// the sst will think it added 6 values, but since one was a perfect (key+ts)
			// collision, it *replaced* the existing value and is over-counted.
			delta.ValCount = -1
			delta.ValBytes = -50
			// Add in a random metadata key.
			ts := hlc.Timestamp{WallTime: 7}
			txn := roachpb.MakeTransaction(
				"test",
				nil, // baseKey
				roachpb.NormalUserPriority,
				ts,
				base.DefaultMaxClockOffset.Nanoseconds(),
			)
			if err := storage.MVCCPut(
				ctx, e, nil, []byte("i"), ts,
				roachpb.MakeValueFromBytes([]byte("it")),
				&txn,
			); err != nil {
				// A WriteIntentError here is tolerated; anything else fails.
				if !errors.HasType(err, (*roachpb.WriteIntentError)(nil)) {
					t.Fatalf("%+v", err)
				}
			}
			// After EvalAddSSTable, cArgs.Stats contains a diff to the existing
			// stats. Make sure recomputing from scratch gets the same answer as
			// applying the diff to the stats.
			beforeStats := func() enginepb.MVCCStats {
				iter := e.NewMVCCIterator(storage.MVCCKeyAndIntentsIterKind, storage.IterOptions{UpperBound: roachpb.KeyMax})
				defer iter.Close()
				beforeStats, err := storage.ComputeStatsForRange(iter, keys.LocalMax, roachpb.KeyMax, 10)
				if err != nil {
					t.Fatalf("%+v", err)
				}
				return beforeStats
			}()
			// mkSST builds an SST from the given key/value pairs.
			mkSST := func(kvs []storage.MVCCKeyValue) []byte {
				sstFile := &storage.MemFile{}
				sst := storage.MakeBackupSSTWriter(sstFile)
				defer sst.Close()
				for _, kv := range kvs {
					if err := sst.Put(kv.Key, kv.Value); err != nil {
						t.Fatalf("%+v", err)
					}
				}
				if err := sst.Finish(); err != nil {
					t.Fatalf("%+v", err)
				}
				return sstFile.Data()
			}
			sstBytes := mkSST(sstKVs)
			cArgs := batcheval.CommandArgs{
				Header: roachpb.Header{
					Timestamp: hlc.Timestamp{WallTime: 7},
				},
				Args: &roachpb.AddSSTableRequest{
					RequestHeader: roachpb.RequestHeader{Key: keys.MinKey, EndKey: keys.MaxKey},
					Data: sstBytes,
				},
				Stats: &enginepb.MVCCStats{},
			}
			if _, err := batcheval.EvalAddSSTable(ctx, e, cArgs, nil); err != nil {
				t.Fatalf("%+v", err)
			}
			evaledStats := beforeStats
			evaledStats.Add(*cArgs.Stats)
			// Ingest the SST for real so the after-stats can be recomputed.
			if err := e.WriteFile("sst", sstBytes); err != nil {
				t.Fatalf("%+v", err)
			}
			if err := e.IngestExternalFiles(ctx, []string{"sst"}); err != nil {
				t.Fatalf("%+v", err)
			}
			afterStats := func() enginepb.MVCCStats {
				iter := e.NewMVCCIterator(storage.MVCCKeyAndIntentsIterKind, storage.IterOptions{UpperBound: roachpb.KeyMax})
				defer iter.Close()
				afterStats, err := storage.ComputeStatsForRange(iter, keys.LocalMax, roachpb.KeyMax, 10)
				if err != nil {
					t.Fatalf("%+v", err)
				}
				return afterStats
			}()
			evaledStats.Add(delta)
			evaledStats.ContainsEstimates = 0
			if !afterStats.Equal(evaledStats) {
				t.Errorf("mvcc stats mismatch: diff(expected, actual): %s", pretty.Diff(afterStats, evaledStats))
			}
			// When the request carries precomputed MVCCStats, EvalAddSSTable
			// must use them verbatim and mark the result as an estimate.
			cArgsWithStats := batcheval.CommandArgs{
				Header: roachpb.Header{Timestamp: hlc.Timestamp{WallTime: 7}},
				Args: &roachpb.AddSSTableRequest{
					RequestHeader: roachpb.RequestHeader{Key: keys.MinKey, EndKey: keys.MaxKey},
					Data: mkSST([]storage.MVCCKeyValue{{
						Key: storage.MVCCKey{Key: roachpb.Key("zzzzzzz"), Timestamp: ts},
						Value: roachpb.MakeValueFromBytes([]byte("zzz")).RawBytes,
					}}),
					MVCCStats: &enginepb.MVCCStats{KeyCount: 10},
				},
				Stats: &enginepb.MVCCStats{},
			}
			if _, err := batcheval.EvalAddSSTable(ctx, e, cArgsWithStats, nil); err != nil {
				t.Fatalf("%+v", err)
			}
			expected := enginepb.MVCCStats{ContainsEstimates: 1, KeyCount: 10}
			if got := *cArgsWithStats.Stats; got != expected {
				t.Fatalf("expected %v got %v", expected, got)
			}
		})
	}
}
func TestAddSSTableDisallowShadowing(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
ctx := context.Background()
for _, engineImpl := range engineImpls {
t.Run(engineImpl.name, func(t *testing.T) {
e := engineImpl.create()
defer e.Close()
for _, kv := range mvccKVsFromStrs([]strKv{
{"a", 2, "aa"},
{"b", 1, "bb"},
{"b", 6, ""},
{"g", 5, "gg"},
{"r", 1, "rr"},
{"y", 1, "yy"},
{"y", 2, ""},
{"y", 5, "yyy"},
{"z", 2, "zz"},
}) {
if err := e.PutMVCC(kv.Key, kv.Value); err != nil {
t.Fatalf("%+v", err)
}
}
getSSTBytes := func(sstKVs []storage.MVCCKeyValue) []byte {
sstFile := &storage.MemFile{}
sst := storage.MakeBackupSSTWriter(sstFile)
defer sst.Close()
for _, kv := range sstKVs {
if err := sst.Put(kv.Key, kv.Value); err != nil {
t.Fatalf("%+v", err)
}
}
if err := sst.Finish(); err != nil {
t.Fatalf("%+v", err)
}
return sstFile.Data()
}
getStats := func(startKey, endKey roachpb.Key, data []byte) enginepb.MVCCStats {
dataIter, err := storage.NewMemSSTIterator(data, true)
if err != nil {
return enginepb.MVCCStats{}
}
defer dataIter.Close()
stats, err := storage.ComputeStatsForRange(dataIter, startKey, endKey, 0)
if err != nil {
t.Fatalf("%+v", err)
}
return stats
}
// Test key collision when ingesting a key in the start of existing data, and
// SST. The colliding key is also equal to the header start key.
{
sstKVs := mvccKVsFromStrs([]strKv{
{"a", 7, "aa"}, // colliding key has a higher timestamp than existing version.
})
sstBytes := getSSTBytes(sstKVs)
stats := getStats(roachpb.Key("a"), roachpb.Key("b"), sstBytes)
cArgs := batcheval.CommandArgs{
Header: roachpb.Header{
Timestamp: hlc.Timestamp{WallTime: 7},
},
Args: &roachpb.AddSSTableRequest{
RequestHeader: roachpb.RequestHeader{Key: roachpb.Key("a"), EndKey: roachpb.Key("b")},
Data: sstBytes,
DisallowShadowing: true,
MVCCStats: &stats,
},
Stats: &enginepb.MVCCStats{},
}
_, err := batcheval.EvalAddSSTable(ctx, e, cArgs, nil)
if !testutils.IsError(err, "ingested key collides with an existing one: \"a\"") {
t.Fatalf("%+v", err)
}
}
// Test key collision when ingesting a key in the middle of existing data, and
// start of the SST. The key is equal to the header start key.
{
sstKVs := mvccKVsFromStrs([]strKv{
{"g", 4, "ggg"}, // colliding key has a lower timestamp than existing version.
})
sstBytes := getSSTBytes(sstKVs)
cArgs := batcheval.CommandArgs{
Header: roachpb.Header{
Timestamp: hlc.Timestamp{WallTime: 7},
},
Args: &roachpb.AddSSTableRequest{
RequestHeader: roachpb.RequestHeader{Key: roachpb.Key("g"), EndKey: roachpb.Key("h")},
Data: sstBytes,
DisallowShadowing: true,
},
Stats: &enginepb.MVCCStats{},
}
_, err := batcheval.EvalAddSSTable(ctx, e, cArgs, nil)
if !testutils.IsError(err, "ingested key collides with an existing one: \"g\"") {
t.Fatalf("%+v", err)
}
}
// Test key collision when ingesting a key at the end of the existing data and
// SST. The colliding key is not equal to header start key.
{
sstKVs := mvccKVsFromStrs([]strKv{
{"f", 2, "f"},
{"h", 4, "h"},
{"s", 1, "s"},
{"z", 3, "z"}, // colliding key has a higher timestamp than existing version.
})
sstBytes := getSSTBytes(sstKVs)
cArgs := batcheval.CommandArgs{
Header: roachpb.Header{
Timestamp: hlc.Timestamp{WallTime: 7},
},
Args: &roachpb.AddSSTableRequest{
RequestHeader: roachpb.RequestHeader{Key: roachpb.Key("f"), EndKey: roachpb.Key("zz")},
Data: sstBytes,
DisallowShadowing: true,
},
}
_, err := batcheval.EvalAddSSTable(ctx, e, cArgs, nil)
if !testutils.IsError(err, "ingested key collides with an existing one: \"z\"") {
t.Fatalf("%+v", err)
}
}
// Test for no key collision where the key range being ingested into is
// non-empty. The header start and end keys are not existing keys.
{
sstKVs := mvccKVsFromStrs([]strKv{
{"c", 2, "bb"},
{"h", 6, "hh"},
})
sstBytes := getSSTBytes(sstKVs)
stats := getStats(roachpb.Key("c"), roachpb.Key("i"), sstBytes)
cArgs := batcheval.CommandArgs{
Header: roachpb.Header{
Timestamp: hlc.Timestamp{WallTime: 7},
},
Args: &roachpb.AddSSTableRequest{
RequestHeader: roachpb.RequestHeader{Key: roachpb.Key("c"), EndKey: roachpb.Key("i")},
Data: sstBytes,
DisallowShadowing: true,
MVCCStats: &stats,
},
Stats: &enginepb.MVCCStats{},
}
_, err := batcheval.EvalAddSSTable(ctx, e, cArgs, nil)
if err != nil {
t.Fatalf("%+v", err)
}
}
// Test that a collision is not reported when ingesting a key for which we
// find a tombstone from an MVCC delete, and the sst key has a ts >= tombstone
// ts. Also test that iteration continues from the next key in the existing
// data after skipping over all the versions of the deleted key.
{
sstKVs := mvccKVsFromStrs([]strKv{
{"b", 7, "bb"}, // colliding key has a higher timestamp than its deleted version.
{"b", 1, "bbb"}, // older version of deleted key (should be skipped over).
{"f", 3, "ff"},
{"y", 3, "yyyy"}, // colliding key.
})
sstBytes := getSSTBytes(sstKVs)
cArgs := batcheval.CommandArgs{
Header: roachpb.Header{
Timestamp: hlc.Timestamp{WallTime: 7},
},
Args: &roachpb.AddSSTableRequest{
RequestHeader: roachpb.RequestHeader{Key: roachpb.Key("b"), EndKey: roachpb.Key("z")},
Data: sstBytes,
DisallowShadowing: true,
},
Stats: &enginepb.MVCCStats{},
}
_, err := batcheval.EvalAddSSTable(ctx, e, cArgs, nil)
if !testutils.IsError(err, "ingested key collides with an existing one: \"y\"") {
t.Fatalf("%+v", err)
}
}
// Test that a collision is reported when ingesting a key for which we find a
// tombstone from an MVCC delete, but the sst key has a ts < tombstone ts.
{
sstKVs := mvccKVsFromStrs([]strKv{
{"b", 4, "bb"}, // colliding key has a lower timestamp than its deleted version.
{"f", 3, "ff"},
{"y", 3, "yyyy"},
})
sstBytes := getSSTBytes(sstKVs)
cArgs := batcheval.CommandArgs{
Header: roachpb.Header{
Timestamp: hlc.Timestamp{WallTime: 7},
},
Args: &roachpb.AddSSTableRequest{
RequestHeader: roachpb.RequestHeader{Key: roachpb.Key("b"), EndKey: roachpb.Key("z")},
Data: sstBytes,
DisallowShadowing: true,
},
Stats: &enginepb.MVCCStats{},
}
_, err := batcheval.EvalAddSSTable(ctx, e, cArgs, nil)
if !testutils.IsError(err, "ingested key collides with an existing one: \"b\"") {
t.Fatalf("%+v", err)
}
}
// Test key collision when ingesting a key which has been deleted, and readded
// in the middle of the existing data. The colliding key is in the middle of
// the SST, and is the earlier of the two possible collisions.
{
sstKVs := mvccKVsFromStrs([]strKv{
{"f", 2, "ff"},
{"y", 4, "yyy"}, // colliding key has a lower timestamp than the readded version.
{"z", 3, "zzz"},
})
sstBytes := getSSTBytes(sstKVs)
cArgs := batcheval.CommandArgs{
Header: roachpb.Header{
Timestamp: hlc.Timestamp{WallTime: 7},
},
Args: &roachpb.AddSSTableRequest{
RequestHeader: roachpb.RequestHeader{Key: roachpb.Key("f"), EndKey: roachpb.Key("zz")},
Data: sstBytes,
DisallowShadowing: true,
},
Stats: &enginepb.MVCCStats{},
}
_, err := batcheval.EvalAddSSTable(ctx, e, cArgs, nil)
if !testutils.IsError(err, "ingested key collides with an existing one: \"y\"") {
t.Fatalf("%+v", err)
}
}
// Test key collision when ingesting a key which has a write intent in the
// existing data.
{
sstKVs := mvccKVsFromStrs([]strKv{
{"f", 2, "ff"},
{"q", 4, "qq"},
{"t", 3, "ttt"}, // has a write intent in the existing data.
})
// Add in a write intent.
ts := hlc.Timestamp{WallTime: 7}
txn := roachpb.MakeTransaction(
"test",
nil, // baseKey
roachpb.NormalUserPriority,
ts,
base.DefaultMaxClockOffset.Nanoseconds(),
)
if err := storage.MVCCPut(
ctx, e, nil, []byte("t"), ts,
roachpb.MakeValueFromBytes([]byte("tt")),
&txn,
); err != nil {
if !errors.HasType(err, (*roachpb.WriteIntentError)(nil)) {
t.Fatalf("%+v", err)
}
}
sstBytes := getSSTBytes(sstKVs)
cArgs := batcheval.CommandArgs{
Header: roachpb.Header{
Timestamp: hlc.Timestamp{WallTime: 7},
},
Args: &roachpb.AddSSTableRequest{
RequestHeader: roachpb.RequestHeader{Key: roachpb.Key("f"), EndKey: roachpb.Key("u")},
Data: sstBytes,
DisallowShadowing: true,
},
Stats: &enginepb.MVCCStats{},
}
_, err := batcheval.EvalAddSSTable(ctx, e, cArgs, nil)
if !testutils.IsError(err, "conflicting intents on \"t") {
t.Fatalf("%+v", err)
}
}
// Test key collision when ingesting a key which has an inline value in the
// existing data.
{
sstKVs := mvccKVsFromStrs([]strKv{
{"f", 2, "ff"},
{"i", 4, "ii"}, // has an inline value in existing data.
{"j", 3, "jj"},
})
// Add in an inline value.
ts := hlc.Timestamp{}
if err := storage.MVCCPut(
ctx, e, nil, []byte("i"), ts,
roachpb.MakeValueFromBytes([]byte("i")),
nil,
); err != nil {
t.Fatalf("%+v", err)
}
sstBytes := getSSTBytes(sstKVs)
cArgs := batcheval.CommandArgs{
Header: roachpb.Header{
Timestamp: hlc.Timestamp{WallTime: 7},
},
Args: &roachpb.AddSSTableRequest{
RequestHeader: roachpb.RequestHeader{Key: roachpb.Key("f"), EndKey: roachpb.Key("k")},
Data: sstBytes,
DisallowShadowing: true,
},
Stats: &enginepb.MVCCStats{},
}
_, err := batcheval.EvalAddSSTable(ctx, e, cArgs, nil)
if !testutils.IsError(err, "inline values are unsupported when checking for key collisions") {
t.Fatalf("%+v", err)
}
}
// Test ingesting a key with the same timestamp and value. This should not
// trigger a collision error.
{
sstKVs := mvccKVsFromStrs([]strKv{
{"e", 4, "ee"},
{"f", 2, "ff"},
{"y", 5, "yyy"}, // key has the same timestamp and value as the one present in the existing data.
})
sstBytes := getSSTBytes(sstKVs)
stats := getStats(roachpb.Key("e"), roachpb.Key("zz"), sstBytes)
cArgs := batcheval.CommandArgs{
Header: roachpb.Header{
Timestamp: hlc.Timestamp{WallTime: 7},
},
Args: &roachpb.AddSSTableRequest{
RequestHeader: roachpb.RequestHeader{Key: roachpb.Key("e"), EndKey: roachpb.Key("zz")},
Data: sstBytes,
DisallowShadowing: true,
MVCCStats: &stats,
},
Stats: &enginepb.MVCCStats{},
}
_, err := batcheval.EvalAddSSTable(ctx, e, cArgs, nil)
if err != nil {
t.Fatalf("%+v", err)
}
}
// Test ingesting a key with different timestamp but same value. This should
// trigger a collision error.
{
sstKVs := mvccKVsFromStrs([]strKv{
{"f", 2, "ff"},
{"y", 6, "yyy"}, // key has a higher timestamp but same value as the one present in the existing data.
{"z", 3, "zzz"},
})
sstBytes := getSSTBytes(sstKVs)
cArgs := batcheval.CommandArgs{
Header: roachpb.Header{
Timestamp: hlc.Timestamp{WallTime: 7},
},
Args: &roachpb.AddSSTableRequest{
RequestHeader: roachpb.RequestHeader{Key: roachpb.Key("f"), EndKey: roachpb.Key("zz")},
Data: sstBytes,
DisallowShadowing: true,
},
Stats: &enginepb.MVCCStats{},
}
_, err := batcheval.EvalAddSSTable(ctx, e, cArgs, nil)
if !testutils.IsError(err, "ingested key collides with an existing one: \"y\"") {
t.Fatalf("%+v", err)
}
}
// Test ingesting a key with the same timestamp but different value. This should
// trigger a collision error.
{
sstKVs := mvccKVsFromStrs([]strKv{
{"f", 2, "ff"},
{"y", 5, "yyyy"}, // key has the same timestamp but different value as the one present in the existing data.
{"z", 3, "zzz"},
})
sstBytes := getSSTBytes(sstKVs)
cArgs := batcheval.CommandArgs{
Header: roachpb.Header{
Timestamp: hlc.Timestamp{WallTime: 7},
},
Args: &roachpb.AddSSTableRequest{
RequestHeader: roachpb.RequestHeader{Key: roachpb.Key("f"), EndKey: roachpb.Key("zz")},
Data: sstBytes,
DisallowShadowing: true,
},
Stats: &enginepb.MVCCStats{},
}
_, err := batcheval.EvalAddSSTable(ctx, e, cArgs, nil)
if !testutils.IsError(err, "ingested key collides with an existing one: \"y\"") {
t.Fatalf("%+v", err)
}
}
// Test that a collision after a key with the same timestamp and value causes
// a collision error.
{
sstKVs := mvccKVsFromStrs([]strKv{
{"f", 2, "ff"},
{"y", 5, "yyy"}, // key has the same timestamp and value as the one present in the existing data - not a collision.
{"z", 3, "zzz"}, // shadow key
})
sstBytes := getSSTBytes(sstKVs)
cArgs := batcheval.CommandArgs{
Header: roachpb.Header{
Timestamp: hlc.Timestamp{WallTime: 7},
},
Args: &roachpb.AddSSTableRequest{
RequestHeader: roachpb.RequestHeader{Key: roachpb.Key("e"), EndKey: roachpb.Key("zz")},
Data: sstBytes,
DisallowShadowing: true,
},
Stats: &enginepb.MVCCStats{},
}
_, err := batcheval.EvalAddSSTable(ctx, e, cArgs, nil)
if !testutils.IsError(err, "ingested key collides with an existing one: \"z\"") {
t.Fatalf("%+v", err)
}
}
// This test ensures accuracy of MVCCStats in the situation that successive
// SSTs being ingested via AddSSTable have "perfectly shadowing" keys (same ts
// and value). Such KVs are not considered as collisions and so while they are
// skipped during ingestion, their stats would previously be double counted.
// To mitigate this problem we now return the stats of such skipped KVs while
// evaluating the AddSSTable command, and accumulate accurate stats in the
// CommandArgs Stats field by using:
// cArgs.Stats + ingested_stats - skipped_stats.
{
// Successfully evaluate the first SST as there are no key collisions.
sstKVs := mvccKVsFromStrs([]strKv{
{"c", 2, "bb"},
{"h", 6, "hh"},
})
sstBytes := getSSTBytes(sstKVs)
stats := getStats(roachpb.Key("c"), roachpb.Key("i"), sstBytes)
// Accumulate stats across SST ingestion.
commandStats := enginepb.MVCCStats{}
cArgs := batcheval.CommandArgs{
Header: roachpb.Header{
Timestamp: hlc.Timestamp{WallTime: 7},
},
Args: &roachpb.AddSSTableRequest{
RequestHeader: roachpb.RequestHeader{Key: roachpb.Key("c"), EndKey: roachpb.Key("i")},
Data: sstBytes,
DisallowShadowing: true,
MVCCStats: &stats,
},
Stats: &commandStats,
}
_, err := batcheval.EvalAddSSTable(ctx, e, cArgs, nil)
if err != nil {
t.Fatalf("%+v", err)
}
firstSSTStats := commandStats
// Insert KV entries so that we can correctly identify keys to skip when
// ingesting the perfectly shadowing KVs (same ts and same value) in the
// second SST.
for _, kv := range sstKVs {
if err := e.PutMVCC(kv.Key, kv.Value); err != nil {
t.Fatalf("%+v", err)
}
}
// Evaluate the second SST. Both the KVs are perfectly shadowing and should
// not contribute to the stats.
secondSSTKVs := mvccKVsFromStrs([]strKv{
{"c", 2, "bb"}, // key has the same timestamp and value as the one present in the existing data.
{"h", 6, "hh"}, // key has the same timestamp and value as the one present in the existing data.
})
secondSSTBytes := getSSTBytes(secondSSTKVs)
secondStats := getStats(roachpb.Key("c"), roachpb.Key("i"), secondSSTBytes)
cArgs.Args = &roachpb.AddSSTableRequest{
RequestHeader: roachpb.RequestHeader{Key: roachpb.Key("c"), EndKey: roachpb.Key("i")},
Data: secondSSTBytes,
DisallowShadowing: true,
MVCCStats: &secondStats,
}
_, err = batcheval.EvalAddSSTable(ctx, e, cArgs, nil)
if err != nil {
t.Fatalf("%+v", err)
}
// Check that there has been no double counting of stats.
if !firstSSTStats.Equal(*cArgs.Stats) {
t.Errorf("mvcc stats should not have changed as all keys in second SST are shadowing: %s",
pretty.Diff(firstSSTStats, *cArgs.Stats))
}
// Evaluate the third SST. Two of the three KVs are perfectly shadowing, but
// there is one valid KV which should contribute to the stats.
thirdSSTKVs := mvccKVsFromStrs([]strKv{
{"c", 2, "bb"}, // key has the same timestamp and value as the one present in the existing data.
{"e", 2, "ee"},
{"h", 6, "hh"}, // key has the same timestamp and value as the one present in the existing data.
})
thirdSSTBytes := getSSTBytes(thirdSSTKVs)
thirdStats := getStats(roachpb.Key("c"), roachpb.Key("i"), thirdSSTBytes)
cArgs.Args = &roachpb.AddSSTableRequest{
RequestHeader: roachpb.RequestHeader{Key: roachpb.Key("c"), EndKey: roachpb.Key("i")},
Data: thirdSSTBytes,
DisallowShadowing: true,
MVCCStats: &thirdStats,
}
_, err = batcheval.EvalAddSSTable(ctx, e, cArgs, nil)
if err != nil {
t.Fatalf("%+v", err)
}
// This is the stats contribution of the KV {"e", 2, "ee"}. This should be
// the only addition to the cumulative stats, as the other two KVs are
// perfect shadows of existing data.
var delta enginepb.MVCCStats
delta.LiveCount = 1
delta.LiveBytes = 21
delta.KeyCount = 1
delta.KeyBytes = 14
delta.ValCount = 1
delta.ValBytes = 7
// Check that there has been no double counting of stats.
firstSSTStats.Add(delta)
if !firstSSTStats.Equal(*cArgs.Stats) {
t.Errorf("mvcc stats are not accurate: %s",
pretty.Diff(firstSSTStats, *cArgs.Stats))
}
}
})
}
}
|
package ges_test
import (
"net/url"
"time"
. "gopkg.in/check.v1"
)
// TestIndexCreation verifies that creating a fresh index succeeds.
func (s *GesSuite) TestIndexCreation(c *C) {
	err := s.conn.CreateIndex(indexName, nil)
	c.Assert(err, IsNil)
}

// TestIndexCreationWhenIndexExists verifies that creating the same index a
// second time surfaces the server's IndexAlreadyExistsException.
func (s *GesSuite) TestIndexCreationWhenIndexExists(c *C) {
	// Setup call; its error is deliberately ignored (creation itself is
	// covered by TestIndexCreation).
	s.conn.CreateIndex(indexName, nil)
	err := s.conn.CreateIndex(indexName, nil)
	c.Assert(err, ErrorMatches, "IndexAlreadyExistsException.*")
}
// TestIndexExists verifies IndexExists reports true for a created index.
func (s *GesSuite) TestIndexExists(c *C) {
	s.conn.CreateIndex(indexName, nil) // setup; error intentionally ignored
	exists, err := s.conn.IndexExists(indexName)
	c.Assert(err, IsNil)
	c.Assert(exists, Equals, true)
}

// TestIndexDoesntExists verifies IndexExists reports false (with a
// not-found error) when the index was never created.
// NOTE(review): the pattern "not found*" makes the final 'd' optional-
// repeat; "not found.*" was probably intended — confirm against the
// actual error text before changing it.
func (s *GesSuite) TestIndexDoesntExists(c *C) {
	exists, err := s.conn.IndexExists(indexName)
	c.Assert(err, ErrorMatches, "not found*")
	c.Assert(exists, Equals, false)
}
// TestDeleteIndex verifies that deleting an existing index succeeds.
func (s *GesSuite) TestDeleteIndex(c *C) {
	s.conn.CreateIndex(indexName, nil) // setup; error intentionally ignored
	err := s.conn.DeleteIndex(indexName)
	c.Assert(err, IsNil)
}

// TestDeleteIndexWhenIndexDoesntExists verifies that deleting a missing
// index surfaces the server's IndexMissingException.
func (s *GesSuite) TestDeleteIndexWhenIndexDoesntExists(c *C) {
	err := s.conn.DeleteIndex(indexName)
	c.Assert(err, ErrorMatches, "IndexMissingException.*")
}
// TestCloseIndex verifies that a freshly created index can be closed.
func (s *GesSuite) TestCloseIndex(c *C) {
	s.conn.CreateIndex(indexName, nil) // setup; error intentionally ignored
	// Presumably gives the server time to make the index operable before
	// the close request — TODO confirm whether this sleep is still needed.
	time.Sleep(15 * time.Millisecond)
	err := s.conn.CloseIndex(indexName)
	c.Assert(err, IsNil)
}

// TestOpenIndex verifies that a freshly created index can be opened.
func (s *GesSuite) TestOpenIndex(c *C) {
	s.conn.CreateIndex(indexName, nil) // setup; error intentionally ignored
	time.Sleep(15 * time.Millisecond)  // see note in TestCloseIndex
	err := s.conn.OpenIndex(indexName)
	c.Assert(err, IsNil)
}
// TestPutMapping verifies that the suite's mapping can be installed on a
// freshly created index.
func (s *GesSuite) TestPutMapping(c *C) {
	s.conn.CreateIndex(indexName, nil) // setup; error intentionally ignored
	time.Sleep(15 * time.Millisecond)  // presumably lets the index settle — TODO confirm
	err := s.conn.PutMapping(indexName, indexType, s.mapping, url.Values{})
	c.Assert(err, IsNil)
}

// TestGetMapping verifies that a mapping read back through GetMapping
// equals the mapping that was installed.
func (s *GesSuite) TestGetMapping(c *C) {
	s.conn.CreateIndex(indexName, nil) // setup; errors intentionally ignored
	time.Sleep(15 * time.Millisecond)
	s.conn.PutMapping(indexName, indexType, s.mapping, url.Values{})
	m, err := s.conn.GetMapping(indexName, indexType)
	c.Assert(err, IsNil)
	c.Assert(m[indexName].(map[string]interface{})["mappings"], DeepEquals, s.mapping)
}
// TestTypeDoesntExist verifies TypeExists reports false (with a not-found
// error) when no mapping has been installed for the type.
// NOTE(review): "not found*" makes the final 'd' optional-repeat; likely
// meant "not found.*" — confirm against the actual error text.
func (s *GesSuite) TestTypeDoesntExist(c *C) {
	s.conn.CreateIndex(indexName, nil) // setup; error intentionally ignored
	time.Sleep(15 * time.Millisecond)
	exists, err := s.conn.TypeExists(indexName, indexType)
	c.Assert(err, ErrorMatches, "not found*")
	c.Assert(exists, Equals, false)
}

// TestTypeExist verifies TypeExists reports true once a mapping for the
// type has been installed.
func (s *GesSuite) TestTypeExist(c *C) {
	s.conn.CreateIndex(indexName, nil) // setup; errors intentionally ignored
	time.Sleep(15 * time.Millisecond)
	s.conn.PutMapping(indexName, indexType, s.mapping, url.Values{})
	exists, err := s.conn.TypeExists(indexName, indexType)
	c.Assert(err, IsNil)
	c.Assert(exists, Equals, true)
}
// TestDeleteMapping verifies that an installed mapping can be deleted.
func (s *GesSuite) TestDeleteMapping(c *C) {
	s.conn.CreateIndex(indexName, nil) // setup; errors intentionally ignored
	time.Sleep(15 * time.Millisecond)
	s.conn.PutMapping(indexName, indexType, s.mapping, url.Values{})
	err := s.conn.DeleteMapping(indexName, indexType)
	c.Assert(err, IsNil)
}
// TestUpdateIndexSettings verifies that dynamic index settings (here,
// number_of_replicas) can be updated on an existing index.
func (s *GesSuite) TestUpdateIndexSettings(c *C) {
	s.conn.CreateIndex(indexName, nil) // setup; error intentionally ignored
	time.Sleep(15 * time.Millisecond)
	settings := map[string]interface{}{
		"index": map[string]interface{}{
			"number_of_replicas": 4,
		},
	}
	err := s.conn.UpdateIndexSettings(indexName, settings)
	c.Assert(err, IsNil)
}

// TestGetIndexSettings verifies a value written by UpdateIndexSettings is
// read back by GetIndexSettings. The server returns the number as the
// string "4", hence the string comparison.
func (s *GesSuite) TestGetIndexSettings(c *C) {
	s.conn.CreateIndex(indexName, nil) // setup; errors intentionally ignored
	time.Sleep(15 * time.Millisecond)
	settings := map[string]interface{}{
		"index": map[string]interface{}{
			"number_of_replicas": 4,
		},
	}
	s.conn.UpdateIndexSettings(indexName, settings)
	indexSettings, err := s.conn.GetIndexSettings(indexName, url.Values{})
	c.Assert(err, IsNil)
	c.Assert(indexSettings[indexName].(map[string]interface{})["settings"].(map[string]interface{})["index"].(map[string]interface{})["number_of_replicas"], DeepEquals, "4")
}
|
package handler
import (
"strconv"
"github.com/futurehomeno/fimpgo"
log "github.com/sirupsen/logrus"
sensibo "github.com/tskaard/sensibo/sensibo-api"
)
// sendTemperatureMsg publishes an evt.sensor.report carrying temp (°C) on
// the sensor_temp service address of the device identified by addr.
func (fc *FimpSensiboHandler) sendTemperatureMsg(addr string, temp float64, oldMsg *fimpgo.FimpMessage, channel int) { // channel; 0 = ch_0, 1 = ch_1, -1 = no channel
	props := map[string]string{"unit": "C"}
	msg := fimpgo.NewMessage("evt.sensor.report", "sensor_temp", "float", temp, props, nil, oldMsg)
	msg.Source = "sensibo"
	var adr *fimpgo.Address
	switch channel {
	case 0:
		adr, _ = fimpgo.NewAddressFromString("pt:j1/mt:evt/rt:dev/rn:sensibo/ad:1/sv:sensor_temp/ad:" + addr + "_0")
	case 1:
		adr, _ = fimpgo.NewAddressFromString("pt:j1/mt:evt/rt:dev/rn:sensibo/ad:1/sv:sensor_temp/ad:" + addr + "_1")
	case -1:
		adr, _ = fimpgo.NewAddressFromString("pt:j1/mt:evt/rt:dev/rn:sensibo/ad:1/sv:sensor_temp/ad:" + addr)
	}
	fc.mqt.Publish(adr, msg)
	log.Debug("Temperature message sent")
}
// sendHumidityMsg publishes an evt.sensor.report carrying humid (%) on the
// sensor_humid service address of the device identified by addr.
func (fc *FimpSensiboHandler) sendHumidityMsg(addr string, humid float64, oldMsg *fimpgo.FimpMessage, channel int) { // channel; 0 = ch_0, 1 = ch_1, -1 = no channel
	props := map[string]string{"unit": "%"}
	msg := fimpgo.NewMessage("evt.sensor.report", "sensor_humid", "float", humid, props, nil, oldMsg)
	msg.Source = "sensibo"
	var adr *fimpgo.Address
	switch channel {
	case 0:
		adr, _ = fimpgo.NewAddressFromString("pt:j1/mt:evt/rt:dev/rn:sensibo/ad:1/sv:sensor_humid/ad:" + addr + "_0")
	case 1:
		adr, _ = fimpgo.NewAddressFromString("pt:j1/mt:evt/rt:dev/rn:sensibo/ad:1/sv:sensor_humid/ad:" + addr + "_1")
	case -1:
		adr, _ = fimpgo.NewAddressFromString("pt:j1/mt:evt/rt:dev/rn:sensibo/ad:1/sv:sensor_humid/ad:" + addr)
	}
	fc.mqt.Publish(adr, msg)
	log.Debug("Humidity message sent")
}
// SendMotionMsg publishes an evt.presence.report with the motion state on
// the sensor_presence service address of the device identified by addr.
func (fc *FimpSensiboHandler) SendMotionMsg(addr string, motion bool, oldMsg *fimpgo.FimpMessage) { // channel is always _1 for motion (on sensibo)
	msg := fimpgo.NewMessage("evt.presence.report", "sensor_presence", "bool", motion, nil, nil, oldMsg)
	msg.Source = "sensibo"
	// NOTE(review): the address-parse error is ignored, so a malformed
	// addr would publish to a nil address — consider logging it.
	adr, _ := fimpgo.NewAddressFromString("pt:j1/mt:evt/rt:dev/rn:sensibo/ad:1/sv:sensor_presence/ad:" + addr + "_1")
	fc.mqt.Publish(adr, msg)
	log.Debug("Motion message sent")
}
// sendThermostatModeMsg publishes an evt.mode.report for the thermostat
// service of the device identified by addr.
func (fc *FimpSensiboHandler) sendThermostatModeMsg(addr string, mode string, oldMsg *fimpgo.FimpMessage, channel int) { // channel; 0 = ch_0, 1 = ch_1, -1 = no channel
	msg := fimpgo.NewStringMessage("evt.mode.report", "thermostat", mode, nil, nil, oldMsg)
	msg.Source = "sensibo"
	var adr *fimpgo.Address
	switch channel {
	case 0:
		adr, _ = fimpgo.NewAddressFromString("pt:j1/mt:evt/rt:dev/rn:sensibo/ad:1/sv:thermostat/ad:" + addr + "_0")
	case 1:
		// Bug fix: channel 1 is part of the documented contract (see the
		// signature comment) but was unhandled, so Publish received a nil
		// address for ch_1 devices. This matches sendTemperatureMsg.
		adr, _ = fimpgo.NewAddressFromString("pt:j1/mt:evt/rt:dev/rn:sensibo/ad:1/sv:thermostat/ad:" + addr + "_1")
	case -1:
		adr, _ = fimpgo.NewAddressFromString("pt:j1/mt:evt/rt:dev/rn:sensibo/ad:1/sv:thermostat/ad:" + addr)
	default:
		log.Error("sendThermostatModeMsg: unsupported channel ", channel)
		return
	}
	fc.mqt.Publish(adr, msg)
	log.Debug("Thermostat mode message sent")
}
// sendFanCtrlMsg publishes an evt.mode.report for the fan_ctrl service of
// the device identified by addr.
func (fc *FimpSensiboHandler) sendFanCtrlMsg(addr string, fanMode string, oldMsg *fimpgo.FimpMessage, channel int) { // channel; 0 = ch_0, 1 = ch_1, -1 = no channel
	msg := fimpgo.NewStringMessage("evt.mode.report", "fan_ctrl", fanMode, nil, nil, oldMsg)
	msg.Source = "sensibo"
	var adr *fimpgo.Address
	switch channel {
	case 0:
		adr, _ = fimpgo.NewAddressFromString("pt:j1/mt:evt/rt:dev/rn:sensibo/ad:1/sv:fan_ctrl/ad:" + addr + "_0")
	case 1:
		// Bug fix: channel 1 is documented in the signature comment but
		// was unhandled, so Publish received a nil address for ch_1
		// devices. This matches sendTemperatureMsg/sendHumidityMsg.
		adr, _ = fimpgo.NewAddressFromString("pt:j1/mt:evt/rt:dev/rn:sensibo/ad:1/sv:fan_ctrl/ad:" + addr + "_1")
	case -1:
		adr, _ = fimpgo.NewAddressFromString("pt:j1/mt:evt/rt:dev/rn:sensibo/ad:1/sv:fan_ctrl/ad:" + addr)
	default:
		log.Error("sendFanCtrlMsg: unsupported channel ", channel)
		return
	}
	fc.mqt.Publish(adr, msg)
	log.Debug("Fan ctrl mode message sent")
}
// sendSetpointMsg publishes an evt.setpoint.report containing the target
// temperature, mode and (when present in acState) the temperature unit.
func (fc *FimpSensiboHandler) sendSetpointMsg(addr string, acState sensibo.AcState, oldMsg *fimpgo.FimpMessage, channel int) { // channel; 0 = ch_0, 1 = ch_1, -1 = no channel
	val := map[string]string{
		"temp": strconv.Itoa(acState.TargetTemperature),
		"type": acState.Mode,
	}
	if acState.TemperatureUnit != "" {
		val["unit"] = acState.TemperatureUnit
	}
	msg := fimpgo.NewStrMapMessage("evt.setpoint.report", "thermostat", val, nil, nil, oldMsg)
	msg.Source = "sensibo"
	var adr *fimpgo.Address
	switch channel {
	case 0:
		adr, _ = fimpgo.NewAddressFromString("pt:j1/mt:evt/rt:dev/rn:sensibo/ad:1/sv:thermostat/ad:" + addr + "_0")
	case 1:
		// Bug fix: channel 1 is documented in the signature comment but
		// was unhandled, so Publish received a nil address for ch_1
		// devices. This matches sendTemperatureMsg/sendHumidityMsg.
		adr, _ = fimpgo.NewAddressFromString("pt:j1/mt:evt/rt:dev/rn:sensibo/ad:1/sv:thermostat/ad:" + addr + "_1")
	case -1:
		adr, _ = fimpgo.NewAddressFromString("pt:j1/mt:evt/rt:dev/rn:sensibo/ad:1/sv:thermostat/ad:" + addr)
	default:
		log.Error("sendSetpointMsg: unsupported channel ", channel)
		return
	}
	fc.mqt.Publish(adr, msg)
	log.Debug("Thermostat setpoint message sent")
}
// sendAcState fans the fields of acState out as individual FIMP reports:
// thermostat mode, setpoint and fan level. A missing field is logged and
// skipped so the remaining reports are still sent.
func (fc *FimpSensiboHandler) sendAcState(addr string, acState sensibo.AcState, oldMsg *fimpgo.FimpMessage, channel int) { // channel; 0 = ch_0, 1 = ch_1, -1 = no channel
	if acState.Mode != "" {
		fc.state.Mode = acState.Mode
		fc.sendThermostatModeMsg(addr, acState.Mode, oldMsg, channel)
	} else {
		log.Error("AcState does not include Mode")
	}
	if acState.TargetTemperature != 0 {
		fc.sendSetpointMsg(addr, acState, oldMsg, channel)
	} else {
		log.Error("Setpoint temperature is not included in acState")
	}
	if acState.FanLevel != "" {
		fc.state.FanMode = acState.FanLevel
		fc.sendFanCtrlMsg(addr, acState.FanLevel, oldMsg, channel)
	} else {
		log.Error("Fan Level is not included in acState")
	}
}
|
package gopay
import (
"encoding/json"
"github.com/parnurzeal/gorequest"
"log"
"time"
)
// aliPayClient holds the credentials and configuration used when calling
// the Alipay open-platform gateway.
type aliPayClient struct {
	AppId      string // Alipay application ID
	privateKey string // application RSA private key (unexported on purpose)
	ReturnUrl  string // synchronous return URL; omitted from requests when empty
	NotifyUrl  string // asynchronous notification URL; omitted when empty
	Charset    string // request charset; doAliPay defaults it to "utf-8" when empty
	SignType   string // signature type; doAliPay defaults it to "RSA2" when empty
	isProd     bool   // true = production gateway, false = sandbox
}
// NewAliPayClient initializes an Alipay client.
//
//	appId:      the Alipay application ID
//	privateKey: the application's RSA private key
//	isProd:     true for the production gateway, false for the sandbox
func NewAliPayClient(appId, privateKey string, isProd bool) (client *aliPayClient) {
	return &aliPayClient{
		AppId:      appId,
		privateKey: privateKey,
		isProd:     isProd,
	}
}
// AliPayTradeFastPayRefundQuery implements alipay.trade.fastpay.refund.query
// (unified refund-query API). TODO: not yet implemented.
func (this *aliPayClient) AliPayTradeFastPayRefundQuery() {
}

// AliPayTradeOrderSettle implements alipay.trade.order.settle (unified
// order-settlement API). TODO: not yet implemented.
func (this *aliPayClient) AliPayTradeOrderSettle() {
}

// AliPayTradeClose implements alipay.trade.close (unified order-close
// API). TODO: not yet implemented.
func (this *aliPayClient) AliPayTradeClose() {
}

// AliPayTradeCancel implements alipay.trade.cancel (unified order-cancel
// API). TODO: not yet implemented.
func (this *aliPayClient) AliPayTradeCancel() {
}

// AliPayTradeRefund implements alipay.trade.refund (unified refund API).
// TODO: not yet implemented.
func (this *aliPayClient) AliPayTradeRefund() {
}

// AliPayTradePrecreate implements alipay.trade.precreate (offline trade
// pre-create API). TODO: not yet implemented.
func (this *aliPayClient) AliPayTradePrecreate() {
}

// AliPayTradeCreate implements alipay.trade.create (unified order-create
// API). TODO: not yet implemented.
func (this *aliPayClient) AliPayTradeCreate() {
}

// AliPayTradePay implements alipay.trade.pay (unified payment API).
// TODO: not yet implemented.
func (this *aliPayClient) AliPayTradePay() {
}

// AliPayTradeQuery implements alipay.trade.query (offline trade query
// API). TODO: not yet implemented.
func (this *aliPayClient) AliPayTradeQuery() {
}
// AliPayTradeAppPay implements alipay.trade.app.pay (app payment API 2.0).
// It returns the signed URL-parameter string that the mobile SDK uses to
// start a payment, or an error when the request could not be built.
func (this *aliPayClient) AliPayTradeAppPay(body BodyMap) (payParam string, err error) {
	raw, err := this.doAliPay(body, "alipay.trade.app.pay")
	if err != nil {
		return null, err
	}
	return string(raw), nil
}
// AliPayTradeWapPay implements alipay.trade.wap.pay (mobile-site payment
// API 2.0). It returns the URL of the payment page the gateway redirected
// to, or an error when the request failed.
func (this *aliPayClient) AliPayTradeWapPay(body BodyMap) (payUrl string, err error) {
	raw, err := this.doAliPay(body, "alipay.trade.wap.pay")
	if err != nil {
		return null, err
	}
	return string(raw), nil
}
// AliPayTradeOrderinfoSync implements alipay.trade.orderinfo.sync (order
// information sync API). TODO: not yet implemented.
func (this *aliPayClient) AliPayTradeOrderinfoSync() {
}

// AliPayTradePagePay implements alipay.trade.page.pay (unified desktop
// page payment API). TODO: not yet implemented.
func (this *aliPayClient) AliPayTradePagePay() {
}

// ZhimaCreditScoreBriefGet implements zhima.credit.score.brief.get (Zhima
// credit score, brief edition). TODO: not yet implemented.
func (this *aliPayClient) ZhimaCreditScoreBriefGet() {
}

// ZhimaCreditScoreGet implements zhima.credit.score.get (Zhima credit
// score). TODO: not yet implemented.
func (this *aliPayClient) ZhimaCreditScoreGet() {
}
// doAliPay builds a signed request for the given Alipay method and sends
// it to the gateway.
//
// body is serialized to JSON and attached as the biz_content parameter.
// Special cases:
//   - "alipay.trade.app.pay": the signed URL-parameter string itself is
//     returned without a network call (the mobile SDK submits it).
//   - "alipay.trade.wap.pay": the URL the gateway redirected to is
//     returned instead of the response body.
//
// All other methods return the raw response body.
func (this *aliPayClient) doAliPay(body BodyMap, method string) (bytes []byte, err error) {
	//=============== serialize the biz_content body ===================
	bodyStr, err := json.Marshal(body)
	if err != nil {
		log.Println("json.Marshal:", err)
		return nil, err
	}
	//fmt.Println(string(bodyStr))
	//=============== assemble the public parameters ===================
	reqBody := make(BodyMap)
	reqBody.Set("app_id", this.AppId)
	reqBody.Set("method", method)
	reqBody.Set("format", "JSON")
	if this.ReturnUrl != null {
		reqBody.Set("return_url", this.ReturnUrl)
	}
	// Charset defaults to utf-8 and SignType to RSA2 when not configured.
	if this.Charset == null {
		reqBody.Set("charset", "utf-8")
	} else {
		reqBody.Set("charset", this.Charset)
	}
	if this.SignType == null {
		reqBody.Set("sign_type", "RSA2")
	} else {
		reqBody.Set("sign_type", this.SignType)
	}
	reqBody.Set("timestamp", time.Now().Format(TimeLayout))
	reqBody.Set("version", "1.0")
	if this.NotifyUrl != null {
		reqBody.Set("notify_url", this.NotifyUrl)
	}
	reqBody.Set("biz_content", string(bodyStr))
	//=============== sign the request ===================
	pKey := FormatPrivateKey(this.privateKey)
	sign, err := getRsaSign(reqBody, pKey)
	if err != nil {
		return nil, err
	}
	reqBody.Set("sign", sign)
	//fmt.Println("rsaSign:", sign)
	//=============== send the request ===================
	urlParam := FormatAliPayURLParam(reqBody)
	//fmt.Println("urlParam:", urlParam)
	if method == "alipay.trade.app.pay" {
		return []byte(urlParam), nil
	}
	var url string
	agent := gorequest.New()
	if !this.isProd {
		// sandbox environment
		url = zfb_sanbox_base_url
		//fmt.Println(url)
		agent.Post(url)
	} else {
		// production environment
		url = zfb_base_url
		//fmt.Println(url)
		agent.Post(url)
	}
	rsp, b, errs := agent.
		Type("form-data").
		SendString(urlParam).
		EndBytes()
	if len(errs) > 0 {
		return nil, errs[0]
	}
	if method == "alipay.trade.wap.pay" {
		//fmt.Println("rsp:::", rsp.Request.URL)
		return []byte(rsp.Request.URL.String()), nil
	}
	return b, nil
}
|
package goauth2
import (
"context"
"encoding/json"
"io/ioutil"
"log"
"net/http"
"net/url"
"strings"
"github.com/morikuni/failure"
"github.com/sters/neko/gclient"
)
// Endpoints and fixed protocol parameters for Google's OAuth2 "installed
// application" (desktop) flow.
const (
	oauthURI = "https://accounts.google.com/o/oauth2/v2/auth"
	authorizationURI = "https://www.googleapis.com/oauth2/v4/token"
	redirectURI = "urn:ietf:wg:oauth:2.0:oob" // fixed for desktop app
	responseType = "code" // fixed for desktop app
	accessType = "offline" // fixed for desktop app
	grantTypeAuthorizationCode = "authorization_code"
	grantTypeRefreshToken = "refresh_token"
)
type (
	// AuthorizationResponse mirrors the JSON body returned by the token
	// endpoint for both the authorization-code and refresh-token grants.
	// RefreshToken may be empty on refresh responses (Refresh only stores
	// it when non-empty).
	AuthorizationResponse struct {
		AccessToken string `json:"access_token"`
		IDToken string `json:"id_token"`
		ExpiresIn int64 `json:"expires_in"`
		TokenType string `json:"token_type"`
		RefreshToken string `json:"refresh_token"`
	}
)
// Client runs the OAuth2 installed-application flow and caches the
// resulting tokens. The HTTP client c must be supplied via WithHTTPClient
// before calling Authorization or Refresh (NewClient leaves it nil).
type Client struct {
	c *http.Client
	clientID string
	clientSecret string
	accessToken string
	// accessTokenExpire holds the expires_in value of the last token
	// response (a lifetime in seconds) — NOTE(review): not an absolute
	// deadline; confirm how callers interpret it.
	accessTokenExpire int64
	refreshToken string
	scope string
}
// GetRefreshToken returns the cached refresh token (empty until a token
// call succeeds or WithAccessToken/Refresh seeds it).
func (c *Client) GetRefreshToken() string {
	return c.refreshToken
}

// GetAccessToken returns the cached access token (empty until a token
// call succeeds or WithAccessToken seeds it).
func (c *Client) GetAccessToken() string {
	return c.accessToken
}
// NewClient creates a Client with the given application credentials. The
// HTTP client and scope must still be supplied via the With* setters.
func NewClient(clientID string, clientSecret string) *Client {
	c := &Client{}
	c.clientID = clientID
	c.clientSecret = clientSecret
	return c
}
// WithAccessToken seeds the client with an already-obtained access token.
func (c *Client) WithAccessToken(accessToken string) {
	c.accessToken = accessToken
}

// WithScope sets the raw scope string used when building the OAuth URI.
func (c *Client) WithScope(scope string) {
	c.scope = scope
}
// WithScopes sets the OAuth scope to the space-delimited union of scopes,
// as required by RFC 6749 §3.3.
//
// Bug fixes over the previous implementation: scopes were joined with
// "&", which ends the scope query parameter early when the URI is built,
// and calling this with no arguments panicked on the trailing-separator
// slice operation. strings.Join handles both correctly (empty input
// yields an empty scope).
func (c *Client) WithScopes(scopes ...string) {
	c.scope = strings.Join(scopes, " ")
}
// WithHTTPClient injects the HTTP client used by Authorization and
// Refresh. It must be called before either of them.
func (c *Client) WithHTTPClient(client *http.Client) {
	c.c = client
}
// GetOAuthURI returns the consent-screen URL the user must visit to obtain
// an authorization code for this client and scope.
//
// The query parameters are concatenated without URL-encoding, exactly as
// the original implementation did (including the leading "?&"); callers
// relying on that exact string keep getting it.
func (c *Client) GetOAuthURI() string {
	return oauthURI + "?" +
		"&client_id=" + c.clientID +
		"&redirect_uri=" + redirectURI +
		"&scope=" + c.scope +
		"&access_type=" + accessType +
		"&response_type=" + responseType
}
// Authorization exchanges an authorization code (obtained by the user via
// GetOAuthURI) for tokens at the token endpoint and stores the access
// token, its expires_in value and the refresh token on c. The HTTP client
// must have been set with WithHTTPClient first.
func (c *Client) Authorization(ctx context.Context, authorizationCode string) error {
	params := url.Values{}
	params.Add("code", authorizationCode)
	params.Add("client_id", c.clientID)
	params.Add("client_secret", c.clientSecret)
	params.Add("redirect_uri", redirectURI)
	params.Add("grant_type", grantTypeAuthorizationCode)
	params.Add("access_type", accessType)
	req, err := http.NewRequest(
		http.MethodPost,
		authorizationURI,
		strings.NewReader(params.Encode()),
	)
	if err != nil {
		return failure.Wrap(err)
	}
	req = req.WithContext(ctx)
	req.Header.Add(gclient.ContentTypeHeader, gclient.ContentTypeForm)
	rawResponse, err := c.c.Do(req)
	if err != nil {
		return failure.Wrap(err)
	}
	defer rawResponse.Body.Close()
	responseBuf, err := ioutil.ReadAll(rawResponse.Body)
	if err != nil {
		return failure.Wrap(err)
	}
	var response AuthorizationResponse
	err = json.Unmarshal(responseBuf, &response)
	if err != nil {
		// Dump the raw response to aid debugging malformed token replies.
		log.Println(rawResponse)
		return failure.Wrap(err)
	}
	c.accessToken = response.AccessToken
	// NOTE(review): ExpiresIn is a lifetime in seconds, stored verbatim —
	// not converted to an absolute deadline.
	c.accessTokenExpire = response.ExpiresIn
	c.refreshToken = response.RefreshToken
	return nil
}
// Refresh trades refreshToken for a new access token at the token
// endpoint. The given token is stored on c before the call; the cached
// access/refresh tokens are only overwritten with non-empty values from
// the response (the refresh grant may omit a new refresh token). The HTTP
// client must have been set with WithHTTPClient first.
func (c *Client) Refresh(ctx context.Context, refreshToken string) error {
	c.refreshToken = refreshToken
	params := url.Values{}
	params.Set("client_id", c.clientID)
	params.Set("client_secret", c.clientSecret)
	params.Set("grant_type", grantTypeRefreshToken)
	params.Set("refresh_token", c.refreshToken)
	req, err := http.NewRequest(
		http.MethodPost,
		authorizationURI,
		strings.NewReader(params.Encode()),
	)
	if err != nil {
		return failure.Wrap(err)
	}
	req = req.WithContext(ctx)
	req.Header.Add(gclient.ContentTypeHeader, gclient.ContentTypeForm)
	rawResponse, err := c.c.Do(req)
	if err != nil {
		return failure.Wrap(err)
	}
	defer rawResponse.Body.Close()
	responseBuf, err := ioutil.ReadAll(rawResponse.Body)
	if err != nil {
		return failure.Wrap(err)
	}
	var response AuthorizationResponse
	err = json.Unmarshal(responseBuf, &response)
	if err != nil {
		// Dump the raw response to aid debugging malformed token replies.
		log.Println(rawResponse)
		return failure.Wrap(err)
	}
	if response.AccessToken != "" {
		c.accessToken = response.AccessToken
		c.accessTokenExpire = response.ExpiresIn
	}
	if response.RefreshToken != "" {
		c.refreshToken = response.RefreshToken
	}
	return nil
}
|
package main
import "fmt"
// list is a node of a doubly linked list. The value created by new(list)
// acts as a sentinel head: insert/delete/print all skip it and operate on
// the nodes reachable through right.
type list struct {
	e string
	left *list
	right *list
}
// insert adds the element e at index i in the list l (l is the sentinel
// head; index 0 is the first real node).
//
// Bug fix: the original linked node in with
//
//	aux.right = node
//	if aux.right != nil { aux.right.left = node }
//
// where aux.right is already node after the first line, so the condition
// was always true and node.left was overwritten to point at node itself,
// while the old successor's left pointer was never updated. The successor
// must be re-linked *before* (or independently of) overwriting aux.right.
func (l *list) insert(i int, e string) {
	node := &list{e: e}
	aux := l
	for j := 0; l != nil && j <= i; l, j = l.right, j+1 {
		aux = l
	}
	node.left, node.right = aux, aux.right
	if node.right != nil {
		node.right.left = node
	}
	aux.right = node
}
// delete removes the element at index i in the list l (l is the sentinel
// head). Out-of-range indices are ignored.
func (l *list) delete(i int) {
	prev := l
	for j := 0; l != nil && j <= i; l, j = l.right, j+1 {
		prev = l
	}
	if l == nil {
		return
	}
	prev.right = l.right
	if prev.right != nil {
		prev.right.left = prev
	}
}
// print writes each element (quoted, space-separated) followed by a
// newline, skipping the sentinel head l.
func (l *list) print() {
	for node := l.right; node != nil; node = node.right {
		fmt.Printf("\"%v\" ", node.e)
	}
	fmt.Printf("\n")
}
// main builds a small list of city names, then prints it after each of two
// deletions to demonstrate insert/delete/print.
func main() {
	cities := []string{"Helsinki", "Oulu", "Nokia", "Turku", "Tampere"}
	list1 := new(list)
	for i, city := range cities {
		list1.insert(i, city)
	}
	list1.print()
	list1.delete(1)
	list1.print()
	list1.delete(2)
	list1.print()
}
|
package optioner
// Some creates a non-empty Option[T] holding v.
func Some[T any](v T) Option[T] {
	return Option[T]{v: &v}
}
// None creates an empty Option[T] (its zero value).
func None[T any]() Option[T] {
	return Option[T]{}
}
// Of builds an Option[T] from a possibly-nil pointer: a nil v yields an
// empty option (equivalent to None[T]); a non-nil v yields an option
// containing the pointed-to value (equivalent to Some[T]).
func Of[T any](v *T) Option[T] {
	var o Option[T]
	o.v = v
	return o
}
|
package watcher
import (
"errors"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"math/big"
)
// EthereumTx extends the generic Tx interface with Ethereum-specific
// accessors: the transaction's index within its block, the raw go-ethereum
// transaction, and its gas parameters.
type EthereumTx interface {
	Tx
	Index() uint
	EthereumTx() *types.Transaction
	Gas() *big.Int
	GasPrice() *big.Int
	GasFeeCap() *big.Int
	GasTipCap() *big.Int
}
// ethereumTx is the concrete EthereumTx implementation: a go-ethereum
// transaction together with the sender, block number and in-block index it
// was observed with (see NewEthereumTx).
type ethereumTx struct {
	tx *types.Transaction
	sender *common.Address
	block *big.Int
	index uint
}
// Index returns the transaction's position within its block.
func (e *ethereumTx) Index() uint {
	return e.index
}

// Gas returns the gas limit as a *big.Int.
func (e *ethereumTx) Gas() *big.Int {
	return big.NewInt(int64(e.tx.Gas()))
}

// GasPrice returns the transaction's gas price.
func (e *ethereumTx) GasPrice() *big.Int {
	return e.tx.GasPrice()
}

// GasFeeCap returns the transaction's max fee per gas.
func (e *ethereumTx) GasFeeCap() *big.Int {
	return e.tx.GasFeeCap()
}

// GasTipCap returns the transaction's max priority fee per gas.
func (e *ethereumTx) GasTipCap() *big.Int {
	return e.tx.GasTipCap()
}

// EthereumTx returns the underlying go-ethereum transaction.
func (e *ethereumTx) EthereumTx() *types.Transaction {
	return e.tx
}
// Block returns the number of the block the transaction was observed in.
func (e *ethereumTx) Block() *big.Int {
	return e.block
}
// Sender returns the raw bytes of the sending address, or nil when the
// sender is unknown.
//
// Robustness fix: NewEthereumTx accepts a nil sender pointer, and the
// previous implementation dereferenced it unconditionally, panicking in
// that case.
func (e *ethereumTx) Sender() []byte {
	if e.sender == nil {
		return nil
	}
	return e.sender.Bytes()
}
// Receiver returns the raw bytes of the destination address, or nil for
// contract-creation transactions.
//
// Robustness fix: types.Transaction.To returns nil for contract-creation
// transactions; the previous implementation dereferenced it
// unconditionally and panicked on such transactions.
func (e *ethereumTx) Receiver() []byte {
	if to := e.tx.To(); to != nil {
		return to.Bytes()
	}
	return nil
}
// ID returns the transaction hash bytes.
func (e *ethereumTx) ID() []byte {
	return e.tx.Hash().Bytes()
}

// Amount returns the value transferred by the transaction, in wei.
func (e *ethereumTx) Amount() *big.Int {
	return e.tx.Value()
}

// Net identifies the network this transaction belongs to.
func (e *ethereumTx) Net() string {
	return "ethereum"
}

// Kind identifies the transaction kind for generic Tx consumers.
func (e *ethereumTx) Kind() string {
	return "ethereum"
}
// NewEthereumTx wraps tx with its sender, block number and in-block index
// as an EthereumTx.
//
// Bug fix: the previous implementation panicked on a nil tx even though
// the signature already returns an error; callers expecting to handle the
// error result never could. A nil tx now returns an error instead.
func NewEthereumTx(tx *types.Transaction, sender *common.Address, block *big.Int, index uint) (EthereumTx, error) {
	if tx == nil {
		return nil, errors.New("nil tx ptr")
	}
	return &ethereumTx{
		tx:     tx,
		sender: sender,
		block:  block,
		index:  index,
	}, nil
}
|
package main
import (
"bufio"
"fmt"
"io"
"os"
"strconv"
"strings"
)
// https://www.hackerrank.com/challenges/largest-rectangle/problem
// area is a candidate rectangle in the histogram: a bar height H together
// with the column index W where the rectangle of that height starts.
type area struct {
	H int32
	W int
}
// c returns the area of the rectangle of height H spanning columns
// [W, j).
func (a *area) c(j int) int32 {
	width := int32(j - a.W)
	return a.H * width
}
// stack is a LIFO of candidate rectangles; Size mirrors len(Elements).
type stack struct {
	Elements []area
	Size int
}
// Push appends x to the top of the stack.
func (s *stack) Push(x area) {
	s.Elements = append(s.Elements, x)
	s.Size++
}

// Pop removes and returns the top element. It must not be called on an
// empty stack.
func (s *stack) Pop() area {
	last := s.Size - 1
	top := s.Elements[last]
	s.Elements = s.Elements[:last]
	s.Size--
	return top
}

// Top returns the top element without removing it. It must not be called
// on an empty stack.
func (s *stack) Top() area {
	return s.Elements[s.Size-1]
}

// Empty reports whether the stack holds no elements.
func (s *stack) Empty() bool {
	return s.Size == 0
}
// max returns the larger of a and b.
func max(a, b int32) int32 {
	if b > a {
		return b
	}
	return a
}
// Solve pops every stacked rectangle taller than the incoming bar n,
// folding each popped rectangle's area (ending at column i) into max_area.
// It then pushes a rectangle of height n starting at the leftmost popped
// start column — or at i itself when nothing was popped and w == -1 — and
// returns the updated max_area. (Iterative form of the original tail
// recursion.)
func (s *stack) Solve(n, max_area int32, i, w int) int32 {
	for !s.Empty() && s.Top().H > n {
		popped := s.Pop()
		max_area = max(popped.c(i), max_area)
		w = popped.W
	}
	if w > -1 {
		s.Push(area{n, w})
	} else {
		s.Push(area{n, i})
	}
	return max_area
}
// largestRectangle returns the area of the largest rectangle that fits
// under the histogram h, using the classic O(n) monotonic-stack algorithm.
//
// Cleanup: the original kept a hand-maintained counter hsize that was
// incremented once per loop iteration and therefore always ended up equal
// to len(h); it is replaced by len(h) directly.
// NOTE(review): intermediate areas are computed in int32 even though the
// result is int64, so very tall/wide histograms could overflow — confirm
// the input constraints keep H*width within int32.
func largestRectangle(h []int32) int64 {
	s := stack{}
	var max_area int32 = -1
	s.Push(area{0, 0})
	for i, n := range h {
		if s.Empty() || n > s.Top().H {
			s.Push(area{n, i})
		} else {
			max_area = s.Solve(n, max_area, i, -1)
		}
	}
	// Flush: every bar still stacked extends to the right edge len(h).
	for _, el := range s.Elements {
		max_area = max(max_area, el.c(len(h)))
	}
	return int64(max_area)
}
// main reads the HackerRank input (bar count n, then n space-separated bar
// heights), computes the largest rectangle under the histogram and writes
// the result to the file named by the OUTPUT_PATH environment variable.
func main() {
	reader := bufio.NewReaderSize(os.Stdin, 1024*1024)
	stdout, err := os.Create(os.Getenv("OUTPUT_PATH"))
	checkError(err)
	defer stdout.Close()
	writer := bufio.NewWriterSize(stdout, 1024*1024)
	nTemp, err := strconv.ParseInt(readLine(reader), 10, 64)
	checkError(err)
	n := int32(nTemp)
	hTemp := strings.Split(readLine(reader), " ")
	var h []int32
	for i := 0; i < int(n); i++ {
		hItemTemp, err := strconv.ParseInt(hTemp[i], 10, 64)
		checkError(err)
		hItem := int32(hItemTemp)
		h = append(h, hItem)
	}
	result := largestRectangle(h)
	fmt.Fprintf(writer, "%d\n", result)
	writer.Flush()
}
// readLine reads one line from reader, returning "" on EOF and the line
// with trailing CR/LF stripped otherwise. Non-EOF read errors are
// silently ignored and whatever was read is returned.
// NOTE(review): ReadLine's isPrefix result is discarded, so a line longer
// than the reader's buffer would be silently truncated — confirm inputs
// fit within the 1 MiB buffer main allocates.
func readLine(reader *bufio.Reader) string {
	str, _, err := reader.ReadLine()
	if err == io.EOF {
		return ""
	}
	return strings.TrimRight(string(str), "\r\n")
}
// checkError aborts the program via panic on any non-nil error; it is the
// usual HackerRank boilerplate where recovery is not meaningful.
func checkError(err error) {
	if err == nil {
		return
	}
	panic(err)
}
|
package main
// Leetcode m13. (medium)
// movingCount returns how many cells of an m x n grid a robot starting at
// (0, 0) can reach, moving only right or down, where a cell (x, y) is
// allowed when digitSum(x)+digitSum(y) <= k. BFS over (row, col) carrying
// the digit sums incrementally.
func movingCount(m int, n int, k int) int {
	seen := make([][]bool, m)
	for r := range seen {
		seen[r] = make([]bool, n)
	}
	// Each entry is {row, col, digitSum(row), digitSum(col)}.
	work := [][4]int{{0, 0, 0, 0}}
	count := 0
	for len(work) > 0 {
		cur := work[0]
		work = work[1:]
		r, c, sr, sc := cur[0], cur[1], cur[2], cur[3]
		if r >= m || c >= n || sr+sc > k || seen[r][c] {
			continue
		}
		seen[r][c] = true
		count++
		work = append(work, [4]int{r + 1, c, nextDigitSum(r, sr), sc})
		work = append(work, [4]int{r, c + 1, sr, nextDigitSum(c, sc)})
	}
	return count
}

// nextDigitSum returns digitSum(v+1) given sum == digitSum(v), for the
// coordinate ranges used above: crossing a multiple of 10 turns ...x9 into
// ...(x+1)0, dropping the sum by 8; otherwise it grows by 1.
func nextDigitSum(v, sum int) int {
	if (v+1)%10 == 0 {
		return sum - 8
	}
	return sum + 1
}
|
package main
import (
"fmt"
"math/rand"
)
// main fills a slice with 100 random integers produced by concurrent
// goroutines, prints them, then prints them again after quicksorting.
func main() {
	const listSize = 100
	ch := make(chan int)
	for i := 0; i < listSize; i++ {
		go generateRandom(ch)
	}
	values := []int{}
	for i := 0; i < listSize; i++ {
		values = append(values, <-ch)
	}
	for _, v := range values {
		fmt.Println(v)
	}
	fmt.Println("sorting list items")
	for _, v := range quicksort(values) {
		fmt.Println(v)
	}
}
// generateRandom draws a single non-negative pseudo-random int and sends
// it on channel.
func generateRandom(channel chan<- int) {
	value := rand.Int()
	channel <- value
}
// quicksort returns the elements of unsorted in ascending order using the
// classic first-element-pivot recursive scheme. Slices of length 0 or 1
// are returned unchanged.
func quicksort(unsorted []int) (sorted []int) {
	if len(unsorted) < 2 {
		return unsorted
	}
	pivot, rest := unsorted[0], unsorted[1:]
	smaller := []int{}
	larger := []int{}
	for _, v := range rest {
		if v > pivot {
			larger = append(larger, v)
		} else {
			smaller = append(smaller, v)
		}
	}
	sorted = append(quicksort(smaller), pivot)
	sorted = append(sorted, quicksort(larger)...)
	return sorted
}
|
package alldebrid
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
)
// MagnetsUploadResponse is the response of the upload call.
type MagnetsUploadResponse struct {
	Status string                    `json:"status"`
	Data   magnetsUploadResponseData `json:"data,omitempty"`
	Error  alldebridError            `json:"error,omitempty"`
}

// magnetsUploadResponseData carries the per-magnet results of an upload.
type magnetsUploadResponseData struct {
	Magnets []magnetData `json:"magnets"`
}

// magnetData describes one uploaded magnet: its identity (hash/name/id),
// size, readiness, and a per-magnet error when this magnet failed.
type magnetData struct {
	Magnet string         `json:"magnet"`
	Hash   string         `json:"hash,omitempty"`
	Name   string         `json:"name,omitempty"`
	Size   int            `json:"size,omitempty"`
	Ready  bool           `json:"ready,omitempty"`
	ID     int            `json:"id,omitempty"`
	Error  alldebridError `json:"error,omitempty"`
}

// StatusMagnetResponse is the response of the status call.
type StatusMagnetResponse struct {
	Status string                   `json:"status"`
	Data   statusMagnetResponseData `json:"data,omitempty"`
	Error  alldebridError           `json:"error,omitempty"`
}

// statusMagnetResponseData wraps the magnet status list.
type statusMagnetResponseData struct {
	Magnets []statusMagnet `json:"magnets"`
	Type    string         `json:"type"`
}

// statusMagnet is the transfer/progress state of one download.
type statusMagnet struct {
	ID            int                         `json:"id"`
	Filename      string                      `json:"filename"`
	Size          int                         `json:"size"`
	Hash          string                      `json:"hash"`
	Status        string                      `json:"status"`
	StatusCode    int                         `json:"statusCode"`
	Downloaded    int                         `json:"downloaded"`
	Uploaded      int                         `json:"uploaded"`
	Seeders       int                         `json:"seeders"`
	DownloadSpeed int                         `json:"downloadSpeed"`
	UploadSpeed   int                         `json:"uploadSpeed"`
	UploadDate    int                         `json:"uploadDate"`
	Links         []statusMagnetResponseLinks `json:"links"`
}

// statusMagnetResponseLinks is one downloadable link of a magnet.
type statusMagnetResponseLinks struct {
	Link     string      `json:"link"`
	Filename string      `json:"filename"`
	Size     int         `json:"size"`
	Files    interface{} `json:"files"`
}

// DeleteMagnetResponse is the response of the delete call.
type DeleteMagnetResponse struct {
	Status string             `json:"status"`
	Data   magnetResponseData `json:"data,omitempty"`
	Error  alldebridError     `json:"error,omitempty"`
}

// magnetResponseData is a human-readable confirmation message.
type magnetResponseData struct {
	Message string `json:"message"`
}

// RestartMagnetResponse is the response of the restart call.
type RestartMagnetResponse struct {
	Status string             `json:"status"`
	Data   magnetResponseData `json:"data,omitempty"`
	Error  alldebridError     `json:"error,omitempty"`
}

// InstantAvailabilityResponse is the response of the instant availability call.
type InstantAvailabilityResponse struct {
	Status string                          `json:"status"`
	Data   instantAvailabilityResponseData `json:"data,omitempty"`
	Error  alldebridError                  `json:"error,omitempty"`
}

// instantAvailabilityResponseData wraps the per-magnet availability list.
type instantAvailabilityResponseData struct {
	Magnets []instantAvailabilityMagnet `json:"magnets"`
}

// instantAvailabilityMagnet reports whether one magnet is already cached.
type instantAvailabilityMagnet struct {
	Magnet  string `json:"magnet"`
	Hash    string `json:"hash"`
	Instant bool   `json:"instant"`
}
// UploadMagnet sends magnet(s) to AllDebrid. Every magnet URI is POSTed
// as a "magnets[]" form value; a reply whose status is not "success" is
// converted into an error carrying the service-supplied message.
func (c *Client) UploadMagnet(magnets []string) (MagnetsUploadResponse, error) {
	form := url.Values{}
	for _, m := range magnets {
		form.Add("magnets[]", m)
	}

	httpClient := &http.Client{}
	resp, err := httpClient.PostForm(fmt.Sprintf(magnetupload, getMagnetEndpoint(), c.ic.appName, c.ic.apikey), form)
	if err != nil {
		return MagnetsUploadResponse{}, err
	}
	defer resp.Body.Close()

	var result MagnetsUploadResponse
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		return MagnetsUploadResponse{}, err
	}
	if result.Status != "success" {
		return MagnetsUploadResponse{}, errors.New(result.Error.Message)
	}
	return result, nil
}
// StatusMagnet returns the status of the AllDebrid download identified
// by id. A reply whose status is not "success" becomes an error with the
// service-supplied message.
func (c *Client) StatusMagnet(id string) (StatusMagnetResponse, error) {
	resp, err := http.Get(fmt.Sprintf(magnetstatus, getMagnetEndpoint(), c.ic.appName, c.ic.apikey, id))
	if err != nil {
		return StatusMagnetResponse{}, err
	}
	defer resp.Body.Close()

	var result StatusMagnetResponse
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		return StatusMagnetResponse{}, err
	}
	if result.Status != "success" {
		return StatusMagnetResponse{}, errors.New(result.Error.Message)
	}
	return result, nil
}
// DeleteMagnet removes a download from AllDebrid. A reply whose status is
// not "success" becomes an error with the service-supplied message.
func (c *Client) DeleteMagnet(id string) (DeleteMagnetResponse, error) {
	resp, err := http.Get(fmt.Sprintf(magnetdelete, getMagnetEndpoint(), c.ic.appName, c.ic.apikey, id))
	if err != nil {
		return DeleteMagnetResponse{}, err
	}
	defer resp.Body.Close()

	var result DeleteMagnetResponse
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		return DeleteMagnetResponse{}, err
	}
	if result.Status != "success" {
		return DeleteMagnetResponse{}, errors.New(result.Error.Message)
	}
	return result, nil
}
// RestartMagnet restarts a failed torrent on AllDebrid. A reply whose
// status is not "success" becomes an error with the service message.
func (c *Client) RestartMagnet(id string) (RestartMagnetResponse, error) {
	resp, err := http.Get(fmt.Sprintf(magnetrestart, getMagnetEndpoint(), c.ic.appName, c.ic.apikey, id))
	if err != nil {
		return RestartMagnetResponse{}, err
	}
	defer resp.Body.Close()

	var result RestartMagnetResponse
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		return RestartMagnetResponse{}, err
	}
	if result.Status != "success" {
		return RestartMagnetResponse{}, errors.New(result.Error.Message)
	}
	return result, nil
}
// InstantAvailability asks AllDebrid whether the given magnet(s) are
// already cached and therefore instantly downloadable.
func (c *Client) InstantAvailability(magnets []string) (InstantAvailabilityResponse, error) {
	form := url.Values{}
	for _, m := range magnets {
		form.Add("magnets[]", m)
	}

	httpClient := &http.Client{}
	resp, err := httpClient.PostForm(fmt.Sprintf(magnetinstant, getMagnetEndpoint(), c.ic.appName, c.ic.apikey), form)
	if err != nil {
		return InstantAvailabilityResponse{}, err
	}
	defer resp.Body.Close()

	var result InstantAvailabilityResponse
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		return InstantAvailabilityResponse{}, err
	}
	if result.Status != "success" {
		return InstantAvailabilityResponse{}, errors.New(result.Error.Message)
	}
	return result, nil
}
|
//
// Package - transpiled by c4go
//
// If you have found any issues, please raise an issue at:
// https://github.com/Konstantin8105/c4go/
//
package pkg
// init_cache - transpiled function from /home/istvan/packages/downloaded/cbuild/package/index.c:21
//
// Allocates the two package-level lookup tables (path -> package and
// id -> package) used to memoize parsed packages.
func init_cache() {
	package_path_cache = kh_init_ptr()
	package_id_cache = kh_init_ptr()
}
// abs_path - transpiled function from /home/istvan/packages/downloaded/cbuild/package/index.c:26
//
// Resolves rel_path to an absolute path via realpath(3). On failure the
// result is empty and errno is left for the caller to inspect (see
// index_new, which reads ErrnoLocation after calling this).
func abs_path(rel_path []byte) []byte {
	return realpath(rel_path, nil)
}
// package_name - transpiled function from /home/istvan/packages/downloaded/cbuild/package/index.c:30
//
// Derives an identifier-safe package name from a relative file path: it
// takes the basename, truncates the trailing ".c" extension, rewrites
// '.', '-' and ' ' to '_', and returns a fresh copy of the result.
func package_name(rel_path []byte) []byte {
	var buffer []byte = noarch.Strdup(rel_path)
	var filename []byte = __xpg_basename(buffer)
	// trim the .c extension: writes NUL over the '.' (two bytes from the
	// C-string end, i.e. assumes the name really ends in ".c")
	filename[noarch.Strlen(filename)-int32(2)] = byte(0)
	var c []byte
	// Transpiled C loop `for (c = filename; *c; c++)`: the closure in the
	// post clause returns the old slice and advances c by one element.
	for c = filename; int32(c[0]) != 0; func() []byte {
		tempVarUnary := c
		defer func() {
			c = c[0+1:]
		}()
		return tempVarUnary
	}() {
		// replace characters that are not legal in an identifier
		switch int32(c[0]) {
		case '.':
			fallthrough
		case '-':
			fallthrough
		case ' ':
			c[0] = '_'
		default:
			break
		}
	}
	var name []byte = noarch.Strdup(filename)
	// `_ = buffer` stands in for the C free(buffer) elided by the transpiler
	_ = buffer
	return name
}
// assert_name - transpiled function from /home/istvan/packages/downloaded/cbuild/package/index.c:54
//
// Validates that relative_path ends in ".module.c". Returns 0 on success;
// on failure returns 1 and, when error_ is non-nil, formats a diagnostic
// into *error_ via asprintf.
func assert_name(relative_path []byte, error_ [][]byte) int32 {
	var len_ noarch.SsizeT = noarch.SsizeT(noarch.Strlen(relative_path))
	var suffix_l uint32 = uint32(noarch.Strlen([]byte(".module.c\x00")))
	// compare the path's suffix against ".module.c" (pointer arithmetic
	// emulated on the byte slice)
	if uint32(len_) < suffix_l || noarch.Strcmp([]byte(".module.c\x00"), c4goPointerArithByteSlice(relative_path, int(0+len_-suffix_l))) != 0 {
		if error_ != nil {
			asprintf(error_, []byte("Unsupported input filename '%s', Expecting '<file>.module.c'\x00"), relative_path)
		}
		return 1
	}
	return 0
}
// index_generated_name - transpiled function from /home/istvan/packages/downloaded/cbuild/package/index.c:65
//
// Maps "<file>.module.c" to the output name "<file>.c" by replacing the
// ".module.c" suffix; returns a freshly allocated NUL-terminated buffer.
func index_generated_name(path []byte) []byte {
	var len_ noarch.SsizeT = noarch.SsizeT(noarch.Strlen(path))
	var suffix_l uint32 = uint32(noarch.Strlen([]byte(".module.c\x00")))
	// room for the stem + ".c" + terminating NUL
	var buffer []byte = make([]byte, uint32(len_)-suffix_l+uint32(noarch.Strlen([]byte(".c\x00")))+1)
	noarch.Strncpy(buffer, path, int32(uint32(len_)-suffix_l))
	noarch.Strcpy(c4goPointerArithByteSlice(buffer, int(0+(len_-suffix_l))), []byte(".c\x00"))
	return buffer
}
// index_parse - transpiled function from /home/istvan/packages/downloaded/cbuild/package/index.c:76
//
// Allocates and initializes a package_t for the module read from input,
// registers it in the path cache BEFORE parsing (so recursive/cyclic
// imports resolve to this same instance), then runs the grammar parser
// and records the number of parse errors on the package.
func index_parse(input []stream_t, out []stream_t, rel []byte, key []byte, generated []byte, error_ [][]byte, force int32, silent int32) []package_t {
	if len(package_path_cache) == 0 {
		init_cache()
	}
	// lazily install the default package constructor
	if package_new == nil {
		package_new = index_new
	}
	var p []package_t = make([]package_t, 1)
	p[0].deps = kh_init_ptr()
	p[0].exports = kh_init_ptr()
	p[0].ordered = nil
	p[0].n_exports = 0
	p[0].symbols = kh_init_ptr()
	p[0].source_abs = key
	p[0].generated = generated
	p[0].out = out
	p[0].name = package_name(p[0].generated)
	p[0].force = force
	p[0].silent = silent
	// cache before parsing so imports of this package find it
	hash_set(package_path_cache, key, p)
	p[0].errors = uint32(grammer_parse(input, rel, p, error_))
	return p
}
// index_new - transpiled function from /home/istvan/packages/downloaded/cbuild/package/index.c:109
//
// Top-level package constructor: validates the "<file>.module.c" name,
// resolves the absolute path, returns a cached package when one exists,
// otherwise opens the source stream, (re)creates the generated output
// when forced or when the source is newer, and parses the module.
// Returns nil on any failure with *error_ describing the problem.
func index_new(relative_path []byte, error_ [][]byte, force int32, silent int32) []package_t {
	if len(package_path_cache) == 0 {
		init_cache()
	}
	if package_new == nil {
		package_new = index_new
	}
	if int32((assert_name(relative_path, error_))) != 0 {
		return nil
	}
	var key []byte = abs_path(relative_path)
	if len(key) == 0 {
		// realpath failed; surface the errno message
		error_[0] = noarch.Strerror((noarch.ErrnoLocation())[0])
		return nil
	}
	// serve from cache when this path has already been parsed
	var cached []package_t = hash_get(package_path_cache, key).([]package_t)
	if len(cached) != 0 {
		_ = key
		return cached
	}
	var input []stream_t = file_open(relative_path, 0)
	if input[0].error_.code != 0 {
		error_[0] = noarch.Strdup(input[0].error_.message)
		_ = key
		return nil
	}
	var generated []byte = index_generated_name(key)
	var out []stream_t
	// open the output only when regeneration is needed (forced, or the
	// source is newer than the generated file)
	if int32((force)) != 0 || noarch.Not(silent) && int32((utils_newer(relative_path, generated))) != 0 {
		out = atomic_stream_open(generated)
		if out[0].error_.code != 0 {
			_ = key
			noarch.Fprintf(noarch.Stderr, []byte("ERROR: '%s'\n\x00"), out[0].error_.message)
			if error_ != nil {
				error_[0] = noarch.Strdup(out[0].error_.message)
			}
			atomic_stream_abort(out)
			return nil
		}
	}
	var p []package_t = index_parse(input, out, relative_path, key, generated, error_, force, silent)
	if len(p) == 0 || len(error_[0]) != 0 {
		// parse failed: discard any partially written output
		_ = key
		if out != nil {
			atomic_stream_abort(out)
		}
		return nil
	}
	if out != nil {
		stream_close(out)
	}
	return p
}
// index_free - transpiled function from /home/istvan/packages/downloaded/cbuild/package/index.c:159
//
// Releases a package: removes it from the path cache, frees every export,
// destroys the hash tables, and recursively frees imported dependencies.
// The `_ = x` statements stand in for C free() calls elided by the
// transpiler.
func index_free(pkg []package_t) {
	if len(pkg) == 0 {
		return
	}
	if package_path_cache != nil {
		hash_del(package_path_cache, pkg[0].source_abs)
	}
	var i int32
	{
		// exports
		for i = 0; uint32(i) < uint32(pkg[0].n_exports); i++ {
			package_export_free(pkg[0].ordered[i].([]package_export_t))
		}
	}
	_ = pkg[0].ordered
	kh_destroy_ptr(pkg[0].exports)
	kh_destroy_ptr(pkg[0].symbols)
	_ = pkg[0].name
	_ = pkg[0].source_abs
	_ = pkg[0].generated
	_ = pkg[0].header
	{
		var val interface{}
		{
			// Iterate the deps hash; the bit-twiddling condition is
			// khash's transpiled "bucket is empty or deleted" flag test.
			var k khiter_t = khiter_t((khint_t(0)))
			for ; k < khiter_t(((pkg[0].deps)[0].n_buckets)); k++ {
				if !noarch.Not((pkg[0].deps)[0].flags[k>>uint64(4)] >> uint64(uint32((khint32_t((khint_t((k & khiter_t((khint_t((khint32_t((uint32(15))))))) << uint64(1)))))))) & khint32_t((3))) {
					// imports
					// Warning (*ast.MemberExpr): /home/istvan/packages/downloaded/cbuild/package/index.c:181 :cannot determine type for LHS '[hash_t * hash_t *]', will use 'void *' for all fields. Is lvalue = true. n.Name = n_buckets
					// Warning (*ast.MemberExpr): /home/istvan/packages/downloaded/cbuild/package/index.c:181 :cannot determine type for LHS '[hash_t * hash_t *]', will use 'void *' for all fields. Is lvalue = true. n.Name = flags
					continue
				}
				// Warning (*ast.MemberExpr): /home/istvan/packages/downloaded/cbuild/package/index.c:181 :cannot determine type for LHS '[hash_t * hash_t *]', will use 'void *' for all fields. Is lvalue = true. n.Name = vals
				val = (pkg[0].deps)[0].vals[k]
				{
					// free the import and recurse into its package
					index_free(package_import_free(val.([]package_import_t)))
				}
			}
		}
	}
	kh_destroy_ptr(pkg[0].deps)
	_ = pkg
}
|
package main
import (
"flag"
//"fmt"
//"os"
//"os"
)
// Command-line flags selecting the run mode and which compiler
// intermediate representations get printed.
var (
	batch       = flag.Bool("b", false, "batch (non-interactive) mode")
	printTokens = flag.Bool("tok", false, "print tokens")
	printAst    = flag.Bool("ast", false, "print abstract syntax tree")
	printLLVMIR = flag.Bool("llvm", false, "print LLVM generated code")
)
// main parses the command-line flags, constructs the lexer, and prints a
// short startup banner (the flag values are consumed elsewhere).
func main() {
	// Fixed: dropped the stray trailing semicolon after flag.Parse()
	// (non-idiomatic; gofmt removes it).
	flag.Parse()
	lex := Lex()
	println(lex.name)
	println("hello dawg")
}
|
package main
import (
"fmt"
"github.com/sclevine/agouti"
"log"
)
// main starts a PhantomJS driver via agouti, opens a page, navigates to
// the sample site, and saves a screenshot to /tmp/a.png.
func main() {
	fmt.Println("Hello from Selenium sample.")
	driver := agouti.PhantomJS()
	if err := driver.Start(); err != nil {
		log.Fatalf("Failed to start phantomjs driver: %v", err)
	}
	defer driver.Stop()
	page, err := driver.NewPage(agouti.Browser("phantomjs"))
	if err != nil {
		log.Fatalf("Failed to open page: %v", err)
	}
	if err := page.Navigate("http://www.yoheim.net"); err != nil {
		log.Fatalf("Failed to navigate: %v", err)
	}
	// Fixed: the screenshot error was silently discarded, so a failed
	// capture previously looked like success.
	if err := page.Screenshot("/tmp/a.png"); err != nil {
		log.Fatalf("Failed to take screenshot: %v", err)
	}
}
|
package ravendb
import (
"net/http"
"strings"
)
// OperationExecutor executes operations against one database of a
// document store, using that database's request executor.
type OperationExecutor struct {
	store           *DocumentStore
	databaseName    string
	requestExecutor *RequestExecutor
}
// NewOperationExecutor creates an OperationExecutor bound to store. When
// databaseName is empty the store's default database is used; an empty
// resolved name is a programming error and panics.
func NewOperationExecutor(store *DocumentStore, databaseName string) *OperationExecutor {
	name := databaseName
	if name == "" {
		name = store.GetDatabase()
	}
	panicIf(name == "", "databaseName is empty")
	return &OperationExecutor{
		store:           store,
		databaseName:    name,
		requestExecutor: store.GetRequestExecutor(name),
	}
}
// ForDatabase returns an executor for databaseName, reusing the receiver
// when it already targets that database (compared case-insensitively).
func (e *OperationExecutor) ForDatabase(databaseName string) *OperationExecutor {
	if !strings.EqualFold(e.databaseName, databaseName) {
		return NewOperationExecutor(e.store, databaseName)
	}
	return e
}
// Send executes the given operation against the database.
//
// Note: no result is returned because it could only be interface{}; the
// caller owns the operation and can read the strongly typed command and
// its result from it. sessionInfo can be nil.
func (e *OperationExecutor) Send(operation IOperation, sessionInfo *SessionInfo) error {
	re := e.requestExecutor
	command, err := operation.GetCommand(e.store, re.GetConventions(), re.Cache)
	if err != nil {
		return err
	}
	return re.ExecuteCommand(command, sessionInfo)
}
// SendAsync executes the operation and returns an *Operation handle that
// tracks the server-side operation by its id. sessionInfo can be nil.
func (e *OperationExecutor) SendAsync(operation IOperation, sessionInfo *SessionInfo) (*Operation, error) {
	re := e.requestExecutor
	command, err := operation.GetCommand(e.store, re.GetConventions(), re.Cache)
	if err != nil {
		return nil, err
	}
	if err := re.ExecuteCommand(command, sessionInfo); err != nil {
		return nil, err
	}
	idResult := getCommandOperationIDResult(command)
	changes := func() *DatabaseChanges {
		return e.store.Changes("")
	}
	return NewOperation(re, changes, re.GetConventions(), idResult.OperationID), nil
}
// SendPatchOperation executes a patch operation and translates the HTTP
// status of the reply into a PatchOperationResult.Status value; callers
// should inspect that status rather than rely on an error for the
// "not modified" / "document missing" outcomes. sessionInfo can be nil.
//
// (Replaces the Java-style Send(PatchOperation, ...) overloads.)
func (e *OperationExecutor) SendPatchOperation(operation *PatchOperation, sessionInfo *SessionInfo) (*PatchOperationResult, error) {
	re := e.requestExecutor
	command, err := operation.GetCommand(e.store, re.GetConventions(), re.Cache)
	if err != nil {
		return nil, err
	}
	if err := re.ExecuteCommand(command, sessionInfo); err != nil {
		return nil, err
	}

	cmdResult := operation.Command.Result
	result := &PatchOperationResult{
		Status:   cmdResult.Status,
		Document: cmdResult.ModifiedDocument,
	}
	// map transport-level outcomes onto the patch status
	if code := operation.Command.StatusCode; code == http.StatusNotModified {
		result.Status = PatchStatusNotModified
	} else if code == http.StatusNotFound {
		result.Status = PatchStatusDocumentDoesNotExist
	}
	return result, nil
}
|
package models
// Book models one book row; the db tags map each field to its column.
// (Original comment: "书籍对象结构体" — book object struct.)
type Book struct {
	ID    int64   `db:"id"`
	Title string  `db:"title"`
	Price float64 `db:"price"`
}
|
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// THIS IS OUR API SCAFFOLDING!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// H2DatabaseSpec defines the desired state of H2Database
// +k8s:openapi-gen=true
type H2DatabaseSpec struct {
	// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
	// Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file
	// Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html

	// Size is the size of the h2 deployment.
	// Important: having more than 2 pods in the deployment is probably not
	// necessary, as currently H2 only supports running on a single node or
	// in HA mode with a cluster of size 2.
	Size int32 `json:"size"`
	// Backup is the URL to which the operator should POST DB backups;
	// leave as the literal string 'skip' if you don't want backups.
	Backup string `json:"backup"`
	// Clustering indicates whether to try to run the DBs as a connected
	// cluster; it is only considered when there are exactly two DB
	// instances running (since H2 demands it); 'yes' or 'no'.
	Clustering string `json:"clustering"`
	// CachSize is the desired cache size of H2.
	// NOTE(review): the Go field name has a typo; renaming it would break
	// Go API consumers, so it is left as-is — the JSON name "cacheSize"
	// is already correct.
	// For more info please visit https://www.h2database.com/html/features.html#cache_settings
	// TODO: implement handler
	CachSize int32 `json:"cacheSize"`
}
// H2DatabaseStatus defines the observed state of H2Database
// +k8s:openapi-gen=true
type H2DatabaseStatus struct {
	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
	// Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file
	// Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html

	// Nodes are the names of the h2 pods.
	Nodes []string `json:"nodes"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// H2Database is the Schema for the h2databases API.
// +k8s:openapi-gen=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:path=h2databases,scope=Namespaced
type H2Database struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   H2DatabaseSpec   `json:"spec,omitempty"`
	Status H2DatabaseStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// H2DatabaseList contains a list of H2Database.
type H2DatabaseList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []H2Database `json:"items"`
}
// init registers the H2Database API types with the scheme builder so the
// generated clients can encode and decode them.
func init() {
	SchemeBuilder.Register(&H2Database{}, &H2DatabaseList{})
}
|
package main
import (
"bufio"
"fmt"
"os"
"strconv"
"strings"
)
// main solves the "divisible sum pairs" problem: it reads n and k, then
// n integers, and prints the number of index pairs (i < j) whose sum is
// divisible by k.
//
// Improvement: the original double loop was O(n^2); counting remainders
// mod k in one pass and combining complementary remainder classes is
// O(n + k) and produces the identical count.
func main() {
	var n int64
	var k int64
	var numbers []int64
	scanner := bufio.NewScanner(os.Stdin)
	if scanner.Scan() {
		args := strings.Fields(scanner.Text())
		n, _ = strconv.ParseInt(args[0], 10, 64)
		k, _ = strconv.ParseInt(args[1], 10, 64)
	}
	if scanner.Scan() {
		for _, numstr := range strings.Fields(scanner.Text()) {
			num, _ := strconv.ParseInt(numstr, 10, 64)
			numbers = append(numbers, num)
		}
	}
	// use at most the declared n values, matching the original loops
	if int64(len(numbers)) > n {
		numbers = numbers[:n]
	}
	// problem constraints guarantee k >= 1; guard anyway
	if k < 1 {
		fmt.Println(0)
		return
	}
	// freq[r] = how many numbers leave remainder r when divided by k
	freq := make([]int64, k)
	for _, v := range numbers {
		freq[((v%k)+k)%k]++
	}
	var pairs int64
	pairs += freq[0] * (freq[0] - 1) / 2 // both remainders zero
	for r := int64(1); r*2 <= k; r++ {
		if c := k - r; r == c {
			// k even: remainder k/2 pairs with itself
			pairs += freq[r] * (freq[r] - 1) / 2
		} else {
			pairs += freq[r] * freq[c]
		}
	}
	fmt.Println(pairs)
}
|
package plot
import (
"fmt"
"os"
"code.google.com/p/plotinum/plot"
"code.google.com/p/plotinum/plotter"
"github.com/nictuku/latency"
)
// Plot saves an image of the latency histogram h to filePath. The
// extension of filePath defines the format to be used - png, svg, etc.
// The output is 8x6 inches with description as the title.
func Plot(h *latency.Histogram, description, filePath string) error {
	n := len(h.Buckets)
	points := make(plotter.XYs, n)
	for i, freq := range h.Buckets {
		points[i].X = float64(i)
		points[i].Y = float64(freq)
	}

	p, err := plot.New()
	if err != nil {
		return fmt.Errorf("error generating plot: %v", err)
	}
	p.Title.Text = description
	p.X.Label.Text = fmt.Sprintf("Latency (%v resolution)", h.Resolution)
	p.Y.Label.Text = "Frequency"

	hist, err := plotter.NewHistogram(points, n)
	if err != nil {
		return fmt.Errorf("error generating histogram: %v", err)
	}
	p.Add(hist)

	// Save the plot to a file. Units in inches (one inch == 72 points).
	fmt.Fprintf(os.Stderr, "Saving latency histogram to %v\n", filePath)
	return p.Save(8, 6, filePath)
}
|
package services
import (
"fmt"
"os"
"github.com/OrbitalbooKING/booKING/server/config"
"github.com/jinzhu/gorm"
_ "github.com/jinzhu/gorm/dialects/postgres"
)
var DB *gorm.DB
// ConnectDataBase opens the package-global gorm DB handle. When the
// DATABASE_URL environment variable is set (e.g. on Heroku) it is used
// verbatim as the connection string; otherwise a DSN is assembled from
// the config package. The connection is ping-checked before returning.
func ConnectDataBase() error {
	// Fixed: the env value is a full connection URL, not a port — the old
	// local name `dbPort` was misleading. Also removed the redundant
	// else-after-return branches.
	dsn := fmt.Sprintf("host=%s port=%d user=%s "+
		"password=%s dbname=%s sslmode=disable",
		config.DB_HOST, config.DB_PORT, config.DB_USER, config.DB_PASSWORD, config.DB_NAME)
	if databaseURL := os.Getenv("DATABASE_URL"); databaseURL != "" {
		dsn = databaseURL
	}
	database, err := gorm.Open("postgres", dsn)
	if err != nil {
		return err
	}
	DB = database
	return DB.DB().Ping()
}
|
package utils
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestDoubleMapStringSet verifies that reading a missing outer key does
// not materialize an inner map, and that Set creates it and stores the
// value under both keys.
func TestDoubleMapStringSet(t *testing.T) {
	m := make(DoubleMapString)
	assert.Nil(t, m["foo"])
	assert.Empty(t, m["foo"]["bar"])
	assert.Nil(t, m["foo"]) // the read above must not have created the inner map

	m.Set("foo", "bar", "xyz")
	assert.NotNil(t, m["foo"])
	assert.Equal(t, "xyz", m["foo"]["bar"])
}
// TestContains verifies membership checks for present and absent values.
func TestContains(t *testing.T) {
	haystack := []string{"foo", "bar"}
	assert.True(t, Contains(haystack, "foo"))
	assert.False(t, Contains(haystack, "xyz"))
}
|
package main
import "fmt"
// obterResultado maps a numeric grade to a pass/fail label: "Aprovado"
// (approved) when nota >= 1.6, otherwise "Reprovado" (failed).
// (Go has no ternary operator, hence the explicit branch.)
func obterResultado(nota float64) string {
	switch {
	case nota >= 1.6:
		return "Aprovado"
	default:
		return "Reprovado"
	}
}
// main prints the pass/fail result for a sample grade.
func main() {
	resultado := obterResultado(6.2)
	fmt.Println(resultado)
}
|
/*
Symmetric Tree
Given a binary tree, check whether it is a mirror of itself (ie, symmetric around its center).
For example, this binary tree [1,2,2,3,4,4,3] is symmetric:
1
/ \
2 2
/ \ / \
3 4 4 3
But the following [1,2,2,null,3,null,3] is not:
1
/ \
2 2
\ \
3 3
Note:
Bonus points if you could solve it both recursively and iteratively.
*/
package main
// isSymmetric reports whether the tree rooted at root is a mirror of
// itself around its center (recursive solution).
func isSymmetric(root *TreeNode) bool {
	if root != nil {
		return isSymmetricTree(root.Left, root.Right)
	}
	return true
}
// isSymmetricTree reports whether p and q are mirror images of each
// other: equal root values, with each one's left subtree mirroring the
// other's right subtree.
func isSymmetricTree(p *TreeNode, q *TreeNode) bool {
	switch {
	case p == nil && q == nil:
		return true
	case p == nil || q == nil:
		return false
	case p.Val != q.Val:
		return false
	}
	return isSymmetricTree(p.Left, q.Right) && isSymmetricTree(p.Right, q.Left)
}
// iteratively Morris Traversal
//
// isSymmetric1 checks symmetry in O(1) extra space by running two Morris
// traversals in lockstep: curr1 walks the tree left-first while curr2
// walks it right-first, and the value sequences are compared as they are
// produced. The walks temporarily rewire Right/Left pointers as threads
// and remove them again as nodes are visited.
//
// NOTE(review): because the tree is mutated in place while walking, this
// must not be called concurrently on a shared tree; the pointers appear
// to be restored by the time the function returns, but confirm before
// relying on that.
func isSymmetric1(root *TreeNode) bool {
	var pre1, pre2 *TreeNode
	curr1, curr2 := root, root
	var val1, val2 int
	for curr1 != nil || curr2 != nil {
		if curr1 == nil || curr2 == nil {
			// one walk finished before the other: shapes differ
			return false
		}
		// --- advance the left-first walk one step ---
		if curr1.Left == nil {
			val1 = curr1.Val
			curr1 = curr1.Right
		} else {
			// find the in-order predecessor, or detect an existing thread
			for pre1 = curr1.Left; pre1.Right != nil; pre1 = pre1.Right {
				if pre1.Right == curr1 {
					break
				}
			}
			if pre1.Right == nil {
				// create the thread and descend left
				pre1.Right, curr1 = curr1, curr1.Left
			} else {
				// second arrival: remove the thread and visit the node
				pre1.Right = nil
				val1 = curr1.Val
				curr1 = curr1.Right
			}
		}
		// --- advance the mirrored right-first walk one step ---
		if curr2.Right == nil {
			val2 = curr2.Val
			curr2 = curr2.Left
		} else {
			for pre2 = curr2.Right; pre2.Left != nil; pre2 = pre2.Left {
				if pre2.Left == curr2 {
					break
				}
			}
			if pre2.Left == nil {
				pre2.Left, curr2 = curr2, curr2.Right
			} else {
				pre2.Left = nil
				val2 = curr2.Val
				curr2 = curr2.Left
			}
		}
		// compare the values produced by the two walks
		if val1 != val2 {
			return false
		}
	}
	return true
}
package main
import (
"log"
"os"
bot "github.com/curi0s/learning-go-twitch-bot"
)
// handleEvents consumes bot events from ch until the channel closes. On
// connect it greets the default channel; a connection error aborts the
// loop and is returned to the caller.
func handleEvents(t *bot.Twitch, ch chan interface{}) error {
	for event := range ch {
		switch e := event.(type) {
		case bot.EventConnected:
			log.Println("Connected!")
			t.SendMessage(t.Options().DefaultChannel, "HeyGuys")
		case bot.ConnectionError:
			return e.Err
		}
	}
	return nil
}
// main reads the OAuth token from the TOKEN environment variable,
// connects the bot to its channel, and processes events until an error
// occurs.
func main() {
	token := os.Getenv("TOKEN")
	if token == "" {
		log.Fatal("Empty TOKEN")
	}
	twitch := bot.NewTwitch(bot.Options{
		Username:       "curi0sde_bot",
		Token:          token,
		Channels:       []string{"curi0sde"},
		DefaultChannel: "curi0sde",
	})
	if err := handleEvents(twitch, twitch.Connect()); err != nil {
		log.Fatal(err)
	}
}
|
// Copyright 2018 David Sansome
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"fmt"
pb "github.com/davidsansome/tsurukame/proto"
)
// RemoveNone strips readings whose text is the literal "None" from the
// subject, logging each removal, and returns the updated subject.
func RemoveNone(subject pb.Subject) pb.Subject {
	var kept []*pb.Reading
	for _, r := range subject.Readings {
		if r.GetReading() == "None" {
			fmt.Printf("Removing None reading from %d. %s %s\n",
				subject.GetId(), subject.GetJapanese(), subject.GetMeanings()[0].GetMeaning())
			continue
		}
		kept = append(kept, r)
	}
	subject.Readings = kept
	return subject
}
|
package main
import "github.com/bjatkin/golf-engine/golf"
// collidable is anything the player can bump into.
type collidable interface {
	collide(vec2) bool
}

// character is an interactable NPC: a sprite drawn at pos that delegates
// its dialogue behaviour to the embedded interaction.
type character struct {
	*interaction
	n   int      // sprite sheet index passed to g.Spr
	pos vec2     // world position of the sprite's top-left corner
	o   golf.SOp // sprite draw options; W/H are in 8-pixel tiles (see collide)
}
// collide reports whether the player's bounding box (14 pixels wide,
// 16 tall) overlaps this character's sprite rectangle (o.W x o.H tiles
// of 8 pixels each).
func (c *character) collide(player vec2) bool {
	width, height := float64(c.o.W*8), float64(c.o.H*8)
	overlapX := player.x+14 > c.pos.x && player.x < c.pos.x+width
	overlapY := player.y+16 > c.pos.y && player.y < c.pos.y+height
	return overlapX && overlapY
}
// drawInteractable anchors the embedded interaction at this character's
// position, draws the interaction prompt relative to the player, then
// draws the character's own sprite via the global engine handle g.
func (c *character) drawInteractable(player vec2) {
	c.location = c.pos
	c.interaction.drawInteractable(player)
	g.Spr(c.n, c.pos.x, c.pos.y, c.o)
}
// NPC definitions. Sprite indices (n) refer to the sprite sheet, pos is
// in world pixels, and every sprite is 2x2 tiles with Col6 as the
// transparent colour.

// guard1 is one of the two gate guards (horizontally flipped to face its twin).
var guard1 = character{
	interaction: &guardInter,
	n:           22,
	pos:         vec2{192, 30},
	o:           golf.SOp{TCol: golf.Col6, W: 2, H: 2, FH: true},
}

// guard2 is the other gate guard, sharing guard1's dialogue.
var guard2 = character{
	interaction: &guardInter,
	n:           22,
	pos:         vec2{208, 30},
	o:           golf.SOp{TCol: golf.Col6, W: 2, H: 2},
}

// fishKid is the kid by the water.
var fishKid = character{
	interaction: &fishKidInter,
	n:           18,
	pos:         vec2{248, 182},
	o:           golf.SOp{TCol: golf.Col6, W: 2, H: 2},
}

// wellMan stands by the well.
var wellMan = character{
	interaction: &wellManInter,
	n:           30,
	pos:         vec2{352, 30},
	o:           golf.SOp{TCol: golf.Col6, W: 2, H: 2},
}

// talkingDog is the dog NPC; talking to it triggers the talkedToDog event
// (see initCharacterEvents).
var talkingDog = character{
	interaction: &talkingDogInter,
	n:           20,
	pos:         vec2{38, 44},
	o:           golf.SOp{TCol: golf.Col6, W: 2, H: 2},
}

// countingKid counts seconds; its dialogue changes with story events.
var countingKid = character{
	interaction: &countingKidInter,
	n:           26,
	pos:         vec2{152, 144},
	o:           golf.SOp{TCol: golf.Col6, W: 2, H: 2},
}

// oldMan is the old man NPC.
var oldMan = character{
	interaction: &oldManInter,
	n:           28,
	pos:         vec2{74, 40},
	o:           golf.SOp{TCol: golf.Col6, W: 2, H: 2},
}

// bootLady is the boot vendor NPC.
var bootLady = character{
	interaction: &bootLadyInter,
	n:           24,
	pos:         vec2{24, 22},
	o:           golf.SOp{TCol: golf.Col6, W: 2, H: 2},
}

// rentGuy collects the rent; note it currently reuses wellMan's sprite
// and bootLady's position.
var rentGuy = character{
	interaction: &rentGuyInter,
	n:           30, // TODO swap this for the new graphics when they get created
	pos:         vec2{24, 22},
	o:           golf.SOp{TCol: golf.Col6, W: 2, H: 2},
}
// initCharacterEvents wires story events to dialogue changes: when an
// event fires, the affected NPC's list of dialogue line ids is swapped.
func initCharacterEvents() {
	storyEventHandler.onEvent(talkedToDog, func() { rentGuy.lines = []int{19, 36, 20} })
	storyEventHandler.onEvent(twoFiftySixSecondsPassed, func() { countingKid.lines = []int{26, 27, 28, 29} })
	storyEventHandler.onEvent(talkToCountKidAfterFinished, func() { countingKid.lines = []int{30} })
	storyEventHandler.onEvent(talkedToOldGuy, func() { oldMan.lines = []int{6, 10} })
}
|
package core
import (
"fmt"
"time"
"github.com/praateekgupta3991/contraption/entities"
"github.com/praateekgupta3991/contraption/util"
)
// BlockService provides block-creation operations for the chain.
type BlockService struct {
}

// BlockOperation is the interface for creating a successor block from
// the previous block's id, proof and hash.
type BlockOperation interface {
	CreateBlock(prevBid, prevProof int64, prevHash string) *entities.Block
}
// NewBlock builds the successor of the block identified by prevBid,
// prevProof and prevHash: it advances the index, timestamps the block,
// runs the proof-of-work for the new proof, and prints the result.
func NewBlock(prevBid, prevProof int64, prevHash string) *entities.Block {
	next := &entities.Block{
		Index:        prevBid + 1,
		Timestamp:    time.Now(),
		Proof:        util.CalculatePOW(prevProof),
		PreviousHash: prevHash,
	}
	fmt.Printf("Proof for the block %d - %d", next.Index, next.Proof)
	return next
}
// CreateBlock implements BlockOperation.
//
// Fixed: the body was an exact copy of NewBlock; it now delegates to it
// so the two code paths cannot drift apart.
func (b *BlockService) CreateBlock(prevBid, prevProof int64, prevHash string) *entities.Block {
	return NewBlock(prevBid, prevProof, prevHash)
}
|
package voxels
import (
"math"
v "github.com/pzsz/lin3dmath"
)
// DrawSphere adds a sphere of the given radius centred at (x, y, z) to
// store. The added value falls off linearly from power at the centre to
// zero at the surface; voxels outside the radius receive 0.
func DrawSphere(store VoxelField, x, y, z float32, radius float32, power int) {
	lo := v.Vector3i{int(x - radius), int(y - radius), int(z - radius)}
	hi := v.Vector3i{int(x + radius + 1), int(y + radius + 1), int(z + radius + 1)}
	store.AddValue(lo, hi, func(ix, iy, iz int) int {
		dx := float32(ix) - x
		dy := float32(iy) - y
		dz := float32(iz) - z
		dist := float32(math.Sqrt(float64(dx*dx + dy*dy + dz*dz)))
		if dist > radius {
			return 0
		}
		return int(float32(power) * (radius - dist) / radius)
	})
}
// DrawGround fills every voxel strictly below level with full density
// (255) across the whole field; everything at or above level gets 0.
func DrawGround(store VoxelField, level int) {
	bounds := store.Size()
	store.AddValue(bounds.Start, bounds.End, func(ix, iy, iz int) int {
		if iy >= level {
			return 0
		}
		return 255
	})
}
// DrawWave fills the field with a rolling sine/cosine terrain: fully
// solid (255) more than 5 units below the surface, fading linearly to
// empty at the surface, and empty above it.
func DrawWave(store VoxelField) {
	bounds := store.Size()
	store.AddValue(bounds.Start, bounds.End, func(ix, iy, iz int) int {
		surface := math.Sin(float64(ix)*0.08)*5 + math.Cos(float64(iz)*0.05)*5 - 16
		depth := float64(iy) - surface
		if depth >= 0 {
			return 0
		}
		if depth < -5 {
			return 255
		}
		return int(255 * (-depth / 5))
	})
}
func DrawPerlin(store VoxelField) {
sizeCube := store.Size()
perlin := v.NewPerlinNoise3D(545)
ysize := sizeCube.End.Y - sizeCube.Start.Y
op := func (ix,iy,iz int) int {
val := perlin.At(0.1 + float64(ix)*0.1,
0.1 + float64(iy)*0.1,
0.1 + float64(iz)*0.1)
val = (val + 1) * 0.5
base := float64(sizeCube.End.Y - iy) / float64(ysize)
return int(base*40 + val*200)
}
store.AddValue(sizeCube.Start, sizeCube.End, op)
} |
// +build darwin
/*
Copyright 2019 Adobe
All Rights Reserved.
NOTICE: Adobe permits you to use, modify, and distribute this file in
accordance with the terms of the Adobe license agreement accompanying
it. If you have received this file from a source other than Adobe,
then your use, modification, or distribution of it requires the prior
written permission of Adobe.
*/
package keychainx
import (
"github.com/keybase/go-keychain"
)
// Save stores an internet-password keychain item under the given label,
// with user as the account and password as the secret data.
func Save(label, user, password string) error {
	entry := keychain.NewItem()
	entry.SetSecClass(keychain.SecClassInternetPassword)
	entry.SetAccount(user)
	entry.SetLabel(label)
	entry.SetData([]byte(password))
	return keychain.AddItem(entry)
}
// Load returns the account and password of the first internet-password
// keychain item matching label, or ErrNotFound when none exists.
func Load(label string) (string, string, error) {
	query := keychain.NewItem()
	query.SetSecClass(keychain.SecClassInternetPassword)
	query.SetLabel(label)
	query.SetMatchLimit(keychain.MatchLimitOne)
	query.SetReturnAttributes(true)
	query.SetReturnData(true)

	results, err := keychain.QueryItem(query)
	if err != nil {
		return "", "", err
	}
	if len(results) == 0 {
		return "", "", ErrNotFound
	}
	first := results[0]
	return string(first.Account), string(first.Data), nil
}
|
package main
import (
	"encoding/json"
	"log"
	"net/http"
	"net/url"
	"strings"
)
// weatherData is the subset of the OpenWeatherMap "current weather"
// response that this service exposes: the city name and the temperature.
type weatherData struct {
	Name string `json:"name"`
	Main struct {
		// Temp as reported by the API (OpenWeatherMap defaults to
		// Kelvin — TODO confirm the units callers expect)
		Temp float64 `json:"temp"`
	} `json:"main"`
}
func main2() {
http.HandleFunc("/weather/", weatherDataHandler)
http.HandleFunc("/", hello)
log.Fatal(http.ListenAndServe(":8080", nil))
}
// weatherDataHandler serves GET /weather/{city}: it queries the upstream
// weather API for the city named in the URL path and writes the result
// as JSON.
//
// Fixed: the original indexed SplitN(...)[2] unconditionally, which
// panics when the path lacks a city segment; malformed paths now get a
// 400 response instead of crashing the handler goroutine.
func weatherDataHandler(w http.ResponseWriter, r *http.Request) {
	parts := strings.SplitN(r.URL.Path, "/", 3)
	if len(parts) < 3 || parts[2] == "" {
		http.Error(w, "missing city in path, expected /weather/{city}", http.StatusBadRequest)
		return
	}
	data, err := query(parts[2])
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	json.NewEncoder(w).Encode(data)
}
func hello(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("Hello Dude !!"))
}
// query fetches current weather for city from OpenWeatherMap and decodes the
// response into weatherData.
//
// Fixes: the city is now URL-escaped (spaces/special characters previously
// produced a malformed request), and a non-200 status is reported as an error
// instead of being silently decoded.
// NOTE(review): the API key is hardcoded in the URL — move it to config/env.
func query(city string) (weatherData, error) {
	resp, err := http.Get("http://api.openweathermap.org/data/2.5/weather?APPID=9aa7f88f84d94f46966fb852e9f2e9e1&q=" + url.QueryEscape(city))
	if err != nil {
		return weatherData{}, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return weatherData{}, fmt.Errorf("weather API returned %s", resp.Status)
	}
	var d weatherData
	if err := json.NewDecoder(resp.Body).Decode(&d); err != nil {
		return weatherData{}, err
	}
	return d, nil
}
|
package notification
import (
"encoding/json"
"io/ioutil"
"log"
"net/http"
"telebot/common"
"telebot/models"
)
type ZabbixNotification struct {
Date string `json: "date, omitempty"`
Alias string `json: "alias"`
Subject string `json: "subject"`
Message string `json: "message"`
EventId int `json: "eventid, omitempty"`
Hostname string `json: "hostname, omitempty"`
Ipaddress string `json: "ipaddress, omitempty"`
Itemvalue string `json: "itemvalue, omitempty"`
Trigger struct {
Id int `json:"id, omitempty"`
Name string `json:"name, omitempty"`
Severity string `json:"severity, omitempty"`
Status string `json:"status, omitempty"`
Url string `json:"url, omitempty"`
} `json:"trigger, omitempty"`
}
func NotificationHandler(tobot chan<- common.BotMsg) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.Method == "POST" {
body, err := ioutil.ReadAll(r.Body)
if err != nil {
log.Println("--", err)
w.WriteHeader(422)
return
}
err = r.Body.Close()
if err != nil {
log.Println("--", err)
w.WriteHeader(422)
return
}
var notification ZabbixNotification
err = json.Unmarshal(body, ¬ification)
if err != nil {
log.Println("--", err)
w.WriteHeader(422)
return
}
sessions, err := models.GetAuthSessionsByAlias(notification.Alias)
if err != nil {
log.Println("--", err)
w.WriteHeader(423)
return
}
var text string = notification.Message
if len(notification.Trigger.Url) > 0 {
text += "\n[Описание](" + notification.Trigger.Url + ")"
}
for _, s := range *sessions {
tobot <- common.BotMsg{ChatId: s.ChatId, Text: text, Mode: "Markdown"}
}
w.WriteHeader(http.StatusCreated)
}
})
}
|
package goTezos
import (
	"fmt"
	"strconv"
)
// GetDelegationsForDelegate retrieves the list of contracts currently
// delegated to the given delegate at the chain head.
func GetDelegationsForDelegate(delegatePhk string) ([]string, error) {
	path := "/chains/main/blocks/head/context/delegates/" + delegatePhk + "/delegated_contracts"
	resp, err := TezosRPCGet(path)
	if err != nil {
		return nil, err
	}
	list, err := unMarshelStringArray(resp)
	if err != nil {
		return nil, err
	}
	return list, nil
}
// GetDelegationsForDelegateByCycle retrieves the contracts delegated to the
// given delegate at the snapshot block associated with the given cycle.
func GetDelegationsForDelegateByCycle(delegatePhk string, cycle int) ([]string, error) {
	snapShot, err := GetSnapShot(cycle)
	if err != nil {
		return nil, err
	}
	hash, err := GetBlockHashAtLevel(snapShot.AssociatedBlock)
	if err != nil {
		return nil, err
	}
	path := "/chains/main/blocks/" + hash + "/context/delegates/" + delegatePhk + "/delegated_contracts"
	resp, err := TezosRPCGet(path)
	if err != nil {
		return nil, err
	}
	list, err := unMarshelStringArray(resp)
	if err != nil {
		return nil, err
	}
	return list, nil
}
// GetRewardsForDelegateForCycles computes the gross rewards and share of each
// delegation of delegatePhk for every cycle in [cycleStart, cycleEnd].
func GetRewardsForDelegateForCycles(delegatePhk string, cycleStart int, cycleEnd int) (DelegationServiceRewards, error) {
	dgRewards := DelegationServiceRewards{DelegatePhk: delegatePhk}
	var byCycle []CycleRewards
	for c := cycleStart; c <= cycleEnd; c++ {
		cr, err := getCycleRewards(delegatePhk, c)
		if err != nil {
			return dgRewards, err
		}
		byCycle = append(byCycle, cr)
	}
	dgRewards.RewardsByCycle = byCycle
	return dgRewards, nil
}
// GetRewardsForDelegateCycle computes the gross rewards and share of each
// delegation of delegatePhk for a single cycle.
//
// Fix: the computed cycle rewards were appended to a local slice that was
// never assigned to the result, so RewardsByCycle was always empty (compare
// GetRewardsForDelegateForCycles, which does assign it).
func GetRewardsForDelegateCycle(delegatePhk string, cycle int) (DelegationServiceRewards, error) {
	dgRewards := DelegationServiceRewards{}
	dgRewards.DelegatePhk = delegatePhk
	delegations, err := getCycleRewards(delegatePhk, cycle)
	if err != nil {
		return dgRewards, err
	}
	dgRewards.RewardsByCycle = []CycleRewards{delegations}
	return dgRewards, nil
}
// getCycleRewards gathers a delegate's total rewards for a cycle plus the
// per-contract reward breakdown.
func getCycleRewards(delegatePhk string, cycle int) (CycleRewards, error) {
	cr := CycleRewards{Cycle: cycle}
	total, err := GetDelegateRewardsForCycle(delegatePhk, cycle)
	if err != nil {
		return cr, err
	}
	// An empty string from the RPC means no rewards; normalize to "0".
	if total == "" {
		total = "0"
	}
	cr.TotalRewards = total
	contracts, err := getContractRewardsForDelegate(delegatePhk, cr.TotalRewards, cycle)
	if err != nil {
		return cr, err
	}
	cr.Delegations = contracts
	return cr, nil
}
// GetDelegateRewardsForCycle returns the rewards (as a mutez string) earned
// by a delegate for a specific cycle.
//
// Fix: on unmarshal failure the original returned a possibly partially
// populated rewards value alongside the error; now the zero value is returned
// so callers cannot accidentally use it.
func GetDelegateRewardsForCycle(delegatePhk string, cycle int) (string, error) {
	get := "/chains/main/blocks/head/context/raw/json/contracts/index/" + delegatePhk + "/frozen_balance/" + strconv.Itoa(cycle) + "/"
	s, err := TezosRPCGet(get)
	if err != nil {
		return "", err
	}
	rewards, err := unMarshelFrozenBalanceRewards(s)
	if err != nil {
		return "", err
	}
	return rewards.Rewards, nil
}
// getContractRewardsForDelegate builds the per-delegation reward breakdown:
// for each contract delegated to delegatePhk in the given cycle it records
// the contract's share of the staking balance and its gross reward
// (share * totalRewards), in mutez.
// NOTE(review): the mutez -> float64 -> mutez round trip can lose precision
// for large reward values; the statement order here is load-bearing for the
// exact rounding, so it is deliberately left untouched.
func getContractRewardsForDelegate(delegatePhk, totalRewards string, cycle int) ([]ContractRewards, error) {
	var contractRewards []ContractRewards
	delegations, err := GetDelegationsForDelegateByCycle(delegatePhk, cycle)
	if err != nil {
		return contractRewards, err
	}
	for _, contract := range delegations {
		contractReward := ContractRewards{}
		contractReward.DelegationPhk = contract
		// totalRewards is a mutez amount encoded as a decimal string.
		bigIntRewards, err := strconv.Atoi(totalRewards)
		if err != nil {
			return contractRewards, err
		}
		// Convert mutez to tez (1 tez = 1,000,000 mutez) for the share math.
		floatRewards := float64(bigIntRewards) / 1000000
		share, err := GetShareOfContract(delegatePhk, contract, cycle)
		if err != nil {
			return contractRewards, err
		}
		contractReward.Share = share
		// Back to mutez for the stored gross reward.
		bigIntGrossRewards := int((share * floatRewards) * 1000000)
		strGrossRewards := strconv.Itoa(bigIntGrossRewards)
		contractReward.GrossRewards = strGrossRewards
		contractRewards = append(contractRewards, contractReward)
	}
	return contractRewards, nil
}
// GetShareOfContract returns delegationPhk's fraction of delegatePhk's
// staking balance for the given cycle.
//
// Fix: guards against a zero staking balance, which previously produced
// +Inf/NaN from the division instead of an error.
func GetShareOfContract(delegatePhk, delegationPhk string, cycle int) (float64, error) {
	stakingBalance, err := GetDelegateStakingBalance(delegatePhk, cycle)
	if err != nil {
		return 0, err
	}
	if stakingBalance == 0 {
		return 0, fmt.Errorf("staking balance of %s is zero for cycle %d", delegatePhk, cycle)
	}
	delegationBalance, err := GetAccountBalanceAtSnapshot(delegationPhk, cycle)
	if err != nil {
		return 0, err
	}
	return delegationBalance / stakingBalance, nil
}
// GetDelegate retrieves information about a delegate at the head block.
func GetDelegate(delegatePhk string) (Delegate, error) {
	var d Delegate
	resp, err := TezosRPCGet("/chains/main/blocks/head/context/delegates/" + delegatePhk)
	if err != nil {
		return d, err
	}
	d, err = unMarshelDelegate(resp)
	if err != nil {
		return d, err
	}
	return d, nil
}
// GetStakingBalanceAtCycle returns a delegate's staking balance (as a string)
// at the snapshot block associated with the given cycle.
func GetStakingBalanceAtCycle(cycle int, delegateAddr string) (string, error) {
	snapShot, err := GetSnapShot(cycle)
	if err != nil {
		return "", err
	}
	path := "/chains/main/blocks/" + snapShot.AssociatedHash + "/context/delegates/" + delegateAddr + "/staking_balance"
	resp, err := TezosRPCGet(path)
	if err != nil {
		return "", err
	}
	balance, err := unMarshelString(resp)
	if err != nil {
		return balance, err
	}
	return balance, nil
}
// GetBakingRights returns the baking rights for a specific cycle, limited to
// priorities 0-4.
//
// Fix: the second query parameter was joined with a second '?'
// ("?cycle=N?max_priority=4"); parameters after the first must use '&'.
func GetBakingRights(cycle int) (Baking_Rights, error) {
	var bakingRights Baking_Rights
	get := "/chains/main/blocks/head/helpers/baking_rights?cycle=" + strconv.Itoa(cycle) + "&max_priority=4"
	byts, err := TezosRPCGet(get)
	if err != nil {
		return bakingRights, err
	}
	bakingRights, err = unMarshelBakingRights(byts)
	if err != nil {
		return bakingRights, err
	}
	return bakingRights, nil
}
// GetEndorsingRights returns the endorsing rights for a specific cycle.
func GetEndorsingRights(cycle int) (Endorsing_Rights, error) {
	var rights Endorsing_Rights
	path := "/chains/main/blocks/head/helpers/endorsing_rights?cycle=" + strconv.Itoa(cycle)
	resp, err := TezosRPCGet(path)
	if err != nil {
		return rights, err
	}
	rights, err = unMarshelEndorsingRights(resp)
	if err != nil {
		return rights, err
	}
	return rights, nil
}
// GetAllDelegatesByHash retrieves all active delegate addresses at the block
// identified by hash.
//
// Fix: the URL was missing the '/' between "blocks" and the hash
// ("/chains/main/blocksHASH/..."), producing an invalid RPC path (compare
// GetAllDelegates below).
func GetAllDelegatesByHash(hash string) ([]string, error) {
	var delList []string
	get := "/chains/main/blocks/" + hash + "/context/delegates?active"
	bytes, err := TezosRPCGet(get)
	if err != nil {
		return delList, err
	}
	delList, err = unMarshelStringArray(bytes)
	if err != nil {
		return delList, err
	}
	return delList, nil
}
// GetAllDelegates retrieves all active delegate addresses at the chain head.
func GetAllDelegates() ([]string, error) {
	resp, err := TezosRPCGet("/chains/main/blocks/head/context/delegates?active")
	if err != nil {
		return nil, err
	}
	delegates, err := unMarshelStringArray(resp)
	if err != nil {
		return nil, err
	}
	return delegates, nil
}
|
package models
import (
"fmt"
"strings"
"github.com/markbates/pop/nulls"
)
// User is a registered account with optional profile data and a role.
type User struct {
	Model // embedded base model (defined elsewhere in the package)
	FirstName string `sql:"not null"`
	LastName  string
	NickName  string
	// Email is unique and indexed; omitted from JSON when empty.
	Email    string `json:",omitempty" sql:"not null;index;unique"`
	// Password holds a 60-byte hash (size:60 suggests bcrypt — confirm);
	// never serialized to JSON.
	Password []byte `json:"-" sql:"not null;size:60"`
	Website  string `json:",omitempty"`
	Biography string `json:",omitempty" sql:"type:TEXT"`
	// ProfilePicture/ProfilePictureID form a nullable association.
	ProfilePicture   *Picture
	ProfilePictureID nulls.UInt32
	RoleID           nulls.UInt32 `sql:"not null;index"`
	CreatedByID      nulls.UInt32
}
// FullName returns the user's first and last names joined by a space, with
// surrounding whitespace trimmed (so a missing part leaves no stray space).
func (u User) FullName() string {
	joined := fmt.Sprintf("%s %s", u.FirstName, u.LastName)
	return strings.TrimSpace(joined)
}
/*func (u User) HasCapability(s string) bool {
c := capability.FromRouteName(s)
if allowed, ok := capability.Acl[c]; ok {
for _, r := range allowed {
if r == u.Role {
return true
}
}
}
return false
}
func (u User) capabilities() []capability.Capability {
var caps []capability.Capability
for c, allowed := range capability.Acl {
for _, r := range allowed {
if r == u.Role {
caps = append(caps, c)
}
}
}
return caps
}
func (u User) MarshalBinary() (data []byte, err error) {
var buf bytes.Buffer
buf.WriteByte(byte(u.ID))
buf.WriteString(u.FirstName)
buf.WriteString(u.LastName)
buf.WriteString(u.Email)
r, _ := u.Role.MarshalJSON()
buf.Write(r)
return buf.Bytes(), nil
}
func (u *User) UnmarshalBinary(data []byte) error {
buf := bytes.NewBuffer(data)
_, err := fmt.Fscan(buf, &u.ID, &u.FirstName, &u.LastName, &u.Email, &u.Role)
return err
}*/
|
package gui
import (
"github.com/magicmonkey/go-streamdeck"
"github.com/magicmonkey/go-streamdeck/actionhandlers"
"github.com/magicmonkey/go-streamdeck/buttons"
"streamdeckOpenHab/openhab"
"streamdeckOpenHab/openhab/actionHandler"
"time"
)
// Scene identifiers used to register and look up scenes in the SceneRegistry.
// Note: tempSceneName is lowercase unlike its siblings — a naming
// inconsistency, but the literal value is referenced as-is elsewhere.
const (
	testSceneName     = "TestScene"
	mainSceneName     = "MainScene"
	emptySceneName    = "EmptyScene"
	settingsSceneName = "SettingsScene"
	sleepSceneName    = "SleepScene"
	tempSceneName     = "tempScene"
)
// GetTestScene builds the demo/test scene: a 5x3 grid mixing label-changing
// text buttons, an OpenHab temperature button, an image button, navigation
// buttons to the main scene, and two stop buttons (one delayed by 5s).
// NOTE(review): unlike the other scene constructors, result.init() is not
// called here — confirm Scene.AddButton tolerates an uninitialized Scene.
func GetTestScene(sd *streamdeck.StreamDeck, registry *SceneRegistry, stopFunc func()) *Scene {
	result := Scene{name: testSceneName}
	button1 := buttons.NewTextButton("1")
	button1.SetActionHandler(&actionHandler.OpenHabAction{})
	result.AddButton(button1, 0, 0)
	// Bedroom heater toggle; curState/nextState seed the first toggle cycle.
	tempButton := &TempButton{
		Room:      "bedroom",
		ItemNames: []string{"HeaterBedroom"},
		sd:        sd,
		curState:  "cold",
		nextState: "warm",
	}
	result.AddButton(tempButton.GenerateButton(), 1, 0)
	button3 := buttons.NewTextButton("3")
	button3.SetActionHandler(&actionhandlers.TextLabelChangeAction{NewLabel: "THREE"})
	result.AddButton(button3, 2, 0)
	button4 := buttons.NewTextButton("4")
	button4.SetActionHandler(&actionhandlers.TextLabelChangeAction{NewLabel: "FOUR"})
	result.AddButton(button4, 3, 0)
	// ">" navigates to the main scene.
	button5 := buttons.NewTextButton(">")
	button5.SetActionHandler(&SceneAction{mainSceneName, registry, sd})
	result.AddButton(button5, 4, 0)
	button6 := buttons.NewTextButton("6")
	button6.SetActionHandler(&actionhandlers.TextLabelChangeAction{NewLabel: "SIX"})
	result.AddButton(button6, 0, 1)
	button7 := buttons.NewTextButton("7")
	button7.SetActionHandler(&actionhandlers.TextLabelChangeAction{NewLabel: "SEVEN"})
	result.AddButton(button7, 1, 1)
	// Image-load error is deliberately ignored (best-effort test asset).
	imgTest, _ := buttons.NewImageFileButton("images/light_bedroom_on.png")
	result.AddButton(imgTest, 2, 1)
	button9 := buttons.NewTextButton("9")
	button9.SetActionHandler(&actionhandlers.TextLabelChangeAction{NewLabel: "NINE"})
	result.AddButton(button9, 3, 1)
	button10 := buttons.NewTextButton("NXT")
	button10.SetActionHandler(&SceneAction{mainSceneName, registry, sd})
	result.AddButton(button10, 4, 1)
	button11 := buttons.NewTextButton("11")
	button11.SetActionHandler(&actionhandlers.TextLabelChangeAction{NewLabel: "ELEVEN"})
	result.AddButton(button11, 0, 2)
	button12 := buttons.NewTextButton("12")
	button12.SetActionHandler(&actionhandlers.TextLabelChangeAction{NewLabel: "TWELVE"})
	result.AddButton(button12, 1, 2)
	button13 := buttons.NewTextButton("13")
	button13.SetActionHandler(&actionhandlers.TextLabelChangeAction{NewLabel: "THIRTEEN"})
	result.AddButton(button13, 2, 2)
	// Delayed shutdown button: waits 5 seconds before invoking stopFunc.
	button14 := buttons.NewTextButton("14")
	button14.SetActionHandler(&StopAppAction{
		func() {
			time.Sleep(5 * time.Second)
			stopFunc()
		}})
	result.AddButton(button14, 3, 2)
	// Immediate shutdown button.
	button15 := buttons.NewTextButton("15")
	button15.SetActionHandler(&StopAppAction{stopFunc})
	result.AddButton(button15, 4, 2)
	return &result
}
// GetMainScene builds the main (lights) scene: a top navigation row plus one
// light-toggle button per room. Per-room active state is read from OpenHab at
// construction time.
func GetMainScene(sd *streamdeck.StreamDeck, registry *SceneRegistry) *Scene {
	result := Scene{name: mainSceneName}
	result.init()
	// Top row: back, lights (current scene), temperature, settings.
	buttonBwd := buttons.NewTextButton("<")
	buttonBwd.SetActionHandler(&SceneAction{testSceneName, registry, sd})
	result.AddButton(buttonBwd, 0, 0)
	lightButton, _ := buttons.NewImageFileButton("images/light.png")
	lightButton.SetActionHandler(&SceneAction{mainSceneName, registry, sd})
	result.AddButton(lightButton, 1, 0)
	tempButton, _ := buttons.NewImageFileButton("images/temp_dark.png")
	tempButton.SetActionHandler(&SceneAction{tempSceneName, registry, sd})
	result.AddButton(tempButton, 2, 0)
	settingsButton, _ := buttons.NewImageFileButton("images/settings_dark.png")
	settingsButton.SetActionHandler(&SceneAction{settingsSceneName, registry, sd})
	result.AddButton(settingsButton, 3, 0)
	lightBedroom := &LightButton{
		Room:      "bedroom",
		ItemNames: []string{"LightBedRoom_Color"},
		sd:        sd,
		active:    openhab.IsLightActive("LightBedRoom_Color"),
	}
	result.AddButton(lightBedroom.GenerateButton(), 1, 1)
	// The living-room button also drives the bedroom light strip.
	// NOTE(review): active state is derived from the first item only.
	lightLivingroom := &LightButton{
		Room:      "livingroom",
		ItemNames: []string{"LightLivingRoom_Color", "LightPlayLivingroomDoor_Color", "LightPlayLivingroomWindow_Color", "LightstripBedroom_Color"},
		sd:        sd,
		active:    openhab.IsLightActive("LightLivingRoom_Color"),
	}
	result.AddButton(lightLivingroom.GenerateButton(), 1, 2)
	lightKitchen := &LightButton{
		Room:      "kitchen",
		ItemNames: []string{"LightKitchen_Color"},
		sd:        sd,
		active:    openhab.IsLightActive("LightKitchen_Color"),
	}
	result.AddButton(lightKitchen.GenerateButton(), 3, 2)
	// ">" forwards to the temperature scene.
	buttonFwd := buttons.NewTextButton(">")
	buttonFwd.SetActionHandler(&SceneAction{tempSceneName, registry, sd})
	result.AddButton(buttonFwd, 4, 0)
	return &result
}
// GetTempScene builds the temperature scene: a navigation row and one heater
// toggle per room. Each button's current state is read from OpenHab at
// construction time and its next state precomputed.
func GetTempScene(sd *streamdeck.StreamDeck, registry *SceneRegistry) *Scene {
	result := Scene{name: tempSceneName}
	result.init()
	// Top row: lights, temperature (current scene), settings.
	lightButton, _ := buttons.NewImageFileButton("images/light_dark.png")
	lightButton.SetActionHandler(&SceneAction{mainSceneName, registry, sd})
	result.AddButton(lightButton, 1, 0)
	tempButton, _ := buttons.NewImageFileButton("images/temp.png")
	tempButton.SetActionHandler(&SceneAction{tempSceneName, registry, sd})
	result.AddButton(tempButton, 2, 0)
	settingsButton, _ := buttons.NewImageFileButton("images/settings_dark.png")
	settingsButton.SetActionHandler(&SceneAction{settingsSceneName, registry, sd})
	result.AddButton(settingsButton, 3, 0)
	// "0.0" is the fallback reading when the item state is unavailable.
	bedroomTempState := openhab.TempToName(openhab.ConvertTemperatureToFloat(openhab.GetItemStateWithDefault("HeaterBedroom", "0.0")))
	bedroomTempButton := &TempButton{
		Room:      "bedroom",
		ItemNames: []string{"HeaterBedroom"},
		sd:        sd,
		curState:  bedroomTempState,
		nextState: openhab.DetermineNextTempState(bedroomTempState),
	}
	result.AddButton(bedroomTempButton.GenerateButton(), 1, 1)
	// Living-room button controls both window and door heaters; state is
	// derived from the window heater only.
	livingroomTempState := openhab.TempToName(openhab.ConvertTemperatureToFloat(openhab.GetItemStateWithDefault("HeaterLivingroomWindow", "0.0")))
	livingroomTempButton := &TempButton{
		Room:      "livingroom",
		ItemNames: []string{"HeaterLivingroomWindow", "HeaterLivingroomDoor"},
		sd:        sd,
		curState:  livingroomTempState,
		nextState: openhab.DetermineNextTempState(livingroomTempState),
	}
	result.AddButton(livingroomTempButton.GenerateButton(), 1, 2)
	bathroomTempState := openhab.TempToName(openhab.ConvertTemperatureToFloat(openhab.GetItemStateWithDefault("HeaterBathroom", "0.0")))
	bathroomTempButton := &TempButton{
		Room:      "bathroom",
		ItemNames: []string{"HeaterBathroom"},
		sd:        sd,
		curState:  bathroomTempState,
		nextState: openhab.DetermineNextTempState(bathroomTempState),
	}
	result.AddButton(bathroomTempButton.GenerateButton(), 2, 2)
	kitchenTempState := openhab.TempToName(openhab.ConvertTemperatureToFloat(openhab.GetItemStateWithDefault("HeaterKitchen", "0.0")))
	kitchenTempButton := &TempButton{
		Room:      "kitchen",
		ItemNames: []string{"HeaterKitchen"},
		sd:        sd,
		curState:  kitchenTempState,
		nextState: openhab.DetermineNextTempState(kitchenTempState),
	}
	result.AddButton(kitchenTempButton.GenerateButton(), 3, 2)
	// Navigation: ">" to settings, "<" back to lights.
	buttonFwd := buttons.NewTextButton(">")
	buttonFwd.SetActionHandler(&SceneAction{settingsSceneName, registry, sd})
	result.AddButton(buttonFwd, 4, 0)
	buttonBwd := buttons.NewTextButton("<")
	buttonBwd.SetActionHandler(&SceneAction{mainSceneName, registry, sd})
	result.AddButton(buttonBwd, 0, 0)
	return &result
}
// GetEmptyScene returns an initialized scene containing no buttons.
func GetEmptyScene() *Scene {
	s := Scene{name: emptySceneName}
	s.init()
	return &s
}
// GetSettingsScene builds the settings scene: navigation row, a sleep button
// (dims the deck and switches to the sleep scene), and a shutdown button.
func GetSettingsScene(sd *streamdeck.StreamDeck, registry *SceneRegistry, shutdown func()) *Scene {
	result := Scene{name: settingsSceneName}
	result.init()
	buttonBwd := buttons.NewTextButton("<")
	buttonBwd.SetActionHandler(&SceneAction{tempSceneName, registry, sd})
	result.AddButton(buttonBwd, 0, 0)
	lightButton, _ := buttons.NewImageFileButton("images/light_dark.png")
	lightButton.SetActionHandler(&SceneAction{mainSceneName, registry, sd})
	result.AddButton(lightButton, 1, 0)
	tempButton, _ := buttons.NewImageFileButton("images/temp_dark.png")
	tempButton.SetActionHandler(&SceneAction{tempSceneName, registry, sd})
	result.AddButton(tempButton, 2, 0)
	settingsButton, _ := buttons.NewImageFileButton("images/settings.png")
	settingsButton.SetActionHandler(&SceneAction{settingsSceneName, registry, sd})
	result.AddButton(settingsButton, 3, 0)
	// Sleep: first dim the deck to 0, then switch to the sleep scene.
	buttonSleep, _ := buttons.NewImageFileButton("images/sleep.png")
	thisActionHandler := &actionhandlers.ChainedAction{}
	thisActionHandler.AddAction(&actionHandler.OpenHabAction{func() { sd.SetBrightness(0) }})
	thisActionHandler.AddAction(&SceneAction{sleepSceneName, registry, sd})
	buttonSleep.SetActionHandler(thisActionHandler)
	result.AddButton(buttonSleep, 1, 1)
	buttonStop := buttons.NewTextButton("Shutdown")
	buttonStop.SetActionHandler(&StopAppAction{shutdown})
	result.AddButton(buttonStop, 3, 2)
	// ">" wraps around to the test scene.
	buttonFwd := buttons.NewTextButton(">")
	buttonFwd.SetActionHandler(&SceneAction{testSceneName, registry, sd})
	result.AddButton(buttonFwd, 4, 0)
	return &result
}
// GetSleepScene builds the sleep scene: every key is the same wake-up button,
// which restores brightness and returns to the main scene. The deck is dimmed
// to 0 as a side effect of constructing this scene.
func GetSleepScene(sd *streamdeck.StreamDeck, registry *SceneRegistry) *Scene {
	result := Scene{name: sleepSceneName}
	result.init()
	wakeUpButton := GetEmptyButton()
	// On press: brightness back to 50, then switch to the main scene.
	thisActionHandler := &actionhandlers.ChainedAction{}
	thisActionHandler.AddAction(&actionHandler.OpenHabAction{func() { sd.SetBrightness(50) }})
	thisActionHandler.AddAction(&SceneAction{mainSceneName, registry, sd})
	// NOTE(review): unchecked type assertion — panics if GetEmptyButton ever
	// returns something other than a *buttons.TextButton.
	wakeUpButton.(*buttons.TextButton).SetActionHandler(thisActionHandler)
	// Fill the whole grid with the wake-up button.
	for x := 0; x < Cols; x++ {
		for y := 0; y < Rows; y++ {
			result.AddButton(wakeUpButton, x, y)
		}
	}
	sd.SetBrightness(0)
	return &result
}
|
package helper
// Url returns the base URL embedded in generated QR-code data.
// Adjust the constant below for your own deployment.
func Url() string {
	const base = "http://127.0.0.1:3000"
	return base
}
|
//Copyright 2019 Chris Wojno
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
// documentation files (the "Software"), to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
// Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
// WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
// OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
package vsql_engine
import (
"context"
"github.com/wojnosystems/vsql_engine/engine_context"
"testing"
)
// TestEngine_Ping verifies that middleware appended to PingMW is invoked
// when the engine is pinged.
func TestEngine_Ping(t *testing.T) {
	ran := false
	eng := NewSingle()
	eng.PingMW().Append(func(ctx context.Context, c engine_context.Er) {
		ran = true
		c.Next(ctx)
	})
	// The ping result itself is irrelevant here; only the middleware matters.
	_ = eng.Ping(context.Background())
	if !ran {
		t.Error("expected middleware to run")
	}
}
|
package main
import (
"GoleGolas/socket-code/function"
"encoding/json"
"flag"
"fmt"
"github.com/astaxie/beego"
_ "github.com/go-sql-driver/mysql"
"github.com/gomodule/redigo/redis"
"github.com/mxi4oyu/MoonSocket/protocol"
"log"
"net"
"regexp"
"strings"
"sync"
)
//const (
// WHITE = "\x1b[37;1m"
// RED = "\x1b[31;1m"
// GREEN = "\x1b[32;1m"
// YELLOW = "\x1b[33;1m"
// BLUE = "\x1b[34;1m"
// MAGENTA = "\x1b[35;1m"
// CYAN = "\x1b[36;1m"
// VERSION = "2.5.0"
//)
// Package-level server state.
// NOTE(review): connlist is guarded by lock in connection(), but cmdinfo and
// pcinfo are shared across goroutines without any locking — data races.
var (
	inputIP   = flag.String("IP", "0.0.0.0", "Listen IP")
	inputPort = flag.String("PORT", "8848", "Listen Port")
	//counter int //用于会话计数,给map的key使用
	// connlist maps a client IP (without port) to its live connection.
	connlist = make(map[string]net.Conn) //通过ip当做key存储所有连接的会话
	//connlistIPAddr map[int]string = make(map[int]string) //存储所有IP地址,提供输入标识符显示
	lock    = &sync.Mutex{}
	cmdinfo = make(map[string]string) // pending commands decoded from Redis
	pcinfo  = make(map[string]string) // last host-info payload from a client
)
// ReadLine 函数等待命令行输入,返回字符串
//func ReadLine() string {
// buf := bufio.NewReader(os.Stdin)
// lin, _, err := buf.ReadLine()
// if err != nil {
// fmt.Println(RED, "[!] Error to Read Line!")
// }
// return string(lin)
//}
// connection services one client socket: it registers the connection in the
// global connlist keyed by the client IP (port stripped), then loops reading
// framed messages and dispatching them to handClientMsg. On read error the
// client is unregistered and marked offline.
func connection(conn net.Conn) {
	defer conn.Close()
	//var clientid int
	clientip := conn.RemoteAddr().String() // "ip:port"
	fmt.Println(conn.RemoteAddr().Network())
	add := strings.Split(clientip, ":")
	lock.Lock()
	//counter++
	//clientid = counter
	// NOTE(review): keyed by IP only — two clients behind the same NAT
	// overwrite each other's entry; confirm this is intended.
	connlist[add[0]] = conn
	//connlistIPAddr[counter] = clientip
	lock.Unlock()
	fmt.Printf("--- client: %s 连接成功 ---\n", clientip)
	function.InitPcInfo(clientip)
	tmpbuf := make([]byte, 0)
	buf := make([]byte, 1024)
	for {
		n, err := conn.Read(buf)
		// Client disconnected (or read failed): clean up and exit.
		if err != nil {
			conn.Close()
			// NOTE(review): mutated without holding lock — races with
			// concurrent map access elsewhere.
			delete(connlist, add[0])
			//delete(connlistIPAddr, clientid)
			fmt.Printf("--- client:%s 关闭 ---\n", clientip)
			// Mark the client offline in persistent storage.
			function.OfflineClient(add[0])
			return
		}
		// Accumulate raw bytes; the protocol layer strips complete frames.
		tmpbuf = protocol.Depack(append(tmpbuf, buf[:n]...))
		handClientMsg(add[0], string(tmpbuf))
	}
}
// handClientMsg dispatches a message received from a client: heartbeats are
// ignored, JSON payloads are treated as host info and persisted, and anything
// else is command output forwarded to beego via Redis.
//
// Fix: the original indexed msg[:1] unconditionally, which panics on an
// empty message; strings.HasPrefix handles the empty case safely.
func handClientMsg(ip string, msg string) {
	if msg == "❤❤❤❤❤❤" {
		// Heartbeat keep-alive; nothing to do.
		return
	}
	if strings.HasPrefix(msg, "{") {
		// Host-info payload: persist it to the database.
		if err := json.Unmarshal([]byte(msg), &pcinfo); err == nil {
			fmt.Printf("接收到客户端的信息:%s", pcinfo)
			function.UpdatePcData(pcinfo)
		}
		return
	}
	// Command output: print and relay to beego through Redis.
	fmt.Printf("接收到客户端%s的信息:%s \n", ip, msg)
	publish(ip, msg)
}
// handleConnWait listens on the configured address and spawns one goroutine
// per accepted client connection. A failure to bind is fatal.
//
// Fix: a transient Accept error previously called log.Fatal and killed the
// whole server; it is now logged and the accept loop continues.
func handleConnWait() {
	l, err := net.Listen("tcp", *inputIP+":"+*inputPort)
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()
	for {
		conn, err := l.Accept()
		if err != nil {
			log.Println("accept:", err)
			continue
		}
		go connection(conn)
	}
}
// newRedisclient dials the local Redis instance and returns the connection.
func newRedisclient() (conn redis.Conn, err error) {
	const (
		host = "127.0.0.1"
		port = "6379"
	)
	address := host + ":" + port
	return redis.Dial("tcp", address)
}
// resolveOrderCreate subscribes to the Redis "command" channel and forwards
// each received command payload to handleRedisMsg. Returns (and releases the
// WaitGroup) when Redis is unavailable or the subscription fails.
//
// Fixes: the Redis connection is now closed on exit (it leaked), the
// composite literal uses a keyed field (go vet flags unkeyed fields from
// other packages), and a receive error now terminates the loop instead of
// busy-looping forever on a dead connection.
func resolveOrderCreate(wait *sync.WaitGroup) {
	defer wait.Done()
	conn, err := newRedisclient()
	if err != nil {
		beego.Error("redis没有启动....")
		return
	}
	defer conn.Close()
	client := redis.PubSubConn{Conn: conn}
	err = client.Subscribe("command")
	if err != nil {
		fmt.Println("订阅错误:", err)
		return
	}
	fmt.Println("等待订阅数据 ---->")
	for {
		switch v := client.Receive().(type) {
		case redis.Message:
			fmt.Printf("收到来自%s订阅消息:%s", v.Channel, string(v.Data))
			handleRedisMsg(string(v.Data))
		case redis.Subscription:
			fmt.Println("Subscription", v.Channel, v.Kind, v.Count)
		case error:
			fmt.Println("订阅接收错误:", v)
			return
		}
	}
}
// publish relays a client's output back to beego by publishing a one-entry
// JSON map {ip: value} on the Redis "result" channel.
//
// Fixes: the Redis connection is now closed (it leaked on every call) and
// the publish error is no longer silently discarded.
func publish(ip string, value string) {
	reqinfo := make(map[string]string)
	reqinfo[ip] = value
	values, _ := json.Marshal(reqinfo) // map[string]string cannot fail to marshal
	conn, err := newRedisclient()
	if err != nil {
		return
	}
	defer conn.Close()
	if _, err := conn.Do("Publish", "result", values); err != nil {
		fmt.Println("publish:", err)
	}
}
// handleRedisMsg decodes a JSON command map (target-spec -> command) received
// from Redis and sends each command to every matching connected client.
// NOTE(review): cmdinfo is package-level shared state mutated here without
// locking — concurrent invocations race.
func handleRedisMsg(info string) {
	err := json.Unmarshal([]byte(info), &cmdinfo)
	if err != nil {
		fmt.Println("string转map失败", err)
	} else {
		fmt.Println("收到命令------", cmdinfo)
		for key, value := range cmdinfo {
			fmt.Println(key, value)
			// Extract every IPv4 address embedded in the key.
			ips := handleData(key)
			fmt.Println(ips)
			fmt.Println(len(ips))
			if key != "" {
				for _, v := range ips {
					_, ok := connlist[v]
					if ok {
						fmt.Printf("当前执行主机%s,命令%s \n", v, value)
						sendMsgToClient(connlist[v], value)
					} else {
						fmt.Println("该主机未连接 \n")
						// Target not connected: mark it offline.
						function.OfflineClient(v)
					}
				}
			}
			// Clear the processed entry (delete while ranging is safe in Go).
			delete(cmdinfo, key)
		}
	}
}
// handleData extracts every IPv4 address embedded in args and returns them
// in order of appearance (nil when there are none).
func handleData(args string) []string {
	ipPattern := regexp.MustCompile(`(25[0-5]|2[0-4]\d|[0-1]\d{2}|[1-9]?\d)\.(25[0-5]|2[0-4]\d|[0-1]\d{2}|[1-9]?\d)\.(25[0-5]|2[0-4]\d|[0-1]\d{2}|[1-9]?\d)\.(25[0-5]|2[0-4]\d|[0-1]\d{2}|[1-9]?\d)`)
	return ipPattern.FindAllString(args, -1)
}
// main starts the TCP accept loop in the background, then blocks on the
// Redis subscription worker (the WaitGroup is released only when that worker
// exits). The commented-out block below is the old interactive CLI session
// manager, kept for reference.
func main() {
	flag.Parse()
	go handleConnWait()
	var wg sync.WaitGroup
	wg.Add(1)
	go resolveOrderCreate(&wg)
	wg.Wait()
	//connid := 0
	//for {
	//	fmt.Print(RED, "SESSION ", connlistIPAddr[connid], WHITE, "> ")
	//	command := ReadLine()
	//	_conn, ok := connlist[connid]
	//	switch command {
	//	case "":
	//		// 如果输入为空,则什么都不做
	//	case "help":
	//		fmt.Println("")
	//		fmt.Println(CYAN, "-------------------------------------------------------")
	//		fmt.Println(CYAN, "session            选择在线的客户端")
	//		fmt.Println(CYAN, "exit               客户端下线")
	//		fmt.Println(CYAN, "quit               退出服务器端")
	//		fmt.Println(CYAN, "-------------------------------------------------------")
	//		fmt.Println("")
	//	case "session":
	//		fmt.Println(connlist)
	//		fmt.Print("选择客户端ID: ")
	//		inputid := ReadLine()
	//		if inputid != "" {
	//			var e error
	//			connid, e = strconv.Atoi(inputid)
	//			if e != nil {
	//				fmt.Println("请输入数字")
	//			} else if _, ok := connlist[connid]; ok {
	//				//如果输入并且存在客户端id
	//				//_cmd := base64.URLEncoding.EncodeToString([]byte("getos"))
	//				sendMsgToClient(connlist[connid],"cmd")
	//			}
	//		}
	//	case "exit":
	//		if ok {
	//			sendMsgToClient(_conn,"exit")
	//		}
	//	case "quit":
	//		os.Exit(0)
	//
	//	case "screenshot":
	//		if ok {
	//			sendMsgToClient(_conn,"screenshot")
	//		}
	//	default:
	//		if ok {
	//			sendMsgToClient(_conn,command)
	//		}
	//	}
	//}
}
// sendMsgToClient frames msg with the protocol envelope and writes it to the
// client connection.
// NOTE(review): the error from conn.Write is discarded — a dead connection
// fails silently here; the failure surfaces later in connection()'s read loop.
func sendMsgToClient(conn net.Conn, msg string) {
	// Frame the payload so the receiver can depacketize it.
	smsg := protocol.Enpack([]byte(msg))
	conn.Write(smsg)
}
|
package sys
import (
"math/rand"
"time"
)
// Random returns a uniformly distributed random int32 in [0, num].
// A fresh time-seeded source is created per call (NOTE(review): calls within
// the same nanosecond share a seed; consider a package-level source).
//
// Fix: the bound is computed as int(num)+1 instead of int(num+1), which
// overflowed int32 (to a negative value, panicking Intn) when num is
// math.MaxInt32.
func Random(num int32) int32 {
	s1 := rand.NewSource(time.Now().UnixNano())
	r1 := rand.New(s1)
	return int32(r1.Intn(int(num) + 1))
}
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package aucoalesce
import (
"math"
"os/user"
"strings"
"sync"
"time"
)
// cacheTimeout is how long resolved user/group names are cached.
const cacheTimeout = time.Minute

// Package-level caches shared by ResolveIDs and the Hardcode helpers.
var (
	userLookup  = NewUserCache(cacheTimeout)
	groupLookup = NewGroupCache(cacheTimeout)
	// noExpiration = time.Unix(math.MaxInt64, 0)
	// The above breaks time.Before and time.After due to overflows.
	// See https://stackoverflow.com/questions/25065055/what-is-the-maximum-time-time-in-go
	//
	// Safe alternative:
	noExpiration = time.Unix(0, 0).Add(math.MaxInt64 - 1)
)
type stringItem struct {
timeout time.Time
value string
}
func (i *stringItem) isExpired() bool {
return time.Now().After(i.timeout)
}
// EntityCache is a cache of IDs and usernames.
// It holds two directions of the same mapping: byID resolves an ID to a
// name, byName resolves a name back to its ID.
type EntityCache struct {
	byID, byName stringCache
}
// NewUserCache returns a new EntityCache to resolve users. EntityCache is
// thread-safe. The root user (UID "0") is pre-seeded and never expires.
//
// Fix: the lookup closures named their local variable "user", shadowing the
// imported os/user package inside the closure; renamed to "u".
func NewUserCache(expiration time.Duration) *EntityCache {
	return &EntityCache{
		byID: stringCache{
			expiration: expiration,
			data: map[string]stringItem{
				"0": {timeout: noExpiration, value: "root"},
			},
			lookupFn: func(s string) string {
				u, err := user.LookupId(s)
				if err != nil {
					return ""
				}
				return u.Username
			},
		},
		byName: stringCache{
			expiration: expiration,
			data: map[string]stringItem{
				"root": {timeout: noExpiration, value: "0"},
			},
			lookupFn: func(s string) string {
				u, err := user.Lookup(s)
				if err != nil {
					return ""
				}
				return u.Uid
			},
		},
	}
}
// LookupID looks up a UID/GID and returns the user/group name associated
// with it. If no name could be found an empty string is returned. The value
// is cached for the cache's expiration period.
func (c *EntityCache) LookupID(id string) string {
	return c.byID.lookup(id)
}
// LookupName looks up a user/group name and returns the ID associated with
// it. If no ID could be found an empty string is returned. The value is
// cached for the cache's expiration period. This requires cgo on Linux.
func (c *EntityCache) LookupName(entityName string) string {
	return c.byName.lookup(entityName)
}
// NewGroupCache returns a new EntityCache to resolve groups. EntityCache is
// thread-safe. The root group (GID "0") is pre-seeded and never expires.
func NewGroupCache(expiration time.Duration) *EntityCache {
	byID := stringCache{
		expiration: expiration,
		data: map[string]stringItem{
			"0": {timeout: noExpiration, value: "root"},
		},
		lookupFn: func(id string) string {
			g, err := user.LookupGroupId(id)
			if err != nil {
				return ""
			}
			return g.Name
		},
	}
	byName := stringCache{
		expiration: expiration,
		data: map[string]stringItem{
			"root": {timeout: noExpiration, value: "0"},
		},
		lookupFn: func(name string) string {
			g, err := user.LookupGroup(name)
			if err != nil {
				return ""
			}
			return g.Gid
		},
	}
	return &EntityCache{byID: byID, byName: byName}
}
// ResolveIDs translates all uid and gid values to their associated names.
// Prior to Go 1.9 this requires cgo on Linux. UID and GID values are cached
// for 60 seconds from the time they are read.
// Uses the package-level userLookup/groupLookup caches.
func ResolveIDs(event *Event) {
	ResolveIDsFromCaches(event, userLookup, groupLookup)
}
// ResolveIDsFromCaches translates all uid and gid values to their associated
// names using the provided caches. Prior to Go 1.9 this requires cgo on Linux.
// Each section below rewrites IDs in place only when a name was resolved;
// unresolvable IDs are left untouched.
func ResolveIDsFromCaches(event *Event, users, groups *EntityCache) {
	// Actor: replace primary/secondary IDs with user names when resolvable.
	if v := users.LookupID(event.Summary.Actor.Primary); v != "" {
		event.Summary.Actor.Primary = v
	}
	if v := users.LookupID(event.Summary.Actor.Secondary); v != "" {
		event.Summary.Actor.Secondary = v
	}
	// User: map each "*uid" key through the user cache and each "*gid" key
	// through the group cache, collecting resolved names alongside the IDs.
	names := map[string]string{}
	for key, id := range event.User.IDs {
		if strings.HasSuffix(key, "uid") {
			if v := users.LookupID(id); v != "" {
				names[key] = v
			}
		} else if strings.HasSuffix(key, "gid") {
			if v := groups.LookupID(id); v != "" {
				names[key] = v
			}
		}
	}
	if len(names) > 0 {
		event.User.Names = names
	}
	// File owner/group.
	if event.File != nil {
		if event.File.UID != "" {
			event.File.Owner = users.LookupID(event.File.UID)
		}
		if event.File.GID != "" {
			event.File.Group = groups.LookupID(event.File.GID)
		}
	}
	// ECS User and groups.
	event.ECS.User.lookup(users)
	event.ECS.Group.lookup(groups)
}
// HardcodeUsers is useful for injecting values for testing.
// Entries are stored in both directions (ID->name and name->ID) and never expire.
func HardcodeUsers(users ...user.User) {
	for _, usr := range users {
		userLookup.byID.hardcode(usr.Uid, usr.Username)
		userLookup.byName.hardcode(usr.Username, usr.Uid)
	}
}
// HardcodeGroups is useful for injecting values for testing.
// Entries are stored in both directions (ID->name and name->ID) and never expire.
func HardcodeGroups(groups ...user.Group) {
	for _, grp := range groups {
		groupLookup.byID.hardcode(grp.Gid, grp.Name)
		groupLookup.byName.hardcode(grp.Name, grp.Gid)
	}
}
// stringCache is a mutex-guarded string-to-string cache with per-entry
// expiration. Misses are resolved through lookupFn and cached (including
// negative/empty results).
type stringCache struct {
	mutex sync.Mutex
	expiration time.Duration // TTL applied to entries stored by lookup
	data map[string]stringItem // cached results keyed by lookup key
	lookupFn func(string) string // resolver invoked on cache miss
}
// lookup returns the cached value for key, resolving and caching it on a
// miss or after expiry. Empty and "unset" keys short-circuit to "".
func (c *stringCache) lookup(key string) string {
	if key == "" || key == "unset" {
		return ""
	}
	c.mutex.Lock()
	defer c.mutex.Unlock()
	entry, found := c.data[key]
	if found && !entry.isExpired() {
		return entry.value
	}
	// Failed lookups are cached too so they are not retried until expiry.
	value := c.lookupFn(key)
	c.data[key] = stringItem{timeout: time.Now().Add(c.expiration), value: value}
	return value
}
// hardcode stores a key/value pair that never expires.
func (c *stringCache) hardcode(key, value string) {
	c.mutex.Lock()
	defer c.mutex.Unlock()
	c.data[key] = stringItem{timeout: noExpiration, value: value}
}
|
package display
import (
"reflect"
"strings"
"testing"
"github.com/AnuchitPrasertsang/roshambo/decide"
)
// TestSplitArtAscii checks that splitArtAscii is equivalent to splitting the
// art on newlines.
func TestSplitArtAscii(t *testing.T) {
	a := splitArtAscii(PaperArt)
	if !reflect.DeepEqual(a, strings.Split(PaperArt, "\n")) {
		t.Error("split art ascii wrong")
	}
}
// TestHightShouldBeEqual ensures all three ASCII arts span the same number of
// lines so they can be rendered side by side.
func TestHightShouldBeEqual(t *testing.T) {
	pl := len(splitArtAscii(PaperArt))
	rl := len(splitArtAscii(RockArt))
	sl := len(splitArtAscii(ScissorsArt))
	if !(pl == rl && rl == sl) {
		// Fixed typo in the failure message ("Hight" -> "height").
		t.Errorf("line height should be equal but got paper: %d scissors: %d rock: %d \n", pl, sl, rl)
	}
}
// TestMaxWidthPaperArt pins the widest line of the paper art at 64 columns.
func TestMaxWidthPaperArt(t *testing.T) {
	actual := maxWidth(splitArtAscii(PaperArt))
	expected := 64
	if actual != expected {
		t.Errorf(" max width should be %d but got %d", expected, actual)
	}
}
// TestMaxWidthScissorsArt pins the widest line of the scissors art at 65 columns.
func TestMaxWidthScissorsArt(t *testing.T) {
	actual := maxWidth(splitArtAscii(ScissorsArt))
	expected := 65
	if actual != expected {
		t.Errorf(" max width should be %d but got %d", expected, actual)
	}
}
// TestMaxWidthRockArt pins the widest line of the rock art at 64 columns.
func TestMaxWidthRockArt(t *testing.T) {
	actual := maxWidth(splitArtAscii(RockArt))
	expected := 64
	if actual != expected {
		t.Errorf(" max width should be %d but got %d", expected, actual)
	}
}
// TestConcatSameLineForDisplay checks that two art line-slices are joined
// row by row into one newline-separated string.
func TestConcatSameLineForDisplay(t *testing.T) {
	r := []string{"rline 1 ", "rline 2 ", "rline 3 "}
	p := []string{"pline 1 ", "pline 2 ", "pline 3 "}
	result := concatSameLineForDisplay(r, p)
	expected := "rline 1 pline 1 \nrline 2 pline 2 \nrline 3 pline 3 "
	if result != expected {
		t.Errorf("expect:\n % #v\n but got: \n % #v\n", expected, result)
	}
}
// TestDisplay pins the total rendered length of a Paper-vs-Rock display.
// NOTE(review): 3742 is a snapshot value tied to the current art assets.
func TestDisplay(t *testing.T) {
	d := Display(decide.Paper, decide.Rock)
	lenOfConcat := 3742
	if len(d) != lenOfConcat {
		t.Errorf("expect %d, but got '%d'", lenOfConcat, len(d))
	}
}
// TestGetArt checks that GetArt returns a non-empty art for a valid choice.
func TestGetArt(t *testing.T) {
	art := GetArt(decide.Rock)
	if len(art) < 1 {
		t.Error("art is empty")
	}
}
|
package file
import (
"io/ioutil"
plugin_v1 "github.com/cyberark/secretless-broker/internal/plugin/v1"
)
// Provider reads the contents of the specified file.
type Provider struct {
	Name string
}
// ProviderFactory constructs a filesystem Provider.
// No configuration or credentials are required.
func ProviderFactory(options plugin_v1.ProviderOptions) (plugin_v1.Provider, error) {
	return &Provider{
		Name: options.Name,
	}, nil
}
// GetName returns the name of the provider
func (p *Provider) GetName() string {
	return p.Name
}
// GetValues takes in variable ids and returns their resolved values. This method is
// needed to satisfy the Provider interface.
func (p *Provider) GetValues(ids ...string) (map[string]plugin_v1.ProviderResponse, error) {
	return plugin_v1.GetValues(p, ids...)
}
// GetValue reads the contents of the identified file (the id is a file path).
// NOTE(review): ioutil.ReadFile is deprecated since Go 1.16; os.ReadFile is
// the drop-in replacement once imports are updated.
func (p *Provider) GetValue(id string) ([]byte, error) {
	return ioutil.ReadFile(id)
}
|
package main
import (
"log"
"net/http"
"time"
)
func Logger(httpHandler http.Handler, name string) http.Handler {
return http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) {
start := time.Now()
httpHandler.ServeHTTP(res, req)
log.Printf(
"%s\t%s\t%s\t%s",
req.Method,
req.RequestURI,
name,
time.Since(start),
)
})
}
|
package core
import (
"sync"
"time"
"github.com/benbjohnson/clock"
)
// wakeupTimeout is the active duration after which Expired reports true.
const wakeupTimeout = 30 * time.Second
// Timer measures active time between start and stop events
type Timer struct {
	sync.Mutex
	clck clock.Clock // injectable clock, enables deterministic tests
	started time.Time // zero value means "not running"
}
// NewTimer creates timer that can expire
func NewTimer() *Timer {
	return &Timer{
		clck: clock.New(),
	}
}
// Start starts the timer if not started already; a second Start while
// running is a no-op (it does not restart the measurement).
func (m *Timer) Start() {
	m.Lock()
	defer m.Unlock()
	if m.started.IsZero() {
		m.started = m.clck.Now()
	}
}
// Stop stops the timer and clears its start time.
func (m *Timer) Stop() {
	m.Lock()
	defer m.Unlock()
	m.started = time.Time{}
}
// Expired reports whether the timer has been running for at least
// wakeupTimeout. When it has, the timer is reset as a side effect, so the
// next call returns false until it expires again.
func (m *Timer) Expired() bool {
	m.Lock()
	defer m.Unlock()
	if m.started.IsZero() || m.clck.Since(m.started) < wakeupTimeout {
		return false
	}
	m.started = time.Time{}
	return true
}
|
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information
package sync2_test
import (
"testing"
"time"
"github.com/stretchr/testify/require"
"storj.io/common/sync2"
)
// TestWaitGroup verifies that WorkGroup.Wait blocks until both a manually
// tracked worker (Start/Done) and a Go-launched worker have finished.
func TestWaitGroup(t *testing.T) {
	t.Parallel()
	const Wait = 2 * time.Second
	const TimeError = time.Second / 2
	var group sync2.WorkGroup
	require.True(t, group.Start())
	go func() {
		defer group.Done()
		time.Sleep(Wait)
	}()
	require.True(t, group.Go(func() {
		time.Sleep(Wait)
	}))
	start := time.Now()
	group.Wait()
	duration := time.Since(start)
	// Wait should return roughly when the workers end, within TimeError slack.
	if duration < Wait-TimeError || duration > Wait+TimeError {
		t.Fatalf("waited %s instead of %s", duration, Wait)
	}
}
// TestWaitGroupClose verifies that Go is rejected after Close and that Wait
// only waits for the work accepted before Close.
func TestWaitGroupClose(t *testing.T) {
	t.Parallel()
	const Wait = 2 * time.Second
	const LongWait = 10 * time.Second
	const TimeError = time.Second / 2
	var group sync2.WorkGroup
	require.True(t, group.Go(func() {
		time.Sleep(Wait)
	}))
	group.Close()
	// After Close, no new work may be scheduled.
	require.False(t, group.Go(func() {
		time.Sleep(LongWait)
	}))
	start := time.Now()
	group.Wait()
	duration := time.Since(start)
	// Wait must cover the accepted worker (~Wait) but not the rejected one.
	if duration < Wait-TimeError || duration > LongWait-TimeError {
		t.Fatalf("waited %s instead of %s", duration, Wait)
	}
}
|
package systemtests
import (
"encoding/json"
"time"
"github.com/contiv/volplugin/config"
. "gopkg.in/check.v1"
)
// TestVolpluginCrashRestart exercises volplugin crash/restart behavior:
// while a volume is mounted on mon0, other hosts must be unable to mount it
// even across volplugin restarts; once containers are cleared, mounting from
// mon1 must succeed again.
func (s *systemtestSuite) TestVolpluginCrashRestart(c *C) {
	c.Assert(s.createVolume("mon0", "tenant1", "test", nil), IsNil)
	c.Assert(s.vagrant.GetNode("mon0").RunCommand("docker run -itd -v tenant1/test:/mnt debian sleep infinity"), IsNil)
	c.Assert(stopVolplugin(s.vagrant.GetNode("mon0")), IsNil)
	time.Sleep(10 * time.Second) // this is based on a 5s ttl set at volmaster/volplugin startup
	c.Assert(startVolplugin(s.vagrant.GetNode("mon0")), IsNil)
	time.Sleep(1 * time.Second)
	c.Assert(s.createVolume("mon1", "tenant1", "test", nil), IsNil)
	// Mount from a second host must fail while mon0 holds the volume.
	c.Assert(s.vagrant.GetNode("mon1").RunCommand("docker run -itd -v tenant1/test:/mnt debian sleep infinity"), NotNil)
	c.Assert(stopVolplugin(s.vagrant.GetNode("mon0")), IsNil)
	c.Assert(startVolplugin(s.vagrant.GetNode("mon0")), IsNil)
	time.Sleep(10 * time.Second)
	c.Assert(s.createVolume("mon1", "tenant1", "test", nil), IsNil)
	c.Assert(s.vagrant.GetNode("mon1").RunCommand("docker run -itd -v tenant1/test:/mnt debian sleep infinity"), NotNil)
	// After clearing all containers the lock is released and mon1 may mount.
	s.clearContainers()
	c.Assert(s.createVolume("mon1", "tenant1", "test", nil), IsNil)
	c.Assert(s.vagrant.GetNode("mon1").RunCommand("docker run -itd -v tenant1/test:/mnt debian sleep infinity"), IsNil)
}
// TestVolpluginHostLabel restarts volplugin with a custom --host-label and
// verifies the label ("quux") is recorded as the hostname in the use config.
func (s *systemtestSuite) TestVolpluginHostLabel(c *C) {
	c.Assert(stopVolplugin(s.vagrant.GetNode("mon0")), IsNil)
	c.Assert(s.vagrant.GetNode("mon0").RunCommandBackground("sudo -E `which volplugin` --host-label quux --debug --ttl 5"), IsNil)
	// NOTE(review): 10ms is a very short settle time for a background daemon;
	// confirm this is not a flake source.
	time.Sleep(10 * time.Millisecond)
	c.Assert(s.createVolume("mon0", "tenant1", "foo", nil), IsNil)
	out, err := s.docker("run -d -v tenant1/foo:/mnt debian sleep infinity")
	c.Assert(err, IsNil)
	defer s.purgeVolume("mon0", "tenant1", "foo", true)
	defer s.docker("rm -f " + out)
	ut := &config.UseConfig{}
	// we know the pool is rbd here, so cheat a little.
	out, err = s.volcli("use get tenant1 foo")
	c.Assert(err, IsNil)
	c.Assert(json.Unmarshal([]byte(out), ut), IsNil)
	c.Assert(ut.Hostname, Equals, "quux")
}
|
package httpx
import (
"context"
"crypto/sha256"
"fmt"
)
// FetchResource fetches the specified resource and returns it.
// It issues a GET request for URLPath and returns the response body bytes.
func (c Client) FetchResource(ctx context.Context, URLPath string) ([]byte, error) {
	request, err := c.NewRequest(ctx, "GET", URLPath, nil, nil)
	if err != nil {
		return nil, err
	}
	return c.Do(request)
}
// FetchResourceAndVerify fetches a specific resource and checks its body
// against the expected hex-encoded SHA256 digest, returning an error on
// mismatch.
func (c Client) FetchResourceAndVerify(ctx context.Context, URL, SHA256Sum string) ([]byte, error) {
	c.Logger.Debugf("httpx: expected SHA256: %s", SHA256Sum)
	body, err := c.FetchResource(ctx, URL)
	if err != nil {
		return nil, err
	}
	sum := fmt.Sprintf("%x", sha256.Sum256(body))
	c.Logger.Debugf("httpx: real SHA256: %s", sum)
	if sum != SHA256Sum {
		return nil, fmt.Errorf("httpx: SHA256 mismatch: got %s and expected %s", sum, SHA256Sum)
	}
	return body, nil
}
|
package locale
import (
"fmt"
"io/fs"
"github.com/BurntSushi/toml"
"github.com/cloudfoundry/jibber_jabber"
"github.com/evcc-io/evcc/server/assets"
"github.com/evcc-io/evcc/util/locale/internal"
"github.com/nicksnyder/go-i18n/v2/i18n"
"golang.org/x/text/language"
)
// Config aliases the go-i18n localize configuration.
type Config = i18n.LocalizeConfig
var (
	Locale internal.ContextKey // context key for per-request locale selection
	Bundle *i18n.Bundle // message bundle, populated by Init
	Language string // detected host language tag
	Localizer *i18n.Localizer // localizer built for Language by Init
)
// Init loads all locale message files from the embedded i18n assets into
// Bundle, detects the host language, and builds a Localizer for it.
// English is the bundle default; if host language detection fails, German
// is used as fallback. Any load error is returned to the caller.
func Init() error {
	Bundle = i18n.NewBundle(language.English)
	Bundle.RegisterUnmarshalFunc("toml", toml.Unmarshal)
	dir, err := fs.ReadDir(assets.I18n, ".")
	if err != nil {
		// Previously panicked here; return the error instead so it is
		// handled consistently with the function's error signature.
		return fmt.Errorf("reading locales directory: %w", err)
	}
	for _, d := range dir {
		if _, err := Bundle.LoadMessageFileFS(assets.I18n, d.Name()); err != nil {
			return fmt.Errorf("loading locales failed: %w", err)
		}
	}
	Language, err = jibber_jabber.DetectLanguage()
	if err != nil {
		// Detection failure is non-fatal; fall back to German.
		Language = language.German.String()
	}
	Localizer = i18n.NewLocalizer(Bundle, Language)
	return nil
}
// Localize renders the given localize config. When localization fails, the
// raw message ID is returned as a fallback.
func Localize(lc *Config) string {
	msg, _, err := Localizer.LocalizeWithTag(lc)
	if err == nil {
		return msg
	}
	return lc.MessageID
}
// LocalizeID localizes a bare message ID with no template data.
func LocalizeID(id string) string {
	return Localize(&Config{MessageID: id})
}
|
package main
import "fmt"
// main demonstrates branching on an integer value; with a == 1 it prints
// "second".
func main() {
	a := 1
	switch a {
	case 0:
		fmt.Println("first")
	case 1:
		fmt.Println("second")
	default:
		fmt.Println("third")
	}
}
|
package sstats
import (
"testing"
)
// TestSumUpdate feeds values into a window-5 rolling sum and checks the
// running value after each update (once the window fills, the oldest value
// drops out).
func TestSumUpdate(t *testing.T) {
	m, err := NewSum(5)
	if err != nil {
		t.Fatal(err)
	}
	vals := []float64{1, 2, 3, 4, 5, 6, 7, 8, 9}
	expected := []float64{1, 3, 6, 10, 15, 20, 25, 30, 35}
	for i, v := range vals {
		m.Update(v)
		val := m.Value()
		if val != expected[i] {
			t.Errorf("Expected value %.3f, but got %.3f\n", expected[i], val)
			continue
		}
	}
}
// BenchmarkSumUpdate measures the cost of streaming 100k updates through a
// window-1000 rolling sum, resetting between iterations.
func BenchmarkSumUpdate(b *testing.B) {
	window := 1000
	numValues := 100000
	m, err := NewSum(window)
	if err != nil {
		b.Fatal(err)
	}
	for j := 0; j < b.N; j++ {
		for i := 0; i < numValues; i++ {
			m.Update(float64(i))
		}
		m.Reset()
	}
}
|
package generic
import (
"time"
"github.com/iotaledger/hive.go/kvstore/debug"
"github.com/iotaledger/hive.go/objectstorage"
)
// Aliases re-export the objectstorage option types so callers of this
// generic wrapper package do not need to import objectstorage directly.
type (
	Option = objectstorage.Option
	Options = objectstorage.Options
	ReadOption = objectstorage.ReadOption
	ReadOptions = objectstorage.ReadOptions
	IteratorOption = objectstorage.IteratorOption
	IteratorOptions = objectstorage.IteratorOptions
)
// CacheTime sets the time after which the object is evicted from the cache.
func CacheTime(duration time.Duration) Option {
	return objectstorage.CacheTime(duration)
}
// LogAccess sets up a logger that logs all calls to the underlying store in the given file. It is possible to filter
// the logged commands by providing an optional filter flag.
func LogAccess(fileName string, commandsFilter ...debug.Command) Option {
	return objectstorage.LogAccess(fileName, commandsFilter...)
}
// PersistenceEnabled enables the persistence of the object storage.
func PersistenceEnabled(persistenceEnabled bool) Option {
	return objectstorage.PersistenceEnabled(persistenceEnabled)
}
// KeysOnly is used to store only the keys of the elements.
func KeysOnly(keysOnly bool) Option {
	return objectstorage.KeysOnly(keysOnly)
}
// StoreOnCreation writes an object directly to the persistence layer on creation.
func StoreOnCreation(store bool) Option {
	return objectstorage.StoreOnCreation(store)
}
// ReleaseExecutorWorkerCount sets the number of workers that execute the
// scheduled eviction of the objects in parallel (whenever they become due).
func ReleaseExecutorWorkerCount(releaseExecutorWorkerCount int) Option {
	return objectstorage.ReleaseExecutorWorkerCount(releaseExecutorWorkerCount)
}
// LeakDetectionEnabled enables the leak detection of the object storage.
func LeakDetectionEnabled(leakDetectionEnabled bool, options ...LeakDetectionOptions) Option {
	return objectstorage.LeakDetectionEnabled(leakDetectionEnabled, options...)
}
// OverrideLeakDetectionWrapper is used to override the default leak detection wrapper.
// The provided wrapperFunc receives the typed cached object built from the raw
// objectstorage implementation.
func OverrideLeakDetectionWrapper[T StorableObject](wrapperFunc func(cachedObject *CachedObject[T]) LeakDetectionWrapper) Option {
	return objectstorage.OverrideLeakDetectionWrapper(func(cachedObject *objectstorage.CachedObjectImpl) objectstorage.LeakDetectionWrapper {
		return wrapperFunc(newCachedObject[T](cachedObject))
	})
}
// PartitionKey sets the partition sizes of the key.
func PartitionKey(keyPartitions ...int) Option {
	return objectstorage.PartitionKey(keyPartitions...)
}
// OnEvictionCallback sets a function that is called on eviction of the object.
// The callback receives the typed cached object wrapping the evicted entry.
func OnEvictionCallback[T StorableObject](cb func(cachedObject *CachedObject[T])) Option {
	return objectstorage.OnEvictionCallback(func(cachedObject objectstorage.CachedObject) {
		cb(newCachedObject[T](cachedObject))
	})
}
// WithReadSkipCache is used to skip the elements in the cache.
func WithReadSkipCache(skipCache bool) ReadOption {
	return objectstorage.WithReadSkipCache(skipCache)
}
// WithReadSkipStorage is used to skip the elements in the storage.
func WithReadSkipStorage(skipStorage bool) ReadOption {
	return objectstorage.WithReadSkipStorage(skipStorage)
}
// WithIteratorSkipCache is used to skip the elements in the cache.
func WithIteratorSkipCache(skipCache bool) IteratorOption {
	return objectstorage.WithIteratorSkipCache(skipCache)
}
// WithIteratorSkipStorage is used to skip the elements in the storage.
func WithIteratorSkipStorage(skipStorage bool) IteratorOption {
	return objectstorage.WithIteratorSkipStorage(skipStorage)
}
// WithIteratorPrefix is used to iterate a subset of elements with a defined prefix.
func WithIteratorPrefix(prefix []byte) IteratorOption {
	return objectstorage.WithIteratorPrefix(prefix)
}
// WithIteratorMaxIterations is used to stop the iteration after a certain amount of iterations.
// 0 disables the limit.
func WithIteratorMaxIterations(maxIterations int) IteratorOption {
	return objectstorage.WithIteratorMaxIterations(maxIterations)
}
|
package math
import (
"errors"
"reflect"
"strconv"
)
// i2float pretty like cast.ToFloat64E
func i2float(a interface{}) (float64, error) { // interface to number
aValue := reflect.ValueOf(a)
switch aValue.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return float64(aValue.Int()), nil
case reflect.Float32, reflect.Float64:
return aValue.Float(), nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return float64(aValue.Uint()), nil
/*
case reflect.Bool:
if a == true {
return 1, nil
}
return 0, nil
*/
case reflect.String:
return strconv.ParseFloat(aValue.String(), 64)
default:
return 0, errors.New("type error")
}
}
func Compute(a, b interface{}, op rune) (float64, error) {
x, errX := i2float(a)
y, errY := i2float(b)
for _, err := range []error{errX, errY} {
if err != nil {
return 0, err
}
}
switch op {
case '+':
return x + y, nil
case '-':
return x - y, nil
case '*':
return x * y, nil
case '/':
if y == 0 {
return 0, errors.New("can't divide the value by 0")
}
return x / y, nil
default:
return 0, errors.New("there is no such an operation")
}
}
|
package main
import "fmt"
// main prints the integers 1 through 99 in decimal, binary and hexadecimal.
func main() {
	for i := 1; i < 100; i++ {
		fmt.Printf("decimal : %d\tbinary : %b\thexadecimal : %#x\n", i, i, i)
	}
}
|
package internal
import (
"testing"
"github.com/stretchr/testify/assert"
)
// Test_Generate_Run runs the generate cobra command and checks that it
// executes without error and mentions "generate" in its output.
func Test_Generate_Run(t *testing.T) {
	out, err := runCobraCmd(GenerateCmd)
	assert.Nil(t, err)
	assert.Contains(t, out, "generate")
}
|
package utils
import (
"reflect"
"strconv"
)
// InSlice reports whether val is an element of array and at which position.
// array must be a slice; any other kind (including nil) yields
// exists=false, index=-1. Elements are compared with reflect.DeepEqual and
// index is the first match.
func InSlice(val interface{}, array interface{}) (exists bool, index int) {
	index = -1
	if array == nil {
		// reflect.TypeOf(nil) is nil; guard to avoid a panic on Kind().
		return
	}
	if reflect.TypeOf(array).Kind() != reflect.Slice {
		return
	}
	s := reflect.ValueOf(array)
	for i := 0; i < s.Len(); i++ {
		if reflect.DeepEqual(val, s.Index(i).Interface()) {
			return true, i
		}
	}
	return
}
func BlockSlideSlice(array interface{}, blockSize int, f func(interface{}) bool) {
run := true
switch reflect.TypeOf(array).Kind() {
case reflect.Slice:
s := reflect.ValueOf(array)
n := s.Len()
for i := 0; (i < n) && run; i += blockSize {
if i+blockSize > n { //TODO improve here (?)
run = f(s.Slice(i, n).Interface())
} else {
run = f(s.Slice(i, i+blockSize).Interface())
}
}
}
}
// ToInt64Slice parses every string in array as a base-10 int64 and returns
// the parsed values. The first parse failure aborts and returns its error.
func ToInt64Slice(array []string) ([]int64, error) {
	out := make([]int64, len(array))
	for i, s := range array {
		n, err := strconv.ParseInt(s, 10, 64)
		if err != nil {
			return nil, err
		}
		out[i] = n
	}
	return out, nil
}
|
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"crd/pkg/apis/crd.com"
)
// SchemeGroupVersion is the group/version used to register these objects.
var SchemeGroupVersion = schema.GroupVersion{Group: crdcom.GroupName, Version: "v1alpha1"}
// Resource takes an unqualified resource name and returns it qualified with
// this package's group.
func Resource(resource string) schema.GroupResource {
	return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
	SchemeBuilder runtime.SchemeBuilder // collects the registration functions
	localSchemeBuilder = &SchemeBuilder
	AddToScheme = localSchemeBuilder.AddToScheme // applies all registrations to a scheme
)
func init() {
	// Register addKnownTypes so AddToScheme installs this package's types.
	localSchemeBuilder.Register(addKnownTypes)
}
// Adds the list of known types to api.Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(SchemeGroupVersion,
		&CustomPod{},
		&CustomPodList{},
	)
	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
	return nil
}
|
package main
import (
// "fmt"
"github.com/emicklei/go-restful"
"net/http"
)
var (
	// Result is declared but not referenced in this file.
	// NOTE(review): possibly dead code — confirm before removing.
	Result string
)
// Response is the JSON payload returned by the GET handler.
type Response struct {
	Code int `json:"Code"`
	// Message string `json:"Message,omitempty"`
	// Result interface{} `json:"Result,omitempty"`
	// Count int `json:"Count,omitempty"`
	// MD5 string `json:"MD5,omitempty"`
}
// responseData is the shared payload served by testGet.
var responseData Response
// var responseData = make(map[string]interface{})
func init() {
	// Initialize the shared response with a success code.
	responseData = Response{Code: 0}
	// responseData["Code"] = 0
}
// main wires a go-restful web service with GET/POST routes at "/" and
// serves it on :8888.
func main() {
	// setup service
	ws := new(restful.WebService)
	ws.Consumes("*/*")
	ws.Produces(restful.MIME_JSON)
	ws.Route(ws.GET("/").To(testGet))
	ws.Route(ws.POST("/").To(testPost))
	restful.Add(ws)
	// setup request + writer
	// run
	// restful.DefaultContainer.ServeHTTP(httpWriter, httpRequest)
	// NOTE(review): the error from ListenAndServe is ignored; a bind
	// failure exits silently — consider logging it.
	http.ListenAndServe(":8888", nil)
}
// testGet replies 200 OK with the shared responseData payload as JSON.
func testGet(rq *restful.Request, rp *restful.Response) {
	// fmt.Println("get here")
	rp.WriteHeaderAndEntity(http.StatusOK, responseData)
}
// testPost replies 200 OK with an empty string body.
func testPost(rq *restful.Request, rp *restful.Response) {
	// fmt.Println("post here")
	rp.WriteHeaderAndEntity(http.StatusOK, "")
}
|
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package server_test
import (
"context"
"sort"
"testing"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/skip"
"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/errors"
"github.com/dustin/go-humanize"
"github.com/stretchr/testify/require"
)
// TestAddNewStoresToExistingNodes tests database behavior with
// multiple stores per node, in particular when new stores are
// added while nodes are shut down. This test starts a cluster with
// three nodes, shuts down all nodes and adds a store to each node,
// and ensures nodes start back up successfully. See #39415.
// TestAddNewStoresToExistingNodes tests database behavior with
// multiple stores per node, in particular when new stores are
// added while nodes are shut down. This test starts a cluster with
// three nodes, shuts down all nodes and adds a store to each node,
// and ensures nodes start back up successfully. See #39415.
func TestAddNewStoresToExistingNodes(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	skip.UnderStress(t, "too many new stores and nodes for stress")
	ctx := context.Background()
	// One on-disk store per node for the initial cluster.
	n1s1, n1cleanup1 := testutils.TempDir(t)
	defer n1cleanup1()
	n2s1, n2cleanup1 := testutils.TempDir(t)
	defer n2cleanup1()
	n3s1, n3cleanup1 := testutils.TempDir(t)
	defer n3cleanup1()
	numNodes := 3
	tcArgs := base.TestClusterArgs{
		ServerArgsPerNode: map[int]base.TestServerArgs{
			// NB: on my local (beefy) machine, upreplication
			// takes ~6s. This is pretty hefty compared to ~1s
			// with ephemeral stores. But - we need the real
			// stores here. At the time of writing, we perform
			// ~100 change replicas txns, all in all, and
			// 0.06s for a replication change does seem ok.
			0: {StoreSpecs: []base.StoreSpec{{Path: n1s1}}},
			1: {StoreSpecs: []base.StoreSpec{{Path: n2s1}}},
			2: {StoreSpecs: []base.StoreSpec{{Path: n3s1}}},
		},
	}
	tc := testcluster.StartTestCluster(t, numNodes, tcArgs)
	// NB: it's important that this test wait for full replication. Otherwise,
	// with only a single voter on the range that allocates store IDs, it can
	// pass erroneously. StartTestCluster already calls it, but we call it
	// again explicitly.
	if err := tc.WaitForFullReplication(); err != nil {
		log.Fatalf(ctx, "while waiting for full replication: %v", err)
	}
	clusterID := tc.Server(0).ClusterID()
	tc.Stopper().Stop(ctx)
	// Add two additional stores to each node.
	n1s2, n1cleanup2 := testutils.TempDir(t)
	defer n1cleanup2()
	n2s2, n2cleanup2 := testutils.TempDir(t)
	defer n2cleanup2()
	n3s2, n3cleanup2 := testutils.TempDir(t)
	defer n3cleanup2()
	n1s3, n1cleanup3 := testutils.TempDir(t)
	defer n1cleanup3()
	n2s3, n2cleanup3 := testutils.TempDir(t)
	defer n2cleanup3()
	n3s3, n3cleanup3 := testutils.TempDir(t)
	defer n3cleanup3()
	tcArgs = base.TestClusterArgs{
		// We need ParallelStart since this is an existing cluster. If
		// we started sequentially, then the first node would hang forever
		// waiting for the KV layer to become available, but that only
		// happens when the second node also starts.
		ParallelStart:   true,
		ReplicationMode: base.ReplicationManual, // saves time
		ServerArgsPerNode: map[int]base.TestServerArgs{
			0: {
				StoreSpecs: []base.StoreSpec{
					{Path: n1s1}, {Path: n1s2}, {Path: n1s3},
				},
			},
			1: {
				StoreSpecs: []base.StoreSpec{
					{Path: n2s1}, {Path: n2s2}, {Path: n2s3},
				},
			},
			2: {
				StoreSpecs: []base.StoreSpec{
					{Path: n3s1}, {Path: n3s2}, {Path: n3s3},
				},
			},
		},
	}
	// Start all nodes with additional stores.
	tc = testcluster.StartTestCluster(t, numNodes, tcArgs)
	defer tc.Stopper().Stop(ctx)
	// Sanity check that we're testing what we wanted to test and didn't accidentally
	// bootstrap three single-node clusters (who knows).
	for _, srv := range tc.Servers {
		require.Equal(t, clusterID, srv.ClusterID())
	}
	// Ensure all nodes have all stores available, and each store has a unique
	// store ID.
	testutils.SucceedsSoon(t, func() error {
		var storeIDs []roachpb.StoreID
		for _, server := range tc.Servers {
			var storeCount = 0
			if err := server.GetStores().(*kvserver.Stores).VisitStores(
				func(s *kvserver.Store) error {
					storeCount++
					storeIDs = append(storeIDs, s.StoreID())
					return nil
				},
			); err != nil {
				return errors.Errorf("failed to visit all nodes, got %v", err)
			}
			if storeCount != 3 {
				return errors.Errorf("expected 3 stores to be available on n%s, got %d stores instead", server.NodeID(), storeCount)
			}
		}
		// Store IDs must form the contiguous sequence 1..len(storeIDs).
		sort.Slice(storeIDs, func(i, j int) bool {
			return storeIDs[i] < storeIDs[j]
		})
		for i := range storeIDs {
			expStoreID := roachpb.StoreID(i + 1)
			if storeIDs[i] != expStoreID {
				t.Fatalf("expected the %s store to have storeID s%s, found s%s", humanize.Ordinal(i+1), expStoreID, storeIDs[i])
			}
		}
		return nil
	})
}
// TestMultiStoreIDAlloc validates that we don't accidentally re-use or
// skip-over allocated store IDs in multi-store setups.
// TestMultiStoreIDAlloc validates that we don't accidentally re-use or
// skip-over allocated store IDs in multi-store setups.
func TestMultiStoreIDAlloc(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	skip.UnderStress(t, "too many new stores and nodes for stress")
	ctx := context.Background()
	numNodes := 3
	numStoresPerNode := 3
	// All stores are in-memory here; only ID allocation is under test.
	var storeSpecs []base.StoreSpec
	for i := 0; i < numStoresPerNode; i++ {
		storeSpecs = append(storeSpecs, base.StoreSpec{InMemory: true})
	}
	tcArgs := base.TestClusterArgs{
		ParallelStart:   true,
		ReplicationMode: base.ReplicationManual, // saves time
		ServerArgsPerNode: map[int]base.TestServerArgs{
			0: {StoreSpecs: storeSpecs},
			1: {StoreSpecs: storeSpecs},
			2: {StoreSpecs: storeSpecs},
		},
	}
	tc := testcluster.StartTestCluster(t, numNodes, tcArgs)
	defer tc.Stopper().Stop(ctx)
	// Sanity check that we're testing what we wanted to test and didn't accidentally
	// bootstrap three single-node clusters (who knows).
	clusterID := tc.Server(0).ClusterID()
	for _, srv := range tc.Servers {
		require.Equal(t, clusterID, srv.ClusterID())
	}
	// Ensure all nodes have all stores available, and each store has a unique
	// store ID.
	testutils.SucceedsSoon(t, func() error {
		var storeIDs []roachpb.StoreID
		for _, server := range tc.Servers {
			var storeCount = 0
			if err := server.GetStores().(*kvserver.Stores).VisitStores(
				func(s *kvserver.Store) error {
					storeCount++
					storeIDs = append(storeIDs, s.StoreID())
					return nil
				},
			); err != nil {
				return errors.Errorf("failed to visit all nodes, got %v", err)
			}
			if storeCount != numStoresPerNode {
				return errors.Errorf("expected %d stores to be available on n%s, got %d stores instead",
					numStoresPerNode, server.NodeID(), storeCount)
			}
		}
		// Store IDs must form the contiguous sequence 1..len(storeIDs).
		sort.Slice(storeIDs, func(i, j int) bool {
			return storeIDs[i] < storeIDs[j]
		})
		for i := range storeIDs {
			expStoreID := roachpb.StoreID(i + 1)
			if storeIDs[i] != expStoreID {
				t.Fatalf("expected the %s store to have storeID s%s, found s%s", humanize.Ordinal(i+1), expStoreID, storeIDs[i])
			}
		}
		return nil
	})
}
|
package storage
import (
"os"
"github.com/jmoiron/sqlx"
_ "github.com/lib/pq"
log "github.com/sirupsen/logrus"
)
// Database wraps the sqlx connection handle to the Postgres database.
type Database struct {
	Conn *sqlx.DB
}
// NewConnection opens a Postgres connection using the DATABASE_URL
// environment variable and returns it wrapped in a Database.
// On failure the error is returned to the caller instead of terminating
// the process (the original called log.Fatal, which made the error return
// unreachable and also assigned the connection before checking err).
func NewConnection() (*Database, error) {
	conn, err := sqlx.Connect("postgres", os.Getenv("DATABASE_URL"))
	if err != nil {
		log.Error(err)
		return nil, err
	}
	return &Database{Conn: conn}, nil
}
|
package httpserver
import (
"time"
"github.com/valyala/fasthttp"
)
// Defaults applied by NewServer unless overridden via Options.
const (
	defaultReadTimeout = 5 * time.Second
	defaultWriteTimeout = 5 * time.Second
	defaultAddr = ":5500"
	defaultShutdownTimeout = 3 * time.Second
)
// Server wraps a fasthttp server, exposing its terminal error through the
// notify channel.
type Server struct {
	server *fasthttp.Server
	notify chan error // receives the ListenAndServe result; see Notify
	shutdownTimeout time.Duration
}
// NewServer builds a fasthttp-backed Server with default timeouts, applies
// the given options, and immediately starts listening on the default
// address. Use Notify to observe the listener's terminal error.
func NewServer(handler fasthttp.RequestHandler, opts ...Option) *Server {
	s := &Server{
		server: &fasthttp.Server{
			Handler:      handler,
			ReadTimeout:  defaultReadTimeout,
			WriteTimeout: defaultWriteTimeout,
		},
		notify:          make(chan error, 1),
		shutdownTimeout: defaultShutdownTimeout,
	}
	// Apply caller-supplied options before the listener starts.
	for _, opt := range opts {
		opt(s)
	}
	s.start(defaultAddr)
	return s
}
// start launches the listener in a goroutine; its terminal error is sent on
// the notify channel, which is then closed.
func (s *Server) start(addr string) {
	go func() {
		s.notify <- s.server.ListenAndServe(addr)
		close(s.notify)
	}()
}
// Notify returns the channel that carries the listener's terminal error.
func (s *Server) Notify() <-chan error {
	return s.notify
}
// Shutdown gracefully shuts down the underlying fasthttp server.
func (s *Server) Shutdown() error {
	return s.server.Shutdown()
}
|
package server
import (
"encoding/json"
"github.com/tidwall/gjson"
)
// Script describes a request script: a protocol, a list of JSON request
// definitions, the request template used to execute them, and the collected
// responses.
type Script struct {
	Proto string `json:"proto"`
	// Tag fixed: it was `json:data` (missing quotes), which is a malformed
	// struct tag that encoding/json silently ignores, serializing the field
	// as "Data" instead of "data".
	Data []gjson.Result `json:"data"`
	HttpRequest *HttpRequest `json:"-"`
	ScriptResponse []*ScriptResponse `json:"response"`
}
// ScriptResponse pairs a request name with the response it produced.
type ScriptResponse struct {
	Name string `json:"name"`
	Response *Response `json:"response"`
}
// GenerateScript returns a Script with a default HttpRequest and an empty
// response list, ready for Validate to run.
func GenerateScript() *Script {
	return &Script{
		HttpRequest: GenerateHttpRequest(true),
		ScriptResponse: make([]*ScriptResponse, 0),
	}
}
// Validate executes every request described in script.Data sequentially,
// appending each named result to script.ScriptResponse. For each entry it
// fills HttpRequest from the entry's url/method/cookie/header/body fields,
// sends it via HttpSend, waits for the send handshake on sentCh, then reads
// the response.
func (script *Script) Validate() {
	sentCh := make(chan bool)
	response := make(chan *Response, 1)
	for _, v := range script.Data {
		script.HttpRequest.Url = v.Get("data.url").String()
		script.HttpRequest.Method = v.Get("data.method").String()
		script.HttpRequest.Cookie = v.Get("cookie").String()
		script.HttpRequest.HttpBody = new(HttpBody)
		header := v.Get("header").String()
		body := v.Get("body").String()
		// NOTE(review): both Unmarshal errors are ignored — malformed
		// header/body JSON is silently dropped; consider surfacing them.
		json.Unmarshal([]byte(header), &script.HttpRequest.Header)
		json.Unmarshal([]byte(body), &script.HttpRequest.HttpBody.Body)
		// HttpSend runs concurrently; the sentCh receive serializes requests
		// so HttpRequest can be safely reused for the next iteration.
		go script.HttpRequest.HttpSend(response, sentCh)
		<-sentCh
		script.ScriptResponse = append(script.ScriptResponse, &ScriptResponse{
			Name: v.Get("data.name").String(),
			Response: <-response,
		})
	}
}
// GetResponse marshals the collected script responses to JSON.
func (script *Script) GetResponse() (vc []byte, err error) {
	return json.Marshal(script.ScriptResponse)
}
|
package server
import (
"github.com/julienschmidt/httprouter"
)
// middleware wraps h with each of the given middleware functions in order.
// Because every wrapper encloses the previous result, the last middleware in
// the list ends up outermost, i.e. it runs first on an incoming request.
func middleware(h httprouter.Handle, middleware ...func(httprouter.Handle) httprouter.Handle) httprouter.Handle {
	for _, mw := range middleware {
		h = mw(h)
	}
	return h
}
|
package kail
import (
"fmt"
"io"
"github.com/fatih/color"
"encoding/json"
"bytes"
)
var (
	// prefixColor renders the "<ns>/<pod>[<container>]: " prefix in bold white.
	prefixColor = color.New(color.FgHiWhite, color.Bold)
)
// Writer renders kail events, either to its own output (Print) or to an
// arbitrary io.Writer (Fprint).
type Writer interface {
	Print(event Event) error
	Fprint(w io.Writer, event Event) error
}
// NewWriter returns a Writer that writes to out; when jsonPP is set, log
// payloads that parse as JSON are pretty-printed.
func NewWriter(out io.Writer, jsonPP bool) Writer {
	return &writer{
		out: out,
		jsonPP: jsonPP,
	}
}
type writer struct {
	out io.Writer
	jsonPP bool // pretty-print JSON log payloads when true
}
// Print writes ev to the writer's configured output.
func (w *writer) Print(ev Event) error {
	return w.Fprint(w.out, ev)
}
// Fprint writes ev to out as a colored source prefix, a ": " separator, the
// event's log payload (pretty-printed when jsonPP is enabled and the payload
// is valid JSON), and a guaranteed trailing newline.
func (w *writer) Fprint(out io.Writer, ev Event) error {
	if _, err := prefixColor.Fprint(out, w.prefix(ev)); err != nil {
		return err
	}
	if _, err := prefixColor.Fprint(out, ": "); err != nil {
		return err
	}
	line := ev.Log()
	if w.jsonPP {
		// Keep the raw line when the payload is not valid JSON.
		if pretty := tryJsonPrettyPrint(line); pretty != nil {
			line = pretty
		}
	}
	if _, err := out.Write(line); err != nil {
		return err
	}
	// Ensure the output ends with a newline.
	if n := len(line); n == 0 || line[n-1] != '\n' {
		if _, err := out.Write([]byte("\n")); err != nil {
			return err
		}
	}
	return nil
}
// prefix formats the event source as "namespace/name[container]".
func (w *writer) prefix(ev Event) string {
	return fmt.Sprintf("%v/%v[%v]",
		ev.Source().Namespace(),
		ev.Source().Name(),
		ev.Source().Container())
}
// tryJsonPrettyPrint indents o with two-space indentation when it is valid
// JSON; it returns nil when o does not parse.
func tryJsonPrettyPrint(o []byte) []byte {
	var buf bytes.Buffer
	if err := json.Indent(&buf, o, "", "  "); err != nil {
		return nil
	}
	return buf.Bytes()
}
|
package cmds
import (
"encoding/json"
"fmt"
"os"
"os/exec"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"github.com/google/go-github/v28/github"
"github.com/peterbourgon/diskv"
"github.com/spf13/cobra"
"github.com/alexec/github-toolkit/cmd/ght/util"
)
// NewReleaseNoteCmd returns a cobra command that generates a Markdown release
// note (enhancements, bug fixes, other changes, pull requests, contributors)
// for the commits in a Git revision range, classifying commits via the GitHub
// issues they reference.
//
// GitHub API responses are cached on disk under /tmp/relnote/<owner>/<repo>;
// pass --cache=false to bypass reads from the cache.
func NewReleaseNoteCmd() *cobra.Command {
	var repo githubRepo
	cache := true
	var cmd = &cobra.Command{
		Use:   "relnote REVISION_RANGE",
		Short: "Create release note based on Github issues.",
		Example: ` # Create the note:
ACCESS_TOKEN=7eebf... ght relnote v1.3.0-rc3..v1.3.0-rc4`,
		Run: func(cmd *cobra.Command, args []string) {
			if len(args) != 1 {
				cmd.HelpFunc()(cmd, args)
				os.Exit(1)
			}
			revisionRange := args[0]
			ctx, client := newClient(repo, cmd)
			// Disk cache for GitHub API responses, keyed "commit.<sha>" and
			// "issue.<number>".
			base := filepath.Join("/tmp", "relnote", repo.owner, repo.repo)
			_ = os.MkdirAll(base, 0777)
			diskCache := diskv.New(diskv.Options{
				BasePath:     base,
				Transform:    func(s string) []string { return []string{} },
				CacheSizeMax: 1024 * 1024,
			})
			output, err := exec.Command("git", "log", "--format=%H", revisionRange, "--", ".").Output()
			util.Check(err)
			var issues []int
			contributors := map[string]int{}
			var other []string
			for _, sha := range strings.Split(string(output), "\n") {
				if sha == "" {
					continue
				}
				key := "commit." + sha
				data, err := diskCache.Read(key)
				commit := &github.Commit{}
				if cache && err == nil {
					util.Check(json.Unmarshal(data, commit))
				} else {
					commit, _, err = client.Git.GetCommit(ctx, repo.owner, repo.repo, sha)
					util.Check(err)
					marshal, err := json.Marshal(commit)
					util.Check(err)
					util.Check(diskCache.Write(key, marshal))
				}
				// Extract referenced issues from the commit subject; commits
				// without issue references are listed verbatim under "Other".
				message := strings.SplitN(commit.GetMessage(), "\n", 2)[0]
				foundIssues := findIssues(message)
				if len(foundIssues) == 0 {
					other = append(other, message)
				} else {
					issues = append(issues, foundIssues...)
				}
				// Count the commit author as a contributor. Use the generated
				// nil-safe accessors instead of dereferencing Author.Name,
				// which panics when GitHub omits the author.
				if name := commit.GetAuthor().GetName(); name != "" {
					contributors[name]++
				}
			}
			done := make(map[int]bool)
			var enhancements []string
			var bugFixes []string
			var pullRequests []string
			// Drain the issue worklist, classifying each referenced issue once.
			for len(issues) > 0 {
				var id int
				id, issues = issues[len(issues)-1], issues[:len(issues)-1]
				if done[id] {
					continue
				}
				done[id] = true
				key := fmt.Sprintf("issue.%v", id)
				data, err := diskCache.Read(key)
				issue := &github.Issue{}
				// Honor the --cache flag here too, matching the commit branch
				// above (previously cached issues were always used).
				if cache && err == nil {
					util.Check(json.Unmarshal(data, issue))
				} else {
					issue, _, err = client.Issues.Get(ctx, repo.owner, repo.repo, id)
					util.Check(err)
					data, err := json.Marshal(issue)
					util.Check(err)
					util.Check(diskCache.Write(key, data))
				}
				labels := map[string]bool{}
				for _, l := range issue.Labels {
					labels[l.GetName()] = true
				}
				message := fmt.Sprintf("#%v %s", id, issue.GetTitle())
				switch {
				case issue.IsPullRequest():
					pullRequests = append(pullRequests, message)
				case labels["enhancement"]:
					enhancements = append(enhancements, message)
				case labels["bug"]:
					bugFixes = append(bugFixes, message)
				default:
					other = append(other, message)
				}
			}
			// printSection prints one sorted note section, skipping empty ones.
			printSection := func(title, bullet string, items []string) {
				if len(items) == 0 {
					return
				}
				fmt.Println(title)
				fmt.Println()
				sort.Strings(items)
				for _, i := range items {
					fmt.Printf("%s %s\n", bullet, i)
				}
				fmt.Println()
			}
			printSection("#### Enhancements", "*", enhancements)
			printSection("#### Bug Fixes", "-", bugFixes)
			printSection("#### Other", "-", other)
			printSection("#### Pull Requests", "-", pullRequests)
			fmt.Println("#### Contributors")
			fmt.Println()
			var names []string
			for name := range contributors {
				names = append(names, name)
			}
			sort.Strings(names)
			for _, name := range names {
				fmt.Printf("* %s <!-- num=%v -->\n", name, contributors[name])
			}
		},
	}
	repo = gitHubRepo()
	// Bind the flag to the local variable so --cache=false actually disables
	// the cache (previously the flag's value was parsed but never read).
	cmd.Flags().BoolVar(&cache, "cache", true, "Use a cache")
	return cmd
}
// issueRefPattern matches GitHub issue references such as "#123". Compiled
// once at package scope rather than on every call.
var issueRefPattern = regexp.MustCompile("#[0-9]+")

// findIssues returns the numbers of all "#NNN" issue references in message,
// in order of appearance (duplicates preserved).
func findIssues(message string) []int {
	var issues []int
	for _, ref := range issueRefPattern.FindAllString(message, -1) {
		id, err := strconv.Atoi(strings.TrimPrefix(ref, "#"))
		util.Check(err)
		issues = append(issues, id)
	}
	return issues
}
|
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package validate
import (
"encoding/json"
"fmt"
"io/ioutil"
"path/filepath"
"testing"
intvalidate "github.com/go-swagger/go-swagger/internal/validate"
"github.com/go-swagger/go-swagger/spec"
"github.com/go-swagger/go-swagger/strfmt"
"github.com/stretchr/testify/assert"
)
func TestIssue52(t *testing.T) {
fp := filepath.Join("..", "fixtures", "bugs", "52", "swagger.json")
jstext, _ := ioutil.ReadFile(fp)
// as json schema
var sch spec.Schema
if assert.NoError(t, json.Unmarshal(jstext, &sch)) {
validator := intvalidate.NewSchemaValidator(spec.MustLoadSwagger20Schema(), nil, "", strfmt.Default)
res := validator.Validate(&sch)
assert.False(t, res.IsValid())
assert.EqualError(t, res.Errors[0], ".paths in body is required")
}
// as swagger spec
doc, err := spec.JSONSpec(fp)
if assert.NoError(t, err) {
validator := intvalidate.NewSpecValidator(doc.Schema(), strfmt.Default)
res, _ := validator.Validate(doc)
assert.False(t, res.IsValid())
assert.EqualError(t, res.Errors[0], ".paths in body is required")
}
}
func TestIssue53(t *testing.T) {
fp := filepath.Join("..", "fixtures", "bugs", "53", "noswagger.json")
jstext, _ := ioutil.ReadFile(fp)
// as json schema
var sch spec.Schema
if assert.NoError(t, json.Unmarshal(jstext, &sch)) {
validator := intvalidate.NewSchemaValidator(spec.MustLoadSwagger20Schema(), nil, "", strfmt.Default)
res := validator.Validate(&sch)
assert.False(t, res.IsValid())
assert.EqualError(t, res.Errors[0], ".swagger in body is required")
}
// as swagger spec
doc, err := spec.JSONSpec(fp)
if assert.NoError(t, err) {
validator := intvalidate.NewSpecValidator(doc.Schema(), strfmt.Default)
res, _ := validator.Validate(doc)
if assert.False(t, res.IsValid()) {
assert.EqualError(t, res.Errors[0], ".swagger in body is required")
}
}
}
// TestIssue62 checks that the fixture for issue 62 produces validation errors.
// NOTE(review): unlike the sibling tests, this one passes the swagger 2.0
// meta-schema (spec.MustLoadSwagger20Schema()) to NewSpecValidator instead of
// doc.Schema() — confirm whether that difference is intentional.
func TestIssue62(t *testing.T) {
	fp := filepath.Join("..", "fixtures", "bugs", "62", "swagger.json")
	// as swagger spec
	doc, err := spec.JSONSpec(fp)
	if assert.NoError(t, err) {
		validator := intvalidate.NewSpecValidator(spec.MustLoadSwagger20Schema(), strfmt.Default)
		res, _ := validator.Validate(doc)
		assert.NotEmpty(t, res.Errors)
		assert.True(t, res.HasErrors())
	}
}
// TestIssue63 asserts that the fixture for issue 63 validates cleanly.
func TestIssue63(t *testing.T) {
	fixture := filepath.Join("..", "fixtures", "bugs", "63", "swagger.json")
	doc, err := spec.JSONSpec(fixture)
	if !assert.NoError(t, err) {
		return
	}
	res, _ := intvalidate.NewSpecValidator(doc.Schema(), strfmt.Default).Validate(doc)
	assert.True(t, res.IsValid())
}
// TestIssue61_MultipleRefs asserts that a spec containing multiple $refs
// validates without any errors.
func TestIssue61_MultipleRefs(t *testing.T) {
	fixture := filepath.Join("..", "fixtures", "bugs", "61", "multiple-refs.json")
	doc, err := spec.JSONSpec(fixture)
	if !assert.NoError(t, err) {
		return
	}
	res, _ := intvalidate.NewSpecValidator(doc.Schema(), strfmt.Default).Validate(doc)
	assert.Empty(t, res.Errors)
	assert.True(t, res.IsValid())
}
// TestIssue61_ResolvedRef asserts that the unresolved-ref-for-name fixture
// validates without any errors.
func TestIssue61_ResolvedRef(t *testing.T) {
	fixture := filepath.Join("..", "fixtures", "bugs", "61", "unresolved-ref-for-name.json")
	doc, err := spec.JSONSpec(fixture)
	if !assert.NoError(t, err) {
		return
	}
	res, _ := intvalidate.NewSpecValidator(doc.Schema(), strfmt.Default).Validate(doc)
	assert.Empty(t, res.Errors)
	assert.True(t, res.IsValid())
}
// TestIssue123 asserts that the YAML fixture for issue 123 validates cleanly,
// attaching any validation errors to the test output for debugging.
func TestIssue123(t *testing.T) {
	fp := filepath.Join("..", "fixtures", "bugs", "123", "swagger.yml")
	// as swagger spec
	doc, err := spec.YAMLSpec(fp)
	if assert.NoError(t, err) {
		validator := intvalidate.NewSpecValidator(doc.Schema(), strfmt.Default)
		res, _ := validator.Validate(doc)
		// Use t.Log so errors show up in the test output (and stay hidden on
		// success) instead of printing unconditionally to stdout.
		for _, e := range res.Errors {
			t.Log(e)
		}
		assert.True(t, res.IsValid())
	}
}
|
/*
Copyright 2018 Cai Gwatkin
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package context
import (
"context"
"strings"
"testing"
go_testing "github.com/caigwatkin/go/testing"
)
// TestBackground checks that Background yields a context carrying the
// background correlation ID and a false test flag.
func TestBackground(t *testing.T) {
	ctx := Background()
	if got := CorrelationID(ctx); got != CorrelationIDBackground {
		t.Error(go_testing.Errorf(go_testing.Error{
			Unexpected: "CorrelationID(result)",
			Expected:   CorrelationIDBackground,
			Result:     got,
		}))
	}
	if got := Test(ctx); got {
		t.Error(go_testing.Errorf(go_testing.Error{
			Unexpected: "Test(result)",
			Expected:   false,
			Result:     got,
		}))
	}
}
// TestStartUp checks that StartUp yields a context carrying the start-up
// correlation ID and a false test flag.
func TestStartUp(t *testing.T) {
	ctx := StartUp()
	if got := CorrelationID(ctx); got != CorrelationIDStartUp {
		t.Error(go_testing.Errorf(go_testing.Error{
			Unexpected: "CorrelationID(result)",
			Expected:   CorrelationIDStartUp,
			Result:     got,
		}))
	}
	if got := Test(ctx); got {
		t.Error(go_testing.Errorf(go_testing.Error{
			Unexpected: "Test(result)",
			Expected:   false,
			Result:     got,
		}))
	}
}
// TestShutDown checks that ShutDown yields a context carrying the shut-down
// correlation ID and a false test flag.
func TestShutDown(t *testing.T) {
	ctx := ShutDown()
	if got := CorrelationID(ctx); got != CorrelationIDShutDown {
		t.Error(go_testing.Errorf(go_testing.Error{
			Unexpected: "CorrelationID(result)",
			Expected:   CorrelationIDShutDown,
			Result:     got,
		}))
	}
	if got := Test(ctx); got {
		t.Error(go_testing.Errorf(go_testing.Error{
			Unexpected: "Test(result)",
			Expected:   false,
			Result:     got,
		}))
	}
}
// TestNew exercises New with several parent contexts and checks that the
// result always carries a non-empty correlation ID ending in the parent's
// ID, and preserves the parent's test flag.
func TestNew(t *testing.T) {
	background := context.Background()
	pkgBackground := Background()
	pkgStartUp := StartUp()
	customized := context.WithValue(context.WithValue(context.Background(), keyCorrelationID, "customized"), keyTest, true)
	type want struct {
		correlationIDSuffix string
		test                bool
	}
	cases := []struct {
		desc  string
		input context.Context
		want
	}{
		{
			desc:  "background",
			input: background,
			want: want{
				correlationIDSuffix: CorrelationID(background),
				test:                Test(background),
			},
		},
		{
			desc:  "go_context background",
			input: pkgBackground,
			want: want{
				correlationIDSuffix: CorrelationID(pkgBackground),
				test:                Test(pkgBackground),
			},
		},
		{
			desc:  "go_context start up",
			input: pkgStartUp,
			want: want{
				correlationIDSuffix: CorrelationID(pkgStartUp),
				test:                Test(pkgStartUp),
			},
		},
		{
			desc:  "customized",
			input: customized,
			want: want{
				correlationIDSuffix: "customized",
				test:                true,
			},
		},
		{
			desc:  "nil",
			input: nil,
			want: want{
				correlationIDSuffix: "",
				test:                false,
			},
		},
	}
	for i, c := range cases {
		result := New(c.input)
		gotID := CorrelationID(result)
		if gotID == "" {
			t.Error(go_testing.Errorf(go_testing.Error{
				Unexpected: "CorrelationID(result)",
				Desc:       c.desc,
				At:         i,
				Expected:   "NOT EMPTY STRING",
				Result:     gotID,
			}))
		}
		if !strings.HasSuffix(gotID, c.want.correlationIDSuffix) {
			t.Error(go_testing.Errorf(go_testing.Error{
				Unexpected: "CorrelationID(result) suffix",
				Desc:       c.desc,
				At:         i,
				Expected:   c.want.correlationIDSuffix,
				Result:     gotID,
			}))
		}
		if gotTest := Test(result); gotTest != c.want.test {
			t.Error(go_testing.Errorf(go_testing.Error{
				Unexpected: "Test(result)",
				Desc:       c.desc,
				At:         i,
				Expected:   c.want.test,
				Result:     gotTest,
			}))
		}
	}
}
// TestCorrelationID checks extraction of the correlation ID from a context,
// including empty, wrongly-typed and missing values.
func TestCorrelationID(t *testing.T) {
	cases := []struct {
		desc  string
		input context.Context
		want  string
	}{
		{"correlationID", context.WithValue(context.Background(), keyCorrelationID, "correlationID"), "correlationID"},
		{"empty", context.WithValue(context.Background(), keyCorrelationID, ""), ""},
		{"unexpected type", context.WithValue(context.Background(), keyCorrelationID, true), ""},
		{"none", context.Background(), ""},
	}
	for i, c := range cases {
		if got := CorrelationID(c.input); got != c.want {
			t.Error(go_testing.Errorf(go_testing.Error{
				Unexpected: "result",
				Desc:       c.desc,
				At:         i,
				Expected:   c.want,
				Result:     got,
			}))
		}
	}
}
// TestWithCorrelationID checks that WithCorrelationID stores (and overrides)
// the correlation ID on the returned context.
func TestWithCorrelationID(t *testing.T) {
	cases := []struct {
		desc          string
		ctx           context.Context
		correlationID string
		want          string
	}{
		{"none", context.Background(), "correlationID", "correlationID"},
		{"override", context.WithValue(context.Background(), keyCorrelationID, "xxxxx"), "correlationID", "correlationID"},
	}
	for i, c := range cases {
		result := WithCorrelationID(c.ctx, c.correlationID)
		v, ok := result.Value(keyCorrelationID).(string)
		if !ok {
			t.Error(go_testing.Errorf(go_testing.Error{
				Unexpected: "result.Value(keyCorrelationID).(string) ok",
				Desc:       c.desc,
				At:         i,
				Expected:   "exists",
				Result:     nil,
			}))
			continue
		}
		if v != c.want {
			t.Error(go_testing.Errorf(go_testing.Error{
				Unexpected: "result.Value(keyCorrelationID).(string)",
				Desc:       c.desc,
				At:         i,
				Expected:   c.want,
				Result:     v,
			}))
		}
	}
}
// TestTest checks extraction of the test flag from a context, including
// wrongly-typed and missing values.
func TestTest(t *testing.T) {
	cases := []struct {
		desc  string
		input context.Context
		want  bool
	}{
		{"true", context.WithValue(context.Background(), keyTest, true), true},
		{"false", context.WithValue(context.Background(), keyTest, false), false},
		{"unexpected type", context.WithValue(context.Background(), keyTest, "true"), false},
		{"none", context.Background(), false},
	}
	for i, c := range cases {
		if got := Test(c.input); got != c.want {
			t.Error(go_testing.Errorf(go_testing.Error{
				Unexpected: "result",
				Desc:       c.desc,
				At:         i,
				Expected:   c.want,
				Result:     got,
			}))
		}
	}
}
// TestWithTest checks that WithTest stores (and overrides) the test flag on
// the returned context.
func TestWithTest(t *testing.T) {
	cases := []struct {
		desc string
		ctx  context.Context
		test bool
		want bool
	}{
		{"false", context.Background(), false, false},
		{"true", context.Background(), true, true},
		{"override", context.WithValue(context.Background(), keyTest, true), false, false},
	}
	for i, c := range cases {
		result := WithTest(c.ctx, c.test)
		v, ok := result.Value(keyTest).(bool)
		if !ok {
			t.Error(go_testing.Errorf(go_testing.Error{
				Unexpected: "result.Value(keyTest).(bool) ok",
				Desc:       c.desc,
				At:         i,
				Expected:   "exists",
				Result:     nil,
			}))
			continue
		}
		if v != c.want {
			t.Error(go_testing.Errorf(go_testing.Error{
				Unexpected: "result.Value(keyTest).(bool)",
				Desc:       c.desc,
				At:         i,
				Expected:   c.want,
				Result:     v,
			}))
		}
	}
}
|
package main
import (
"bytes"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"os/exec"
"strings"
"time"
"github.com/gorilla/mux"
)
// cache holds the --cache flag value: how long a built binary stays fresh
// before it is rebuilt (a time.ParseDuration string). Set in main.
var cache *string
// ProjectHandler dispatches project requests: POSTs are treated as GitHub
// webhooks, anything else returns the README snippet.
func ProjectHandler(w http.ResponseWriter, r *http.Request) {
	switch r.Method {
	case "POST":
		GitHubWebHookHandler(w, r)
	default:
		FetchReadMeHandler(w, r)
	}
}
// GithubWebResponse is the subset of a GitHub push webhook payload that this
// server consumes: only the pushed git ref.
type GithubWebResponse struct {
	Ref string `json:"ref"`
}
// GitHubWebHookHandler handles a GitHub push webhook. When the push targets
// the master branch, the cached checkout for the project is removed so the
// next binary request triggers a fresh build.
func GitHubWebHookHandler(w http.ResponseWriter, r *http.Request) {
	/* extract our vars out */
	vars := mux.Vars(r)
	wh := &GithubWebResponse{}
	defer r.Body.Close()
	// Previously the read error was silently discarded; report it and stop
	// rather than unmarshalling a partial body.
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		fmt.Println(err.Error())
		return
	}
	if err := json.Unmarshal(body, wh); err != nil {
		fmt.Println(err.Error())
		return
	}
	if strings.HasSuffix(wh.Ref, "/master") {
		/* used for our project name */
		project := "github.com/" + vars["username"] + "/" + vars["project"]
		directoryName := "/tmp/" + project
		os.RemoveAll(directoryName)
		fmt.Println("Resetting: ", directoryName)
	}
}
// FetchReadMeHandler writes a Markdown snippet with 386/amd64 download links
// for each supported OS, suitable for pasting into a README.
func FetchReadMeHandler(w http.ResponseWriter, r *http.Request) {
	/* extract our vars out */
	vars := mux.Vars(r)
	scheme := "http://"
	if r.TLS != nil {
		scheme = "https://"
	}
	projectURL := scheme + r.Host + "/" + vars["username"] + "/" + vars["project"]
	var b strings.Builder
	b.WriteString("## Binaries\n")
	for _, osName := range []string{"mac", "windows", "linux"} {
		b.WriteString(" [386](" + projectURL + "/" + osName + "/386) | [amd64](" + projectURL + "/" + osName + "/amd64)")
		b.WriteString("\n\n")
	}
	w.Write([]byte(b.String()))
}
func FetchBinaryHandler(w http.ResponseWriter, r *http.Request) {
cache := "90s"
/* extract our vars out */
vars := mux.Vars(r)
/* mac? really should be darwin ... dumb */
if vars["os"] == "mac" {
vars["os"] = "darwin"
}
/* used for our project name */
project := "github.com/" + vars["username"] + "/" + vars["project"]
/* filename, used to download */
downloadFileName := vars["project"]
directoryName := "/tmp/" + project
actualFileName := directoryName + "/" + vars["project"] + "_" + vars["os"] + "_" + vars["arch"]
if vars["os"] == "windows" {
actualFileName = actualFileName + ".exe"
downloadFileName = downloadFileName + ".exe"
}
/* does the folder exist? */
info, err := os.Stat(actualFileName)
dur, dur_err := time.ParseDuration(cache)
if dur_err != nil {
dur, _ = time.ParseDuration("60m")
}
if err != nil || (err == nil && time.Now().After(info.ModTime().Add(dur))) {
if dir_err := os.MkdirAll(directoryName, 0755); dir_err == nil {
_, project_err := exec.Command("bash", "-c", "go get -u "+project).Output()
version, _ := exec.Command("bash", "-c", "cd /go/src/"+project+" && git rev-list --count HEAD").Output()
commits, _ := exec.Command("bash", "-c", "cd /go/src/"+project+" && git rev-parse HEAD").Output()
if project_err == nil {
_, build_err := exec.Command("bash", "-c", "gox -ldflags \"-X main.Commit="+string(commits)+" -X main.Version=0.1."+string(version)+"\" --os="+vars["os"]+" --arch="+vars["arch"]+" --output="+strings.Replace(actualFileName, ".exe", "", -1)+" "+project).Output()
if build_err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte("error"))
return
}
} else {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte("invalid project"))
return
}
}
}
if data, read_err := ioutil.ReadFile(actualFileName); read_err == nil {
w.Header().Set("Content-Type", "application/octet-stream")
w.Header().Set("Content-Disposition", "attachment; filename="+downloadFileName)
w.Header().Set("Content-Transfer-Encoding", "binary")
w.Header().Set("Expires", "0")
http.ServeContent(w, r, actualFileName, time.Now(), bytes.NewReader(data))
return
} else {
fmt.Println("actual", actualFileName)
fmt.Println("download", downloadFileName)
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte("error2"))
return
}
}
// main parses the command-line flags, wires up the HTTP routes and starts
// the server.
func main() {
	port := flag.Int("port", 80, "Port")
	username := flag.String("username", "[A-Za-z0-9\\-\\_]+", "")
	cache = flag.String("cache", "0s", "Cache for how many minutes")
	flag.Parse()
	fmt.Println("go-dist")
	router := mux.NewRouter()
	projectPattern := "/{username:" + *username + "}/{project:[A-Za-z0-9\\-\\_]+}"
	/* Grab text for use in your README.md */
	router.HandleFunc(projectPattern, ProjectHandler)
	// Routes consist of a path and a handler function.
	router.HandleFunc(projectPattern+"/{os:mac|windows|linux}/{arch:amd64|arm|386}", FetchBinaryHandler)
	// Bind to a port and pass our router in
	log.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", *port), router))
}
|
package lexers
import (
"strings"
. "github.com/alecthomas/chroma/v2" // nolint
)
// HTTP is the HTTP session lexer, registered with chroma at package init.
// Message bodies are re-lexed according to the Content-Type header via
// httpBodyContentTypeLexer.
var HTTP = Register(httpBodyContentTypeLexer(MustNewLexer(
	&Config{
		Name: "HTTP",
		Aliases: []string{"http"},
		Filenames: []string{},
		MimeTypes: []string{},
		NotMultiline: true,
		DotAll: true,
	},
	httpRules,
)))
// httpRules defines the lexer states: "root" matches the request line or the
// status line, "headers" matches header (and continuation) lines and pushes
// "content" once the blank separator line is seen, and "content" hands the
// remaining body to httpContentBlock.
func httpRules() Rules {
	return Rules{
		"root": {
			{`(GET|POST|PUT|DELETE|HEAD|OPTIONS|TRACE|PATCH|CONNECT)( +)([^ ]+)( +)(HTTP)(/)([123](?:\.[01])?)(\r?\n|\Z)`, ByGroups(NameFunction, Text, NameNamespace, Text, KeywordReserved, Operator, LiteralNumber, Text), Push("headers")},
			{`(HTTP)(/)([123](?:\.[01])?)( +)(\d{3})( *)([^\r\n]*)(\r?\n|\Z)`, ByGroups(KeywordReserved, Operator, LiteralNumber, Text, LiteralNumber, Text, NameException, Text), Push("headers")},
		},
		"headers": {
			{`([^\s:]+)( *)(:)( *)([^\r\n]+)(\r?\n|\Z)`, EmitterFunc(httpHeaderBlock), nil},
			{`([\t ]+)([^\r\n]+)(\r?\n|\Z)`, EmitterFunc(httpContinuousHeaderBlock), nil},
			{`\r?\n`, Text, Push("content")},
		},
		"content": {
			{`.+`, EmitterFunc(httpContentBlock), nil},
		},
	}
}
// httpContentBlock emits the whole matched body text as one Generic token.
func httpContentBlock(groups []string, state *LexerState) Iterator {
	return Literator(Token{Generic, groups[0]})
}
// httpHeaderBlock emits the tokens of a "Name: value" header line: header
// name, spacing, colon, spacing, value and trailing whitespace/newline.
func httpHeaderBlock(groups []string, state *LexerState) Iterator {
	return Literator(
		Token{Name, groups[1]},
		Token{Text, groups[2]},
		Token{Operator, groups[3]},
		Token{Text, groups[4]},
		Token{Literal, groups[5]},
		Token{Text, groups[6]},
	)
}
// httpContinuousHeaderBlock emits the tokens of a folded (continuation)
// header line: leading whitespace, value and trailing whitespace/newline.
func httpContinuousHeaderBlock(groups []string, state *LexerState) Iterator {
	return Literator(
		Token{Text, groups[1]},
		Token{Literal, groups[2]},
		Token{Text, groups[3]},
	)
}
// httpBodyContentTypeLexer wraps lexer so that HTTP message bodies are
// re-lexed with a lexer chosen from the Content-Type header.
func httpBodyContentTypeLexer(lexer Lexer) Lexer { return &httpBodyContentTyper{lexer} }

// httpBodyContentTyper decorates an HTTP lexer with body content-type sniffing.
type httpBodyContentTyper struct{ Lexer }
// Tokenise tokenises text with the wrapped HTTP lexer, watching the token
// stream for a Content-Type header. When the body (a Generic token) arrives
// and a sub-lexer matches the recorded MIME type, the body is re-tokenised
// with that sub-lexer and its tokens are streamed after the HTTP tokens.
func (d *httpBodyContentTyper) Tokenise(options *TokeniseOptions, text string) (Iterator, error) { // nolint: gocognit
	var contentType string
	var isContentType bool
	var subIterator Iterator
	it, err := d.Lexer.Tokenise(options, text)
	if err != nil {
		return nil, err
	}
	return func() Token {
		token := it()
		if token == EOF {
			// Drain the body sub-lexer (if any) once the HTTP tokens end.
			if subIterator != nil {
				return subIterator()
			}
			return EOF
		}
		switch {
		case token.Type == Name && strings.ToLower(token.Value) == "content-type":
			{
				// The next Literal token is the Content-Type header value.
				isContentType = true
			}
		case token.Type == Literal && isContentType:
			{
				isContentType = false
				contentType = strings.TrimSpace(token.Value)
				// Strip any ";charset=..." style parameters.
				pos := strings.Index(contentType, ";")
				if pos > 0 {
					contentType = strings.TrimSpace(contentType[:pos])
				}
			}
		case token.Type == Generic && contentType != "":
			{
				lexer := MatchMimeType(contentType)
				// application/calendar+xml can be treated as application/xml
				// if there's not a better match.
				if lexer == nil && strings.Contains(contentType, "+") {
					slashPos := strings.Index(contentType, "/")
					plusPos := strings.LastIndex(contentType, "+")
					contentType = contentType[:slashPos+1] + contentType[plusPos+1:]
					lexer = MatchMimeType(contentType)
				}
				if lexer == nil {
					token.Type = Text
				} else {
					subIterator, err = lexer.Tokenise(nil, token.Value)
					if err != nil {
						panic(err)
					}
					// End the HTTP stream here; the sub-lexer's tokens are
					// emitted by the EOF branch above.
					return EOF
				}
			}
		}
		return token
	}, nil
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package youtube contains the test code for VideoCUJ.
package youtube
import (
"context"
"path/filepath"
"strings"
"time"
"chromiumos/tast/common/perf"
"chromiumos/tast/ctxutil"
"chromiumos/tast/errors"
"chromiumos/tast/local/arc"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/ash"
"chromiumos/tast/local/chrome/browser"
"chromiumos/tast/local/chrome/cuj"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/faillog"
"chromiumos/tast/local/chrome/uiauto/nodewith"
"chromiumos/tast/local/chrome/uiauto/role"
"chromiumos/tast/local/chrome/webutil"
"chromiumos/tast/local/input"
"chromiumos/tast/local/ui/cujrecorder"
"chromiumos/tast/testing"
)
// Test variant identifiers and the shared window title for the YouTube CUJ.
const (
	// YoutubeWeb indicates to test against Youtube web.
	YoutubeWeb = "YoutubeWeb"
	// YoutubeApp indicates to test against Youtube app.
	YoutubeApp = "YoutubeApp"
	// YoutubeWindowTitle indicates the title of the youtube web and app window.
	YoutubeWindowTitle = "YouTube"
)
// TestResources holds the cuj test resources passed in from main test case.
type TestResources struct {
	Cr        *chrome.Chrome             // Chrome session under test
	Tconn     *chrome.TestConn           // Test API connection to ash-chrome
	Bt        browser.Type               // browser type under test (ash or lacros)
	A         *arc.ARC                   // ARC session, used for the YouTube app variant
	Kb        *input.KeyboardEventWriter // keyboard used to drive the UI
	UIHandler cuj.UIActionHandler        // UI action helper (window/tab switching)
}
// TestParams holds the cuj test parameters passed in from main test case.
type TestParams struct {
	OutDir          string   // directory for logs, UI dumps and perf results
	App             string   // YoutubeWeb or YoutubeApp
	TabletMode      bool     // whether the DUT is in tablet mode
	Tier            cuj.Tier // test tier; Premium/Advanced select the premium video source
	ExtendedDisplay bool     // whether an extended display is attached
	CheckPIP        bool     // verify picture-in-picture behavior (app variant)
	TraceConfigPath string   // when non-empty, tracing is enabled with this config
	YoutubeApkPath  string   // APK path used to install the YouTube app
}
// VideoApp declares video operation. It abstracts over the YouTube web and
// YouTube Android app implementations so the test body can drive either.
type VideoApp interface {
	// Install installs the Youtube app with apk.
	Install(ctx context.Context) error
	// OpenAndPlayVideo opens a video.
	OpenAndPlayVideo(video VideoSrc) uiauto.Action
	// EnterFullScreen switches video to full screen.
	EnterFullScreen(ctx context.Context) error
	// ExitFullScreen exits Youtube video from full screen.
	ExitFullScreen(ctx context.Context) error
	// PauseAndPlayVideo verifies video playback.
	PauseAndPlayVideo(ctx context.Context) error
	// IsPlaying verifies video is playing.
	IsPlaying() uiauto.Action
	// Close closes the resources related to video.
	Close(ctx context.Context)
}
// VideoSrc struct defines video src for testing.
type VideoSrc struct {
	// URL is the address of the YouTube video to play.
	URL string
	// Title is the video's title.
	Title string
	// Quality is the string that test will look for in youtube
	// "Settings / Quality" menu to change video playback quality.
	Quality string
}
// basicVideoSrc lists the videos exercised by the basic tier of the test.
var basicVideoSrc = []VideoSrc{
	{
		URL:     cuj.YoutubeGoogleTVVideoURL,
		Title:   "Chris Paul | Watch With Me | Google TV",
		Quality: "1080p",
	},
	{
		URL:     cuj.YoutubeDeveloperKeynoteVideoURL,
		Title:   "Developer Keynote (Google I/O '21) - American Sign Language",
		Quality: "720p60",
	},
	{
		URL:     cuj.YoutubeStadiaGDCVideoURL,
		Title:   "Stadia GDC 2019 Gaming Announcement",
		Quality: "1080p60",
	},
}
// premiumVideoSrc lists the (4K) video exercised by the premium tier.
var premiumVideoSrc = []VideoSrc{
	{
		URL:     cuj.YoutubeStadiaGDCVideoURL,
		Title:   "Stadia GDC 2019 Gaming Announcement",
		Quality: "2160p60",
	},
}
// knownGoodVersions represents relatively stable versions of the YouTube app.
// NOTE(review): not referenced in this portion of the file — presumably used
// by the app install/version logic elsewhere; verify before removing.
var knownGoodVersions = []string{"16.35.38", "17.33.42"}
// Run runs the VideoCUJ test.
//
// It starts the requested browser, prepares the video surface (YouTube web or
// the ARC YouTube app), plays each selected video source while a CUJ recorder
// collects performance metrics, and finally saves the metrics (including
// browser start time) under param.OutDir.
func Run(ctx context.Context, resources TestResources, param TestParams) error {
	var (
		cr = resources.Cr
		tconn = resources.Tconn
		bt = resources.Bt
		a = resources.A
		kb = resources.Kb
		uiHandler = resources.UIHandler
		outDir = param.OutDir
		appName = param.App
		tabletMode = param.TabletMode
		tier = param.Tier
		extendedDisplay = param.ExtendedDisplay
		traceConfigPath = param.TraceConfigPath
		youtubeApkPath = param.YoutubeApkPath
	)
	testing.ContextLogf(ctx, "Run app appName: %s tabletMode: %t, extendedDisplay: %t", appName, tabletMode, extendedDisplay)
	tabChecker, err := cuj.NewTabCrashChecker(ctx, tconn)
	if err != nil {
		return errors.Wrap(err, "failed to create TabCrashChecker")
	}
	ui := uiauto.New(tconn)
	// Give 10 seconds to set initial settings. It is critical to ensure
	// cleanupSetting can be executed with a valid context so it has its
	// own cleanup context from other cleanup functions. This is to avoid
	// other cleanup functions executed earlier to use up the context time.
	cleanupSettingsCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 10*time.Second)
	defer cancel()
	cleanupSetting, err := cuj.InitializeSetting(ctx, tconn)
	if err != nil {
		return errors.Wrap(err, "failed to set initial settings")
	}
	defer cleanupSetting(cleanupSettingsCtx)
	testing.ContextLog(ctx, "Start to get browser start time")
	l, browserStartTime, err := cuj.GetBrowserStartTime(ctx, tconn, true, tabletMode, bt)
	if err != nil {
		return errors.Wrap(err, "failed to get browser start time")
	}
	// If lacros exists, close lacros finally.
	if l != nil {
		defer l.Close(ctx)
	}
	// Use the lacros browser when one was launched, otherwise ash-chrome.
	br := cr.Browser()
	if l != nil {
		br = l.Browser()
	}
	bTconn, err := br.TestAPIConn(ctx)
	if err != nil {
		return errors.Wrapf(err, "failed to create Test API connection for %v browser", bt)
	}
	videoSources := basicVideoSrc
	if tier == cuj.Premium || tier == cuj.Advanced {
		videoSources = premiumVideoSrc
	}
	// Give 5 seconds to clean up device objects connected to UI Automator server resources.
	cleanupDeviceCtx := ctx
	ctx, cancel = ctxutil.Shorten(ctx, 5*time.Second)
	defer cancel()
	d, err := a.NewUIDevice(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to create new ARC device")
	}
	defer func(ctx context.Context) {
		if d.Alive(ctx) {
			testing.ContextLog(ctx, "UI device is still alive")
			d.Close(ctx)
		}
	}(cleanupDeviceCtx)
	// Give 5 seconds to cleanup recorder.
	cleanupRecorderCtx := ctx
	ctx, cancel = ctxutil.Shorten(ctx, 5*time.Second)
	defer cancel()
	options := cujrecorder.NewPerformanceCUJOptions()
	recorder, err := cujrecorder.NewRecorder(ctx, cr, bTconn, a, options)
	if err != nil {
		return errors.Wrap(err, "failed to create a recorder")
	}
	defer recorder.Close(cleanupRecorderCtx)
	if err := cuj.AddPerformanceCUJMetrics(bt, tconn, bTconn, recorder); err != nil {
		return errors.Wrap(err, "failed to add metrics to recorder")
	}
	if traceConfigPath != "" {
		recorder.EnableTracing(outDir, traceConfigPath)
	}
	var videoApp VideoApp
	switch appName {
	case YoutubeWeb:
		videoApp = NewYtWeb(br, tconn, kb, extendedDisplay, ui, uiHandler)
	case YoutubeApp:
		videoApp = NewYtApp(tconn, kb, a, d, outDir, youtubeApkPath)
		if err := videoApp.Install(ctx); err != nil {
			return errors.Wrap(err, "failed to install Youtube app")
		}
	}
	// run plays one video source inside the recorder, taking care to dump UI
	// state and close the video surface on failure.
	run := func(ctx context.Context, videoSource VideoSrc) (retErr error) {
		// Give time to cleanup videoApp resources.
		cleanupResourceCtx := ctx
		ctx, cancel = ctxutil.Shorten(ctx, 5*time.Second)
		defer cancel()
		// Close the currently playing video and restart the new one.
		defer func(ctx context.Context) {
			if appName == YoutubeWeb {
				// Before closing the youtube site outside the recorder, dump the UI tree to capture a screenshot.
				faillog.DumpUITreeWithScreenshotOnError(ctx, outDir, func() bool { return retErr != nil }, cr, "ui_dump")
				if bt == browser.TypeLacros {
					// For lacros, leave a new tab to keep the browser alive for further testing.
					if err := browser.ReplaceAllTabsWithSingleNewTab(ctx, bTconn); err != nil {
						testing.ContextLog(ctx, "Failed to keep new tab: ", err)
					}
				} else {
					videoApp.Close(ctx)
				}
			}
		}(cleanupResourceCtx)
		return recorder.Run(ctx, func(ctx context.Context) (retErr error) {
			// Give time to dump arc UI tree.
			cleanupCtx := ctx
			ctx, cancel = ctxutil.Shorten(ctx, 15*time.Second)
			defer cancel()
			defer func(ctx context.Context) {
				// Make sure to close the arc UI device before calling the function. Otherwise uiautomator might have errors.
				if appName == YoutubeApp && retErr != nil {
					if err := d.Close(ctx); err != nil {
						testing.ContextLog(ctx, "Failed to close ARC UI device: ", err)
					}
					a.DumpUIHierarchyOnError(ctx, filepath.Join(outDir, "arc"), func() bool { return retErr != nil })
				}
				if appName == YoutubeApp {
					faillog.DumpUITreeWithScreenshotOnError(ctx, outDir, func() bool { return retErr != nil }, cr, "ui_dump")
					videoApp.Close(ctx)
				}
			}(cleanupCtx)
			return videoScenario(ctx, resources, param, br, videoApp, videoSource, tabChecker)
		})
	}
	for _, videoSource := range videoSources {
		if err := run(ctx, videoSource); err != nil {
			return errors.Wrapf(err, "failed to run %q video playback", appName)
		}
	}
	pv := perf.NewValues()
	// We'll collect Browser.StartTime for both YouTube-Web and YouTube-App
	pv.Set(perf.Metric{
		Name: "Browser.StartTime",
		Unit: "ms",
		Direction: perf.SmallerIsBetter,
		Multiple: true,
	}, float64(browserStartTime.Milliseconds()))
	if appName == YoutubeApp {
		// NOTE(review): appStartTime is not declared in this function —
		// presumably a package-level value set during app launch; verify.
		pv.Set(perf.Metric{
			Name: "Apps.StartTime",
			Unit: "ms",
			Direction: perf.SmallerIsBetter,
		}, float64(appStartTime.Milliseconds()))
	}
	// Use a short timeout value so it can return fast in case of failure.
	recordCtx, cancel := context.WithTimeout(ctx, time.Minute)
	defer cancel()
	if err := recorder.Record(recordCtx, pv); err != nil {
		return errors.Wrap(err, "failed to record the performance metrics")
	}
	if err := pv.Save(outDir); err != nil {
		return errors.Wrap(err, "failed to save performance metrics")
	}
	if err := recorder.SaveHistograms(outDir); err != nil {
		return errors.Wrap(err, "failed to save histogram raw data")
	}
	return nil
}
// videoScenario drives the core video CUJ: it opens and plays a video in the
// given app, enters fullscreen, opens Gmail in a browser tab, optionally
// verifies the YouTube-app PiP window, moves windows between displays, exits
// fullscreen and finally checks that no tab renderer crashed.
func videoScenario(ctx context.Context, resources TestResources, param TestParams, br *browser.Browser,
	videoApp VideoApp, videoSrc VideoSrc, tabChecker *cuj.TabCrashChecker) error {
	var (
		appName         = param.App
		extendedDisplay = param.ExtendedDisplay
		checkPIP        = param.CheckPIP
		uiHandler       = resources.UIHandler
		tconn           = resources.Tconn
	)
	// openGmailWeb activates a lacros browser window if present, opens Gmail
	// in a new tab and dismisses the optional YouTube "Got it" prompt.
	openGmailWeb := func(ctx context.Context) (*chrome.Conn, error) {
		// If there's a lacros browser, bring it to active.
		lacrosWin, err := ash.FindWindow(ctx, tconn, func(w *ash.Window) bool {
			return w.WindowType == ash.WindowTypeLacros
		})
		if err != nil && err != ash.ErrWindowNotFound {
			return nil, errors.Wrap(err, "failed to find lacros window")
		}
		if err == nil {
			if err := lacrosWin.ActivateWindow(ctx, tconn); err != nil {
				return nil, errors.Wrap(err, "failed to activate lacros window")
			}
		}
		conn, err := uiHandler.NewChromeTab(ctx, br, cuj.GmailURL, true)
		if err != nil {
			return conn, errors.Wrap(err, "failed to open gmail web page")
		}
		if err := webutil.WaitForQuiescence(ctx, conn, 2*time.Minute); err != nil {
			return conn, errors.Wrap(err, "failed to wait for gmail page to finish loading")
		}
		ui := uiauto.New(tconn)
		// YouTube sometimes pops up a prompt to notice users how to operate YouTube
		// if there're new features. Dismiss prompt if it exists.
		// Bug fix: the original code composed this action but never invoked
		// it, so the prompt was never actually dismissed.
		gotItPrompt := nodewith.Name("Got it").Role(role.Button)
		if err := uiauto.IfSuccessThen(
			ui.WaitUntilExists(gotItPrompt),
			uiHandler.ClickUntil(
				gotItPrompt,
				ui.WithTimeout(2*time.Second).WaitUntilGone(gotItPrompt),
			),
		)(ctx); err != nil {
			return conn, errors.Wrap(err, "failed to dismiss the YouTube prompt")
		}
		return conn, nil
	}
	if err := videoApp.OpenAndPlayVideo(videoSrc)(ctx); err != nil {
		return errors.Wrapf(err, "failed to open %s", appName)
	}
	// Play video at full screen.
	if err := videoApp.EnterFullScreen(ctx); err != nil {
		return errors.Wrap(err, "failed to play video in fullscreen")
	}
	// After entering full screen, it must be in playback state.
	// This will make sure to switch to pip mode.
	if err := uiauto.Retry(3, videoApp.IsPlaying())(ctx); err != nil {
		return errors.Wrap(err, "failed to verify video is playing")
	}
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 5*time.Second)
	defer cancel()
	// Open Gmail web.
	testing.ContextLog(ctx, "Open Gmail web")
	gConn, err := openGmailWeb(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to open Gmail website")
	}
	// Defers run LIFO: CloseTarget (with the longer cleanup context) runs
	// before Close.
	defer gConn.Close()
	defer gConn.CloseTarget(cleanupCtx)
	ytApp, ok := videoApp.(*YtApp)
	// Only do PiP testing for YT APP and when logged in as premium user.
	if ok && checkPIP && ytApp.isPremiumAccount() {
		if err = ytApp.checkYoutubeAppPIP(ctx); err != nil {
			return errors.Wrap(err, "youtube app smaller video preview window is not shown")
		}
	}
	// Switch back to video playing.
	if appName == YoutubeApp {
		if err := uiHandler.SwitchToAppWindow("YouTube")(ctx); err != nil {
			return errors.Wrap(err, "failed to switch to YouTube app")
		}
	} else {
		if err := uiHandler.SwitchWindow()(ctx); err != nil {
			return errors.Wrap(err, "failed to switch back to video playing")
		}
	}
	// Pause and resume video playback.
	if err := videoApp.PauseAndPlayVideo(ctx); err != nil {
		return errors.Wrap(err, "failed to pause and play video")
	}
	if extendedDisplay {
		if err := moveGmailWindow(ctx, tconn, resources); err != nil {
			return errors.Wrap(err, "failed to move Gmail window between main display and extended display")
		}
		if appName == YoutubeWeb {
			if err := moveYTWebWindow(ctx, tconn, resources); err != nil {
				return errors.Wrap(err, "failed to move YT Web window to internal display")
			}
		}
	}
	if err := videoApp.ExitFullScreen(ctx); err != nil {
		return errors.Wrap(err, "failed to exit full screen")
	}
	// Before recording the metrics, check if there is any tab crashed.
	if err := tabChecker.Check(ctx); err != nil {
		return errors.Wrap(err, "tab renderer crashed")
	}
	return nil
}
// waitWindowStateFullscreen blocks until a window whose title contains
// winTitle reaches the fullscreen state, or fails after a 10s poll timeout.
func waitWindowStateFullscreen(ctx context.Context, tconn *chrome.TestConn, winTitle string) error {
	testing.ContextLog(ctx, "Check if the window is in fullscreen state")
	isFullscreen := func(w *ash.Window) bool {
		return strings.Contains(w.Title, winTitle) && w.State == ash.WindowStateFullscreen
	}
	opts := &testing.PollOptions{Timeout: 10 * time.Second}
	if err := ash.WaitForCondition(ctx, tconn, isFullscreen, opts); err != nil {
		return errors.Wrap(err, "failed to wait for fullscreen")
	}
	return nil
}
// waitWindowStateExitFullscreen blocks until a window whose title contains
// winTitle is no longer fullscreen, or fails after a 10s poll timeout.
func waitWindowStateExitFullscreen(ctx context.Context, tconn *chrome.TestConn, winTitle string) error {
	testing.ContextLog(ctx, "Check if the window is in full screen state")
	notFullscreen := func(w *ash.Window) bool {
		return strings.Contains(w.Title, winTitle) && w.State != ash.WindowStateFullscreen
	}
	opts := &testing.PollOptions{Timeout: 10 * time.Second}
	if err := ash.WaitForCondition(ctx, tconn, notFullscreen, opts); err != nil {
		return errors.Wrap(err, "failed to wait for exit from full screen")
	}
	return nil
}
// getFirstWindowID returns the ID of the first window known to ash.
// If more than one window exists, all windows are logged for debugging and
// the first one is still returned. If no window exists, an error is returned
// instead of panicking on an out-of-range index (the original code indexed
// all[0] unconditionally).
func getFirstWindowID(ctx context.Context, tconn *chrome.TestConn) (int, error) {
	all, err := ash.GetAllWindows(ctx, tconn)
	if err != nil {
		return -1, errors.Wrap(err, "failed to get all windows")
	}
	if len(all) == 0 {
		return -1, errors.New("expected at least 1 window, got 0")
	}
	if len(all) != 1 {
		for _, win := range all {
			testing.ContextLogf(ctx, "%+v", *win)
		}
		testing.ContextLogf(ctx, "Expect 1 window, got %d", len(all))
	}
	return all[0].ID, nil
}
// moveGmailWindow switches Gmail to the extended display and switches back to internal display.
// It first brings the Gmail window to the front via SwitchWindow, then moves
// it to the external display, holds it there briefly, and moves it back.
func moveGmailWindow(ctx context.Context, tconn *chrome.TestConn, testRes TestResources) error {
	return uiauto.Combine("switch to gmail and move it between two displays",
		testRes.UIHandler.SwitchWindow(),
		cuj.SwitchWindowToDisplay(ctx, tconn, testRes.Kb, true), // Move to external display.
		uiauto.Sleep(2*time.Second),                             // Keep the window in external display for 2 seconds.
		cuj.SwitchWindowToDisplay(ctx, tconn, testRes.Kb, false), // Move to internal display.
	)(ctx)
}
// moveYTWebWindow switches Youtube Web to the internal display.
// It brings the YouTube Web window to the front via SwitchWindow and then
// moves it onto the internal display.
func moveYTWebWindow(ctx context.Context, tconn *chrome.TestConn, testRes TestResources) error {
	return uiauto.Combine("switch to YT Web and move it to internal display",
		testRes.UIHandler.SwitchWindow(),
		cuj.SwitchWindowToDisplay(ctx, tconn, testRes.Kb, false), // Move to internal display.
	)(ctx)
}
|
package main
import (
"fmt"
"testing"
)
// TestCalc verifies Calc over the four basic operators using table-driven
// subtests. Subtest names match the original hand-written cases.
func TestCalc(t *testing.T) {
	assertCorrectMessage := func(t *testing.T, got, want float64) {
		t.Helper()
		if got != want {
			t.Errorf("got %.2f want %.2f", got, want)
		}
	}
	cases := []struct {
		name     string
		l        float64
		operator string
		r        float64
		want     float64
	}{
		{"5 + 2", 5.0, "+", 2.0, 7.0},
		{"-8 + 3", -8.0, "+", 3.0, -5.0},
		{"135 - 2.5", 135.0, "-", 2.5, 132.5},
		// Bug fix: the original expected 23.0 here, but 4 * 5 == 20.
		{"4 * 5", 4.0, "*", 5.0, 20.0},
		{"240 / 12", 240.0, "/", 12.0, 20.0},
		{"4 * 7", 4.0, "*", 7.0, 28.0},
	}
	var op Operate
	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			fmt.Println("Testing : " + t.Name())
			op.Operand_L = tc.l
			op.Operator = tc.operator
			op.Operand_R = tc.r
			assertCorrectMessage(t, Calc(op), tc.want)
		})
	}
}
|
package sdplugin
import (
"encoding/json"
)
// Sender can send message to the StreamDeck app.
// Each method maps to one outgoing event of the Stream Deck websocket
// protocol; context identifies the action instance the event applies to.
type Sender interface {
	// SetState switches the action instance to the given state index.
	SetState(context string, state int) error
	// ShowAlert flashes the alert icon on the action's key.
	ShowAlert(context string) error
	// ShowOk flashes the OK checkmark on the action's key.
	ShowOk(context string) error
	// SetSettings persists a JSON-serializable payload for the action instance.
	SetSettings(context string, payload interface{}) error
	// SendToPropertyInspector forwards a payload to the action's property inspector.
	SendToPropertyInspector(context string, action string, payload interface{}) error
	// SetTitle changes the key title; target selects software/hardware display.
	SetTitle(context string, title string, target string) error
	// SetImage changes the key image; target selects software/hardware display.
	SetImage(context string, image string, target string) error
	// SwitchToProfile switches the given device to the named profile.
	SwitchToProfile(context string, device string, profile string) error
	// OpenURL opens the URL in the default browser.
	OpenURL(url string) error
}
// sendMessage serializes v as JSON and writes it to the websocket connection.
// The mutex serializes writers: gorilla-style websocket connections support
// only one concurrent writer, so all outgoing events funnel through here.
func (p *Plugin) sendMessage(v interface{}) error {
	p.connSendMutex.Lock()
	defer p.connSendMutex.Unlock()
	return p.conn.WriteJSON(v)
}
// SetState switches the action instance identified by context to the given
// state index.
func (p *Plugin) SetState(context string, state int) error {
	msg := SetStateEventMessage{
		Event:   "setState",
		Context: context,
		Payload: SetStatePayload{State: state},
	}
	return p.sendMessage(&msg)
}
// ShowAlert flashes the alert icon on the action button identified by context.
func (p *Plugin) ShowAlert(context string) error {
	msg := ShowNotifyEventMessage{
		Event:   "showAlert",
		Context: context,
	}
	return p.sendMessage(&msg)
}
// ShowOk flashes the OK checkmark on the action button identified by context.
func (p *Plugin) ShowOk(context string) error {
	msg := ShowNotifyEventMessage{
		Event:   "showOk",
		Context: context,
	}
	return p.sendMessage(&msg)
}
// SetSettings stores the given payload as the persistent settings of the
// action instance identified by context. The payload is JSON-serialized
// before being sent.
func (p *Plugin) SetSettings(context string, payload interface{}) error {
	data, err := json.Marshal(payload)
	if err != nil {
		return err
	}
	msg := SetSettingsEventMessage{
		Event:   "setSettings",
		Context: context,
		Payload: data,
	}
	return p.sendMessage(&msg)
}
// SendToPropertyInspector forwards a JSON-serializable payload to the
// property inspector of the given action instance.
func (p *Plugin) SendToPropertyInspector(context string, action string, payload interface{}) error {
	data, err := json.Marshal(payload)
	if err != nil {
		return err
	}
	msg := SendToPropertyInspectorEventMessage{
		Event:   "sendToPropertyInspector",
		Context: context,
		Action:  action,
		Payload: data,
	}
	return p.sendMessage(&msg)
}
// SetTitle changes the title of the action instance identified by context.
// Target defines if the title should be shown in software, on hardware or in
// both places.
func (p *Plugin) SetTitle(context string, title string, target string) error {
	msg := SetTitleEventMessage{
		Event:   "setTitle",
		Context: context,
		Payload: SetTitlePayload{
			Title:  title,
			Target: target,
		},
	}
	return p.sendMessage(&msg)
}
// SetImage changes the image of the action instance identified by context.
// The image is expected as a base64-encoded string (per the Stream Deck SDK).
// Target defines if the image should be shown in software, on hardware or in
// both places.
func (p *Plugin) SetImage(context string, image string, target string) error {
	msg := SetImageEventMessage{
		Event:   "setImage",
		Context: context,
		Payload: SetImagePayload{
			Image:  image,
			Target: target,
		},
	}
	return p.sendMessage(&msg)
}
// SwitchToProfile asks the Stream Deck app to switch to the given profile name.
// NOTE(review): the device parameter is accepted but not included in the
// outgoing message here — verify whether SwitchToProfileEventMessage carries
// the device ID elsewhere, since the SDK's switchToProfile event requires one.
func (p *Plugin) SwitchToProfile(context string, device string, profile string) error {
	return p.sendMessage(&SwitchToProfileEventMessage{
		Event:   "switchToProfile",
		Context: context,
		Payload: SwitchToProfilePayload{
			Profile: profile,
		},
	})
}
// OpenURL asks the Stream Deck app to open the given URL in the default
// browser.
func (p *Plugin) OpenURL(url string) error {
	msg := OpenURLEventMessage{
		Event:   "openUrl",
		Payload: OpenURLPayload{URL: url},
	}
	return p.sendMessage(&msg)
}
|
package hoist_test
import (
"testing"
"github.com/hoistup/hoist-go/hoist"
"github.com/matryer/is"
)
// TestNewService checks that a freshly created service exports its name and
// an empty function table.
func TestNewService(t *testing.T) {
	is := is.New(t)
	const name = "abc"
	got := hoist.NewService(name).Export()
	want := &hoist.ExportedService{
		Name:      name,
		Functions: map[string]*hoist.ExportedFunction{},
	}
	is.Equal(got, want)
}
|
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package master
import (
"context"
"github.com/google/gapid/test/robot/search"
"google.golang.org/grpc"
xctx "golang.org/x/net/context"
)
// server adapts a Master implementation to the generated gRPC ServiceServer
// interface.
type server struct {
	master Master
	// restart is not read or written anywhere in this file.
	// NOTE(review): confirm it is used elsewhere or remove it.
	restart bool
}
// Serve wraps a Master in a grpc server.
// It registers the service on grpcServer and returns immediately; ctx is
// currently unused and the returned error is always nil.
func Serve(ctx context.Context, grpcServer *grpc.Server, m Master) error {
	s := &server{master: m}
	RegisterServiceServer(grpcServer, s)
	return nil
}
// Search implements ServiceServer.Search.
// It delegates the call to the provided Master implementation, streaming each
// matching Satellite back to the client.
func (s *server) Search(query *search.Query, stream Service_SearchServer) error {
	send := func(ctx context.Context, e *Satellite) error {
		return stream.Send(e)
	}
	return s.master.Search(stream.Context(), query, send)
}
// Orbit implements ServiceServer.Orbit.
// It delegates the call to the provided Master implementation, streaming each
// Command back to the client.
// NOTE(review): request.Services is dereferenced unconditionally; a request
// without services would panic — confirm the proto guarantees it is set.
func (s *server) Orbit(request *OrbitRequest, stream Service_OrbitServer) error {
	forward := func(ctx context.Context, command *Command) error {
		return stream.Send(command)
	}
	return s.master.Orbit(stream.Context(), *request.Services, forward)
}
// Shutdown implements ServiceServer.Shutdown
// It delegates the call to the provided Master implementation.
// The xctx alias is the legacy golang.org/x/net/context type required by the
// generated gRPC interface; it is compatible with the stdlib context.
func (s *server) Shutdown(ctx xctx.Context, request *ShutdownRequest) (*ShutdownResponse, error) {
	return s.master.Shutdown(ctx, request)
}
|
package DataLayer
import (
"encoding/json"
"fmt"
"testing"
"utils"
)
// tbinfo is a JSON description of the test table schema: three string fields
// (id, name, age); id and name are marked for indexing.
// NOTE(review): this declares fieldlen 4 for "age" while the hand-built
// fields slice in TestNewDB uses 10 — confirm which is intended.
const tbinfo string = `[
{
"fieldname":"id",
"fieldlen":10,
"fieldtype":0,
"makeindex":true
},
{
"fieldname":"name",
"fieldlen":10,
"fieldtype":0,
"makeindex":true
},
{
"fieldname":"age",
"fieldlen":4,
"fieldtype":0,
"makeindex":false
}
]`
// TestNewDB exercises basic database creation, table creation, bulk insert
// and document lookup against a scratch database under ./testdata.
func TestNewDB(t *testing.T) {
	logger, _ := utils.New("test_db")
	db := NewSDatabase("testdb", "./testdata", logger)

	// Bug fix: json.Unmarshal needs a pointer to the destination; the
	// original passed the slice by value, so decoding always failed.
	var feilds []FieldMeta
	if err := json.Unmarshal([]byte(tbinfo), &feilds); err != nil {
		t.Fatalf("failed to unmarshal table info: %v", err)
	}

	fields := make([]FieldMeta, 0, 3)
	fields = append(fields, FieldMeta{FieldLen: 10, Fieldname: "id", FieldType: 0, MkIdx: true})
	fields = append(fields, FieldMeta{FieldLen: 10, Fieldname: "name", FieldType: 0, MkIdx: true})
	fields = append(fields, FieldMeta{FieldLen: 10, Fieldname: "age", FieldType: 0, MkIdx: false})
	// Fail loudly instead of printing and continuing on a broken table.
	if err := db.CreateTable("biao", fields); err != nil {
		t.Fatalf("failed to create table biao: %v", err)
	}

	content := make(map[string]string)
	content["id"] = "10"
	content["name"] = "abc"
	content["age"] = "20"
	for i := 0; i < 100000; i++ {
		db.AddData("biao", content)
	}
	res := db.FindDocId("biao", 3456)
	fmt.Printf("%v\n", res)
}
|
package models
// GetByUsernameAndPassword loads into u the first user whose email and
// password columns match the given values, returning any database error.
// SECURITY NOTE(review): comparing the raw password in the query implies the
// password column holds plaintext, or that the caller hashes before calling —
// verify that hashing happens upstream.
func (u *User) GetByUsernameAndPassword(email string, password string) error {
	db, err := GetDatabase()
	if err != nil {
		return err
	}
	err = db.Where("email = ? AND password = ?", email, password).First(&u).Error
	return err
}
// GetByID loads into u the record whose primary key equals u.ID, returning
// any database error.
func (u *User) GetByID() error {
	db, err := GetDatabase()
	if err != nil {
		return err
	}
	return db.Where(u.ID).Find(&u).Error
}
// Save persists the user: records with a positive ID are updated in place,
// anything else is inserted as a new row.
func (u *User) Save() error {
	db, err := GetDatabase()
	if err != nil {
		return err
	}
	if u.ID > 0 {
		return db.Save(u).Error
	}
	return db.Create(u).Error
}
|
package configure
import (
"context"
"errors"
"fmt"
"mvdan.cc/sh/v3/expand"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"strings"
"github.com/loft-sh/devspace/pkg/devspace/deploy/deployer/helm"
"github.com/loft-sh/devspace/pkg/devspace/pipeline/engine"
"github.com/sirupsen/logrus"
"github.com/loft-sh/devspace/pkg/devspace/config/versions/latest"
"github.com/loft-sh/devspace/pkg/util/ptr"
"github.com/loft-sh/devspace/pkg/util/survey"
"github.com/loft-sh/devspace/pkg/util/yamlutil"
)
// AddKubectlDeployment adds a new kubectl deployment to the provided config.
// It asks the user for manifest paths (or a kustomization folder), validates
// the answer, and registers the deployment under deploymentName.
func (m *manager) AddKubectlDeployment(deploymentName string, isKustomization bool) error {
	question := "Please enter the paths to your Kubernetes manifests (comma separated, glob patterns are allowed, e.g. 'manifests/**' or 'kube/pod.yaml') [Enter to abort]"
	if isKustomization {
		question = "Please enter path to your Kustomization folder (e.g. ./kube/kustomization/)"
	}

	// validate rejects paths that are neither a kustomization folder nor a
	// glob matching at least one manifest; an empty answer means "abort".
	validate := func(value string) error {
		if value == "" {
			return nil
		}
		if isKustomization {
			stat, err := os.Stat(path.Join(value, "kustomization.yaml"))
			if err != nil || stat.IsDir() {
				return fmt.Errorf("path `%s` is not a Kustomization (kustomization.yaml missing)", value)
			}
			return nil
		}
		matches, err := filepath.Glob(value)
		if err != nil {
			return fmt.Errorf("path `%s` is not a valid glob pattern", value)
		}
		if len(matches) == 0 {
			return fmt.Errorf("path `%s` did not match any manifests", value)
		}
		return nil
	}

	manifests, err := m.log.Question(&survey.QuestionOptions{
		Question:       question,
		ValidationFunc: validate,
	})
	if err != nil {
		return err
	}
	if manifests == "" {
		return fmt.Errorf("adding kubectl deployment aborted")
	}

	manifestPaths := []string{}
	for _, part := range strings.Split(manifests, ",") {
		manifestPaths = append(manifestPaths, strings.TrimSpace(part))
	}

	if m.config.Deployments == nil {
		m.config.Deployments = map[string]*latest.DeploymentConfig{}
	}
	m.config.Deployments[deploymentName] = &latest.DeploymentConfig{
		Name: deploymentName,
		Kubectl: &latest.KubectlConfig{
			Manifests: manifestPaths,
		},
	}
	if isKustomization {
		m.config.Deployments[deploymentName].Kubectl.Kustomize = ptr.Bool(isKustomization)
	}
	m.isRemote[deploymentName] = false
	return nil
}
// AddHelmDeployment adds a new helm deployment to the provided config.
// It interactively asks where the Helm chart lives (local path, chart
// repository, tar archive URL, or a separate git repository) and keeps
// prompting until a valid chart location has been configured.
func (m *manager) AddHelmDeployment(deploymentName string) error {
	for {
		helmConfig := &latest.HelmConfig{
			Chart: &latest.ChartConfig{},
			Values: map[string]interface{}{
				"someChartValue": "",
			},
		}
		var (
			localPath  = "Use a local Helm chart (e.g. ./helm/chart/)"
			chartRepo  = "Use a Helm chart repository (e.g. app-chart stored in https://charts.company.tld)"
			archiveURL = "Use a .tar.gz archive from URL (e.g. https://artifacts.company.tld/chart.tar.gz)"
			gitRepo    = "Use a chart from another git repository (e.g. you have an infra repo)"
			abort      = "Abort and return to more options"
		)
		chartLocation, err := m.log.Question(&survey.QuestionOptions{
			Question: "Which Helm chart do you want to use?",
			Options: []string{
				localPath,
				chartRepo,
				archiveURL,
				gitRepo,
				abort,
			},
		})
		if err != nil {
			return err
		}
		if chartLocation == abort {
			return errors.New("")
		}
		if chartLocation == localPath {
			// Local chart: ask for a path and validate that it contains a
			// Chart.yaml file.
			localChartPath, err := m.log.Question(&survey.QuestionOptions{
				Question:               "Please enter the relative path to your local Helm chart (e.g. ./chart)",
				ValidationRegexPattern: ".+",
			})
			if err != nil {
				return err
			}
			absPath, err := filepath.Abs(".")
			if err != nil {
				return err
			}
			localChartPathRel, err := filepath.Rel(absPath, localChartPath)
			if err != nil {
				localChartPathRel = localChartPath
			}
			stat, err := os.Stat(path.Join(localChartPathRel, "Chart.yaml"))
			if err != nil || stat.IsDir() {
				m.log.WriteString(logrus.InfoLevel, "\n")
				m.log.Errorf("Local path `%s` is not a Helm chart (Chart.yaml missing)", localChartPathRel)
				continue
			}
			helmConfig.Chart.Name = localChartPathRel
			m.isRemote[deploymentName] = false
		} else if chartLocation == chartRepo || chartLocation == archiveURL {
			// Remote chart: probe the repo index (or the archive URL) with an
			// HTTP GET, retrying with basic-auth credentials on 401.
		ChartRepoLoop:
			for {
				requestURL := ""
				if chartLocation == chartRepo {
					tempChartRepoURL, err := m.log.Question(&survey.QuestionOptions{
						Question: "Please specify the full URL of the chart repo (e.g. https://charts.org.tld/)",
						ValidationFunc: func(value string) error {
							_, err := url.ParseRequestURI(chartRepoURL(value))
							if err != nil {
								return err
							}
							return nil
						},
					})
					if err != nil {
						return err
					}
					helmConfig.Chart.RepoURL = chartRepoURL(tempChartRepoURL)
					requestURL = strings.TrimRight(helmConfig.Chart.RepoURL, "/") + "/index.yaml"
					helmConfig.Chart.Name, err = m.log.Question(&survey.QuestionOptions{
						Question:               "Please specify the name of the chart within your chart repository (e.g. payment-service)",
						ValidationRegexPattern: ".+",
					})
					if err != nil {
						return err
					}
				} else {
					requestURL, err = m.log.Question(&survey.QuestionOptions{
						Question:               "Please specify the full URL of your tar archived chart (e.g. https://artifacts.org.tld/chart.tar.gz)",
						ValidationRegexPattern: "^http(s)?://.*",
					})
					if err != nil {
						return err
					}
					helmConfig.Chart.Name = requestURL
				}
				username := ""
				password := ""
				for {
					httpClient := &http.Client{}
					req, err := http.NewRequest("GET", requestURL, nil)
					if err != nil {
						return err
					}
					if username != "" || password != "" {
						req.SetBasicAuth(username, password)
					}
					resp, err := httpClient.Do(req)
					// Bug fix: the original checked `resp == nil` instead of
					// the error, the idiomatic and reliable failure signal.
					if err != nil {
						return err
					}
					// Only the status code is inspected; close the body right
					// away so the connection is not leaked (the original
					// never closed it).
					resp.Body.Close()
					if resp.StatusCode != http.StatusOK {
						if resp.StatusCode == http.StatusUnauthorized {
							m.log.Error("Not authorized to access Helm chart repository. Please provide auth credentials")
							username, err = m.log.Question(&survey.QuestionOptions{
								Question: "Enter your username for accessing " + requestURL,
							})
							if err != nil {
								return err
							}
							password, err = m.log.Question(&survey.QuestionOptions{
								Question: "Enter your password for accessing " + requestURL,
							})
							if err != nil {
								return err
							}
						} else {
							m.log.Errorf("Error: Received %s for chart repo index file `%s`", resp.Status, requestURL)
							break
						}
					} else {
						if username != "" || password != "" {
							// Store credentials as variables so the config
							// itself stays secret-free.
							usernameVar := "HELM_USERNAME"
							passwordVar := "HELM_PASSWORD"
							helmConfig.Chart.Username = fmt.Sprintf("${%s}", usernameVar)
							helmConfig.Chart.Password = fmt.Sprintf("${%s}", passwordVar)
							if m.config.Vars == nil {
								m.config.Vars = map[string]*latest.Variable{}
							}
							m.config.Vars[passwordVar] = &latest.Variable{
								Name:     passwordVar,
								Password: true,
							}
							m.localCache.SetVar(usernameVar, username)
							m.localCache.SetVar(passwordVar, password)
						}
						m.isRemote[deploymentName] = true
						break ChartRepoLoop
					}
				}
			}
		} else {
			// Chart in another git repository: clone (or update) it into a
			// temp folder and register a hook to refresh it before deploys.
			for {
				chartTempPath := ".devspace/chart-repo"
				gitRepo, err := m.log.Question(&survey.QuestionOptions{
					Question: "Please specify the git repo that contains the chart (e.g. https://git.org.tld/team/project.git)",
				})
				if err != nil {
					return err
				}
				gitBranch, err := m.log.Question(&survey.QuestionOptions{
					Question:     "On which git branch is your Helm chart? (e.g. main, master, stable)",
					DefaultValue: "main",
				})
				if err != nil {
					return err
				}
				gitSubFolder, err := m.log.Question(&survey.QuestionOptions{
					Question: "In which folder is your Helm chart within this other git repo? (e.g. ./chart)",
				})
				if err != nil {
					return err
				}
				gitCommand := fmt.Sprintf("if [ -d '%s/.git' ]; then cd \"%s\" && git pull origin %s; else mkdir -p %s; git clone --single-branch --branch %s %s %s; fi", chartTempPath, chartTempPath, gitBranch, chartTempPath, gitBranch, gitRepo, chartTempPath)
				m.log.WriteString(logrus.InfoLevel, "\n")
				m.log.Infof("Cloning external repo `%s` containing to retrieve Helm chart", gitRepo)
				err = engine.ExecuteSimpleShellCommand(context.TODO(), "", expand.ListEnviron(os.Environ()...), os.Stdout, os.Stderr, nil, gitCommand)
				if err != nil {
					m.log.WriteString(logrus.InfoLevel, "\n")
					m.log.Errorf("Unable to clone repository `%s` (branch: %s)", gitRepo, gitBranch)
					continue
				}
				chartFolder := path.Join(chartTempPath, gitSubFolder)
				stat, err := os.Stat(chartFolder)
				if err != nil || !stat.IsDir() {
					m.log.WriteString(logrus.InfoLevel, "\n")
					m.log.Errorf("Local path `%s` does not exist or is not a directory", chartFolder)
					continue
				}
				helmConfig.Chart.Name = chartFolder
				m.config.Hooks = append(m.config.Hooks, &latest.HookConfig{
					Command: gitCommand,
					Events:  []string{"before:deploy"},
				})
				m.isRemote[deploymentName] = true
				break
			}
		}
		if m.config.Deployments == nil {
			m.config.Deployments = map[string]*latest.DeploymentConfig{}
		}
		m.config.Deployments[deploymentName] = &latest.DeploymentConfig{
			Name: deploymentName,
			Helm: helmConfig,
		}
		break
	}
	return nil
}
// AddComponentDeployment adds a new deployment to the provided config.
// It builds a component chart (image plus optional service port) and
// registers it as a Helm deployment using the DevSpace component chart.
func (m *manager) AddComponentDeployment(deploymentName, image string, servicePort int) error {
	componentConfig := &latest.ComponentConfig{
		Containers: []*latest.ContainerConfig{
			{
				Image: image,
			},
		},
	}
	// Only expose a service when a usable port was given.
	if servicePort > 0 {
		componentConfig.Service = &latest.ServiceConfig{
			Ports: []*latest.ServicePortConfig{
				{
					Port: &servicePort,
				},
			},
		}
	}
	chartValues, err := yamlutil.ToInterfaceMap(componentConfig)
	if err != nil {
		return err
	}
	if m.config.Deployments == nil {
		m.config.Deployments = map[string]*latest.DeploymentConfig{}
	}
	m.config.Deployments[deploymentName] = &latest.DeploymentConfig{
		// Consistency fix: the other Add*Deployment functions set Name on
		// the deployment config; this one previously left it empty.
		Name: deploymentName,
		Helm: &latest.HelmConfig{
			Chart: &latest.ChartConfig{
				Name:    helm.DevSpaceChartConfig.Name,
				RepoURL: helm.DevSpaceChartConfig.RepoURL,
			},
			Values: chartValues,
		},
	}
	m.isRemote[deploymentName] = true
	return nil
}
// IsRemoteDeployment reports whether the named deployment was configured from
// a remote source (chart repo, archive URL or git repo) rather than local
// files. Unknown names return false (the map zero value).
func (m *manager) IsRemoteDeployment(deploymentName string) bool {
	return m.isRemote[deploymentName]
}
// chartRepoURL normalizes a chart repository address: URLs without an
// explicit http:// or https:// scheme are prefixed with https://.
func chartRepoURL(url string) string {
	if strings.HasPrefix(url, "https://") || strings.HasPrefix(url, "http://") {
		return url
	}
	return "https://" + url
}
|
package carbonapi
import (
"expvar"
"net/http"
"net/http/pprof"
"github.com/dgryski/httputil"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
// initHandlersInternal builds the mux for the internal/debug listener:
// header block management, version info, expvar, pprof and Prometheus
// metrics endpoints.
func initHandlersInternal(app *App) http.Handler {
	mux := http.NewServeMux()
	// timed registers a handler wrapped with request-time bucketing.
	timed := func(pattern string, h http.HandlerFunc) {
		mux.HandleFunc(pattern, httputil.TimeHandler(h, app.bucketRequestTimes))
	}
	timed("/block-headers/", app.blockHeaders)
	timed("/block-headers", app.blockHeaders)
	timed("/unblock-headers/", app.unblockHeaders)
	timed("/unblock-headers", app.unblockHeaders)
	mux.HandleFunc("/debug/version", app.debugVersionHandler)
	mux.Handle("/debug/vars", expvar.Handler())
	mux.HandleFunc("/debug/pprof/", pprof.Index)
	mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
	mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
	mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
	mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
	mux.Handle("/metrics", promhttp.Handler())
	return mux
}
// initHandlers builds the mux for the public API listener. Every route is
// wrapped with request-time bucketing; the render/find/info endpoints are
// additionally wrapped with request validation.
func initHandlers(app *App) http.Handler {
	mux := http.NewServeMux()
	// timed registers a handler wrapped with request-time bucketing.
	timed := func(pattern string, h http.HandlerFunc) {
		mux.HandleFunc(pattern, httputil.TimeHandler(h, app.bucketRequestTimes))
	}
	// validated additionally wraps the handler with request validation.
	validated := func(pattern, name string, h http.HandlerFunc) {
		timed(pattern, app.validateRequest(h, name))
	}
	validated("/render/", "render", app.renderHandler)
	validated("/render", "render", app.renderHandler)
	validated("/metrics/find/", "find", app.findHandler)
	validated("/metrics/find", "find", app.findHandler)
	validated("/info/", "info", app.infoHandler)
	validated("/info", "info", app.infoHandler)
	timed("/lb_check", app.lbcheckHandler)
	timed("/version", app.versionHandler)
	timed("/version/", app.versionHandler)
	timed("/functions", app.functionsHandler)
	timed("/functions/", app.functionsHandler)
	timed("/tags/autoComplete/tags", app.tagsHandler)
	timed("/", app.usageHandler)
	return mux
}
|
package lib
// Report provides a struct for a report of an executed task: which API was
// targeted, which cookie set was used, and the response body and status that
// came back.
type Report struct {
	reqTarget  string // key of the targeted API in the store
	reqCookies string // key of the prepared cookie set that was sent
	respBody   string // response body returned by the request
	respStatus string // response status returned by the request
}
// Execute runs every task in the given test plan: for each task it looks up
// the target API in the store, performs the HTTP request with the task's
// prepared cookies, and prints a report of the response.
func Execute(plan Plan, store APIStore) {
	for _, task := range plan.Tasks {
		// Resolve the API definition and cookie set this task refers to.
		target := store[task.TargetAPI]
		cookies := plan.PreparedCookies[task.UsedCookies]
		body, status := HTTPRequest(target.Method, target.URL, target.Headers, cookies)
		report := Report{
			reqTarget:  task.TargetAPI,
			reqCookies: task.UsedCookies,
			respBody:   body,
			respStatus: status,
		}
		report.Print()
	}
}
|
package confs
import (
"fmt"
"io/ioutil"
"time"
"encoding/json"
"net/http"
)
// Conference mirrors one entry of the tech-conferences/conference-data JSON
// feed.
// NOTE(review): date fields are kept as strings as delivered by the feed —
// confirm the expected format (the upstream data uses YYYY-MM-DD) before
// parsing them.
type Conference struct {
	Name       string
	URL        string
	StartDate  string
	EndDate    string
	City       string
	Country    string
	CFPUrl     string // call-for-papers URL
	CFPEndDate string // call-for-papers deadline
	Twitter    string
}
// GetConferences fetches the conference list for the given topic and the
// current year from the tech-conferences/conference-data GitHub repository
// and decodes it into a slice of Conference.
func GetConferences(topic string) ([]Conference, error) {
	var conferences []Conference
	url := fmt.Sprintf("https://raw.githubusercontent.com/tech-conferences/conference-data/master/conferences/%d/%s.json", time.Now().Year(), topic)
	resp, err := http.Get(url)
	if err != nil {
		return conferences, err
	}
	// Bug fix: register the close before the status check; the original
	// deferred after it, leaking the body on non-200 responses.
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		return conferences, fmt.Errorf("Got response code %d when calling %s", resp.StatusCode, url)
	}
	if err := json.NewDecoder(resp.Body).Decode(&conferences); err != nil {
		return conferences, err
	}
	return conferences, nil
}
// LoadState reads previously saved conferences from filename and returns only
// the ones that are still in the future. A missing, unreadable or corrupt
// state file yields an empty slice.
func LoadState(filename string) []Conference {
	state, err := ioutil.ReadFile(filename)
	if err != nil {
		return []Conference{}
	}
	var conferences = []Conference{}
	// Bug fix: the original ignored the Unmarshal error, silently passing a
	// possibly partial decode onward; treat corrupt state like missing state.
	if err := json.Unmarshal(state, &conferences); err != nil {
		return []Conference{}
	}
	return FilterConferences(conferences, NewIsInFutureTest())
}
// SaveState serializes the conferences to JSON and writes them to filename
// with permissions 0644, returning any marshal or write error.
func SaveState(filename string, conferences []Conference) error {
	data, err := json.Marshal(conferences)
	if err != nil {
		return err
	}
	return ioutil.WriteFile(filename, data, 0644)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.