text
stringlengths 11
4.05M
|
|---|
package tools
import (
"fmt"
"net/http"
"runtime"
)
// Pool sizing knobs for the worker pool below.
var (
	//Max_Num = os.Getenv("MAX_NUM")
	// MaxWorker is the number of concurrent workers; sized to the CPU count.
	MaxWorker = runtime.NumCPU()
	// MaxQueue is the buffered capacity of the global JobQueue.
	MaxQueue = 1000
)
// Serload is the payload carried by a Job (here just a demo string).
type Serload struct {
	pri string // message printed by the worker when the job runs
}
// Job is a unit of work submitted to the pool.
type Job struct {
	serload Serload // job payload
}
var JobQueue chan Job
// Worker pulls jobs from its private JobChannel after registering that
// channel with the shared WorkerPool.
type Worker struct {
	WorkerPool chan chan Job // shared pool of worker job channels
	JobChannel chan Job      // this worker's private job inbox
	Quit       chan bool     // signals the worker goroutine to stop
}
// NewWorker creates a Worker attached to the given shared worker pool.
func NewWorker(workPool chan chan Job) Worker {
	w := Worker{WorkerPool: workPool}
	w.JobChannel = make(chan Job)
	w.Quit = make(chan bool)
	return w
}
// Start launches the worker loop in its own goroutine. Each iteration the
// worker re-registers its inbox with the pool, then waits for a job or a
// quit signal.
func (w Worker) Start() {
	go func() {
		for {
			// Advertise availability to the dispatcher.
			w.WorkerPool <- w.JobChannel
			select {
			case j := <-w.JobChannel:
				// Execute the job (demo: print its payload).
				fmt.Println(j.serload.pri)
			case <-w.Quit:
				return
			}
		}
	}()
}
// Stop asynchronously signals the worker loop to exit.
func (w Worker) Stop() {
	go func() { w.Quit <- true }()
}
// Dispatcher fans jobs from the global JobQueue out to available workers.
type Dispatcher struct {
	MaxWorkers int           // number of workers started by Run
	WorkerPool chan chan Job // channel of worker inboxes, shared with each Worker
	Quit       chan bool     // signals the dispatch loop to stop
}
// NewDispatcher builds a Dispatcher whose pool can hold maxWorkers inboxes.
func NewDispatcher(maxWorkers int) *Dispatcher {
	return &Dispatcher{
		MaxWorkers: maxWorkers,
		WorkerPool: make(chan chan Job, maxWorkers),
		Quit:       make(chan bool),
	}
}
// Run starts MaxWorkers workers and the dispatch loop.
// NOTE(review): the Worker values are not retained, so individual workers
// cannot be stopped later — only the dispatch loop can (via Stop).
func (d *Dispatcher) Run() {
	for i := 0; i < d.MaxWorkers; i++ {
		NewWorker(d.WorkerPool).Start()
	}
	go d.Dispatch()
}
// Stop asynchronously signals the dispatch loop to exit.
func (d *Dispatcher) Stop() {
	go func() { d.Quit <- true }()
}
// Dispatch moves jobs from the global JobQueue to free workers until Quit.
func (d *Dispatcher) Dispatch() {
	for {
		select {
		case j := <-JobQueue:
			// Hand off asynchronously so a busy pool never blocks the
			// dispatch loop itself.
			go func(j Job) {
				inbox := <-d.WorkerPool
				inbox <- j
			}(j)
		case <-d.Quit:
			return
		}
	}
}
// entry handles every HTTP request: it enqueues a demo job and writes a
// greeting plus the current heap allocation to the client.
func entry(res http.ResponseWriter, req *http.Request) {
	// fetch job
	work := Job{serload: Serload{pri: "Just do it"}}
	JobQueue <- work
	memStat := new(runtime.MemStats)
	runtime.ReadMemStats(memStat)
	// BUG FIX: the original call passed memStat with no corresponding verb in
	// the format string ("Hello World ...again"), which fails `go vet` and
	// emits a "%!(EXTRA *runtime.MemStats=...)" artifact into the response.
	fmt.Fprintf(res, "Hello World ...again %v", memStat.Alloc)
}
// init wires up the pool: sets GOMAXPROCS, creates the global queue, and
// starts the dispatcher before main runs.
func init() {
	runtime.GOMAXPROCS(MaxWorker)
	JobQueue = make(chan Job, MaxQueue)
	dispatcher := NewDispatcher(MaxWorker)
	dispatcher.Run()
}
// main serves the demo handler on :8086 until the listener fails.
func main() {
	http.HandleFunc("/", entry)
	if err := http.ListenAndServe(":8086", nil); err != nil {
		fmt.Println("Server failure /// ", err)
	}
	fmt.Println("quit")
}
|
package models_test
import (
"encoding/json"
"github.com/APTrust/exchange/models"
"github.com/stretchr/testify/assert"
"io/ioutil"
"path/filepath"
"testing"
"time"
)
// TestDeleteAttemptedAndSucceeded loads a canned CleanupResult and checks
// that Succeeded()/DeleteAttempted() react to DeletedAt/ErrorMessage changes.
func TestDeleteAttemptedAndSucceeded(t *testing.T) {
	// TODO: Need a more reliable way to get path to test data file
	// FIX: the local variable was named `filepath`, shadowing the imported
	// path/filepath package; renamed to avoid the shadow.
	testFile := filepath.Join("..", "testdata", "json_objects", "cleanup_result.json")
	var result models.CleanupResult
	file, err := ioutil.ReadFile(testFile)
	if err != nil {
		t.Errorf("Error loading cleanup result test file '%s': %v", testFile, err)
		return
	}
	err = json.Unmarshal(file, &result)
	if err != nil {
		// FIX: typo "parson" -> "parsing" in the error message.
		t.Errorf("Error parsing JSON from cleanup result test file '%s': %v", testFile, err)
		return
	}
	assert.True(t, result.Succeeded())
	// NOTE(review): mutating `file` in the loops below only affects the
	// result if Files holds pointers; the assertions following each loop
	// suggest it does — confirm against models.CleanupResult.
	for _, file := range result.Files {
		if file.DeleteAttempted() == false {
			assert.True(t, file.DeleteAttempted())
		}
		// Set these for next test
		file.DeletedAt = time.Time{}
		file.ErrorMessage = "Spongebob"
	}
	assert.False(t, result.Succeeded())
	for _, file := range result.Files {
		assert.True(t, file.DeleteAttempted())
		// Set these for next test
		file.DeletedAt = time.Time{}
		file.ErrorMessage = ""
	}
	assert.False(t, result.Succeeded())
	for _, file := range result.Files {
		if file.DeleteAttempted() == true {
			// Delete not attempted, because DeletedAt == 0
			assert.False(t, file.DeleteAttempted())
		}
	}
}
|
package annotations_test
import (
"github.com/haproxytech/kubernetes-ingress/controller/annotations"
"github.com/haproxytech/kubernetes-ingress/controller/store"
)
// TestGlobalCfgSnippetUpdate verifies that parsed config-snippet annotation
// values are written into the global section wrapped in BEGIN/END markers.
func (suite *AnnotationSuite) TestGlobalCfgSnippetUpdate() {
	tests := []struct {
		input store.StringW
		expected string
	}{
		// Single-line snippet.
		{store.StringW{Value: "ssl-default-bind-ciphers EECDH+AESGCM:EECDH+CHACHA20"},
		"###_config-snippet_### BEGIN\n ssl-default-bind-ciphers EECDH+AESGCM:EECDH+CHACHA20\n ###_config-snippet_### END"},
		// Multi-line snippet: each line is emitted between the markers.
		{store.StringW{Value: `tune.ssl.default-dh-param 2048
tune.bufsize 32768`,
		},
		"###_config-snippet_### BEGIN\n tune.ssl.default-dh-param 2048\n tune.bufsize 32768\n ###_config-snippet_### END"},
	}
	for _, test := range tests {
		suite.T().Log(test.input)
		a := annotations.NewGlobalCfgSnippet("", suite.client)
		// Parse must accept the value, then Update writes it to the config.
		if suite.NoError(a.Parse(test.input, true)) {
			suite.NoError(a.Update())
			result, _ := suite.client.GlobalWriteConfig("global", "config-snippet")
			suite.Equal(test.expected, result)
		}
	}
}
// TestGlobalCfgSnippetFail checks that a whitespace-only snippet value is
// rejected by Parse.
func (suite *AnnotationSuite) TestGlobalCfgSnippetFail() {
	input := store.StringW{Value: " "}
	annotation := annotations.NewGlobalCfgSnippet("", suite.client)
	parseErr := annotation.Parse(input, true)
	suite.T().Log(parseErr)
	suite.Error(parseErr)
}
|
package hybrid
import (
"fmt"
"strings"
"time"
"github.com/gruntwork-io/terratest/modules/k8s"
"github.com/gruntwork-io/terratest/modules/retry"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/pkg/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/kumahq/kuma/pkg/config/core"
. "github.com/kumahq/kuma/test/framework"
)
// ApplicationHealthCheckOnKubernetesUniversal defines a hybrid (K8s global +
// K8s zone + universal zone) test suite asserting that requests are never
// load-balanced to a data plane whose service probe marks it unhealthy.
func ApplicationHealthCheckOnKubernetesUniversal() {
	// Mesh resource with mTLS enabled (builtin CA).
	meshMTLSOn := func(mesh string) string {
		return fmt.Sprintf(`
apiVersion: kuma.io/v1alpha1
kind: Mesh
metadata:
name: %s
spec:
mtls:
enabledBackend: ca-1
backends:
- name: ca-1
type: builtin
`, mesh)
	}
	// Namespace with Kuma sidecar injection turned on.
	namespaceWithSidecarInjection := func(namespace string) string {
		return fmt.Sprintf(`
apiVersion: v1
kind: Namespace
metadata:
name: %s
annotations:
kuma.io/sidecar-injection: "enabled"
`, namespace)
	}
	var globalK8s, zoneK8s, zoneUniversal Cluster
	var optsGlobalK8s, optsZoneK8s, optsZoneUniversal = KumaK8sDeployOpts, KumaZoneK8sDeployOpts, KumaUniversalDeployOpts
	BeforeEach(func() {
		// Two K8s clusters (global + zone) and one universal cluster (zone).
		k8sClusters, err := NewK8sClusters([]string{Kuma1, Kuma2}, Silent)
		Expect(err).ToNot(HaveOccurred())
		universalClusters, err := NewUniversalClusters([]string{Kuma3}, Silent)
		Expect(err).ToNot(HaveOccurred())
		// Global control plane with the mTLS-enabled default mesh.
		globalK8s = k8sClusters.GetCluster(Kuma1)
		err = NewClusterSetup().
			Install(Kuma(core.Global, optsGlobalK8s...)).
			Install(YamlK8s(meshMTLSOn("default"))).
			Setup(globalK8s)
		Expect(err).ToNot(HaveOccurred())
		// K8s zone with ingress and a demo client pod.
		optsZoneK8s = append(optsZoneK8s,
			WithIngress(),
			WithGlobalAddress(globalK8s.GetKuma().GetKDSServerAddress()))
		zoneK8s = k8sClusters.GetCluster(Kuma2)
		err = NewClusterSetup().
			Install(Kuma(core.Zone, optsZoneK8s...)).
			Install(YamlK8s(namespaceWithSidecarInjection(TestNamespace))).
			Install(DemoClientK8s("default")).
			Setup(zoneK8s)
		Expect(err).ToNot(HaveOccurred())
		testServerToken, err := globalK8s.GetKuma().GenerateDpToken("default", "test-server")
		Expect(err).ToNot(HaveOccurred())
		// Universal zone hosting three test-server instances; test-server-2
		// runs proxy-only with a service probe, so it should be unhealthy.
		optsZoneUniversal = append(optsZoneUniversal,
			WithGlobalAddress(globalK8s.GetKuma().GetKDSServerAddress()))
		zoneUniversal = universalClusters.GetCluster(Kuma3)
		ingressTokenKuma3, err := globalK8s.GetKuma().GenerateZoneIngressToken(Kuma3)
		Expect(err).ToNot(HaveOccurred())
		err = NewClusterSetup().
			Install(Kuma(core.Zone, optsZoneUniversal...)).
			Install(TestServerUniversal("test-server-1", "default", testServerToken,
				WithArgs([]string{"echo", "--instance", "dp-universal-1"}),
				WithProtocol("tcp"))).
			Install(TestServerUniversal("test-server-2", "default", testServerToken,
				WithArgs([]string{"echo", "--instance", "dp-universal-2"}),
				WithProtocol("tcp"),
				ProxyOnly(),
				ServiceProbe())).
			Install(TestServerUniversal("test-server-3", "default", testServerToken,
				WithArgs([]string{"echo", "--instance", "dp-universal-3"}),
				WithProtocol("tcp"))).
			Install(IngressUniversal(ingressTokenKuma3)).
			Setup(zoneUniversal)
		Expect(err).ToNot(HaveOccurred())
		err = zoneUniversal.VerifyKuma()
		Expect(err).ToNot(HaveOccurred())
	})
	AfterEach(func() {
		if ShouldSkipCleanup() {
			return
		}
		// Tear down in reverse order: zone K8s, zone universal, then global.
		Expect(zoneK8s.DeleteNamespace(TestNamespace)).To(Succeed())
		err := zoneK8s.DeleteKuma(optsZoneK8s...)
		Expect(err).ToNot(HaveOccurred())
		err = zoneK8s.DismissCluster()
		Expect(err).ToNot(HaveOccurred())
		err = zoneUniversal.DeleteKuma(optsZoneUniversal...)
		Expect(err).ToNot(HaveOccurred())
		err = zoneUniversal.DismissCluster()
		Expect(err).ToNot(HaveOccurred())
		err = globalK8s.DeleteKuma()
		Expect(err).ToNot(HaveOccurred())
		err = globalK8s.DismissCluster()
		Expect(err).ToNot(HaveOccurred())
	})
	It("should not load balance requests to unhealthy instance", func() {
		pods, err := k8s.ListPodsE(zoneK8s.GetTesting(), zoneK8s.GetKubectlOptions(TestNamespace), metav1.ListOptions{
			LabelSelector: fmt.Sprintf("app=%s", "demo-client"),
		})
		Expect(err).ToNot(HaveOccurred())
		Expect(pods).To(HaveLen(1))
		cmd := []string{"curl", "-v", "-m", "3", "--fail", "test-server.mesh"}
		// Only the two healthy instances should ever be reached.
		instances := []string{"dp-universal-1", "dp-universal-3"}
		instanceSet := map[string]bool{}
		// Warm-up: retry until both healthy instances have answered at least once.
		_, err = retry.DoWithRetryE(zoneK8s.GetTesting(), fmt.Sprintf("kubectl exec %s -- %s", pods[0].GetName(), strings.Join(cmd, " ")),
			100, 500*time.Millisecond, func() (string, error) {
				stdout, _, err := zoneK8s.Exec(TestNamespace, pods[0].GetName(), "demo-client", cmd...)
				if err != nil {
					return "", err
				}
				for _, instance := range instances {
					if strings.Contains(stdout, instance) {
						instanceSet[instance] = true
					}
				}
				if len(instanceSet) != len(instances) {
					return "", errors.Errorf("checked %d/%d instances", len(instanceSet), len(instances))
				}
				return "", nil
			},
		)
		Expect(err).ToNot(HaveOccurred())
		// Fire a batch of requests and count which instance served each one.
		var counter1, counter2, counter3 int
		const numOfRequest = 100
		for i := 0; i < numOfRequest; i++ {
			var stdout string
			stdout, _, err = zoneK8s.Exec(TestNamespace, pods[0].GetName(), "demo-client", cmd...)
			Expect(err).ToNot(HaveOccurred())
			switch {
			case strings.Contains(stdout, "dp-universal-1"):
				counter1++
			case strings.Contains(stdout, "dp-universal-2"):
				counter2++
			case strings.Contains(stdout, "dp-universal-3"):
				counter3++
			}
		}
		// The unhealthy instance must receive zero traffic; the two healthy
		// ones must account for every request between them.
		Expect(counter2).To(Equal(0))
		Expect(counter1 > 0).To(BeTrue())
		Expect(counter3 > 0).To(BeTrue())
		Expect(counter1 + counter3).To(Equal(numOfRequest))
	})
}
|
package main
import (
"fmt"
"math"
"math/big"
"reflect"
)
// main demonstrates Go's integer literal types, unsigned types, rune values,
// and arbitrary-precision arithmetic with math/big.
func main() {
	fmt.Println(1, 2, 1000)
	fmt.Println("Literal inteiro", reflect.TypeOf(32000))
	// Unsigned (natural-number) types of each width.
	var (
		n8  uint8  = 5
		n16 uint16 = 20
		n32 uint32 = 500
		n64 uint64 = 35441125
	)
	var byteVal byte = 8 // byte is an alias for uint8
	fmt.Println("Literal byte", reflect.TypeOf(byteVal))
	fmt.Println(n8)
	fmt.Println(n16)
	fmt.Println(n32)
	fmt.Println(n64)
	fmt.Println("")
	fmt.Println("Números máximos de cada tipo de int")
	fmt.Println("------------------------------------")
	fmt.Println("int8", math.MaxInt8)
	fmt.Println("int16", math.MaxInt16)
	fmt.Println("int32", math.MaxInt32)
	fmt.Println("int64", math.MaxInt64)
	fmt.Println("")
	fmt.Println("Números máximos de cada tipo de uint")
	fmt.Println("------------------------------------")
	fmt.Println("uint8", math.MaxUint8)
	fmt.Println("uint16", math.MaxUint16)
	fmt.Println("uint32", math.MaxUint32)
	// math.MaxUint64 overflows the untyped-int argument, so it stays disabled:
	// fmt.Println("uint64", math.MaxUint64)
	// A rune is an int32 holding a Unicode code point.
	var runeA rune = 'a'
	var runeB rune = 'b'
	var runeC rune = 'c'
	fmt.Println("Literal rune é", reflect.TypeOf(runeA))
	fmt.Println("unicode de a:", runeA)
	fmt.Println("unicode de b:", runeB)
	fmt.Println("unicode de c:", runeC)
	// math/big: the receiver stores each operation's result.
	acc := big.NewInt(10)
	fmt.Println(acc.Add(big.NewInt(10), big.NewInt(5)))
	fmt.Println(acc.Mul(big.NewInt(10), big.NewInt(5)))
	fmt.Println(acc.Sub(big.NewInt(9), big.NewInt(5)))
	fmt.Println(acc.Div(big.NewInt(6), big.NewInt(2)))
}
|
package day1
import (
"testing"
"github.com/achakravarty/30-days-of-go/assert"
)
// testCase holds two operands and the expected result of adding them;
// interface{} lets one table type serve ints, floats, and strings.
type testCase struct {
	value1 interface{} // first operand
	value2 interface{} // second operand
	expected interface{} // expected sum/concatenation
}
// Test fixtures for each AddXxx variant.
var numbers = []testCase{
	testCase{value1: 1, value2: 2, expected: 3}}
var doubles = []testCase{
	testCase{value1: float32(1.0), value2: float32(4.0), expected: float32(5.0)}}
// NOTE(review): this variable shadows the stdlib `strings` package name at
// package scope; harmless here (strings is not imported) but worth renaming.
var strings = []testCase{
	testCase{value1: "World", value2: "Hello ", expected: "Hello World"},
	testCase{value1: "is awesome", value2: "This ", expected: "This is awesome"},
}
// TestDataTypes runs each typed-addition check as a named subtest.
func TestDataTypes(t *testing.T) {
	subtests := []struct {
		name string
		fn   func(*testing.T)
	}{
		{"Add Integers", testAddIntegers},
		{"Add Doubles", testAddDoubles},
		{"Add Strings", testAddStrings},
	}
	for _, st := range subtests {
		t.Run(st.name, st.fn)
	}
}
// testAddIntegers checks AddIntegers against the numbers fixture table.
func testAddIntegers(t *testing.T) {
	for _, tc := range numbers {
		got := AddIntegers(tc.value1.(int), tc.value2.(int))
		assert.Equal(t, tc.expected.(int), got)
	}
}
// testAddDoubles checks AddDoubles against the doubles fixture table.
func testAddDoubles(t *testing.T) {
	for _, tc := range doubles {
		got := AddDoubles(tc.value1.(float32), tc.value2.(float32))
		assert.Equal(t, tc.expected.(float32), got)
	}
}
// testAddStrings checks AddStrings against the strings fixture table.
func testAddStrings(t *testing.T) {
	for _, tc := range strings {
		got := AddStrings(tc.value1.(string), tc.value2.(string))
		assert.Equal(t, tc.expected.(string), got)
	}
}
|
package bootstrap
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/openshift/installer/pkg/types"
)
// TestMergedMirrorSets verifies that MergedMirrorSets deduplicates sources,
// merges their mirror lists in first-seen order, and drops duplicate mirrors.
func TestMergedMirrorSets(t *testing.T) {
	tests := []struct {
		name string
		input []types.ImageDigestSource
		expected []types.ImageDigestSource
	}{{
		// Distinct sources pass through unchanged.
		input: []types.ImageDigestSource{{
			Source: "a",
		}, {
			Source: "b",
		}},
		expected: []types.ImageDigestSource{{
			Source: "a",
		}, {
			Source: "b",
		}},
	}, {
		// Duplicate sources collapse to one entry.
		input: []types.ImageDigestSource{{
			Source: "a",
		}, {
			Source: "a",
		}},
		expected: []types.ImageDigestSource{{
			Source: "a",
		}},
	}, {
		// Mirrors are concatenated with duplicates removed.
		input: []types.ImageDigestSource{{
			Source: "a",
			Mirrors: []string{"ma", "mb", "mb"},
		}, {
			Source: "a",
			Mirrors: []string{"mc", "mc", "md"},
		}},
		expected: []types.ImageDigestSource{{
			Source: "a",
			Mirrors: []string{"ma", "mb", "mc", "md"},
		}},
	}, {
		// Different sources keep their own mirror lists.
		input: []types.ImageDigestSource{{
			Source: "a",
			Mirrors: []string{"ma", "mb"},
		}, {
			Source: "b",
			Mirrors: []string{"mc", "md"},
		}},
		expected: []types.ImageDigestSource{{
			Source: "a",
			Mirrors: []string{"ma", "mb"},
		}, {
			Source: "b",
			Mirrors: []string{"mc", "md"},
		}},
	}, {
		// Overlapping mirrors across duplicate sources are deduplicated.
		input: []types.ImageDigestSource{{
			Source: "a",
			Mirrors: []string{"ma", "mb"},
		}, {
			Source: "a",
			Mirrors: []string{"ma", "md"},
		}},
		expected: []types.ImageDigestSource{{
			Source: "a",
			Mirrors: []string{"ma", "mb", "md"},
		}},
	}, {
		// First-seen order wins even when later lists reorder mirrors.
		input: []types.ImageDigestSource{{
			Source: "a",
			Mirrors: []string{"ma", "mb"},
		}, {
			Source: "a",
			Mirrors: []string{"md", "ma"},
		}},
		expected: []types.ImageDigestSource{{
			Source: "a",
			Mirrors: []string{"ma", "mb", "md"},
		}},
	}, {
		// Three duplicate sources merge into a single ordered mirror list.
		input: []types.ImageDigestSource{{
			Source: "a",
			Mirrors: []string{"ma", "mb"},
		}, {
			Source: "a",
			Mirrors: []string{"md", "ma"},
		}, {
			Source: "a",
			Mirrors: []string{"me", "mb"},
		}},
		expected: []types.ImageDigestSource{{
			Source: "a",
			Mirrors: []string{"ma", "mb", "md", "me"},
		}},
	}, {
		// Interleaved sources are grouped while preserving source order.
		input: []types.ImageDigestSource{{
			Source: "a",
			Mirrors: []string{"ma"},
		}, {
			Source: "b",
			Mirrors: []string{"md", "mc"},
		}, {
			Source: "a",
			Mirrors: []string{"mb", "ma"},
		}},
		expected: []types.ImageDigestSource{{
			Source: "a",
			Mirrors: []string{"ma", "mb"},
		}, {
			Source: "b",
			Mirrors: []string{"md", "mc"},
		}},
	}}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			assert.Equal(t, test.expected, MergedMirrorSets(test.input))
		})
	}
}
// TestContentSourceToDigestMirror verifies the straight field-for-field
// conversion from ImageContentSource to ImageDigestSource — no merging or
// deduplication is expected here.
func TestContentSourceToDigestMirror(t *testing.T) {
	tests := []struct {
		name string
		input []types.ImageContentSource
		expected []types.ImageDigestSource
	}{{
		input: []types.ImageContentSource{{
			Source: "a",
			Mirrors: []string{"ma", "mb", "mb"},
		}, {
			Source: "a",
			Mirrors: []string{"mc", "mc", "md"},
		}},
		// Duplicates survive: conversion must not alter the mirror lists.
		expected: []types.ImageDigestSource{{
			Source: "a",
			Mirrors: []string{"ma", "mb", "mb"},
		}, {
			Source: "a",
			Mirrors: []string{"mc", "mc", "md"},
		}},
	}}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			assert.Equal(t, test.expected, ContentSourceToDigestMirror(test.input))
		})
	}
}
|
package app
import (
"encoding/json"
"io/ioutil"
"log"
"net/http"
"time"
"github.com/cloudrimmers/imt2681-assignment3/lib/database"
"github.com/cloudrimmers/imt2681-assignment3/lib/types"
)
// App ...
// App holds the configuration and database handle for the fixer.io poller.
type App struct {
	FixerioURI string // URL polled for currency rates
	CollectionFixerName string // Mongo collection receiving the payloads
	Mongo database.Mongo // database connection wrapper
	Seedpath string // path to the JSON seed file used by SeedFixerdata
}
// Fixer2Mongo ...
// Fixer2Mongo pulls the latest rates from fixer.io, timestamps the payload,
// and inserts it into the configured Mongo collection. Errors are logged and
// abort the run; nothing is returned.
func (app *App) Fixer2Mongo() {
	// 1. Connect and request to fixer.io
	resp, err := http.Get(app.FixerioURI)
	if err != nil {
		log.Println("ERROR No connection with fixer.io: "+app.FixerioURI+" ...", err.Error())
		return
	}
	// BUG FIX: the response body was never closed, leaking a connection on
	// every poll; close it as soon as the request is known to have succeeded.
	defer resp.Body.Close()
	// 2. Decode payload
	payload := &(types.FixerIn{})
	err = json.NewDecoder(resp.Body).Decode(payload)
	if err != nil {
		log.Println("ERROR Could not decode resp.Body...", err.Error())
		return
	}
	// @TODO 3 Validate incomming data
	// 4. Connect to DB
	// NOTE(review): Close is deferred before the error check, matching the
	// original behavior (Close runs even when OpenC failed) — confirm that
	// Mongo.Close is safe in that case.
	collectionFixer, err := app.Mongo.OpenC(app.CollectionFixerName)
	defer app.Mongo.Close()
	if err != nil {
		log.Println("ERROR Database no connection: ", err.Error())
		return
	}
	// 5. Timestamp
	payload.Timestamp = time.Now().String()
	// 6. Dump payload to database
	err = collectionFixer.Insert(payload)
	if err != nil {
		log.Println("ERROR on db.Insert():\n", err.Error())
		return
	}
	log.Println("SUCCESS pulling fixer.io: ", payload)
}
// SeedFixerdata ...
// SeedFixerdata loads the JSON seed file from app.Seedpath and inserts each
// record into the fixer collection. A missing or malformed seed file panics;
// individual insert failures are only logged.
func (app *App) SeedFixerdata() {
	// 0. Get seed
	log.Println("Reading " + app.Seedpath)
	data, err := ioutil.ReadFile(app.Seedpath)
	if err != nil {
		panic(err.Error())
	}
	var seed []types.FixerIn
	if err = json.Unmarshal(data, &seed); err != nil {
		panic(err.Error())
	}
	// 1. Open collection
	collectionFixer, err := app.Mongo.OpenC(app.CollectionFixerName)
	if err != nil {
		log.Println(err.Error())
		return
	}
	defer app.Mongo.Close()
	// 2. Insert to database
	// cfixer.DropCollection()
	for _, record := range seed {
		if err = collectionFixer.Insert(record); err != nil {
			log.Println("Unable to db.Insert seed")
		}
	}
}
|
package fileutil
import (
"os"
"time"
)
// FileSummary includes the intersection of the set of
// file attributes available from os.FileInfo and tar.Header.
type FileSummary struct {
	RelPath string // path relative to some root (e.g. archive root)
	AbsPath string // absolute filesystem path
	Mode os.FileMode // permission and mode bits
	Size int64 // size in bytes
	ModTime time.Time // last modification time
	IsDir bool // true for directories
	IsRegularFile bool // true for regular files (not symlink/device/etc.)
	Uid int // numeric owner user id
	Gid int // numeric owner group id
}
|
package main
import "fmt"
// Boy is one child (node) in the circular singly linked list.
type Boy struct {
	No int // the child's number (1-based)
	Next *Boy // pointer to the next child in the ring
}
// AddBoy builds a circular singly linked list of num children numbered
// 1..num and returns a pointer to the first child. For num < 1 it prints an
// error and returns an empty placeholder node (matching original behavior).
func AddBoy(num int) *Boy {
	first := &Boy{}
	if num < 1 {
		fmt.Println("num的值不对")
		return first
	}
	// tail tracks the most recently appended node.
	var tail *Boy
	for i := 1; i <= num; i++ {
		node := &Boy{No: i}
		if i == 1 {
			first = node
		} else {
			tail.Next = node
		}
		tail = node
		tail.Next = first // keep the ring closed after every append
	}
	return first
}
// ShowBoy traverses the ring exactly once, printing each child's number.
// An empty ring (first.Next == nil) prints a message and returns.
func ShowBoy(first *Boy) {
	if first.Next == nil {
		fmt.Println("链表为空,没有小孩")
		return
	}
	for cur := first; ; cur = cur.Next {
		fmt.Printf("小孩子编号=%d->", cur.No)
		// Stop after the node that links back to the head.
		if cur.Next == first {
			return
		}
	}
}
/*
设编号为1,2,.... n的n个人围坐一圈,约定好编号为k,(1<=k<=n)
的人从1开始宝树,数到m的那个人出列,它的下一位又从1开始报数,
数到m的那个人又出列,以此类推,知道所有的人出列位置,由此产生一个出队列
*/
// PlayGame solves the Josephus problem on the ring: starting from child
// startNo, repeatedly count countNum children and remove the one counted,
// printing each removal, until a single child remains.
func PlayGame(first *Boy, startNo int, countNum int) {
	// An empty ring is handled up front.
	if first.Next == nil {
		fmt.Println("空的链表,没有小孩")
		return
	}
	// Position tail on the last node of the ring; tail is the helper used to
	// unlink the node that first points at.
	tail := first
	for tail.Next != first {
		tail = tail.Next
	}
	// Advance both pointers so counting starts at child startNo.
	for i := 1; i <= startNo-1; i++ {
		first = first.Next
		tail = tail.Next
	}
	for {
		// Count countNum-1 steps; first then points at the child to remove.
		for i := 1; i <= countNum-1; i++ {
			first = first.Next
			tail = tail.Next
		}
		fmt.Printf("小孩编号为%d 出拳 \n", first.No)
		// Unlink the counted child.
		first = first.Next
		tail.Next = first
		// When tail meets first, only one child remains.
		if tail == first {
			break
		}
	}
	fmt.Printf("小孩编号为%d 出拳 ", first.No)
}
// main builds a ring of 5 children and plays the Josephus game starting at
// child 2, counting 3.
func main() {
	ring := AddBoy(5)
	//ShowBoy(ring)
	PlayGame(ring, 2, 3)
}
|
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package nodelabel
import (
"context"
"testing"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
"k8s.io/kubernetes/pkg/scheduler/internal/cache"
)
// TestNodeLabelFilter exercises the NodeLabel Filter plugin against a node
// labeled {foo, bar}: PresentLabels must all exist and AbsentLabels must all
// be missing for the pod to be schedulable.
func TestNodeLabelFilter(t *testing.T) {
	label := map[string]string{"foo": "any value", "bar": "any value"}
	var pod *v1.Pod // nil pod: the plugin only inspects node labels
	tests := []struct {
		name string
		args config.NodeLabelArgs
		res framework.Code
	}{
		{
			name: "present label does not match",
			args: config.NodeLabelArgs{
				PresentLabels: []string{"baz"},
			},
			res: framework.UnschedulableAndUnresolvable,
		},
		{
			name: "absent label does not match",
			args: config.NodeLabelArgs{
				AbsentLabels: []string{"baz"},
			},
			res: framework.Success,
		},
		{
			name: "one of two present labels matches",
			args: config.NodeLabelArgs{
				PresentLabels: []string{"foo", "baz"},
			},
			res: framework.UnschedulableAndUnresolvable,
		},
		{
			name: "one of two absent labels matches",
			args: config.NodeLabelArgs{
				AbsentLabels: []string{"foo", "baz"},
			},
			res: framework.UnschedulableAndUnresolvable,
		},
		{
			name: "all present labels match",
			args: config.NodeLabelArgs{
				PresentLabels: []string{"foo", "bar"},
			},
			res: framework.Success,
		},
		{
			name: "all absent labels match",
			args: config.NodeLabelArgs{
				AbsentLabels: []string{"foo", "bar"},
			},
			res: framework.UnschedulableAndUnresolvable,
		},
		{
			name: "both present and absent label matches",
			args: config.NodeLabelArgs{
				PresentLabels: []string{"foo"},
				AbsentLabels: []string{"bar"},
			},
			res: framework.UnschedulableAndUnresolvable,
		},
		{
			name: "neither present nor absent label matches",
			args: config.NodeLabelArgs{
				PresentLabels: []string{"foz"},
				AbsentLabels: []string{"baz"},
			},
			res: framework.UnschedulableAndUnresolvable,
		},
		{
			name: "present label matches and absent label doesn't match",
			args: config.NodeLabelArgs{
				PresentLabels: []string{"foo"},
				AbsentLabels: []string{"baz"},
			},
			res: framework.Success,
		},
		{
			name: "present label doesn't match and absent label matches",
			args: config.NodeLabelArgs{
				PresentLabels: []string{"foz"},
				AbsentLabels: []string{"bar"},
			},
			res: framework.UnschedulableAndUnresolvable,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			// Build a NodeInfo carrying the labeled node and run Filter.
			node := v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: label}}
			nodeInfo := framework.NewNodeInfo()
			nodeInfo.SetNode(&node)
			p, err := New(&test.args, nil)
			if err != nil {
				t.Fatalf("Failed to create plugin: %v", err)
			}
			status := p.(framework.FilterPlugin).Filter(context.TODO(), nil, pod, nodeInfo)
			if status.Code() != test.res {
				t.Errorf("Status mismatch. got: %v, want: %v", status.Code(), test.res)
			}
		})
	}
}
// TestNodeLabelScore exercises the NodeLabel Score plugin against a node
// labeled {foo, bar}: the score is the fraction of satisfied preferences
// (present labels found + absent labels missing) scaled to MaxNodeScore.
func TestNodeLabelScore(t *testing.T) {
	tests := []struct {
		args config.NodeLabelArgs
		want int64
		name string
	}{
		{
			want: framework.MaxNodeScore,
			args: config.NodeLabelArgs{
				PresentLabelsPreference: []string{"foo"},
			},
			name: "one present label match",
		},
		{
			want: 0,
			args: config.NodeLabelArgs{
				PresentLabelsPreference: []string{"somelabel"},
			},
			name: "one present label mismatch",
		},
		{
			want: framework.MaxNodeScore,
			args: config.NodeLabelArgs{
				PresentLabelsPreference: []string{"foo", "bar"},
			},
			name: "two present labels match",
		},
		{
			want: 0,
			args: config.NodeLabelArgs{
				PresentLabelsPreference: []string{"somelabel1", "somelabel2"},
			},
			name: "two present labels mismatch",
		},
		{
			// Half of the preferences satisfied -> half of the max score.
			want: framework.MaxNodeScore / 2,
			args: config.NodeLabelArgs{
				PresentLabelsPreference: []string{"foo", "somelabel"},
			},
			name: "two present labels only one matches",
		},
		{
			want: 0,
			args: config.NodeLabelArgs{
				AbsentLabelsPreference: []string{"foo"},
			},
			name: "one absent label match",
		},
		{
			want: framework.MaxNodeScore,
			args: config.NodeLabelArgs{
				AbsentLabelsPreference: []string{"somelabel"},
			},
			name: "one absent label mismatch",
		},
		{
			want: 0,
			args: config.NodeLabelArgs{
				AbsentLabelsPreference: []string{"foo", "bar"},
			},
			name: "two absent labels match",
		},
		{
			want: framework.MaxNodeScore,
			args: config.NodeLabelArgs{
				AbsentLabelsPreference: []string{"somelabel1", "somelabel2"},
			},
			name: "two absent labels mismatch",
		},
		{
			want: framework.MaxNodeScore / 2,
			args: config.NodeLabelArgs{
				AbsentLabelsPreference: []string{"foo", "somelabel"},
			},
			name: "two absent labels only one matches",
		},
		{
			want: framework.MaxNodeScore,
			args: config.NodeLabelArgs{
				PresentLabelsPreference: []string{"foo", "bar"},
				AbsentLabelsPreference: []string{"somelabel1", "somelabel2"},
			},
			name: "two present labels match, two absent labels mismatch",
		},
		{
			want: 0,
			args: config.NodeLabelArgs{
				PresentLabelsPreference: []string{"somelabel1", "somelabel2"},
				AbsentLabelsPreference: []string{"foo", "bar"},
			},
			name: "two present labels both mismatch, two absent labels both match",
		},
		{
			// 3 of 4 preferences satisfied -> three quarters of the max score.
			want: 3 * framework.MaxNodeScore / 4,
			args: config.NodeLabelArgs{
				PresentLabelsPreference: []string{"foo", "somelabel"},
				AbsentLabelsPreference: []string{"somelabel1", "somelabel2"},
			},
			name: "two present labels one matches, two absent labels mismatch",
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			// Snapshot containing the single labeled node backs the framework.
			state := framework.NewCycleState()
			node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: map[string]string{"foo": "", "bar": ""}}}
			fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(cache.NewSnapshot(nil, []*v1.Node{node})))
			p, err := New(&test.args, fh)
			if err != nil {
				t.Fatalf("Failed to create plugin: %+v", err)
			}
			nodeName := node.ObjectMeta.Name
			score, status := p.(framework.ScorePlugin).Score(context.Background(), state, nil, nodeName)
			if !status.IsSuccess() {
				t.Errorf("unexpected error: %v", status)
			}
			if test.want != score {
				t.Errorf("Wrong score. got %#v, want %#v", score, test.want)
			}
		})
	}
}
// TestNodeLabelFilterWithoutNode verifies Filter reports framework.Error
// when the NodeInfo carries no node.
func TestNodeLabelFilterWithoutNode(t *testing.T) {
	var pod *v1.Pod
	t.Run("node does not exist", func(t *testing.T) {
		emptyNodeInfo := framework.NewNodeInfo()
		plugin, err := New(&config.NodeLabelArgs{}, nil)
		if err != nil {
			t.Fatalf("Failed to create plugin: %v", err)
		}
		got := plugin.(framework.FilterPlugin).Filter(context.TODO(), nil, pod, emptyNodeInfo)
		if got.Code() != framework.Error {
			t.Errorf("Status mismatch. got: %v, want: %v", got.Code(), framework.Error)
		}
	})
}
// TestNodeLabelScoreWithoutNode verifies Score reports framework.Error when
// the snapshot contains no nodes.
func TestNodeLabelScoreWithoutNode(t *testing.T) {
	t.Run("node does not exist", func(t *testing.T) {
		fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(cache.NewEmptySnapshot()))
		plugin, err := New(&config.NodeLabelArgs{}, fh)
		if err != nil {
			t.Fatalf("Failed to create plugin: %+v", err)
		}
		_, got := plugin.(framework.ScorePlugin).Score(context.Background(), nil, nil, "")
		if got.Code() != framework.Error {
			t.Errorf("Status mismatch. got: %v, want: %v", got.Code(), framework.Error)
		}
	})
}
|
package dao
import (
"webapp/entities"
)
// StudentDAO is the data-access interface for Student entities.
type StudentDAO interface {
	FindAll() []entities.Student // all stored students
	Find(id int) *entities.Student // lookup by id; nil semantics defined by implementation
	Exists(id int) bool // true if a student with id exists
	Delete(id int) bool // remove by id; reports success
	Create(student entities.Student) bool // insert; reports success
	Update(student entities.Student) bool // modify existing; reports success
}
|
package mqtt
import (
"os/exec"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/gstruct"
)
// Before the suite: start a disposable Mosquitto MQTT broker in Docker,
// listening on localhost:1883.
var _ = BeforeSuite(func() {
	cmd := exec.Command("sh", "-c", "docker run --detach --rm --name mosquitto --publish 1883:1883 eclipse-mosquitto")
	err := cmd.Run()
	Ω(err).NotTo(HaveOccurred(), "mosquitto cannot start")
})
// After the suite: stop the broker container (removed automatically via --rm).
var _ = AfterSuite(func() {
	cmd := exec.Command("sh", "-c", "docker stop mosquitto")
	err := cmd.Run()
	Ω(err).NotTo(HaveOccurred(), "mosquitto cannot stop")
})
// Spec suite for the mqtt package: subscribe/unsubscribe bookkeeping and
// publish/receive round-trips against the local broker.
var _ = Describe("mqtt", func() {
	brok := "tcp://localhost:1883"
	topi := "myTopic"
	// BUG FIX: this container was FDescribe (a focused container), which
	// makes ginkgo silently skip every other spec in the suite; an accidental
	// leftover from local debugging. Restored to Describe.
	Describe("subscribe and unsubscribe", func() {
		It("one topic", func() {
			By("subscribe")
			err := SubBrokerTopic(brok, topi, nil)
			Ω(err).ToNot(HaveOccurred(), "cannot subscribe")
			// One broker connection tracked globally, with the topic registered.
			Ω(_Global.mapConn).To(HaveLen(1))
			_broker := _Global.mapConn[brok]
			Ω(_broker.client).ToNot(BeZero())
			Ω(_broker.chMsg).ToNot(BeZero())
			Ω(_broker.chQuit).ToNot(BeZero())
			Ω(_broker.mapTopic).To(And(
				Not(BeZero()),
				HaveLen(1),
				MatchAllKeys(Keys{
					topi: Not(BeZero()),
				})))
			By("unsubscribe")
			err = UnSubBrokerTopic(brok, topi)
			Ω(err).ToNot(HaveOccurred(), "cannot unsubscribe")
			// Unsubscribing the last topic closes the quit channel and
			// eventually drops the broker connection entry.
			Ω(_broker.chQuit).To(BeClosed())
			Eventually(func() int {
				return len(_Global.mapConn)
			}).Should(Equal(0))
		})
	})
	Describe("resubscribe", func() {
	})
	Describe("publish and receive", func() {
		var chMsg chan string
		var _broker *broker
		BeforeEach(func() {
			chMsg = make(chan string)
			err := SubBrokerTopic(brok, topi, func(topic, message string) {
				chMsg <- message
			})
			Ω(err).ToNot(HaveOccurred(), "cannot subscribe")
			_broker = _Global.mapConn[brok]
			Ω(_broker).ToNot(BeNil())
		})
		AfterEach(func() {
			err := UnSubBrokerTopic(brok, topi)
			Ω(err).ToNot(HaveOccurred(), "cannot unsubscribe")
			close(chMsg)
		})
		It("should publish and receive message success", func() {
			By("publish")
			token := _broker.client.Publish(topi, byte(2), false, "hello")
			token.Wait()
			Ω(token.Error()).ToNot(HaveOccurred(), "cannot publish")
			By("receive")
			Ω(<-chMsg).To(Equal("hello"))
		})
	})
	Describe("mixture", func() {
	})
})
|
package main
import (
"bufio"
"encoding/json"
"fmt"
"os"
"strings"
)
// Question pairs a map key (Subject) with the prompt shown to the user.
type Question struct {
	Subject string // key under which the answer is stored
	Text string // prompt printed before reading input
}
// main prompts the user for a few details on stdin, collects the answers in
// a map, and prints the map as JSON.
func main() {
	questions := []Question{
		{
			Subject: "name",
			Text: "Please enter your first name...",
		},
		{
			Subject: "address",
			Text: "Please enter your address...",
		},
	}
	// Ask each question in order and record the trimmed answer.
	answers := make(map[string]string)
	in := bufio.NewReader(os.Stdin)
	for _, q := range questions {
		fmt.Println(q.Text)
		line, err := in.ReadString('\n')
		if err != nil {
			panic(err)
		}
		// Strip the trailing newline (and CR on Windows).
		answers[q.Subject] = strings.TrimRight(line, "\r\n")
	}
	// Encode the collected answers as a JSON object and print it.
	encoded, err := json.Marshal(answers)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(encoded))
}
|
package evs
import (
"github.com/apache/pulsar-client-go/pulsar"
pb "github.com/cybermaggedon/evs-golang-api/protos"
"github.com/golang/protobuf/proto"
)
// EventProducer wraps Pulsar communication and cyberprobe event encoding:
// it marshals protobuf events and forwards them via the embedded Producer.
type EventProducer struct {
	*Producer
}
// NewEventProducer initialises the analytic's event producer from the output
// topic configuration; it fails if the underlying Producer cannot be built.
func NewEventProducer(c HasOutputTopics) (*EventProducer, error) {
	producer, err := NewProducer(c)
	if err != nil {
		return nil, err
	}
	return &EventProducer{Producer: producer}, nil
}
// Output marshals the event to protobuf and hands it to the embedded
// Producer, keyed by the event id. Returns an error only when marshalling
// fails.
func (a *EventProducer) Output(ev *pb.Event, properties map[string]string) error {
	payload, err := proto.Marshal(ev)
	if err != nil {
		return err
	}
	// Delegate delivery of the assembled message to the Producer.
	a.Producer.Output(&pulsar.ProducerMessage{
		Payload:    payload,
		Properties: properties,
		Key:        ev.Id,
	})
	return nil
}
|
package sqlite
import (
"database/sql"
"time"
"github.com/Tanibox/tania-core/src/assets/domain"
"github.com/Tanibox/tania-core/src/assets/repository"
"github.com/Tanibox/tania-core/src/assets/storage"
)
type MaterialReadRepositorySqlite struct {
DB *sql.DB
}
func NewMaterialReadRepositorySqlite(db *sql.DB) repository.MaterialReadRepository {
return &MaterialReadRepositorySqlite{DB: db}
}
// Save inserts or updates the MATERIAL_READ row for the given read model,
// keyed by UID. The write happens asynchronously: the returned channel
// yields exactly one value (nil on success, an error otherwise) and is
// then closed.
func (f *MaterialReadRepositorySqlite) Save(materialRead *storage.MaterialRead) <-chan error {
	result := make(chan error)
	go func() {
		// Always close the channel so the receiver can never block after
		// the single send below.
		defer close(result)
		// Decide between INSERT and UPDATE by probing for an existing row.
		count := 0
		err := f.DB.QueryRow(`SELECT COUNT(*) FROM MATERIAL_READ WHERE UID = ?`, materialRead.UID).Scan(&count)
		if err != nil {
			// BUG FIX: the original sent the error and kept executing,
			// eventually blocking forever on a second send to this
			// unbuffered channel (goroutine leak). Send once and return.
			result <- err
			return
		}
		// Flatten the polymorphic material type into its code for storage.
		var typeData string
		switch t := materialRead.Type.(type) {
		case domain.MaterialTypeSeed:
			typeData = t.PlantType.Code
		case domain.MaterialTypePlant:
			typeData = t.PlantType.Code
		case domain.MaterialTypeAgrochemical:
			typeData = t.ChemicalType.Code
		case domain.MaterialTypeSeedingContainer:
			typeData = t.ContainerType.Code
		}
		// Expiration date is optional; store the empty string when absent.
		expirationDate := ""
		if materialRead.ExpirationDate != nil {
			expirationDate = materialRead.ExpirationDate.Format(time.RFC3339)
		}
		if count > 0 {
			_, err = f.DB.Exec(`UPDATE MATERIAL_READ SET
NAME = ?, PRICE_PER_UNIT = ?, CURRENCY_CODE = ?, TYPE = ?, TYPE_DATA = ?,
QUANTITY = ?, QUANTITY_UNIT = ?, EXPIRATION_DATE = ?, NOTES = ?,
PRODUCED_BY = ?, CREATED_DATE = ?
WHERE UID = ?`,
				materialRead.Name,
				materialRead.PricePerUnit.Amount,
				materialRead.PricePerUnit.CurrencyCode,
				materialRead.Type.Code(),
				typeData,
				materialRead.Quantity.Value,
				materialRead.Quantity.Unit.Code,
				expirationDate,
				materialRead.Notes,
				materialRead.ProducedBy,
				materialRead.CreatedDate.Format(time.RFC3339),
				materialRead.UID)
		} else {
			_, err = f.DB.Exec(`INSERT INTO MATERIAL_READ
(UID, NAME, PRICE_PER_UNIT, CURRENCY_CODE, TYPE, TYPE_DATA, QUANTITY,
QUANTITY_UNIT, EXPIRATION_DATE, NOTES, PRODUCED_BY, CREATED_DATE)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
				materialRead.UID,
				materialRead.Name,
				materialRead.PricePerUnit.Amount,
				materialRead.PricePerUnit.CurrencyCode,
				materialRead.Type.Code(),
				typeData,
				materialRead.Quantity.Value,
				materialRead.Quantity.Unit.Code,
				expirationDate,
				materialRead.Notes,
				materialRead.ProducedBy,
				materialRead.CreatedDate.Format(time.RFC3339))
		}
		if err != nil {
			result <- err
			return
		}
		result <- nil
	}()
	return result
}
|
package pg
import (
"context"
"database/sql"
"time"
"github.com/tnclong/go-que"
)
// job is the Postgres-backed implementation of a queued unit of work.
// Fields mirror columns of the goque_jobs table; tx, when set via In,
// makes the mutating methods run inside the caller's transaction.
type job struct {
	db           *sql.DB // pool used when no transaction is enlisted
	tx           *sql.Tx // optional enclosing transaction (see In)
	id           int64
	queue        string
	args         []byte
	runAt        time.Time
	retryCount   int
	lastErrMsg   sql.NullString // NULL until a retry records an error
	lastErrStack sql.NullString // NULL until a retry records a stack trace
}
// ID returns the job's primary key.
func (j *job) ID() int64 {
	return j.id
}

// Queue returns the queue name this job belongs to.
func (j *job) Queue() string {
	return j.queue
}

// Args returns the raw (serialized) job arguments.
func (j *job) Args() []byte {
	return j.args
}

// RunAt returns the time at which the job becomes eligible to run.
func (j *job) RunAt() time.Time {
	return j.runAt
}

// RetryCount returns how many times the job has been retried.
func (j *job) RetryCount() int {
	return j.retryCount
}

// LastErrMsg returns the most recent error message, or "" when none
// has been recorded (NULL column).
func (j *job) LastErrMsg() string {
	return j.lastErrMsg.String
}

// LastErrStack returns the most recent error stack, or "" when none
// has been recorded (NULL column).
func (j *job) LastErrStack() string {
	return j.lastErrStack.String
}

// In enlists the job's subsequent state changes in tx; pass nil to go
// back to using the connection pool.
func (j *job) In(tx *sql.Tx) {
	j.tx = tx
}
// doneJob marks a job finished by stamping done_at.
const doneJob = `UPDATE goque_jobs
SET done_at = now()
WHERE id = $1::bigint`

// Done marks the job as successfully completed, keeping its row for
// bookkeeping (contrast with Destroy).
func (j *job) Done(ctx context.Context) error {
	_, err := j.exec(j.tx)(ctx, doneJob, j.id)
	return err
}

// destroyJob removes the job row entirely.
const destroyJob = `DELETE
FROM goque_jobs
WHERE id = $1::bigint`

// Destroy deletes the job from the queue table.
func (j *job) Destroy(ctx context.Context) error {
	_, err := j.exec(j.tx)(ctx, destroyJob, j.id)
	return err
}

// expireJob bumps the retry counter and stamps expired_at.
const expireJob = `UPDATE goque_jobs
SET retry_count = retry_count + 1,
expired_at = now()
WHERE id = $1::bigint`

// Expire marks the job as expired (gave up) after one more failed attempt.
func (j *job) Expire(ctx context.Context) error {
	_, err := j.exec(j.tx)(ctx, expireJob, j.id)
	return err
}
// retryJob reschedules the job after a delay, bumping retry_count and
// recording the error message/stack truncated to the column limits.
const retryJob = `UPDATE goque_jobs
SET retry_count = retry_count + 1,
run_at = now() + $1::float * '1 second'::interval,
last_err_msg = left($2::text, 512),
last_err_stack = left($3::text, 8192)
WHERE id = $4::bigint`

// RetryIn schedules another attempt after interval. When cerr is non-nil
// its message and a captured stack are stored; otherwise args[1]/args[2]
// stay nil, so SQL left(NULL, n) keeps the error columns NULL.
func (j *job) RetryIn(ctx context.Context, interval time.Duration, cerr error) error {
	args := make([]interface{}, 4)
	args[0] = interval.Seconds()
	if cerr != nil {
		args[1] = cerr.Error()
		// Skip 4 frames so the stack starts at the caller's code.
		args[2] = que.Stack(4)
	}
	args[3] = j.id
	_, err := j.exec(j.tx)(ctx, retryJob, args...)
	return err
}
// exec chooses the executor for this job's SQL statements: the enlisted
// transaction when In was called with one, otherwise the pool connection.
func (j *job) exec(tx *sql.Tx) func(context.Context, string, ...interface{}) (sql.Result, error) {
	if tx == nil {
		return j.db.ExecContext
	}
	return tx.ExecContext
}
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//39. Combination Sum
//Given a set of candidate numbers (candidates) (without duplicates) and a target number (target), find all unique combinations in candidates where the candidate numbers sum to target.
//The same repeated number may be chosen from candidates unlimited number of times.
//Note:
//All numbers (including target) will be positive integers.
//The solution set must not contain duplicate combinations.
//Example 1:
//Input: candidates = [2,3,6,7], target = 7,
//A solution set is:
//[
// [7],
// [2,2,3]
//]
//Example 2:
//Input: candidates = [2,3,5], target = 8,
//A solution set is:
//[
// [2,2,2,2],
// [2,3,3],
// [3,5]
//]
//func combinationSum(candidates []int, target int) [][]int {
//}
// Time Is Money
|
package secret
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"reflect"
"runtime"
"strings"
"gopkg.in/yaml.v2"
uuid "github.com/satori/go.uuid"
"golang.org/x/crypto/ssh/terminal"
"github.com/werf/logboek"
"github.com/werf/logboek/pkg/style"
"github.com/werf/werf/pkg/deploy/secrets_manager"
"github.com/werf/werf/pkg/secret"
"github.com/werf/werf/pkg/util"
"github.com/werf/werf/pkg/werf"
)
// SecretEdit decrypts the secret file at filePath, opens the plaintext in
// the user's editor, and re-encrypts the result back in place. When values
// is true the file is treated as YAML whose individual values are
// encrypted; otherwise the whole file body is a single ciphertext. When
// encryption of the edited content fails, the user may choose to keep
// editing instead of losing the changes.
func SecretEdit(ctx context.Context, m *secrets_manager.SecretsManager, workingDir, filePath string, values bool) error {
	var encoder *secret.YamlEncoder
	if enc, err := m.GetYamlEncoder(ctx, workingDir); err != nil {
		return err
	} else {
		encoder = enc
	}
	// data is the decrypted plaintext, encodedData the ciphertext on disk;
	// both are nil when the file does not exist yet.
	data, encodedData, err := readEditedFile(filePath, values, encoder)
	if err != nil {
		return err
	}
	// Edit a uniquely named temp copy so a crashed editor never corrupts
	// the real secret file; best-effort cleanup on return.
	tmpFilePath := filepath.Join(werf.GetTmpDir(), fmt.Sprintf("werf-edit-secret-%s.yaml", uuid.NewV4().String()))
	defer os.RemoveAll(tmpFilePath)
	if err := createTmpEditedFile(tmpFilePath, data); err != nil {
		return err
	}
	bin, binArgs, err := editor()
	if err != nil {
		return err
	}
	args := append(binArgs, tmpFilePath)
	// One interactive editor session plus re-encryption; extracted into a
	// closure so it can be retried when encryption fails.
	editIteration := func() error {
		cmd := exec.Command(bin, args...)
		cmd.Stdout = os.Stdout
		cmd.Stdin = os.Stdin
		cmd.Stderr = os.Stderr
		err := cmd.Run()
		if err != nil {
			return err
		}
		newData, err := ioutil.ReadFile(tmpFilePath)
		if err != nil {
			return err
		}
		var newEncodedData []byte
		if values {
			newEncodedData, err = encoder.EncryptYamlData(newData)
			if err != nil {
				return err
			}
		} else {
			newEncodedData, err = encoder.Encrypt(newData)
			if err != nil {
				return err
			}
			// Whole-file ciphertext gets a trailing newline for tidy files.
			newEncodedData = append(newEncodedData, []byte("\n")...)
		}
		// Only rewrite the file when the plaintext actually changed.
		if !bytes.Equal(data, newData) {
			if values {
				// Keep the previous ciphertext for values whose plaintext
				// is unchanged, so diffs stay stable in version control.
				newEncodedData, err = prepareResultValuesData(data, encodedData, newData, newEncodedData)
				if err != nil {
					return err
				}
			}
			if err := SaveGeneratedData(filePath, newEncodedData); err != nil {
				return err
			}
		}
		return nil
	}
	for {
		err := editIteration()
		if err != nil {
			// On encryption errors, offer to re-open the editor so the
			// user can fix the content instead of losing the edit.
			if strings.HasPrefix(err.Error(), "encryption failed") {
				logboek.Warn().LogF("Error: %s\n", err)
				ok, err := askForConfirmation()
				if err != nil {
					return err
				}
				if ok {
					continue
				}
			}
			return err
		}
		break
	}
	return nil
}
// readEditedFile loads filePath (when it exists) and returns both the
// decrypted plaintext and the raw ciphertext. A missing file yields two
// nil slices without error. values selects YAML-aware decryption of
// individual values rather than whole-file decryption.
func readEditedFile(filePath string, values bool, encoder *secret.YamlEncoder) ([]byte, []byte, error) {
	exist, err := util.FileExists(filePath)
	if err != nil {
		return nil, nil, err
	}
	if !exist {
		// Nothing on disk yet: caller starts from an empty document.
		return nil, nil, nil
	}
	encodedData, err := ioutil.ReadFile(filePath)
	if err != nil {
		return nil, nil, err
	}
	encodedData = bytes.TrimSpace(encodedData)
	var data []byte
	if values {
		data, err = encoder.DecryptYamlData(encodedData)
	} else {
		data, err = encoder.Decrypt(encodedData)
	}
	if err != nil {
		return nil, nil, err
	}
	return data, encodedData, nil
}
// askForConfirmation prompts whether to keep editing and reads a single
// key press from stdin. 'y', 'Y' and 13 (carriage return, i.e. Enter in
// raw mode) count as yes; any other byte is no.
func askForConfirmation() (bool, error) {
	r := os.Stdin
	fmt.Println(logboek.Colorize(style.Highlight(), "Do you want to continue editing the file (Y/n)?"))
	// Switch the terminal to raw mode so a single keystroke suffices;
	// the previous state is restored when the function returns.
	isTerminal := terminal.IsTerminal(int(r.Fd()))
	if isTerminal {
		if oldState, err := terminal.MakeRaw(int(r.Fd())); err != nil {
			return false, err
		} else {
			defer terminal.Restore(int(r.Fd()), oldState)
		}
	}
	var buf [1]byte
	n, err := r.Read(buf[:])
	if n > 0 {
		switch buf[0] {
		case 'y', 'Y', 13: // 13 = CR (Enter while in raw mode)
			return true, nil
		default:
			return false, nil
		}
	}
	// Nothing was read: EOF means "no" without error; anything else is a
	// genuine read failure.
	if err != nil && err != io.EOF {
		return false, err
	}
	return false, nil
}
// createTmpEditedFile seeds the temporary file handed to the editor with
// the current plaintext.
func createTmpEditedFile(filePath string, data []byte) error {
	return SaveGeneratedData(filePath, data)
}
func editor() (string, []string, error) {
var editorArgs []string
editorValue := os.Getenv("EDITOR")
if editorValue != "" {
editorFields := strings.Fields(editorValue)
return editorFields[0], editorFields[1:], nil
}
var defaultEditors []string
if runtime.GOOS == "windows" {
defaultEditors = []string{"notepad"}
} else {
defaultEditors = []string{"vim", "vi", "nano"}
}
for _, bin := range defaultEditors {
if _, err := exec.LookPath(bin); err != nil {
continue
}
return bin, editorArgs, nil
}
return "", editorArgs, fmt.Errorf("editor not detected")
}
func prepareResultValuesData(data, encodedData, newData, newEncodedData []byte) ([]byte, error) {
dataConfig, err := unmarshalYaml(data)
if err != nil {
return nil, err
}
encodeDataConfig, err := unmarshalYaml(encodedData)
if err != nil {
return nil, err
}
newDataConfig, err := unmarshalYaml(newData)
if err != nil {
return nil, err
}
newEncodedDataConfig, err := unmarshalYaml(newEncodedData)
if err != nil {
return nil, err
}
resultEncodedDataConfig, err := mergeYamlEncodedData(dataConfig, encodeDataConfig, newDataConfig, newEncodedDataConfig)
if err != nil {
return nil, err
}
resultEncodedData, err := yaml.Marshal(&resultEncodedDataConfig)
if err != nil {
return nil, err
}
return resultEncodedData, nil
}
// unmarshalYaml strictly decodes data into an ordered yaml.MapSlice,
// rejecting duplicate keys and other constructs UnmarshalStrict forbids.
func unmarshalYaml(data []byte) (yaml.MapSlice, error) {
	config := make(yaml.MapSlice, 0)
	if err := yaml.UnmarshalStrict(data, &config); err != nil {
		return nil, err
	}
	return config, nil
}
// mergeYamlEncodedData recursively merges old and new encrypted YAML
// documents. d/eD are the previous plaintext/ciphertext trees, newD/newED
// the edited plaintext and its freshly encrypted counterpart. For every
// leaf whose plaintext is unchanged the old ciphertext is kept; changed
// or re-typed nodes take the new ciphertext.
//
// NOTE(review): the code assumes eD structurally mirrors d and newED
// mirrors newD index-for-index (see the [ind]/[dInd] lookups) — true when
// each pair came from the same encrypt/decrypt pass; confirm before
// reusing this elsewhere.
func mergeYamlEncodedData(d, eD, newD, newED interface{}) (interface{}, error) {
	dType := reflect.TypeOf(d)
	newDType := reflect.TypeOf(newD)
	// A node whose dynamic type changed is treated as brand new.
	if dType != newDType {
		return newED, nil
	}
	switch newD.(type) {
	case yaml.MapSlice:
		newDMapSlice := newD.(yaml.MapSlice)
		dMapSlice := d.(yaml.MapSlice)
		resultMapSlice := make(yaml.MapSlice, len(newDMapSlice))
		// Locate an old entry by key; returns (0, nil) when absent.
		findDMapItemByKey := func(key interface{}) (int, *yaml.MapItem) {
			for ind, elm := range dMapSlice {
				if elm.Key == key {
					return ind, &elm
				}
			}
			return 0, nil
		}
		for ind, elm := range newDMapSlice {
			// Encrypted counterpart of the new entry at the same index.
			newEDMapItem := newED.(yaml.MapSlice)[ind]
			resultMapItem := newEDMapItem
			dInd, dElm := findDMapItemByKey(elm.Key)
			if dElm != nil {
				// Key existed before: recurse to preserve unchanged leaves.
				eDMapItem := eD.(yaml.MapSlice)[dInd]
				result, err := mergeYamlEncodedData(dMapSlice[dInd], eDMapItem, newDMapSlice[ind], newEDMapItem)
				if err != nil {
					return nil, err
				}
				resultMapItem = result.(yaml.MapItem)
			}
			resultMapSlice[ind] = resultMapItem
		}
		return resultMapSlice, nil
	case yaml.MapItem:
		var resultMapItem yaml.MapItem
		newDMapItem := newD.(yaml.MapItem)
		newEDMapItem := newED.(yaml.MapItem)
		dMapItem := d.(yaml.MapItem)
		eDMapItem := eD.(yaml.MapItem)
		resultMapItem.Key = newDMapItem.Key
		resultValue, err := mergeYamlEncodedData(dMapItem.Value, eDMapItem.Value, newDMapItem.Value, newEDMapItem.Value)
		if err != nil {
			return nil, err
		}
		resultMapItem.Value = resultValue
		return resultMapItem, nil
	default:
		// Scalar leaf: keep old ciphertext only when plaintext is equal.
		if !reflect.DeepEqual(d, newD) {
			return newED, nil
		} else {
			return eD, nil
		}
	}
}
|
package main
import (
"bytes"
"encoding/json"
"flag"
"os/exec"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"strconv"
"text/template"
"github.com/gorilla/mux"
)
// config
var saveFile = "./sites.json"     // JSON persistence file for the sites map
var saveCaddyFile = "./Caddyfile" // generated Caddy server configuration

// models

// Site describes one reverse-proxied domain managed by this tool; the
// fields feed the Caddyfile template in saveCaddy.
type Site struct {
	Id          int    `json:"id"`
	Title       string `json:"title"`
	Description string `json:"description"`
	Domain      string `json:"domain"`
	Source      string `json:"source"`
	Target      string `json:"target"`
	Email       string `json:"email"`
}

// sites is the in-memory store, keyed by Site.Id.
// NOTE(review): access is not synchronized; concurrent HTTP handlers can
// race on this map — confirm whether that is acceptable.
var sites = make(map[int]Site)
// functions

// getBiggest returns the largest key present in the map, or 0 when the
// map is empty (used to allocate the next site ID).
func getBiggest(map1 map[int]Site) int {
	largest := 0
	for key := range map1 {
		if key > largest {
			largest = key
		}
	}
	return largest
}
// persist config

// load restores the sites map from the JSON save file. Missing or invalid
// files are logged but non-fatal so a fresh install simply starts empty.
func load() {
	data, err := ioutil.ReadFile(saveFile)
	if err != nil {
		// BUG FIX: the original silently discarded read errors.
		log.Printf("load: %v", err)
		return
	}
	if err := json.Unmarshal(data, &sites); err != nil {
		log.Printf("load: %v", err)
	}
}
// save persists the sites map to disk, regenerates the Caddyfile and
// signals Caddy to reload. Errors are logged rather than fatal so a
// transient disk problem does not kill the API server.
func save() {
	jsonString, err := json.Marshal(sites)
	if err != nil {
		// BUG FIX: the original ignored marshal/write errors entirely.
		log.Printf("save: %v", err)
		return
	}
	if err := ioutil.WriteFile(saveFile, jsonString, 0777); err != nil {
		log.Printf("save: %v", err)
	}
	saveCaddy()
	restartCaddy()
}
// saveCaddy renders one Caddy virtual-host stanza per configured site and
// rewrites the Caddyfile from scratch.
func saveCaddy() {
	tmpl, err := template.New("Caddy").Parse(`
{{.Domain}} {
proxy {{.Source}} {{.Target}}
tls {{.Email}}
}`)
	if err != nil {
		// The template is a constant; failing to parse it is a bug.
		log.Fatal(err)
	}
	buf := &bytes.Buffer{}
	for _, value := range sites {
		if err := tmpl.Execute(buf, value); err != nil {
			// BUG FIX: execution errors were silently discarded.
			log.Printf("saveCaddy: %v", err)
		}
	}
	// O_TRUNC empties the file on open, replacing the original
	// Truncate(0)+Seek(0,0) dance.
	f, err := os.OpenFile(saveCaddyFile, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0777)
	if err != nil {
		log.Fatal(err)
	}
	// BUG FIX: the original never closed the file handle (leak).
	defer f.Close()
	if _, err := f.WriteString(buf.String()); err != nil {
		log.Printf("saveCaddy: %v", err)
	}
}
// startCaddy launches caddy in the background via a shell (nohup + &).
func startCaddy() {
	// BUG FIX: exec.Command treats its first argument as the binary name;
	// the original passed the entire command line as one string, which
	// can never resolve to an executable. Run it through `sh -c` instead.
	cmd := exec.Command("sh", "-c", "nohup caddy -agree &")
	stdout, err := cmd.Output()
	if err != nil {
		fmt.Println(err.Error())
		return
	}
	fmt.Print(string(stdout))
}
// restartCaddy asks a running caddy to reload its configuration by
// sending SIGUSR1 via pkill.
func restartCaddy() {
	// BUG FIX: as in startCaddy, the whole command line was passed as the
	// binary name. pkill needs no shell, so invoke it directly.
	cmd := exec.Command("pkill", "-USR1", "caddy")
	stdout, err := cmd.Output()
	if err != nil {
		fmt.Println(err.Error())
		return
	}
	fmt.Print(string(stdout))
}
// site controllers

// getSites writes the full site map as a JSON object keyed by site ID.
func getSites(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(sites)
}
// getSite writes the site stored under the {id} URL parameter as JSON
// (the zero Site when the id is unknown).
func getSite(w http.ResponseWriter, r *http.Request) {
	params := mux.Vars(r)
	w.Header().Set("Content-Type", "application/json")
	id, err := strconv.Atoi(params["id"])
	if err != nil {
		// BUG FIX: the original ignored the Atoi error and served id 0.
		http.Error(w, "invalid site id", http.StatusBadRequest)
		return
	}
	json.NewEncoder(w).Encode(sites[id])
}
// updateSite replaces the site stored under the {id} URL parameter with
// the decoded JSON body, persists the change, and echoes the stored site.
func updateSite(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	params := mux.Vars(r)
	id, err := strconv.Atoi(params["id"])
	if err != nil {
		http.Error(w, "invalid site id", http.StatusBadRequest)
		return
	}
	var site Site
	// BUG FIX: reject malformed bodies before touching stored state; the
	// original deleted the existing entry first and ignored decode errors,
	// so a bad request wiped the site.
	if err := json.NewDecoder(r.Body).Decode(&site); err != nil {
		http.Error(w, "invalid JSON body", http.StatusBadRequest)
		return
	}
	sites[id] = site
	json.NewEncoder(w).Encode(site)
	fmt.Println("Changed Site:")
	fmt.Println(site)
	save()
}
// createSite decodes a Site from the request body, assigns it the next
// free ID (overriding any client-supplied id), stores and persists it,
// and echoes the stored site back as JSON.
func createSite(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	var site Site
	// BUG FIX: the original ignored decode errors and stored a zero Site.
	if err := json.NewDecoder(r.Body).Decode(&site); err != nil {
		http.Error(w, "invalid JSON body", http.StatusBadRequest)
		return
	}
	newID := getBiggest(sites) + 1
	site.Id = newID
	sites[newID] = site
	json.NewEncoder(w).Encode(site)
	fmt.Println("New Site:")
	fmt.Println(site)
	save()
}
// deleteSite removes the site stored under the {id} URL parameter,
// persists the change and responds with JSON `true`.
func deleteSite(w http.ResponseWriter, r *http.Request) {
	params := mux.Vars(r)
	w.Header().Set("Content-Type", "application/json")
	id, err := strconv.Atoi(params["id"])
	if err != nil {
		// BUG FIX: the original ignored the Atoi error and deleted id 0.
		http.Error(w, "invalid site id", http.StatusBadRequest)
		return
	}
	delete(sites, id)
	json.NewEncoder(w).Encode(true)
	fmt.Print("deleted site: ")
	fmt.Println(id)
	save()
}
// index serves the management UI from ./index.html.
func index(w http.ResponseWriter, r *http.Request) {
	t, err := template.ParseFiles("./index.html")
	if err != nil {
		// BUG FIX: the original ignored the parse error and then called
		// Execute on a nil template, panicking when index.html is absent.
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	if err := t.Execute(w, nil); err != nil {
		log.Println(err)
	}
}
// main loads persisted sites, (re)generates the Caddyfile, starts caddy,
// and serves the management REST API plus static UI on :8000.
func main() {
	router := mux.NewRouter()
	load()
	saveCaddy()
	startCaddy()
	// REST handlers for site management.
	router.HandleFunc("/api/Sites", getSites).Methods("GET")
	router.HandleFunc("/api/Sites/{id}", getSite).Methods("GET")
	router.HandleFunc("/api/Sites", createSite).Methods("POST")
	router.HandleFunc("/api/Sites/{id}", updateSite).Methods("PUT")
	router.HandleFunc("/api/Sites/{id}", deleteSite).Methods("DELETE")
	var dir string
	flag.StringVar(&dir, "dir", "./static", "the directory to serve files from. Defaults to the current dir")
	flag.Parse()
	router.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
	router.HandleFunc("/", index).Methods("GET")
	// ListenAndServe blocks until the server stops. The original bound
	// the result to an identifier named `error`, shadowing the builtin
	// type; persist state on the way out, then report the failure.
	if err := http.ListenAndServe(":8000", router); err != nil {
		save()
		log.Fatal(err)
	}
}
// TODO:
// start caddy with config <- test that
// UI
|
package internal
import (
"encoding/json"
"fmt"
"os"
)
// Config holds the application configuration parsed from the environment.
type Config struct {
	Tokens []authToken // auth tokens decoded from the TOKENS env var (JSON array)
}

// NewConfig builds a Config from the TOKENS environment variable. It
// panics on malformed JSON or duplicate usernames — a deliberate
// fail-fast at startup rather than running misconfigured.
func NewConfig() (config *Config) {
	config = &Config{}
	err := json.Unmarshal([]byte(os.Getenv("TOKENS")), &config.Tokens)
	if err != nil {
		panic(fmt.Sprintf("NewConfig, %v", err))
	}
	// We are associating images to usernames so we can't have two with same name.
	checkDuplicits(&config.Tokens)
	return
}
// checkDuplicits panics when two tokens share the same User: images are
// associated to usernames, so usernames must be unique.
func checkDuplicits(tokens *[]authToken) {
	// map[string]struct{} is the idiomatic zero-width set (the original
	// used map[string]interface{} holding nils).
	seen := make(map[string]struct{}, len(*tokens))
	for _, token := range *tokens {
		if _, ok := seen[token.User]; ok {
			panic(fmt.Sprintf("checkDuplicits.duplicitUser, %v", token.User))
		}
		seen[token.User] = struct{}{}
	}
}
|
package Solution
import (
"reflect"
"strconv"
"testing"
)
// TestSolution runs the shared table of cases against both Solution1 and
// Solution2.
func TestSolution(t *testing.T) {
	// Test cases: input slice and expected result.
	cases := []struct {
		name   string
		nums   []int
		expect int
	}{
		{"TestCase", []int{3, 3, 3, 3, 5, 5, 5, 2, 2, 7}, 2},
		{"TestCase", []int{7, 7, 7, 7, 7, 7}, 1},
		{"TestCase", []int{1000, 1000, 3, 7}, 1},
		{"TestCase", []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, 5},
	}
	// Run the cases.
	for i, c := range cases {
		t.Run(c.name+" "+strconv.Itoa(i), func(t *testing.T) {
			got := Solution1(c.nums)
			if !reflect.DeepEqual(got, c.expect) {
				// BUG FIX: the original printed c.expect for "inputs";
				// the inputs are c.nums.
				t.Fatalf("expected: %v, but got: %v, with inputs: %v",
					c.expect, got, c.nums)
			}
		})
	}
	for i, c := range cases {
		t.Run(c.name+" "+strconv.Itoa(i), func(t *testing.T) {
			got := Solution2(c.nums)
			if !reflect.DeepEqual(got, c.expect) {
				t.Fatalf("expected: %v, but got: %v, with inputs: %v",
					c.expect, got, c.nums)
			}
		})
	}
}
// BenchmarkSolution is a placeholder for stress/benchmark testing.
func BenchmarkSolution(b *testing.B) {
}

// ExampleSolution is a placeholder usage example.
func ExampleSolution() {
}
|
package pn532
import (
"bytes"
"errors"
"github.com/zyxar/berry/core"
)
// Sentinel errors returned by the NTAG helpers below.
var (
	ErrPageOutOfRange = errors.New("page out of range")      // page index beyond tag capacity
	ErrNoAckReceived  = errors.New("no ack received")        // PN532 did not ACK the command
	ErrInvalidDataLen = errors.New("invalid length of data") // payload has the wrong size
)
// NtagReadPage reads the 4-byte page at the specified address from an
// NTAG2xx tag via INDATAEXCHANGE + MIFARE READ.
// TAG Type       PAGES   USER START    USER STOP
// --------       -----   ----------    ---------
// NTAG 203       42      4             39
// NTAG 213       45      4             39
// NTAG 215       135     4             129
// NTAG 216       231     4             225
func NtagReadPage(device Device, page uint8) ([]byte, error) {
	// NTAG 216, the largest supported tag, has 231 pages.
	if page >= 231 {
		return nil, ErrPageOutOfRange
	}
	p := []byte{
		COMMAND_INDATAEXCHANGE,
		1, // card number
		MIFARE_CMD_READ,
		page,
	}
	if !SendCommandCheckAck(device, p, defaultTimeoutMs) {
		return nil, ErrNoAckReceived
	}
	p = make([]byte, 26)
	// BUG FIX: the original allocated the response buffer but never read
	// the device, so the status check and returned data were all zeros.
	device.ReadData(p)
	// Byte 7 carries the INDATAEXCHANGE status; non-zero means failure.
	if p[7] != 0x00 {
		return nil, errors.New("unexpected response")
	}
	// Page content starts at byte 8 of a valid response. The command
	// actually reads 16 bytes (4 pages) at a time; return only the first
	// 4-byte page and discard the rest (the original sliced 6 bytes,
	// contradicting its own "4 data bytes" contract).
	return p[8:12], nil
}
// NtagWritePage writes an entire 4-byte page at the specified block
// address via INDATAEXCHANGE + MIFARE Ultralight WRITE.
// data should be exactly 4 bytes long.
// TAG Type       PAGES   USER START    USER STOP
// --------       -----   ----------    ---------
// NTAG 203       42      4             39
// NTAG 213       45      4             39
// NTAG 215       135     4             129
// NTAG 216       231     4             225
func NtagWritePage(device Device, page uint8, data []byte) error {
	// Only the user area (pages 4..225 on the largest tag) is writable here.
	if page < 4 || page > 225 {
		return ErrPageOutOfRange
	}
	if len(data) != 4 {
		return ErrInvalidDataLen
	}
	// Command frame: exchange with card 1, WRITE opcode, target page, payload.
	p := []byte{
		COMMAND_INDATAEXCHANGE,
		1,
		MIFARE_ULTRALIGHT_CMD_WRITE,
		page,
		data[0],
		data[1],
		data[2],
		data[3],
	}
	if !SendCommandCheckAck(device, p, defaultTimeoutMs) {
		return ErrNoAckReceived
	}
	// Give the tag time to commit the write before draining the response.
	core.Delay(10)
	// Drain the status response; the result bytes are not inspected here.
	p = make([]byte, 26)
	device.ReadData(p)
	return nil
}
/*
NtagWriteNDEFURI writes an NDEF URI Record starting at page 4.
Note that this function assumes that the NTAG2xx card is
already formatted to work as an "NFC Forum Tag".
The id code (0 = none, 0x01 = "http://www.", etc.)
*/
func NtagWriteNDEFURI(device Device, id uint8, url []byte) (err error) {
	// BUG FIX: the original narrowed len(url) to a byte BEFORE the range
	// check and computed `length+1 > 256-12` in uint8 arithmetic, so a
	// 255-byte URL overflowed (255+1 == 0) and slipped past the check.
	// Validate with int arithmetic first, then narrow.
	if len(url) < 1 || len(url)+1 > 256-12 {
		err = ErrInvalidDataLen
		return
	}
	length := byte(len(url))
	head := []byte{ // NDEF Lock Control TLV (must be first and always present)
		0x01,       // Tag Field (0x01 = Lock Control TLV)
		0x03,       // Payload Length (always 3)
		0xA0,       // The position inside the tag of the lock bytes (upper 4 = page address, lower 4 = byte offset)
		0x10,       // Size in bits of the lock area
		0x44,       // Size in bytes of a page and the number of bytes each lock bit can lock (4 bit + 4 bits)
		// NDEF Message TLV - URI Record
		0x03,       // Tag Field (0x03 = NDEF Message)
		length + 5, // Payload Length (not including 0xFE trailer)
		0xD1,       // NDEF Record Header (TNF=0x1: Well known record + SR + ME + MB)
		0x01,       // Type Length for the record type indicator
		length + 1, // Payload length
		0x55,       // Record Type Indicator (0x55 or 'U' = URI Record)
		id,         // URI Prefix (ex. 0x01 = "http://www.")
	}
	// The 12-byte header occupies pages 4-6 (three 4-byte pages).
	if err = NtagWritePage(device, 4, head[:4]); err != nil {
		return err
	}
	if err = NtagWritePage(device, 5, head[4:8]); err != nil {
		return err
	}
	if err = NtagWritePage(device, 6, head[8:12]); err != nil {
		return err
	}
	// Stream the URL 4 bytes per page starting at page 7; the final page
	// is padded with the 0xFE NDEF terminator.
	currentPage := byte(7)
	buf := make([]byte, 4)
	b := bytes.NewBuffer(buf)
	for length > 0 {
		if length < 4 {
			// Last partial page: remaining bytes + terminator.
			b.Reset()
			b.Write(url[:length])
			b.WriteByte(0xFE)
			err = NtagWritePage(device, currentPage, b.Bytes())
			return
		} else if length == 4 {
			// Exactly one page left; the terminator needs its own page.
			b.Reset()
			b.Write(url[:4])
			if err = NtagWritePage(device, currentPage, b.Bytes()); err != nil {
				return
			}
			b.Reset()
			b.WriteByte(0xFE)
			currentPage++
			err = NtagWritePage(device, currentPage, b.Bytes())
			return
		} else {
			// Full page with more data to follow.
			b.Reset()
			b.Write(url[:4])
			if err = NtagWritePage(device, currentPage, b.Bytes()); err != nil {
				return
			}
			currentPage++
			url = url[4:]
			length -= 4
		}
	}
	return nil
}
|
package cmd
import (
"fmt"
"github.com/eibhleag/trictrac/core"
"github.com/spf13/cobra"
)
// getCmd represents the get command: it opens the collection, fetches the
// value stored at the given key and prints the formatted result.
var getCmd = &cobra.Command{
	Use:   "get [key]",
	Short: "get the value at a key",
	Run: func(cmd *cobra.Command, args []string) {
		// BUG FIX: the original indexed args[0] unconditionally and
		// panicked when the command was invoked without a key.
		if len(args) < 1 {
			fmt.Println("get requires a [key] argument")
			return
		}
		c := core.OpenCollection()
		key := args[0]
		value := c.Get(key)
		fmt.Printf("%s\n", formatResult(key, value))
		c.Close()
	},
}
// init registers the get subcommand on the root command.
func init() {
	RootCmd.AddCommand(getCmd)
}
|
package fullrt
import (
"fmt"
"time"
kaddht "github.com/libp2p/go-libp2p-kad-dht"
"github.com/libp2p/go-libp2p-kad-dht/crawler"
"github.com/libp2p/go-libp2p-kad-dht/providers"
)
// config aggregates the tunables for the fullrt DHT client; each field is
// populated by the corresponding Option constructor below.
type config struct {
	dhtOpts             []kaddht.Option   // forwarded verbatim to the underlying kad-dht
	crawlInterval       time.Duration     // how often the network is re-crawled
	waitFrac            float64           // fraction of peers to wait for before success, in (0, 1]
	bulkSendParallelism int               // max concurrent outbound messages (>= 1)
	timeoutPerOp        time.Duration     // per-operation timeout
	crawler             crawler.Crawler   // crawler used to discover peers
	pmOpts              []providers.Option // options for the provider manager
}
// apply runs every Option against cfg, wrapping the first failure with
// the option's position for easier debugging.
func (cfg *config) apply(opts ...Option) error {
	for i, o := range opts {
		err := o(cfg)
		if err == nil {
			continue
		}
		return fmt.Errorf("fullrt dht option %d failed: %w", i, err)
	}
	return nil
}
// Option mutates the fullrt configuration; returning an error aborts setup.
type Option func(opt *config) error

// DHTOption appends options that are forwarded verbatim to the underlying
// kad-dht instance.
func DHTOption(opts ...kaddht.Option) Option {
	return func(c *config) error {
		c.dhtOpts = append(c.dhtOpts, opts...)
		return nil
	}
}
// WithCrawler sets the crawler.Crawler to use in order to crawl the DHT network.
// Defaults to crawler.DefaultCrawler with parallelism of 200.
func WithCrawler(c crawler.Crawler) Option {
	return func(opt *config) error {
		opt.crawler = c
		return nil
	}
}

// WithCrawlInterval sets the interval at which the DHT is crawled to refresh peer store.
// Defaults to 1 hour if unspecified.
func WithCrawlInterval(i time.Duration) Option {
	return func(opt *config) error {
		opt.crawlInterval = i
		return nil
	}
}
// WithSuccessWaitFraction sets the fraction of peers to wait for before considering an operation a success defined as a number between (0, 1].
// Defaults to 30% if unspecified.
func WithSuccessWaitFraction(f float64) Option {
	return func(opt *config) error {
		// Accept only fractions within the half-open interval (0, 1].
		if 0 < f && f <= 1 {
			opt.waitFrac = f
			return nil
		}
		return fmt.Errorf("success wait fraction must be larger than 0 and smaller or equal to 1; got: %f", f)
	}
}
// WithBulkSendParallelism sets the maximum degree of parallelism at which messages are sent to other peers. It must be at least 1.
// Defaults to 20 if unspecified.
func WithBulkSendParallelism(b int) Option {
	return func(opt *config) error {
		// A degree below one would make no progress at all.
		if b >= 1 {
			opt.bulkSendParallelism = b
			return nil
		}
		return fmt.Errorf("bulk send parallelism must be at least 1; got: %d", b)
	}
}
// WithTimeoutPerOperation sets the timeout per operation, where operations include putting providers and querying the DHT.
// Defaults to 5 seconds if unspecified.
func WithTimeoutPerOperation(t time.Duration) Option {
	return func(opt *config) error {
		opt.timeoutPerOp = t
		return nil
	}
}

// WithProviderManagerOptions sets the options to use when instantiating providers.ProviderManager.
func WithProviderManagerOptions(pmOpts ...providers.Option) Option {
	return func(opt *config) error {
		opt.pmOpts = pmOpts
		return nil
	}
}
|
package cancelot
import (
"bytes"
"context"
"log"
"os/exec"
"strings"
"cloud.google.com/go/compute/metadata"
"golang.org/x/oauth2/google"
cloudbuild "google.golang.org/api/cloudbuild/v1"
)
// getProject resolves the current GCP project ID, preferring the GCE
// metadata service and falling back to the locally configured gcloud
// default when not running on GCE.
func getProject() (string, error) {
	// On GCE the metadata service is authoritative.
	if metadata.OnGCE() {
		projectID, err := metadata.ProjectID()
		if err != nil {
			log.Printf("Failed to get project ID from instance metadata")
			return "", err
		}
		return projectID, nil
	}
	// Off GCE: shell out to gcloud for the configured default project.
	var out bytes.Buffer
	cmd := exec.Command("gcloud", "config", "get-value", "project")
	cmd.Stdout = &out
	if err := cmd.Run(); err != nil {
		log.Printf("Failed to shell out to gcloud: %+v", err)
		return "", err
	}
	// gcloud prints the value followed by a newline.
	return strings.TrimSuffix(out.String(), "\n"), nil
}
// gcbClient builds a Cloud Build API client using application-default
// credentials with the cloud-platform scope. Failures are fatal since the
// tool cannot operate without the API.
func gcbClient(ctx context.Context) *cloudbuild.Service {
	client, err := google.DefaultClient(ctx, cloudbuild.CloudPlatformScope)
	if err != nil {
		log.Fatalf("Caught error creating client: %v", err)
	}
	// NOTE(review): cloudbuild.New is the legacy constructor; consider
	// cloudbuild.NewService(ctx, ...) when upgrading the client library.
	svc, err := cloudbuild.New(client)
	if err != nil {
		log.Fatalf("Caught error creating service: %v", err)
	}
	return svc
}
|
//+build !test
package main
import (
"fmt"
"gitlab.com/gitmate-micro/listen/provider"
"github.com/micro/go-log"
"github.com/micro/go-web"
)
// main boots the webhook listener: it registers the GitHub and GitLab
// providers on their endpoints (POST only) and runs the go-micro web
// service until it exits.
func main() {
	// Startup banner (runtime string — kept verbatim).
	fmt.Print(`
    ___ __
   / (_)____/ /____ ____
  / / / ___/ __/ _ \/ __ \
 / / (__ ) /_/ __/ / / /
/_/_/____/\__/\___/_/ /_/ server catches webhooks :P
`)
	// create new web service
	service := web.NewService(
		web.Name("gitmate.micro.web.listen"),
		web.Version("latest"),
	)
	github := &provider.GitHub{}
	gitlab := &provider.GitLab{}
	// register call handlers; RejectOtherMethods wraps each provider so
	// only POST requests reach it.
	service.HandleFunc("/github", RejectOtherMethods("POST", github))
	service.HandleFunc("/gitlab", RejectOtherMethods("POST", gitlab))
	// initialise service
	if err := service.Init(); err != nil {
		log.Fatal(err)
	}
	// run service (blocks until shutdown)
	if err := service.Run(); err != nil {
		log.Fatal(err)
	}
}
|
package main
import (
"fmt"
"os"
)
// main streams the contents of astaxie.txt to stdout in 1 KiB chunks.
func main() {
	baseDir := "/home/test/go/src/go-leanring/file/"
	file, err := os.Open(baseDir + "astaxie.txt")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer file.Close()
	buf := make([]byte, 1024)
	for {
		n, err := file.Read(buf)
		// Per the io.Reader contract, flush any bytes read even when an
		// error is returned alongside them.
		if n > 0 {
			os.Stdout.Write(buf[:n])
		}
		// BUG FIX: the original discarded the read error entirely; both
		// io.EOF and genuine failures now terminate the loop.
		if err != nil {
			break
		}
	}
}
|
package day8
import (
"fmt"
"io/ioutil"
"os"
"strconv"
"strings"
)
//DayEightOne Day eight task one
func DayEightOne() {
input, err := ioutil.ReadFile("./8/input.txt")
if err != nil {
fmt.Println(err)
os.Exit(1)
}
accumulator := 0
rowsAlreadyChecked := []int{}
row := 0
instrucions := strings.Split(string(input), "\n")
infiniteLoopDetected := false
for !infiniteLoopDetected {
if IsNumberInList(row, rowsAlreadyChecked) {
infiniteLoopDetected = true
break
}
formattedCommand := FormatCommand(instrucions[row])
valueOfCommand := formattedCommand[1]
switch command := formattedCommand[0]; command {
case "acc":
if strings.Contains(valueOfCommand, "-") {
numberToSubtract, err := strconv.Atoi(valueOfCommand[1:])
if err != nil {
os.Exit(1)
}
rowsAlreadyChecked = append(rowsAlreadyChecked, row)
accumulator -= numberToSubtract
row++
} else {
numberToAdd, err := strconv.Atoi(valueOfCommand[1:])
if err != nil {
os.Exit(1)
}
rowsAlreadyChecked = append(rowsAlreadyChecked, row)
accumulator += numberToAdd
row++
}
case "jmp":
if strings.Contains(valueOfCommand, "-") {
numberToSubtract, err := strconv.Atoi(valueOfCommand[1:])
if err != nil {
os.Exit(1)
}
rowsAlreadyChecked = append(rowsAlreadyChecked, row)
row -= numberToSubtract
} else {
numberToAdd, err := strconv.Atoi(valueOfCommand[1:])
if err != nil {
os.Exit(1)
}
rowsAlreadyChecked = append(rowsAlreadyChecked, row)
row += numberToAdd
}
case "nop":
rowsAlreadyChecked = append(rowsAlreadyChecked, row)
row++
}
}
fmt.Println(rowsAlreadyChecked)
fmt.Println(accumulator)
}
//FormatCommand splits a boot-code line (e.g. "acc +5") on the single
//space between opcode and argument.
func FormatCommand(command string) []string {
	parts := strings.Split(command, " ")
	return parts
}
//IsNumberInList reports whether a occurs anywhere in list (linear scan).
func IsNumberInList(a int, list []int) bool {
	for i := range list {
		if list[i] == a {
			return true
		}
	}
	return false
}
|
package helper
import (
"crypto/rand"
"fmt"
"math/big"
"os"
"strings"
"testing"
lorem "github.com/drhodes/golorem"
log "github.com/sirupsen/logrus"
)
const (
	MaxRand  = 1000000 // exclusive upper bound used by RandNum
	LoremMin = 50      // lower bound passed to lorem.Paragraph — presumably min words; TODO confirm
	LoremMax = 100     // upper bound passed to lorem.Paragraph
)
// Lipsum returns a randomly generated lorem-ipsum paragraph sized by the
// LoremMin/LoremMax bounds.
func Lipsum() string {
	return lorem.Paragraph(LoremMin, LoremMax)
}
// SplitTrim splits s on commas and trims surrounding whitespace from each
// piece. The result always has at least one entry: strings.Split never
// returns an empty slice (the original's len==0 branch was unreachable
// dead code and has been removed).
func SplitTrim(s string) []string {
	parts := strings.Split(s, ",")
	result := make([]string, len(parts))
	for i, p := range parts {
		result[i] = strings.TrimSpace(p)
	}
	return result
}
// VerifyIfSkipIntegrationTests skips the calling test unless the
// RUN_INTEGRATION_TESTS environment variable is set to "yes".
func VerifyIfSkipIntegrationTests(t *testing.T) {
	if os.Getenv("RUN_INTEGRATION_TESTS") != "yes" {
		t.Skipf("skipping integration tests")
	}
}
// IsRunningInDocker reports whether the process appears to run inside a
// Docker container, detected via the conventional /.dockerenv marker file.
// Any Stat outcome other than "does not exist" counts as running in Docker.
func IsRunningInDocker() bool {
	_, err := os.Stat("/.dockerenv")
	return !os.IsNotExist(err)
}
// GenRandString returns prefix followed by a random number below MaxRand.
func GenRandString(prefix string) string {
	return fmt.Sprintf("%s%v", prefix, RandNum())
}

// RandNum returns a cryptographically random int64 in [0, MaxRand).
func RandNum() int64 {
	return RandNumWithMax(MaxRand)
}
// IfElse returns then when cond is true, otherwise otherwise — a ternary
// helper for ints.
func IfElse(cond bool, then, otherwise int) int {
	result := otherwise
	if cond {
		result = then
	}
	return result
}
// RandNumWithMax returns a uniformly distributed random int64 in
// [0, max) using crypto/rand; it panics when the entropy source fails.
func RandNumWithMax(max int64) int64 {
	n, err := rand.Int(rand.Reader, big.NewInt(max))
	if err != nil {
		panic(err)
	}
	return n.Int64()
}
// LogErr logs err at error level when it is non-nil; a nil err is a no-op.
func LogErr(err error) {
	if err != nil {
		log.Errorf("an error has occurred: %v", err)
	}
}
// BleveEscapeTerm escapes every character that bleve's query syntax
// treats as special (including the space) by prefixing it with a
// backslash, after trimming surrounding whitespace from the term.
func BleveEscapeTerm(term string) string {
	v := strings.TrimSpace(term)
	// special chars to be escaped for bleve
	specialChars := "+-=&|><!(){}[]^\"~*?:\\/ "
	// strings.Builder avoids the quadratic cost of += concatenation in a
	// loop (the original rebuilt the string on every appended rune).
	var b strings.Builder
	b.Grow(len(v) * 2)
	for _, c := range v {
		if strings.ContainsRune(specialChars, c) {
			b.WriteByte('\\')
		}
		b.WriteRune(c)
	}
	return b.String()
}
|
package control
import (
"encoding/json"
"testing"
"github.com/square/p2/pkg/pc/fields"
rc_fields "github.com/square/p2/pkg/rc/fields"
"github.com/square/p2/pkg/store/consul/consultest"
"github.com/square/p2/pkg/store/consul/pcstore"
"github.com/square/p2/pkg/store/consul/pcstore/pcstoretest"
"github.com/square/p2/pkg/types"
"k8s.io/kubernetes/pkg/labels"
)
// TestCreate verifies that PodCluster.Create persists everything it is
// given: the pod/cluster/AZ identifiers, the label selector, and the
// JSON-round-tripped annotations.
func TestCreate(t *testing.T) {
	testAZ := fields.AvailabilityZone("west-coast")
	testCN := fields.ClusterName("test")
	testPodID := types.PodID("pod")
	selector := labels.Everything().
		Add(fields.PodIDLabel, labels.EqualsOperator, []string{testPodID.String()}).
		Add(fields.AvailabilityZoneLabel, labels.EqualsOperator, []string{testAZ.String()}).
		Add(fields.ClusterNameLabel, labels.EqualsOperator, []string{testCN.String()})
	session := consultest.NewSession()
	pcstore := pcstoretest.NewFake()
	strategy := rc_fields.StaticStrategy
	pcController := NewPodCluster(testAZ, testCN, testPodID, pcstore, selector, strategy, 0)
	annotations := map[string]string{
		"load_balancer_info": "totally",
		"pager_information":  "555-111-2222",
	}
	// Round-trip through JSON to obtain typed fields.Annotations.
	buf, err := json.Marshal(annotations)
	if err != nil {
		t.Errorf("json marshal error: %v", err)
	}
	var testAnnotations fields.Annotations
	if err := json.Unmarshal(buf, &testAnnotations); err != nil {
		t.Errorf("json unmarshal error: %v", err)
	}
	pc, err := pcController.Create(fields.Annotations(testAnnotations), session)
	if err != nil {
		t.Errorf("got error during creation: %v", err)
	}
	if pc.ID == "" {
		t.Error("got empty pc ID")
	}
	if pc.PodID != testPodID {
		// BUG FIX: the original message swapped expected and got
		// (it printed pc.PodID as the expectation and testPodID as the
		// observed value).
		t.Errorf("Expected to get %s, got: %v", testPodID, pc.PodID)
	}
	if pc.Name != testCN {
		t.Errorf("Expected to get %s, got: %v", testCN, pc.Name)
	}
	if pc.AvailabilityZone != testAZ {
		t.Errorf("Expected to get %s, got: %v", testAZ, pc.AvailabilityZone)
	}
	if pc.PodSelector.String() != selector.String() {
		t.Errorf("Expected to get %s, got: %v", selector, pc.PodSelector)
	}
	if pc.Annotations["load_balancer_info"] != testAnnotations["load_balancer_info"] {
		t.Errorf("Expected to get %s, got: %v", testAnnotations, pc.Annotations)
	}
	if pc.Annotations["pager_information"] != testAnnotations["pager_information"] {
		t.Errorf("Expected to get %s, got: %v", testAnnotations, pc.Annotations)
	}
}
// TestUpdateAnnotations verifies that UpdateAnnotations REPLACES the
// annotation set wholesale: retained keys keep their values, new keys
// appear, and keys absent from the update are erased.
func TestUpdateAnnotations(t *testing.T) {
	testAZ := fields.AvailabilityZone("west-coast")
	testCN := fields.ClusterName("test")
	testPodID := types.PodID("pod")
	selector := labels.Everything().
		Add(fields.PodIDLabel, labels.EqualsOperator, []string{testPodID.String()}).
		Add(fields.AvailabilityZoneLabel, labels.EqualsOperator, []string{testAZ.String()}).
		Add(fields.ClusterNameLabel, labels.EqualsOperator, []string{testCN.String()})
	session := consultest.NewSession()
	pcstore := pcstoretest.NewFake()
	strategy := rc_fields.StaticStrategy
	pcController := NewPodCluster(testAZ, testCN, testPodID, pcstore, selector, strategy, 0)
	// Seed the cluster with an initial annotation set.
	var annotations = map[string]string{
		"load_balancer_info": "totally",
		"pager_information":  "555-111-2222",
	}
	buf, err := json.Marshal(annotations)
	if err != nil {
		t.Errorf("json marshal error: %v", err)
	}
	var testAnnotations fields.Annotations
	if err := json.Unmarshal(buf, &testAnnotations); err != nil {
		t.Errorf("json unmarshal error: %v", err)
	}
	pc, err := pcController.Create(fields.Annotations(testAnnotations), session)
	if err != nil {
		t.Fatalf("Unable to create pod cluster due to: %v", err)
	}
	// The update keeps pager_information, adds priority, and drops
	// load_balancer_info.
	newAnnotations := map[string]string{
		"pager_information": "555-111-2222",
		"priority":          "1001",
	}
	buf, err = json.Marshal(newAnnotations)
	if err != nil {
		t.Errorf("json marshal error: %v", err)
	}
	var newTestAnnotations fields.Annotations
	if err := json.Unmarshal(buf, &newTestAnnotations); err != nil {
		t.Errorf("json unmarshal error: %v", err)
	}
	pc, err = pcController.UpdateAnnotations(newTestAnnotations)
	if err != nil {
		t.Fatalf("Got error updating PC annotations: %v", err)
	}
	if pc.Annotations["pager_information"] != newAnnotations["pager_information"] {
		t.Errorf("Got unexpected pager_information. Expected %s, got %s", newAnnotations["pager_information"], pc.Annotations["pager_information"])
	}
	if pc.Annotations["priority"] != newAnnotations["priority"] {
		t.Errorf("Got unexpected priority. Expected %s, got %s", newAnnotations["priority"], pc.Annotations["priority"])
	}
	// A replaced key must be gone entirely (nil interface value).
	if pc.Annotations["load_balancer_info"] != nil {
		t.Errorf("Expected to erase old annotation field. Instead we have: %s", pc.Annotations["load_balancer_info"])
	}
}
// TestPodClusterFromID checks that a controller constructed from a pod
// cluster ID round-trips the cluster through the store, and that Get after
// Delete reports pcstore.NoPodCluster.
func TestPodClusterFromID(t *testing.T) {
	az := fields.AvailabilityZone("west-coast")
	cn := fields.ClusterName("test")
	podID := types.PodID("pod")
	sel := labels.Everything().
		Add(fields.PodIDLabel, labels.EqualsOperator, []string{podID.String()}).
		Add(fields.AvailabilityZoneLabel, labels.EqualsOperator, []string{az.String()}).
		Add(fields.ClusterNameLabel, labels.EqualsOperator, []string{cn.String()})
	session := consultest.NewSession()
	fakePCStore := pcstoretest.NewFake()

	// Create via a label-based controller, then drop it.
	fromLabels := NewPodCluster(az, cn, podID, fakePCStore, sel, rc_fields.StaticStrategy, 0)
	created, err := fromLabels.Create(fields.Annotations{}, session)
	if err != nil {
		t.Fatal(err)
	}
	fromLabels = nil

	// Retrieve via an ID-based controller and compare.
	fromID := NewPodClusterFromID(created.ID, fakePCStore)
	fetched, err := fromID.Get()
	if err != nil {
		t.Fatal(err)
	}
	if created.ID != fetched.ID {
		t.Errorf("Did not get correct PC back from datastore, expected %s, got %s.\n%v", created.ID, fetched.ID, fetched)
	}

	// After deletion the store must report the dedicated not-found error.
	if errs := fromID.Delete(); len(errs) > 0 {
		t.Fatalf("%v", errs)
	}
	missing, err := fromID.Get()
	if err != pcstore.NoPodCluster {
		t.Errorf("Expected to get pcstore.NoPodCluster, but got %v", err)
	}
	if missing.ID != "" {
		t.Errorf("Expected to not find PC but found %v", missing)
	}
}
|
package bitcask
import (
"os"
)
// bufwriter is a minimal buffered writer over an *os.File. Writes accumulate
// in buf until it would overflow, at which point the buffer is flushed to the
// underlying file. It is not safe for concurrent use.
type bufwriter struct {
	f    *os.File // destination file
	used int      // number of valid bytes currently staged in buf
	buf  []byte   // fixed-size staging buffer
}

// newBufWriter returns a bufwriter that stages up to bufsz bytes in memory
// before writing them to f.
func newBufWriter(f *os.File, bufsz uint32) *bufwriter {
	return &bufwriter{
		f:   f,
		buf: make([]byte, bufsz),
	}
}

// Write buffers data, draining the buffer to the file only when data would
// not fit. Writes larger than the whole buffer bypass it and go straight to
// the file. Unlike the original, an underlying write failure is returned to
// the caller instead of panicking (the signature already has an error slot).
func (bw *bufwriter) Write(data []byte) (int, error) {
	// Fast path: data fits in the remaining buffer space.
	if len(data)+bw.used <= len(bw.buf) {
		copy(bw.buf[bw.used:], data)
		bw.used += len(data)
		return len(data), nil
	}
	// Drain the staged bytes to make room.
	if bw.used > 0 {
		if _, err := bw.f.Write(bw.buf[:bw.used]); err != nil {
			return 0, err
		}
		bw.used = 0
	}
	// Oversized writes skip the buffer entirely.
	if len(data) > len(bw.buf) {
		return bw.f.Write(data)
	}
	copy(bw.buf, data)
	bw.used += len(data)
	return len(data), nil
}

// Flush writes any staged bytes to the file. The signature offers no error
// return, so a write failure here still panics as in the original.
func (bw *bufwriter) Flush() {
	if bw.used > 0 {
		_, err := bw.f.Write(bw.buf[0:bw.used])
		if err != nil {
			panic(err)
		}
		bw.used = 0
	}
}

// Buffered reports how many bytes are currently staged and unwritten.
func (bw *bufwriter) Buffered() int {
	return bw.used
}

// GetBuffer exposes the internal staging buffer (callers must not mutate it).
func (bw *bufwriter) GetBuffer() []byte {
	return bw.buf
}
|
// Implementação do comando echo. Versão 6.
// arataca89@gmail.com
// 20210412
package main
import (
"fmt"
"os"
)
// main prints every command-line argument (the program path is index 0) on
// its own line, prefixed by its index.
func main() {
	for idx, arg := range os.Args {
		fmt.Println(idx, ": ", arg)
	}
}
/////////////////////////////////////////////////////////////////////
// Esta versão exibe o índice e o valor de cada item em
// os.Args, um por linha.
//
// Exemplo:
//
// go run main.go testando 1 dois III
//
// 0 : C:\Users\nerd\b001\exe\main.exe
// 1 : testando
// 2 : 1
// 3 : dois
// 4 : III
|
package dao
// Manager abstracts session persistence: initialization of the backing
// store, creation of new sessions, and lookup of existing ones.
// (Reformatted to be gofmt-clean; method set unchanged.)
type Manager interface {
	// Init connects the manager to the store at addr; option carries
	// implementation-specific settings.
	Init(addr string, option ...string)
	// CreateSession allocates and returns a new session record.
	CreateSession() (sd SessionData, err error)
	// GetSessionData returns the session stored under sessionId.
	GetSessionData(sessionId string) (sd SessionData, err error)
}
|
package workflow
import (
"context"
"fmt"
)
// State names a workflow execution state.
type State string

// WorkflowExecRequest identifies the workflow to execute.
type WorkflowExecRequest struct {
	WorkflowID string `json:"workflow_id"`
}

// WorkflowExecResponse is the (currently empty) result of a workflow run.
type WorkflowExecResponse struct{}

// WorkflowArguments maps a workflow task name to the arguments passed to
// that task when it is executed.
type WorkflowArguments map[string]TaskArguments
// Exec runs every task of w sequentially: each task is resolved through the
// TaskManager and executed with the arguments registered under its name in
// args. It stops at the first failure; on success it returns an empty
// response.
func (wm *WorkflowManager) Exec(ctx context.Context, w *Workflow, args WorkflowArguments) (*WorkflowExecResponse, error) {
	// NOTE(review): stateMap is populated but never read — dead state or an
	// unfinished feature; confirm intent before removing.
	stateMap := make(map[string]WorkflowTask)
	for _, t := range w.Tasks {
		stateMap[t.Name] = t
		task, err := wm.TaskManager.GetTask(GetTaskOptions{ID: t.TaskID})
		if err != nil {
			return nil, fmt.Errorf("Can't find task %+v: %w", t, err)
		}
		// Schedule a job
		response, err := wm.TaskManager.Exec(ctx, task, args[t.Name])
		if err != nil {
			return nil, err
		}
		fmt.Printf("%+v\n", response)
	}
	return &WorkflowExecResponse{}, nil
}
//
// workflow
// task
|
package category
import (
"github.com/evleria/quiz-cli/pkg/cmd/category/list"
"github.com/evleria/quiz-cli/pkg/cmdutils"
"github.com/spf13/cobra"
)
// NewCategoryCmd builds the parent "category" command and attaches its
// subcommands (currently only "list").
func NewCategoryCmd(factory *cmdutils.Factory) *cobra.Command {
	categoryCmd := &cobra.Command{
		Use:   "category",
		Short: "shows information of categories",
	}
	categoryCmd.AddCommand(list.NewListCmd(factory))
	return categoryCmd
}
|
package main
// LogsAction handles the "logs" CLI action with the remaining command-line
// arguments in args. It is currently an empty stub.
// (Receiver renamed from the non-idiomatic `this`.)
func (app *Application) LogsAction(args []string) {
}
|
package main
import (
"context"
"os"
"testing"
"cloud.google.com/go/datastore"
)
// TestController_storeMessage exercises Controller.storeMessage against the
// real Datastore project named by GCLOUD_DATASET_ID.
func TestController_storeMessage(t *testing.T) {
	projectID := os.Getenv("GCLOUD_DATASET_ID")
	cli, err := datastore.NewClient(context.Background(), projectID)
	if err != nil {
		// Fatal (not Error): the original continued with a nil client and
		// would fail confusingly inside the subtests.
		t.Fatal(err)
	}
	type fields struct {
		store *datastore.Client
	}
	type args struct {
		ctx     context.Context
		message string
	}
	tests := []struct {
		name    string
		fields  fields
		args    args
		wantErr bool
	}{
		{"base-case", fields{cli}, args{context.Background(), "test"}, false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c := &Controller{
				store: tt.fields.store,
			}
			if err := c.storeMessage(tt.args.ctx, tt.args.message); (err != nil) != tt.wantErr {
				t.Errorf("Controller.storeMessage() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}
// TestController_queryMessages exercises Controller.queryMessages against
// the real Datastore project named by GCLOUD_DATASET_ID.
func TestController_queryMessages(t *testing.T) {
	projectID := os.Getenv("GCLOUD_DATASET_ID")
	cli, err := datastore.NewClient(context.Background(), projectID)
	if err != nil {
		// Fatal (not Error): continuing with a nil client would panic below.
		t.Fatal(err)
	}
	type fields struct {
		store *datastore.Client
	}
	type args struct {
		ctx   context.Context
		limit int
	}
	tests := []struct {
		name    string
		fields  fields
		args    args
		wantErr bool
	}{
		{"base-case", fields{cli}, args{context.Background(), 10}, false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c := &Controller{
				store: tt.fields.store,
			}
			_, err := c.queryMessages(tt.args.ctx, tt.args.limit)
			if (err != nil) != tt.wantErr {
				t.Errorf("Controller.queryMessages() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
		})
	}
}
|
package cosmos
import "context"
// User addresses a single user document within a database; its client is
// already scoped to this user's resource path.
type User struct {
	client Client
	db     Database
	userID string
}

// UserDefinition is the wire representation of a user resource.
type UserDefinition struct {
	Resource
	// NOTE(review): field name and json tag look like a typo of
	// "_permissions"; being unexported it is also invisible to
	// encoding/json — confirm whether it is used at all.
	_persmissions string `json:"_persmissions,omitempty"`
}

// Users addresses the users collection of a database.
type Users struct {
	client Client
	db     Database
}
// Permission addresses the user's permission resource with the given id.
func (u User) Permission(id string) *Permission {
	return newPermission(u, id)
}

// Permissions addresses the user's permissions collection.
func (u User) Permissions() *Permissions {
	return newPermissions(u)
}
// newUser scopes a copy of the database's client to a single user document
// ("…/users/<userID>") and wraps it in a User handle.
// NOTE(review): db is received by value, so the path mutation below affects
// only the local copy — confirm Client holds no shared pointers.
func newUser(db Database, userID string) *User {
	db.client.path += "/users/" + userID
	db.client.rType = "users"
	db.client.rLink = db.client.path
	user := &User{
		client: db.client,
		db:     db,
		userID: userID,
	}
	return user
}
// newUsers scopes a copy of the database's client to the users collection
// ("…/users") and wraps it in a Users handle.
func newUsers(db Database) *Users {
	db.client.path += "/users"
	db.client.rType = "users"
	users := &Users{
		client: db.client,
		db:     db,
	}
	return users
}
// Create a new user in the collection; the definition the service echoes
// back is decoded and returned.
func (u *Users) Create(ctx context.Context, user *UserDefinition, opts ...CallOption) (*UserDefinition, error) {
	createdUser := &UserDefinition{}
	// NOTE(review): &createdUser is a **UserDefinition while Read passes the
	// pointer directly; kept as-is since client.create's decoding contract
	// is not visible here.
	_, err := u.client.create(ctx, user, &createdUser, opts...)
	if err != nil {
		return nil, err
	}
	// err is necessarily nil here; return it explicitly as nil.
	return createdUser, nil
}
// Replace an existing user with a new definition; the updated definition the
// service echoes back is decoded and returned.
func (u *User) Replace(ctx context.Context, user *UserDefinition, opts ...CallOption) (*UserDefinition, error) {
	updatedUser := &UserDefinition{}
	_, err := u.client.replace(ctx, user, &updatedUser, opts...)
	if err != nil {
		return nil, err
	}
	// err is necessarily nil here; return it explicitly as nil.
	return updatedUser, nil
}
// ReadAll users in a collection by decoding the service's
// {"users": [...], "_count": n} envelope.
func (u *Users) ReadAll(ctx context.Context, opts ...CallOption) ([]UserDefinition, error) {
	data := struct {
		Users []UserDefinition `json:"users,omitempty"`
		Count int              `json:"_count,omitempty"`
	}{}
	_, err := u.client.read(ctx, &data, opts...)
	if err != nil {
		return nil, err
	}
	// err is necessarily nil here; return it explicitly as nil.
	return data.Users, nil
}
// Delete existing user
func (u *User) Delete(ctx context.Context, opts ...CallOption) (*Response, error) {
	return u.client.delete(ctx, opts...)
}

// Read a single user from collection
// NOTE(review): unlike Create/Replace, this passes the *UserDefinition
// directly (not its address) — confirm both spellings decode identically.
func (u *User) Read(ctx context.Context, opts ...CallOption) (*UserDefinition, error) {
	user := &UserDefinition{}
	_, err := u.client.read(ctx, user, opts...)
	return user, err
}
|
// Package yara is a placeholder; no functionality is implemented yet.
// (The empty import block, which goimports/gofmt would remove, is dropped.)
package yara
|
package main
import (
"fmt"
)
// main exercises summaryRanges against a handful of representative inputs
// (runs, gaps, empty slice, negatives, single element) and prints each
// result in Go syntax.
func main() {
	cases := [][]int{
		{0, 1, 2, 4, 5, 7},
		{0, 2, 3, 4, 6, 8, 9},
		{},
		{-1},
		{0},
	}
	for _, nums := range cases {
		fmt.Printf("%#v\n", summaryRanges(nums))
	}
}
// summaryRanges collapses a sorted slice of unique integers into range
// strings: a consecutive run a..b (b > a) becomes "a->b", an isolated value
// becomes "a". Empty input yields an empty (non-nil) slice.
//
// Rewritten as a single pass tracking the start of the current run, instead
// of the original two-pass version that materialized every run as a []int.
func summaryRanges(nums []int) []string {
	if len(nums) == 0 {
		return []string{}
	}
	var out []string
	start := nums[0]
	for i := 1; i <= len(nums); i++ {
		// Close the current run at end of input or when continuity breaks.
		if i == len(nums) || nums[i] != nums[i-1]+1 {
			if start == nums[i-1] {
				out = append(out, fmt.Sprintf("%d", start))
			} else {
				out = append(out, fmt.Sprintf("%d->%d", start, nums[i-1]))
			}
			if i < len(nums) {
				start = nums[i]
			}
		}
	}
	return out
}
|
package export
import (
"github.com/Zenika/marcel/api/db/plugins"
)
func Plugins(outputFile string, pretty bool) error {
return export(func() (interface{}, error) {
return plugins.List()
}, outputFile, pretty)
}
|
package module
import (
"bytes"
"encoding/json"
"errors"
"github.com/zieckey/simgo"
"io/ioutil"
"log"
"net/http"
"sync"
)
var (
	// ErrConvert reports that a request field was missing or had an
	// unexpected type.
	ErrConvert = errors.New("convert error")
	// mu serializes access to the shared Proxy.Sqp while building request
	// bodies (see GetPostBody).
	mu sync.Mutex
)
// SearchRequestParams holds the parsed search query parameters.
// NOTE(review): underscore field names are non-idiomatic Go, but renaming
// would ripple through the package; the json tags pin the wire names.
type SearchRequestParams struct {
	Days []interface{} `json:"days"`
	Page_size int32 `json:"page_size"`
	Page_number int32 `json:"page_number"`
	Day string `json:"day"`
	Business string `json:"business"`
	Keywords map[string]interface{} `json:"keywords"`
	Options map[string]interface{} `json:"options"`
	// Filters map[string]interface{} `json:"filters"`
}
// NewSearchRequestParams returns params with every collection-typed field
// initialized to an empty (non-nil) value.
func NewSearchRequestParams() *SearchRequestParams {
	p := new(SearchRequestParams)
	p.Days = []interface{}{}
	p.Keywords = map[string]interface{}{}
	p.Options = map[string]interface{}{}
	return p
}
// QueryBody is the JSON envelope sent upstream: {"query": {...}}.
type QueryBody struct {
	Query *SearchRequestParams `json:"query"`
}

// NewQuery returns an envelope wrapping freshly initialized params.
func NewQuery() *QueryBody {
	return &QueryBody{
		Query: NewSearchRequestParams(),
	}
}
// Proxy fans multi-day search requests out to the upstream search service.
type Proxy struct {
	poseidon_search_url string // upstream endpoint, read from config in Initialize
	Sqp *SearchRequestParams // shared request params (guarded by mu in GetPostBody)
}

// New returns a Proxy with empty, non-nil request params.
func New() *Proxy {
	return &Proxy{
		Sqp: NewSearchRequestParams(),
	}
}
// Initialize reads the upstream URL from the [proxy] config section and
// registers the POST /service/proxy/mdsearch handler.
func (p *Proxy) Initialize() error {
	fw := simgo.DefaultFramework
	// NOTE(review): the SectionGet error is discarded; a missing config key
	// silently leaves poseidon_search_url empty.
	p.poseidon_search_url, _ = fw.Conf.SectionGet("proxy", "poseidon_search_url")
	simgo.HandleFunc("/service/proxy/mdsearch", p.MdsearchAction, p).Methods("POST")
	p.Sqp = NewSearchRequestParams()
	return nil
}
// Uninitialize is a no-op; the module holds no resources to release.
func (p *Proxy) Uninitialize() error {
	return nil
}
// MdsearchAction handles POST /service/proxy/mdsearch: it fans the search
// out to the upstream service once per requested day and replies with a
// JSON array of the per-day responses (arrival order, not day order).
func (p *Proxy) MdsearchAction(w http.ResponseWriter, r *http.Request) {
	days, err := p.GetDays(r)
	if err != nil {
		// Malformed request body: answer 400 instead of panicking the handler.
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	// Filter empty day entries up front so the number of goroutines launched
	// matches the number of channel receives below. The original skipped
	// empty days in the send loop but still waited for tasknum results,
	// which deadlocked the handler whenever an empty day was present.
	valid := days[:0]
	for _, day := range days {
		if day != "" {
			valid = append(valid, day)
		}
	}
	tasknum := len(valid)
	log.Println("tasknum:", tasknum, valid)
	// One slot per task so senders never block even if we bail early.
	c := make(chan string, tasknum)
	for _, day := range valid {
		go p.send(day, c)
	}
	// Collect exactly one result per launched task and join as a JSON array.
	buf := bytes.NewBuffer([]byte("["))
	for i := 0; i < tasknum; i++ {
		buf.WriteString(<-c)
		if i != tasknum-1 {
			buf.WriteString(",")
		}
	}
	buf.WriteString("]")
	w.Write(buf.Bytes())
}
// send POSTs the JSON body for a single day to the upstream search service
// and delivers the raw response body on c. Any panic along the way (bad
// request, network failure, read error) is recovered and reported as the
// string "request timeout", so the collector always receives exactly one
// value per call.
func (p *Proxy) send(day string, c chan string) {
	defer func() {
		if err := recover(); err != nil {
			c <- "request timeout"
		}
	}()
	b, _ := p.GetPostBody(day)
	body := bytes.NewBuffer(b)
	req, err := http.NewRequest("POST", p.poseidon_search_url, body)
	log.Println("send url ", p.poseidon_search_url)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	// NOTE(review): a fresh Client per request, with no timeout, defeats
	// connection reuse and can hang indefinitely — consider one shared,
	// timeout-configured client.
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	re, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	c <- string(re)
}
// getparams decodes the JSON request body and populates the shared p.Sqp
// from its "query" object. Missing or mistyped required fields yield
// ErrConvert; the original used unchecked type assertions, so any malformed
// (untrusted) request body panicked the handler.
func (p *Proxy) getparams(r *http.Request) (*SearchRequestParams, error) {
	dec := json.NewDecoder(r.Body)
	searchParams := make(map[string]interface{})
	if err := dec.Decode(&searchParams); err != nil {
		return nil, err
	}
	query, ok := searchParams["query"].(map[string]interface{})
	if !ok {
		return nil, ErrConvert
	}
	pageSize, ok := query["page_size"].(float64)
	if !ok {
		return nil, ErrConvert
	}
	pageNumber, ok := query["page_number"].(float64)
	if !ok {
		return nil, ErrConvert
	}
	business, ok := query["business"].(string)
	if !ok {
		return nil, ErrConvert
	}
	keywords, ok := query["keywords"].(map[string]interface{})
	if !ok {
		return nil, ErrConvert
	}
	options, ok := query["options"].(map[string]interface{})
	if !ok {
		return nil, ErrConvert
	}
	p.Sqp.Page_size = int32(pageSize)
	p.Sqp.Page_number = int32(pageNumber)
	p.Sqp.Business = business
	p.Sqp.Keywords = keywords
	p.Sqp.Options = options
	// day/days are optional; silently skip them when absent or mistyped.
	if day, ok := query["day"].(string); ok {
		p.Sqp.Day = day
	}
	if days, ok := query["days"].([]interface{}); ok {
		p.Sqp.Days = days
	}
	return p.Sqp, nil
}
// GetDays parses the request and returns the "days" entries as strings;
// non-string entries become empty strings at their positions.
func (p *Proxy) GetDays(r *http.Request) ([]string, error) {
	params, err := p.getparams(r)
	if err != nil {
		return nil, err
	}
	// Copy into a fresh []string so callers don't retain the large parsed
	// request object.
	out := make([]string, len(params.Days))
	for i := range params.Days {
		if s, ok := params.Days[i].(string); ok {
			out[i] = s
		}
	}
	return out, nil
}
// GetPostBody marshals the shared params with Day set to day. The mutex
// serializes concurrent senders, since they all mutate the single p.Sqp.
func (p *Proxy) GetPostBody(day string) ([]byte, error) {
	mu.Lock()
	defer mu.Unlock()
	p.Sqp.Day = day
	query := NewQuery()
	query.Query = p.Sqp
	body, err := json.Marshal(query)
	if err != nil {
		return nil, err
	}
	return body, nil
}
|
package admin
import (
"firstProject/app/http/result"
"fmt"
"math/rand"
"os"
"time"
"github.com/gin-gonic/gin"
)
// UploadImg handles an image upload from form field "upload": the file is
// saved as storage/<yyyy-mm-dd>/<random10>.jpg and the stored URL is
// returned in the JSON response.
func UploadImg(c *gin.Context) {
	returnData := result.NewResult(c)
	file, err := c.FormFile("upload")
	if err != nil {
		// The original ignored this error and would nil-deref on requests
		// without an "upload" field.
		fmt.Println("upload: missing file:", err)
		return
	}
	fmt.Println(file.Filename)
	day := time.Now().Format("2006-01-02")
	imgPath := GetRandomString(10)
	// MkdirAll with 0755: the original used Mkdir with 0666, which creates a
	// directory without the execute bit, so files cannot be created inside
	// it (and Mkdir also fails if the parent is missing).
	if err := os.MkdirAll("storage/"+day, 0755); err != nil {
		fmt.Println("upload: mkdir:", err)
		return
	}
	imgUrl := "storage/" + day + "/" + imgPath + ".jpg"
	if err := c.SaveUploadedFile(file, imgUrl); err != nil {
		fmt.Println("upload: save:", err)
		return
	}
	var data struct {
		Url      string `json:"url"`
		Uploaded bool   `json:"uploaded"`
	}
	data.Url = imgUrl
	data.Uploaded = true
	returnData.Success(data)
}
// GetRandomString returns a random l-character string drawn from the
// lowercase-alphanumeric charset [0-9a-z].
//
// NOTE(review): each call seeds a fresh PRNG with the current nanosecond, so
// two calls within the same nanosecond return identical strings; a shared
// package-level source would fix that.
func GetRandomString(l int) string {
	const charset = "0123456789abcdefghijklmnopqrstuvwxyz"
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	// Preallocate the result instead of append-growing it.
	out := make([]byte, l)
	for i := range out {
		out[i] = charset[r.Intn(len(charset))]
	}
	return string(out)
}
|
package mwords // import "cpl.li/go/cryptor/internal/crypt/mwords"
|
package fifth
import (
"fmt"
"testing"
)
// TestIfElseThen defines three words exercising if/else/then and checks the
// data-stack contents after running each against both branch conditions.
func TestIfElseThen(t *testing.T) {
	i := NewInterpreter()
	var tests = []struct {
		input string
		want  string
	}{
		{"0 hello .s", "[1 2]"},
		{"1 hello .s", "[2]"},
		{"0 foo .s", "[1 2 2]"},
		{"1 foo .s", "[2]"},
		{"0 hoge .s", "[1 2]"},
		{"1 hoge .s", "[3 4]"},
	}
	i.SetString(`
: hello if 1 then 2 ;
: foo if 0 hello then 1 hello ;
: hoge if 1 2 else 3 4 then ;
see hello
see foo
see hoge`)
	if err := i.Run(); err != nil {
		// Fatal: if the word definitions fail to load, every case below is
		// noise (the original used t.Error and kept going).
		t.Fatal(err.Error())
	}
	for _, test := range tests {
		i.DS.Clear()
		i.SetString(test.input)
		fmt.Println(test.input)
		if err := i.Run(); err != nil {
			t.Error(err.Error())
		}
		// string comparison
		if i.DS.String() != test.want {
			t.Errorf("%q => %v", test.input, i.DS.data)
		}
	}
}
|
package aclient
import (
"bytes"
"encoding/json"
"io/ioutil"
"net/http"
"time"
"github.com/henglory/Demo_Golang_v0.0.1/spec"
)
// AClient is a JSON-over-HTTP client for service A. The Do-shaped interface
// field lets tests substitute a fake transport for *http.Client.
type AClient struct {
	url string
	loggingFn func(info interface{}) error
	client interface {
		Do(req *http.Request) (*http.Response, error)
	}
}
// New builds an AClient targeting url, with the given per-request timeout
// and a callback that receives request/response log records.
func New(url string, timeout time.Duration, loggingFn func(info interface{}) error) *AClient {
	return &AClient{
		url:       url,
		loggingFn: loggingFn,
		client:    &http.Client{Timeout: timeout},
	}
}
// Request POSTs req as JSON to the configured endpoint and decodes the JSON
// response into spec.ARes. The request body, response body, and call
// overhead (µs) are passed to the logging callback.
func (c *AClient) Request(req spec.AReq) (spec.ARes, error) {
	var res spec.ARes
	b, err := json.Marshal(req)
	if err != nil {
		return res, err
	}
	hreq, err := http.NewRequest("POST", c.url, bytes.NewReader(b))
	if err != nil {
		return res, err
	}
	// The original deferred hreq.Body.Close() here; http.Client.Do already
	// closes the request body, so that defer was removed.
	hreq.Header.Set("Content-Type", "application/json;charset=UTF-8")
	c.loggingFn(spec.LogAReq{
		LogTime: spec.JSONTime(time.Now()),
		Info:    "StartCallA",
		Req:     string(b),
	})
	st := time.Now()
	hresp, err := c.client.Do(hreq)
	if err != nil {
		return res, err
	}
	// Close immediately after the error check: the original registered this
	// defer only after ReadAll, leaking the body when the read failed.
	defer hresp.Body.Close()
	resp, err := ioutil.ReadAll(hresp.Body)
	c.loggingFn(spec.LogARes{
		LogTime:  spec.JSONTime(time.Now()),
		Info:     "EndCallA",
		Res:      string(resp),
		Overhead: time.Since(st).Nanoseconds() / 1000,
	})
	if err != nil {
		return res, err
	}
	err = json.Unmarshal(resp, &res)
	return res, err
}
|
package utils
import (
"encoding/hex"
"encoding/json"
"fmt"
"io/ioutil"
"reflect"
"strings"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/core/types"
)
// DecodeTransactionInput decodes an ABI-encoded transaction input
// ("0x" + 4-byte selector + argument payload) using the contract's build
// JSON at ./ethereum/contract/<contractName>.json. It returns (ok, JSON)
// where JSON carries the decoded inputs, or (false, reason) on failure.
func DecodeTransactionInput(contractName string, encodeData string) (bool, string) {
	status := true
	// Guard short input: the original sliced encodeData[:10] unconditionally
	// and panicked on anything shorter than "0x" plus the selector.
	if len(encodeData) < 10 {
		return false, "invalid input"
	}
	methods := make(map[string]abi.Method)
	raw, err := ioutil.ReadFile(fmt.Sprintf("./ethereum/contract/%s.json", contractName))
	if err != nil {
		return false, err.Error()
	}
	var buildData map[string]interface{}
	if err := json.Unmarshal(raw, &buildData); err != nil {
		return false, err.Error()
	}
	abiDef, _ := json.Marshal(buildData["abi"])
	abiInst, err := abi.JSON(strings.NewReader(string(abiDef)))
	if err != nil {
		return false, err.Error()
	}
	// Index methods by their hex-encoded, 0x-prefixed 4-byte selector.
	for _, method := range abiInst.Methods {
		methods[fmt.Sprintf("0x%s", hex.EncodeToString(method.Id()))] = method
	}
	sign := encodeData[:10]
	data, err := hex.DecodeString(encodeData[10:])
	if err != nil {
		return false, err.Error()
	}
	method, hasMethod := methods[sign]
	if !hasMethod {
		return false, "invalid hash"
	}
	// Zero-pad truncated payloads so UnpackValues can still run, but report
	// failure status for them.
	payloadSize := len(method.Inputs.NonIndexed()) * 32
	if len(data) < payloadSize {
		status = false
		for i := len(data); i < payloadSize; i++ {
			data = append(data, 0)
		}
	}
	values, unpackErr := method.Inputs.UnpackValues(data)
	result := map[string]interface{}{}
	if unpackErr == nil {
		inputs := map[string]interface{}{}
		for i, input := range method.Inputs {
			value := values[i]
			// bytes-typed values: render as string and strip NUL padding.
			if strings.HasPrefix(fmt.Sprint(input.Type), "bytes") {
				value = strings.TrimRight(fmt.Sprintf("%s", reflect.ValueOf(value)), "\x00")
			}
			inputs[input.Name] = value
		}
		result["Inputs"] = inputs
	} else {
		status = false
	}
	resultJSON, _ := json.Marshal(result)
	return status, string(resultJSON)
}
// DecodeTransactionLog decodes an EVM event log using the contract's build
// JSON at ./ethereum/contract/<contractName>.json: the event is matched by
// topic hash, its non-indexed data is unpacked, and the result is returned
// as (ok, JSON with EventName and Inputs) or (false, reason) on failure.
func DecodeTransactionLog(contractName string, log *types.Log) (bool, string) {
	status := true
	events := make(map[string]abi.Event)
	raw, err := ioutil.ReadFile(fmt.Sprintf("./ethereum/contract/%s.json", contractName))
	if err != nil {
		return false, err.Error()
	}
	var buildData map[string]interface{}
	// NOTE(review): Unmarshal/abi.JSON errors are discarded here; a corrupt
	// build file yields an empty event table rather than an error.
	json.Unmarshal(raw, &buildData)
	abiDef, _ := json.Marshal(buildData["abi"])
	abiInst, _ := abi.JSON(strings.NewReader(string(abiDef[:])))
	// Index events by their topic-0 hash.
	for _, event := range abiInst.Events {
		events[fmt.Sprint(event.Id().Hex())] = event
	}
	// Find the first topic that names a known event.
	var event abi.Event
	var hasEvent bool
	for _, topic := range log.Topics {
		event, hasEvent = events[topic.Hex()]
		if hasEvent {
			break
		}
	}
	if !hasEvent {
		return false, "invalid hash"
	}
	values, unpackErr := event.Inputs.UnpackValues(log.Data)
	result := map[string]interface{}{}
	result["EventName"] = event.Name
	// Indexed inputs live in topics, not data; a count mismatch means the
	// unpacked values don't line up with the input definitions.
	if len(event.Inputs) != len(values) {
		return false, ""
	}
	if unpackErr == nil {
		inputs := map[string]interface{}{}
		for i, input := range event.Inputs {
			value := values[i]
			// bytes-typed values: render as string and strip NUL padding.
			if strings.HasPrefix(fmt.Sprint(input.Type), "bytes") {
				value = reflect.ValueOf(value)
				valueStr := fmt.Sprintf("%s", value)
				valueStr = strings.TrimRight(valueStr, "\x00")
				value = valueStr
			}
			inputs[input.Name] = value
		}
		result["Inputs"] = inputs
	} else {
		status = false
	}
	resultJSON, _ := json.Marshal(result)
	return status, string(resultJSON[:])
}
|
package wallet
import (
"context"
"sync"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/Secured-Finance/dione/sigs"
_ "github.com/Secured-Finance/dione/sigs/ed25519" // enable ed25519 signatures
"github.com/Secured-Finance/dione/types"
"github.com/filecoin-project/go-address"
"github.com/sirupsen/logrus"
"golang.org/x/xerrors"
)
const (
	// KNamePrefix prefixes every per-address key name in the keystore.
	KNamePrefix = "wallet-"
	// KDefault is the keystore entry holding the wallet's default key.
	KDefault = "default"
)
// LocalWallet caches keys in memory (keyed by peer ID) on top of an
// optional persistent keystore. lk guards the keys map.
type LocalWallet struct {
	keys map[peer.ID]*Key
	keystore types.KeyStore
	lk sync.Mutex
}

// Default is implemented by wallets that can name a default address.
type Default interface {
	GetDefault() (address.Address, error)
}
// NewWallet returns a LocalWallet backed by keystore with an empty
// in-memory key cache.
func NewWallet(keystore types.KeyStore) (*LocalWallet, error) {
	return &LocalWallet{
		keys:     map[peer.ID]*Key{},
		keystore: keystore,
	}, nil
}
// KeyWallet builds an in-memory-only wallet (no keystore) preloaded with
// the given keys, indexed by address.
func KeyWallet(keys ...*Key) *LocalWallet {
	byAddr := make(map[peer.ID]*Key, len(keys))
	for _, k := range keys {
		byAddr[k.Address] = k
	}
	return &LocalWallet{keys: byAddr}
}
// Sign signs msg with the private key registered for addr. When the wallet
// holds no key for addr, the error wraps types.ErrKeyInfoNotFound.
func (w *LocalWallet) Sign(addr peer.ID, msg []byte) (*types.Signature, error) {
	ki, err := w.findKey(addr)
	if err != nil {
		return nil, err
	}
	if ki == nil {
		// %s for the address: the original used %w on a plain string, which
		// is not an error value and corrupts errors.Is/As unwrapping.
		return nil, xerrors.Errorf("failed to find private key of %s: %w", addr.String(), types.ErrKeyInfoNotFound)
	}
	return sigs.Sign(ActSigType(ki.Type), ki.PrivateKey, msg)
}
// findKey returns the cached key for addr, falling back to the keystore and
// caching the result. A (nil, nil) return means "not found" rather than an
// error. Callers must not already hold w.lk.
func (w *LocalWallet) findKey(addr peer.ID) (*Key, error) {
	w.lk.Lock()
	defer w.lk.Unlock()
	k, ok := w.keys[addr]
	if ok {
		return k, nil
	}
	// No persistent store configured: the in-memory miss is final.
	if w.keystore == nil {
		logrus.Warn("findKey didn't find the key in in-memory wallet")
		return nil, nil
	}
	ki, err := w.tryFind(addr)
	if err != nil {
		if xerrors.Is(err, types.ErrKeyInfoNotFound) {
			return nil, nil
		}
		return nil, xerrors.Errorf("getting from keystore: %w", err)
	}
	k, err = NewKey(ki)
	if err != nil {
		return nil, xerrors.Errorf("decoding from keystore: %w", err)
	}
	// Cache for subsequent lookups.
	w.keys[k.Address] = k
	return k, nil
}
// tryFind looks addr up in the keystore, first under its given name and, on
// ErrKeyInfoNotFound, again with the testnet prefix substituted. A key found
// under the testnet spelling is re-saved under the original name so future
// lookups hit directly.
func (w *LocalWallet) tryFind(addr peer.ID) (types.KeyInfo, error) {
	ki, err := w.keystore.Get(KNamePrefix + addr.String())
	if err == nil {
		return ki, err
	}
	if !xerrors.Is(err, types.ErrKeyInfoNotFound) {
		return types.KeyInfo{}, err
	}
	// We got an ErrKeyInfoNotFound error
	// Try again, this time with the testnet prefix
	tAddress, err := swapMainnetForTestnetPrefix(addr.String())
	if err != nil {
		return types.KeyInfo{}, err
	}
	logrus.Info("tAddress: ", tAddress)
	ki, err = w.keystore.Get(KNamePrefix + tAddress)
	if err != nil {
		return types.KeyInfo{}, err
	}
	logrus.Info("ki from tryFind: ", ki)
	// We found it with the testnet prefix
	// Add this KeyInfo with the mainnet prefix address string
	err = w.keystore.Put(KNamePrefix+addr.String(), ki)
	if err != nil {
		return types.KeyInfo{}, err
	}
	return ki, nil
}
// GetDefault loads the wallet's default key from the keystore and returns
// its address.
func (w *LocalWallet) GetDefault() (peer.ID, error) {
	w.lk.Lock()
	defer w.lk.Unlock()
	ki, err := w.keystore.Get(KDefault)
	if err != nil {
		return "", xerrors.Errorf("failed to get default key: %w", err)
	}
	k, err := NewKey(ki)
	if err != nil {
		return "", xerrors.Errorf("failed to read default key from keystore: %w", err)
	}
	return k.Address, nil
}
// NewKey generates a key of the given type, persists it under its address,
// caches it in memory, and promotes it to the wallet default when no
// default exists yet.
func (w *LocalWallet) NewKey(typ types.KeyType) (peer.ID, error) {
	w.lk.Lock()
	defer w.lk.Unlock()
	k, err := GenerateKey(typ)
	if err != nil {
		return "", err
	}
	if err := w.keystore.Put(KNamePrefix+k.Address.String(), k.KeyInfo); err != nil {
		return "", xerrors.Errorf("saving to keystore: %w", err)
	}
	w.keys[k.Address] = k
	// If no default key is registered yet, make this one the default.
	_, err = w.keystore.Get(KDefault)
	if err != nil {
		if !xerrors.Is(err, types.ErrKeyInfoNotFound) {
			return "", err
		}
		if err := w.keystore.Put(KDefault, k.KeyInfo); err != nil {
			return "", xerrors.Errorf("failed to set new key as default: %w", err)
		}
	}
	return k.Address, nil
}
// WalletHas reports whether a key for addr is available, either cached or
// in the keystore. (ctx is currently unused; kept for interface
// compatibility.)
func (w *LocalWallet) WalletHas(ctx context.Context, addr peer.ID) (bool, error) {
	key, err := w.findKey(addr)
	if err != nil {
		return false, err
	}
	return key != nil, nil
}
// swapMainnetForTestnetPrefix replaces the first character of addr with the
// testnet network prefix and returns the result.
func swapMainnetForTestnetPrefix(addr string) (string, error) {
	prefixRunes := []rune(address.TestnetPrefix)
	if len(prefixRunes) != 1 {
		return "", xerrors.Errorf("unexpected prefix length: %d", len(prefixRunes))
	}
	aChars := []rune(addr)
	// Guard the empty string: the original indexed aChars[0] unconditionally
	// and panicked on "".
	if len(aChars) == 0 {
		return "", xerrors.Errorf("empty address")
	}
	aChars[0] = prefixRunes[0]
	return string(aChars), nil
}
|
// Copyright 2017 Vckai Author. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package api
import (
"github.com/vckai/novel/app/models"
"github.com/vckai/novel/app/services"
)
// HomeController serves the API home endpoints (today's recommendations and
// user feedback).
type HomeController struct {
	BaseController
}
// Index is the home page endpoint: it returns today's 9 recommended novels
// as JSON.
func (this *HomeController) Index() {
	// Fetch today's recommendations (limit 9, offset 0).
	data := services.NovelService.GetTodayRecs(9, 0)
	this.OutJson(0, "", data)
}
// Feedback is the user-feedback endpoint: Ajax POSTs persist a feedback
// record and answer JSON; other requests render the feedback form.
func (this *HomeController) Feedback() {
	// Ajax: persist the submitted feedback.
	if this.IsAjax() {
		feed := models.NewFeedback()
		feed.Content = this.GetString("content")
		feed.Contact = this.GetString("contact")
		feed.Ip = this.Ctx.Input.IP()
		if err := services.FeedbackService.Save(feed); err != nil {
			this.OutJson(1001, "反馈失败:"+err.Error())
			// Return after reporting the failure: the original fell through
			// and also emitted the success response.
			return
		}
		this.OutJson(0, "反馈成功")
		return
	}
	this.Data["Title"] = "建议反馈"
	this.View("home/feedback.tpl")
}
|
package friend
import (
"errors"
"fmt"
)
// Friend holds the properties for tracking my relationships.
// Friend tracks a person's name, real age, and an optional fib about that
// age.
type Friend struct {
	name         string
	age          int
	ageOffset    int
	liesAboutAge bool
}

// NewFriend returns a Friend with the given name and age.
func NewFriend(name string, age int) *Friend {
	f := Friend{name: name, age: age}
	return &f
}

// SetLiesAboutAge records that this friend reports their age shifted by
// offset.
func (f *Friend) SetLiesAboutAge(offset int) {
	f.ageOffset = offset
	f.liesAboutAge = true
}

// GreetFriend returns a greeting naming the friend and their real age.
func (f *Friend) GreetFriend() string {
	return fmt.Sprintf("Hello %s, who is %d, years old.\n", f.name, f.age)
}

// CelebrateBirthday bumps the friend's age by one year; it panics when the
// resulting age leaves the valid range.
func (f *Friend) CelebrateBirthday() {
	next, err := addAge(f.age, 1)
	if err != nil {
		panic(err)
	}
	f.age = next
}

// GetAge reports the age the friend claims: the real age, or the real age
// plus their offset when they lie about it.
func (f *Friend) GetAge() int {
	if f.liesAboutAge {
		claimed, _ := addAge(f.age, f.ageOffset)
		return claimed
	}
	return f.age
}

// GetName returns the friend's name.
func (f *Friend) GetName() string {
	return f.name
}

// addAge returns currentAge+increment, or an error when the sum falls
// outside the plausible 0..110 range.
func addAge(currentAge int, increment int) (int, error) {
	sum := currentAge + increment
	if sum > 110 || sum < 0 {
		return 0, errors.New("invalid age")
	}
	return sum, nil
}
|
// Variables
package main
import "fmt"
// main demonstrates Go variable declaration forms: explicitly typed var,
// multi-variable var, type-inferred var, zero values, and short declaration.
func main() {
	var a string = "empezar"
	fmt.Println("a = ", a)
	var b, c, h int = 4, 5, 0
	fmt.Println("b, c, h", b, c, h)
	// Type inferred from the initializer (int, bool).
	var i = 3
	fmt.Println("i = ", i)
	var d = true
	fmt.Println("d = ", d)
	// Declared without initializers: zero values (0 and false).
	var e int
	fmt.Println("e = ", e)
	var j bool
	fmt.Println("j = ", j)
	// Short declaration form.
	f:= "short"
	fmt.Println("f = ", f)
	k:= 4 + 6
	fmt.Println("k = ", k)
}
|
package main
import (
"fmt"
_ "github.com/go-sql-driver/mysql"
"github.com/jinzhu/gorm"
)
// InForMation_All_b42661db mirrors one row of the source table
// information_all_b42661db; gorm maps each field to the column of the same
// (lowercased) name.
type InForMation_All_b42661db struct {
	Id int
	Url string
	Title string
	Author string
	Source string
	Release_datetime string
	Content string
	Media_type string
	Original_title string
	Editor string
	Reporter string
	Contents string
	Reading_times string
	Abstract_data string
	Media string
	Media_channel string
	Location string
	Location_path string
	Collection_tool string
	User string
	Site_url string
	Leaf_id string
	Task_id string
	Task_name string
	Get_time string
	Keyword string
	Pub_time string
	//Project_id string
}

// TableName pins the gorm table name for this model.
func (InForMation_All_b42661db) TableName() string {
	return "information_all_b42661db"
}
// BaiDuNews mirrors one row of the destination table baidunews; gorm maps
// each field to the column of the same (lowercased) name.
type BaiDuNews struct {
	Id string
	Url string
	Title string
	Author string
	Source string
	Release_datetime string
	Content string
	Media_type string
	Original_title string
	Editor string
	Reporter string
	Contents string
	Reading_times string
	Abstract_data string
	Media string
	Media_channel string
	Location string
	Locationpath string
	Collection_tool string
	User string
	Site_url string
	Leafid string
	Taskid string
	Taskname string
	Get_time string
	Keyword string
	//Pub_time string
	//Project_id string
}

// TableName pins the gorm table name for this model.
func (BaiDuNews) TableName() string {
	return "baidunews"
	//return "baidunews_jiemin"
}
// Package-level DB handles.
// NOTE(review): main() declares same-named locals with :=, shadowing these;
// the package variables are never actually assigned.
var dbInformationAll *gorm.DB
var dbBaiDuNews *gorm.DB
// main copies every row carrying a pub_time from information_all_b42661db
// in the analysis database into the baidunews table of network_data.
func main() {
	// Source DB (analysis).
	// NOTE(review): credentials are hard-coded; move them to configuration.
	srcDSN := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?charset=utf8mb4&parseTime=True&loc=Local",
		"funbird", "funbird2017", "10.10.206.205", 3306, "funbird_analysis")
	fmt.Print(srcDSN)
	srcDB, err := gorm.Open("mysql", srcDSN)
	if err != nil {
		// Abort instead of continuing: the original only printed the error
		// and then nil-dereferenced the handle below.
		fmt.Println(err)
		return
	}
	defer srcDB.Close()
	srcDB.LogMode(true)

	// Destination DB (network_data).
	dstDSN := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?charset=utf8mb4&parseTime=True&loc=Local",
		"iscloud", "iscloud", "192.168.95.140", 3306, "network_data")
	fmt.Print(dstDSN)
	dstDB, err := gorm.Open("mysql", dstDSN)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer dstDB.Close()
	dstDB.LogMode(true)

	// Pull every source row that has a publication time.
	var rows []*InForMation_All_b42661db
	srcDB.Table("information_all_b42661db").Where("pub_time is not null or pub_time !=''").Find(&rows)

	// Map each source row onto the destination schema and save it.
	for _, v := range rows {
		news := BaiDuNews{
			Id:               v.Leaf_id,
			Url:              v.Url,
			Title:            v.Title,
			Author:           v.Author,
			Source:           v.Source,
			Release_datetime: v.Pub_time,
			Content:          v.Content,
			Media_type:       v.Media_type,
			Original_title:   v.Original_title,
			Editor:           v.Editor,
			Reporter:         v.Reporter,
			Contents:         v.Contents,
			Reading_times:    v.Reading_times,
			Abstract_data:    v.Abstract_data,
			Media:            v.Media,
			Media_channel:    v.Media_channel,
			Location:         v.Location,
			Locationpath:     v.Location_path,
			Collection_tool:  v.Collection_tool,
			User:             v.User,
			Site_url:         v.Site_url,
			Leafid:           v.Leaf_id,
			Taskid:           v.Task_id,
			Taskname:         v.Task_name,
			Get_time:         v.Get_time,
			Keyword:          v.Keyword,
		}
		dstDB.Table("baidunews").Save(&news)
		//dstDB.Table("baidunews_jiemin").Save(&news)
	}
}
|
package svcs
import (
"database/sql"
"errors"
"week02/dao"
"week02/dtos"
)
// GetStudentByID fetches a student record by id. On sql.ErrNoRows it keeps
// the original fallback of returning a placeholder student together with
// the error; other failures and unexpected dao result types now return an
// error instead of panicking on the type assertion.
func GetStudentByID(id int) (*dtos.Student, error) {
	data, err := dao.QueryByID(id)
	if errors.Is(err, sql.ErrNoRows) {
		return &dtos.Student{ID: "111", Name: "check", Age: 28}, err
	}
	if err != nil {
		// Don't attempt the assertion on a possibly-nil result.
		return nil, err
	}
	student, ok := data.(*dtos.Student)
	if !ok {
		// Guarded assertion: the original `data.(*dtos.Student)` panicked
		// when dao returned any other type.
		return nil, errors.New("unexpected data type from dao.QueryByID")
	}
	return student, nil
}
|
// Copyright 2020 The Reed Developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
package miner
import (
"bytes"
"fmt"
bc "github.com/reed/blockchain"
"github.com/reed/blockchain/merkle"
"github.com/reed/consensus/pow"
"github.com/reed/errors"
"github.com/reed/log"
"github.com/reed/types"
"github.com/reed/wallet"
"strconv"
"sync"
)
var (
	// startErr wraps failures to start the miner.
	startErr = errors.New("miner failed to start")
	// workErr wraps failures inside the mining loop.
	workErr = errors.New("miner failed to work")
)

const (
	// maxTries is the largest nonce value before the search wraps around.
	maxTries = ^uint64(0)
)
// Miner drives proof-of-work mining: it builds candidate blocks on the chain
// tip, grinds nonces, and publishes solved blocks.
type Miner struct {
	sync.Mutex
	wallet *wallet.Wallet
	chain *bc.Chain
	// working guards against double Start; protected by the embedded Mutex.
	working bool
	// blockReceptionCh carries mined blocks out to the node.
	blockReceptionCh chan<- *types.RecvWrap
	// breakWorkCh interrupts the current mining round (e.g. new remote block).
	breakWorkCh <-chan struct{}
	// stopCh ends the work loop entirely.
	stopCh chan struct{}
}
// NewMiner wires a miner to the chain, wallet, and the channels used to
// publish mined blocks (rCh) and to receive break-work signals (bCh).
func NewMiner(c *bc.Chain, w *wallet.Wallet, rCh chan<- *types.RecvWrap, bCh <-chan struct{}) *Miner {
	return &Miner{
		wallet:           w,
		chain:            c,
		working:          false,
		blockReceptionCh: rCh,
		breakWorkCh:      bCh,
		// stopCh was previously left nil, so the work loop's stop case could
		// never fire (receive on a nil channel blocks forever); initialize it
		// so Stop can actually signal the loop.
		stopCh: make(chan struct{}),
	}
}
// Start marks the miner as working and launches the mining loop in its own
// goroutine. It is safe for concurrent use and errors if already started.
func (m *Miner) Start() error {
	m.Lock()
	defer m.Unlock()
	if m.working {
		return errors.Wrap(startErr, "Miner has started.")
	}
	m.working = true
	go m.work()
	fmt.Println("★★Miner Server Start")
	return nil
}
// Stop clears the working flag and signals the mining goroutine to exit.
// A second Stop is a no-op (prevents a double close of stopCh).
func (m *Miner) Stop() {
	m.Lock()
	defer m.Unlock()
	if !m.working {
		return
	}
	m.working = false
	// Previously nothing was sent to the work loop, so the goroutine kept
	// mining forever. Closing (rather than sending) wakes the loop even if
	// it is not currently blocked in its select. Guard against a nil channel
	// for Miner values built without NewMiner.
	if m.stopCh != nil {
		close(m.stopCh)
	}
	fmt.Println("★★Miner Server Stop")
}
// fetchBlock builds a fresh candidate block on top of the current chain tip.
func (m *Miner) fetchBlock() (*types.Block, error) {
	tip := m.chain.BlockManager.HighestBlock()
	candidate, err := m.buildBlock(tip)
	if err != nil {
		log.Logger.Error(workErr, err)
		return nil, err
	}
	return candidate, nil
}
// work is the mining loop: pack a candidate block, grind on it, then either
// repack (chain changed under us) or rebuild (we mined a block) and repeat,
// until stopCh is signalled.
func (m *Miner) work() {
	block, err := m.fetchBlock()
	if err != nil {
		log.Logger.Fatal(workErr, err)
		return
	}
	for {
		select {
		case <-m.stopCh:
			log.Logger.Info("mining work is stop")
			// BUG FIX: previously fell through and kept mining after a stop
			// request; exit the loop instead.
			return
		default:
		}
		repack := m.generateBlock(block)
		if repack {
			// A break signal arrived: the chain tip changed, so rebuild the
			// candidate from scratch.
			block, err = m.fetchBlock()
			if err != nil {
				log.Logger.Fatal(workErr, err)
				break
			}
			log.Logger.Info("receive from remote or reorganize chan,repack block complete.")
		} else {
			// We mined block; chain a new candidate on top of it.
			block, err = m.buildBlock(block)
			if err != nil {
				log.Logger.Error(workErr, err)
				break
			}
			log.Logger.Info("mined a block,rebuild a new block complete.")
		}
	}
}
// generateBlock grinds on block until either the proof-of-work check passes
// or a break signal arrives on breakWorkCh. It returns repack=true when the
// caller must rebuild the candidate (the chain changed), false when a block
// was mined and delivered to blockReceptionCh.
func (m *Miner) generateBlock(block *types.Block) (repack bool) {
	// extraNonce extends the search space once the 64-bit nonce overflows.
	extraNonce := uint64(0)
loop:
	for {
		select {
		case <-m.breakWorkCh:
			log.Logger.Info("Received a break single,stop mining.")
			repack = true
			break loop
		default:
			// just for no block,do nothing
		}
		if pow.CheckProofOfWork(block.BigNumber, block.GetHash()) {
			// Solved: hand the block over. SendBreakWork is false because
			// this miner already stopped its own round.
			m.blockReceptionCh <- &types.RecvWrap{SendBreakWork: false, Block: block}
			break loop
		} else {
			if block.Nonce == maxTries {
				// reset nonce
				block.Nonce = 0
				// change coinbase tx's scriptSig and continue
				extraNonce++
				m.incrementExtraNonce(extraNonce, block)
			} else {
				block.Nonce++
			}
		}
	}
	return
}
// buildBlock assembles a candidate block on top of pre (the genesis block
// when pre is nil): coinbase transaction first, then pool transactions, then
// difficulty and merkle root.
func (m *Miner) buildBlock(pre *types.Block) (*types.Block, error) {
	var b *types.Block
	if pre == nil {
		b = types.GetGenesisBlock()
	} else {
		b = &types.Block{
			BlockHeader:  *pre.Copy(),
			Transactions: []*types.Tx{},
		}
	}
	// Start from the easiest difficulty; refined below once the tx set is known.
	b.BigNumber = pow.DifficultyLimit()

	cbTx, err := types.NewCoinbaseTx(b.Height, m.wallet.Pub, bc.CalcCoinbaseAmt(b.Height))
	if err != nil {
		return nil, err
	}
	// Coinbase always occupies slot 0, followed by the pool transactions.
	b.Transactions = append([]*types.Tx{cbTx}, m.chain.Txpool.GetTxs()...)

	// recalculate difficulty
	b.BigNumber = pow.GetDifficulty(b, m.chain.BlockManager.GetAncestor)
	// set tx merkle root
	b.MerkleRootHash = merkle.ComputeMerkleRoot(b.Transactions)
	return b, nil
}
// incrementExtraNonce is called when the nonce reaches its maximum value:
// it appends the decimal form of extraNonce to the coinbase input's
// scriptSig (changing the block hash search space) and refreshes the merkle
// root accordingly.
func (m *Miner) incrementExtraNonce(extraNonce uint64, b *types.Block) {
	coinbaseIn := b.Transactions[0].TxInput[0]
	coinbaseIn.ScriptSig = strconv.AppendUint(coinbaseIn.ScriptSig, extraNonce, 10)
	b.Transactions[0].TxInput[0] = coinbaseIn
	// recompute merkle root
	b.MerkleRootHash = merkle.ComputeMerkleRoot(b.Transactions)
}
|
// Memcache server, which uses YBC library as caching backend.
//
// Thanks to YBC, the server has the following features missing
// in the original memcached:
// * Cache content survives server restarts if it is backed by files.
// * Cache size may exceed available RAM size by multiple orders of magnitude.
// The server should remain fast until the total size of frequently accessed
// items exceeds RAM size. It may remain relatively fast even if frequently
// accessed items don't fit RAM if cache files are located on fast SSDs.
// * The maximum value size is limited by 2Gb.
// * There is no 250 byte limit on key size.
// * Support for 'dogpile effect' handling - see Client.GetDe()
// at github.com/valyala/ybc/libs/go/memcache for details.
// * Support for 'conditional get' command - see Client.Cget()
// at github.com/valyala/ybc/libs/go/memcache for details.
package main
import (
"flag"
"github.com/valyala/ybc/bindings/go/ybc"
"github.com/valyala/ybc/libs/go/memcache"
"github.com/vharitonsky/iniflags"
"log"
"runtime"
"strings"
"time"
)
var (
defaultMaxProcs = runtime.NumCPU()
)
var (
cacheFilesPath = flag.String("cacheFilesPath", "",
"Path to cache file. Leave empty for anonymous non-persistent cache.\n"+
"Enumerate multiple files delimited by comma for creating a cluster of caches.\n"+
"This can increase performance only if frequently accessed items don't fit RAM\n"+
"and each cache file is located on a distinct physical storage.")
cacheSize = flag.Uint64("cacheSize", 64, "Total cache capacity in Megabytes")
deHashtableSize = flag.Int("deHashtableSize", 16, "Dogpile effect hashtable size")
goMaxProcs = flag.Int("goMaxProcs", defaultMaxProcs, "Maximum number of simultaneous Go threads")
hotDataSize = flag.Uint64("hotDataSize", 0, "Hot data size in bytes. 0 disables hot data optimization")
hotItemsCount = flag.Uint64("hotItemsCount", 0, "The number of hot items. 0 disables hot items optimization")
listenAddr = flag.String("listenAddr", ":11211", "TCP address the server will listen to")
maxItemsCount = flag.Uint64("maxItemsCount", 1000*1000, "Maximum number of items the server can cache")
syncInterval = flag.Duration("syncInterval", time.Second*10, "Interval for data syncing. 0 disables data syncing")
osReadBufferSize = flag.Int("osReadBufferSize", 224*1024, "Buffer size in bytes for incoming requests in OS")
osWriteBufferSize = flag.Int("osWriteBufferSize", 224*1024, "Buffer size in bytes for outgoing responses in OS")
readBufferSize = flag.Int("readBufferSize", 56*1024, "Buffer size in bytes for incoming requests")
writeBufferSize = flag.Int("writeBufferSize", 56*1024, "Buffer size in bytes for outgoing responses")
)
// main configures a YBC-backed cache — a single (possibly anonymous) cache,
// or a cluster when several comma-separated file paths are given — then
// serves the memcache protocol on *listenAddr until a fatal error.
func main() {
	iniflags.Parse()
	runtime.GOMAXPROCS(*goMaxProcs)

	syncInterval_ := *syncInterval
	if syncInterval_ <= 0 {
		syncInterval_ = ybc.ConfigDisableSync
	}
	config := ybc.Config{
		MaxItemsCount:   ybc.SizeT(*maxItemsCount),
		DataFileSize:    ybc.SizeT(*cacheSize) * ybc.SizeT(1024 * 1024),
		HotItemsCount:   ybc.SizeT(*hotItemsCount),
		HotDataSize:     ybc.SizeT(*hotDataSize),
		DeHashtableSize: *deHashtableSize,
		SyncInterval:    syncInterval_,
	}

	var cache ybc.Cacher
	var err error

	cacheFilesPath_ := strings.Split(*cacheFilesPath, ",")
	cacheFilesCount := len(cacheFilesPath_)
	log.Printf("Opening data files. This can take a while for the first time if files are big\n")
	if cacheFilesCount < 2 {
		// Single cache; anonymous (non-persistent) when no path is given.
		if cacheFilesPath_[0] != "" {
			config.DataFile = cacheFilesPath_[0] + ".go-memcached.data"
			config.IndexFile = cacheFilesPath_[0] + ".go-memcached.index"
		}
		cache, err = config.OpenCache(true)
		if err != nil {
			log.Fatalf("Cannot open cache: [%s]", err)
		}
	} else {
		// Was `else if cacheFilesCount > 1`, which is always true here; a
		// plain else makes it obvious that `cache` is assigned on every path
		// before the deferred Close below.
		// Split total capacity evenly across the cluster members.
		config.MaxItemsCount /= ybc.SizeT(cacheFilesCount)
		config.DataFileSize /= ybc.SizeT(cacheFilesCount)
		configs := make(ybc.ClusterConfig, cacheFilesCount)
		for i := 0; i < cacheFilesCount; i++ {
			cfg := config
			cfg.DataFile = cacheFilesPath_[i] + ".go-memcached.data"
			cfg.IndexFile = cacheFilesPath_[i] + ".go-memcached.index"
			configs[i] = &cfg
		}
		cache, err = configs.OpenCluster(true)
		if err != nil {
			log.Fatalf("Cannot open cache cluster: [%s]", err)
		}
	}
	defer cache.Close()
	log.Printf("Data files have been opened\n")

	s := memcache.Server{
		Cache:             cache,
		ListenAddr:        *listenAddr,
		ReadBufferSize:    *readBufferSize,
		WriteBufferSize:   *writeBufferSize,
		OSReadBufferSize:  *osReadBufferSize,
		OSWriteBufferSize: *osWriteBufferSize,
	}
	log.Printf("Starting the server")
	if err := s.Serve(); err != nil {
		log.Fatalf("Cannot serve traffic: [%s]", err)
	}
}
|
package iam
import (
"app-auth/db"
"app-auth/types"
"context"
"github.com/mongodb/mongo-go-driver/bson"
"log"
"strings"
)
// Scope is a set of scope names requested for a particular app.
type Scope struct {
	Scopes []string `json:"scopes"`
	App string `json:"app"`
	// scopes and permissions are intended as per-call caches, but every
	// method on Scope uses a value receiver, so writes to these fields are
	// made on a copy and never survive the call — rely on return values.
	scopes []types.Scopes
	permissions []string
}
// String implements fmt.Stringer, rendering the scope names inside an
// XML-like "<Scope ... />" tag.
func (scope Scope) String() string {
	var b strings.Builder
	b.WriteString("<Scope ")
	b.WriteString(strings.Join(scope.Scopes, " "))
	b.WriteString(" />")
	return b.String()
}
// GetApp returns the scope's app name normalized to upper case.
func (scope Scope) GetApp() string {
	app := scope.App
	return strings.ToUpper(app)
}
// getScopesFromDB loads the scope documents whose name is in scope.Scopes
// and which belong either to this app or to the global "id.scaratec.com" app.
// Returns nil when the query fails (the failure is logged).
func (scope Scope) getScopesFromDB() []types.Scopes {
	// ["EDITOR", "READER"]
	scopesFilter := bson.D{
		{"name", bson.D{{"$in", scope.Scopes}}},
		{"$or", bson.A{
			bson.D{{"app", scope.App}},
			bson.D{{"app", "id.scaratec.com"}},
		}},
	}
	cur, err := db.ScopesCollection.Find(context.TODO(), scopesFilter)
	if err != nil {
		// BUG FIX: previously only logged and then dereferenced the nil
		// cursor below (defer cur.Close / cur.Next), which panics.
		log.Println(err)
		return nil
	}
	defer cur.Close(context.TODO())
	var scopes []types.Scopes
	// Decode every matching document; decode failures are logged and the
	// (zero-valued) element is still appended, as before.
	for cur.Next(context.Background()) {
		var elem types.Scopes
		if err := cur.Decode(&elem); err != nil {
			log.Print(err)
		}
		scopes = append(scopes, elem)
	}
	// NOTE(review): value receiver — this caches onto a copy of scope and is
	// lost when the method returns; callers must use the returned slice.
	scope.scopes = scopes
	return scopes
}
// getPermissionList flattens the permissions of every matching scope
// document into a single list.
func (scope Scope) getPermissionList() []string {
	var perms []string
	for _, s := range scope.getScopesFromDB() {
		perms = append(perms, s.Permissions...)
	}
	// NOTE(review): value receiver — this write is lost when the method
	// returns; rely on the returned slice instead.
	scope.permissions = perms
	return perms
}
// permission materializes the Permission value for this scope set.
func (scope Scope) permission() Permission {
	// BUG FIX: use the returned list directly. The old code discarded it and
	// read scope.permissions, but getPermissionList has a value receiver, so
	// that field was set on a copy and was always nil here.
	permissions := scope.getPermissionList()
	return Permission{
		Scopes:      scope.Scopes,
		App:         scope.App,
		Permissions: permissions,
	}
}
// HasPermission reports whether this scope set grants the given permission.
func (scope Scope) HasPermission(permit string) bool {
	p := scope.permission()
	return p.HasPermission(permit)
}
// FindUserScopes identifies a user — optionally within an organisation
// and/or team — whose granted scopes should be looked up.
type FindUserScopes struct {
	Id string `json:"id"`
	App string `json:"app"`
	Email string `json:"email"`
	OrganisationId string `json:"organisation_id"`
	TeamId string `json:"team_id"`
}
// GetOrganisationsScopesLevelFilter builds the query matching scopes granted
// at the organisation level.
func (f FindUserScopes) GetOrganisationsScopesLevelFilter() bson.D {
	filter := bson.D{
		{"organisationid", f.OrganisationId},
		{"id", f.Id},
		{"useremail", f.Email},
		{"app", f.App},
	}
	return filter
}
// GetTeamsScopesLevelFilter builds the query matching scopes granted at the
// team level (organisation + team).
func (f FindUserScopes) GetTeamsScopesLevelFilter() bson.D {
	filter := bson.D{
		{"organisationid", f.OrganisationId},
		{"teamid", f.TeamId},
		{"id", f.Id},
		{"useremail", f.Email},
		{"app", f.App},
	}
	return filter
}
// GetAppScopesLevelFilter builds the query matching scopes granted at the
// app level (no organisation/team restriction); used when setting and
// updating IAMs.
func (f FindUserScopes) GetAppScopesLevelFilter() bson.D {
	filter := bson.D{
		{"id", f.Id},
		{"useremail", f.Email},
		{"app", f.App},
	}
	return filter
}
// FindAndReturnUserScopesBasedOnFilter runs filter against the user-scopes
// collection and returns the flattened scope names of every matching record.
// Query failures are logged and yield an empty list.
func (f FindUserScopes) FindAndReturnUserScopesBasedOnFilter(filter bson.D) []string {
	cursor, err := db.UserScopesCollection.Find(context.TODO(), filter)
	if err != nil {
		log.Println(err)
		return []string{}
	}
	defer cursor.Close(context.TODO())

	var collected []string
	for cursor.Next(context.TODO()) {
		var record types.UserMemberScope
		if err := cursor.Decode(&record); err != nil {
			log.Print(err)
		}
		collected = append(collected, record.Scopes...)
	}
	return collected
}
|
package chatbots
import (
"context"
"fmt"
tgbotapi "github.com/go-telegram-bot-api/telegram-bot-api"
)
// cmdStart handles /start: greet with the configured bot info followed by
// the welcome words. When settings are missing, ask the user to configure.
func cmdStart(c ChatBot, message *tgbotapi.Message) (err error) {
	settings, err := c.storage.GetSettings(context.Background())
	if err != nil {
		_, err = c.botClient.Send(tgbotapi.NewMessage(message.Chat.ID, "请先使用 /settings 修改设置"))
		return
	}
	for _, text := range []string{settings.BotInfo, settings.WelcomeWords} {
		if _, err = c.botClient.Send(tgbotapi.NewMessage(message.Chat.ID, text)); err != nil {
			return
		}
	}
	return
}
// cmdSettings shows the settings inline keyboard; admin only — silently
// ignored for everyone else.
func cmdSettings(c ChatBot, message *tgbotapi.Message) (err error) {
	if message.From.ID != c.adminID {
		return
	}
	settings, _ := c.storage.GetSettings(context.Background())
	cfg := tgbotapi.MessageConfig{
		BaseChat: tgbotapi.BaseChat{
			ChatID:      message.Chat.ID,
			ReplyMarkup: settingsMarkup(),
		},
		Text: "change settings\n" + settings.String(),
	}
	_, err = c.botClient.Send(cfg)
	return
}
// settingsMarkup builds the /settings inline keyboard: one row per editable
// setting plus a final "done" row.
func settingsMarkup() (replyMarkup tgbotapi.InlineKeyboardMarkup) {
	buttons := []tgbotapi.InlineKeyboardButton{
		tgbotapi.NewInlineKeyboardButtonData("change welcome words", "/change_welcome_words"),
		tgbotapi.NewInlineKeyboardButtonData("change bot info", "/change_bot_info"),
		tgbotapi.NewInlineKeyboardButtonData("change thanks words", "/change_thanks"),
		tgbotapi.NewInlineKeyboardButtonData("change forward to chat id", "/change_forward_to_chat_id"),
		tgbotapi.NewInlineKeyboardButtonData("done", "/change_done"),
	}
	rows := make([][]tgbotapi.InlineKeyboardButton, 0, len(buttons))
	for _, b := range buttons {
		rows = append(rows, tgbotapi.NewInlineKeyboardRow(b))
	}
	return tgbotapi.NewInlineKeyboardMarkup(rows...)
}
// cmdGetChatID replies with the numeric id of the current chat; admin only.
func cmdGetChatID(c ChatBot, message *tgbotapi.Message) (err error) {
	if message.From.ID != c.adminID {
		return
	}
	reply := tgbotapi.MessageConfig{
		BaseChat: tgbotapi.BaseChat{
			ChatID: message.Chat.ID,
		},
		Text: fmt.Sprintf("%d", message.Chat.ID),
	}
	_, err = c.botClient.Send(reply)
	return
}
|
package sign
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"unicode"
"golang.org/x/crypto/openpgp"
"github.com/goreleaser/nfpm/v2"
)
// PGPSigner returns a PGP signer that creates a detached non-ASCII-armored
// signature and is compatible with rpmpack's signature API. The key is
// (re)loaded on every invocation of the returned function.
func PGPSigner(keyFile, passphrase string) func([]byte) ([]byte, error) {
	return func(data []byte) ([]byte, error) {
		signingKey, err := readSigningKey(keyFile, passphrase)
		if err != nil {
			return nil, &nfpm.ErrSigningFailure{Err: err}
		}
		var sig bytes.Buffer
		if err := openpgp.DetachSign(&sig, signingKey, bytes.NewReader(data), nil); err != nil {
			return nil, &nfpm.ErrSigningFailure{Err: err}
		}
		return sig.Bytes(), nil
	}
}
// PGPArmoredDetachSign creates an ASCII-armored detached signature over
// message using the key loaded from keyFile.
func PGPArmoredDetachSign(message io.Reader, keyFile, passphrase string) ([]byte, error) {
	signingKey, err := readSigningKey(keyFile, passphrase)
	if err != nil {
		return nil, fmt.Errorf("armored detach sign: %w", err)
	}
	var sig bytes.Buffer
	if err := openpgp.ArmoredDetachSign(&sig, signingKey, message, nil); err != nil {
		return nil, fmt.Errorf("armored detach sign: %w", err)
	}
	return sig.Bytes(), nil
}
// PGPVerify is exported for use in tests and verifies an ASCII-armored or
// non-ASCII-armored signature using an ASCII-armored or non-ASCII-armored
// public key file. The signer identity is not explicitly checked, other than
// the obvious fact that the signer's key must be in armoredPubKeyFile.
func PGPVerify(message io.Reader, signature []byte, armoredPubKeyFile string) error {
	keyFileContent, err := ioutil.ReadFile(armoredPubKeyFile)
	if err != nil {
		return fmt.Errorf("reading armored public key file: %w", err)
	}
	// Detect armored vs binary keyring by whether the file is pure ASCII.
	var keyring openpgp.EntityList
	if isASCII(keyFileContent) {
		if keyring, err = openpgp.ReadArmoredKeyRing(bytes.NewReader(keyFileContent)); err != nil {
			return fmt.Errorf("decoding armored public key file: %w", err)
		}
	} else {
		if keyring, err = openpgp.ReadKeyRing(bytes.NewReader(keyFileContent)); err != nil {
			return fmt.Errorf("decoding public key file: %w", err)
		}
	}
	// Same detection for the signature itself.
	check := openpgp.CheckDetachedSignature
	if isASCII(signature) {
		check = openpgp.CheckArmoredDetachedSignature
	}
	_, err = check(keyring, message, bytes.NewReader(signature))
	return err
}
// Sentinel errors returned by readSigningKey.
var (
	errMoreThanOneKey = errors.New("more than one signing key in keyring")
	errNoKeys = errors.New("no signing key in keyring")
	errNoPassword = errors.New("key is encrypted but no passphrase was provided")
)
// readSigningKey loads keyFile (armored or binary keyring), requires exactly
// one signing-capable private key in it, and decrypts that key with
// passphrase when it is encrypted.
func readSigningKey(keyFile, passphrase string) (*openpgp.Entity, error) {
	raw, err := ioutil.ReadFile(keyFile)
	if err != nil {
		return nil, fmt.Errorf("reading PGP key file: %w", err)
	}
	var ring openpgp.EntityList
	if isASCII(raw) {
		if ring, err = openpgp.ReadArmoredKeyRing(bytes.NewReader(raw)); err != nil {
			return nil, fmt.Errorf("decoding armored PGP keyring: %w", err)
		}
	} else {
		if ring, err = openpgp.ReadKeyRing(bytes.NewReader(raw)); err != nil {
			return nil, fmt.Errorf("decoding PGP keyring: %w", err)
		}
	}
	// Select the single usable signing key; ambiguity is an error.
	var signer *openpgp.Entity
	for _, e := range ring {
		if e.PrivateKey == nil || !e.PrivateKey.CanSign() {
			continue
		}
		if signer != nil {
			return nil, errMoreThanOneKey
		}
		signer = e
	}
	if signer == nil {
		return nil, errNoKeys
	}
	if signer.PrivateKey.Encrypted {
		if passphrase == "" {
			return nil, errNoPassword
		}
		if err := signer.PrivateKey.Decrypt([]byte(passphrase)); err != nil {
			return nil, fmt.Errorf("decrypt secret signing key: %w", err)
		}
	}
	return signer, nil
}
// isASCII reports whether every byte of s is a 7-bit ASCII character.
func isASCII(s []byte) bool {
	for _, c := range s {
		if c > unicode.MaxASCII {
			return false
		}
	}
	return true
}
|
package build
import (
"fmt"
"log"
"math/rand"
"net/url"
"os"
"path/filepath"
"runtime"
"sync"
"time"
)
// App the current app we want to build (never changes so we can make it static)
var App = &Target{}
// init prepares the build target: a random localhost bind address, a rebuild
// flag set so the first request triggers a build, the temp-dir path of the
// output binary, and a filesystem watcher that flags rebuilds on change.
func init() {
	var err error
	bind := randomLocalBind()
	// We want to build on the first request
	App.rwmut = &sync.RWMutex{}
	App.rebuild = true
	App.Bind = bind
	App.URL, err = url.Parse(fmt.Sprintf("http://%s", App.Bind))
	if err != nil { // This should not happen
		log.Fatal(err)
	}
	path, err := os.Getwd()
	if err != nil {
		log.Fatal(err)
	}
	// The binary lands in the temp dir, named after the working directory.
	binName := filepath.Base(path)
	if runtime.GOOS == "windows" {
		App.BinaryPath = filepath.Join(os.TempDir(), binName+".exe")
	} else {
		App.BinaryPath = filepath.Join(os.TempDir(), binName)
	}
	// Monitor the filesystem and flag for rebuild on a change.
	go func() {
		// `for range` replaces the non-idiomatic `for _ = range`.
		for range FileChanged("./") {
			App.Rebuild()
		}
	}()
}
// randomLocalBind picks a random localhost TCP address with a port in
// [9000, 65535], using a time-seeded source.
func randomLocalBind() string {
	const (
		minPort = 9000
		maxPort = 65536
	)
	rng := rand.New(rand.NewSource(time.Now().Unix()))
	port := minPort + rng.Intn(maxPort-minPort)
	return fmt.Sprintf("localhost:%d", port)
}
|
package router
import (
"encoding/json"
"fmt"
"log"
"math/rand"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/cswank/quimby/internal/schema"
"github.com/cswank/quimby/internal/templates"
"github.com/go-chi/chi"
"github.com/gorilla/websocket"
)
// getAll renders the index page listing every gadget.
func (g *server) getAll(w http.ResponseWriter, req *http.Request) error {
	// NOTE(review): reseeding the global RNG on every request is unusual;
	// kept as-is to preserve behavior.
	rand.Seed(time.Now().UnixNano())
	all, err := g.gadgets.GetAll()
	if err != nil {
		return err
	}
	page := templates.NewPage("Quimby", "gadgets.ghtml", templates.WithGadgets(all))
	return render(page, w, req)
}
// get renders the detail page for a single gadget, including its websocket
// endpoint and a link to the method editor.
func (g *server) get(w http.ResponseWriter, req *http.Request) error {
	gadget, err := g.gadget(req)
	if err != nil {
		return err
	}
	gadget.Fetch()
	wsURL := fmt.Sprintf("wss://%s/gadgets/%d/websocket", g.cfg.Host, gadget.ID)
	methodLink := templates.Link{Name: "method", Link: fmt.Sprintf("/gadgets/%d/method", gadget.ID)}
	page := templates.NewPage(
		gadget.Name,
		"gadget.ghtml",
		templates.WithWebsocket(wsURL),
		templates.WithGadget(gadget),
		templates.WithScripts([]string{"https://cdnjs.cloudflare.com/ajax/libs/underscore.js/1.9.1/underscore-min.js"}),
		templates.WithLinks([]templates.Link{methodLink}),
	)
	return render(page, w, req)
}
// method renders the method editor page for a gadget.
func (g *server) method(w http.ResponseWriter, req *http.Request) error {
	gadget, err := g.gadget(req)
	if err != nil {
		return err
	}
	page := templates.NewPage(
		"Quimby",
		"edit-method.ghtml",
		templates.WithScripts([]string{"https://cdnjs.cloudflare.com/ajax/libs/underscore.js/1.9.1/underscore-min.js"}),
		templates.WithGadget(gadget),
	)
	return render(page, w, req)
}
// runMethod decodes a method from the request body and forwards it to the
// gadget as a "method" message.
func (g *server) runMethod(w http.ResponseWriter, req *http.Request) error {
	gadget, err := g.gadget(req)
	if err != nil {
		return err
	}
	var m schema.Method
	if err := json.NewDecoder(req.Body).Decode(&m); err != nil {
		return err
	}
	return gadget.Send(schema.Message{
		Type:   "method",
		Method: m,
	})
}
// connect registers with a gogadget instance and starts up
// a websocket. It pushes new messages from the
// instance to the websocket and vice versa.
//
// Channel roles: ws carries browser->gadget commands (fed by listen), ch
// carries gadget->browser updates (fed via g.clients), and q signals that
// the browser side has gone away.
func (g *server) connect(w http.ResponseWriter, req *http.Request) error {
	gadget, err := g.gadget(req)
	if err != nil {
		return err
	}
	if err := g.register(gadget); err != nil {
		return err
	}
	ws := make(chan schema.Message)
	q := make(chan bool)
	ch := make(chan schema.Message)
	// Register this browser as a client of the gadget so updates reach ch.
	uuid := schema.UUID()
	g.clients.Add(gadget.URL, uuid, ch)
	// NOTE(review): CheckOrigin accepts every origin, disabling websocket
	// cross-origin protection — confirm this is intentional.
	upgrader := websocket.Upgrader{
		ReadBufferSize: 1024,
		WriteBufferSize: 1024,
		CheckOrigin: func(r *http.Request) bool { return true },
	}
	conn, err := upgrader.Upgrade(w, req, nil)
	if err != nil {
		return err
	}
	// Reader goroutine: pumps incoming frames into ws and signals q on close.
	go listen(conn, ws, q)
	for {
		select {
		case msg := <-ws: // user is sending a command
			if err := gadget.Send(msg); err != nil {
				return err
			}
		case msg := <-ch: // gadget is sending an update to all those that care.
			sendSocketMessage(conn, msg)
		case <-q: // user has left the page where the websocket lived.
			g.clients.Delete(gadget.URL, uuid)
			return nil
		}
	}
}
// register announces this quimby instance to the gadget, authenticating the
// callback with a freshly generated random token.
func (g *server) register(gadget schema.Gadget) error {
	_, err := gadget.Register(g.cfg.InternalAddress, g.randString())
	return err
}
// randString produces a 32-rune random token drawn from the package-level
// letters alphabet.
func (g *server) randString() string {
	out := make([]rune, 32)
	for i := 0; i < len(out); i++ {
		out[i] = letters[rand.Intn(len(letters))]
	}
	return string(out)
}
// sendSocketMessage marshals m and writes it to the websocket as a text
// frame. Failures are logged, not propagated (callers fire-and-forget).
func sendSocketMessage(conn *websocket.Conn, m schema.Message) {
	d, err := json.Marshal(m)
	if err != nil {
		// BUG FIX: previously discarded; a marshal failure silently sent an
		// empty/garbage frame.
		log.Println("unable to marshal websocket message", err)
		return
	}
	if err := conn.WriteMessage(websocket.TextMessage, d); err != nil {
		log.Println("unable to write to websocket", err)
	}
}
// listen pumps incoming websocket text frames into ch and signals q when
// the socket errors out or closes.
func listen(conn *websocket.Conn, ch chan<- schema.Message, q chan<- bool) {
	for {
		msgType, payload, err := conn.ReadMessage()
		if err != nil {
			q <- true
			return
		}
		switch {
		case msgType == websocket.TextMessage:
			var m schema.Message
			if err := json.Unmarshal(payload, &m); err != nil {
				return
			}
			ch <- m
		case msgType == websocket.CloseMessage || msgType == -1:
			q <- true
			return
		}
	}
}
// gadget resolves the {id} URL parameter into a stored gadget.
func (g *server) gadget(req *http.Request) (schema.Gadget, error) {
	raw := chi.URLParam(req, "id")
	id, err := strconv.ParseInt(raw, 10, 64)
	if err != nil {
		return schema.Gadget{}, err
	}
	return g.gadgets.Get(int(id))
}
// static serves the embedded static assets, forcing a text/css content type
// for *.css.map files.
func (g *server) static() handler {
	fs := http.FileServer(http.FS(templates.Static))
	return func(w http.ResponseWriter, req *http.Request) error {
		// BUG FIX: the override must happen BEFORE the file server writes
		// the response — headers changed after ServeHTTP has written are
		// silently ignored. Also use the canonical header key via Set; the
		// old lowercase "content-type" map assignment would not match.
		if strings.Contains(req.URL.Path, ".css.map") {
			w.Header().Set("Content-Type", "text/css")
		}
		fs.ServeHTTP(w, req)
		return nil
	}
}
// update receives status posts from gadgets and fans them out to the health
// checker and every connected websocket client.
func (g server) update(w http.ResponseWriter, req *http.Request) error {
	var msg schema.Message
	err := json.NewDecoder(req.Body).Decode(&msg)
	if err != nil {
		return err
	}
	g.hc.Update(msg)
	g.clients.Update(msg)
	return nil
}
// redirect sends the caller to the gadget index at /gadgets (303 See Other).
func (g server) redirect(w http.ResponseWriter, req *http.Request) {
	http.Redirect(w, req, "/gadgets", http.StatusSeeOther)
}
func noQueries(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
req.URL = &url.URL{Path: req.URL.Path}
h.ServeHTTP(w, req)
})
}
|
package vx
/*
#cgo CFLAGS: -mavx -mfma -std=c11
#cgo LDFLAGS: -lm
#include <immintrin.h>
void vx_add(const size_t size, const float *x, const float *y, float *z) {
__m256 *vx = (__m256 *)x;
__m256 *vy = (__m256 *)y;
__m256 *vz = (__m256 *)z;
const size_t l = size / 8;
for (size_t i = 0; i < l; ++i) {
vz[i] = _mm256_add_ps(vx[i], vy[i]);
}
}
void vx_sub(const size_t size, const float *x, const float *y, float *z) {
__m256 *vx = (__m256 *)x;
__m256 *vy = (__m256 *)y;
__m256 *vz = (__m256 *)z;
const size_t l = size / 8;
for (size_t i = 0; i < l; ++i) {
vz[i] = _mm256_sub_ps(vx[i], vy[i]);
}
}
void vx_mul(const size_t size, const float *x, const float *y, float *z) {
__m256 *vx = (__m256 *)x;
__m256 *vy = (__m256 *)y;
__m256 *vz = (__m256 *)z;
const size_t l = size / 8;
for (size_t i = 0; i < l; ++i) {
vz[i] = _mm256_mul_ps(vx[i], vy[i]);
}
}
void vx_div(const size_t size, const float *x, const float *y, float *z) {
__m256 *vx = (__m256 *)x;
__m256 *vy = (__m256 *)y;
__m256 *vz = (__m256 *)z;
const size_t l = size / 8;
for (size_t i = 0; i < l; ++i) {
vz[i] = _mm256_div_ps(vx[i], vy[i]);
}
}
float vx_dot(const size_t size, const float *x, const float *y) {
__m256 vsum = {0};
__m256 *vx = (__m256 *)x;
__m256 *vy = (__m256 *)y;
const size_t l = size / 8;
for (size_t i = 0; i < l; ++i) {
vsum = _mm256_fmadd_ps(vx[i], vy[i], vsum);
}
__attribute__((aligned(32))) float v[8];
_mm256_store_ps(v, vsum);
return v[0] + v[1] + v[2] + v[3] + v[4] + v[5] + v[6] + v[7];
}
*/
import "C"
// Add computes z = x + y element-wise over size elements using the AVX
// kernel. All three slices must be non-empty (&x[0] panics on empty) and at
// least size long. NOTE(review): align presumably rounds size to the 8-float
// SIMD width the C kernels process — confirm against align's definition.
func Add(size int, x, y, z []float32) {
	size = align(size)
	C.vx_add((C.size_t)(size), (*C.float)(&x[0]), (*C.float)(&y[0]), (*C.float)(&z[0]))
}
// Sub computes z = x - y element-wise over size elements using the AVX
// kernel. Same slice-length/non-empty requirements as Add.
func Sub(size int, x, y, z []float32) {
	size = align(size)
	C.vx_sub((C.size_t)(size), (*C.float)(&x[0]), (*C.float)(&y[0]), (*C.float)(&z[0]))
}
// Mul computes z = x * y element-wise over size elements using the AVX
// kernel. Same slice-length/non-empty requirements as Add.
func Mul(size int, x, y, z []float32) {
	size = align(size)
	C.vx_mul((C.size_t)(size), (*C.float)(&x[0]), (*C.float)(&y[0]), (*C.float)(&z[0]))
}
// Div computes z = x / y element-wise over size elements using the AVX
// kernel. Same slice-length/non-empty requirements as Add; no special
// handling of division by zero beyond IEEE float semantics.
func Div(size int, x, y, z []float32) {
	size = align(size)
	C.vx_div((C.size_t)(size), (*C.float)(&x[0]), (*C.float)(&y[0]), (*C.float)(&z[0]))
}
// Dot returns the dot product of x and y over size elements, accumulated
// with fused multiply-add in 8-lane chunks by the C kernel. Both slices must
// be non-empty and at least size long (after align).
func Dot(size int, x, y []float32) float32 {
	size = align(size)
	dot := C.vx_dot((C.size_t)(size), (*C.float)(&x[0]), (*C.float)(&y[0]))
	return float32(dot)
}
|
package main
import (
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"strings"
"time"
)
func copyFile(src, dst string) error {
in, err := os.Open(src)
if err != nil {
return err
}
defer in.Close()
out, err := os.Create(dst)
if err != nil {
return err
}
defer func() {
if e := out.Close(); e != nil {
err = e
}
}()
if _, err := io.Copy(out, in); err != nil {
return err
}
if err := out.Sync(); err != nil {
return err
}
si, err := os.Stat(src)
if err != nil {
return err
}
if err := os.Chmod(dst, si.Mode()); err != nil {
return err
}
if err := os.Chtimes(dst, time.Now(), si.ModTime()); err != nil {
return err
}
return nil
}
// mirrorDir makes dst an exact mirror of src: entries missing from src are
// deleted from dst, and files that are missing or modified (size/mtime) are
// copied over. Directories are mirrored recursively.
func mirrorDir(src, dst string) error {
	src = filepath.Clean(src)
	dst = filepath.Clean(dst)
	sstat, err := os.Stat(src)
	if err != nil {
		return err
	}
	if !sstat.IsDir() {
		return fmt.Errorf("%s is not directory", src)
	}
	if err := os.MkdirAll(dst, sstat.Mode()); err != nil {
		return err
	}
	dstentries, err := ioutil.ReadDir(dst)
	if err != nil {
		return err
	}
	// Cleanup the destination directory.
	for _, dstentry := range dstentries {
		srcPath := filepath.Join(src, dstentry.Name())
		dstPath := filepath.Join(dst, dstentry.Name())
		// if srcPath doesn't exist, delete it.
		if _, err := os.Stat(srcPath); os.IsNotExist(err) {
			log.Printf("Deleting %s\n", dstPath)
			if err := os.RemoveAll(dstPath); err != nil {
				// BUG FIX: was `return nil`, swallowing the failure.
				return err
			}
		}
	}
	// Copy all of the contents in the source directory to the destination
	// directory if the content doesn't exist or isn't newest.
	srcentries, err := ioutil.ReadDir(src)
	if err != nil {
		return err
	}
	for _, srcentry := range srcentries {
		srcPath := filepath.Join(src, srcentry.Name())
		dstPath := filepath.Join(dst, srcentry.Name())
		if srcentry.IsDir() {
			if err := mirrorDir(srcPath, dstPath); err != nil {
				return err
			}
			continue
		}
		si, err := os.Stat(srcPath)
		if err != nil {
			return err
		}
		if di, err := os.Stat(dstPath); os.IsNotExist(err) {
			log.Printf("Copying %s -> %s\n", srcPath, dstPath)
			if err := copyFile(srcPath, dstPath); err != nil {
				// Windows reports in-use files with this message; skip them
				// on a best-effort basis.
				if strings.Contains(err.Error(), "The process cannot access the file because it is being used by another process.") {
					log.Printf("Ignore %s because it's being used by another process.\n", srcPath)
					continue
				}
				return err
			}
		} else if isModified(si, di) {
			if err := os.RemoveAll(dstPath); err != nil {
				// BUG FIX: was `return nil`, swallowing the failure.
				return err
			}
			log.Printf("Updating %s -> %s\n", srcPath, dstPath)
			if err := copyFile(srcPath, dstPath); err != nil {
				return err
			}
		}
	}
	return nil
}
func isModified(src, dst os.FileInfo) bool {
return src.Size() != dst.Size() || !src.ModTime().Equal(dst.ModTime())
}
// main mirrors the directory named by the first argument into the second.
func main() {
	flag.Parse()
	src, dst := flag.Arg(0), flag.Arg(1)
	if src == "" || dst == "" {
		fmt.Println("gomirror path/to/source path/to/destination")
		return
	}
	if err := mirrorDir(src, dst); err != nil {
		log.Fatal(err)
	}
}
|
// Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package placement
import (
"errors"
"fmt"
"reflect"
"testing"
"github.com/stretchr/testify/require"
)
// TestClone verifies Clone yields an independent copy: mutating the clone
// must not affect the original.
func TestClone(t *testing.T) {
	original := &Rule{ID: "434"}
	cloned := original.Clone()
	cloned.ID = "121"
	require.Equal(t, &Rule{ID: "434"}, original)
	require.Equal(t, &Rule{ID: "121"}, cloned)
}
// matchRules asserts t1 and t2 contain the same rules regardless of order,
// prefixing failures with prefix.
func matchRules(t1, t2 []*Rule, prefix string, t *testing.T) {
	require.Equal(t, len(t2), len(t1), prefix)
	for i, want := range t1 {
		found := false
		for _, got := range t2 {
			if reflect.DeepEqual(got, want) {
				found = true
				break
			}
		}
		require.True(t, found, "%s\n\ncan not found %d rule\n%+v\n%+v", prefix, i, want, t2)
	}
}
// TestNewRuleAndNewRules drives NewRules through list- and dict-style
// constraint inputs, including every error path.
func TestNewRuleAndNewRules(t *testing.T) {
	type TestCase struct {
		name     string
		input    string
		replicas uint64
		output   []*Rule
		err      error
	}
	tests := []TestCase{
		{
			name:     "empty constraints",
			input:    "",
			replicas: 3,
			output: []*Rule{
				NewRule(Voter, 3, NewConstraintsDirect()),
			},
		},
		{
			name:     "zero replicas",
			input:    "",
			replicas: 0,
			output: []*Rule{
				NewRule(Voter, 0, NewConstraintsDirect()),
			},
		},
		{
			name:     "normal list constraints",
			input:    `["+zone=sh", "+region=sh"]`,
			replicas: 3,
			output: []*Rule{
				NewRule(Voter, 3, NewConstraintsDirect(
					NewConstraintDirect("zone", In, "sh"),
					NewConstraintDirect("region", In, "sh"),
				)),
			},
		},
		{
			name:  "normal dict constraints",
			input: `{"+zone=sh,-zone=bj":2, "+zone=sh": 1}`,
			output: []*Rule{
				NewRule(Voter, 2, NewConstraintsDirect(
					NewConstraintDirect("zone", In, "sh"),
					NewConstraintDirect("zone", NotIn, "bj"),
				)),
				NewRule(Voter, 1, NewConstraintsDirect(
					NewConstraintDirect("zone", In, "sh"),
				)),
			},
		},
		{
			name:     "normal dict constraints, with count",
			input:    "{'+zone=sh,-zone=bj':2, '+zone=sh': 1}",
			replicas: 4,
			err:      ErrInvalidConstraintsRelicas,
		},
		{
			name:  "zero count in dict constraints",
			input: `{"+zone=sh,-zone=bj":0, "+zone=sh": 1}`,
			err:   ErrInvalidConstraintsMapcnt,
		},
		{
			name:     "invalid list constraints",
			input:    `["ne=sh", "+zone=sh"]`,
			replicas: 3,
			err:      ErrInvalidConstraintsFormat,
		},
		{
			name:  "invalid dict constraints",
			input: `{+ne=sh,-zone=bj:1, "+zone=sh": 4`,
			err:   ErrInvalidConstraintsFormat,
		},
		{
			name:  "invalid dict constraints",
			input: `{"nesh,-zone=bj":1, "+zone=sh": 4}`,
			err:   ErrInvalidConstraintFormat,
		},
		{
			name:  "invalid dict separator",
			input: `{+region=us-east-2:2}`,
			err:   ErrInvalidConstraintsMappingWrongSeparator,
		},
	}
	for _, tt := range tests {
		comment := fmt.Sprintf("[%s]", tt.name)
		output, err := NewRules(Voter, tt.replicas, tt.input)
		if tt.err == nil {
			require.NoError(t, err, comment)
			matchRules(tt.output, output, comment, t)
		} else {
			require.True(t, errors.Is(err, tt.err), "[%s]\n%s\n%s\n", tt.name, err, tt.err)
		}
	}
}
|
package main
import (
"fmt"
//"ms/sun_old/base"
"ms/sun/shared/x"
"ms/sun/shared/base"
)
// main floods the HomeFanout table with roughly 1,000,000 sequentially-
// numbered rows using ten concurrent workers (nine goroutines plus the main
// goroutine).
//
// BUG FIX: the original shared both the counter `n` and the loop index `i`
// as plain ints across all ten goroutines — two data races. Ids are now
// handed out through a channel, which is race-free and also bounds the total
// row count exactly.
func main() {
	//x.LogTableSqlReq
	ids := make(chan int)
	go func() {
		for v := 1; ; v++ {
			ids <- v
		}
	}()
	work := func() {
		for {
			m := <-ids
			if m > 1000000 {
				return
			}
			p := x.HomeFanout{
				OrderId:    m,
				ForUserId:  m,
				PostId:     m,
				PostUserId: m,
			}
			p.Save(base.DB)
			if m%1000 == 0 {
				fmt.Println("i: ", m)
			}
		}
	}
	for i := 0; i < 9; i++ {
		go work()
	}
	work()
	// NOTE(review): as in the original, main may exit while background
	// workers are mid-save; add a sync.WaitGroup if every row must land.
}
|
/*
* @lc app=leetcode.cn id=74 lang=golang
*
* [74] 搜索二维矩阵
*/
package main
import "fmt"
// @lc code=start

// searchMatrix reports whether target appears in matrix.
//
// Per the LeetCode-74 contract each row is sorted ascending and the
// first element of each row is greater than the last element of the
// previous row, so the matrix reads as one sorted sequence of
// rows*cols values; a single binary search over virtual indices finds
// the answer in O(log(rows*cols)).
//
// Fixes over the original: the per-row binary search was wrapped in a
// pointless loop over every column (running the identical search cols
// times per row), and an empty matrix caused an index panic.
func searchMatrix(matrix [][]int, target int) bool {
	if len(matrix) == 0 || len(matrix[0]) == 0 {
		return false
	}
	cols := len(matrix[0])
	low, high := 0, len(matrix)*cols-1
	for low <= high {
		mid := low + (high-low)/2
		// Map the virtual index back onto row/column coordinates.
		v := matrix[mid/cols][mid%cols]
		switch {
		case v == target:
			return true
		case v < target:
			low = mid + 1
		default:
			high = mid - 1
		}
	}
	return false
}

// @lc code=end
// main exercises searchMatrix with the two LeetCode sample inputs and
// prints the results.
func main() {
	sample := [][]int{
		{1, 3, 5, 7},
		{10, 11, 16, 20},
		{23, 30, 34, 60},
	}
	fmt.Println(searchMatrix(sample, 23))
	tiny := [][]int{{1, 1}}
	fmt.Println(searchMatrix(tiny, 0))
}
|
/*
- implementation of POP3 server according to rfc1939, rfc2449 in progress
*/
package popgun
import (
"bufio"
"fmt"
"io"
"log"
"net"
"strings"
"time"
)
const (
	// POP3 session states: the client authenticates first, then issues
	// mailbox commands, and the update phase applies pending changes.
	STATE_AUTHORIZATION = iota + 1
	STATE_TRANSACTION
	STATE_UPDATE
)

// Config holds the server's runtime settings.
type Config struct {
	// ListenInterface is the host:port the TCP listener binds to.
	ListenInterface string `json:"listen_interface"`
}

// Authorizator validates user credentials during the AUTHORIZATION state.
type Authorizator interface {
	Authorize(user, pass string) bool
}

// Backend abstracts the mailbox store the server operates on.
// Lock/Unlock appear to guard a user's maildrop for the duration of a
// session (handle() unlocks on connection errors) — confirm the exact
// contract with the implementations.
type Backend interface {
	Stat(user string) (messages, octets int, err error)
	List(user string) (octets []int, err error)
	ListMessage(user string, msgId int) (exists bool, octets int, err error)
	Retr(user string, msgId int) (message string, err error)
	Dele(user string, msgId int) error
	Rset(user string) error
	Uidl(user string) (uids []string, err error)
	UidlMessage(user string, msgId int) (exists bool, uid string, err error)
	Update(user string) error
	Lock(user string) error
	Unlock(user string) error
}

var (
	// ErrInvalidState signals a command issued in a session state where
	// it is not permitted.
	ErrInvalidState = fmt.Errorf("Invalid state")
)
//---------------CLIENT

// Client tracks the state of one POP3 session: the command dispatch
// table, the response printer, the session state machine, and the
// credentials collected so far.
type Client struct {
	commands map[string]Executable
	printer *Printer
	isAlive bool          // loop condition in handle(); cleared to end the session
	currentState int      // one of the STATE_* constants
	authorizator Authorizator
	backend Backend
	user string           // set by USER; also used to unlock the maildrop on errors
	pass string
	lastCommand string
}
// newClient builds a Client in the AUTHORIZATION state with the full
// POP3 command table wired up.
func newClient(authorizator Authorizator, backend Backend) *Client {
	commands := map[string]Executable{
		"QUIT": QuitCommand{},
		"USER": UserCommand{},
		"PASS": PassCommand{},
		"STAT": StatCommand{},
		"LIST": ListCommand{},
		"RETR": RetrCommand{},
		"DELE": DeleCommand{},
		"NOOP": NoopCommand{},
		"RSET": RsetCommand{},
		"UIDL": UidlCommand{},
		"CAPA": CapaCommand{},
	}
	return &Client{
		commands:     commands,
		currentState: STATE_AUTHORIZATION,
		authorizator: authorizator,
		backend:      backend,
	}
}
// handle drives a single POP3 session on conn until the client quits,
// the connection drops, or the idle timeout fires.
//
// Fix over the original: the read deadline was set once for the whole
// connection, so even an actively used session was cut off one minute
// after connecting. The deadline is now refreshed before each command,
// turning it into a per-command idle timeout.
func (c Client) handle(conn net.Conn) {
	defer conn.Close()
	c.printer = NewPrinter(conn)
	c.isAlive = true
	reader := bufio.NewReader(conn)
	c.printer.Welcome()
	for c.isAlive {
		// Idle timeout: each command must arrive within a minute of the
		// previous one. NOTE(review): RFC 1939 recommends an autologout
		// timer of at least 10 minutes; confirm 1m is intentional.
		conn.SetReadDeadline(time.Now().Add(1 * time.Minute))
		// according to RFC commands are terminated by CRLF, but we are removing \r in parseInput
		input, err := reader.ReadString('\n')
		if err != nil {
			if err == io.EOF {
				log.Print("Connection closed by client")
			} else {
				log.Print("Error reading input: ", err)
			}
			// Release the maildrop lock if the user got far enough to claim it.
			if len(c.user) > 0 {
				log.Printf("Unlocking user %s due to connection error ", c.user)
				c.backend.Unlock(c.user)
			}
			break
		}
		cmd, args := c.parseInput(input)
		exec, ok := c.commands[cmd]
		if !ok {
			c.printer.Err("Invalid command %s", cmd)
			log.Printf("Invalid command: %s", cmd)
			continue
		}
		state, err := exec.Run(&c, args)
		if err != nil {
			c.printer.Err("Error executing command %s", cmd)
			log.Print("Error executing command: ", err)
			continue
		}
		c.lastCommand = cmd
		c.currentState = state
	}
}
// parseInput splits a raw command line into an upper-cased verb and its
// arguments, trimming the CRLF terminator and surrounding spaces first.
func (c Client) parseInput(input string) (string, []string) {
	fields := strings.Split(strings.Trim(input, "\r \n"), " ")
	return strings.ToUpper(fields[0]), fields[1:]
}
//---------------SERVER

// Server owns the TCP listener and the dependencies handed to every
// new client session.
type Server struct {
	listener net.Listener
	config Config
	auth Authorizator
	backend Backend
}
// NewServer wires a Server with its configuration, authorizer, and
// storage backend; call Start to begin accepting connections.
func NewServer(cfg Config, auth Authorizator, backend Backend) *Server {
	s := &Server{}
	s.config = cfg
	s.auth = auth
	s.backend = backend
	return s
}
// Start binds the configured listen address and begins serving POP3
// sessions, one goroutine per accepted connection. It returns once the
// listener is bound; accepting happens in the background.
//
// Fix over the original: Start had a value receiver, so the listener
// was stored on a copy and never reached the caller's Server — making
// any later shutdown via s.listener impossible. The receiver is now a
// pointer (NewServer already returns *Server, so callers are unaffected).
func (s *Server) Start() error {
	var err error
	s.listener, err = net.Listen("tcp", s.config.ListenInterface)
	if err != nil {
		log.Printf("Error: could not listen on %s", s.config.ListenInterface)
		return err
	}
	go func() {
		log.Printf("Server listening on: %s\n", s.config.ListenInterface)
		for {
			conn, err := s.listener.Accept()
			if err != nil {
				log.Println("Error: could not accept connection: ", err)
				continue
			}
			c := newClient(s.auth, s.backend)
			go c.handle(conn)
		}
	}()
	return nil
}
//---------------PRINTER
type Printer struct {
conn net.Conn
}
func NewPrinter(conn net.Conn) *Printer {
return &Printer{conn}
}
func (p Printer) Welcome() {
fmt.Fprintf(p.conn, "+OK POPgun POP3 server ready\r\n")
}
func (p Printer) Ok(msg string, a ...interface{}) {
fmt.Fprintf(p.conn, "+OK %s\r\n", fmt.Sprintf(msg, a...))
}
func (p Printer) Err(msg string, a ...interface{}) {
fmt.Fprintf(p.conn, "-ERR %s\r\n", fmt.Sprintf(msg, a...))
}
func (p Printer) MultiLine(msgs []string) {
for _, line := range msgs {
line := strings.Trim(line, "\r")
if strings.HasPrefix(line, ".") {
fmt.Fprintf(p.conn, ".%s\r\n", line)
} else {
fmt.Fprintf(p.conn, "%s\r\n", line)
}
}
fmt.Fprint(p.conn, ".\r\n")
}
|
package msg_queue
import (
"testing"
"github.com/nsqio/go-nsq"
"log"
log2 "git.zhuzi.me/zzjz/zhuzi-bootstrap/lib/log"
)
// Handler is a minimal NSQ consumer handler that logs message bodies.
type Handler struct{}

// HandleMessage logs the message payload and always reports success so
// the message is marked finished.
func (p *Handler) HandleMessage(m *nsq.Message) error {
	log.Print(string(m.Body))
	return nil
}
// TestPublish publishes a single message and fails on any publish error.
//
// Fix over the original: the test ended with a bare `select {}`, which
// blocks forever and hangs the test until the go test timeout kills the
// run. Publish either succeeds or fails immediately, so the block
// served no purpose and is removed.
func TestPublish(t *testing.T) {
	bs := []byte("bs")
	if err := Publish("test", bs); err != nil {
		t.Fatal(err)
	}
}
// TestListen subscribes channel "chan-1" on topic "test" and fails if
// the subscription cannot be established.
func TestListen(t *testing.T) {
	if err := Listen("test", "chan-1", &Handler{}); err != nil {
		t.Fatal(err)
	}
}
// init enables debug logging and points the queue client at a local
// nsqd (127.0.0.1:4150) and nsqlookupd (127.0.0.1:4161) before any test
// runs. NOTE(review): these tests therefore require a running local NSQ
// stack; they are integration tests, not unit tests.
func init() {
	log2.SetDebug(true)
	Init("127.0.0.1:4150", "127.0.0.1:4161", true)
}
|
package chapter9
import (
"fmt"
"testing"
)
// inOrderTraversal prints the values of the tree rooted at root in
// in-order (left subtree, node, right subtree), one value per line.
//
// Idiom fix: the original wrapped the recursive case in an `else` after
// a terminating `if`; an early return keeps the happy path unindented.
func inOrderTraversal(root *TreeNode) {
	if root == nil {
		return
	}
	inOrderTraversal(root.Left)
	fmt.Println(root.Value)
	inOrderTraversal(root.Right)
}
// TestReconstructBinaryTreePreInOrders rebuilds a tree from matching
// pre-order and in-order sequences and prints it in-order; a correct
// reconstruction reproduces the in-order input.
func TestReconstructBinaryTreePreInOrders(t *testing.T) {
	inOrder := []string{"F", "B", "A", "E", "H", "C", "D", "I", "G"}
	preOrder := []string{"H", "B", "F", "E", "A", "C", "D", "G", "I"}
	tree := ReconstructBinaryTreePreInOrders(preOrder, inOrder)
	inOrderTraversal(tree)
	fmt.Println()
}
|
package router
import (
"net/http"
"net/http/httptest"
"testing"
"github.com/julienschmidt/httprouter"
"github.com/yosssi/orgs.io/app/models"
)
// TestNew checks that a router is constructed for a development config.
func TestNew(t *testing.T) {
	config := &models.Config{
		App:    models.AppConfig{Env: models.EnvDevelopment},
		Server: models.ServerConfig{},
	}
	if rtr := New(config); rtr == nil {
		t.Error("rtr should not be nil")
	}
}
// Test_newAceProxy checks that the Ace proxy is constructed and that a
// development config enables dynamic template reloading.
//
// Fixes over the original: the failure message read "should not be
// true" although the assertion requires DynamicReload to BE true, and
// the nil check used t.Error, which does not stop the test before the
// following dereference.
func Test_newAceProxy(t *testing.T) {
	config := &models.Config{
		App:    models.AppConfig{Env: models.EnvDevelopment},
		Server: models.ServerConfig{},
	}
	aceProxy := newAceProxy(config)
	if aceProxy == nil {
		t.Fatal("aceProxy should not be nil")
	}
	if aceProxy.Opts.DynamicReload != true {
		t.Error("aceProxy.Opts.DynamicReload should be true")
	}
}
// Test_aceProxy_funcMap_config checks that the proxy's "config"
// template function returns the exact config the proxy was built with.
//
// Fix over the original: the nil check used t.Error, so a nil aceProxy
// would panic on the next line instead of failing cleanly; t.Fatal
// stops the test first.
func Test_aceProxy_funcMap_config(t *testing.T) {
	config := &models.Config{
		App:    models.AppConfig{Env: models.EnvDevelopment},
		Server: models.ServerConfig{},
	}
	aceProxy := newAceProxy(config)
	if aceProxy == nil {
		t.Fatal("aceProxy should not be nil")
	}
	if c := aceProxy.Opts.FuncMap["config"].(func() *models.Config)(); c != config {
		t.Errorf("c should be %+v [actual: %+v]", config, c)
	}
}
func Test_serveAssets_Err(t *testing.T) {
r, err := http.NewRequest("GET", "", nil)
if err != nil {
t.Errorf("error occurred [error: %q]", err.Error())
}
w := httptest.NewRecorder()
serveAssets(w, r, []httprouter.Param{{Key: "filepath", Value: "/stylesheets/application.css"}})
if w.Code != http.StatusInternalServerError {
t.Errorf("w.Code should be %d [actual: %d]", http.StatusInternalServerError, w.Code)
}
}
func Test_serveAssets(t *testing.T) {
r, err := http.NewRequest("GET", "", nil)
if err != nil {
t.Errorf("error occurred [error: %q]", err.Error())
}
w := httptest.NewRecorder()
serveAssets(w, r, []httprouter.Param{{Key: "filepath", Value: "/not/exist/file"}})
if w.Code != http.StatusNotFound {
t.Errorf("w.Code should be %d [actual: %d]", http.StatusNotFound, w.Code)
}
}
|
package fshelper
import (
"crypto/rand"
"fmt"
"os"
"path"
"strconv"
"testing"
"github.com/mitro42/coback/catalog"
th "github.com/mitro42/testhelper"
"github.com/spf13/afero"
)
// TestNextUnusedFolder pins the naming rules of NextUnusedFolder: it
// returns the smallest positive integer name not yet taken in the
// filesystem root, without reserving it. As asserted below, a plain
// file occupies a numeric name just like a directory, names of the
// form "N_..." also count as taking N, but other suffixes such as
// "10-" do not.
func TestNextUnusedFolder(t *testing.T) {
	fs := afero.NewMemMapFs()
	// Peeking twice without creating anything returns the same name.
	th.Equals(t, "1", NextUnusedFolder(fs))
	th.Equals(t, "1", NextUnusedFolder(fs))
	fs.Mkdir("1", 0755)
	th.Equals(t, "2", NextUnusedFolder(fs))
	// Nested content is irrelevant; only the top-level name "3" is taken.
	fs.MkdirAll("3/some/other", 0755)
	th.Equals(t, "2", NextUnusedFolder(fs))
	fs.MkdirAll("2/subdir", 0755)
	th.Equals(t, "4", NextUnusedFolder(fs))
	// A regular file blocks a numeric name just like a directory.
	f, err := fs.Create("4")
	th.Ok(t, err)
	f.Close()
	th.Equals(t, "5", NextUnusedFolder(fs))
	// Underscore-suffixed names ("5_", "6_file") also reserve 5 and 6.
	fs.MkdirAll("5_", 0755)
	f, err = fs.Create("6_file")
	th.Ok(t, err)
	f.Close()
	th.Equals(t, "7", NextUnusedFolder(fs))
	fs.MkdirAll("7", 0755)
	fs.MkdirAll("2_dir", 0755)
	fs.MkdirAll("8_dir", 0755)
	fs.MkdirAll("9_other", 0755)
	// "10-" does NOT reserve 10 (dash, not underscore), so 10 is free.
	fs.MkdirAll("10-", 0755)
	th.Equals(t, "10", NextUnusedFolder(fs))
	fs.MkdirAll("10_", 0755)
	// Multi-digit names keep counting up correctly past 100.
	for i := 11; i <= 102; i++ {
		fs.MkdirAll(strconv.Itoa(i), 0755)
		th.Equals(t, strconv.Itoa(i+1), NextUnusedFolder(fs))
	}
}
// TestEnsureDirectoryExist verifies EnsureDirectoryExist across the
// four interesting cases: missing path (created), existing directory
// (no-op), path occupied by a file (error), and unwritable filesystem
// (error).
func TestEnsureDirectoryExist(t *testing.T) {
	fs := afero.NewMemMapFs()
	_, err := fs.Stat("test1")
	th.Equals(t, true, os.IsNotExist(err))
	// directory is created if doesn't exist
	err = EnsureDirectoryExist(fs, "test1")
	th.Ok(t, err)
	fi, err := fs.Stat("test1")
	th.Ok(t, err)
	th.Equals(t, true, fi.IsDir())
	// no error if directory already exists
	err = EnsureDirectoryExist(fs, "test1")
	th.Ok(t, err)
	// fail if path is a file
	f, err := fs.Create("test2")
	th.Ok(t, err)
	f.Close()
	err = EnsureDirectoryExist(fs, "test2")
	th.NokPrefix(t, err, "Path is a file")
	// Wrapping in a read-only fs makes directory creation itself fail.
	fs = afero.NewReadOnlyFs(fs)
	// fail cannot create directory
	err = EnsureDirectoryExist(fs, "test3")
	th.NokPrefix(t, err, "Cannot create directory 'test3")
}
// TestCopyFile copies files of various sizes and nested paths between
// two in-memory filesystems and checks the resulting catalog items are
// identical (path, size, checksum, modification time).
func TestCopyFile(t *testing.T) {
	srcFs := afero.NewMemMapFs()
	dstFs := afero.NewMemMapFs()
	check := func(name string, content []byte) {
		f, err := srcFs.Create(name)
		th.Ok(t, err)
		f.Write(content)
		f.Close()
		src, err := catalog.NewItem(srcFs, name)
		th.Ok(t, err)
		th.Ok(t, CopyFile(srcFs, src.Path, src.ModificationTime, dstFs))
		dst, err := catalog.NewItem(dstFs, name)
		th.Ok(t, err)
		th.Equals(t, src, dst)
	}
	check("test1", []byte("some content"))
	check("test2", []byte("some more content"))
	// A large random file in a nested directory structure.
	th.Ok(t, srcFs.MkdirAll("folder/structure/test", 0755))
	big := make([]byte, 1024*1024)
	rand.Read(big)
	check("folder/structure/test/big_file", big)
}
// TestCopyFileErrors exercises the failure paths of CopyFile: missing
// source, destination folder blocked by a file, read-only destination
// filesystem and folder, and an unparseable modification time. The
// scenarios build on each other in order, so the sequence matters.
func TestCopyFileErrors(t *testing.T) {
	sourceFs := afero.NewMemMapFs()
	destinationFs := afero.NewMemMapFs()
	// Source file doesn't exist
	f, err := sourceFs.Create("test")
	th.Ok(t, err)
	f.Close()
	// "test/file" is not present ("test" is a file), so the copy fails.
	sourceItem := catalog.Item{Path: "test/file"}
	th.Ok(t, err)
	err = CopyFile(sourceFs, sourceItem.Path, sourceItem.ModificationTime, destinationFs)
	th.NokPrefix(t, err, "Failed to copy file 'test/file'")
	// Destination folder cannot be created
	sourceFs = afero.NewMemMapFs()
	th.Ok(t, sourceFs.Mkdir("test", 0755))
	f, err = sourceFs.Create("test/file")
	th.Ok(t, err)
	f.Write([]byte("some stuff"))
	f.Close()
	// A file named "test" on the destination blocks creating the folder.
	f, err = destinationFs.Create("test")
	th.Ok(t, err)
	f.Close()
	sourceItem2, err := catalog.NewItem(sourceFs, "test/file")
	th.Ok(t, err)
	err = CopyFile(sourceFs, sourceItem2.Path, sourceItem2.ModificationTime, destinationFs)
	th.NokPrefix(t, err, "Failed to copy file 'test/file': Path is a file")
	// Destination fs is read only
	err = CopyFile(sourceFs, sourceItem2.Path, sourceItem2.ModificationTime, afero.NewReadOnlyFs(afero.NewMemMapFs()))
	fmt.Println(err)
	th.NokPrefix(t, err, "Failed to copy file 'test/file': Cannot create directory")
	// Destination folder is read only
	destinationFs = afero.NewMemMapFs()
	th.Ok(t, destinationFs.Mkdir("test", 0755))
	err = CopyFile(sourceFs, sourceItem2.Path, sourceItem2.ModificationTime, afero.NewReadOnlyFs(destinationFs))
	th.NokPrefix(t, err, "Failed to copy file 'test/file': Cannot create destination file")
	// An invalid modification time fails after the copy, while setting attributes.
	destinationFs = afero.NewMemMapFs()
	sourceItem2.ModificationTime = "Not a valid timestamp"
	err = CopyFile(sourceFs, sourceItem2.Path, sourceItem2.ModificationTime, destinationFs)
	th.NokPrefix(t, err, "Failed to set file attributes 'test/file': Cannot parse modification time of file 'test/file")
}
// TestCopyFileToFolder copies files into various destination folders
// ("", ".", named, nested) and checks size, checksum, and modification
// time survive the copy while the file keeps its base name.
func TestCopyFileToFolder(t *testing.T) {
	srcFs := afero.NewMemMapFs()
	dstFs := afero.NewMemMapFs()
	check := func(filePath string, content []byte, destinationFolderPath string) {
		f, err := srcFs.Create(filePath)
		th.Ok(t, err)
		f.Write(content)
		f.Close()
		src, err := catalog.NewItem(srcFs, filePath)
		th.Ok(t, err)
		th.Ok(t, CopyFileToFolder(srcFs, src.Path, src.ModificationTime, dstFs, destinationFolderPath))
		_, fileName := path.Split(filePath)
		dst, err := catalog.NewItem(dstFs, path.Join(destinationFolderPath, fileName))
		th.Ok(t, err)
		th.Equals(t, src.Size, dst.Size)
		th.Equals(t, src.Md5Sum, dst.Md5Sum)
		th.Equals(t, src.ModificationTime, dst.ModificationTime)
	}
	check("test1", []byte("some content"), "")
	check("test2", []byte("some more content"), ".")
	check("test3", []byte("some more content..."), "some_other_folder")
	th.Ok(t, srcFs.MkdirAll("folder/structure/test", 0755))
	big := make([]byte, 1024*1024)
	rand.Read(big)
	check("folder/structure/test/big_file", big, "nested/other/folder")
}
// TestCopyFileTo copies files to explicit destination paths (possibly
// renamed, possibly in nested folders) and checks size, checksum, and
// modification time survive the copy.
func TestCopyFileTo(t *testing.T) {
	srcFs := afero.NewMemMapFs()
	dstFs := afero.NewMemMapFs()
	check := func(filePath string, content []byte, destinationPath string) {
		f, err := srcFs.Create(filePath)
		th.Ok(t, err)
		f.Write(content)
		f.Close()
		src, err := catalog.NewItem(srcFs, filePath)
		th.Ok(t, err)
		th.Ok(t, CopyFileTo(srcFs, src.Path, src.ModificationTime, dstFs, destinationPath))
		dst, err := catalog.NewItem(dstFs, destinationPath)
		th.Ok(t, err)
		th.Equals(t, src.Size, dst.Size)
		th.Equals(t, src.Md5Sum, dst.Md5Sum)
		th.Equals(t, src.ModificationTime, dst.ModificationTime)
	}
	check("test1", []byte("some content"), "newTest1")
	check("test2", []byte("some more content"), "./newTest2")
	check("test3", []byte("some more content..."), "some_other_folder/test3")
	th.Ok(t, srcFs.MkdirAll("folder/structure/test", 0755))
	big := make([]byte, 1024*1024)
	rand.Read(big)
	check("folder/structure/test/big_file", big, "nested/other/folder/bigFile")
}
|
// Copyright 2013 http://gumuz.nl/. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package core
import (
// "fmt"
// "sync"
// "io"
// "net/http"
// "lixae/settings"
"lixae/treap"
)
// SyncWriteOperation is a mutation request routed through the core's
// single writer goroutine; result is signalled once the write applied.
type SyncWriteOperation struct {
	operation string               // "follow" or "unfollow"
	args []uint64                  // args[0] is the source id, args[1:] the targets
	result chan *SyncWriteResult
}

// SyncWriteResult is an empty acknowledgement that a write completed.
type SyncWriteResult struct {
}
// Core holds the follower graph. All writes are funnelled through
// syncWriteOperations so a single goroutine mutates data.
type Core struct {
	data map[uint64]*Source
	syncWriteOperations chan *SyncWriteOperation
}
// New creates a Core and starts its background writer goroutine, which
// serializes all mutations arriving on syncWriteOperations.
func New() *Core {
	c := &Core{
		data:                make(map[uint64]*Source),
		syncWriteOperations: make(chan *SyncWriteOperation),
	}
	go c.handleSyncWriteOperations()
	return c
}
// init returns the Source record for the given id, creating an empty
// one on first reference. It must only be called from the writer
// goroutine, since it mutates c.data.
//
// Improvement over the original: one map lookup instead of three.
func (c *Core) init(source uint64) *Source {
	s, ok := c.data[source]
	if !ok {
		s = &Source{}
		c.data[source] = s
	}
	return s
}
// handleSyncWriteOperations is the single writer goroutine: it applies
// follow/unfollow mutations to the graph in arrival order and
// acknowledges each one on its result channel.
func (c *Core) handleSyncWriteOperations() {
	for op := range c.syncWriteOperations {
		switch op.operation {
		case "follow":
			source := op.args[0]
			targets := op.args[1:]
			s := c.init(source)
			s.following = treap.Set(s.following, targets...)
			// Mirror the edge on every target's follower set.
			for _, target := range targets {
				tgt := c.init(target)
				tgt.followers = treap.Set(tgt.followers, source)
			}
			op.result <- &SyncWriteResult{}
		case "unfollow":
			source := op.args[0]
			targets := op.args[1:]
			s := c.init(source)
			s.following = treap.Del(s.following, targets...)
			for _, target := range targets {
				tgt := c.init(target)
				tgt.followers = treap.Del(tgt.followers, source)
			}
			op.result <- &SyncWriteResult{}
		}
	}
}
// Follow makes source follow each target, blocking until the writer
// goroutine has applied the mutation.
func (c *Core) Follow(source uint64, targets ...uint64) {
	op := &SyncWriteOperation{
		operation: "follow",
		args:      append([]uint64{source}, targets...),
		result:    make(chan *SyncWriteResult, 1),
	}
	c.syncWriteOperations <- op
	<-op.result
}
// Unfollow removes source's edge to each target, blocking until the
// writer goroutine has applied the mutation.
func (c *Core) Unfollow(source uint64, targets ...uint64) {
	op := &SyncWriteOperation{
		operation: "unfollow",
		args:      append([]uint64{source}, targets...),
		result:    make(chan *SyncWriteResult, 1),
	}
	c.syncWriteOperations <- op
	<-op.result
}
// Followers returns the ids currently following source; the slice is
// empty (never nil) when source is unknown.
//
// Idiom fixes: dropped `ok == true` and renamed the inner variable so
// it no longer shadows the parameter.
// NOTE(review): this reads c.data and the treap without synchronization
// while the writer goroutine may be mutating them — a data race if
// reads and writes overlap. Consider routing reads through the writer.
func (c *Core) Followers(source uint64) []uint64 {
	result := make([]uint64, 0)
	if src, ok := c.data[source]; ok {
		iterator := treap.NewIterator(src.followers)
		for {
			target, ok := iterator.Next()
			if !ok {
				break
			}
			result = append(result, target)
		}
	}
	return result
}
// Following returns the ids that source currently follows; the slice
// is empty (never nil) when source is unknown.
//
// Idiom fixes: dropped `ok == true` and renamed the inner variable so
// it no longer shadows the parameter.
// NOTE(review): same unsynchronized read as Followers — racy against
// the writer goroutine.
func (c *Core) Following(source uint64) []uint64 {
	result := make([]uint64, 0)
	if src, ok := c.data[source]; ok {
		iterator := treap.NewIterator(src.following)
		for {
			target, ok := iterator.Next()
			if !ok {
				break
			}
			result = append(result, target)
		}
	}
	return result
}
// Source is one node of the follower graph: the treaps hold the ids it
// follows and the ids following it.
type Source struct {
	following *treap.Treap
	followers *treap.Treap
}
// func (c *Core) Dispatch() {
// io.WriteString(w, fmt.Sprint("haha"))
// }
|
package fstestutil // import "github.com/chubaoio/cbfs/fuse/fs/fstestutil"
|
package main
import "room"
var Env string = "development"
// main resolves the chat service dial address for the configured
// environment and starts the API server against it.
func main() {
	cfg := room.InitConfig(Env)
	room.StartApiServer(cfg.GetDialAddress("chat"))
}
|
// Package json implements a JSON handler.
package json
import (
stdjson "encoding/json"
"io"
"sync"
log "github.com/go-playground/log/v8"
)
// Handler is a log handler that encodes each entry as a JSON document
// to the wrapped encoder's writer. The mutex serializes Log calls so
// entries from concurrent goroutines do not interleave.
type Handler struct {
	m sync.Mutex
	*stdjson.Encoder
}
// New returns a Handler that writes JSON-encoded log entries to w.
func New(w io.Writer) *Handler {
	h := &Handler{}
	h.Encoder = stdjson.NewEncoder(w)
	return h
}
// Log encodes the entry as a single JSON document, serialized under the
// handler's mutex.
//
// Fix over the original: the unlock is now deferred, so a panic inside
// Encode cannot leave the handler permanently locked.
func (h *Handler) Log(e log.Entry) {
	h.m.Lock()
	defer h.m.Unlock()
	// The encode error is deliberately dropped: a logger has nowhere to
	// report its own failures.
	_ = h.Encoder.Encode(e)
}
|
package actions
import (
"bytes"
"fmt"
"github.com/deis/helm/log"
"github.com/gobuffalo/buffalo"
"github.com/gobuffalo/pop"
"github.com/kulado/wealthmind/kuladoapi/models"
)
// UploadHandler accepts a multipart file upload and stores the file's
// name and raw bytes in the database via the "insertfile" query.
// All failures are reported to the client as HTTP 500 with a message.
//
// Fix over the original: the buf.ReadFrom error was ignored, so a
// truncated or failed read could silently store a partial file.
func UploadHandler(c buffalo.Context) error {
	tx := c.Value("tx").(*pop.Connection)
	request := c.Request()
	err := request.ParseMultipartForm(1024 * 1024 * 10) // 10MB
	if err != nil {
		return c.Error(500, fmt.Errorf("Trouble parsing that form: %s", err.Error()))
	}
	file, header, err := request.FormFile("file")
	if err != nil {
		return c.Error(500, fmt.Errorf("Trouble extracting the file: %s", err.Error()))
	}
	defer file.Close()
	log.Info("the file we got is named %s and is %d bytes long", header.Filename, header.Size)
	// The whole upload is buffered in memory before the insert; the
	// 10MB form limit above bounds this.
	buf := new(bytes.Buffer)
	if _, err := buf.ReadFrom(file); err != nil {
		return c.Error(500, fmt.Errorf("Trouble reading the file: %s", err.Error()))
	}
	err = tx.RawQuery(models.Q["insertfile"], header.Filename, buf.Bytes()).Exec()
	if err != nil {
		return c.Error(500, fmt.Errorf("error inserting file to database: %s", err.Error()))
	}
	log.Info("processed a file")
	message := "success"
	return c.Render(200, r.JSON(map[string]string{"message": message}))
}
|
package main
import (
"fmt"
"math"
)
// sqrt returns the square root of num, or an error for negative input.
// error is a built-in interface type in Go; a nil error means success.
//
// Fixes over the original: typo "megative" in the error message, and
// the non-idiomatic parenthesized if-condition.
func sqrt(num float64) (float64, error) {
	if num < 0 {
		return 0.0, fmt.Errorf("sqrt of negative value (%f)", num)
	}
	return math.Sqrt(num), nil
}
// main demonstrates the (value, error) return convention with one
// valid and one invalid call to sqrt.
func main() {
	for _, input := range []float64{2.0, -2.0} {
		if v, err := sqrt(input); err != nil {
			fmt.Printf("Error : %s\n", err)
		} else {
			fmt.Println(v)
		}
	}
}
|
package nectar
import (
"bytes"
"encoding/csv"
"flag"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/gholt/brimtext"
)
// CLIInstance holds the state for one invocation of the nectar CLI:
// the program name, the error/progress callbacks, the global flag set,
// and one flag set per subcommand. Exported fields are part of the
// public surface; lowercase fields hold parsed flag values.
type CLIInstance struct {
	Arg0 string
	// Callbacks for fatal errors and verbose output; the defaults
	// (cliFatal etc.) print help/error text and exit.
	fatal func(cli *CLIInstance, err error)
	fatalf func(cli *CLIInstance, frmt string, args ...interface{})
	verbosef func(cli *CLIInstance, frmt string, args ...interface{})
	// Global flags shared by every subcommand (auth, concurrency, headers...).
	GlobalFlags *flag.FlagSet
	globalFlagAuthURL *string
	globalFlagAuthTenant *string
	globalFlagAuthUser *string
	globalFlagAuthKey *string
	globalFlagAuthPassword *string
	globalFlagOverrideURLs *string
	globalFlagStorageRegion *string
	GlobalFlagVerbose *bool
	globalFlagContinueOnError *bool
	globalFlagConcurrency *int
	globalFlagInternalStorage *bool
	globalFlagHeaders stringListFlag
	// bench-delete subcommand flags.
	BenchDeleteFlags *flag.FlagSet
	benchDeleteFlagContainers *int
	benchDeleteFlagCount *int
	benchDeleteFlagCSV *string
	benchDeleteFlagCSVOT *string
	// bench-get subcommand flags.
	BenchGetFlags *flag.FlagSet
	benchGetFlagContainers *int
	benchGetFlagCount *int
	benchGetFlagCSV *string
	benchGetFlagCSVOT *string
	benchGetFlagIterations *int
	// bench-head subcommand flags.
	BenchHeadFlags *flag.FlagSet
	benchHeadFlagContainers *int
	benchHeadFlagCount *int
	benchHeadFlagCSV *string
	benchHeadFlagCSVOT *string
	benchHeadFlagIterations *int
	// bench-mixed subcommand flags.
	BenchMixedFlags *flag.FlagSet
	benchMixedFlagContainers *int
	benchMixedFlagCSV *string
	benchMixedFlagCSVOT *string
	benchMixedFlagSize *int
	benchMixedFlagTime *string
	// bench-post subcommand flags.
	BenchPostFlags *flag.FlagSet
	benchPostFlagContainers *int
	benchPostFlagCount *int
	benchPostFlagCSV *string
	benchPostFlagCSVOT *string
	// bench-put subcommand flags.
	BenchPutFlags *flag.FlagSet
	benchPutFlagContainers *int
	benchPutFlagCount *int
	benchPutFlagCSV *string
	benchPutFlagCSVOT *string
	benchPutFlagSize *int
	benchPutFlagMaxSize *int
	// download subcommand flags.
	DownloadFlags *flag.FlagSet
	downloadFlagAccount *bool
	// get subcommand flags (listings and raw fetches).
	GetFlags *flag.FlagSet
	getFlagRaw *bool
	getFlagNameOnly *bool
	getFlagMarker *string
	getFlagEndMarker *string
	getFlagReverse *bool
	getFlagLimit *int
	getFlagPrefix *string
	getFlagDelimiter *string
}
// CLI runs a nectar command-line-interface with the given args (args[0] should
// have the name of the executable). The fatal, fatalf, and verbosef parameters
// may be nil for the defaults. The default fatal and fatalf functions will
// call os.Exit(1) after emitting error (or help) text.
//
// Flow: build every flag set, parse the global flags, validate auth
// settings, authenticate, then dispatch the first remaining argument
// as the subcommand.
func CLI(args []string, fatal func(cli *CLIInstance, err error), fatalf func(cli *CLIInstance, frmt string, args ...interface{}), verbosef func(cli *CLIInstance, frmt string, args ...interface{})) {
	// Fall back to the default print-and-exit callbacks when nil.
	if fatal == nil {
		fatal = cliFatal
	}
	if fatalf == nil {
		fatalf = cliFatalf
	}
	if verbosef == nil {
		verbosef = cliVerbosef
	}
	cli := &CLIInstance{Arg0: args[0], fatal: fatal, fatalf: fatalf, verbosef: verbosef}
	// All flag sets share one buffer so their usage text can be
	// collected instead of printed directly.
	var flagbuf bytes.Buffer
	// Global flags; most default from environment variables.
	cli.GlobalFlags = flag.NewFlagSet(cli.Arg0, flag.ContinueOnError)
	cli.GlobalFlags.SetOutput(&flagbuf)
	cli.globalFlagAuthURL = cli.GlobalFlags.String("A", os.Getenv("AUTH_URL"), "|<url>| URL to auth system, example: http://127.0.0.1:8080/auth/v1.0 - Env: AUTH_URL")
	cli.globalFlagAuthTenant = cli.GlobalFlags.String("T", os.Getenv("AUTH_TENANT"), "|<tenant>| Tenant name for auth system, example: test - Not all auth systems need this. Env: AUTH_TENANT")
	cli.globalFlagAuthUser = cli.GlobalFlags.String("U", os.Getenv("AUTH_USER"), "|<user>| User name for auth system, example: tester - Some auth systems allow tenant:user format here, example: test:tester - Env: AUTH_USER")
	cli.globalFlagAuthKey = cli.GlobalFlags.String("K", os.Getenv("AUTH_KEY"), "|<key>| Key for auth system, example: testing - Some auth systems use passwords instead, see -P - Env: AUTH_KEY")
	cli.globalFlagAuthPassword = cli.GlobalFlags.String("P", os.Getenv("AUTH_PASSWORD"), "|<password>| Password for auth system, example: testing - Some auth system use keys instead, see -K - Env: AUTH_PASSWORD")
	cli.globalFlagOverrideURLs = cli.GlobalFlags.String("O", os.Getenv("OVERRIDE_URLS"), "|<url> [url] ...| Override URLs for service endpoint(s); the service endpoint given by auth will be ignored - Env: OVERRIDE_URLS")
	cli.globalFlagStorageRegion = cli.GlobalFlags.String("R", os.Getenv("STORAGE_REGION"), "|<region>| Storage region to use if set, otherwise uses the default. Env: STORAGE_REGION")
	cli.GlobalFlagVerbose = cli.GlobalFlags.Bool("v", false, "Will activate verbose output.")
	cli.globalFlagContinueOnError = cli.GlobalFlags.Bool("continue-on-error", false, "When possible, continue with additional operations even if one or more fail.")
	// Parse errors are deliberately ignored here: an unset/invalid env
	// value just leaves the zero default.
	i32, _ := strconv.ParseInt(os.Getenv("CONCURRENCY"), 10, 32)
	cli.globalFlagConcurrency = cli.GlobalFlags.Int("C", int(i32), "|<number>| The maximum number of concurrent operations to perform; default is 1. Env: CONCURRENCY")
	b, _ := strconv.ParseBool(os.Getenv("STORAGE_INTERNAL"))
	cli.globalFlagInternalStorage = cli.GlobalFlags.Bool("I", b, "Internal storage URL resolution, such as Rackspace ServiceNet. Env: STORAGE_INTERNAL")
	cli.GlobalFlags.Var(&cli.globalFlagHeaders, "H", "|<name>:[value]| Sets a header to be sent with the request. Useful mostly for PUTs and POSTs, allowing you to set metadata. This option can be specified multiple times for additional headers.")
	// Per-subcommand flag sets.
	cli.BenchDeleteFlags = flag.NewFlagSet("bench-delete", flag.ContinueOnError)
	cli.BenchDeleteFlags.SetOutput(&flagbuf)
	cli.benchDeleteFlagContainers = cli.BenchDeleteFlags.Int("containers", 1, "|<number>| Number of containers in use.")
	cli.benchDeleteFlagCount = cli.BenchDeleteFlags.Int("count", 1000, "|<number>| Number of objects to delete, distributed across containers.")
	cli.benchDeleteFlagCSV = cli.BenchDeleteFlags.String("csv", "", "|<filename>| Store the timing of each delete into a CSV file.")
	cli.benchDeleteFlagCSVOT = cli.BenchDeleteFlags.String("csvot", "", "|<filename>| Store the number of deletes performed over time into a CSV file.")
	cli.BenchGetFlags = flag.NewFlagSet("bench-get", flag.ContinueOnError)
	cli.BenchGetFlags.SetOutput(&flagbuf)
	cli.benchGetFlagContainers = cli.BenchGetFlags.Int("containers", 1, "|<number>| Number of containers to use.")
	cli.benchGetFlagCount = cli.BenchGetFlags.Int("count", 1000, "|<number>| Number of objects to get, distributed across containers.")
	cli.benchGetFlagCSV = cli.BenchGetFlags.String("csv", "", "|<filename>| Store the timing of each get into a CSV file.")
	cli.benchGetFlagCSVOT = cli.BenchGetFlags.String("csvot", "", "|<filename>| Store the number of gets performed over time into a CSV file.")
	cli.benchGetFlagIterations = cli.BenchGetFlags.Int("iterations", 1, "|<number>| Number of iterations to perform.")
	cli.BenchHeadFlags = flag.NewFlagSet("bench-head", flag.ContinueOnError)
	cli.BenchHeadFlags.SetOutput(&flagbuf)
	cli.benchHeadFlagContainers = cli.BenchHeadFlags.Int("containers", 1, "|<number>| Number of containers to use.")
	cli.benchHeadFlagCount = cli.BenchHeadFlags.Int("count", 1000, "|<number>| Number of objects to head, distributed across containers.")
	cli.benchHeadFlagCSV = cli.BenchHeadFlags.String("csv", "", "|<filename>| Store the timing of each head into a CSV file.")
	cli.benchHeadFlagCSVOT = cli.BenchHeadFlags.String("csvot", "", "|<filename>| Store the number of heads performed over time into a CSV file.")
	cli.benchHeadFlagIterations = cli.BenchHeadFlags.Int("iterations", 1, "|<number>| Number of iterations to perform.")
	cli.BenchMixedFlags = flag.NewFlagSet("bench-mixed", flag.ContinueOnError)
	cli.BenchMixedFlags.SetOutput(&flagbuf)
	cli.benchMixedFlagContainers = cli.BenchMixedFlags.Int("containers", 1, "|<number>| Number of containers to use.")
	cli.benchMixedFlagCSV = cli.BenchMixedFlags.String("csv", "", "|<filename>| Store the timing of each request into a CSV file.")
	cli.benchMixedFlagCSVOT = cli.BenchMixedFlags.String("csvot", "", "|<filename>| Store the number of requests performed over time into a CSV file.")
	cli.benchMixedFlagSize = cli.BenchMixedFlags.Int("size", 4096, "|<bytes>| Number of bytes for each object.")
	cli.benchMixedFlagTime = cli.BenchMixedFlags.String("time", "10m", "|<timespan>| Amount of time to run the test, such as 10m or 1h.")
	cli.BenchPostFlags = flag.NewFlagSet("bench-post", flag.ContinueOnError)
	cli.BenchPostFlags.SetOutput(&flagbuf)
	cli.benchPostFlagContainers = cli.BenchPostFlags.Int("containers", 1, "|<number>| Number of containers in use.")
	cli.benchPostFlagCount = cli.BenchPostFlags.Int("count", 1000, "|<number>| Number of objects to post, distributed across containers.")
	cli.benchPostFlagCSV = cli.BenchPostFlags.String("csv", "", "|<filename>| Store the timing of each post into a CSV file.")
	cli.benchPostFlagCSVOT = cli.BenchPostFlags.String("csvot", "", "|<filename>| Store the number of posts performed over time into a CSV file.")
	cli.BenchPutFlags = flag.NewFlagSet("bench-put", flag.ContinueOnError)
	cli.BenchPutFlags.SetOutput(&flagbuf)
	cli.benchPutFlagContainers = cli.BenchPutFlags.Int("containers", 1, "|<number>| Number of containers to use.")
	cli.benchPutFlagCount = cli.BenchPutFlags.Int("count", 1000, "|<number>| Number of objects to PUT, distributed across containers.")
	cli.benchPutFlagCSV = cli.BenchPutFlags.String("csv", "", "|<filename>| Store the timing of each PUT into a CSV file.")
	cli.benchPutFlagCSVOT = cli.BenchPutFlags.String("csvot", "", "|<filename>| Store the number of PUTs performed over time into a CSV file.")
	cli.benchPutFlagSize = cli.BenchPutFlags.Int("size", 4096, "|<bytes>| Number of bytes for each object.")
	cli.benchPutFlagMaxSize = cli.BenchPutFlags.Int("maxsize", 0, "|<bytes>| This option will vary object sizes randomly between -size and -maxsize")
	cli.DownloadFlags = flag.NewFlagSet("download", flag.ContinueOnError)
	cli.DownloadFlags.SetOutput(&flagbuf)
	cli.downloadFlagAccount = cli.DownloadFlags.Bool("a", false, "Indicates you truly wish to download the entire account; this is to prevent accidentally doing so when giving a single parameter to download.")
	cli.GetFlags = flag.NewFlagSet("get", flag.ContinueOnError)
	cli.GetFlags.SetOutput(&flagbuf)
	cli.getFlagRaw = cli.GetFlags.Bool("r", false, "Emit raw results")
	cli.getFlagNameOnly = cli.GetFlags.Bool("n", false, "In listings, emits the names only")
	cli.getFlagMarker = cli.GetFlags.String("marker", "", "|<text>| In listings, sets the start marker")
	cli.getFlagEndMarker = cli.GetFlags.String("endmarker", "", "|<text>| In listings, sets the stop marker")
	cli.getFlagReverse = cli.GetFlags.Bool("reverse", false, "In listings, reverses the order")
	cli.getFlagLimit = cli.GetFlags.Int("limit", 0, "|<number>| In listings, limits the results")
	cli.getFlagPrefix = cli.GetFlags.String("prefix", "", "|<text>| In listings, returns only those matching the prefix")
	cli.getFlagDelimiter = cli.GetFlags.String("delimiter", "", "|<text>| In listings, sets the delimiter and activates delimiter listings")
	// Parse global flags; no remaining args means no subcommand, which
	// is treated like a help request.
	if err := cli.GlobalFlags.Parse(args[1:]); err != nil || len(cli.GlobalFlags.Args()) == 0 {
		cli.fatal(cli, err)
	}
	// Validate the minimum auth configuration before contacting the cluster.
	if *cli.globalFlagAuthURL == "" {
		cli.fatalf(cli, "No Auth URL set; use -A\n")
	}
	if *cli.globalFlagAuthUser == "" {
		cli.fatalf(cli, "No Auth User set; use -U\n")
	}
	if *cli.globalFlagAuthKey == "" && *cli.globalFlagAuthPassword == "" {
		cli.fatalf(cli, "No Auth Key or Password set; use -K or -P\n")
	}
	// Authenticate; a non-nil resp here signals an auth failure whose
	// body is surfaced in the fatal message.
	c, resp := NewClient(*cli.globalFlagAuthTenant, *cli.globalFlagAuthUser, *cli.globalFlagAuthPassword, *cli.globalFlagAuthKey, *cli.globalFlagStorageRegion, *cli.globalFlagAuthURL, *cli.globalFlagInternalStorage, strings.Split(*cli.globalFlagOverrideURLs, " "))
	if resp != nil {
		bodyBytes, _ := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		cli.fatalf(cli, "Auth responded with %d %s - %s\n", resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
	}
	// The first remaining argument selects the subcommand; the rest are
	// passed through to it.
	cmd := ""
	args = append([]string{}, cli.GlobalFlags.Args()...)
	if len(args) > 0 {
		cmd = args[0]
		args = args[1:]
	}
	switch cmd {
	case "auth":
		cli.auth(c, args)
	case "bench-delete":
		cli.benchDelete(c, args)
	case "bench-get":
		cli.benchGet(c, args)
	case "bench-head":
		cli.benchHead(c, args)
	case "bench-mixed":
		cli.benchMixed(c, args)
	case "bench-post":
		cli.benchPost(c, args)
	case "bench-put":
		cli.benchPut(c, args)
	case "delete":
		cli.delet(c, args)
	case "download":
		cli.download(c, args)
	case "get":
		cli.get(c, args)
	case "head":
		cli.head(c, args)
	case "post":
		cli.post(c, args)
	case "put":
		cli.put(c, args)
	case "upload":
		cli.upload(c, args)
	default:
		cli.fatalf(cli, "Unknown command: %s\n", cmd)
	}
}
// cliFatal reports a fatal condition and terminates the process with exit
// status 1.
//
// When err is nil or flag.ErrHelp, the full usage text (global options plus
// a description of every subcommand) is printed to standard output; any
// other error is printed to standard error, with the flag package's
// "flag provided but not defined: " prefix rewritten to the friendlier
// "No such option: " form.
func cliFatal(cli *CLIInstance, err error) {
	if err == flag.ErrHelp || err == nil {
		// Help path: emit the complete usage text. brimtext.Wrap reflows the
		// long raw-string paragraphs to the terminal width; the third and
		// fourth arguments are the first-line and subsequent-line indents.
		fmt.Println(cli.Arg0, `[options] <subcommand> ...`)
		fmt.Println(brimtext.Wrap(`
Tool for accessing a Hummingbird/Swift cluster. Some global options can also be set via environment variables. These will be noted at the end of the description with Env: NAME. The following global options are available:
`, 0, " ", " "))
		fmt.Print(cli.HelpFlags(cli.GlobalFlags))
		fmt.Println()
		fmt.Println(brimtext.Wrap(`
The following subcommands are available:`, 0, "", ""))
		fmt.Println("\nauth")
		fmt.Println(brimtext.Wrap(`
Displays information retrieved after authentication, such as the Account URL.
`, 0, " ", " "))
		fmt.Println("\nbench-delete [options] <container> [object]")
		fmt.Println(brimtext.Wrap(`
Benchmark tests DELETEs. By default, 1000 DELETEs are done against the named <container>. If you specify [object] it will be used as a prefix for the object names, otherwise "bench-" will be used. Generally, you would use bench-put to populate the containers and objects, and then use bench-delete with the same options to test the deletions.
`, 0, " ", " "))
		fmt.Print(cli.HelpFlags(cli.BenchDeleteFlags))
		fmt.Println("\nbench-get [options] <container> [object]")
		fmt.Println(brimtext.Wrap(`
Benchmark tests GETs. By default, 1000 GETs are done from the named <container>. If you specify [object] it will be used as the prefix for the object names, otherwise "bench-" will be used. Generally, you would use bench-put to populate the containers and objects, and then use bench-get with the same options with the possible addition of -iterations to lengthen the test time.
`, 0, " ", " "))
		fmt.Print(cli.HelpFlags(cli.BenchGetFlags))
		fmt.Println("\nbench-head [options] <container> [object]")
		fmt.Println(brimtext.Wrap(`
Benchmark tests HEADs. By default, 1000 HEADs are done from the named <container>. If you specify [object] it will be used as the prefix for the object names, otherwise "bench-" will be used. Generally, you would use bench-put to populate the containers and objects, and then use bench-head with the same options with the possible addition of -iterations to lengthen the test time.
`, 0, " ", " "))
		fmt.Print(cli.HelpFlags(cli.BenchHeadFlags))
		fmt.Println("\nbench-mixed [options] <container> [object]")
		fmt.Println(brimtext.Wrap(`
Benchmark tests mixed request workloads. If you specify [object] it will be used as a prefix for the object names, otherwise "bench-" will be used. This test is made to be run for a specific span of time (10 minutes by default). You probably want to run with the -continue-on-error global flag; due to the eventual consistency model of Swift|Hummingbird, a few requests may 404.
Note: The concurrency setting for this test will be used for each request type separately. So, with five request types (PUT, POST, GET, HEAD, DELETE), this means five times the concurrency value specified.
`, 0, " ", " "))
		fmt.Println()
		fmt.Print(cli.HelpFlags(cli.BenchMixedFlags))
		fmt.Println("\nbench-post [options] <container> [object]")
		fmt.Println(brimtext.Wrap(`
Benchmark tests POSTs. By default, 1000 POSTs are done against the named <container>. If you specify [object] it will be used as a prefix for the object names, otherwise "bench-" will be used. Generally, you would use bench-put to populate the containers and objects, and then use bench-post with the same options to test POSTing.
`, 0, " ", " "))
		fmt.Print(cli.HelpFlags(cli.BenchPostFlags))
		fmt.Println("\nbench-put [options] <container> [object]")
		fmt.Println(brimtext.Wrap(`
Benchmark tests PUTs. By default, 1000 PUTs are done into the named <container>. If you specify [object] it will be used as a prefix for the object names, otherwise "bench-" will be used.
`, 0, " ", " "))
		fmt.Print(cli.HelpFlags(cli.BenchPutFlags))
		fmt.Println("\ndelete [container] [object]")
		fmt.Println(brimtext.Wrap(`
Performs a DELETE request. A DELETE, as probably expected, is used to remove the target.
`, 0, " ", " "))
		fmt.Println("\ndownload [options] [container] [object] <destpath>")
		fmt.Println(brimtext.Wrap(`
Downloads an object or objects to a local file or files. The <destpath> indicates where you want the file or files to be created. If you don't give [container] [object] the entire account will be downloaded (requires -a for confirmation). If you just give [container] that entire container will be downloaded. Perhaps obviously, if you give [container] [object] just that object will be downloaded.
`, 0, " ", " "))
		fmt.Print(cli.HelpFlags(cli.DownloadFlags))
		fmt.Println("\nget [options] [container] [object]")
		fmt.Println(brimtext.Wrap(`
Performs a GET request. A GET on an account or container will output the listing of containers or objects, respectively. A GET on an object will output the content of the object to standard output.
`, 0, " ", " "))
		fmt.Print(cli.HelpFlags(cli.GetFlags))
		fmt.Println("\nhead [options] [container] [object]")
		fmt.Println(brimtext.Wrap(`
Performs a HEAD request, giving overall information about the account, container, or object.
`, 0, " ", " "))
		fmt.Println("\npost [container] [object]")
		fmt.Println(brimtext.Wrap(`
Performs a POST request. POSTs allow you to update the metadata for the target.
`, 0, " ", " "))
		fmt.Println("\nput [container] [object]")
		fmt.Println(brimtext.Wrap(`
Performs a PUT request. A PUT to an account or container will create them. A PUT to an object will create it using the content from standard input.
`, 0, " ", " "))
		fmt.Println("\nupload [options] <sourcepath> [container] [object]")
		fmt.Println(brimtext.Wrap(`
Uploads local files as objects. If you don't specify [container] the name of the current directory will be used. If you don't specify [object] the relative path name from the current directory will be used. If you do specify [object] while uploading a directory, [object] will be used as a prefix to the resulting object names. Note that when uploading a directory, only regular files will be uploaded.
`, 0, " ", " "))
		fmt.Println("\n[container] [object] can also be specified as [container]/[object]")
	} else {
		// Error path: rewrite the flag package's standard unknown-flag
		// message into a friendlier form before printing it.
		msg := err.Error()
		if strings.HasPrefix(msg, "flag provided but not defined: ") {
			msg = "No such option: " + msg[len("flag provided but not defined: "):]
		}
		fmt.Fprintln(os.Stderr, msg)
	}
	os.Exit(1)
}
// cliFatalf prints the formatted message to standard error and terminates
// the process with exit status 1. Unlike cliFatal, it never prints usage
// text; the cli parameter is accepted only to match the fatalf hook shape.
func cliFatalf(cli *CLIInstance, frmt string, args ...interface{}) {
	os.Stderr.WriteString(fmt.Sprintf(frmt, args...))
	os.Exit(1)
}
// cliVerbosef prints the formatted message to standard error, but only when
// the global verbose flag is set; otherwise it is a no-op.
func cliVerbosef(cli *CLIInstance, frmt string, args ...interface{}) {
	if !*cli.GlobalFlagVerbose {
		return
	}
	fmt.Fprintf(os.Stderr, frmt, args...)
}
// HelpFlags returns the formatted help text for the FlagSet given.
//
// Usage strings of the form "|<arg>| description" are split so that <arg>
// joins the option name in the first column and only the description remains
// in the second; the two columns are then aligned to the current TTY width.
func (cli *CLIInstance) HelpFlags(flags *flag.FlagSet) string {
	widest := 0
	var rows [][]string
	flags.VisitAll(func(f *flag.Flag) {
		name := " -" + f.Name
		usage := strings.TrimSpace(f.Usage)
		if strings.HasPrefix(usage, "|") {
			// "|<arg>| description" — move <arg> into the name column.
			if parts := strings.SplitN(usage, "|", 3); len(parts) == 3 {
				name += " " + strings.TrimSpace(parts[1])
				usage = strings.TrimSpace(parts[2])
			}
		}
		if widest < len(name) {
			widest = len(name)
		}
		rows = append(rows, []string{name, usage})
	})
	opts := brimtext.NewDefaultAlignOptions()
	opts.Widths = []int{0, brimtext.GetTTYWidth() - widest - 2}
	return brimtext.Align(rows, opts)
}
// auth displays information retrieved after authentication: the account
// URL (or URLs, when the client reports several) and, when the client also
// implements ClientToken, the auth token.
func (cli *CLIInstance) auth(c Client, args []string) {
	if uc, ok := c.(*userClient); ok {
		urls := uc.GetURLs()
		switch len(urls) {
		case 0:
			fmt.Println("Account URL:")
		case 1:
			fmt.Println("Account URL:", urls[0])
		default:
			fmt.Println("Account URLs:", strings.Join(urls, " "))
		}
	} else {
		fmt.Println("Account URL:", c.GetURL())
	}
	if ct, ok := c.(ClientToken); ok {
		fmt.Println("Token:", ct.GetToken())
	}
}
// benchDelete benchmark tests DELETEs against <container> (or, with the
// -containers flag, against a numbered family of containers). Object names
// are <object-prefix><index>; the prefix defaults to "bench-". After the
// run it also attempts to delete the container(s) and prints a rate summary.
// Optional CSV outputs record per-request timings and per-minute counts.
func (cli *CLIInstance) benchDelete(c Client, args []string) {
	if err := cli.BenchDeleteFlags.Parse(args); err != nil {
		cli.fatal(cli, err)
	}
	container, object := parsePath(cli.BenchDeleteFlags.Args())
	if container == "" {
		cli.fatalf(cli, "bench-delete requires <container>\n")
	}
	if object == "" {
		// Default object-name prefix.
		object = "bench-"
	}
	containers := *cli.benchDeleteFlagContainers
	if containers < 1 {
		containers = 1
	}
	count := *cli.benchDeleteFlagCount
	if count < 1 {
		count = 1000
	}
	// csvw, when enabled, receives one row per request; csvlk serializes the
	// concurrent workers' writes to it.
	var csvw *csv.Writer
	var csvlk sync.Mutex
	if *cli.benchDeleteFlagCSV != "" {
		csvf, err := os.Create(*cli.benchDeleteFlagCSV)
		if err != nil {
			cli.fatal(cli, err)
		}
		csvw = csv.NewWriter(csvf)
		defer func() {
			csvw.Flush()
			csvf.Close()
		}()
		csvw.Write([]string{"completion_time_unix_nano", "object_name", "transaction_id", "status", "elapsed_nanoseconds"})
		csvw.Flush()
	}
	// csvotw, when enabled, receives one row per reporting interval
	// (written only from the feeder goroutine, so no lock is used).
	var csvotw *csv.Writer
	if *cli.benchDeleteFlagCSVOT != "" {
		csvotf, err := os.Create(*cli.benchDeleteFlagCSVOT)
		if err != nil {
			cli.fatal(cli, err)
		}
		csvotw = csv.NewWriter(csvotf)
		defer func() {
			csvotw.Flush()
			csvotf.Close()
		}()
		csvotw.Write([]string{"time_unix_nano", "count_since_last_time"})
		csvotw.Write([]string{fmt.Sprintf("%d", time.Now().UnixNano()), "0"})
		csvotw.Flush()
	}
	concurrency := *cli.globalFlagConcurrency
	if concurrency < 1 {
		concurrency = 1
	}
	benchChan := make(chan int, concurrency)
	wg := sync.WaitGroup{}
	wg.Add(concurrency)
	for x := 0; x < concurrency; x++ {
		go func() {
			var start time.Time
			for {
				// The feeder sends 1-based indexes; a zero value means
				// benchChan was closed, which is the workers' stop signal.
				i := <-benchChan
				if i == 0 {
					break
				}
				i--
				deleteContainer := container
				if containers > 1 {
					// Distribute objects across the numbered containers.
					deleteContainer = fmt.Sprintf("%s%d", deleteContainer, i%containers)
				}
				deleteObject := fmt.Sprintf("%s%d", object, i)
				cli.verbosef(cli, "DELETE %s/%s\n", deleteContainer, deleteObject)
				if csvw != nil {
					start = time.Now()
				}
				resp := c.DeleteObject(deleteContainer, deleteObject, cli.globalFlagHeaders.Headers())
				if csvw != nil {
					stop := time.Now()
					elapsed := stop.Sub(start).Nanoseconds()
					csvlk.Lock()
					csvw.Write([]string{
						fmt.Sprintf("%d", stop.UnixNano()),
						deleteContainer + "/" + deleteObject,
						resp.Header.Get("X-Trans-Id"),
						fmt.Sprintf("%d", resp.StatusCode),
						fmt.Sprintf("%d", elapsed),
					})
					csvw.Flush()
					csvlk.Unlock()
				}
				if resp.StatusCode/100 != 2 {
					bodyBytes, _ := ioutil.ReadAll(resp.Body)
					resp.Body.Close()
					if *cli.globalFlagContinueOnError {
						fmt.Fprintf(os.Stderr, "DELETE %s/%s - %d %s - %s\n", deleteContainer, deleteObject, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
						continue
					} else {
						cli.fatalf(cli, "DELETE %s/%s - %d %s - %s\n", deleteContainer, deleteObject, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
					}
				}
				resp.Body.Close()
			}
			wg.Done()
		}()
	}
	if containers == 1 {
		fmt.Printf("Bench-DELETE of %d objects from 1 container, at %d concurrency...", count, concurrency)
	} else {
		fmt.Printf("Bench-DELETE of %d objects, distributed across %d containers, at %d concurrency...", count, containers, concurrency)
	}
	ticker := time.NewTicker(time.Minute)
	start := time.Now()
	lastSoFar := 0
	// Feeder loop: pushes 1-based indexes to the workers, pausing each
	// minute to print a progress line.
	for i := 1; i <= count; i++ {
		waiting := true
		for waiting {
			select {
			case <-ticker.C:
				// Approximates completed requests: i have been handed out,
				// of which up to `concurrency` may still be in flight.
				soFar := i - concurrency
				now := time.Now()
				elapsed := now.Sub(start)
				// NOTE(review): elapsed/time.Second truncates to whole
				// seconds before the division — rates are slightly off and
				// become +Inf for sub-second spans; confirm intended.
				fmt.Printf("\n%.05fs for %d DELETEs so far, %.05f DELETEs per second...", float64(elapsed)/float64(time.Second), soFar, float64(soFar)/float64(elapsed/time.Second))
				if csvotw != nil {
					csvotw.Write([]string{
						fmt.Sprintf("%d", now.UnixNano()),
						fmt.Sprintf("%d", soFar-lastSoFar),
					})
					csvotw.Flush()
					lastSoFar = soFar
				}
			case benchChan <- i:
				waiting = false
			}
		}
	}
	// Closing benchChan makes each worker receive the zero value and exit.
	close(benchChan)
	wg.Wait()
	stop := time.Now()
	elapsed := stop.Sub(start)
	ticker.Stop()
	fmt.Println()
	// Best-effort cleanup of the now-presumably-empty container(s);
	// failures are reported but do not abort.
	if containers == 1 {
		fmt.Printf("Attempting to delete container...")
		cli.verbosef(cli, "DELETE %s\n", container)
		resp := c.DeleteContainer(container, cli.globalFlagHeaders.Headers())
		if resp.StatusCode/100 != 2 {
			bodyBytes, _ := ioutil.ReadAll(resp.Body)
			resp.Body.Close()
			fmt.Fprintf(os.Stderr, "DELETE %s - %d %s - %s\n", container, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
		}
		// NOTE(review): on the error path above the body is closed twice;
		// the second Close's error is discarded, so this is benign.
		resp.Body.Close()
	} else {
		fmt.Printf("Attempting to delete the %d containers...", containers)
		for x := 0; x < containers; x++ {
			deleteContainer := fmt.Sprintf("%s%d", container, x)
			cli.verbosef(cli, "DELETE %s\n", deleteContainer)
			resp := c.DeleteContainer(deleteContainer, cli.globalFlagHeaders.Headers())
			if resp.StatusCode/100 != 2 {
				bodyBytes, _ := ioutil.ReadAll(resp.Body)
				resp.Body.Close()
				fmt.Fprintf(os.Stderr, "DELETE %s - %d %s - %s\n", deleteContainer, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
			}
			resp.Body.Close()
		}
	}
	fmt.Println()
	fmt.Printf("%.05fs total time, %.05f DELETEs per second.\n", float64(elapsed)/float64(time.Second), float64(count)/float64(elapsed/time.Second))
	if csvotw != nil {
		csvotw.Write([]string{
			fmt.Sprintf("%d", stop.UnixNano()),
			fmt.Sprintf("%d", count-lastSoFar),
		})
		csvotw.Flush()
	}
}
// benchGet benchmark tests GETs from <container> (or a numbered family of
// containers with -containers). Object names are <object-prefix><index>
// with the prefix defaulting to "bench-"; -iterations repeats the full set
// of `count` distinct objects. Optional CSV outputs record per-request
// timings (including time-to-first-headers) and per-minute counts.
func (cli *CLIInstance) benchGet(c Client, args []string) {
	if err := cli.BenchGetFlags.Parse(args); err != nil {
		cli.fatal(cli, err)
	}
	container, object := parsePath(cli.BenchGetFlags.Args())
	if container == "" {
		cli.fatalf(cli, "bench-get requires <container>\n")
	}
	if object == "" {
		// Default object-name prefix.
		object = "bench-"
	}
	containers := *cli.benchGetFlagContainers
	if containers < 1 {
		containers = 1
	}
	count := *cli.benchGetFlagCount
	if count < 1 {
		count = 1000
	}
	// csvw, when enabled, receives one row per request; csvlk serializes the
	// concurrent workers' writes to it.
	var csvw *csv.Writer
	var csvlk sync.Mutex
	if *cli.benchGetFlagCSV != "" {
		csvf, err := os.Create(*cli.benchGetFlagCSV)
		if err != nil {
			cli.fatal(cli, err)
		}
		csvw = csv.NewWriter(csvf)
		defer func() {
			csvw.Flush()
			csvf.Close()
		}()
		csvw.Write([]string{"completion_time_unix_nano", "object_name", "transaction_id", "status", "headers_elapsed_nanoseconds", "elapsed_nanoseconds"})
		csvw.Flush()
	}
	// csvotw, when enabled, receives one row per reporting interval
	// (written only from the feeder loop, so no lock is used).
	var csvotw *csv.Writer
	if *cli.benchGetFlagCSVOT != "" {
		csvotf, err := os.Create(*cli.benchGetFlagCSVOT)
		if err != nil {
			cli.fatal(cli, err)
		}
		csvotw = csv.NewWriter(csvotf)
		defer func() {
			csvotw.Flush()
			csvotf.Close()
		}()
		csvotw.Write([]string{"time_unix_nano", "count_since_last_time"})
		csvotw.Write([]string{fmt.Sprintf("%d", time.Now().UnixNano()), "0"})
		csvotw.Flush()
	}
	iterations := *cli.benchGetFlagIterations
	if iterations < 1 {
		iterations = 1
	}
	concurrency := *cli.globalFlagConcurrency
	if concurrency < 1 {
		concurrency = 1
	}
	benchChan := make(chan int, concurrency)
	wg := sync.WaitGroup{}
	wg.Add(concurrency)
	for x := 0; x < concurrency; x++ {
		go func() {
			var start time.Time
			var headers_elapsed int64
			for {
				// The feeder sends 1-based indexes; a zero value means
				// benchChan was closed, which is the workers' stop signal.
				i := <-benchChan
				if i == 0 {
					break
				}
				i--
				getContainer := container
				if containers > 1 {
					// Distribute objects across the numbered containers.
					getContainer = fmt.Sprintf("%s%d", getContainer, i%containers)
				}
				getObject := fmt.Sprintf("%s%d", object, i)
				cli.verbosef(cli, "GET %s/%s\n", getContainer, getObject)
				if csvw != nil {
					start = time.Now()
				}
				resp := c.GetObject(getContainer, getObject, cli.globalFlagHeaders.Headers())
				if csvw != nil {
					// Time until the response headers arrived; the full
					// elapsed time (including body transfer) is taken below.
					headers_elapsed = time.Now().Sub(start).Nanoseconds()
				}
				if resp.StatusCode/100 != 2 {
					bodyBytes, _ := ioutil.ReadAll(resp.Body)
					resp.Body.Close()
					if *cli.globalFlagContinueOnError {
						// NOTE(review): on this path no CSV row is written
						// for the failed request — confirm intended.
						fmt.Fprintf(os.Stderr, "GET %s/%s - %d %s - %s\n", getContainer, getObject, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
						continue
					} else {
						cli.fatalf(cli, "GET %s/%s - %d %s - %s\n", getContainer, getObject, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
					}
				} else {
					// Drain the body so the full-transfer time is measured
					// and the connection can be reused.
					io.Copy(ioutil.Discard, resp.Body)
				}
				resp.Body.Close()
				if csvw != nil {
					stop := time.Now()
					elapsed := stop.Sub(start).Nanoseconds()
					csvlk.Lock()
					csvw.Write([]string{
						fmt.Sprintf("%d", stop.UnixNano()),
						getContainer + "/" + getObject,
						resp.Header.Get("X-Trans-Id"),
						fmt.Sprintf("%d", resp.StatusCode),
						fmt.Sprintf("%d", headers_elapsed),
						fmt.Sprintf("%d", elapsed),
					})
					csvw.Flush()
					csvlk.Unlock()
				}
			}
			wg.Done()
		}()
	}
	if containers == 1 {
		fmt.Printf("Bench-GET of %d (%d distinct) objects, from 1 container, at %d concurrency...", iterations*count, count, concurrency)
	} else {
		fmt.Printf("Bench-GET of %d (%d distinct) objects, distributed across %d containers, at %d concurrency...", iterations*count, count, containers, concurrency)
	}
	ticker := time.NewTicker(time.Minute)
	start := time.Now()
	lastSoFar := 0
	// Feeder loop: pushes 1-based indexes to the workers (repeating the
	// whole set once per iteration), pausing each minute for a progress line.
	for iteration := 0; iteration < iterations; iteration++ {
		for i := 1; i <= count; i++ {
			waiting := true
			for waiting {
				select {
				case <-ticker.C:
					// Approximates completed requests: handed-out total minus
					// up to `concurrency` that may still be in flight.
					soFar := iteration*count + i - concurrency
					now := time.Now()
					elapsed := now.Sub(start)
					// NOTE(review): elapsed/time.Second truncates to whole
					// seconds before the division — confirm intended.
					fmt.Printf("\n%.05fs for %d GETs so far, %.05f GETs per second...", float64(elapsed)/float64(time.Second), soFar, float64(soFar)/float64(elapsed/time.Second))
					if csvotw != nil {
						csvotw.Write([]string{
							fmt.Sprintf("%d", now.UnixNano()),
							fmt.Sprintf("%d", soFar-lastSoFar),
						})
						csvotw.Flush()
						lastSoFar = soFar
					}
				case benchChan <- i:
					waiting = false
				}
			}
		}
	}
	// Closing benchChan makes each worker receive the zero value and exit.
	close(benchChan)
	wg.Wait()
	stop := time.Now()
	elapsed := stop.Sub(start)
	ticker.Stop()
	fmt.Println()
	fmt.Printf("%.05fs total time, %.05f GETs per second.\n", float64(elapsed)/float64(time.Second), float64(iterations*count)/float64(elapsed/time.Second))
	if csvotw != nil {
		csvotw.Write([]string{
			fmt.Sprintf("%d", stop.UnixNano()),
			fmt.Sprintf("%d", iterations*count-lastSoFar),
		})
		csvotw.Flush()
	}
}
// benchHead benchmark tests HEADs from <container> (or a numbered family of
// containers with -containers). Object names are <object-prefix><index>
// with the prefix defaulting to "bench-"; -iterations repeats the full set
// of `count` distinct objects. Optional CSV outputs record per-request
// timings (including time-to-first-headers) and per-minute counts.
func (cli *CLIInstance) benchHead(c Client, args []string) {
	if err := cli.BenchHeadFlags.Parse(args); err != nil {
		cli.fatal(cli, err)
	}
	container, object := parsePath(cli.BenchHeadFlags.Args())
	if container == "" {
		cli.fatalf(cli, "bench-head requires <container>\n")
	}
	if object == "" {
		// Default object-name prefix.
		object = "bench-"
	}
	containers := *cli.benchHeadFlagContainers
	if containers < 1 {
		containers = 1
	}
	count := *cli.benchHeadFlagCount
	if count < 1 {
		count = 1000
	}
	// csvw, when enabled, receives one row per request; csvlk serializes the
	// concurrent workers' writes to it.
	var csvw *csv.Writer
	var csvlk sync.Mutex
	if *cli.benchHeadFlagCSV != "" {
		csvf, err := os.Create(*cli.benchHeadFlagCSV)
		if err != nil {
			cli.fatal(cli, err)
		}
		csvw = csv.NewWriter(csvf)
		defer func() {
			csvw.Flush()
			csvf.Close()
		}()
		csvw.Write([]string{"completion_time_unix_nano", "object_name", "transaction_id", "status", "headers_elapsed_nanoseconds", "elapsed_nanoseconds"})
		csvw.Flush()
	}
	// csvotw, when enabled, receives one row per reporting interval
	// (written only from the feeder loop, so no lock is used).
	var csvotw *csv.Writer
	if *cli.benchHeadFlagCSVOT != "" {
		csvotf, err := os.Create(*cli.benchHeadFlagCSVOT)
		if err != nil {
			cli.fatal(cli, err)
		}
		csvotw = csv.NewWriter(csvotf)
		defer func() {
			csvotw.Flush()
			csvotf.Close()
		}()
		csvotw.Write([]string{"time_unix_nano", "count_since_last_time"})
		csvotw.Write([]string{fmt.Sprintf("%d", time.Now().UnixNano()), "0"})
		csvotw.Flush()
	}
	iterations := *cli.benchHeadFlagIterations
	if iterations < 1 {
		iterations = 1
	}
	concurrency := *cli.globalFlagConcurrency
	if concurrency < 1 {
		concurrency = 1
	}
	benchChan := make(chan int, concurrency)
	wg := sync.WaitGroup{}
	wg.Add(concurrency)
	for x := 0; x < concurrency; x++ {
		go func() {
			var start time.Time
			var headers_elapsed int64
			for {
				// The feeder sends 1-based indexes; a zero value means
				// benchChan was closed, which is the workers' stop signal.
				i := <-benchChan
				if i == 0 {
					break
				}
				i--
				headContainer := container
				if containers > 1 {
					// Distribute objects across the numbered containers.
					headContainer = fmt.Sprintf("%s%d", headContainer, i%containers)
				}
				headObject := fmt.Sprintf("%s%d", object, i)
				cli.verbosef(cli, "HEAD %s/%s\n", headContainer, headObject)
				if csvw != nil {
					start = time.Now()
				}
				resp := c.HeadObject(headContainer, headObject, cli.globalFlagHeaders.Headers())
				if csvw != nil {
					// Time until the response headers arrived; the full
					// elapsed time is taken below after the body is drained.
					headers_elapsed = time.Now().Sub(start).Nanoseconds()
				}
				if resp.StatusCode/100 != 2 {
					bodyBytes, _ := ioutil.ReadAll(resp.Body)
					resp.Body.Close()
					if *cli.globalFlagContinueOnError {
						// NOTE(review): on this path no CSV row is written
						// for the failed request — confirm intended.
						fmt.Fprintf(os.Stderr, "HEAD %s/%s - %d %s - %s\n", headContainer, headObject, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
						continue
					} else {
						cli.fatalf(cli, "HEAD %s/%s - %d %s - %s\n", headContainer, headObject, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
					}
				} else {
					// Drain any body so the connection can be reused.
					io.Copy(ioutil.Discard, resp.Body)
				}
				resp.Body.Close()
				if csvw != nil {
					stop := time.Now()
					elapsed := stop.Sub(start).Nanoseconds()
					csvlk.Lock()
					csvw.Write([]string{
						fmt.Sprintf("%d", stop.UnixNano()),
						headContainer + "/" + headObject,
						resp.Header.Get("X-Trans-Id"),
						fmt.Sprintf("%d", resp.StatusCode),
						fmt.Sprintf("%d", headers_elapsed),
						fmt.Sprintf("%d", elapsed),
					})
					csvw.Flush()
					csvlk.Unlock()
				}
			}
			wg.Done()
		}()
	}
	if containers == 1 {
		fmt.Printf("Bench-HEAD of %d (%d distinct) objects, from 1 container, at %d concurrency...", iterations*count, count, concurrency)
	} else {
		fmt.Printf("Bench-HEAD of %d (%d distinct) objects, distributed across %d containers, at %d concurrency...", iterations*count, count, containers, concurrency)
	}
	ticker := time.NewTicker(time.Minute)
	start := time.Now()
	lastSoFar := 0
	// Feeder loop: pushes 1-based indexes to the workers (repeating the
	// whole set once per iteration), pausing each minute for a progress line.
	for iteration := 0; iteration < iterations; iteration++ {
		for i := 1; i <= count; i++ {
			waiting := true
			for waiting {
				select {
				case <-ticker.C:
					// Approximates completed requests: handed-out total minus
					// up to `concurrency` that may still be in flight.
					soFar := iteration*count + i - concurrency
					now := time.Now()
					elapsed := now.Sub(start)
					// NOTE(review): elapsed/time.Second truncates to whole
					// seconds before the division — confirm intended.
					fmt.Printf("\n%.05fs for %d HEADs so far, %.05f HEADs per second...", float64(elapsed)/float64(time.Second), soFar, float64(soFar)/float64(elapsed/time.Second))
					if csvotw != nil {
						csvotw.Write([]string{
							fmt.Sprintf("%d", now.UnixNano()),
							fmt.Sprintf("%d", soFar-lastSoFar),
						})
						csvotw.Flush()
						lastSoFar = soFar
					}
				case benchChan <- i:
					waiting = false
				}
			}
		}
	}
	// Closing benchChan makes each worker receive the zero value and exit.
	close(benchChan)
	wg.Wait()
	stop := time.Now()
	elapsed := stop.Sub(start)
	ticker.Stop()
	fmt.Println()
	fmt.Printf("%.05fs total time, %.05f HEADs per second.\n", float64(elapsed)/float64(time.Second), float64(iterations*count)/float64(elapsed/time.Second))
	if csvotw != nil {
		csvotw.Write([]string{
			fmt.Sprintf("%d", stop.UnixNano()),
			fmt.Sprintf("%d", iterations*count-lastSoFar),
		})
		csvotw.Flush()
	}
}
func (cli *CLIInstance) benchMixed(c Client, args []string) {
if err := cli.BenchMixedFlags.Parse(args); err != nil {
cli.fatal(cli, err)
}
container, object := parsePath(cli.BenchMixedFlags.Args())
if container == "" {
cli.fatalf(cli, "bench-mixed requires <container>\n")
}
if object == "" {
object = "bench-"
}
containers := *cli.benchMixedFlagContainers
if containers < 1 {
containers = 1
}
size := int64(*cli.benchMixedFlagSize)
if size < 0 {
size = 4096
}
timespan, err := time.ParseDuration(*cli.benchMixedFlagTime)
if err != nil {
cli.fatal(cli, err)
}
const (
delet = iota
get
head
post
put
)
methods := []string{
"DELETE",
"GET",
"HEAD",
"POST",
"PUT",
}
var csvw *csv.Writer
var csvlk sync.Mutex
if *cli.benchMixedFlagCSV != "" {
csvf, err := os.Create(*cli.benchMixedFlagCSV)
if err != nil {
cli.fatal(cli, err)
}
csvw = csv.NewWriter(csvf)
defer func() {
csvw.Flush()
csvf.Close()
}()
csvw.Write([]string{"completion_time_unix_nano", "method", "object_name", "transaction_id", "status", "elapsed_nanoseconds"})
csvw.Flush()
}
var csvotw *csv.Writer
if *cli.benchMixedFlagCSVOT != "" {
csvotf, err := os.Create(*cli.benchMixedFlagCSVOT)
if err != nil {
cli.fatal(cli, err)
}
csvotw = csv.NewWriter(csvotf)
defer func() {
csvotw.Flush()
csvotf.Close()
}()
csvotw.Write([]string{"time_unix_nano", "DELETE", "GET", "HEAD", "POST", "PUT"})
csvotw.Write([]string{fmt.Sprintf("%d", time.Now().UnixNano()), "0", "0", "0", "0", "0", "0"})
csvotw.Flush()
}
if containers == 1 {
fmt.Printf("Ensuring container exists...")
cli.verbosef(cli, "PUT %s\n", container)
resp := c.PutContainer(container, cli.globalFlagHeaders.Headers())
if resp.StatusCode/100 != 2 {
bodyBytes, _ := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if *cli.globalFlagContinueOnError {
fmt.Fprintf(os.Stderr, "PUT %s - %d %s - %s\n", container, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
} else {
cli.fatalf(cli, "PUT %s - %d %s - %s\n", container, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
}
}
resp.Body.Close()
} else {
fmt.Printf("Ensuring %d containers exist...", containers)
for x := 0; x < containers; x++ {
putContainer := fmt.Sprintf("%s%d", container, x)
cli.verbosef(cli, "PUT %s\n", putContainer)
resp := c.PutContainer(putContainer, cli.globalFlagHeaders.Headers())
if resp.StatusCode/100 != 2 {
bodyBytes, _ := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if *cli.globalFlagContinueOnError {
fmt.Fprintf(os.Stderr, "PUT %s - %d %s - %s\n", putContainer, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
continue
} else {
cli.fatalf(cli, "PUT %s - %d %s - %s\n", putContainer, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
}
}
resp.Body.Close()
}
}
fmt.Println()
concurrency := *cli.globalFlagConcurrency
if concurrency < 1 {
concurrency = 1
}
timespanTicker := time.NewTicker(timespan)
doneChan := make(chan bool)
go func() {
<-timespanTicker.C
close(doneChan)
}()
deleteChan := make(chan int, concurrency)
getChan := make(chan int, concurrency)
headChan := make(chan int, concurrency)
postChan := make(chan int, concurrency)
putChan := make(chan int, concurrency)
wg := sync.WaitGroup{}
var deletes int64
var gets int64
var heads int64
var posts int64
var puts int64
for x := 0; x < concurrency; x++ {
wg.Add(1)
go func() {
var start time.Time
op := delet
var i int
for {
select {
case <-doneChan:
wg.Done()
return
case i = <-deleteChan:
}
opContainer := container
if containers > 1 {
opContainer = fmt.Sprintf("%s%d", opContainer, i%containers)
}
opObject := fmt.Sprintf("%s%d", object, i)
cli.verbosef(cli, "%s %s/%s\n", methods[op], opContainer, opObject)
if csvw != nil {
start = time.Now()
}
resp := c.DeleteObject(opContainer, opObject, cli.globalFlagHeaders.Headers())
atomic.AddInt64(&deletes, 1)
if csvw != nil {
stop := time.Now()
elapsed := stop.Sub(start).Nanoseconds()
csvlk.Lock()
csvw.Write([]string{
fmt.Sprintf("%d", stop.UnixNano()),
methods[op],
opContainer + "/" + opObject,
resp.Header.Get("X-Trans-Id"),
fmt.Sprintf("%d", resp.StatusCode),
fmt.Sprintf("%d", elapsed),
})
csvw.Flush()
csvlk.Unlock()
}
if resp.StatusCode/100 != 2 {
bodyBytes, _ := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if *cli.globalFlagContinueOnError {
fmt.Fprintf(os.Stderr, "%s %s/%s - %d %s - %s\n", methods[op], opContainer, opObject, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
continue
} else {
cli.fatalf(cli, "%s %s/%s - %d %s - %s\n", methods[op], opContainer, opObject, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
}
} else {
io.Copy(ioutil.Discard, resp.Body)
}
resp.Body.Close()
}
}()
wg.Add(1)
go func() {
var start time.Time
op := get
var i int
for {
select {
case <-doneChan:
wg.Done()
return
case i = <-getChan:
}
opContainer := container
if containers > 1 {
opContainer = fmt.Sprintf("%s%d", opContainer, i%containers)
}
opObject := fmt.Sprintf("%s%d", object, i)
cli.verbosef(cli, "%s %s/%s\n", methods[op], opContainer, opObject)
if csvw != nil {
start = time.Now()
}
resp := c.GetObject(opContainer, opObject, cli.globalFlagHeaders.Headers())
atomic.AddInt64(&gets, 1)
if csvw != nil {
stop := time.Now()
elapsed := stop.Sub(start).Nanoseconds()
csvlk.Lock()
csvw.Write([]string{
fmt.Sprintf("%d", stop.UnixNano()),
methods[op],
opContainer + "/" + opObject,
resp.Header.Get("X-Trans-Id"),
fmt.Sprintf("%d", resp.StatusCode),
fmt.Sprintf("%d", elapsed),
})
csvw.Flush()
csvlk.Unlock()
}
if resp.StatusCode/100 != 2 {
bodyBytes, _ := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if *cli.globalFlagContinueOnError {
fmt.Fprintf(os.Stderr, "%s %s/%s - %d %s - %s\n", methods[op], opContainer, opObject, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
continue
} else {
cli.fatalf(cli, "%s %s/%s - %d %s - %s\n", methods[op], opContainer, opObject, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
}
} else {
io.Copy(ioutil.Discard, resp.Body)
}
resp.Body.Close()
}
}()
wg.Add(1)
go func() {
var start time.Time
op := head
var i int
for {
select {
case <-doneChan:
wg.Done()
return
case i = <-headChan:
}
opContainer := container
if containers > 1 {
opContainer = fmt.Sprintf("%s%d", opContainer, i%containers)
}
opObject := fmt.Sprintf("%s%d", object, i)
cli.verbosef(cli, "%s %s/%s\n", methods[op], opContainer, opObject)
if csvw != nil {
start = time.Now()
}
resp := c.HeadObject(opContainer, opObject, cli.globalFlagHeaders.Headers())
atomic.AddInt64(&heads, 1)
if csvw != nil {
stop := time.Now()
elapsed := stop.Sub(start).Nanoseconds()
csvlk.Lock()
csvw.Write([]string{
fmt.Sprintf("%d", stop.UnixNano()),
methods[op],
opContainer + "/" + opObject,
resp.Header.Get("X-Trans-Id"),
fmt.Sprintf("%d", resp.StatusCode),
fmt.Sprintf("%d", elapsed),
})
csvw.Flush()
csvlk.Unlock()
}
if resp.StatusCode/100 != 2 {
bodyBytes, _ := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if *cli.globalFlagContinueOnError {
fmt.Fprintf(os.Stderr, "%s %s/%s - %d %s - %s\n", methods[op], opContainer, opObject, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
continue
} else {
cli.fatalf(cli, "%s %s/%s - %d %s - %s\n", methods[op], opContainer, opObject, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
}
} else {
io.Copy(ioutil.Discard, resp.Body)
}
resp.Body.Close()
}
}()
wg.Add(1)
go func() {
var start time.Time
op := post
var i int
for {
select {
case <-doneChan:
wg.Done()
return
case i = <-postChan:
}
opContainer := container
if containers > 1 {
opContainer = fmt.Sprintf("%s%d", opContainer, i%containers)
}
opObject := fmt.Sprintf("%s%d", object, i)
cli.verbosef(cli, "%s %s/%s\n", methods[op], opContainer, opObject)
if csvw != nil {
start = time.Now()
}
headers := cli.globalFlagHeaders.Headers()
headers["X-Object-Meta-Bench-Mixed"] = strconv.Itoa(i)
resp := c.PostObject(opContainer, opObject, headers)
atomic.AddInt64(&posts, 1)
if csvw != nil {
stop := time.Now()
elapsed := stop.Sub(start).Nanoseconds()
csvlk.Lock()
csvw.Write([]string{
fmt.Sprintf("%d", stop.UnixNano()),
methods[op],
opContainer + "/" + opObject,
resp.Header.Get("X-Trans-Id"),
fmt.Sprintf("%d", resp.StatusCode),
fmt.Sprintf("%d", elapsed),
})
csvw.Flush()
csvlk.Unlock()
}
if resp.StatusCode/100 != 2 {
bodyBytes, _ := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if *cli.globalFlagContinueOnError {
fmt.Fprintf(os.Stderr, "%s %s/%s - %d %s - %s\n", methods[op], opContainer, opObject, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
continue
} else {
cli.fatalf(cli, "%s %s/%s - %d %s - %s\n", methods[op], opContainer, opObject, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
}
} else {
io.Copy(ioutil.Discard, resp.Body)
}
resp.Body.Close()
}
}()
wg.Add(1)
go func() {
rnd := NewRand(time.Now().UnixNano())
var start time.Time
op := put
var i int
for {
select {
case <-doneChan:
wg.Done()
return
case i = <-putChan:
}
opContainer := container
if containers > 1 {
opContainer = fmt.Sprintf("%s%d", opContainer, i%containers)
}
opObject := fmt.Sprintf("%s%d", object, i)
cli.verbosef(cli, "%s %s/%s\n", methods[op], opContainer, opObject)
if csvw != nil {
start = time.Now()
}
resp := c.PutObject(opContainer, opObject, cli.globalFlagHeaders.Headers(), &io.LimitedReader{R: rnd, N: size})
atomic.AddInt64(&puts, 1)
if csvw != nil {
stop := time.Now()
elapsed := stop.Sub(start).Nanoseconds()
csvlk.Lock()
csvw.Write([]string{
fmt.Sprintf("%d", stop.UnixNano()),
methods[op],
opContainer + "/" + opObject,
resp.Header.Get("X-Trans-Id"),
fmt.Sprintf("%d", resp.StatusCode),
fmt.Sprintf("%d", elapsed),
})
csvw.Flush()
csvlk.Unlock()
}
if resp.StatusCode/100 != 2 {
bodyBytes, _ := ioutil.ReadAll(resp.Body)
resp.Body.Close()
if *cli.globalFlagContinueOnError {
fmt.Fprintf(os.Stderr, "%s %s/%s - %d %s - %s\n", methods[op], opContainer, opObject, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
continue
} else {
cli.fatalf(cli, "%s %s/%s - %d %s - %s\n", methods[op], opContainer, opObject, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
}
} else {
io.Copy(ioutil.Discard, resp.Body)
}
resp.Body.Close()
}
}()
}
if containers == 1 {
fmt.Printf("Bench-Mixed for %s, each object is %d bytes, into 1 container, at %d concurrency...", timespan, size, concurrency)
} else {
fmt.Printf("Bench-Mixed for %s, each object is %d bytes, distributed across %d containers, at %d concurrency...", timespan, size, containers, concurrency)
}
updateTicker := time.NewTicker(time.Minute)
start := time.Now()
var lastDeletes int64
var lastGets int64
var lastHeads int64
var lastPosts int64
var lastPuts int64
wg.Add(1)
go func() {
defer wg.Done()
for {
select {
case <-doneChan:
return
case <-updateTicker.C:
now := time.Now()
elapsed := now.Sub(start)
snapshotDeletes := atomic.LoadInt64(&deletes)
snapshotGets := atomic.LoadInt64(&gets)
snapshotHeads := atomic.LoadInt64(&heads)
snapshotPosts := atomic.LoadInt64(&posts)
snapshotPuts := atomic.LoadInt64(&puts)
total := snapshotDeletes + snapshotGets + snapshotHeads + snapshotPosts + snapshotPuts
fmt.Printf("\n%.05fs for %d requests so far, %.05f requests per second...", float64(elapsed)/float64(time.Second), total, float64(total)/float64(elapsed/time.Second))
if csvotw != nil {
csvotw.Write([]string{
fmt.Sprintf("%d", now.UnixNano()),
fmt.Sprintf("%d", snapshotDeletes-lastDeletes),
fmt.Sprintf("%d", snapshotGets-lastGets),
fmt.Sprintf("%d", snapshotHeads-lastHeads),
fmt.Sprintf("%d", snapshotPosts-lastPosts),
fmt.Sprintf("%d", snapshotPuts-lastPuts),
})
csvotw.Flush()
lastDeletes = snapshotDeletes
lastGets = snapshotGets
lastHeads = snapshotHeads
lastPosts = snapshotPosts
lastPuts = snapshotPuts
}
}
}
}()
wg.Add(1)
go func() {
defer wg.Done()
var i int
for {
hi := int(atomic.LoadInt64(&puts)) - 10000 // TODO: CLI option
if i > hi {
select {
case <-doneChan:
return
default:
}
time.Sleep(time.Second)
continue
}
select {
case <-doneChan:
return
case deleteChan <- i:
i++
}
}
}()
wg.Add(1)
go func() {
defer wg.Done()
var i int
for {
lo := int(atomic.LoadInt64(&deletes)) + concurrency*2
if i < lo {
i = lo
}
hi := int(atomic.LoadInt64(&puts)) - concurrency*2
if i > hi {
i = lo
}
if i > hi {
select {
case <-doneChan:
return
default:
}
time.Sleep(time.Second)
continue
}
select {
case <-doneChan:
return
case getChan <- i:
i++
}
}
}()
wg.Add(1)
go func() {
defer wg.Done()
var i int
for {
lo := int(atomic.LoadInt64(&deletes)) + concurrency*2
if i < lo {
i = lo
}
hi := int(atomic.LoadInt64(&puts)) - concurrency*2
if i > hi {
i = lo
}
if i > hi {
select {
case <-doneChan:
return
default:
}
time.Sleep(time.Second)
continue
}
select {
case <-doneChan:
return
case headChan <- i:
i++
}
}
}()
wg.Add(1)
go func() {
defer wg.Done()
var i int
for {
lo := int(atomic.LoadInt64(&deletes)) + concurrency*2
if i < lo {
i = lo
}
hi := int(atomic.LoadInt64(&puts)) - concurrency*2
if i > hi {
i = lo
}
if i > hi {
select {
case <-doneChan:
return
default:
}
time.Sleep(time.Second)
continue
}
select {
case <-doneChan:
return
case postChan <- i:
i++
}
}
}()
wg.Add(1)
go func() {
defer wg.Done()
var i int
for {
select {
case <-doneChan:
return
case putChan <- i:
i++
}
}
}()
wg.Wait()
stop := time.Now()
elapsed := stop.Sub(start)
timespanTicker.Stop()
updateTicker.Stop()
fmt.Println()
total := deletes + gets + heads + posts + puts
fmt.Printf("%.05fs for %d requests, %.05f requests per second.\n", float64(elapsed)/float64(time.Second), total, float64(total)/float64(elapsed/time.Second))
if csvotw != nil {
csvotw.Write([]string{
fmt.Sprintf("%d", stop.UnixNano()),
fmt.Sprintf("%d", deletes-lastDeletes),
fmt.Sprintf("%d", gets-lastGets),
fmt.Sprintf("%d", heads-lastHeads),
fmt.Sprintf("%d", posts-lastPosts),
fmt.Sprintf("%d", puts-lastPuts),
})
csvotw.Flush()
}
}
// benchPost runs the "bench-post" command: it issues `count` POST requests
// for objects named <object><i> in <container> (or <container><i%containers>
// when -containers > 1) at the configured concurrency. It can optionally
// write a per-request CSV and a per-minute throughput CSV.
func (cli *CLIInstance) benchPost(c Client, args []string) {
	if err := cli.BenchPostFlags.Parse(args); err != nil {
		cli.fatal(cli, err)
	}
	container, object := parsePath(cli.BenchPostFlags.Args())
	if container == "" {
		cli.fatalf(cli, "bench-post requires <container>\n")
	}
	if object == "" {
		object = "bench-" // default object-name prefix; the index is appended per request
	}
	containers := *cli.benchPostFlagContainers
	if containers < 1 {
		containers = 1
	}
	count := *cli.benchPostFlagCount
	if count < 1 {
		count = 1000
	}
	// csvw, when enabled, receives one row per request; csvlk serializes the
	// concurrent workers writing to it.
	var csvw *csv.Writer
	var csvlk sync.Mutex
	if *cli.benchPostFlagCSV != "" {
		csvf, err := os.Create(*cli.benchPostFlagCSV)
		if err != nil {
			cli.fatal(cli, err)
		}
		csvw = csv.NewWriter(csvf)
		defer func() {
			csvw.Flush()
			csvf.Close()
		}()
		csvw.Write([]string{"completion_time_unix_nano", "object_name", "transaction_id", "status", "elapsed_nanoseconds"})
		csvw.Flush()
	}
	// csvotw, when enabled, receives one throughput row per minute (written
	// only from the single feeder loop below, so no lock is needed).
	var csvotw *csv.Writer
	if *cli.benchPostFlagCSVOT != "" {
		csvotf, err := os.Create(*cli.benchPostFlagCSVOT)
		if err != nil {
			cli.fatal(cli, err)
		}
		csvotw = csv.NewWriter(csvotf)
		defer func() {
			csvotw.Flush()
			csvotf.Close()
		}()
		csvotw.Write([]string{"time_unix_nano", "count_since_last_time"})
		csvotw.Write([]string{fmt.Sprintf("%d", time.Now().UnixNano()), "0"})
		csvotw.Flush()
	}
	concurrency := *cli.globalFlagConcurrency
	if concurrency < 1 {
		concurrency = 1
	}
	// Indices are sent 1-based so the zero value a worker receives after
	// close(benchChan) can serve as the stop sentinel.
	benchChan := make(chan int, concurrency)
	wg := sync.WaitGroup{}
	wg.Add(concurrency)
	for x := 0; x < concurrency; x++ {
		go func() {
			var start time.Time
			for {
				i := <-benchChan
				if i == 0 {
					break // channel closed and drained: stop this worker
				}
				i-- // convert back to the 0-based object index
				postContainer := container
				if containers > 1 {
					postContainer = fmt.Sprintf("%s%d", postContainer, i%containers)
				}
				postObject := fmt.Sprintf("%s%d", object, i)
				cli.verbosef(cli, "POST %s/%s\n", postContainer, postObject)
				if csvw != nil {
					start = time.Now()
				}
				resp := c.PostObject(postContainer, postObject, cli.globalFlagHeaders.Headers())
				if csvw != nil {
					stop := time.Now()
					elapsed := stop.Sub(start).Nanoseconds()
					csvlk.Lock()
					csvw.Write([]string{
						fmt.Sprintf("%d", stop.UnixNano()),
						postContainer + "/" + postObject,
						resp.Header.Get("X-Trans-Id"),
						fmt.Sprintf("%d", resp.StatusCode),
						fmt.Sprintf("%d", elapsed),
					})
					csvw.Flush()
					csvlk.Unlock()
				}
				if resp.StatusCode/100 != 2 {
					bodyBytes, _ := ioutil.ReadAll(resp.Body)
					resp.Body.Close()
					if *cli.globalFlagContinueOnError {
						fmt.Fprintf(os.Stderr, "POST %s/%s - %d %s - %s\n", postContainer, postObject, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
						continue
					} else {
						cli.fatalf(cli, "POST %s/%s - %d %s - %s\n", postContainer, postObject, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
					}
				}
				resp.Body.Close()
			}
			wg.Done()
		}()
	}
	if containers == 1 {
		fmt.Printf("Bench-POST of %d objects in 1 container, at %d concurrency...", count, concurrency)
	} else {
		fmt.Printf("Bench-POST of %d objects, distributed across %d containers, at %d concurrency...", count, containers, concurrency)
	}
	ticker := time.NewTicker(time.Minute)
	start := time.Now()
	lastSoFar := 0
	// Feeder loop: push each 1-based index, emitting a progress line (and an
	// optional throughput CSV row) whenever a minute tick fires while waiting
	// for channel capacity.
	for i := 1; i <= count; i++ {
		waiting := true
		for waiting {
			select {
			case <-ticker.C:
				// Approximate completed requests: queued so far minus the
				// roughly `concurrency` requests still in flight.
				soFar := i - concurrency
				now := time.Now()
				elapsed := now.Sub(start)
				fmt.Printf("\n%.05fs for %d POSTs so far, %.05f POSTs per second...", float64(elapsed)/float64(time.Second), soFar, float64(soFar)/float64(elapsed/time.Second))
				if csvotw != nil {
					csvotw.Write([]string{
						fmt.Sprintf("%d", now.UnixNano()),
						fmt.Sprintf("%d", soFar-lastSoFar),
					})
					csvotw.Flush()
					lastSoFar = soFar
				}
			case benchChan <- i:
				waiting = false
			}
		}
	}
	close(benchChan) // workers see the zero value and exit
	wg.Wait()
	stop := time.Now()
	elapsed := stop.Sub(start)
	ticker.Stop()
	fmt.Println()
	fmt.Printf("%.05fs total time, %.05f POSTs per second.\n", float64(elapsed)/float64(time.Second), float64(count)/float64(elapsed/time.Second))
	if csvotw != nil {
		csvotw.Write([]string{
			fmt.Sprintf("%d", stop.UnixNano()),
			fmt.Sprintf("%d", count-lastSoFar),
		})
		csvotw.Flush()
	}
}
// benchPut runs the "bench-put" command: it ensures the target container(s)
// exist, then PUTs `count` objects of `size` bytes (or a random size in
// [size, maxsize) when -maxsize > -size) of random data at the configured
// concurrency, with optional per-request and per-minute CSV reports.
func (cli *CLIInstance) benchPut(c Client, args []string) {
	if err := cli.BenchPutFlags.Parse(args); err != nil {
		cli.fatal(cli, err)
	}
	container, object := parsePath(cli.BenchPutFlags.Args())
	if container == "" {
		cli.fatalf(cli, "bench-put requires <container>\n")
	}
	if object == "" {
		object = "bench-" // default object-name prefix; the index is appended per request
	}
	containers := *cli.benchPutFlagContainers
	if containers < 1 {
		containers = 1
	}
	count := *cli.benchPutFlagCount
	if count < 1 {
		count = 1000
	}
	size := int64(*cli.benchPutFlagSize)
	if size < 0 {
		size = 4096
	}
	// maxsize is clamped up to size so the random-size range is never negative.
	maxsize := int64(*cli.benchPutFlagMaxSize)
	if maxsize < size {
		maxsize = size
	}
	// csvw, when enabled, receives one row per request; csvlk serializes the
	// concurrent workers writing to it.
	var csvw *csv.Writer
	var csvlk sync.Mutex
	if *cli.benchPutFlagCSV != "" {
		csvf, err := os.Create(*cli.benchPutFlagCSV)
		if err != nil {
			cli.fatal(cli, err)
		}
		csvw = csv.NewWriter(csvf)
		defer func() {
			csvw.Flush()
			csvf.Close()
		}()
		csvw.Write([]string{"completion_time_unix_nano", "object_name", "transaction_id", "status", "elapsed_nanoseconds"})
		csvw.Flush()
	}
	// csvotw, when enabled, receives one throughput row per minute.
	var csvotw *csv.Writer
	if *cli.benchPutFlagCSVOT != "" {
		csvotf, err := os.Create(*cli.benchPutFlagCSVOT)
		if err != nil {
			cli.fatal(cli, err)
		}
		csvotw = csv.NewWriter(csvotf)
		defer func() {
			csvotw.Flush()
			csvotf.Close()
		}()
		csvotw.Write([]string{"time_unix_nano", "count_since_last_time"})
		csvotw.Write([]string{fmt.Sprintf("%d", time.Now().UnixNano()), "0"})
		csvotw.Flush()
	}
	// Create the container (or the numbered container set) before benchmarking.
	if containers == 1 {
		fmt.Printf("Ensuring container exists...")
		cli.verbosef(cli, "PUT %s\n", container)
		resp := c.PutContainer(container, cli.globalFlagHeaders.Headers())
		if resp.StatusCode/100 != 2 {
			bodyBytes, _ := ioutil.ReadAll(resp.Body)
			resp.Body.Close()
			if *cli.globalFlagContinueOnError {
				fmt.Fprintf(os.Stderr, "PUT %s - %d %s - %s\n", container, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
			} else {
				cli.fatalf(cli, "PUT %s - %d %s - %s\n", container, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
			}
		}
		resp.Body.Close()
	} else {
		fmt.Printf("Ensuring %d containers exist...", containers)
		for x := 0; x < containers; x++ {
			putContainer := fmt.Sprintf("%s%d", container, x)
			cli.verbosef(cli, "PUT %s\n", putContainer)
			resp := c.PutContainer(putContainer, cli.globalFlagHeaders.Headers())
			if resp.StatusCode/100 != 2 {
				bodyBytes, _ := ioutil.ReadAll(resp.Body)
				resp.Body.Close()
				if *cli.globalFlagContinueOnError {
					fmt.Fprintf(os.Stderr, "PUT %s - %d %s - %s\n", putContainer, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
					continue
				} else {
					cli.fatalf(cli, "PUT %s - %d %s - %s\n", putContainer, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
				}
			}
			resp.Body.Close()
		}
	}
	fmt.Println()
	concurrency := *cli.globalFlagConcurrency
	if concurrency < 1 {
		concurrency = 1
	}
	// Indices are sent 1-based so the zero value a worker receives after
	// close(benchChan) can serve as the stop sentinel.
	benchChan := make(chan int, concurrency)
	wg := sync.WaitGroup{}
	wg.Add(concurrency)
	for x := 0; x < concurrency; x++ {
		go func() {
			// Per-worker threadsafe random source used as the object body.
			rnd := NewRand(time.Now().UnixNano())
			var start time.Time
			for {
				i := <-benchChan
				if i == 0 {
					break // channel closed and drained: stop this worker
				}
				i-- // convert back to the 0-based object index
				putContainer := container
				if containers > 1 {
					putContainer = fmt.Sprintf("%s%d", putContainer, i%containers)
				}
				putObject := fmt.Sprintf("%s%d", object, i)
				cli.verbosef(cli, "PUT %s/%s\n", putContainer, putObject)
				if csvw != nil {
					start = time.Now()
				}
				// Pick a random body size in [size, maxsize) when a range was given.
				sz := size
				if maxsize > size {
					sz += int64(rnd.Intn(int(maxsize - size)))
				}
				resp := c.PutObject(putContainer, putObject, cli.globalFlagHeaders.Headers(), &io.LimitedReader{R: rnd, N: sz})
				if csvw != nil {
					stop := time.Now()
					elapsed := stop.Sub(start).Nanoseconds()
					csvlk.Lock()
					csvw.Write([]string{
						fmt.Sprintf("%d", stop.UnixNano()),
						putContainer + "/" + putObject,
						resp.Header.Get("X-Trans-Id"),
						fmt.Sprintf("%d", resp.StatusCode),
						fmt.Sprintf("%d", elapsed),
					})
					csvw.Flush()
					csvlk.Unlock()
				}
				if resp.StatusCode/100 != 2 {
					bodyBytes, _ := ioutil.ReadAll(resp.Body)
					resp.Body.Close()
					if *cli.globalFlagContinueOnError {
						fmt.Fprintf(os.Stderr, "PUT %s/%s - %d %s - %s\n", putContainer, putObject, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
						continue
					} else {
						cli.fatalf(cli, "PUT %s/%s - %d %s - %s\n", putContainer, putObject, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
					}
				}
				resp.Body.Close()
			}
			wg.Done()
		}()
	}
	// Describe the size as a range when one was configured.
	var sz string
	if maxsize > size {
		sz = fmt.Sprintf("%d-%d", size, maxsize)
	} else {
		sz = fmt.Sprintf("%d", size)
	}
	if containers == 1 {
		fmt.Printf("Bench-PUT of %d objects, each %s bytes, into 1 container, at %d concurrency...", count, sz, concurrency)
	} else {
		fmt.Printf("Bench-PUT of %d objects, each %s bytes, distributed across %d containers, at %d concurrency...", count, sz, containers, concurrency)
	}
	ticker := time.NewTicker(time.Minute)
	start := time.Now()
	lastSoFar := 0
	// Feeder loop: push each 1-based index, emitting a progress line (and an
	// optional throughput CSV row) on each minute tick while waiting.
	for i := 1; i <= count; i++ {
		waiting := true
		for waiting {
			select {
			case <-ticker.C:
				// Approximate completed requests: queued minus roughly in-flight.
				soFar := i - concurrency
				now := time.Now()
				elapsed := now.Sub(start)
				fmt.Printf("\n%.05fs for %d PUTs so far, %.05f PUTs per second...", float64(elapsed)/float64(time.Second), soFar, float64(soFar)/float64(elapsed/time.Second))
				if csvotw != nil {
					csvotw.Write([]string{
						fmt.Sprintf("%d", now.UnixNano()),
						fmt.Sprintf("%d", soFar-lastSoFar),
					})
					csvotw.Flush()
					lastSoFar = soFar
				}
			case benchChan <- i:
				waiting = false
			}
		}
	}
	close(benchChan) // workers see the zero value and exit
	wg.Wait()
	stop := time.Now()
	elapsed := stop.Sub(start)
	ticker.Stop()
	fmt.Println()
	fmt.Printf("%.05fs total time, %.05f PUTs per second.\n", float64(elapsed)/float64(time.Second), float64(count)/float64(elapsed/time.Second))
	if csvotw != nil {
		csvotw.Write([]string{
			fmt.Sprintf("%d", stop.UnixNano()),
			fmt.Sprintf("%d", count-lastSoFar),
		})
		csvotw.Flush()
	}
}
// delet handles the "delete" command: it issues a DELETE against the object,
// container, or account named by args and aborts with the response body on
// any non-2xx status.
func (cli *CLIInstance) delet(c Client, args []string) {
	container, object := parsePath(args)
	var resp *http.Response
	switch {
	case object != "":
		resp = c.DeleteObject(container, object, cli.globalFlagHeaders.Headers())
	case container != "":
		resp = c.DeleteContainer(container, cli.globalFlagHeaders.Headers())
	default:
		resp = c.DeleteAccount(cli.globalFlagHeaders.Headers())
	}
	cli.verbosef(cli, "X-Trans-Id: %q\n", resp.Header.Get("X-Trans-Id"))
	if resp.StatusCode/100 != 2 {
		errBody, _ := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		cli.fatalf(cli, "%d %s - %s\n", resp.StatusCode, http.StatusText(resp.StatusCode), string(errBody))
	}
	resp.Body.Close()
}
// get handles the "get" command. With -r or an object path it streams the
// raw response (printing status and sorted headers first for listings);
// otherwise it prints a container or account listing as bare names (-n) or
// an aligned table.
func (cli *CLIInstance) get(c Client, args []string) {
	if err := cli.GetFlags.Parse(args); err != nil {
		cli.fatal(cli, err)
	}
	container, object := parsePath(cli.GetFlags.Args())
	// Raw mode (or any object GET): issue the request directly and stream the body.
	if *cli.getFlagRaw || object != "" {
		var resp *http.Response
		if object != "" {
			resp = c.GetObject(container, object, cli.globalFlagHeaders.Headers())
		} else if container != "" {
			resp = c.GetContainerRaw(container, *cli.getFlagMarker, *cli.getFlagEndMarker, *cli.getFlagLimit, *cli.getFlagPrefix, *cli.getFlagDelimiter, *cli.getFlagReverse, cli.globalFlagHeaders.Headers())
		} else {
			resp = c.GetAccountRaw(*cli.getFlagMarker, *cli.getFlagEndMarker, *cli.getFlagLimit, *cli.getFlagPrefix, *cli.getFlagDelimiter, *cli.getFlagReverse, cli.globalFlagHeaders.Headers())
		}
		cli.verbosef(cli, "X-Trans-Id: %q\n", resp.Header.Get("X-Trans-Id"))
		if resp.StatusCode/100 != 2 {
			bodyBytes, _ := ioutil.ReadAll(resp.Body)
			resp.Body.Close()
			cli.fatalf(cli, "%d %s - %s\n", resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
		}
		// For raw listings (not plain object GETs), print the status line and
		// response headers sorted by name before the body.
		if *cli.getFlagRaw || object == "" {
			data := [][]string{}
			ks := []string{}
			for k := range resp.Header {
				ks = append(ks, k)
			}
			sort.Strings(ks)
			for _, k := range ks {
				for _, v := range resp.Header[k] {
					data = append(data, []string{k + ":", v})
				}
			}
			fmt.Println(resp.StatusCode, http.StatusText(resp.StatusCode))
			opts := brimtext.NewDefaultAlignOptions()
			fmt.Print(brimtext.Align(data, opts))
		}
		// Stream the response body straight to stdout.
		if _, err := io.Copy(os.Stdout, resp.Body); err != nil {
			cli.fatal(cli, err)
		}
		return
	}
	// Parsed container listing: table of entries or names only.
	if container != "" {
		entries, resp := c.GetContainer(container, *cli.getFlagMarker, *cli.getFlagEndMarker, *cli.getFlagLimit, *cli.getFlagPrefix, *cli.getFlagDelimiter, *cli.getFlagReverse, cli.globalFlagHeaders.Headers())
		cli.verbosef(cli, "X-Trans-Id: %q\n", resp.Header.Get("X-Trans-Id"))
		if resp.StatusCode/100 != 2 {
			bodyBytes, _ := ioutil.ReadAll(resp.Body)
			resp.Body.Close()
			cli.fatalf(cli, "%d %s - %s\n", resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
		}
		if *cli.getFlagNameOnly {
			for _, entry := range entries {
				// Entries with Subdir set are delimiter pseudo-directories.
				if entry.Subdir != "" {
					fmt.Println(entry.Subdir)
				} else {
					fmt.Println(entry.Name)
				}
			}
		} else {
			var data [][]string
			data = [][]string{{"Name", "Bytes", "Content Type", "Last Modified", "Hash"}}
			for _, entry := range entries {
				if entry.Subdir != "" {
					data = append(data, []string{entry.Subdir, "", "", "", ""})
				} else {
					data = append(data, []string{entry.Name, fmt.Sprintf("%d", entry.Bytes), entry.ContentType, entry.LastModified, entry.Hash})
				}
			}
			fmt.Print(brimtext.Align(data, nil))
		}
		return
	}
	// Parsed account listing: table of containers or names only.
	entries, resp := c.GetAccount(*cli.getFlagMarker, *cli.getFlagEndMarker, *cli.getFlagLimit, *cli.getFlagPrefix, *cli.getFlagDelimiter, *cli.getFlagReverse, cli.globalFlagHeaders.Headers())
	cli.verbosef(cli, "X-Trans-Id: %q\n", resp.Header.Get("X-Trans-Id"))
	if resp.StatusCode/100 != 2 {
		bodyBytes, _ := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		cli.fatalf(cli, "%d %s - %s\n", resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
	}
	if *cli.getFlagNameOnly {
		for _, entry := range entries {
			fmt.Println(entry.Name)
		}
	} else {
		var data [][]string
		data = [][]string{{"Name", "Count", "Bytes"}}
		for _, entry := range entries {
			data = append(data, []string{entry.Name, fmt.Sprintf("%d", entry.Count), fmt.Sprintf("%d", entry.Bytes)})
		}
		fmt.Print(brimtext.Align(data, nil))
	}
	return
}
// head handles the "head" command: it issues a HEAD against the object,
// container, or account named by args, then prints the status line and the
// response headers sorted by name.
func (cli *CLIInstance) head(c Client, args []string) {
	container, object := parsePath(args)
	var resp *http.Response
	if object != "" {
		resp = c.HeadObject(container, object, cli.globalFlagHeaders.Headers())
	} else if container != "" {
		resp = c.HeadContainer(container, cli.globalFlagHeaders.Headers())
	} else {
		resp = c.HeadAccount(cli.globalFlagHeaders.Headers())
	}
	cli.verbosef(cli, "X-Trans-Id: %q\n", resp.Header.Get("X-Trans-Id"))
	bodyBytes, _ := ioutil.ReadAll(resp.Body)
	resp.Body.Close()
	if resp.StatusCode/100 != 2 {
		cli.fatalf(cli, "%d %s - %s\n", resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
	}
	// Collect and sort header names for stable output. (A previous revision
	// kept an identity map from sorted name back to original name; since the
	// keys were used unmodified, indexing resp.Header directly is equivalent.)
	data := [][]string{}
	ks := []string{}
	for k := range resp.Header {
		ks = append(ks, k)
	}
	sort.Strings(ks)
	for _, k := range ks {
		for _, v := range resp.Header[k] {
			data = append(data, []string{k + ":", v})
		}
	}
	fmt.Println(resp.StatusCode, http.StatusText(resp.StatusCode))
	fmt.Print(brimtext.Align(data, brimtext.NewDefaultAlignOptions()))
}
// put handles the "put" command: it PUTs an object (reading the body from
// stdin), a container, or the account, and aborts with the response body on
// any non-2xx status.
func (cli *CLIInstance) put(c Client, args []string) {
	container, object := parsePath(args)
	var resp *http.Response
	switch {
	case object != "":
		resp = c.PutObject(container, object, cli.globalFlagHeaders.Headers(), os.Stdin)
	case container != "":
		resp = c.PutContainer(container, cli.globalFlagHeaders.Headers())
	default:
		resp = c.PutAccount(cli.globalFlagHeaders.Headers())
	}
	cli.verbosef(cli, "X-Trans-Id: %q\n", resp.Header.Get("X-Trans-Id"))
	if resp.StatusCode/100 != 2 {
		errBody, _ := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		cli.fatalf(cli, "%d %s - %s\n", resp.StatusCode, http.StatusText(resp.StatusCode), string(errBody))
	}
	resp.Body.Close()
}
// post handles the "post" command: it POSTs against the object, container,
// or account named by args and aborts with the response body on any non-2xx
// status.
func (cli *CLIInstance) post(c Client, args []string) {
	container, object := parsePath(args)
	var resp *http.Response
	switch {
	case object != "":
		resp = c.PostObject(container, object, cli.globalFlagHeaders.Headers())
	case container != "":
		resp = c.PostContainer(container, cli.globalFlagHeaders.Headers())
	default:
		resp = c.PostAccount(cli.globalFlagHeaders.Headers())
	}
	cli.verbosef(cli, "X-Trans-Id: %q\n", resp.Header.Get("X-Trans-Id"))
	if resp.StatusCode/100 != 2 {
		errBody, _ := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		cli.fatalf(cli, "%d %s - %s\n", resp.StatusCode, http.StatusText(resp.StatusCode), string(errBody))
	}
	resp.Body.Close()
}
// upload handles the "upload" command: it PUTs a local file, or a directory
// tree walked concurrently, into the given container (defaulting to the
// basename of the current working directory), creating the container first.
func (cli *CLIInstance) upload(c Client, args []string) {
	if len(args) == 0 {
		cli.fatalf(cli, "<sourcepath> is required for upload.\n")
	}
	sourcepath := args[0]
	container, object := parsePath(args[1:])
	// Default the container to the current directory's basename.
	if container == "" {
		abscwd, err := filepath.Abs(".")
		if err != nil {
			cli.fatalf(cli, "Could not determine current working directory: %s\n", err)
		}
		container = filepath.Base(abscwd)
	}
	cli.verbosef(cli, "Ensuring container %q exists.\n", container)
	resp := c.PutContainer(container, cli.globalFlagHeaders.Headers())
	cli.verbosef(cli, "X-Trans-Id: %q\n", resp.Header.Get("X-Trans-Id"))
	if resp.StatusCode/100 != 2 {
		bodyBytes, _ := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		cli.fatalf(cli, "PUT %s - %d %s - %s\n", container, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
	}
	resp.Body.Close()
	// uploadfn PUTs one local file; when appendPath is true the local path is
	// appended to the object prefix (used for directory walks).
	uploadfn := func(path string, appendPath bool) {
		opath := object
		if appendPath {
			opath += path
		}
		cli.verbosef(cli, "Uploading %q to %q %q.\n", path, container, opath)
		f, err := os.Open(path)
		if err != nil {
			if *cli.globalFlagContinueOnError {
				fmt.Fprintf(os.Stderr, "Cannot open %s while attempting to upload to %s/%s: %s\n", path, container, opath, err)
				return
			} else {
				cli.fatalf(cli, "Cannot open %s while attempting to upload to %s/%s: %s\n", path, container, opath, err)
			}
		}
		resp := c.PutObject(container, opath, cli.globalFlagHeaders.Headers(), f)
		cli.verbosef(cli, "X-Trans-Id: %q\n", resp.Header.Get("X-Trans-Id"))
		if resp.StatusCode/100 != 2 {
			bodyBytes, _ := ioutil.ReadAll(resp.Body)
			resp.Body.Close()
			f.Close()
			if *cli.globalFlagContinueOnError {
				fmt.Fprintf(os.Stderr, "PUT %s/%s - %d %s - %s\n", container, opath, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
				return
			} else {
				cli.fatalf(cli, "PUT %s/%s - %d %s - %s\n", container, opath, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
			}
		}
		resp.Body.Close()
		f.Close()
	}
	fi, err := os.Stat(sourcepath)
	if err != nil {
		cli.fatalf(cli, "Could not stat %s: %s\n", sourcepath, err)
	}
	// This "if" is so a single file upload that happens to be a symlink will work.
	if fi.Mode().IsRegular() {
		uploadfn(sourcepath, false)
	} else {
		concurrency := *cli.globalFlagConcurrency
		if concurrency < 1 {
			concurrency = 1
		}
		uploadChan := make(chan string, concurrency-1)
		wg := sync.WaitGroup{}
		wg.Add(concurrency)
		for i := 0; i < concurrency; i++ {
			go func() {
				// Workers upload paths until they receive the empty-string
				// zero value produced by close(uploadChan).
				for {
					path := <-uploadChan
					if path == "" {
						break
					}
					uploadfn(path, true)
				}
				wg.Done()
			}()
		}
		// This "if" is to handle when the user-given path is a symlink to a directory; we normally want to skip symlinks, but not in this initial case.
		if !strings.HasSuffix(sourcepath, string(os.PathSeparator)) {
			sourcepath += string(os.PathSeparator)
		}
		// Queue every regular file under sourcepath; directories, symlinks,
		// and unreadable entries are skipped.
		filepath.Walk(sourcepath, func(path string, info os.FileInfo, err error) error {
			if err != nil || !info.Mode().IsRegular() {
				return nil
			}
			uploadChan <- path
			return nil
		})
		close(uploadChan)
		wg.Wait()
	}
}
// download handles the "download" command: it GETs an object, a whole
// container, or (with -a) the whole account into destpath. Work is fed to a
// worker pool through downloadChan; a task with an empty object name means
// "list this container and enqueue one task per object in it".
func (cli *CLIInstance) download(c Client, args []string) {
	if err := cli.DownloadFlags.Parse(args); err != nil {
		cli.fatal(cli, err)
	}
	args = cli.DownloadFlags.Args()
	if len(args) == 0 {
		cli.fatalf(cli, "<destpath> is required for download.\n")
	}
	destpath := args[len(args)-1]
	container, object := parsePath(args[:len(args)-1])
	concurrency := *cli.globalFlagConcurrency
	// Need at least 2 to queue object downloads while reading a container listing.
	if concurrency < 2 {
		concurrency = 2
	}
	type downloadTask struct {
		container string // source container
		object    string // source object; "" means expand the container listing
		destpath  string // local destination path for this task
	}
	downloadChan := make(chan *downloadTask, concurrency-1)
	// dirExists caches directories already created so workers avoid repeated
	// MkdirAll calls; dirExistsLock guards it.
	var dirExistsLock sync.Mutex
	dirExists := map[string]bool{}
	// taskWG tracks worker goroutines; containerWG tracks outstanding
	// container-listing tasks so we know when no more tasks will be enqueued.
	taskWG := sync.WaitGroup{}
	taskWG.Add(concurrency)
	containerWG := sync.WaitGroup{}
	for i := 0; i < concurrency; i++ {
		go func() {
			// Workers run until they receive the nil zero value produced by
			// close(downloadChan).
			for {
				task := <-downloadChan
				if task == nil {
					break
				}
				// Container task: list it and enqueue one task per object.
				// NOTE(review): these sends go back into the same channel the
				// workers consume; draining relies on the buffer plus the
				// other workers — confirm capacity assumptions hold for very
				// large listings.
				if task.object == "" {
					entries, resp := c.GetContainer(task.container, "", "", 0, "", "", false, cli.globalFlagHeaders.Headers())
					cli.verbosef(cli, "X-Trans-Id: %q\n", resp.Header.Get("X-Trans-Id"))
					if resp.StatusCode/100 != 2 {
						bodyBytes, _ := ioutil.ReadAll(resp.Body)
						resp.Body.Close()
						containerWG.Done()
						if *cli.globalFlagContinueOnError {
							fmt.Fprintf(os.Stderr, "GET %s - %d %s - %s\n", task.container, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
							continue
						} else {
							cli.fatalf(cli, "GET %s - %d %s - %s\n", task.container, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
						}
					}
					resp.Body.Close()
					for _, entry := range entries {
						if entry.Name != "" {
							downloadChan <- &downloadTask{container: task.container, object: entry.Name, destpath: filepath.Join(task.destpath, filepath.FromSlash(entry.Name))}
						}
					}
					containerWG.Done()
					continue
				}
				// Object task: ensure the destination directory exists, then
				// stream the object to the local file.
				cli.verbosef(cli, "Downloading %s/%s to %s.\n", task.container, task.object, task.destpath)
				if dstdr := filepath.Dir(task.destpath); dstdr != "." {
					dirExistsLock.Lock()
					if !dirExists[dstdr] {
						if err := os.MkdirAll(dstdr, 0755); err != nil {
							if *cli.globalFlagContinueOnError {
								fmt.Fprintf(os.Stderr, "Could not make directory path %s: %s\n", dstdr, err)
							} else {
								cli.fatalf(cli, "Could not make directory path %s: %s\n", dstdr, err)
							}
						}
						dirExists[dstdr] = true
					}
					dirExistsLock.Unlock()
				}
				f, err := os.Create(task.destpath)
				if err != nil {
					if *cli.globalFlagContinueOnError {
						fmt.Fprintf(os.Stderr, "Could not create %s: %s\n", task.destpath, err)
						continue
					} else {
						cli.fatalf(cli, "Could not create %s: %s\n", task.destpath, err)
					}
				}
				resp := c.GetObject(task.container, task.object, cli.globalFlagHeaders.Headers())
				cli.verbosef(cli, "X-Trans-Id: %q\n", resp.Header.Get("X-Trans-Id"))
				if resp.StatusCode/100 != 2 {
					bodyBytes, _ := ioutil.ReadAll(resp.Body)
					resp.Body.Close()
					f.Close()
					if *cli.globalFlagContinueOnError {
						fmt.Fprintf(os.Stderr, "GET %s/%s - %d %s - %s\n", task.container, task.object, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
						continue
					} else {
						cli.fatalf(cli, "GET %s/%s - %d %s - %s\n", task.container, task.object, resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
					}
				}
				if _, err = io.Copy(f, resp.Body); err != nil {
					resp.Body.Close()
					f.Close()
					if *cli.globalFlagContinueOnError {
						fmt.Fprintf(os.Stderr, "Could not complete content transfer from %s/%s to %s: %s\n", task.container, task.object, task.destpath, err)
						continue
					} else {
						cli.fatalf(cli, "Could not complete content transfer from %s/%s to %s: %s\n", task.container, task.object, task.destpath, err)
					}
				}
				resp.Body.Close()
				f.Close()
			}
			taskWG.Done()
		}()
	}
	// Seed the initial task(s) depending on what was requested.
	if object != "" {
		// Single object; if destpath is an existing directory, place the
		// object inside it under its own name.
		fi, err := os.Stat(destpath)
		if err != nil {
			if !os.IsNotExist(err) {
				cli.fatalf(cli, "Could not stat %s: %s\n", destpath, err)
			}
		} else if fi.IsDir() {
			destpath = filepath.Join(destpath, object)
		}
		downloadChan <- &downloadTask{container: container, object: object, destpath: destpath}
	} else if container != "" {
		// Whole container; destpath must be a directory if it exists.
		fi, err := os.Stat(destpath)
		if err != nil {
			if !os.IsNotExist(err) {
				cli.fatalf(cli, "Could not stat %s: %s\n", destpath, err)
			}
		} else if !fi.IsDir() {
			cli.fatalf(cli, "Cannot download a container to a single file: %s\n", destpath)
		}
		containerWG.Add(1)
		downloadChan <- &downloadTask{container: container, object: "", destpath: destpath}
	} else if !*cli.downloadFlagAccount {
		cli.fatalf(cli, "You must specify -a if you wish to download the entire account.\n")
	} else {
		// Whole account: list the containers and enqueue a listing task each.
		fi, err := os.Stat(destpath)
		if err != nil {
			if !os.IsNotExist(err) {
				cli.fatalf(cli, "Could not stat %s: %s\n", destpath, err)
			}
		} else if !fi.IsDir() {
			cli.fatalf(cli, "Cannot download an account to a single file: %s\n", destpath)
		}
		entries, resp := c.GetAccount("", "", 0, "", "", false, cli.globalFlagHeaders.Headers())
		cli.verbosef(cli, "X-Trans-Id: %q\n", resp.Header.Get("X-Trans-Id"))
		if resp.StatusCode/100 != 2 {
			bodyBytes, _ := ioutil.ReadAll(resp.Body)
			resp.Body.Close()
			cli.fatalf(cli, "GET - %d %s - %s\n", resp.StatusCode, http.StatusText(resp.StatusCode), string(bodyBytes))
		}
		resp.Body.Close()
		for _, entry := range entries {
			if entry.Name != "" {
				containerWG.Add(1)
				downloadChan <- &downloadTask{container: entry.Name, object: "", destpath: filepath.Join(destpath, entry.Name)}
			}
		}
	}
	// Wait for all container expansions, then signal and wait for workers.
	containerWG.Wait()
	close(downloadChan)
	taskWG.Wait()
}
// parsePath joins args into a single slash-separated path (inserting "/"
// between pieces unless the accumulated path already ends with one) and
// splits it at the first slash into (container, object). Either part may be
// empty.
func parsePath(args []string) (string, string) {
	if len(args) == 0 {
		return "", ""
	}
	joined := ""
	for _, piece := range args {
		switch {
		case joined == "":
			joined = piece
		case strings.HasSuffix(joined, "/"):
			joined += piece
		default:
			joined += "/" + piece
		}
	}
	if slash := strings.Index(joined, "/"); slash >= 0 {
		return joined[:slash], joined[slash+1:]
	}
	return joined, ""
}
// stringListFlag collects the values of a repeatable command-line flag.
type stringListFlag []string

// Set appends value to the list; it never returns an error.
func (slf *stringListFlag) Set(value string) error {
	*slf = append(*slf, value)
	return nil
}

// String joins the collected values with single spaces.
func (slf *stringListFlag) String() string {
	return strings.Join(*slf, " ")
}

// Headers parses each collected "Name: Value" entry into a map. Entries
// without a colon map to the empty string; both sides are whitespace-trimmed.
func (slf *stringListFlag) Headers() map[string]string {
	headers := map[string]string{}
	for _, entry := range *slf {
		pieces := strings.SplitN(entry, ":", 2)
		name := strings.TrimSpace(pieces[0])
		if len(pieces) == 2 {
			headers[name] = strings.TrimSpace(pieces[1])
		} else {
			headers[name] = ""
		}
	}
	return headers
}
// lockedSource guards a rand.Source64 with a mutex so it can be shared by
// multiple goroutines concurrently; it mirrors math/rand's internal
// lockedSource.
type lockedSource struct {
	mut sync.Mutex
	src rand.Source64
}

// NewRand returns a *rand.Rand backed by a threadsafe source seeded with seed.
func NewRand(seed int64) *rand.Rand {
	return rand.New(&lockedSource{src: rand.NewSource(seed).(rand.Source64)})
}

// Int63 implements rand.Source.
func (r *lockedSource) Int63() int64 {
	r.mut.Lock()
	defer r.mut.Unlock()
	return r.src.Int63()
}

// Uint64 implements rand.Source64.
func (r *lockedSource) Uint64() uint64 {
	r.mut.Lock()
	defer r.mut.Unlock()
	return r.src.Uint64()
}

// Seed implements rand.Source.
func (r *lockedSource) Seed(seed int64) {
	r.mut.Lock()
	defer r.mut.Unlock()
	r.src.Seed(seed)
}
|
package ants
import (
"runtime"
"time"
)
// goWorker is the task executor, similar to Java's Runnable: it owns a
// goroutine that runs the functions delivered on its task channel.
type goWorker struct {
	pool *Pool // the pool this worker belongs to
	task chan func() // channel delivering the functions to execute
	recycleTime time.Time // expiry time; when the worker was last put back — confirm against the pool's purge logic
}
// run starts the worker goroutine, which executes functions received on
// w.task until it receives a nil function (stop sentinel) or the pool
// declines to take the worker back.
func (w *goWorker) run() {
	// running-worker count + 1
	w.pool.increaseRunning()
	go func() {
		// Clean up when the goroutine exits, normally or via panic.
		defer func() {
			// running-worker count - 1
			w.pool.decreaseRunning()
			// Put the worker back into the sync.Pool cache so the next
			// retrieveGoWorker can reuse it.
			w.pool.workerCache.Put(w)
			if p := recover(); p != nil {
				// Delegate to the configured PanicHandler if any; otherwise
				// log the panic value and its stack trace.
				if ph := w.pool.options.PanicHandler; ph != nil {
					ph(p)
				} else {
					w.pool.options.Logger.Printf("worker exits from a panic: %v \n", p)
					var buf [4096]byte
					n := runtime.Stack(buf[:], false) // capture the panic's stack trace
					w.pool.options.Logger.Printf("worker exits from panic: %s\n", string(buf[:n]))
				}
			}
		}()
		// Receive whole zero-argument functions over the task channel.
		for f := range w.task {
			if f == nil {
				// nil is the stop sentinel: end this worker goroutine.
				return
			}
			// Run the submitted task.
			f()
			// Hand the worker back to the pool's queue; exit if the pool
			// refuses it — presumably when closed or over capacity, confirm
			// in Pool.revertWorker.
			if ok := w.pool.revertWorker(w); !ok {
				return
			}
		}
	}()
}
|
package setting
import (
"io/ioutil"
yaml "gopkg.in/yaml.v2"
)
// ServerSettingType holds the properties read from setting.yml; each field's
// yaml tag is the key name used in that file.
type ServerSettingType struct {
	Port string `yaml:"Port"`
	Debug bool `yaml:"Debug"`
	TextdataDir string `yaml:"TextdataDir"`
	DBHost string `yaml:"DBHost"`
	DBPort string `yaml:"DBPort"`
	DBUser string `yaml:"DBUser"`
	DBPass string `yaml:"DBPass"`
	DBName string `yaml:"DBName"`
	AuthDisabled bool `yaml:"AuthDisabled"`
}

// ServerSetting holds the loaded settings; it is nil until LoadSetting
// succeeds.
var ServerSetting *ServerSettingType
// LoadSetting reads ./setting.yml from the working directory, unmarshals it,
// and on success points ServerSetting at the result. It returns any read or
// parse error, leaving ServerSetting untouched on failure.
func LoadSetting() error {
	raw, err := ioutil.ReadFile(`./setting.yml`)
	if err != nil {
		return err
	}
	loaded := ServerSettingType{}
	if err := yaml.Unmarshal(raw, &loaded); err != nil {
		return err
	}
	ServerSetting = &loaded
	return nil
}
|
// Copyright 2020 Thomas.Hoehenleitner [at] seerose.net
// Use of this source code is governed by a license that can be found in the LICENSE file.
package decoder
import (
"fmt"
"io"
"github.com/rokath/trice/internal/id"
)
// Bare is the Decoder instance for bare encoded trices.
type Bare struct {
	Decoding
	payload []int // accumulated 16-bit payload values (0xffff&head) for the trice being assembled
}
// NewBareDecoder provides a BareDecoder instance.
// l is the trice id list in slice of struct format.
// in is the usable reader for the input bytes.
// endian selects the byte order used when reading 32-bit atoms — confirm the
// true/false mapping at the call site.
func NewBareDecoder(l []id.Item, in io.Reader, endian bool) Decoding {
	p := &Bare{}
	p.in = in
	p.syncBuffer = make([]byte, 0, defaultSize) // holds unprocessed bytes between Read calls
	p.lut = MakeLut(l) // id -> trice item look-up table
	p.endian = endian
	// NOTE(review): this returns the embedded Decoding value rather than p
	// itself, so the *Bare Read method is not reachable through the returned
	// value — confirm this is intended.
	return p.Decoding
}
// Read is the provided read method for bare decoding of the next string as a
// byte slice. It uses the inner reader p.in and the internal id look-up table
// to fill b with a string.
// b is a slice of bytes with a len for the max expected string size.
// n is the count of read bytes inside b.
// Read returns one trice string (optionally starting with a channel
// specifier). A line can contain several trice strings.
func (p *Bare) Read(b []byte) (n int, err error) {
	sizeMsg := fmt.Sprintln("e:buf too small, expecting", defaultSize, "bytes.")
	if len(b) < len(sizeMsg) {
		return // b cannot even hold the error message; report nothing
	}
	if len(b) < defaultSize {
		n = copy(b, sizeMsg) // tell the caller its buffer is too small
		return
	}
	p.b = b
	// fill intermediate read buffer for pack encoding
	n, err = p.in.Read(b) // use b as intermediate buffer to avoid allocation
	// p.syncBuffer can contain unprocessed bytes from last call.
	p.syncBuffer = append(p.syncBuffer, b[:n]...) // merge with leftovers
	n = 0
	if nil != err && io.EOF != err {
		return
	}
	// Even err could be io.EOF some valid data possibly in p.syncBuffer.
	// In case of file input (JLINK usage) a plug off is not detectable here.
	for {
		if len(p.syncBuffer) < 4 {
			return // wait for more input; a bare atom is 4 bytes
		}
		p.bc = 4
		head := int(p.readU32(p.syncBuffer[0:4]))
		if 0x89abcdef == uint(head) { // sync trice
			p.rub(4) // drop the sync pattern and continue scanning
			continue
		}
		triceID := head >> 16                      // 2 msb bytes are the ID
		p.payload = append(p.payload, 0xffff&head) // next 2 bytes are payload
		if 8 < len(p.payload) {
			// Capture the length before resetting; previously this message
			// always reported 0 because the slice was truncated first.
			overflow := len(p.payload)
			p.payload = p.payload[:0]
			return p.outOfSync(fmt.Sprintf("too much payload data %d", overflow))
		}
		if 0 == triceID {
			// ID 0 marks a payload-only atom: keep accumulating payload.
			p.rub(4)
			continue
		}
		var ok bool
		p.trice, ok = p.lut[triceID] // check lookup table
		if !ok {
			p.payload = p.payload[:0]
			return p.outOfSync(fmt.Sprintf("unknown triceID %5d", triceID))
		}
		if !p.payloadLenOk() {
			p.payload = p.payload[:0]
			// Fixed message typo: was "unecpected".
			return p.outOfSync(fmt.Sprintf("unexpected payload len %d", p.expectedPayloadLen()))
		}
		p.rub(4)
		// ID and count are ok
		return p.sprintTrice()
	}
}
// payloadLenOk reports whether the received payload count matches the count
// expected for the current trice type, or the type has no fixed count (-1).
func (p *Bare) payloadLenOk() bool {
	switch want := p.expectedPayloadLen(); want {
	case -1:
		return true // variable-length type: any count is acceptable
	default:
		return len(p.payload) == want
	}
}
// expectedPayloadLen returns the expected payload word count for the current
// trice type (each word carries 2 bytes of arguments).
// It returns -1 for an unknown (variable) count and -2 for an unknown trice type.
func (p *Bare) expectedPayloadLen() int {
	switch p.trice.Type {
	case "TRICE0", "TRICE8_1", "TRICE8_2", "TRICE16_1":
		return 1 // up to 2 argument bytes fit into one word
	case "TRICE8_3", "TRICE8_4", "TRICE16_2", "TRICE32_1":
		return 2
	case "TRICE8_5", "TRICE8_6", "TRICE16_3":
		return 3
	case "TRICE8_7", "TRICE8_8", "TRICE16_4", "TRICE32_2", "TRICE64_1":
		return 4
	case "TRICE32_3":
		return 6
	case "TRICE32_4", "TRICE64_2":
		return 8
	case "TRICE_S":
		return -1 // unknown count
	default:
		return -2 // unknown trice type
	}
}
// sprintTrice generates the trice string by dispatching on the trice type to
// the matching formatting helper. Each helper writes into p.b and clears the
// accumulated payload.
func (p *Bare) sprintTrice() (n int, err error) {
	switch p.trice.Type {
	case "TRICE0":
		return p.trice0()
	case "TRICE8_1":
		return p.trice81()
	case "TRICE8_2":
		return p.trice82()
	case "TRICE8_3":
		return p.trice83()
	case "TRICE8_4":
		return p.trice84()
	case "TRICE8_5":
		return p.trice85()
	case "TRICE8_6":
		return p.trice86()
	case "TRICE8_7":
		return p.trice87()
	case "TRICE8_8":
		return p.trice88()
	case "TRICE16_1":
		return p.trice161()
	case "TRICE16_2":
		return p.trice162()
	case "TRICE16_3":
		return p.trice163()
	case "TRICE16_4":
		return p.trice164()
	case "TRICE32_1":
		return p.trice321()
	case "TRICE32_2":
		return p.trice322()
	case "TRICE32_3":
		return p.trice323()
	case "TRICE32_4":
		return p.trice324()
	case "TRICE64_1":
		return p.trice641()
	case "TRICE64_2":
		return p.trice642()
	}
	// TRICE_S and anything unknown end up here.
	return p.outOfSync(fmt.Sprintf("Unexpected trice.Type %s", p.trice.Type))
}
// trice0 emits a TRICE0 format string, which by definition carries no
// arguments, and clears the payload.
func (p *Bare) trice0() (n int, e error) {
	// A TRICE0 string has no format verbs, so copy it directly. The previous
	// fmt.Sprintf(p.trice.Strg) call (non-constant format string, no args) is
	// flagged by go vet and would mangle any accidental '%' in the string.
	n = copy(p.b, p.trice.Strg)
	p.payload = p.payload[:0]
	return
}
// trice81 formats one int8 argument taken from the low byte of the payload word.
func (p *Bare) trice81() (n int, e error) {
	// TODO: evaluate p.trice.Strg for %u, change to %d and use uint8 then.
	arg := int8(p.payload[0])
	n = copy(p.b, fmt.Sprintf(p.trice.Strg, arg))
	p.payload = p.payload[:0]
	return
}
// trice82 formats two int8 arguments packed high/low into one payload word.
func (p *Bare) trice82() (n int, e error) {
	hi := func(i int) int8 { return int8(p.payload[i] >> 8) }
	lo := func(i int) int8 { return int8(p.payload[i]) }
	n = copy(p.b, fmt.Sprintf(p.trice.Strg, hi(0), lo(0)))
	p.payload = p.payload[:0]
	return
}
// trice83 formats three int8 arguments (two packed words, last word low byte only).
func (p *Bare) trice83() (n int, e error) {
	hi := func(i int) int8 { return int8(p.payload[i] >> 8) }
	lo := func(i int) int8 { return int8(p.payload[i]) }
	n = copy(p.b, fmt.Sprintf(p.trice.Strg, hi(0), lo(0), lo(1)))
	p.payload = p.payload[:0]
	return
}
// trice84 formats four int8 arguments, two per payload word.
func (p *Bare) trice84() (n int, e error) {
	hi := func(i int) int8 { return int8(p.payload[i] >> 8) }
	lo := func(i int) int8 { return int8(p.payload[i]) }
	n = copy(p.b, fmt.Sprintf(p.trice.Strg, hi(0), lo(0), hi(1), lo(1)))
	p.payload = p.payload[:0]
	return
}
// trice85 formats five int8 arguments (last word carries only a low byte).
func (p *Bare) trice85() (n int, e error) {
	hi := func(i int) int8 { return int8(p.payload[i] >> 8) }
	lo := func(i int) int8 { return int8(p.payload[i]) }
	n = copy(p.b, fmt.Sprintf(p.trice.Strg, hi(0), lo(0), hi(1), lo(1), lo(2)))
	p.payload = p.payload[:0]
	return
}
// trice86 formats six int8 arguments, two per payload word.
func (p *Bare) trice86() (n int, e error) {
	hi := func(i int) int8 { return int8(p.payload[i] >> 8) }
	lo := func(i int) int8 { return int8(p.payload[i]) }
	n = copy(p.b, fmt.Sprintf(p.trice.Strg, hi(0), lo(0), hi(1), lo(1), hi(2), lo(2)))
	p.payload = p.payload[:0]
	return
}
// trice87 formats seven int8 arguments (last word carries only a low byte).
func (p *Bare) trice87() (n int, e error) {
	hi := func(i int) int8 { return int8(p.payload[i] >> 8) }
	lo := func(i int) int8 { return int8(p.payload[i]) }
	n = copy(p.b, fmt.Sprintf(p.trice.Strg, hi(0), lo(0), hi(1), lo(1), hi(2), lo(2), lo(3)))
	p.payload = p.payload[:0]
	return
}
// trice88 formats eight int8 arguments, two per payload word.
func (p *Bare) trice88() (n int, e error) {
	hi := func(i int) int8 { return int8(p.payload[i] >> 8) }
	lo := func(i int) int8 { return int8(p.payload[i]) }
	n = copy(p.b, fmt.Sprintf(p.trice.Strg, hi(0), lo(0), hi(1), lo(1), hi(2), lo(2), hi(3), lo(3)))
	p.payload = p.payload[:0]
	return
}
// trice161 formats one int16 argument, one payload word.
func (p *Bare) trice161() (n int, e error) {
	hw := func(i int) int16 { return int16(p.payload[i]) }
	n = copy(p.b, fmt.Sprintf(p.trice.Strg, hw(0)))
	p.payload = p.payload[:0]
	return
}
// trice162 formats two int16 arguments, one per payload word.
func (p *Bare) trice162() (n int, e error) {
	hw := func(i int) int16 { return int16(p.payload[i]) }
	n = copy(p.b, fmt.Sprintf(p.trice.Strg, hw(0), hw(1)))
	p.payload = p.payload[:0]
	return
}
// trice163 formats three int16 arguments, one per payload word.
func (p *Bare) trice163() (n int, e error) {
	hw := func(i int) int16 { return int16(p.payload[i]) }
	n = copy(p.b, fmt.Sprintf(p.trice.Strg, hw(0), hw(1), hw(2)))
	p.payload = p.payload[:0]
	return
}
// trice164 formats four int16 arguments, one per payload word.
func (p *Bare) trice164() (n int, e error) {
	hw := func(i int) int16 { return int16(p.payload[i]) }
	n = copy(p.b, fmt.Sprintf(p.trice.Strg, hw(0), hw(1), hw(2), hw(3)))
	p.payload = p.payload[:0]
	return
}
// trice321 formats one int32 argument assembled from two 16-bit payload words.
func (p *Bare) trice321() (n int, e error) {
	word := int32(p.payload[0])<<16 | int32(p.payload[1])
	n = copy(p.b, fmt.Sprintf(p.trice.Strg, word))
	p.payload = p.payload[:0]
	return
}
// trice322 formats two int32 arguments, each assembled from two payload words.
func (p *Bare) trice322() (n int, e error) {
	word := func(hi, lo int) int32 { return int32(p.payload[hi]<<16 | p.payload[lo]) }
	n = copy(p.b, fmt.Sprintf(p.trice.Strg, word(0, 1), word(2, 3)))
	p.payload = p.payload[:0]
	return
}
// trice323 formats three int32 arguments, each assembled from two payload words.
func (p *Bare) trice323() (n int, e error) {
	word := func(hi, lo int) int32 { return int32(p.payload[hi]<<16 | p.payload[lo]) }
	n = copy(p.b, fmt.Sprintf(p.trice.Strg, word(0, 1), word(2, 3), word(4, 5)))
	p.payload = p.payload[:0]
	return
}
// trice324 formats four int32 arguments, each assembled from two payload words.
func (p *Bare) trice324() (n int, e error) {
	word := func(hi, lo int) int32 { return int32(p.payload[hi]<<16 | p.payload[lo]) }
	n = copy(p.b, fmt.Sprintf(p.trice.Strg, word(0, 1), word(2, 3), word(4, 5), word(6, 7)))
	p.payload = p.payload[:0]
	return
}
// trice641 formats one int64 argument assembled from four 16-bit payload words.
func (p *Bare) trice641() (n int, e error) {
	// Widen each word to int64 BEFORE shifting: the previous code shifted a
	// plain int by 48/32 bits, which loses the bits entirely on platforms
	// where int is 32 bits wide.
	d0 := int64(p.payload[0])<<48 | int64(p.payload[1])<<32 | int64(p.payload[2])<<16 | int64(p.payload[3])
	n = copy(p.b, fmt.Sprintf(p.trice.Strg, d0))
	p.payload = p.payload[:0]
	return
}
// trice642 formats two int64 arguments, each assembled from four payload words.
func (p *Bare) trice642() (n int, e error) {
	// Widen each word to int64 BEFORE shifting; shifting a plain int by 48/32
	// bits discards the value on 32-bit platforms (see trice641).
	d0 := int64(p.payload[0])<<48 | int64(p.payload[1])<<32 | int64(p.payload[2])<<16 | int64(p.payload[3])
	d1 := int64(p.payload[4])<<48 | int64(p.payload[5])<<32 | int64(p.payload[6])<<16 | int64(p.payload[7])
	n = copy(p.b, fmt.Sprintf(p.trice.Strg, d0, d1))
	p.payload = p.payload[:0]
	return
}
|
package product
import (
"errors"
"gin-webapi/database"
"net/http"
"time"
product "gin-webapi/models/product"
"github.com/gin-gonic/gin"
"gopkg.in/mgo.v2/bson"
)
// ProductCollection is the MongoDB collection name used by all product endpoints.
const ProductCollection = "product"

// Sentinel errors whose messages are returned verbatim in JSON error responses.
var (
	errNotExist        = errors.New("Products are not exist")
	errInvalidID       = errors.New("Invalid ID")
	errInvalidBody     = errors.New("Invalid request body")
	errInsertionFailed = errors.New("Error in the product insertion")
	errUpdationFailed  = errors.New("Error in the product updation")
	errDeletionFailed  = errors.New("Error in the product deletion")
)
// GetAllProduct Endpoint
// Returns every product in the collection as a JSON array.
func GetAllProduct(c *gin.Context) {
	db := database.GetMongoDB()
	all := product.Products{}
	if err := db.C(ProductCollection).Find(bson.M{}).All(&all); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"status": "failed", "message": errNotExist.Error()})
		return
	}
	c.JSON(http.StatusOK, gin.H{"status": "success", "products": &all})
}
// GetProduct Endpoint
// Returns the single product identified by the ":id" hex path parameter.
func GetProduct(c *gin.Context) {
	// bson.ObjectIdHex panics on a malformed id, which would crash the
	// handler; validate first and answer with the normal invalid-id error.
	hexID := c.Param("id")
	if !bson.IsObjectIdHex(hexID) {
		c.JSON(http.StatusBadRequest, gin.H{"status": "failed", "message": errInvalidID.Error()})
		return
	}
	id := bson.ObjectIdHex(hexID)
	prod, err := product.ProductInfo(id, ProductCollection)
	if err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"status": "failed", "message": errInvalidID.Error()})
		return
	}
	c.JSON(http.StatusOK, gin.H{"status": "success", "product": &prod})
}
// CreateProduct Endpoint
// Binds the request body to a product, stamps id/timestamps and inserts it.
func CreateProduct(c *gin.Context) {
	db := database.GetMongoDB()
	newProduct := product.Product{}
	if err := c.Bind(&newProduct); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"status": "failed", "message": errInvalidBody.Error()})
		return
	}
	newProduct.ID = bson.NewObjectId()
	newProduct.CreatedAt = time.Now()
	newProduct.UpdatedAt = time.Now()
	if err := db.C(ProductCollection).Insert(newProduct); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"status": "failed", "message": errInsertionFailed.Error()})
		return
	}
	c.JSON(http.StatusOK, gin.H{"status": "success", "product": &newProduct})
}
// UpdateProduct Endpoint
// Rebinds the request body over the existing product and persists it.
func UpdateProduct(c *gin.Context) {
	db := database.GetMongoDB()
	// bson.ObjectIdHex panics on a malformed id; validate before converting.
	hexID := c.Param("id")
	if !bson.IsObjectIdHex(hexID) {
		c.JSON(http.StatusBadRequest, gin.H{"status": "failed", "message": errInvalidID.Error()})
		return
	}
	id := bson.ObjectIdHex(hexID)
	existingProduct, err := product.ProductInfo(id, ProductCollection)
	if err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"status": "failed", "message": errInvalidID.Error()})
		return
	}
	err = c.Bind(&existingProduct)
	if err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"status": "failed", "message": errInvalidBody.Error()})
		return
	}
	// Re-assert the id in case the body tried to overwrite it.
	existingProduct.ID = id
	existingProduct.UpdatedAt = time.Now()
	err = db.C(ProductCollection).Update(bson.M{"_id": &id}, existingProduct)
	if err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"status": "failed", "message": errUpdationFailed.Error()})
		return
	}
	c.JSON(http.StatusOK, gin.H{"status": "success", "product": &existingProduct})
}
// DeleteProduct Endpoint
// Removes the product identified by the ":id" hex path parameter.
func DeleteProduct(c *gin.Context) {
	db := database.GetMongoDB()
	// bson.ObjectIdHex panics on a malformed id; validate before converting.
	hexID := c.Param("id")
	if !bson.IsObjectIdHex(hexID) {
		c.JSON(http.StatusBadRequest, gin.H{"status": "failed", "message": errInvalidID.Error()})
		return
	}
	id := bson.ObjectIdHex(hexID)
	if err := db.C(ProductCollection).Remove(bson.M{"_id": &id}); err != nil {
		c.JSON(http.StatusBadRequest, gin.H{"status": "failed", "message": errDeletionFailed.Error()})
		return
	}
	c.JSON(http.StatusOK, gin.H{"status": "success", "message": "Product deleted successfully"})
}
|
package main
import (
"fmt"
"testing"
)
func TestHandleWord(t *testing.T) {
sensitiveList := LoadSensitiveWords()
input := "hellboy wankycd dsviagra"
util := NewDFAUtil(sensitiveList)
newInput := util.HandleWord(input, '*')
expected := "****boy *****cd ds******"
if newInput != expected {
t.Errorf("Expected %s, but got %s", expected, newInput)
} else {
fmt.Println("newInput", newInput)
}
}
|
package rtk
/*
#include <stdlib.h>
#include <time.h>
#include <string.h>
#include <ctype.h>
#include <stdio.h>
#include <rtklib.h>
/* ppk_raw_to_rindex converts a raw RTCM3 log into RINEX files via convrnx.
   ofile/nfile receive the observation and navigation output paths; gfile is
   an optional third output used only when non-empty. */
void ppk_raw_to_rindex(gtime_t gpst, const char *bin,
const char *ofile, const char *nfile,
const char *gfile) {
rnxopt_t rnxopt = {0};
int i;
int format = STRFMT_RTCM3;
char file[1024], *outfile[6], ofile_[6][1024] = {""}, *p;
char buff[256], tstr[32];
for (i = 0; i < 6; i++)
outfile[i] = ofile_[i];
strcpy(file, bin);
rnxopt.rnxver = RNX3VER;
strcpy(outfile[0], ofile);
strcpy(outfile[1], nfile);
/* FIX: `gfile != ""` compared the pointer against the address of a string
   literal, which is always true for a heap-allocated C string coming from
   Go's C.CString. Test for a non-empty string instead. */
if (gfile != NULL && gfile[0] != '\0') {
strcpy(outfile[2], gfile);
}
rnxopt.trtcm = gpst;
rnxopt.navsys = 0x3;
rnxopt.obstype = 0xF;
rnxopt.freqtype = 0x3;
convrnx(format, &rnxopt, file, outfile);
}
*/
import "C"
import "unsafe"
// RawToRIndex converts the raw log binfile into RINEX output files
// (observation, navigation, optional GLONASS navigation) at gpst.
func RawToRIndex(gpst GTime, binfile, ofile, nfile, gfile string) {
	cstrs := []*C.char{
		C.CString(binfile),
		C.CString(ofile),
		C.CString(nfile),
		C.CString(gfile),
	}
	// Release every C string once the C call has returned.
	defer func() {
		for _, s := range cstrs {
			C.free(unsafe.Pointer(s))
		}
	}()
	C.ppk_raw_to_rindex(gpst.t, cstrs[0], cstrs[1], cstrs[2], cstrs[3])
}
|
package keeper
import (
"context"
"fmt"
"strings"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/provenance-io/provenance/x/metadata/types"
)
// msgServer implements the metadata module MsgServer interface by embedding
// the module Keeper, whose methods do the actual state changes.
type msgServer struct {
	Keeper
}
// NewMsgServerImpl returns an implementation of the distribution MsgServer
// interface for the provided Keeper.
func NewMsgServerImpl(keeper Keeper) types.MsgServer {
	srv := msgServer{Keeper: keeper}
	return &srv
}

// Compile-time check that msgServer satisfies types.MsgServer.
var _ types.MsgServer = msgServer{}
// MemorializeContract records the results of a contract execution.
// Currently a stub: it emits the standard message event and always returns a
// "not implemented" error.
func (k msgServer) MemorializeContract(
	goCtx context.Context,
	msg *types.MsgMemorializeContractRequest,
) (*types.MsgMemorializeContractResponse, error) {
	ctx := sdk.UnwrapSDKContext(goCtx)
	// TODO (contract keeper class methods to process contract execution, scope keeper methods to record it)
	ctx.EventManager().EmitEvent(
		sdk.NewEvent(
			sdk.EventTypeMessage,
			sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory),
			sdk.NewAttribute(sdk.AttributeKeySender, msg.Notary),
		),
	)
	return nil, fmt.Errorf("not implemented")
}
// ChangeOwnership updates ownership of metadata.
// Currently a stub: it emits the standard message event and always returns a
// "not implemented" error.
func (k msgServer) ChangeOwnership(
	goCtx context.Context,
	msg *types.MsgChangeOwnershipRequest,
) (*types.MsgChangeOwnershipResponse, error) {
	ctx := sdk.UnwrapSDKContext(goCtx)
	// TODO (contract keeper class methods to process contract execution, scope keeper methods to record it)
	ctx.EventManager().EmitEvent(
		sdk.NewEvent(
			sdk.EventTypeMessage,
			sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory),
			sdk.NewAttribute(sdk.AttributeKeySender, msg.Notary),
		),
	)
	return nil, fmt.Errorf("not implemented")
}
// AddScope creates or updates a scope after validating the change against any
// existing scope and the provided signers.
func (k msgServer) AddScope(
	goCtx context.Context,
	msg *types.MsgAddScopeRequest,
) (*types.MsgAddScopeResponse, error) {
	ctx := sdk.UnwrapSDKContext(goCtx)
	// The found flag is intentionally ignored: ValidateScopeUpdate handles
	// both the create case (zero-value existing) and the update case.
	existing, _ := k.GetScope(ctx, msg.Scope.ScopeId)
	if err := k.ValidateScopeUpdate(ctx, existing, msg.Scope, msg.Signers); err != nil {
		return nil, err
	}
	k.SetScope(ctx, msg.Scope)
	ctx.EventManager().EmitEvent(
		sdk.NewEvent(
			sdk.EventTypeMessage,
			sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory),
			sdk.NewAttribute(sdk.AttributeKeySender, strings.Join(msg.Signers, ",")),
		),
	)
	return &types.MsgAddScopeResponse{}, nil
}
// DeleteScope removes a scope after checking that the signers are allowed to
// clear every field of the existing scope.
func (k msgServer) DeleteScope(
	goCtx context.Context,
	msg *types.MsgDeleteScopeRequest,
) (*types.MsgDeleteScopeResponse, error) {
	ctx := sdk.UnwrapSDKContext(goCtx)
	existing, _ := k.GetScope(ctx, msg.ScopeId)
	// validate that all fields can be unset with the given list of signers
	if err := k.ValidateScopeRemove(ctx, existing, types.Scope{ScopeId: msg.ScopeId}, msg.Signers); err != nil {
		return nil, err
	}
	k.RemoveScope(ctx, msg.ScopeId)
	// Unlike the other handlers here, this event carries the scope id rather
	// than module/sender attributes.
	ctx.EventManager().EmitEvent(
		sdk.NewEvent(
			sdk.EventTypeMessage,
			sdk.NewAttribute(types.AttributeKeyScopeID, string(msg.ScopeId)),
		),
	)
	return &types.MsgDeleteScopeResponse{}, nil
}
// AddSession creates or updates a session after validating the change, and
// refreshes the session audit trail with the current block time and signers.
func (k msgServer) AddSession(
	goCtx context.Context,
	msg *types.MsgAddSessionRequest,
) (*types.MsgAddSessionResponse, error) {
	ctx := sdk.UnwrapSDKContext(goCtx)
	existing, _ := k.GetSession(ctx, msg.Session.SessionId)
	if err := k.ValidateSessionUpdate(ctx, existing, *msg.Session, msg.Signers); err != nil {
		return nil, err
	}
	// NOTE(review): existing.Audit and msg.Session.Audit are dereferenced
	// below; if either can be nil for a brand-new session this panics —
	// confirm UpdateAudit tolerates a nil receiver and that requests always
	// carry an Audit value.
	audit := existing.Audit.UpdateAudit(ctx.BlockTime(), strings.Join(msg.Signers, ", "), "")
	*msg.Session.Audit = *audit
	k.SetSession(ctx, *msg.Session)
	ctx.EventManager().EmitEvent(
		sdk.NewEvent(
			sdk.EventTypeMessage,
			sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory),
			sdk.NewAttribute(sdk.AttributeKeySender, strings.Join(msg.Signers, ",")),
		),
	)
	return &types.MsgAddSessionResponse{}, nil
}
// AddRecord creates or updates a record. The record id is derived from the
// scope UUID embedded in the record's session id plus the record name.
func (k msgServer) AddRecord(
	goCtx context.Context,
	msg *types.MsgAddRecordRequest,
) (*types.MsgAddRecordResponse, error) {
	ctx := sdk.UnwrapSDKContext(goCtx)
	scopeUUID, err := msg.Record.SessionId.ScopeUUID()
	if err != nil {
		return nil, err
	}
	recordID := types.RecordMetadataAddress(scopeUUID, msg.Record.Name)
	// found flag ignored: validation accepts a zero-value existing record.
	existing, _ := k.GetRecord(ctx, recordID)
	if err := k.ValidateRecordUpdate(ctx, existing, *msg.Record, msg.Signers); err != nil {
		return nil, err
	}
	k.SetRecord(ctx, *msg.Record)
	ctx.EventManager().EmitEvent(
		sdk.NewEvent(
			sdk.EventTypeMessage,
			sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory),
			sdk.NewAttribute(sdk.AttributeKeySender, strings.Join(msg.Signers, ",")),
		),
	)
	return &types.MsgAddRecordResponse{}, nil
}
// DeleteRecord removes a record after validating the removal against the
// existing record and the provided signers.
func (k msgServer) DeleteRecord(
	goCtx context.Context,
	msg *types.MsgDeleteRecordRequest,
) (*types.MsgDeleteRecordResponse, error) {
	ctx := sdk.UnwrapSDKContext(goCtx)
	record, _ := k.GetRecord(ctx, msg.RecordId)
	if err := k.ValidateRecordRemove(ctx, record, msg.RecordId, msg.Signers); err != nil {
		return nil, err
	}
	k.RemoveRecord(ctx, msg.RecordId)
	event := sdk.NewEvent(
		sdk.EventTypeMessage,
		sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory),
		sdk.NewAttribute(sdk.AttributeKeySender, strings.Join(msg.Signers, ",")),
	)
	ctx.EventManager().EmitEvent(event)
	return &types.MsgDeleteRecordResponse{}, nil
}
// AddScopeSpecification creates or updates a scope specification. When a spec
// with the same id already exists, all of its owners must have signed.
func (k msgServer) AddScopeSpecification(
	goCtx context.Context,
	msg *types.MsgAddScopeSpecificationRequest,
) (*types.MsgAddScopeSpecificationResponse, error) {
	ctx := sdk.UnwrapSDKContext(goCtx)
	var existing *types.ScopeSpecification = nil
	if e, found := k.GetScopeSpecification(ctx, msg.Specification.SpecificationId); found {
		existing = &e
		// Updating requires signatures from every current owner.
		if err := k.ValidateAllOwnersAreSigners(existing.OwnerAddresses, msg.Signers); err != nil {
			return nil, err
		}
	}
	if err := k.ValidateScopeSpecUpdate(ctx, existing, msg.Specification); err != nil {
		return nil, err
	}
	k.SetScopeSpecification(ctx, msg.Specification)
	ctx.EventManager().EmitEvent(
		sdk.NewEvent(
			sdk.EventTypeMessage,
			sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory),
			sdk.NewAttribute(sdk.AttributeKeySender, strings.Join(msg.Signers, ",")),
		),
	)
	return &types.MsgAddScopeSpecificationResponse{}, nil
}
// DeleteScopeSpecification removes a scope specification, provided it exists
// and every current owner has signed the request.
func (k msgServer) DeleteScopeSpecification(
	goCtx context.Context,
	msg *types.MsgDeleteScopeSpecificationRequest,
) (*types.MsgDeleteScopeSpecificationResponse, error) {
	ctx := sdk.UnwrapSDKContext(goCtx)
	existing, found := k.GetScopeSpecification(ctx, msg.SpecificationId)
	if !found {
		return nil, fmt.Errorf("scope specification not found with id %s", msg.SpecificationId)
	}
	if err := k.ValidateAllOwnersAreSigners(existing.OwnerAddresses, msg.Signers); err != nil {
		return nil, err
	}
	if err := k.RemoveScopeSpecification(ctx, msg.SpecificationId); err != nil {
		return nil, fmt.Errorf("cannot delete scope specification with id %s: %w", msg.SpecificationId, err)
	}
	ctx.EventManager().EmitEvent(
		sdk.NewEvent(
			sdk.EventTypeMessage,
			sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory),
			sdk.NewAttribute(sdk.AttributeKeySender, strings.Join(msg.Signers, ",")),
		),
	)
	return &types.MsgDeleteScopeSpecificationResponse{}, nil
}
// AddContractSpecification creates or updates a contract specification. When
// a spec with the same id exists, all of its owners must have signed.
func (k msgServer) AddContractSpecification(
	goCtx context.Context,
	msg *types.MsgAddContractSpecificationRequest,
) (*types.MsgAddContractSpecificationResponse, error) {
	ctx := sdk.UnwrapSDKContext(goCtx)
	var existing *types.ContractSpecification = nil
	if e, found := k.GetContractSpecification(ctx, msg.Specification.SpecificationId); found {
		existing = &e
		// Updating requires signatures from every current owner.
		if err := k.ValidateAllOwnersAreSigners(existing.OwnerAddresses, msg.Signers); err != nil {
			return nil, err
		}
	}
	if err := k.ValidateContractSpecUpdate(ctx, existing, msg.Specification); err != nil {
		return nil, err
	}
	k.SetContractSpecification(ctx, msg.Specification)
	ctx.EventManager().EmitEvent(
		sdk.NewEvent(
			sdk.EventTypeMessage,
			sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory),
			sdk.NewAttribute(sdk.AttributeKeySender, strings.Join(msg.Signers, ",")),
		),
	)
	return &types.MsgAddContractSpecificationResponse{}, nil
}
// DeleteContractSpecification removes a contract specification together with
// all of its record specifications, provided every owner has signed. If any
// record specification cannot be removed, the already-removed ones are
// restored and the whole operation fails.
func (k msgServer) DeleteContractSpecification(
	goCtx context.Context,
	msg *types.MsgDeleteContractSpecificationRequest,
) (*types.MsgDeleteContractSpecificationResponse, error) {
	ctx := sdk.UnwrapSDKContext(goCtx)
	existing, found := k.GetContractSpecification(ctx, msg.SpecificationId)
	if !found {
		return nil, fmt.Errorf("contract specification not found with id %s", msg.SpecificationId)
	}
	if err := k.ValidateAllOwnersAreSigners(existing.OwnerAddresses, msg.Signers); err != nil {
		return nil, err
	}
	// Remove all record specifications associated with this contract specification.
	recSpecs, recSpecErr := k.GetRecordSpecificationsForContractSpecificationID(ctx, msg.SpecificationId)
	if recSpecErr != nil {
		return nil, fmt.Errorf("could not get record specifications to delete with contract specification with id %s: %w",
			msg.SpecificationId, recSpecErr)
	}
	var delRecSpecErr error = nil
	removedRecSpecs := []*types.RecordSpecification{}
	for _, recSpec := range recSpecs {
		if err := k.RemoveRecordSpecification(ctx, recSpec.SpecificationId); err != nil {
			// FIX: the contract spec id is an address, not an integer, so it
			// must be formatted with %s (was %d, which rendered garbage).
			delRecSpecErr = fmt.Errorf("failed to delete record specification %s (name: %s) while trying to delete contract specification %s: %w",
				recSpec.SpecificationId, recSpec.Name, msg.SpecificationId, err)
			break
		}
		removedRecSpecs = append(removedRecSpecs, recSpec)
	}
	if delRecSpecErr != nil {
		// Put the deleted record specifications back since not all of them could be deleted (and neither can this contract spec)
		for _, recSpec := range removedRecSpecs {
			k.SetRecordSpecification(ctx, *recSpec)
		}
		return nil, delRecSpecErr
	}
	// Remove the contract specification itself
	if err := k.RemoveContractSpecification(ctx, msg.SpecificationId); err != nil {
		return nil, fmt.Errorf("cannot delete contract specification with id %s: %w", msg.SpecificationId, err)
	}
	ctx.EventManager().EmitEvent(
		sdk.NewEvent(
			sdk.EventTypeMessage,
			sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory),
			sdk.NewAttribute(sdk.AttributeKeySender, strings.Join(msg.Signers, ",")),
		),
	)
	return &types.MsgDeleteContractSpecificationResponse{}, nil
}
// AddRecordSpecification creates or updates a record specification. The
// owning contract specification must already exist and all of its owners
// must have signed.
func (k msgServer) AddRecordSpecification(
	goCtx context.Context,
	msg *types.MsgAddRecordSpecificationRequest,
) (*types.MsgAddRecordSpecificationResponse, error) {
	ctx := sdk.UnwrapSDKContext(goCtx)
	contractSpecID, err := msg.Specification.SpecificationId.AsContractSpecAddress()
	if err != nil {
		return nil, err
	}
	contractSpec, contractSpecFound := k.GetContractSpecification(ctx, contractSpecID)
	if !contractSpecFound {
		contractSpecUUID, _ := contractSpecID.ContractSpecUUID()
		return nil, fmt.Errorf("contract specification not found with id %s (uuid %s) required for adding or updating record specification with id %s",
			contractSpecID, contractSpecUUID, msg.Specification.SpecificationId)
	}
	if err := k.ValidateAllOwnersAreSigners(contractSpec.OwnerAddresses, msg.Signers); err != nil {
		return nil, err
	}
	// Pass a nil existing spec to validation when this is a brand-new record spec.
	var existing *types.RecordSpecification = nil
	if e, found := k.GetRecordSpecification(ctx, msg.Specification.SpecificationId); found {
		existing = &e
	}
	if err := k.ValidateRecordSpecUpdate(ctx, existing, msg.Specification); err != nil {
		return nil, err
	}
	k.SetRecordSpecification(ctx, msg.Specification)
	ctx.EventManager().EmitEvent(
		sdk.NewEvent(
			sdk.EventTypeMessage,
			sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory),
			sdk.NewAttribute(sdk.AttributeKeySender, strings.Join(msg.Signers, ",")),
		),
	)
	return &types.MsgAddRecordSpecificationResponse{}, nil
}
// DeleteRecordSpecification removes a record specification. Both the record
// spec and its owning contract specification must exist, and all contract
// spec owners must have signed.
func (k msgServer) DeleteRecordSpecification(
	goCtx context.Context,
	msg *types.MsgDeleteRecordSpecificationRequest,
) (*types.MsgDeleteRecordSpecificationResponse, error) {
	ctx := sdk.UnwrapSDKContext(goCtx)
	_, found := k.GetRecordSpecification(ctx, msg.SpecificationId)
	if !found {
		return nil, fmt.Errorf("record specification not found with id %s", msg.SpecificationId)
	}
	contractSpecID, err := msg.SpecificationId.AsContractSpecAddress()
	if err != nil {
		return nil, err
	}
	contractSpec, contractSpecFound := k.GetContractSpecification(ctx, contractSpecID)
	if !contractSpecFound {
		return nil, fmt.Errorf("contract specification not found with id %s required for deleting record specification with id %s",
			contractSpecID, msg.SpecificationId)
	}
	if err := k.ValidateAllOwnersAreSigners(contractSpec.OwnerAddresses, msg.Signers); err != nil {
		return nil, err
	}
	if err := k.RemoveRecordSpecification(ctx, msg.SpecificationId); err != nil {
		return nil, fmt.Errorf("cannot delete record specification with id %s: %w", msg.SpecificationId, err)
	}
	ctx.EventManager().EmitEvent(
		sdk.NewEvent(
			sdk.EventTypeMessage,
			sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory),
			sdk.NewAttribute(sdk.AttributeKeySender, strings.Join(msg.Signers, ",")),
		),
	)
	return &types.MsgDeleteRecordSpecificationResponse{}, nil
}
// AddP8EContractSpec converts a legacy p8e contract specification into the
// current format and stores it along with its converted record specifications.
func (k msgServer) AddP8EContractSpec(
	goCtx context.Context,
	msg *types.MsgAddP8EContractSpecRequest,
) (*types.MsgAddP8EContractSpecResponse, error) {
	ctx := sdk.UnwrapSDKContext(goCtx)
	proposed, newrecords, err := types.ConvertP8eContractSpec(&msg.Contractspec, msg.Signers)
	if err != nil {
		return nil, err
	}
	// If a spec with this id already exists, all its owners must have signed.
	var existing *types.ContractSpecification = nil
	if e, found := k.GetContractSpecification(ctx, proposed.SpecificationId); found {
		existing = &e
		if err := k.ValidateAllOwnersAreSigners(existing.OwnerAddresses, msg.Signers); err != nil {
			return nil, err
		}
	}
	if err := k.ValidateContractSpecUpdate(ctx, existing, proposed); err != nil {
		return nil, err
	}
	k.SetContractSpecification(ctx, proposed)
	// Validate and store each converted record specification as well.
	for _, proposedRecord := range newrecords {
		var existing *types.RecordSpecification = nil // intentionally shadows the contract-spec variable above
		if e, found := k.GetRecordSpecification(ctx, proposedRecord.SpecificationId); found {
			existing = &e
		}
		if err := k.ValidateRecordSpecUpdate(ctx, existing, proposedRecord); err != nil {
			return nil, err
		}
		k.SetRecordSpecification(ctx, proposedRecord)
	}
	ctx.EventManager().EmitEvent(
		sdk.NewEvent(
			sdk.EventTypeMessage,
			sdk.NewAttribute(sdk.AttributeKeyModule, types.AttributeValueCategory),
			sdk.NewAttribute(sdk.AttributeKeySender, strings.Join(msg.Signers, ",")),
		),
	)
	return &types.MsgAddP8EContractSpecResponse{}, nil
}
|
/*
Resistors are electrical components that add resistance to a circuit. Resistance is measured in ohms. When resistors are connected in series, the total resistance is merely the sum of the individual resistances:
Rtotal = R1 + R2 + R3 + ...
When resistors are connected in parallel, the reciprocal of the total resistance is equal to the sum of the reciprocals of the individual resistances:
1/(Rtotal) = 1/R1 + 1/R2 + 1/R3 + ...
Let's specify that series resistors be designated by enclosing them in parentheses, and parallel resistors by enclosing them in square brackets. Now we can calculate the equivalent resistance of the network:
(2, 3, 6) = 11 ohms
[2, 3, 6] = 1 ohm
Nesting these structures in the same way tuples and arrays are nested allows us to model complex resistor networks.
Create a function that takes a nested network as a string and returns the equivalent resistance of the network. Round results to the nearest tenth of an ohm.
Examples
resist("(10, [20, 30])") ➞ 22.0
// 10 in series with [20, 30] in parallel.
resist("[10, (20, 30)]") ➞ 8.3
// 10 in parallel with (20, 30) in series.
resist("([10, 20], (30, 40))") ➞ 76.7
// [10, 20] in parallel in series with (30, 40) in series.
resist("(1, [12, 4, (1, [10, (2, 8)])])") ➞ 3.0
resist("(6, [8, (4, [8, (4, [6, (8, [6, (10, 2)])])])])") ➞ 10
Notes
This is the schematic for the last example above:
https://www.electronics-tutorials.ws/wp-content/uploads/2018/05/resistor-res54.gif
*/
package main
import (
"math"
"strconv"
"strings"
"text/scanner"
)
// main checks resist against a table of known networks; any mismatch (beyond
// the 0.1-ohm tolerance in eq) panics via assert.
func main() {
	cases := []struct {
		network string
		ohms    float64
	}{
		{"(2, 3, 6)", 11},
		{"[2, 3, 6]", 1},
		{"[10, 20, [30, (40, 50), 60, (70, 80)], 90]", 4.4},
		{"(1, [12, 4, (1, [10, (2, 8)])])", 3},
		{"(10, [20, 30])", 22},
		{"[10, (20, 30)]", 8.3},
		{"[22, 6, (10, 18, [33, 15]), (10, [63, 50], 45)]", 4.0},
		{"[([(470, 1000), 330], 470), 680]", 354.3},
		{"([10, 20], (30, 40))", 76.7},
		{"(1, [12, 4, (1, [10, (2, 8)])])", 3},
		{"([([(470, 680), 330], 1000), 470], 680)", 1022},
		{"(6, [8, (4, [8, (4, [6, (8, [6, (10, 2)])])])])", 10},
	}
	for _, c := range cases {
		assert(eq(resist(c.network), c.ohms))
	}
}
// resist parses a resistor-network expression such as "(10, [20, 30])" and
// returns its equivalent resistance in ohms. "( ... )" groups are in series
// (values add); "[ ... ]" groups are in parallel (reciprocals add). Negative
// return values are parse-error codes (-1 .. -7).
func resist(s string) float64 {
	var (
		sc      scanner.Scanner
		sp      []rune // stack of currently open brackets: '(' or '['
		lasttok rune   // previous token, for syntax validation
	)
	// res[d] accumulates the value of the group at nesting depth d;
	// res[0] ends up holding the final answer.
	res := make([]float64, 1)
	sc.Init(strings.NewReader(s))
loop:
	for {
		tok := sc.Scan()
		switch tok {
		case scanner.EOF:
			break loop
		case scanner.Int, scanner.Float:
			l := len(sp)
			// two numbers in a row, or a number outside any group, is invalid
			if lasttok == scanner.Int || lasttok == scanner.Float || l == 0 {
				return -1
			}
			v, err := strconv.ParseFloat(sc.TokenText(), 64)
			if err != nil {
				return -2
			}
			// inside a parallel group we sum reciprocals
			if sp[l-1] == '[' {
				v = 1 / v
			}
			res[l] += v
		case '(', '[':
			sp = append(sp, tok)
			res = append(res, 0)
		case ')', ']':
			l := len(sp)
			if l == 0 {
				return -3 // closing bracket with nothing open
			}
			if tok == ')' && sp[l-1] != '(' {
				return -4 // mismatched bracket kind
			}
			if tok == ']' && sp[l-1] != '[' {
				return -5 // mismatched bracket kind
			}
			// a parallel group's reciprocal sum is inverted on close
			if tok == ']' {
				res[l] = 1 / res[l]
			}
			sp = sp[:l-1]
			l = len(sp)
			v := res[l+1]
			// fold the finished group into its parent, as a reciprocal when
			// the parent is a parallel group
			if l > 0 && sp[l-1] == '[' {
				v = 1 / v
			}
			res[l] += v
			res = res[:l+1]
		case ',':
			// a comma must follow a value or a closed group
			if !(lasttok == scanner.Int || lasttok == scanner.Float || lasttok == ']' || lasttok == ')') {
				return -6
			}
		default:
			return -7 // unexpected token
		}
		lasttok = tok
	}
	if len(sp) > 0 {
		return -1 // unclosed group
	}
	return res[0]
}
func eq(x, y float64) bool {
return math.Abs(x-y) < 1e-1
}
// assert panics when its argument is false; a minimal test harness helper.
func assert(ok bool) {
	if ok {
		return
	}
	panic("assertion failed")
}
|
package main
import (
"listing17/handlers"
"log"
"net/http"
)
// main registers the HTTP routes and serves the web service on port 4000.
func main() {
	handlers.Routes()
	log.Println("웹 서비스 실행 중: 포트: 4000")
	// ListenAndServe only returns on failure; its error was previously
	// discarded, which silently hid problems such as an already-bound port.
	if err := http.ListenAndServe(":4000", nil); err != nil {
		log.Fatal(err)
	}
}
|
package controller
import (
"context"
log "github.com/sirupsen/logrus"
apiv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
"sigs.k8s.io/yaml"
"github.com/argoproj/argo/config"
"github.com/argoproj/argo/errors"
"github.com/argoproj/argo/persist/sqldb"
"github.com/argoproj/argo/util/instanceid"
"github.com/argoproj/argo/workflow/hydrator"
)
// updateConfig applies a freshly loaded controller configuration: it logs the
// config, checks that an executor image is available, and (re)initializes the
// optional persistence layer (node status offload and workflow archive).
func (wfc *WorkflowController) updateConfig(config config.Config) error {
	bytes, err := yaml.Marshal(config)
	if err != nil {
		return err
	}
	log.Info("Configuration:\n" + string(bytes))
	// An executor image must come from either the CLI flag or the ConfigMap.
	if wfc.cliExecutorImage == "" && config.ExecutorImage == "" {
		return errors.Errorf(errors.CodeBadRequest, "ConfigMap does not have executorImage")
	}
	wfc.Config = config
	// Close any previous DB session before rebuilding persistence state.
	if wfc.session != nil {
		err := wfc.session.Close()
		if err != nil {
			return err
		}
	}
	// Reset persistence fields to safe defaults; they are replaced below when
	// persistence is configured.
	wfc.session = nil
	wfc.offloadNodeStatusRepo = sqldb.ExplosiveOffloadNodeStatusRepo
	wfc.wfArchive = sqldb.NullWorkflowArchive
	wfc.archiveLabelSelector = labels.Everything()
	persistence := wfc.Config.Persistence
	if persistence != nil {
		log.Info("Persistence configuration enabled")
		session, tableName, err := sqldb.CreateDBSession(wfc.kubeclientset, wfc.namespace, persistence)
		if err != nil {
			return err
		}
		log.Info("Persistence Session created successfully")
		// Run schema migrations before the session is used.
		err = sqldb.NewMigrate(session, persistence.GetClusterName(), tableName).Exec(context.Background())
		if err != nil {
			return err
		}
		wfc.session = session
		if persistence.NodeStatusOffload {
			wfc.offloadNodeStatusRepo, err = sqldb.NewOffloadNodeStatusRepo(session, persistence.GetClusterName(), tableName)
			if err != nil {
				return err
			}
			log.Info("Node status offloading is enabled")
		} else {
			log.Info("Node status offloading is disabled")
		}
		if persistence.Archive {
			instanceIDService := instanceid.NewService(wfc.Config.InstanceID)
			wfc.archiveLabelSelector, err = persistence.GetArchiveLabelSelector()
			if err != nil {
				return err
			}
			wfc.wfArchive = sqldb.NewWorkflowArchive(session, persistence.GetClusterName(), wfc.managedNamespace, instanceIDService)
			log.Info("Workflow archiving is enabled")
		} else {
			log.Info("Workflow archiving is disabled")
		}
	} else {
		log.Info("Persistence configuration disabled")
	}
	// The hydrator always wraps whichever offload repo was selected above.
	wfc.hydrator = hydrator.New(wfc.offloadNodeStatusRepo)
	return nil
}
// executorImage returns the image to use for the workflow executor.
// A CLI-provided image takes precedence over the configured one.
func (wfc *WorkflowController) executorImage() string {
	if img := wfc.cliExecutorImage; img != "" {
		return img
	}
	return wfc.Config.ExecutorImage
}
// executorImagePullPolicy returns the imagePullPolicy to use for the workflow
// executor, preferring the CLI flag, then the executor config, then the
// top-level config value.
func (wfc *WorkflowController) executorImagePullPolicy() apiv1.PullPolicy {
	if wfc.cliExecutorImagePullPolicy != "" {
		return apiv1.PullPolicy(wfc.cliExecutorImagePullPolicy)
	}
	if e := wfc.Config.Executor; e != nil && e.ImagePullPolicy != "" {
		return e.ImagePullPolicy
	}
	return apiv1.PullPolicy(wfc.Config.ExecutorImagePullPolicy)
}
|
package raw_client
import (
"context"
)
// GetAppStatusRequest is the request body for the kintone app status API
// (GET /k/v1/app/status.json).
type GetAppStatusRequest struct {
	App string `json:"app"` // target app id
}

// GetAppStatusResponse mirrors the JSON response of the app status API.
// NOTE(review): the nested member types are named ...Request... even though
// they belong to the response; renaming would break callers, so it is only
// flagged here.
type GetAppStatusResponse struct {
	Enable   bool                                `json:"enable"`
	States   map[string]GetAppStatusRequestState `json:"states"`
	Actions  []GetAppStatusRequestAction         `json:"actions"`
	Revision string                              `json:"revision"`
}

// GetAppStatusRequestState describes one process-management state.
type GetAppStatusRequestState struct {
	Name  string `json:"name"`
	Index string `json:"index"` // sent by the API as a string; presumably a display order — confirm against kintone docs
}

// GetAppStatusRequestAction describes one transition between two states.
type GetAppStatusRequestAction struct {
	Name string `json:"name"`
	From string `json:"from"`
	To   string `json:"to"`
}
// GetAppStatus fetches the process-management (status) settings of an app
// via GET /k/v1/app/status.json and decodes the JSON reply.
func GetAppStatus(ctx context.Context, apiClient *ApiClient, req GetAppStatusRequest) (*GetAppStatusResponse, error) {
	apiRequest := ApiRequest{
		Method: "GET",
		Scheme: "https",
		Path:   "/k/v1/app/status.json",
		Json:   req,
	}
	// The original declared a local variable named exactly like the
	// GetAppStatusResponse type, shadowing it; use a short name instead.
	var resp GetAppStatusResponse
	if err := apiClient.Call(ctx, apiRequest, &resp); err != nil {
		return nil, err
	}
	return &resp, nil
}
|
/*
Author:
Nicholas Siow | nick@siow.me
Alani Douglas | fresh@alani.style
Description:
Core webserver for http://alanick.us
*/
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"path"
"strings"
)
//------------------------------------------------------------
// CONFIGURATION
//------------------------------------------------------------
// Path of the JSON configuration file read at startup.
var config_path = "/etc/alanick.conf"
// Config holds the server settings decoded from config_path.
type Config struct {
SiteRoot string // root directory of the site's HTML tree
LogFile string // file that receives server log output
Debug bool // when true, debug() emits [DEBUG] lines
}
// Parsed configuration, populated by configSetup() during init.
var config Config
/*
functions to run on initialization
*/
// init loads the configuration first, then redirects logging:
// loggingSetup reads config.LogFile, so the order matters.
func init() {
configSetup()
loggingSetup()
}
/*
function to read in and validate configuration
*/
// configSetup decodes the JSON file at config_path into the package-level
// config, exiting the process on any open or parse failure.
func configSetup() {
	f, openErr := os.Open(config_path)
	if openErr != nil {
		fmt.Println("Error opening config file", openErr.Error())
		os.Exit(1)
	}
	defer f.Close()
	if decodeErr := json.NewDecoder(f).Decode(&config); decodeErr != nil {
		fmt.Println("Error parsing config file", decodeErr.Error())
		os.Exit(1)
	}
}
//------------------------------------------------------------
// LOGGING
//------------------------------------------------------------
/*
set up server logging
*/
// loggingSetup redirects the standard logger to config.LogFile (created if
// absent, appended otherwise), exiting on failure. The file stays open for
// the lifetime of the process.
func loggingSetup() {
	logFile, openErr := os.OpenFile(config.LogFile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	if openErr != nil {
		fmt.Println("Error opening log file", openErr.Error())
		os.Exit(1)
	}
	log.SetOutput(logFile)
}
/*
helper function for debug logging
*/
// debug logs message with a [DEBUG] prefix when debug mode is enabled.
func debug(message string) {
	if !config.Debug {
		return
	}
	log.Printf("[DEBUG] %s", message)
}
/*
helper function to standardize error logging
*/
// logError records one failed request: its status code, path, and reason.
func logError(request string, message string, statusCode int) {
	const format = "[ERROR] %d for %s :: %s"
	log.Printf(format, statusCode, request, message)
}
//------------------------------------------------------------
// CLEANUP
//------------------------------------------------------------
/*
clean up server loose ends
*/
// cleanup is a placeholder: it currently does nothing and nothing in this
// file calls it.
func cleanup() {
}
/*
start listening and serving requests
*/
// main registers the static-file and page handlers and serves on :8080.
func main() {
	// serve static css files at /static
	http.Handle("/static/", http.StripPrefix("/static/", http.FileServer(http.Dir("static"))))
	// set up the main page handler
	http.HandleFunc("/", serve)
	// ListenAndServe only returns on failure; the original discarded the
	// error, silently hiding e.g. "address already in use".
	if err := http.ListenAndServe(":8080", nil); err != nil {
		log.Fatal(err)
	}
}
/*
base handler for serving html pages and directories
*/
// serve maps a URL path into the site tree and serves the matching HTML:
// "<path>/this.html" for directories, otherwise "<path>.html". Paths
// containing a dot are rejected as traversal attempts.
func serve(w http.ResponseWriter, r *http.Request) {
	debug(fmt.Sprintf("Received request: %+v", r))
	requested := r.URL.Path
	// any dot at all is treated as a traversal attempt
	if strings.Contains(requested, ".") {
		http.Error(w, "Don't be a dick...", 500)
		logError(requested, "Directory traversal", 500)
		return
	}
	// put the requested path in the context of the local file system
	local := path.Join(config.SiteRoot, requested)
	// try the directory form first, then the plain-file form
	target := path.Join(local, "this.html")
	if _, err := os.Stat(target); os.IsNotExist(err) {
		target = local + ".html"
	}
	// verify that the resolved target exists
	if _, err := os.Stat(target); os.IsNotExist(err) {
		http.Error(w, "This page does not exist, sorry!", 404)
		logError(requested, "Page not found", 404)
		return
	}
	// read and serve the page
	content, err := ioutil.ReadFile(target)
	if err != nil {
		http.Error(w, "Internal server error", 500)
		logError(requested, err.Error(), 500)
		return
	}
	w.Write(content)
}
|
package main

import "fmt"

// trap computes the amount of rain water held by the histogram heights
// (面试题 17.21 / LeetCode 42) with the two-pointer scan: advance the lower
// side inward, accumulating the gap between that side's running maximum and
// the current bar. O(n) time, O(1) space.
//
// The original was an unfinished attempt: its unused counters made it fail
// to compile, and the bare `for` advanced `end` without bound, so it would
// have panicked with an index out of range.
func trap(heights []int) int {
	left, right := 0, len(heights)-1
	leftMax, rightMax := 0, 0
	total := 0
	for left < right {
		if heights[left] < heights[right] {
			if heights[left] >= leftMax {
				leftMax = heights[left] // new wall on the left
			} else {
				total += leftMax - heights[left] // water above this bar
			}
			left++
		} else {
			if heights[right] >= rightMax {
				rightMax = heights[right] // new wall on the right
			} else {
				total += rightMax - heights[right]
			}
			right--
		}
	}
	return total
}

func main() {
	ints := []int{0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1}
	fmt.Println(trap(ints)) // expected: 6
}
|
package main
import (
"bufio"
"fmt"
"log"
"os"
"github.com/guromityan/go-imgmd/lib"
"gopkg.in/alecthomas/kingpin.v2"
)
// version is reported by kingpin's --version flag.
const version = "1.0.0"
// Command-line definition: one required existing-directory argument and an
// optional -o/--output flag naming the Markdown file to write.
var (
app = kingpin.New("imgmd", "Convert image to Markdown.")
target = app.Arg("target", "Target directory containing images.").Required().ExistingDir()
output = app.Flag("output", "Markdown file name to output.").Short('o').String()
)
// main walks the target directory, converts its images to Markdown
// statements, and writes them to the output file (default "<target>.md").
func main() {
	app.Version(version)
	kingpin.MustParse(app.Parse(os.Args[1:]))
	// Default the output name when -o is not given.
	if *output == "" {
		*output = *target + ".md"
	}
	stmts, err := lib.Dirwalk(*target)
	if err != nil {
		log.Fatalln(err)
	}
	file, err := os.Create(*output)
	if err != nil {
		// Dereference the flag: the original passed the *string itself,
		// printing a pointer address instead of the file name.
		log.Fatalf("Failed to create %v: %v", *output, err)
	}
	defer file.Close()
	w := bufio.NewWriter(file)
	for _, line := range stmts {
		fmt.Fprintln(w, line)
	}
	if err := w.Flush(); err != nil {
		// Same pointer-vs-value fix as above.
		log.Fatalf("Failed to write file %v: %v", *output, err)
	}
}
|
// +build ignore
package gorules
import "github.com/quasilyte/go-ruleguard/dsl/fluent"
// _ registers the typeTest/pureTest rule set consumed by ruleguard:
// type filters on +/> expressions and purity filters on arguments.
// NOTE(review): the first two string-concat rules are byte-identical
// duplicates — in a (build-ignored) rules test fixture that may be
// deliberate, e.g. to exercise duplicate handling; confirm before removing.
func _(m fluent.Matcher) {
m.Match(`typeTest($x + $y)`).
Where(m["x"].Type.Is(`string`) && m["y"].Type.Is("string")).
Report(`concat`)
m.Match(`typeTest($x + $y)`).
Where(m["x"].Type.Is(`string`) && m["y"].Type.Is("string")).
Report(`concat`)
m.Match(`typeTest($x + $y)`).
Where(m["x"].Type.Is(`int`) && m["y"].Type.Is("int")).
Report(`addition`)
m.Match(`typeTest($x > $y)`).
Where(!m["x"].Type.Is(`int`)).
Report(`$x !is(int)`)
m.Match(`typeTest($x > $y)`).
Where(!m["x"].Type.Is(`string`) && m["x"].Pure).
Report(`$x !is(string) && pure`)
m.Match(`typeTest($s, $s)`).
Where(m["s"].Type.Is(`[]string`)).
Report(`$s is([]string)`)
m.Match(`pureTest($x)`).
Where(m["x"].Pure).
Report("pure")
m.Match(`pureTest($x)`).
Where(!m["x"].Pure).
Report("!pure")
}
|
package resolver
import (
"github.com/kivutar/chainz/service"
"github.com/op/go-logging"
"golang.org/x/net/context"
)
// Author resolves an author graphql query
// by delegating to the AuthorServer service carried in the request context.
//
// NOTE(review): both ctx.Value type assertions are unchecked and will panic
// if middleware did not install "services"/"logger" — confirm upstream
// always sets them before relying on this in new call paths.
func (r *Resolver) Author(ctx context.Context, args struct {
ID string
}) (*AuthorResolver, error) {
authorService := ctx.Value("services").(*service.Container).AuthorServer
logger := ctx.Value("logger").(*logging.Logger)
author, err := authorService.FindByID(args.ID)
if err != nil {
logger.Errorf("Graphql error : %v", err)
return nil, err
}
logger.Debugf("Retrieved author by author_id[%s] : %v", author.ID, author)
return &AuthorResolver{&author}, nil
}
|
package diceprinter
import (
"fmt"
"github.com/appliedgocourses/dice"
"github.com/common-nighthawk/go-figure"
)
// Roll prints the outcome of one roll of a die with the given number of sides.
func Roll(sides int) {
	result := dice.Roll(sides)
	fmt.Printf("Rolling a %d-sided die: %d\n", sides, result)
}
// Pretty prints one roll of a die with the given number of sides rendered
// as large ASCII-art text.
func Pretty(sides int) {
	text := fmt.Sprintf("%d-sided roll: %d", sides, dice.Roll(sides))
	figure.NewFigure(text, "", true).Print()
}
|
package vastflow
import (
"github.com/jack0liu/logs"
"reflect"
)
// AtlanticFlow holds the user-defined terminal callbacks of a flow:
// Success runs when the flow finishes successfully, Fail when it fails.
type AtlanticFlow interface {
Success(headwaters *Headwaters) error
Fail(headwaters *Headwaters) error
}
// AtlanticStream is the internal driver contract: run the terminal stage
// and manage the flow's identity and state bookkeeping.
type AtlanticStream interface {
runSuccess(headwaters *Headwaters, flow AtlanticFlow)
runFail(headwaters *Headwaters, flow AtlanticFlow)
setId(id string)
getId() string
setState(state streamState)
getState() streamState
setWaterId(waterId string)
getWaterId() string
}
// Atlantic is the default AtlanticStream implementation; it carries the
// persisted identity and state for a flow's terminal stage.
type Atlantic struct {
id string
state streamState
waterId string // used to restore flow
}
// updateWater persists the headwaters state, logging any failure before
// returning it to the caller.
func (at *Atlantic) updateWater(headwaters *Headwaters) error {
	err := updateHeadwaters(headwaters)
	if err != nil {
		logs.Error("update water fail, err:%s", err.Error())
	}
	return err
}
// runSuccess drives the terminal "success" stage of a flow. The switch uses
// fallthrough so a flow restored in any persisted state (init -> running ->
// success) resumes at the right step; the user's Success callback runs in
// the running step and the final state is written through setFlowEnd.
func (at *Atlantic) runSuccess(headwaters *Headwaters, flow AtlanticFlow) {
//logs.Debug("%v run , id :%s", reflect.ValueOf(flow).Elem().Type(), at.id)
// Convert any panic below into a recorded flow failure.
defer func() {
if e := recover(); e != nil {
logs.Error("%v", e)
PrintStack()
_ = setFlowEnd(at.id, stateFail.String(), "got an panic")
}
}()
// Free a slot in the global flow window before doing any work.
at.releaseWnd()
switch at.state {
case stateInit:
if err := setFlowStart(at.id, stateRunning.String()); err != nil {
logs.Error("update state fail, err:%s", err.Error())
return
}
fallthrough
case stateRunning:
// Mark the owning job (looked up by request id) successful, if any.
job, _ := GetJobByRequestId(headwaters.RequestId)
if job != nil {
_ = UpdateJobStatus(job.Id, JobSuccess)
}
if err := flow.Success(headwaters); err != nil {
// NOTE(review): when the callback fails, the fail state written here
// is immediately overwritten by the stateSuccess write below unless
// that write itself errors — confirm an early return is not missing.
if err := setFlowEnd(at.id, stateFail.String(), err.Error()); err != nil {
logs.Error("update state fail, err:%s", err.Error())
return
}
}
if err := at.updateWater(headwaters); err != nil {
logs.Warn("update water fail, err:%s", err.Error())
}
if err := setFlowEnd(at.id, stateSuccess.String(), ""); err != nil {
logs.Error("update state fail, err:%s", err.Error())
return
}
fallthrough
case stateSuccess:
logs.Info("[%s]run to atlantic success", headwaters.RequestId)
default:
logs.Error("invalid basin state:%s", at.state.String())
}
}
// runFail drives the terminal "fail" stage of a flow, mirroring runSuccess:
// fallthrough advances a restored flow (init -> running -> fail), the
// owning job is marked failed, and the user's Fail callback is invoked.
func (at *Atlantic) runFail(headwaters *Headwaters, flow AtlanticFlow) {
logs.Debug("%v run , id :%s, state:%s", reflect.ValueOf(flow).Elem().Type(), at.id, at.state.String())
// Convert any panic below into a recorded flow failure.
defer func() {
if e := recover(); e != nil {
logs.Error("%v", e)
PrintStack()
_ = setFlowEnd(at.id, stateFail.String(), "got an panic")
}
}()
// Free a slot in the global flow window before doing any work.
at.releaseWnd()
switch at.state {
case stateInit:
if err := setFlowStart(at.id, stateRunning.String()); err != nil {
logs.Error("update state fail, err:%s", err.Error())
return
}
fallthrough
case stateRunning:
job, _ := GetJobByRequestId(headwaters.RequestId)
if job != nil {
_ = UpdateJobStatus(job.Id, JobFailed)
}
if err := flow.Fail(headwaters); err != nil {
// NOTE(review): the callback's error message recorded here is later
// overwritten by the empty-reason setFlowEnd below — confirm intended.
if err := setFlowEnd(at.id, stateFail.String(), err.Error()); err != nil {
logs.Error("update state fail, err:%s", err.Error())
}
}
if err := at.updateWater(headwaters); err != nil {
logs.Warn("update water fail, err:%s", err.Error())
}
if err := setFlowEnd(at.id, stateFail.String(), ""); err != nil {
logs.Error("update state fail, err:%s", err.Error())
return
}
fallthrough
case stateFail:
logs.Info("[%s]run to atlantic failed", headwaters.RequestId)
default:
logs.Error("invalid basin state:%s", at.state.String())
}
}
// setId records the flow's persistent id.
func (at *Atlantic) setId(id string) {
at.id = id
}
// getId returns the flow's persistent id.
func (at *Atlantic) getId() string {
return at.id
}
// setState records the current stream state.
func (at *Atlantic) setState(state streamState) {
at.state = state
}
// getState returns the current stream state.
func (at *Atlantic) getState() streamState {
return at.state
}
// setWaterId records the headwaters id used to restore the flow.
func (at *Atlantic) setWaterId(waterId string) {
at.waterId = waterId
}
// getWaterId returns the headwaters id used to restore the flow.
func (at *Atlantic) getWaterId() string {
return at.waterId
}
// releaseWnd frees one slot in the global flow window under its lock.
func (at *Atlantic) releaseWnd() {
flowWnd.Lock()
flowWnd.Dec()
flowWnd.Unlock()
}
|
package main
import (
"crypto/tls"
"flag"
"fmt"
"io/ioutil"
"net"
"time"
"github.com/armon/go-socks5"
"github.com/foomo/htpasswd"
"go.uber.org/zap"
"golang.org/x/crypto/bcrypt"
"golang.org/x/net/context"
"gopkg.in/yaml.v2"
)
// Package-wide structured logger, installed in init().
var logger *zap.Logger
// init sets up a production zap logger.
// NOTE(review): the error from zap.NewProduction is discarded; if it ever
// failed, logger would be nil and later calls would panic — confirm.
func init() {
l, _ := zap.NewProduction()
logger = l
}
// socksAuthenticator implements the socks5 rule set: a CONNECT is allowed
// only when the destination IP matches a configured destination name, the
// port is whitelisted, and (when users are listed) the authenticated user
// is among them.
type socksAuthenticator struct {
Destinations map[string]*Destination
// resolvedNames caches destination name -> IP lookups.
// NOTE(review): a background goroutine in newSocksAuthenticator replaces
// this map while Allow reads it, with no synchronization — data race;
// should be guarded by a mutex or atomic pointer.
resolvedNames map[string][]string
}
// newSocksAuthenticator builds an authenticator for the configured
// destinations, resolving every destination host name up front; a lookup
// failure aborts construction.
func newSocksAuthenticator(destinations map[string]*Destination) (suxx5 *socksAuthenticator, err error) {
suxx5 = &socksAuthenticator{
Destinations: destinations,
}
names := []string{}
for name := range destinations {
names = append(names, name)
}
resolvedNames, errResolveNames := resolveNames(names)
if errResolveNames != nil {
err = errResolveNames
return
}
suxx5.resolvedNames = resolvedNames
// Refresh the DNS cache once, 10 seconds after startup.
// NOTE(review): this fires a single time, not periodically — if a periodic
// refresh was intended this needs a loop/ticker. The write below also
// races with reads in Allow (no lock); see struct comment.
go func() {
time.Sleep(time.Second * 10)
resolvedNames, errResolveNames := resolveNames(names)
if errResolveNames == nil {
suxx5.resolvedNames = resolvedNames
} else {
logger.Warn("could not resolve names: " + errResolveNames.Error())
}
}()
return
}
// resolveNames looks up the addresses of each host name and returns a
// name -> addresses map; it stops at the first lookup error.
func resolveNames(names []string) (map[string][]string, error) {
	resolved := make(map[string][]string, len(names))
	for _, host := range names {
		addrs, err := net.LookupHost(host)
		if err != nil {
			return resolved, err
		}
		resolved[host] = addrs
	}
	return resolved, nil
}
// Allow implements the socks5 RuleSet. A request is allowed when the
// destination IP matches one of the resolved destination names, the port is
// in that destination's whitelist, and — when the destination restricts
// users — the authenticated user is listed. All other requests are denied
// and logged.
func (suxx5 *socksAuthenticator) Allow(ctx context.Context, req *socks5.Request) (newCtx context.Context, allowed bool) {
allowed = false
newCtx = ctx
zapTo := zap.String("to", req.DestAddr.String())
zapUser := zap.String("for", req.AuthContext.Payload["Username"])
for name, ips := range suxx5.resolvedNames {
zapName := zap.String("name", name)
for _, ip := range ips {
if ip == req.DestAddr.IP.String() {
destination, destinationOK := suxx5.Destinations[name]
if destinationOK {
for _, allowedPort := range destination.Ports {
if allowedPort == req.DestAddr.Port {
// No user restriction on this destination: the port match suffices.
if len(destination.Users) == 0 {
allowed = true
}
if !allowed {
userNameInContext, userNameInContextOK := req.AuthContext.Payload["Username"]
if !userNameInContextOK {
// explicit user expected, but not found
logger.Info("denied - no user found", zapName, zapTo)
return
}
// Allow only users named by this destination.
for _, userName := range destination.Users {
if userName == userNameInContext {
allowed = true
break
}
}
if !allowed {
logger.Info(
"denied",
zapName,
zapTo,
zapUser,
)
return
}
}
if allowed {
logger.Info(
"allowed",
zapName,
zapTo,
zapUser,
)
allowed = true
return
}
}
}
}
}
}
}
// Fell through every destination/port check: deny by default.
logger.Info("denied", zapTo, zapUser)
return
}
// Credentials maps a user name to its bcrypt password hash.
type Credentials map[string]string

// Valid reports whether password matches the stored bcrypt hash for user;
// unknown users are rejected.
func (s Credentials) Valid(user, password string) bool {
	hash, found := s[user]
	if !found {
		return false
	}
	return bcrypt.CompareHashAndPassword([]byte(hash), []byte(password)) == nil
}
// must aborts the process with the given comment when err is non-nil.
func must(err error, comment ...interface{}) {
	if err == nil {
		return
	}
	logger.Fatal(fmt.Sprint(comment...), zap.Error(err))
}
// Destination describes one allowed target host: which users may connect
// and on which ports.
type Destination struct {
Users []string // user names allowed; empty means any authenticated user
Ports []int // destination ports accepted for this host
}
// main wires up and runs a TLS-wrapped SOCKS5 proxy:
// flags -> destinations YAML -> htpasswd credentials -> socks5 server ->
// TLS listener. Any setup failure terminates the process.
func main() {
defer logger.Sync()
flagAddr := flag.String("addr", "", "where to listen like 127.0.0.1:8000")
flagHtpasswdFile := flag.String("auth", "", "basic auth file")
flagDestinationsFile := flag.String("destinations", "", "file with destinations config")
flagCert := flag.String("cert", "", "path to server cert.pem")
flagKey := flag.String("key", "", "path to server key.pem")
flag.Parse()
// Load the name -> Destination access rules.
destinationBytes, errReadDestinationBytes := ioutil.ReadFile(*flagDestinationsFile)
must(errReadDestinationBytes, "can not read destinations config")
destinations := map[string]*Destination{}
must(yaml.Unmarshal(destinationBytes, destinations), "can not parse destinations")
// Load user -> bcrypt hash pairs for username/password auth.
passwordHashes, errParsePasswords := htpasswd.ParseHtpasswdFile(*flagHtpasswdFile)
must(errParsePasswords, "basic auth file sucks")
credentials := Credentials(passwordHashes)
suxx5, errSuxx5 := newSocksAuthenticator(destinations)
if errSuxx5 != nil {
panic(errSuxx5)
}
autenticator := socks5.UserPassAuthenticator{Credentials: credentials}
conf := &socks5.Config{
Rules: suxx5,
AuthMethods: []socks5.Authenticator{autenticator},
}
server, err := socks5.New(conf)
if err != nil {
panic(err)
}
logger.Info(
"starting tls server",
zap.String("addr", *flagAddr),
zap.String("cert", *flagCert),
zap.String("key", *flagKey),
)
cert, errLoadKeyPair := tls.LoadX509KeyPair(*flagCert, *flagKey)
if errLoadKeyPair != nil {
logger.Fatal("could not load server key pair", zap.Error(errLoadKeyPair))
}
listener, errListen := tls.Listen("tcp", *flagAddr, &tls.Config{Certificates: []tls.Certificate{cert}})
if errListen != nil {
logger.Fatal(
"could not listen for tcp / tls",
zap.String("addr", *flagAddr),
zap.Error(errListen),
)
}
// Serve blocks; it only returns on a fatal accept error.
logger.Fatal(
"server fucked up",
zap.Error(server.Serve(listener)),
)
}
|
package smfimage
import (
"image/color"
)
// Option configures an smfimage during construction.
type Option func(*smfimage)

// Background selects the background color by name — "black", "white" or
// "transparent"; any other name falls back to black.
func Background(name string) Option {
	return func(s *smfimage) {
		switch name {
		case "white":
			s.backgroundColor = color.White
		case "transparent":
			s.backgroundColor = color.Transparent
		default: // includes "black" and unrecognized names
			s.backgroundColor = color.Black
		}
	}
}
// BeatsInGrid enables drawing beat marks into the grid.
func BeatsInGrid() Option {
return func(s *smfimage) {
s.beatsInGrid = true
}
}
// Curve switches rendering to the curve drawer.
func Curve() Option {
return func(s *smfimage) {
s.drawer = &s.curve
// s.curveOnly = true
/*
s.getImage = getCurveImage
s.curve.TS = [2]uint8{4, 4}
s.getPreHeight = getPreHeightCurve
s._drawBar = drawBarOnImageCurve
s._makeMonoChromeImage = makeMonoChromeImageCurve
s._makePaletteImage = makePaletteImageCurve
s._draw = drawNoteCurve
s.makeHarmonic = makeHarmonicCurve
s.drawBars = drawBarsCurve
s.mkSongPicture = mkSongPictureCurve
s.savePNG = SavePNGCurve
*/
}
}
/*
func Radius(r int) Option {
return func(s *smfimage) {
s.curveRadius = r
}
}
*/
// SingleBar restricts rendering to a single bar.
func SingleBar() Option {
return func(s *smfimage) {
s.singleBar = true
}
}
// Monochrome renders in monochrome instead of color.
func Monochrome() Option {
return func(s *smfimage) {
s.monochrome = true
}
}
// NoBackground disables the background fill entirely.
func NoBackground() Option {
return func(s *smfimage) {
s.backgroundColor = nil
}
}
// NoBarLines suppresses drawing of bar lines.
func NoBarLines() Option {
return func(s *smfimage) {
s.noBarLines = true
}
}
// BaseNote sets the reference/base note
// smfimage.C , smfimage.CSharp etc.
func BaseNote(n Note) Option {
return func(s *smfimage) {
s.baseNote = n
s.baseNoteSet = true
}
}
// Height of 32thnote in pixel (default = 4)
func Height(height int) Option {
return func(s *smfimage) {
s.noteHeight = height
}
}
// Width of 32thnote in pixel (default = 4)
func Width(width int) Option {
return func(s *smfimage) {
s.noteWidth = width
}
}
// TrackBorder in pixel (default = 2)
func TrackBorder(border int) Option {
return func(s *smfimage) {
s.trackBorder = border
}
}
// TrackOrder sets the order in which tracks are rendered.
func TrackOrder(order ...int) Option {
return func(s *smfimage) {
s.trackOrder = order
}
}
// SkipTracks excludes the given track numbers from rendering.
func SkipTracks(tracks ...int) Option {
return func(s *smfimage) {
for _, tr := range tracks {
s.skipTracks[tr] = true
}
}
}
// Colors installs a custom note -> color mapping.
func Colors(cm ColorMapper) Option {
return func(s *smfimage) {
s.colorMapper = cm
}
}
/*
// TODO: implement
func DrumTracks(tracks ...uint16) Option {
return func(s *SMF) {
for _, tr := range tracks {
s.drumTracks[tr] = true
}
}
}
*/
|
// Copyright 2019 Radiation Detection and Imaging (RDI), LLC
// Use of this source code is governed by the BSD 3-clause
// license that can be found in the LICENSE file.
package data
import (
"github.com/rditech/rdi-live/model/rdi/currentmode"
"github.com/proio-org/go-proio"
)
// CorrelateCmEvent computes, for every "Mapped" current-mode frame in the
// event, the product over all axis pairs of the squared correlation
// coefficients of the axes' per-sample sums, and stores the result in
// frame.Correlation. Frames with no samples are left untouched.
func CorrelateCmEvent(event *proio.Event) {
for _, entryId := range event.TaggedEntries("Mapped") {
frame, ok := event.GetEntry(entryId).(*currentmode.Frame)
if !ok {
continue
}
nSamples := len(frame.Sample)
if nSamples == 0 {
continue
}
// Accumulate per-axis sums and pairwise sum-products over all samples.
nAxes := len(frame.Sample[0].Axis)
sum := make([]float64, nAxes)
prodSum := make([][]float64, nAxes)
cov := make([][]float64, nAxes)
for i := 0; i < nAxes; i++ {
prodSum[i] = make([]float64, nAxes)
cov[i] = make([]float64, nAxes)
}
for i := 0; i < nSamples; i++ {
sample := frame.Sample[i]
for j := 0; j < nAxes; j++ {
axisJSum := float64(sample.Axis[j].Sum)
sum[j] += axisJSum
for k := j; k < nAxes; k++ {
axisKSum := float64(sample.Axis[k].Sum)
prodSum[j][k] += axisJSum * axisKSum
}
}
}
// Unnormalized upper-triangular covariance: sum(xy) - sum(x)*sum(y)/n.
for i := 0; i < nAxes; i++ {
for j := i; j < nAxes; j++ {
cov[i][j] = prodSum[i][j] - sum[i]*sum[j]/float64(nSamples)
}
}
// Product of squared correlation coefficients over all distinct pairs.
// NOTE(review): a zero-variance axis makes cov[i][i] zero and yields
// NaN/Inf here — confirm inputs exclude constant axes.
corr := float64(1.0)
for i := 0; i < nAxes; i++ {
for j := i + 1; j < nAxes; j++ {
corr *= cov[i][j] * cov[i][j] / (cov[i][i] * cov[j][j])
}
}
frame.Correlation = float32(corr)
}
}
// Correlator is a streaming stage that computes frame correlations for a
// bounded window of frames and assigns Default to the rest.
type Correlator struct {
NFrames int // >0: correlate only the first NFrames frames; <0: skip the first -NFrames; 0: correlate all
Default float32 // correlation assigned to frames outside the window
}
// CorrelateCmEvent consumes events from input, computes the correlation of
// each "Mapped" frame inside the NFrames window (same math as the package-
// level CorrelateCmEvent), assigns c.Default to frames outside the window,
// and forwards every event to output.
func (c *Correlator) CorrelateCmEvent(input <-chan *proio.Event, output chan<- *proio.Event) {
// i counts Mapped frames across all events.
// NOTE(review): the nAxes/nSamples loops below reuse the name i, shadowing
// this counter — behavior is correct but a rename would aid readability.
i := 0
for event := range input {
for _, entryId := range event.TaggedEntries("Mapped") {
i++
frame, ok := event.GetEntry(entryId).(*currentmode.Frame)
if !ok {
continue
}
// Window test: all frames (0), first NFrames (>0), or all after the
// first -NFrames (<0).
if c.NFrames == 0 || i <= c.NFrames || (c.NFrames < 0 && i > -c.NFrames) {
nSamples := len(frame.Sample)
if nSamples == 0 {
continue
}
// Accumulate per-axis sums and pairwise sum-products.
nAxes := len(frame.Sample[0].Axis)
sum := make([]float64, nAxes)
prodSum := make([][]float64, nAxes)
cov := make([][]float64, nAxes)
for i := 0; i < nAxes; i++ {
prodSum[i] = make([]float64, nAxes)
cov[i] = make([]float64, nAxes)
}
for i := 0; i < nSamples; i++ {
sample := frame.Sample[i]
for j := 0; j < nAxes; j++ {
axisJSum := float64(sample.Axis[j].Sum)
sum[j] += axisJSum
for k := j; k < nAxes; k++ {
axisKSum := float64(sample.Axis[k].Sum)
prodSum[j][k] += axisJSum * axisKSum
}
}
}
// Unnormalized upper-triangular covariance.
for i := 0; i < nAxes; i++ {
for j := i; j < nAxes; j++ {
cov[i][j] = prodSum[i][j] - sum[i]*sum[j]/float64(nSamples)
}
}
// Product of squared correlation coefficients over all pairs.
corr := float64(1.0)
for i := 0; i < nAxes; i++ {
for j := i + 1; j < nAxes; j++ {
corr *= cov[i][j] * cov[i][j] / (cov[i][i] * cov[j][j])
}
}
frame.Correlation = float32(corr)
continue
}
// Outside the window: use the configured default.
frame.Correlation = c.Default
}
output <- event
}
}
|
package main
import (
"encoding/json"
"log"
"net/http"
"strings"
"sync"
"sync/atomic"
"github.com/marema31/namecheck/checker"
"github.com/marema31/namecheck/github"
"github.com/marema31/namecheck/twitter"
)
// web is the HTTP client used for availability checks; a package variable
// so tests can substitute a stub transport.
var web = http.DefaultClient
// usageCount numbers requests globally; updated via sync/atomic.
var usageCount uint32
// nameChecked counts lookups per username; guarded by mu.
var nameChecked map[string]uint32 = map[string]uint32{}
// mu guards nameChecked.
var mu sync.Mutex
type availability struct {
Platform string `json:socialNetwork`
Valid bool
Available bool
}
// response is the JSON document returned for one username lookup.
type response struct {
Username string // the username that was checked
Requested uint32 // how many times this username has been checked
RequestID uint32 // global sequence number of this request
Availability []availability // one entry per checker
}
// checkers lists the platform checkers consulted for every request: 13
// identical {Twitter, Github} pairs — presumably to exercise the fan-out;
// the original spelled out all 26 literals by hand.
var checkers []checker.Checker = makeCheckers(13)

// makeCheckers returns n {Twitter, Github} checker pairs.
func makeCheckers(n int) []checker.Checker {
	cs := make([]checker.Checker, 0, 2*n)
	for i := 0; i < n; i++ {
		cs = append(cs, &twitter.Twitter{}, &github.Github{})
	}
	return cs
}
// checkUser runs one checker for username and sends its availability result
// on ch. It always sends exactly one message so the collector receives a
// result per checker before the WaitGroup releases.
func checkUser(wg *sync.WaitGroup, ch chan availability, username string, c checker.Checker) {
	defer wg.Done()
	var message availability
	message.Platform = c.Name()
	if c.Check(username) {
		message.Valid = true
		available, err := c.IsAvailable(web, username)
		if err != nil {
			// The original message hard-coded "Twitter" even when the
			// checker was GitHub; report the actual platform. It also
			// called log.Fatal on a wrapped error, which would kill the
			// whole server from a request goroutine — log instead and
			// report the platform as unavailable (available stays false).
			log.Printf("No way to contact %s: %s", c.Name(), err)
			if wrapped, ok := err.(interface{ Unwrap() error }); ok {
				log.Printf("cause: %s", wrapped.Unwrap())
			}
		}
		message.Available = available
	}
	ch <- message
}
// sayHello handles "/<username>": it fans out one goroutine per checker,
// gathers the availability results, and writes them as indented JSON.
func sayHello(w http.ResponseWriter, r *http.Request) {
	username := strings.TrimPrefix(r.URL.Path, "/")
	if username == "favicon.ico" {
		return
	}
	// Update AND read the per-user counter inside the critical section; the
	// original read nameChecked[username] after Unlock, racing with
	// concurrent requests for the same name.
	mu.Lock()
	nameChecked[username]++
	requested := nameChecked[username]
	mu.Unlock()
	count := atomic.AddUint32(&usageCount, 1)
	message := response{Username: username, RequestID: count, Requested: requested}
	var wg sync.WaitGroup
	ch := make(chan availability, 4)
	for _, c := range checkers {
		wg.Add(1)
		go checkUser(&wg, ch, username, c)
	}
	// Close ch only after every checkUser has sent. Done in a goroutine:
	// the buffer (4) is smaller than the number of checkers, so waiting
	// inline before draining would deadlock.
	go func() {
		wg.Wait()
		close(ch)
	}()
	for result := range ch {
		message.Availability = append(message.Availability, result)
	}
	messageJSON, err := json.MarshalIndent(message, "", " ")
	if err != nil {
		// The original discarded this error and wrote empty output.
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.Write(messageJSON)
}
// main registers the lookup handler and serves on :8080, panicking if the
// listener cannot start.
func main() {
	http.HandleFunc("/", sayHello)
	err := http.ListenAndServe(":8080", nil)
	if err != nil {
		panic(err)
	}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.