text
stringlengths 11
4.05M
|
|---|
package main
import (
"fmt"
"crypto/sha1"
"encoding/hex"
)
func main() {
}
|
// Copyright 2020 The LUCI Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gobmap
import (
"context"
"strings"
"testing"
"go.chromium.org/luci/gae/impl/memory"
"go.chromium.org/luci/gae/service/datastore"
pb "go.chromium.org/luci/cv/api/config/v2"
"go.chromium.org/luci/cv/internal/config"
. "github.com/smartystreets/goconvey/convey"
)
// TestGobMapUpdateAndLookup exercises Update and Lookup together against an
// in-memory datastore: Update stores the mapping derived from a project's
// config groups, and the lookup helper (below) reports which
// project/config-group pairs match a given host/repo/ref triple.
//
// NOTE(review): the top-level Convey blocks mutate shared state (tc and the
// datastore) sequentially, so their order is significant.
func TestGobMapUpdateAndLookup(t *testing.T) {
	t.Parallel()
	// Fully consistent, auto-indexed in-memory datastore so that writes made
	// by Update are immediately visible to the queries issued by Lookup.
	ctx := memory.Use(context.Background())
	datastore.GetTestable(ctx).AutoIndex(true)
	datastore.GetTestable(ctx).Consistent(true)
	// First set up an example project with two config groups to show basic
	// regular usage; there is a "main" group which matches a main ref, and
	// another fallback group that matches many other refs, but not all.
	tc := config.TestController{}
	tc.Create(ctx, "chromium", &pb.Config{
		ConfigGroups: []*pb.ConfigGroup{
			{
				Name: "group_main",
				Gerrit: []*pb.ConfigGroup_Gerrit{
					{
						Url: "https://cr-review.gs.com/",
						Projects: []*pb.ConfigGroup_Gerrit_Project{
							{
								Name:      "cr/src",
								RefRegexp: []string{"refs/heads/main"},
							},
						},
					},
				},
			},
			{
				// This is the fallback group, so "refs/heads/main" should be
				// handled by the main group but not this one, even though it
				// matches the include regexp list.
				Name:     "group_other",
				Fallback: pb.Toggle_YES,
				Gerrit: []*pb.ConfigGroup_Gerrit{
					{
						Url: "https://cr-review.gs.com/",
						Projects: []*pb.ConfigGroup_Gerrit_Project{
							{
								Name:             "cr/src",
								RefRegexp:        []string{"refs/heads/.*"},
								RefRegexpExclude: []string{"refs/heads/123"},
							},
						},
					},
				},
			},
		},
	})
	Convey("Update with nonexistent project stores nothing", t, func() {
		So(Update(ctx, "bogus"), ShouldBeNil)
		mps := []*mapPart{}
		q := datastore.NewQuery(mapKind)
		So(datastore.GetAll(ctx, q, &mps), ShouldBeNil)
		So(mps, ShouldBeEmpty)
	})
	Convey("Lookup nonexistent project returns empty result", t, func() {
		So(
			lookup(ctx, "foo-review.gs.com", "repo", "refs/heads/main"),
			ShouldBeEmpty)
	})
	Convey("Basic behavior with one project", t, func() {
		So(Update(ctx, "chromium"), ShouldBeNil)
		Convey("Lookup with main ref returns main group", func() {
			// Note that even though the other config group also matches,
			// only the main config group is applicable since the other one
			// is the fallback config group.
			So(
				lookup(ctx, "cr-review.gs.com", "cr/src", "refs/heads/main"),
				ShouldResemble,
				map[string][]string{
					"chromium": {"group_main"},
				})
		})
		Convey("Lookup with other ref returns other group", func() {
			// refs/heads/something matches other group, but not main group.
			So(
				lookup(ctx, "cr-review.gs.com", "cr/src", "refs/heads/something"),
				ShouldResemble,
				map[string][]string{
					"chromium": {"group_other"},
				})
		})
		Convey("Lookup excluded ref returns nothing", func() {
			// refs/heads/123 is specifically excluded from the "other" group,
			// and also not included in main group.
			So(
				lookup(ctx, "cr-review.gs.com", "cr/src", "refs/heads/123"),
				ShouldBeEmpty)
		})
		Convey("For a ref with no matching groups the result is empty", func() {
			// If a ref doesn't match any include patterns then no groups
			// match.
			So(
				lookup(ctx, "cr-review.gs.com", "cr/src", "refs/branch-heads/beta"),
				ShouldBeEmpty)
		})
	})
	Convey("Lookup again returns nothing for disabled project", t, func() {
		// Simulate deleting project. Projects that are deleted are first
		// disabled in practice.
		tc.Disable(ctx, "chromium")
		So(Update(ctx, "chromium"), ShouldBeNil)
		So(
			lookup(ctx, "cr-review.gs.com", "cr/src", "refs/heads/main"),
			ShouldBeEmpty)
	})
	Convey("With two matches and no fallback...", t, func() {
		// Simulate the project being updated so that the "other" group is no
		// longer a fallback group. Now some refs will match both groups.
		tc.Enable(ctx, "chromium")
		tc.Update(ctx, "chromium", &pb.Config{
			ConfigGroups: []*pb.ConfigGroup{
				{
					Name: "group_main",
					Gerrit: []*pb.ConfigGroup_Gerrit{
						{
							Url: "https://cr-review.gs.com/",
							Projects: []*pb.ConfigGroup_Gerrit_Project{
								{
									Name:      "cr/src",
									RefRegexp: []string{"refs/heads/main"},
								},
							},
						},
					},
				},
				{
					Name: "group_other",
					Gerrit: []*pb.ConfigGroup_Gerrit{
						{
							Url: "https://cr-review.gs.com/",
							Projects: []*pb.ConfigGroup_Gerrit_Project{
								{
									Name:             "cr/src",
									RefRegexp:        []string{"refs/heads/.*"},
									RefRegexpExclude: []string{"refs/heads/123"},
								},
							},
						},
					},
					Fallback: pb.Toggle_NO,
				},
			},
		})
		Convey("Lookup main ref matching two refs", func() {
			// This adds coverage for matching two groups.
			So(Update(ctx, "chromium"), ShouldBeNil)
			So(
				lookup(ctx, "cr-review.gs.com", "cr/src", "refs/heads/main"),
				ShouldResemble,
				map[string][]string{"chromium": {"group_main", "group_other"}})
		})
	})
	Convey("With two repos in main group and no other group...", t, func() {
		// This update includes both additions and removals,
		// and also tests multiple hosts.
		tc.Update(ctx, "chromium", &pb.Config{
			ConfigGroups: []*pb.ConfigGroup{
				{
					Name: "group_main",
					Gerrit: []*pb.ConfigGroup_Gerrit{
						{
							Url: "https://cr-review.gs.com/",
							Projects: []*pb.ConfigGroup_Gerrit_Project{
								{
									Name:      "cr/src",
									RefRegexp: []string{"refs/heads/main"},
								},
							},
						},
						{
							Url: "https://cr2-review.gs.com/",
							Projects: []*pb.ConfigGroup_Gerrit_Project{
								{
									Name:      "cr2/src",
									RefRegexp: []string{"refs/heads/main"},
								},
							},
						},
					},
				},
			},
		})
		So(Update(ctx, "chromium"), ShouldBeNil)
		Convey("main group matches two different hosts", func() {
			So(
				lookup(ctx, "cr-review.gs.com", "cr/src", "refs/heads/main"),
				ShouldResemble,
				map[string][]string{"chromium": {"group_main"}})
			So(
				lookup(ctx, "cr2-review.gs.com", "cr2/src", "refs/heads/main"),
				ShouldResemble,
				map[string][]string{"chromium": {"group_main"}})
		})
		Convey("other group no longer exists", func() {
			So(
				lookup(ctx, "cr-review.gs.com", "cr/src", "refs/heads/something"),
				ShouldBeEmpty)
		})
	})
	Convey("With another project matching the same ref...", t, func() {
		// Below another project is created that watches the same repo and ref.
		// This tests multiple projects matching for one Lookup.
		tc.Create(ctx, "foo", &pb.Config{
			ConfigGroups: []*pb.ConfigGroup{
				{
					Name: "group_foo",
					Gerrit: []*pb.ConfigGroup_Gerrit{
						{
							Url: "https://cr-review.gs.com/",
							Projects: []*pb.ConfigGroup_Gerrit_Project{
								{
									Name:      "cr/src",
									RefRegexp: []string{"refs/heads/main"},
								},
							},
						},
					},
				},
			},
		})
		So(Update(ctx, "foo"), ShouldBeNil)
		Convey("main group matches two different projects", func() {
			So(
				lookup(ctx, "cr-review.gs.com", "cr/src", "refs/heads/main"),
				ShouldResemble,
				map[string][]string{
					"chromium": {"group_main"},
					"foo":      {"group_foo"},
				})
		})
	})
}
// lookup is a test helper that runs Lookup for the given host/repo/ref and
// flattens the result into a map of project name -> config group names.
// IDs have the form "x/y" (the test asserts exactly two segments) and only
// the second segment — the group name — is kept.
func lookup(ctx context.Context, host, repo, ref string) map[string][]string {
	result := map[string][]string{}
	ac, err := Lookup(ctx, host, repo, ref)
	So(err, ShouldBeNil)
	for _, project := range ac.Projects {
		var groupNames []string
		for _, groupID := range project.ConfigGroupIds {
			pieces := strings.Split(groupID, "/")
			So(len(pieces), ShouldEqual, 2)
			groupNames = append(groupNames, pieces[1])
		}
		result[project.Name] = groupNames
	}
	return result
}
|
package main
import (
"fmt"
"net/http"
"strconv"
"strings"
"time"
)
// printRequest writes a brief one-line summary (method and path) followed by
// the request's default %v dump to the debug logger.
func printRequest(r *http.Request) {
	logD.Printf(">> [%s %s]", r.Method, r.URL.Path)
	logD.Printf(">> %v", r)
}
// loggingResponseWriter wraps an http.ResponseWriter and records the most
// recent body written and the status code set, so handlers' responses can be
// inspected/logged after the fact.
type loggingResponseWriter struct {
	http.ResponseWriter
	response   string // last body chunk passed to Write
	statusCode int    // status code passed to WriteHeader (0 if never called)
}

// Write records the response bytes as a string, then forwards them to the
// wrapped writer. Note: only the latest Write call is retained.
func (lrw *loggingResponseWriter) Write(response []byte) (int, error) {
	lrw.response = string(response)
	return lrw.ResponseWriter.Write(response)
}

// WriteHeader records the status code, then forwards it to the wrapped writer.
func (lrw *loggingResponseWriter) WriteHeader(statusCode int) {
	lrw.statusCode = statusCode
	lrw.ResponseWriter.WriteHeader(statusCode)
}
func briefDescr(req *http.Request) string {
return fmt.Sprintf("[%s %s]", req.Method, req.URL.Path)
}
// httpError appends errData to errMsg, logs the combined message to the error
// logger (plus the status to the debug logger), and replies to the client
// with that message and the given HTTP status code.
func httpError(errMsg string, errData interface{}, status int, w http.ResponseWriter) {
	errMsg = fmt.Sprintf("%s: %v", errMsg, errData)
	logE.Println(errMsg)
	logD.Printf("httpError: status: %d", status)
	// http.Error writes the status header itself, so the explicit
	// WriteHeader call below is unnecessary and stays disabled.
	//w.WriteHeader(status)
	http.Error(w, errMsg, status)
}
// internalError replies with 500 Internal Server Error via httpError.
func internalError(errMsg string, errData interface{}, w http.ResponseWriter) {
	httpError(errMsg, errData, http.StatusInternalServerError, w)
}

// badRequest replies with 400 Bad Request via httpError.
func badRequest(errMsg string, errData interface{}, w http.ResponseWriter) {
	httpError(errMsg, errData, http.StatusBadRequest, w)
}

// forbidden replies with 403 Forbidden via httpError.
func forbidden(errMsg string, errData interface{}, w http.ResponseWriter) {
	httpError(errMsg, errData, http.StatusForbidden, w)
}
func weekdays(today time.Time) (days [7]string) {
for decrIdx := 6; decrIdx >= 0; decrIdx-- {
d := today.AddDate(0, 0, -decrIdx)
days[6-decrIdx] = d.Format("Jan 2\nMon")
}
return
}
// parseIdFromPathTail extracts the numeric id from the final segment of a
// slash-separated URL path (e.g. "/categories/42" -> 42). It fails when the
// path has fewer than two segments or the last segment is not an integer.
func parseIdFromPathTail(urlPath string) (catId int64, err error) {
	segments := strings.Split(urlPath, "/")
	if len(segments) < 2 {
		return 0, fmt.Errorf("number of path parts is %d", len(segments))
	}
	tail := segments[len(segments)-1]
	id, parseErr := strconv.ParseInt(tail, 10, 64)
	if parseErr != nil {
		return 0, fmt.Errorf("convert category id to number: %v", parseErr)
	}
	return id, nil
}
|
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package hostpath
import (
"errors"
"fmt"
"os"
"path/filepath"
"github.com/golang/glog"
)
// hostPath holds the configuration and the gRPC sub-servers of a hostpath
// CSI driver instance, as assembled by NewHostPathDriver and served by Run.
type hostPath struct {
	name              string // CSI driver name (required)
	nodeID            string // ID of the node this driver runs on (required)
	version           string // driver version reported by the identity server
	endpoint          string // endpoint the gRPC server listens on (required)
	maxVolumesPerNode int64  // limit passed through to the node server
	ids               *identityServer
	ns                *nodeServer
}
// vendorVersion is the reported driver version; NewHostPathDriver overrides
// it when a non-empty version argument is supplied.
var vendorVersion = "dev"

// Directory where data for volumes are persisted.
const dataRoot = "/csi-data-dir/volumes"
// NewHostPathDriver validates the supplied settings and constructs a
// hostPath driver instance. A non-empty version overrides the package-level
// vendorVersion, and the volume data directory is created if missing.
func NewHostPathDriver(driverName, nodeID, endpoint string, maxVolumesPerNode int64, version string) (*hostPath, error) {
	// Reject missing required settings up front, in the same order as before.
	switch {
	case driverName == "":
		return nil, errors.New("no driver name provided")
	case nodeID == "":
		return nil, errors.New("no node id provided")
	case endpoint == "":
		return nil, errors.New("no driver endpoint provided")
	}
	if version != "" {
		vendorVersion = version
	}
	// Make sure the directory backing volume data exists.
	if err := os.MkdirAll(dataRoot, 0750); err != nil {
		return nil, fmt.Errorf("failed to create dataRoot: %v", err)
	}
	glog.Infof("Driver: %v ", driverName)
	glog.Infof("Version: %s", vendorVersion)
	driver := &hostPath{
		name:              driverName,
		nodeID:            nodeID,
		version:           vendorVersion,
		endpoint:          endpoint,
		maxVolumesPerNode: maxVolumesPerNode,
	}
	return driver, nil
}
// Run creates the identity and node gRPC servers from the driver's settings,
// starts serving them on the configured endpoint, and blocks until the
// server stops.
func (hp *hostPath) Run() {
	// Create GRPC servers
	hp.ids = NewIdentityServer(hp.name, hp.version)
	hp.ns = NewNodeServer(hp.nodeID, hp.maxVolumesPerNode)
	s := NewNonBlockingGRPCServer()
	s.Start(hp.endpoint, hp.ids, hp.ns)
	s.Wait()
}
// getVolumePath returns the canonical path for hostpath volume data,
// i.e. <dataRoot>/<volID>.
func getVolumePath(volID string) string {
	return filepath.Join(dataRoot, volID)
}
|
package stringcase_test
import (
"github.com/adamluzsi/frameless/pkg/stringcase"
"github.com/adamluzsi/testcase"
"github.com/adamluzsi/testcase/assert"
"github.com/adamluzsi/testcase/pp"
"github.com/adamluzsi/testcase/random"
"strings"
"testing"
)
// TestToSnake is a table test covering stringcase.ToSnake conversions from
// snake, pascal, camel, dot, kebab and mixed-case inputs (including UTF-8).
func TestToSnake(t *testing.T) {
	type TC struct {
		In  string
		Out string
	}
	testcase.TableTest(t, map[string]TC{
		"empty string":                    {In: "", Out: ""},
		"one character":                   {In: "A", Out: "a"},
		"snake":                           {In: "hello_world", Out: "hello_world"},
		"pascal":                          {In: "HelloWorld", Out: "hello_world"},
		"pascal with multiple words":      {In: "HTTPFoo", Out: "http_foo"},
		"camel":                           {In: "helloWorld", Out: "hello_world"},
		"upper":                           {In: "HELLO WORLD", Out: "hello_world"},
		"screaming snake":                 {In: "HELLO_WORLD", Out: "hello_world"},
		"dot case":                        {In: "hello.world", Out: "hello_world"},
		"kebab case":                      {In: "hello-world", Out: "hello_world"},
		"handles utf-8 characters":        {In: "Héllo Wörld", Out: "héllo_wörld"},
		"mixture of Title and lower case": {In: "the Hello World", Out: "the_hello_world"},
	}, func(t *testcase.T, tc TC) {
		t.Must.Equal(tc.Out, stringcase.ToSnake(tc.In), "original:", assert.Message(pp.Format(tc.In)))
	})
}
// BenchmarkToSnake measures ToSnake over random 10-character strings drawn
// from lowercase letters plus "_.-" separators.
// NOTE(review): the charset concatenates ToLower(CharsetAlpha()) twice; one
// of the two was presumably meant to be the upper-case set — confirm.
func BenchmarkToSnake(b *testing.B) {
	rnd := random.New(random.CryptoSeed{})
	fixtures := random.Slice(b.N, func() string {
		return rnd.StringNC(10,
			strings.ToLower(random.CharsetAlpha())+
				strings.ToLower(random.CharsetAlpha())+
				"_.-")
	})
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		stringcase.ToSnake(fixtures[i])
	}
}
// TestIsSnake is a table test covering stringcase.IsSnake: only all-lower
// underscore-separated identifiers (including UTF-8 letters) qualify.
func TestIsSnake(t *testing.T) {
	type TC struct {
		In  string
		Out bool
	}
	testcase.TableTest(t, map[string]TC{
		"snake case":                       {In: "hello_world", Out: true},
		"snake case with utf-8 characters": {In: "héllo_wörld", Out: true},
		"pascal case":                      {In: "HelloWorld", Out: false},
		"pascal case with abbrevation":     {In: "HTTPFoo", Out: false},
		"mixed case with number suffix + pascal case": {In: "HelloWorld42", Out: false},
		"pascal case with utf-8 characters":           {In: "HélloWörld", Out: false},
		"camel case":        {In: "helloWorld", Out: false},
		"title snake case":  {In: "Hello_World", Out: false},
		"title dot case":    {In: "Hello.World", Out: false},
		"title kebab case":  {In: "Hello-World", Out: false},
		"mixed case with number prefix + pascal case": {In: "1HelloWorld", Out: false},
	}, func(t *testcase.T, tc TC) {
		t.Must.Equal(tc.Out, stringcase.IsSnake(tc.In), "input:", assert.Message(pp.Format(tc.In)))
	})
}
// TestIsScreamingSnake is a table test covering stringcase.IsScreamingSnake.
// NOTE(review): the first two case names say "snake case" but the inputs are
// SCREAMING_SNAKE — the labels were copied from TestIsSnake; consider
// renaming them for clarity.
func TestIsScreamingSnake(t *testing.T) {
	type TC struct {
		In  string
		Out bool
	}
	testcase.TableTest(t, map[string]TC{
		"snake case":                       {In: "HELLO_WORLD", Out: true},
		"snake case with utf-8 characters": {In: "HÉLLO_WÖRLD", Out: true},
		"pascal case":                      {In: "HelloWorld", Out: false},
		"pascal case with abbrevation":     {In: "HTTPFoo", Out: false},
		"mixed case with number suffix + pascal case": {In: "HelloWorld42", Out: false},
		"pascal case with utf-8 characters":           {In: "HélloWörld", Out: false},
		"camel case":        {In: "helloWorld", Out: false},
		"title snake case":  {In: "Hello_World", Out: false},
		"title dot case":    {In: "Hello.World", Out: false},
		"title kebab case":  {In: "Hello-World", Out: false},
		"mixed case with number prefix + pascal case": {In: "1HelloWorld", Out: false},
	}, func(t *testcase.T, tc TC) {
		t.Must.Equal(tc.Out, stringcase.IsScreamingSnake(tc.In), "input:", assert.Message(pp.Format(tc.In)))
	})
}
// TestToScreamingSnake is a table test covering stringcase.ToScreamingSnake
// conversions from snake, pascal, camel, dot, kebab and mixed-case inputs.
func TestToScreamingSnake(t *testing.T) {
	type TC struct {
		In  string
		Out string
	}
	testcase.TableTest(t, map[string]TC{
		"empty string":                    {In: "", Out: ""},
		"one character":                   {In: "a", Out: "A"},
		"snake":                           {In: "hello_world", Out: "HELLO_WORLD"},
		"pascal":                          {In: "HelloWorld", Out: "HELLO_WORLD"},
		"pascal with multiple words":      {In: "HTTPFoo", Out: "HTTP_FOO"},
		"camel":                           {In: "helloWorld", Out: "HELLO_WORLD"},
		"upper":                           {In: "HELLO WORLD", Out: "HELLO_WORLD"},
		"screaming snake":                 {In: "HELLO_WORLD", Out: "HELLO_WORLD"},
		"dot case":                        {In: "hello.world", Out: "HELLO_WORLD"},
		"kebab case":                      {In: "hello-world", Out: "HELLO_WORLD"},
		"handles utf-8 characters":        {In: "Héllo Wörld", Out: "HÉLLO_WÖRLD"},
		"mixture of Title and lower case": {In: "the Hello World", Out: "THE_HELLO_WORLD"},
	}, func(t *testcase.T, tc TC) {
		t.Must.Equal(tc.Out, stringcase.ToScreamingSnake(tc.In), "original:", assert.Message(pp.Format(tc.In)))
	})
}
// TestIsPascal is a table test covering stringcase.IsPascal: inputs must
// start with an upper-case letter and contain no separator characters.
func TestIsPascal(t *testing.T) {
	type TC struct {
		In  string
		Out bool
	}
	testcase.TableTest(t, map[string]TC{
		"pascal case":                  {In: "HelloWorld", Out: true},
		"pascal case with abbrevation": {In: "HTTPFoo", Out: true},
		"mixed case with number suffix + pascal case": {In: "HelloWorld42", Out: true},
		"pascal case with utf-8 characters":           {In: "HélloWörld", Out: true},
		"camel case":        {In: "helloWorld", Out: false},
		"title snake case":  {In: "Hello_World", Out: false},
		"title dot case":    {In: "Hello.World", Out: false},
		"title kebab case":  {In: "Hello-World", Out: false},
		"mixed case with number prefix + pascal case": {In: "1HelloWorld", Out: false},
	}, func(t *testcase.T, tc TC) {
		t.Must.Equal(tc.Out, stringcase.IsPascal(tc.In), "input:", assert.Message(pp.Format(tc.In)))
	})
}
// TestToPascal is a table test covering stringcase.ToPascal conversions from
// snake, camel, dot, kebab and mixed-case inputs. Note abbreviations like
// "HTTP" are preserved as-is.
func TestToPascal(t *testing.T) {
	type TC struct {
		In  string
		Out string
	}
	testcase.TableTest(t, map[string]TC{
		"empty string":                    {In: "", Out: ""},
		"one upper character":             {In: "A", Out: "A"},
		"one lower character":             {In: "a", Out: "A"},
		"snake":                           {In: "hello_world", Out: "HelloWorld"},
		"pascal":                          {In: "HelloWorld", Out: "HelloWorld"},
		"pascal with multiple words":      {In: "HTTPFoo", Out: "HTTPFoo"},
		"camel":                           {In: "helloWorld", Out: "HelloWorld"},
		"screaming snake":                 {In: "HELLO_WORLD", Out: "HelloWorld"},
		"dot case":                        {In: "hello.world", Out: "HelloWorld"},
		"kebab case":                      {In: "hello-world", Out: "HelloWorld"},
		"handles utf-8 characters":        {In: "Héllo Wörld", Out: "HélloWörld"},
		"mixture of Title and lower case": {In: "the Hello World", Out: "TheHelloWorld"},
	}, func(t *testcase.T, tc TC) {
		t.Must.Equal(tc.Out, stringcase.ToPascal(tc.In), "original:", assert.Message(pp.Format(tc.In)))
	})
}
// TestIsCamel is a table test covering stringcase.IsCamel: inputs must start
// with a lower-case letter and contain no separator characters.
func TestIsCamel(t *testing.T) {
	type TC struct {
		In  string
		Out bool
	}
	testcase.TableTest(t, map[string]TC{
		"camel case":                       {In: "helloWorld", Out: true},
		"camel case with utf-8 characters": {In: "hélloWörld", Out: true},
		"pascal case":                      {In: "HelloWorld", Out: false},
		"pascal case with abbrevation":     {In: "HTTPFoo", Out: false},
		"mixed case with number suffix + pascal case": {In: "HelloWorld42", Out: false},
		"pascal case with utf-8 characters":           {In: "HélloWörld", Out: false},
		"title snake case": {In: "Hello_World", Out: false},
		"title dot case":   {In: "Hello.World", Out: false},
		"title kebab case": {In: "Hello-World", Out: false},
		"mixed case with number prefix + pascal case": {In: "1HelloWorld", Out: false},
	}, func(t *testcase.T, tc TC) {
		t.Must.Equal(tc.Out, stringcase.IsCamel(tc.In), "input:", assert.Message(pp.Format(tc.In)))
	})
}
// TestToCamel is a table test covering stringcase.ToCamel conversions from
// snake, pascal, dot, kebab and mixed-case inputs; leading abbreviations are
// lower-cased ("HTTPFoo" -> "httpFoo").
func TestToCamel(t *testing.T) {
	type TC struct {
		In  string
		Out string
	}
	testcase.TableTest(t, map[string]TC{
		"empty string":                    {In: "", Out: ""},
		"one upper character":             {In: "A", Out: "a"},
		"one lower character":             {In: "a", Out: "a"},
		"snake":                           {In: "hello_world", Out: "helloWorld"},
		"pascal":                          {In: "HelloWorld", Out: "helloWorld"},
		"pascal with multiple words v1":   {In: "HTTPFoo", Out: "httpFoo"},
		"pascal with multiple words v2":   {In: "VFoo", Out: "vFoo"},
		"camel":                           {In: "helloWorld", Out: "helloWorld"},
		"screaming snake":                 {In: "HELLO_WORLD", Out: "helloWorld"},
		"dot case":                        {In: "hello.world", Out: "helloWorld"},
		"kebab case":                      {In: "hello-world", Out: "helloWorld"},
		"handles utf-8 characters":        {In: "Héllo Wörld", Out: "hélloWörld"},
		"mixture of Title and lower case": {In: "the Hello World", Out: "theHelloWorld"},
	}, func(t *testcase.T, tc TC) {
		t.Must.Equal(tc.Out, stringcase.ToCamel(tc.In), "original:", assert.Message(pp.Format(tc.In)))
	})
}
// BenchmarkToPascal measures ToPascal over random 10-character strings drawn
// from lowercase letters plus "_.-" separators.
// NOTE(review): the charset concatenates ToLower(CharsetAlpha()) twice; one
// of the two was presumably meant to be the upper-case set — confirm.
func BenchmarkToPascal(b *testing.B) {
	rnd := random.New(random.CryptoSeed{})
	fixtures := random.Slice(b.N, func() string {
		return rnd.StringNC(10,
			strings.ToLower(random.CharsetAlpha())+
				strings.ToLower(random.CharsetAlpha())+
				"_.-")
	})
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		stringcase.ToPascal(fixtures[i])
	}
}
// TestIsKebab is a table test covering stringcase.IsKebab: only all-lower
// hyphen-separated identifiers (including UTF-8 letters) qualify.
//
// Fix: the original assertion called stringcase.IsSnake and reused the
// snake-case table verbatim, so IsKebab was never exercised. The assertion
// now targets IsKebab and the table expectations are kebab-appropriate
// (hello-world is kebab; hello_world is not).
func TestIsKebab(t *testing.T) {
	type TC struct {
		In  string
		Out bool
	}
	testcase.TableTest(t, map[string]TC{
		"kebab case":                       {In: "hello-world", Out: true},
		"kebab case with utf-8 characters": {In: "héllo-wörld", Out: true},
		"snake case":                       {In: "hello_world", Out: false},
		"pascal case":                      {In: "HelloWorld", Out: false},
		"pascal case with abbrevation":     {In: "HTTPFoo", Out: false},
		"mixed case with number suffix + pascal case": {In: "HelloWorld42", Out: false},
		"pascal case with utf-8 characters":           {In: "HélloWörld", Out: false},
		"camel case":        {In: "helloWorld", Out: false},
		"title snake case":  {In: "Hello_World", Out: false},
		"title dot case":    {In: "Hello.World", Out: false},
		"title kebab case":  {In: "Hello-World", Out: false},
		"mixed case with number prefix + pascal case": {In: "1HelloWorld", Out: false},
	}, func(t *testcase.T, tc TC) {
		t.Must.Equal(tc.Out, stringcase.IsKebab(tc.In), "input:", assert.Message(pp.Format(tc.In)))
	})
}
// TestToKebab is a table test covering stringcase.ToKebab conversions from
// snake, pascal, camel, dot and mixed-case inputs (including UTF-8).
func TestToKebab(t *testing.T) {
	type TC struct {
		In  string
		Out string
	}
	testcase.TableTest(t, map[string]TC{
		"empty string":                    {In: "", Out: ""},
		"one character":                   {In: "A", Out: "a"},
		"snake":                           {In: "hello_world", Out: "hello-world"},
		"pascal":                          {In: "HelloWorld", Out: "hello-world"},
		"pascal with multiple words":      {In: "HTTPFoo", Out: "http-foo"},
		"camel":                           {In: "helloWorld", Out: "hello-world"},
		"upper":                           {In: "HELLO WORLD", Out: "hello-world"},
		"screaming snake":                 {In: "HELLO_WORLD", Out: "hello-world"},
		"dot case":                        {In: "hello.world", Out: "hello-world"},
		"kebab case":                      {In: "hello-world", Out: "hello-world"},
		"handles utf-8 characters":        {In: "Héllo Wörld", Out: "héllo-wörld"},
		"mixture of Title and lower case": {In: "the Hello World", Out: "the-hello-world"},
	}, func(t *testcase.T, tc TC) {
		t.Must.Equal(tc.Out, stringcase.ToKebab(tc.In), "original:", assert.Message(pp.Format(tc.In)))
	})
}
|
// Goroutines-plus
package main
import (
"fmt"
"math/rand"
"sync"
)
// main builds a three-stage pipeline over buffered channels: stage 1 keeps
// numbers divisible by 2, stage 2 keeps numbers divisible by 5, stage 3
// prints the survivors. A WaitGroup waits for all three goroutines.
// (Comments translated from the original Chinese.)
func main() {
	numberChan1 := make(chan int64, 3) // number channel 1
	numberChan2 := make(chan int64, 3) // number channel 2
	numberChan3 := make(chan int64, 3) // number channel 3
	// Synchronization tool used to wait for a group of operations to finish.
	var waitGroup sync.WaitGroup
	// The group consists of 3 operations.
	waitGroup.Add(3)
	go func() { // Number filter 1.
		for n := range numberChan1 { // Receive from channel 1 until it is closed.
			if n%2 == 0 { // Forward to channel 2 only if divisible by 2.
				numberChan2 <- n
			} else {
				fmt.Printf("Filter %d. [filter 1]\n", n)
			}
		}
		close(numberChan2) // Close channel 2 (this goroutine is its only sender).
		waitGroup.Done()   // Mark one operation as done.
	}()
	go func() { // Number filter 2.
		for n := range numberChan2 { // Receive from channel 2 until it is closed.
			if n%5 == 0 { // Forward to channel 3 only if divisible by 5.
				numberChan3 <- n
			} else {
				fmt.Printf("Filter %d. [filter 2]\n", n)
			}
		}
		close(numberChan3) // Close channel 3 (this goroutine is its only sender).
		waitGroup.Done()   // Mark one operation as done.
	}()
	go func() { // Number printer.
		for n := range numberChan3 { // Receive from channel 3 until it is closed.
			fmt.Println(n) // Print the number.
		}
		waitGroup.Done() // Mark one operation as done.
	}()
	// Send 30 random numbers in [0, 30) to channel 1.
	// (The original comment said 100, but the loop sends 30.)
	for i := 0; i < 30; i++ {
		numberChan1 <- rand.Int63n(30)
	}
	close(numberChan1) // Done sending; close channel 1.
	waitGroup.Wait()   // Wait for the 3 operations above to finish.
}
|
package models
// Genre is a named genre record. It embeds BaseModel (declared elsewhere),
// which presumably carries the shared id/timestamp columns — confirm there.
type Genre struct {
	BaseModel
	// Name is the genre's display name, serialized as "name" in JSON.
	Name string `json:"name"`
}
// Fields returns pointers to Genre's own scannable fields (excluding the
// embedded BaseModel) in declaration order — presumably consumed by a row
// scanner; verify against BaseModel's usage.
func (genre *Genre) Fields() []interface{} {
	return []interface{}{&genre.Name}
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package deskscuj
import (
"context"
"fmt"
"time"
"chromiumos/tast/common/action"
"chromiumos/tast/errors"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/ash"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/mouse"
"chromiumos/tast/local/input"
)
// deskSwitchWorkflow represents a workflow for switching between desks.
// |run| switches to the "next" desk, which is defined by the |itinerary|.
// |run| takes in the currently active desk and the expected next desk,
// and activates the next desk.
type deskSwitchWorkflow struct {
	name        string // short human-readable name of the switching method
	description string // longer description of the workflow
	itinerary   []int  // sequence of desk indices to visit, in order
	// run activates the next desk: run(ctx, fromDesk, toDesk).
	run func(context.Context, int, int) error
}
// getKeyboardSearchBracketWorkflow returns the workflow for switching
// between desks using the Search+[ and Search+] keyboard shortcuts.
func getKeyboardSearchBracketWorkflow(tconn *chrome.TestConn, kw *input.KeyboardEventWriter) deskSwitchWorkflow {
	return deskSwitchWorkflow{
		name:        "Search+] and Search+[",
		description: "Cycle through desks using Search+] and Search+[",
		itinerary:   []int{0, 1, 2, 3, 2, 1},
		run: func(ctx context.Context, fromDesk, toDesk int) error {
			// The bracket shortcuts only move a single desk at a time:
			// Search+[ goes one desk left, Search+] one desk right.
			if toDesk == fromDesk-1 {
				return kw.Accel(ctx, "Search+[")
			}
			if toDesk == fromDesk+1 {
				return kw.Accel(ctx, "Search+]")
			}
			return errors.Errorf("invalid Search+Bracket desk switch: from %d to %d", fromDesk, toDesk)
		},
	}
}
// getKeyboardSearchNumberWorkflow returns the workflow for switching
// between desks using Search+Shift+Number.
func getKeyboardSearchNumberWorkflow(tconn *chrome.TestConn, kw *input.KeyboardEventWriter) deskSwitchWorkflow {
	return deskSwitchWorkflow{
		name:        "Search+Shift+Number",
		description: "Cycle through desks using Search+Shift+Number",
		itinerary:   []int{0, 1, 2, 3, 2, 1},
		run: func(ctx context.Context, fromDesk, toDesk int) error {
			if fromDesk == toDesk {
				return errors.Errorf("invalid target desk, can't switch from desk %d to itself", fromDesk)
			}
			// Shortcuts are 1-indexed while desk indices are 0-indexed, so
			// offset toDesk by 1.
			return kw.Accel(ctx, fmt.Sprintf("Shift+Search+%d", toDesk+1))
		},
	}
}
// getOverviewWorkflow returns the workflow for switching between desks
// by entering overview mode and selecting the next desk.
func getOverviewWorkflow(tconn *chrome.TestConn, ac *uiauto.Context, setOverviewModeAndWait action.Action) deskSwitchWorkflow {
	return deskSwitchWorkflow{
		name:        "Overview",
		description: "Cycle through desks using overview mode",
		itinerary:   []int{0, 1, 2, 3, 2, 1},
		run: func(ctx context.Context, fromDesk, toDesk int) error {
			if fromDesk == toDesk {
				return errors.Errorf("invalid target desk, can't switch from desk %d to itself", fromDesk)
			}
			// Enter overview so the desk mini-views become visible.
			if err := setOverviewModeAndWait(ctx); err != nil {
				return errors.Wrap(err, "failed to enter overview mode")
			}
			desksInfo, err := ash.FindDeskMiniViews(ctx, ac)
			if err != nil {
				return errors.Wrap(err, "failed to get desk previews")
			}
			// Reject a target index outside the desks that actually exist.
			if numDesks := len(desksInfo); toDesk >= numDesks || toDesk < 0 {
				return errors.Errorf("invalid target desk: got %d, expected desk index between 0 and %d", toDesk, numDesks-1)
			}
			// Click the target desk's preview to activate it, then wait for
			// overview to be hidden again.
			if err := mouse.Click(tconn, desksInfo[toDesk].Location.CenterPoint(), mouse.LeftButton)(ctx); err != nil {
				return errors.Wrapf(err, "failed to click on the desk preview for desk %d", toDesk)
			}
			if err := ash.WaitForOverviewState(ctx, tconn, ash.Hidden, time.Minute); err != nil {
				return errors.Wrap(err, "failed to exit overview mode")
			}
			return nil
		},
	}
}
|
package iot
import (
"context"
"fmt"
"time"
"github.com/rareinator/Svendeprove/Backend/packages/mongo"
. "github.com/rareinator/Svendeprove/Backend/packages/protocol"
)
// IotServer implements the IOT service (it embeds the generated
// UnimplementedIotServiceServer) backed by MongoDB.
type IotServer struct {
	UnimplementedIotServiceServer
	ListenAddress string         // address reported by GetHealth
	DB            *mongo.MongoDB // MongoDB handle used for all persistence
}
// GetHealth reports service liveness, embedding the configured listen
// address in the health message.
func (i *IotServer) GetHealth(ctx context.Context, e *Empty) (*Health, error) {
	return &Health{Message: fmt.Sprintf("IOT service is up and running on: %v 🚀", i.ListenAddress)}, nil
}
// UploadData persists a single IOT data point (stamped with the current
// time) to MongoDB and echoes the input back to the caller on success.
func (i *IotServer) UploadData(ctx context.Context, input *IOTData) (*IOTData, error) {
	fmt.Println("Saving data to mongoDB")
	fmt.Printf("saving %v for %v for device %v\n\r", input.Data, input.SensorID, input.Name)
	data := mongo.Device{
		Name:     input.Name,
		SensorID: input.SensorID,
		Data:     input.Data,
		Date:     time.Now(),
	}
	// Fix: use the caller's ctx (was context.Background()) so cancellation
	// and deadlines propagate to the database call.
	if err := i.DB.UploadData(ctx, &data); err != nil {
		return nil, err
	}
	return input, nil
}
// ReadData returns every stored data point for the device id in the request,
// with dates rendered in the "02/01/2006 15:04:05" layout.
func (i *IotServer) ReadData(ctx context.Context, request *Request) (*IOTDatas, error) {
	// Fix: use the caller's ctx (was context.Background()) so cancellation
	// and deadlines propagate to the database call.
	datas, err := i.DB.ReadData(ctx, request.Id)
	if err != nil {
		return nil, err
	}
	response := IOTDatas{
		// Pre-size: exactly one output entry per database row.
		IOTDatas: make([]*IOTData, 0, len(datas)),
	}
	for _, data := range datas {
		iotData := IOTData{
			ID:       data.ID.String(),
			Name:     data.Name,
			SensorID: data.SensorID,
			Data:     data.Data,
			Date:     data.Date.Format("02/01/2006 15:04:05"),
		}
		response.IOTDatas = append(response.IOTDatas, &iotData)
	}
	return &response, nil
}
// ReadDataInTimeFrame returns every stored data point whose date lies within
// the requested window. Request times are parsed with — and response dates
// formatted in — the "02/01/2006 15:04:05" layout.
func (i *IotServer) ReadDataInTimeFrame(ctx context.Context, request *IOTTimeframeRequest) (*IOTDatas, error) {
	parsedStartTime, err := time.Parse("02/01/2006 15:04:05", request.TimeStart)
	if err != nil {
		return nil, err
	}
	parsedEndTime, err := time.Parse("02/01/2006 15:04:05", request.TimeEnd)
	if err != nil {
		return nil, err
	}
	// Fix: use the caller's ctx (was context.Background()) so cancellation
	// and deadlines propagate to the database call.
	datas, err := i.DB.ReadDataInTimeFrame(ctx, parsedStartTime, parsedEndTime)
	if err != nil {
		return nil, err
	}
	response := IOTDatas{
		// Pre-size: exactly one output entry per database row.
		IOTDatas: make([]*IOTData, 0, len(datas)),
	}
	for _, data := range datas {
		iotData := IOTData{
			ID:       data.ID.String(),
			Name:     data.Name,
			SensorID: data.SensorID,
			Data:     data.Data,
			Date:     data.Date.Format("02/01/2006 15:04:05"),
		}
		response.IOTDatas = append(response.IOTDatas, &iotData)
	}
	return &response, nil
}
|
package main
import (
"fmt"
"html/template"
"net/http"
)
// InfoHandler Info接口的回调处理函数
func InfoHandler(w http.ResponseWriter, req *http.Request) {
t, err := template.ParseFiles("info.html", "ul.html")
if err != nil {
fmt.Printf("Parse HTML file failed, err=%v\n", err)
return
}
t.Execute(w, nil)
}
// main registers the /info handler and serves HTTP on :8001.
func main() {
	http.HandleFunc("/info", InfoHandler)
	// Fix: ListenAndServe's error (e.g. port already in use) was silently
	// discarded; report it so startup failures are visible.
	if err := http.ListenAndServe(":8001", nil); err != nil {
		fmt.Printf("Server exited, err=%v\n", err)
	}
}
|
package leetcode
import (
"strconv"
)
//func factor(m, n int) int {
// if n == 0 {
// return m
// }
//
// return factor(n, m%n)
//}
// factor returns the greatest common divisor of m and n via the iterative
// Euclidean algorithm.
func factor(m, n int) int {
	for n != 0 {
		m, n = n, m%n
	}
	return m
}
//func removeDuplicates(s []string) []string {
// e := map[string]bool{}
// var r []string
//
// for v := range s {
// if e[s[v]] == false {
// e[s[v]] = true
// r = append(r, s[v])
// }
// }
//
// return r
//}
//func simplifiedFractions(n int) []string {
// var ans []string
//
// if n == 0 || n == 1 {
// return ans
// }
//
// for i := 2; i <= n; i++ {
// for j := 1; j < i; j++ {
// fac := factor(i, j)
// ans = append(ans, fmt.Sprint(j/fac)+"/"+fmt.Sprint(i/fac))
// }
// }
//
// return removeDuplicates(ans)
//}
func simplifiedFractions(n int) []string {
var ans []string
for i := 2; i <= n; i++ {
for j := 1; j < i; j++ {
if factor(j, i) == 1 {
ans = append(ans, strconv.Itoa(j)+"/"+strconv.Itoa(i))
}
}
}
return ans
}
|
package main
import (
"log"
"net/http"
"github.com/emicklei/go-restful"
gintonic "github.com/gin-tonic/pkg/api/services/gintonic/endpoint"
"github.com/gin-tonic/pkg/api/services/health/pingservice"
"github.com/gin-tonic/pkg/api/services/user/userservice"
utils "github.com/gin-tonic/pkg/api/utils"
)
// main reads the service configuration, registers the REST endpoints and
// serves HTTP on the configured port.
func main() {
	config, err := utils.ReadConfig("pkg/config/service-config.yml")
	if err != nil {
		// Fix: log.Fatalf already exits the process, so the panic(err) that
		// followed it was unreachable dead code and has been removed.
		log.Fatalf("Fatal error while bootstrapping the service, %v", err)
	}
	restful.Add(gintonic.New())
	restful.Add(userservice.New())
	restful.Add(pingservice.New())
	log.Printf("Deploying Microservice within port: %v", config.Server.Port)
	if deployErr := http.ListenAndServe(config.Server.Port, nil); deployErr != nil {
		log.Fatal(deployErr)
	}
}
|
package db
import (
"github.com/evergreen-ci/sink"
"github.com/pkg/errors"
"gopkg.in/mgo.v2/bson"
)
// PROPOSAL: these functions should be private with exposed
// functionality via methods on the db.Q type.
// Insert inserts a document into a collection.
// A fresh mgo session is opened per call and closed on return.
func Insert(collection string, item interface{}) error {
	session, db, err := sink.GetMgoSession()
	if err != nil {
		return errors.Wrap(err, "problem getting session")
	}
	defer session.Close()
	return errors.WithStack(db.C(collection).Insert(item))
}
// ClearCollections clears all documents from all the specified collections,
// returning an error immediately if clearing any one of them fails
// (remaining collections are then left untouched).
func ClearCollections(collections ...string) error {
	session, db, err := sink.GetMgoSession()
	if err != nil {
		return errors.Wrap(err, "problem getting session")
	}
	defer session.Close()
	for _, collection := range collections {
		// RemoveAll with an empty selector deletes every document.
		_, err = db.C(collection).RemoveAll(bson.M{})
		if err != nil {
			return errors.Wrapf(err, "couldn't clear collection: %s", collection)
		}
	}
	return nil
}
// UpdateID updates one _id-matching document in the collection.
func UpdateID(collection string, id, update interface{}) error {
	session, db, err := sink.GetMgoSession()
	if err != nil {
		return errors.Wrap(err, "problem getting session")
	}
	defer session.Close()
	return errors.WithStack(db.C(collection).UpdateId(id, update))
}
// findOne finds one item from the specified collection and unmarshals it
// into the provided interface, which must be a pointer. proj selects the
// fields to return; a non-empty sort order determines which match wins.
func findOne(coll string, query, proj interface{}, sort []string, out interface{}) error {
	session, db, err := sink.GetMgoSession()
	if err != nil {
		return errors.Wrap(err, "problem getting session")
	}
	defer session.Close()
	q := db.C(coll).Find(query).Select(proj)
	if len(sort) != 0 {
		q = q.Sort(sort...)
	}
	return errors.WithStack(q.One(out))
}
// runUpdate updates one matching document in the collection.
func runUpdate(collection string, query, update interface{}) error {
	session, db, err := sink.GetMgoSession()
	if err != nil {
		return errors.Wrap(err, "problem getting session")
	}
	defer session.Close()
	return errors.WithStack(db.C(collection).Update(query, update))
}
// findAll retrieves all documents matching query from coll, applying the
// projection proj, optional sort order, and skip/limit paging, and
// unmarshals the results into out (which must be a pointer to a slice).
func findAll(coll string, query, proj interface{}, sort []string, skip, limit int, out interface{}) error {
	ses, database, err := sink.GetMgoSession()
	if err != nil {
		return errors.Wrap(err, "problem getting session")
	}
	defer ses.Close()
	finder := database.C(coll).Find(query).Select(proj)
	if len(sort) > 0 {
		finder = finder.Sort(sort...)
	}
	return errors.WithStack(finder.Skip(skip).Limit(limit).All(out))
}
// removeOne deletes the first document matching query from coll.
func removeOne(coll string, query interface{}) error {
	ses, database, err := sink.GetMgoSession()
	if err != nil {
		return errors.Wrap(err, "problem getting session")
	}
	defer ses.Close()
	return errors.WithStack(database.C(coll).Remove(query))
}
// count runs a count command with the specified query against the collection.
func count(collection string, query interface{}) (int, error) {
	session, db, err := sink.GetMgoSession()
	if err != nil {
		// Wrap for consistency with every other helper in this file.
		return 0, errors.Wrap(err, "problem getting session")
	}
	defer session.Close()
	n, err := db.C(collection).Find(query).Count()
	// WithStack(nil) returns nil, so the success path is unchanged.
	return n, errors.WithStack(err)
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package hwsim setups a simulated Wi-Fi environment for testing.
package hwsim
import (
"context"
"time"
"chromiumos/tast/errors"
"chromiumos/tast/local/arc"
"chromiumos/tast/local/shill"
"chromiumos/tast/local/upstart"
"chromiumos/tast/testing"
)
const (
	// hwsimTimeout bounds fixture SetUp, TearDown, and Reset.
	hwsimTimeout = 30 * time.Second
	// shillRequestTimeout bounds the shill.WifiInterface lookup in SetUp.
	shillRequestTimeout = 5 * time.Second
	// shillIfaceTimeout — presumably a per-interface wait bound; its use is
	// not visible in this chunk, verify against the rest of the package.
	shillIfaceTimeout = 10 * time.Second
	// ifaceCount is the number of simulated radios requested from the driver.
	ifaceCount = 4
	// testIfaceClaimer is the claimer name used for Shill Claim/ReleaseInterface.
	testIfaceClaimer = "hwsim-fixture"
)
// ShillSimulatedWiFi contains the Wi-Fi interfaces created by the simulated
// environment. It is the value returned by the fixture's SetUp.
type ShillSimulatedWiFi struct {
	// Simulated Wi-Fi interfaces used by Shill as client interfaces.
	Client []string
	// Simulated Wi-Fi interfaces available to be used as access point
	// interfaces. These were claimed from Shill during SetUp.
	AP []string
	// ARC's handle. This is only added when using the fixture "arcBooted".
	ARC *arc.ARC
}
// fixture is the implementation backing the fixtures registered in init.
type fixture struct {
	// m is the Shill Manager interface.
	m *shill.Manager
	// pid is the Shill process identifier used to ensure it does not restart
	// while the fixture is running (checked in Reset).
	pid int
	// hwIface is the name of the Wi-Fi interface already present on the device
	// when the fixture is setup; empty if no hardware interface was found.
	hwIface string
	// claimedIfaces is the a set of interfaces claimed by the fixture to
	// release before unloading the driver.
	claimedIfaces []string
}
func init() {
	// Standalone simulated Wi-Fi fixture.
	testing.AddFixture(&testing.Fixture{
		Name: "shillSimulatedWiFi",
		Desc: "A fixture that loads the Wi-Fi hardware simulator and ensures Shill is configured correctly",
		Contacts: []string{
			"damiendejean@google.com", // fixture maintainer
			"cros-networking@google.com",
		},
		SetUpTimeout:    hwsimTimeout,
		TearDownTimeout: hwsimTimeout,
		ResetTimeout:    hwsimTimeout,
		Impl:            &fixture{},
	})
	// Same fixture stacked on "arcBooted": SetUp additionally populates
	// ShillSimulatedWiFi.ARC from the parent fixture's value.
	testing.AddFixture(&testing.Fixture{
		Name: "shillSimulatedWiFiWithArcBooted",
		Desc: "A fixture that loads the Wi-Fi hardware simulator and ensures Shill is configured correctly",
		Contacts: []string{
			"damiendejean@google.com", // fixture maintainer
			"cros-networking@google.com",
		},
		SetUpTimeout:    hwsimTimeout,
		TearDownTimeout: hwsimTimeout,
		ResetTimeout:    hwsimTimeout,
		Impl:            &fixture{},
		Parent:          "arcBooted",
	})
}
// SetUp loads the mac80211_hwsim simulation driver, keeps one simulated
// interface as the Shill-managed client interface, and claims the remaining
// ones from Shill so tests can use them as access points. Any hardware Wi-Fi
// interface is claimed so Shill does not use it while the fixture runs.
// On failure, the deferred cleanups release claimed interfaces and unload
// the driver.
func (f *fixture) SetUp(ctx context.Context, s *testing.FixtState) interface{} {
	success := false
	// Unload the module if it's already loaded so we start from a known state.
	if loaded, err := isLoaded(); err != nil {
		s.Fatal("Failed to check for hwsim module state: ", err)
	} else if loaded {
		if err = unload(ctx); err != nil {
			s.Fatal("Failed to unload hwsim module: ", err)
		}
	}
	// Record Shill's PID so Reset can later detect an unexpected restart.
	_, _, pid, err := upstart.JobStatus(ctx, "shill")
	if err != nil {
		s.Fatal("Failed to obtain Shill PID: ", err)
	}
	f.pid = pid
	f.m, err = shill.NewManager(ctx)
	if err != nil {
		s.Fatal("Failed to connect to Shill Manager: ", err)
	}
	// Ensure the hardware interface is not in use. The call below will return
	// an error if there's multiple Wi-Fi interfaces but it's not something we
	// support at the moment.
	f.hwIface, err = shill.WifiInterface(ctx, f.m, shillRequestTimeout)
	if err == nil {
		// There's a hardware interface, we must tell Shill not to use it.
		err = f.m.ClaimInterface(ctx, testIfaceClaimer, f.hwIface)
		if err != nil {
			s.Fatalf("Failed to claim interface %s: %v", f.hwIface, err)
		}
		defer func(ctx context.Context) {
			if !success {
				if err := f.m.ReleaseInterface(ctx, testIfaceClaimer, f.hwIface); err != nil {
					s.Fatalf("Failed to release interface %s: %v", f.hwIface, err)
				}
			}
		}(ctx)
	}
	// Load the simulation driver (mac80211_hwsim).
	ifaces, err := load(ctx, ifaceCount)
	if err != nil {
		s.Fatal("Failed to load Wi-Fi simulation driver: ", err)
	}
	defer func(ctx context.Context) {
		if !success {
			if err := unload(ctx); err != nil {
				s.Fatal("Failed to unload simulation driver: ", err)
			}
		}
	}(ctx)
	// Wait for all the new interfaces to be managed by Shill.
	// TODO(b/235259730): remove the timeout and find a way to know the number
	// of interfaces expected to be managed by Shill.
	if err := testing.Sleep(ctx, 3*time.Second); err != nil {
		// Fix: include the underlying error in the failure message.
		s.Fatal("Failed to wait for Shill to manage interfaces: ", err)
	}
	// Obtain the list of Wi-Fi interfaces managed by Shill.
	wm, err := shill.NewWifiManager(ctx, f.m)
	if err != nil {
		s.Fatal("Failed to create Wi-Fi manager: ", err)
	}
	shillIfaces, err := wm.Interfaces(ctx)
	if err != nil {
		s.Fatal("Failed to obtain Wi-Fi interfaces from Shill: ", err)
	}
	// Fix: check the Shill-managed list, not the driver list (ifaces);
	// shillIfaces[0] below would panic if shillIfaces were empty.
	if len(shillIfaces) == 0 {
		s.Fatal("Shill has no Wi-Fi interfaces")
	}
	// Keep track of managed interfaces.
	managedIfaces := make(map[string]bool)
	for _, iface := range shillIfaces {
		managedIfaces[iface] = true
	}
	// Keep an interface as client.
	clientIface := shillIfaces[0]
	// Use the other interfaces as test access points.
	var apIfaces []string
	var claimedIfaces []string
	for _, iface := range ifaces {
		if iface.Name == clientIface {
			// The client interface cannot be used as access point and will
			// continue to be managed by Shill.
			continue
		}
		if managedIfaces[iface.Name] {
			// The interface is managed by Shill, we need to claim it before
			// it can be used as a test access point.
			if err := f.m.ClaimInterface(ctx, testIfaceClaimer, iface.Name); err != nil {
				s.Fatalf("Failed to claim interfaces %s: %v", iface.Name, err)
			}
			defer func(ctx context.Context, name string) {
				if !success {
					if err := f.m.ReleaseInterface(ctx, testIfaceClaimer, name); err != nil {
						s.Fatalf("Failed to release interface %s: %v", name, err)
					}
				}
			}(ctx, iface.Name)
			// Keep track of claimed interfaces to release them later.
			claimedIfaces = append(claimedIfaces, iface.Name)
		}
		apIfaces = append(apIfaces, iface.Name)
	}
	f.claimedIfaces = claimedIfaces
	success = true
	fixt := &ShillSimulatedWiFi{
		Client: []string{clientIface},
		AP:     apIfaces,
	}
	// When stacked on "arcBooted", expose the parent's ARC handle.
	if s.ParentValue() != nil {
		fixt.ARC = s.ParentValue().(*arc.PreData).ARC
	}
	return fixt
}
// TearDown releases every interface claimed during SetUp, unloads the
// simulation driver, and returns any hardware interface to Shill. Failures
// are reported with s.Error/Errorf so cleanup proceeds as far as possible.
func (f *fixture) TearDown(ctx context.Context, s *testing.FixtState) {
	// Release the simulation driver interfaces claimed in Shill.
	for _, iface := range f.claimedIfaces {
		if err := f.m.ReleaseInterface(ctx, testIfaceClaimer, iface); err != nil {
			s.Errorf("Failed to release interfaces %s: %v", iface, err)
		}
	}
	// Unload the simulation driver.
	if err := unload(ctx); err != nil {
		s.Error("Failed to unload simulation driver: ", err)
	}
	// Give the hardware interface back to Shill if any.
	if f.hwIface != "" {
		// Fix: use Errorf rather than Fatalf for consistency with the
		// best-effort error handling in the rest of this teardown.
		if err := f.m.ReleaseInterface(ctx, testIfaceClaimer, f.hwIface); err != nil {
			s.Errorf("Failed to release hardware interface %s: %v", f.hwIface, err)
		}
	}
}
// Reset verifies the fixture invariant between tests: Shill must still be
// the same process observed during SetUp.
func (f *fixture) Reset(ctx context.Context) error {
	_, _, currentPID, err := upstart.JobStatus(ctx, "shill")
	if err != nil {
		return errors.Wrap(err, "failed to obtain Shill PID")
	}
	if currentPID != f.pid {
		return errors.New("failed to maintain fixture state: Shill restarted")
	}
	return nil
}
// PreTest is a no-op; the fixture needs no per-test setup.
func (f *fixture) PreTest(ctx context.Context, s *testing.FixtTestState) {
}
// PostTest is a no-op; the fixture needs no per-test cleanup.
func (f *fixture) PostTest(ctx context.Context, s *testing.FixtTestState) {
}
|
package main
import (
	"errors"
	"fmt"
	"os"

	"github.com/spf13/cobra"
)
/*
TODO:
- Document types with golang // <StructName> description style
*/
// main wires up the glanc root command with its subcommands and runs it,
// exiting with status 1 on any command error.
func main() {
	root := &cobra.Command{
		Use:   "glanc",
		Short: "gyors lanc - the fastest bchain ever!",
		Run:   func(cmd *cobra.Command, args []string) {},
	}
	root.AddCommand(versionCmd())
	root.AddCommand(balancesCmd())
	root.AddCommand(txCmd())
	if err := root.Execute(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
// incorrectUsageErr returns the generic error reported for malformed
// command invocations. errors.New is preferred over fmt.Errorf when the
// message has no format verbs.
func incorrectUsageErr() error {
	return errors.New("incorrect usage")
}
|
package main
import "fmt"
// main demonstrates plusOne on a sample number represented as digits.
func main() {
	input := []int{4, 3, 2, 1}
	fmt.Println(plusOne(input))
}
// plusOne increments the decimal number represented by digits (most
// significant digit first, each element 0-9) by one, mutating the slice in
// place and prepending a leading 1 when the carry overflows the first digit.
func plusOne(digits []int) []int {
	carry := 1
	for pos := len(digits) - 1; pos >= 0 && carry > 0; pos-- {
		sum := digits[pos] + carry
		digits[pos] = sum % 10
		carry = sum / 10
	}
	if carry > 0 {
		digits = append([]int{carry}, digits...)
	}
	return digits
}
|
package detector
import (
"errors"
"io"
"os"
"regexp"
"strings"
"testing"
"fmt"
"github.com/stretchr/testify/assert"
)
// TestIsPotentialDelimiter verifies that alphanumeric bytes are rejected as
// delimiter candidates while punctuation bytes are accepted.
func TestIsPotentialDelimiter(t *testing.T) {
	cases := []struct {
		input    byte
		expected bool
	}{
		{'a', false},
		{'A', false},
		{'1', false},
		{'|', true},
		{'$', true},
	}
	d := &detector{
		nonDelimiterRegex: regexp.MustCompile(nonDelimiterRegexString),
	}
	for _, c := range cases {
		got := !d.nonDelimiterRegex.MatchString(string(c.input))
		assert.Equal(t, c.expected, got)
	}
}
// TestDetectDelimiter1 checks delimiter detection on two CSV fixtures and
// verifies that an empty input yields no delimiters (the sentinel value is
// never compared against real output).
func TestDetectDelimiter1(t *testing.T) {
	d := New()
	first, err := os.OpenFile("./Fixtures/test1.csv", os.O_RDONLY, os.ModePerm)
	assert.NoError(t, err)
	defer first.Close()
	second, err := os.OpenFile("./Fixtures/test2.csv", os.O_RDONLY, os.ModePerm)
	assert.NoError(t, err)
	defer second.Close()
	const unreachable = "blah"
	cases := []struct {
		r         io.Reader
		delimiter string
	}{
		{first, ","},
		{second, ","},
		{strings.NewReader(""), unreachable},
	}
	for _, c := range cases {
		got := d.DetectDelimiter(c.r, '"')
		fmt.Println(got)
		if len(got) == 0 && c.delimiter == unreachable {
			return
		}
		assert.Equal(t, []string{c.delimiter}, got)
	}
}
// TestDetectRowTerminator checks terminator detection for \n, \r, and \r\n
// inputs, plus a failing reader and an empty reader (both yield "").
func TestDetectRowTerminator(t *testing.T) {
	d := New()
	file, err := os.OpenFile("./Fixtures/test1.csv", os.O_RDONLY, os.ModePerm)
	assert.NoError(t, err)
	defer file.Close()
	boo := strings.NewReader("boo\r\nhere\r\n\beghosts!\r\n")
	woo := strings.NewReader("woo\rhere\rbe no pippy!\r")
	empty := strings.NewReader("")
	cases := []struct {
		r        io.Reader
		expected string
	}{
		{file, "\n"},
		{woo, "\r"},
		{boo, "\r\n"},
		{badRead{}, ""},
		{empty, ""},
	}
	for _, c := range cases {
		assert.Equal(t, c.expected, d.DetectRowTerminator(c.r))
	}
}
// badRead is an io.Reader stub whose Read always fails, used to exercise
// error paths in the detector tests.
type badRead struct{}

// Read implements io.Reader and unconditionally returns an error.
func (b badRead) Read(p []byte) (int, error) {
	return 0, errors.New("woowoowoo")
}
|
package main
import (
"flag"
"fmt"
"os"
)
// VersionStr is the version reported by the -v flag. It defaults to
// "unknown"; presumably it is overridden at link time (e.g. via
// -ldflags "-X main.VersionStr=...") — TODO confirm against the build setup.
var VersionStr = "unknown"
// main parses command-line flags and, when -v is given, prints the version
// string and exits successfully.
func main() {
	showVersion := flag.Bool("v", false, "Show Version")
	flag.Parse()
	if !*showVersion {
		return
	}
	fmt.Println(VersionStr)
	os.Exit(0)
}
|
// SPDX-License-Identifier: MIT
package lsp
import (
"path/filepath"
"github.com/issue9/sliceutil"
"github.com/caixw/apidoc/v7/build"
"github.com/caixw/apidoc/v7/core"
"github.com/caixw/apidoc/v7/internal/ast"
"github.com/caixw/apidoc/v7/internal/lang"
"github.com/caixw/apidoc/v7/internal/lsp/protocol"
)
// textDocument/didChange
//
// https://microsoft.github.io/language-server-protocol/specifications/specification-current/#textDocument_didChange
//
// Handles an edit to an open document: the document's previously parsed
// content is dropped, its changed blocks are re-parsed, and fresh
// diagnostics are pushed to the client.
func (s *server) textDocumentDidChange(notify bool, in *protocol.DidChangeTextDocumentParams, out *any) error {
	f := s.findFolder(in.TextDocument.URI)
	if f == nil {
		// The document does not belong to any workspace folder we track.
		return nil
	}
	f.parsedMux.Lock()
	defer f.parsedMux.Unlock()
	// If nothing for this URI was previously parsed, there is nothing to redo.
	if !deleteURI(f.doc, in.TextDocument.URI) {
		return nil
	}
	f.clearDiagnostics()
	for _, blk := range in.Blocks() {
		f.parseBlock(blk)
	}
	f.srv.textDocumentPublishDiagnostics(f)
	return nil
}
// parseBlock re-parses a single source block into f.doc, matching the block's
// file extension against the configured inputs to pick the language, and then
// refreshes the client's outline view.
func (f *folder) parseBlock(block core.Block) {
	var input *build.Input
	ext := filepath.Ext(block.Location.URI.String())
	for _, i := range f.cfg.Inputs {
		if sliceutil.Count(i.Exts, func(index string) bool { return index == ext }) > 0 {
			input = i
			break
		}
	}
	if input == nil { // no configured input handles this extension; skip parsing
		return
	}
	f.doc.ParseBlocks(f.h, func(blocks chan core.Block) {
		lang.Parse(f.h, input.Lang, block, blocks)
	})
	if err := f.srv.apidocOutline(f); err != nil {
		f.srv.printErr(err)
	}
}
// deleteURI removes everything parsed from uri out of doc: all APIs whose
// source is uri, and — when the doc's own header came from uri — the header
// content itself (the APIs are preserved). It reports whether anything was
// removed.
func deleteURI(doc *ast.APIDoc, uri core.URI) (deleted bool) {
	l := len(doc.APIs)
	doc.APIs = sliceutil.Delete(doc.APIs, func(i *ast.API) bool {
		return i.URI == uri
	})
	deleted = l > len(doc.APIs)
	if doc.URI == uri {
		// Reset the doc header but keep the remaining APIs.
		*doc = ast.APIDoc{APIs: doc.APIs}
		deleted = true
	}
	return deleted
}
// textDocument/publishDiagnostics
//
// https://microsoft.github.io/language-server-protocol/specifications/specification-current/#textDocument_publishDiagnostics
//
// Pushes every pending diagnostics payload of the folder to the client.
func (s *server) textDocumentPublishDiagnostics(f *folder) {
	for _, params := range f.diagnostics {
		err := s.Notify("textDocument/publishDiagnostics", params)
		if err != nil {
			s.erro.Println(err)
		}
	}
}
// clearDiagnostics empties every cached diagnostics payload, notifies the
// client (so previously published diagnostics are cleared on its side), and
// resets the cache.
func (f *folder) clearDiagnostics() {
	for _, p := range f.diagnostics {
		p.Diagnostics = p.Diagnostics[:0]
		if err := f.srv.Notify("textDocument/publishDiagnostics", p); err != nil {
			f.srv.erro.Println(err)
		}
	}
	// The explicit 0 size hint in the original make call was redundant.
	f.diagnostics = make(map[core.URI]*protocol.PublishDiagnosticsParams)
}
// textDocument/foldingRange
//
// https://microsoft.github.io/language-server-protocol/specifications/specification-current/#textDocument_foldingRange
//
// Returns folding ranges for the document's base block and for every API
// block that originates from the requested URI, honoring the client's
// lineFoldingOnly capability when advertised.
func (s *server) textDocumentFoldingRange(notify bool, in *protocol.FoldingRangeParams, out *[]protocol.FoldingRange) error {
	f := s.findFolder(in.TextDocument.URI)
	if f == nil {
		return nil
	}
	// Respect the client capability only when the full capability chain exists.
	lineFoldingOnly := s.clientParams != nil &&
		s.clientParams.Capabilities.TextDocument.FoldingRange != nil &&
		s.clientParams.Capabilities.TextDocument.FoldingRange.LineFoldingOnly
	folds := make([]protocol.FoldingRange, 0, 10)
	f.parsedMux.RLock()
	defer f.parsedMux.RUnlock()
	if f.doc.URI == in.TextDocument.URI {
		folds = append(folds, protocol.BuildFoldingRange(f.doc.Base, lineFoldingOnly))
	}
	for _, api := range f.doc.APIs {
		if api.URI == in.TextDocument.URI {
			folds = append(folds, protocol.BuildFoldingRange(api.Base, lineFoldingOnly))
		}
	}
	*out = folds
	return nil
}
// textDocument/completion
//
// https://microsoft.github.io/language-server-protocol/specifications/specification-current/#textDocument_completion
//
// Not yet implemented; always returns nil without populating out.
func (s *server) textDocumentCompletion(notify bool, in *protocol.CompletionParams, out *protocol.CompletionList) error {
	// TODO
	return nil
}
|
package main
import (
"benchmark/controllers"
"github.com/astaxie/beego"
)
// main registers the benchmark HTTP routes and starts the beego server.
// (Reformatted to be gofmt-clean: spaces after the argument commas.)
func main() {
	beego.Router("/", &controllers.MainController{})
	beego.Router("/benchmark", &controllers.BenchmarkController{})
	beego.Router("/run", &controllers.RunController{})
	beego.Run()
}
|
package proteus
import (
"context"
"reflect"
"sync"
"fmt"
"strings"
"github.com/jonbodner/multierr"
"github.com/jonbodner/proteus/logger"
"github.com/jonbodner/stackerr"
)
/*
struct tags:
proq - SQL code to execute
If the first parameter is an Executor, returns new id (if sql.Result has a non-zero value for LastInsertId) or number of rows changed.
If the first parameter is a Querier, returns single entity or list of entities.
prop - The parameter names. Should be in order for the function parameters (skipping over the first Executor parameter)
prof - The fields on the dto that are mapped to select parameters in a query
next:
Put a reference to a public string instead of the query and that public string will be used as the query
later:
struct tags to mark as CRUD operations
converting name parameterized queries to positional queries
1. build a map of prop entry->position in parameter list
2. For each : in the input query
3. Find it
4. Find the end of the term (whitespace, comma, or end of string)
5. Create a map of querypos -> struct {parameter position (int), field path (string), isSlice (bool)}
6. If is slice, replace with ??
7. Otherwise, replace with ?
8. Capture the new string and the map in the closure
9. On run, do the replacements directly
10. If there are slices (??), replace with series of ? separated by commas, blow out slice in args slice
Return type is 0, 1, or 2 values
If zero, suppress all errors and return values (not great)
If 1:
For exec, return LastInsertId if !=0, otherwise return # of row changes (int in either case)
For query, if return type is []Entity, map to the entities.
For query, if return type is Entity and there are > 1 value, return the first. If there are zero, return the zero value of the Entity.
If 2:
Same as 1, 2nd parameter is error
Exception: if return type is entity and there are 0 values or > 1 value, return error indicating this.
On mapping for query, any unmappable parameters are ignored
If the entity is a primitive, then the first value returned for a row must be of that type, or it's an error. All other values for that row will be ignored.
*/
// Error describes a failure encountered while building the implementation
// for a single function field of a DAO struct.
type Error struct {
	FuncName      string // name of the struct field being processed
	FieldOrder    int    // zero-based position of the field in the struct
	OriginalError error  // underlying cause
}

// Error implements the error interface, identifying the field and its cause.
func (e Error) Error() string {
	return fmt.Sprintf("error in field #%d (%s): %v", e.FieldOrder, e.FuncName, e.OriginalError)
}

// Unwrap exposes the underlying error for errors.Is / errors.As.
func (e Error) Unwrap() error {
	return e.OriginalError
}
// l is the package-wide log level applied when the context carries none.
var l = logger.OFF

// rw guards reads and writes of l.
var rw sync.RWMutex
// SetLogLevel sets the package-wide logging level used by Build and by
// generated functions when the context does not specify one.
func SetLogLevel(ll logger.Level) {
	rw.Lock()
	defer rw.Unlock()
	l = ll
}
// ShouldBuild works like Build, with two differences:
//
// 1. It will not populate any function fields if there are errors.
//
// 2. The context passed in to ShouldBuild can be used to specify the logging level used during ShouldBuild and
// when the generated functions are invoked. This overrides any logging level specified using the SetLogLevel
// function.
func ShouldBuild(c context.Context, dao interface{}, paramAdapter ParamAdapter, mappers ...QueryMapper) error {
	//if log level is set and not in the context, use it
	if _, ok := logger.LevelFromContext(c); !ok && l != logger.OFF {
		rw.RLock()
		c = logger.WithLevel(c, l)
		rw.RUnlock()
	}
	daoPointerType := reflect.TypeOf(dao)
	//must be a pointer to struct
	if daoPointerType.Kind() != reflect.Ptr {
		return stackerr.New("not a pointer")
	}
	daoType := daoPointerType.Elem()
	//if not a struct, error out
	if daoType.Kind() != reflect.Struct {
		return stackerr.New("not a pointer to struct")
	}
	var out error
	// funcs accumulates the generated implementations; they are assigned to
	// the DAO only at the end, and only if no field produced an error.
	funcs := make([]reflect.Value, daoType.NumField())
	daoPointerValue := reflect.ValueOf(dao)
	daoValue := reflect.Indirect(daoPointerValue)
	//for each field in ProductDao that is of type func and has a proteus struct tag, assign it a func
	for i := 0; i < daoType.NumField(); i++ {
		curField := daoType.Field(i)
		//Implement embedded fields -- if we have a field of type struct and it's anonymous,
		//recurse
		if curField.Type.Kind() == reflect.Struct && curField.Anonymous {
			pv := reflect.New(curField.Type)
			embeddedErrs := ShouldBuild(c, pv.Interface(), paramAdapter, mappers...)
			if embeddedErrs != nil {
				out = multierr.Append(out, embeddedErrs)
			} else {
				funcs[i] = pv.Elem()
			}
			continue
		}
		// Only func-typed fields carrying a "proq" tag are processed.
		query, ok := curField.Tag.Lookup("proq")
		if curField.Type.Kind() != reflect.Func || !ok {
			continue
		}
		funcType := curField.Type
		//validate to make sure that the function matches what we expect
		hasCtx, err := validateFunction(funcType)
		if err != nil {
			out = multierr.Append(out, Error{FuncName: curField.Name, FieldOrder: i, OriginalError: err})
			continue
		}
		paramOrder := curField.Tag.Get("prop")
		var nameOrderMap map[string]int
		// Parameter positions start after the Executor/Querier (and after the
		// context, when present).
		startPos := 1
		if hasCtx {
			startPos = 2
		}
		if len(paramOrder) == 0 {
			nameOrderMap = buildDummyParameters(funcType.NumIn(), startPos)
		} else {
			nameOrderMap = buildNameOrderMap(paramOrder, startPos)
		}
		//check to see if the query is in a QueryMapper
		query, err = lookupQuery(query, mappers)
		if err != nil {
			out = multierr.Append(out, Error{FuncName: curField.Name, FieldOrder: i, OriginalError: err})
			continue
		}
		implementation, err := makeImplementation(c, funcType, query, paramAdapter, nameOrderMap)
		if err != nil {
			out = multierr.Append(out, Error{FuncName: curField.Name, FieldOrder: i, OriginalError: err})
			continue
		}
		funcs[i] = reflect.MakeFunc(funcType, implementation)
	}
	// All-or-nothing assignment: populate the DAO only when error-free.
	if out == nil {
		for i, v := range funcs {
			if v.IsValid() {
				fieldValue := daoValue.Field(i)
				fieldValue.Set(v)
			}
		}
	}
	return out
}
// Build is the main entry point into Proteus. It takes in a pointer to a DAO struct to populate,
// a proteus.ParamAdapter, and zero or more proteus.QueryMapper instances.
//
// As of version v0.12.0, all errors found during building will be reported back. Also, prefer using
// proteus.ShouldBuild over proteus.Build.
//
// Unlike ShouldBuild, Build assigns each generated implementation as soon as
// it is created, so fields preceding a failing field are still populated.
func Build(dao interface{}, paramAdapter ParamAdapter, mappers ...QueryMapper) error {
	rw.RLock()
	c := logger.WithLevel(context.Background(), l)
	rw.RUnlock()
	daoPointerType := reflect.TypeOf(dao)
	//must be a pointer to struct
	if daoPointerType.Kind() != reflect.Ptr {
		return stackerr.New("not a pointer")
	}
	daoType := daoPointerType.Elem()
	//if not a struct, error out
	if daoType.Kind() != reflect.Struct {
		return stackerr.New("not a pointer to struct")
	}
	daoPointerValue := reflect.ValueOf(dao)
	daoValue := reflect.Indirect(daoPointerValue)
	var outErr error
	//for each field in ProductDao that is of type func and has a proteus struct tag, assign it a func
	for i := 0; i < daoType.NumField(); i++ {
		curField := daoType.Field(i)
		//Implement embedded fields -- if we have a field of type struct and it's anonymous,
		//recurse
		if curField.Type.Kind() == reflect.Struct && curField.Anonymous {
			pv := reflect.New(curField.Type)
			err := Build(pv.Interface(), paramAdapter, mappers...)
			if err != nil {
				outErr = multierr.Append(outErr, err)
			}
			daoValue.Field(i).Set(pv.Elem())
			continue
		}
		// Only func-typed fields carrying a "proq" tag are processed.
		query, ok := curField.Tag.Lookup("proq")
		if curField.Type.Kind() != reflect.Func || !ok {
			continue
		}
		funcType := curField.Type
		//validate to make sure that the function matches what we expect
		hasCtx, err := validateFunction(funcType)
		if err != nil {
			logger.Log(c, logger.WARN, fmt.Sprintln("skipping function", curField.Name, "due to error:", err.Error()))
			outErr = multierr.Append(outErr, err)
			continue
		}
		paramOrder := curField.Tag.Get("prop")
		var nameOrderMap map[string]int
		// Parameter positions start after the Executor/Querier (and after the
		// context, when present).
		startPos := 1
		if hasCtx {
			startPos = 2
		}
		if len(paramOrder) == 0 {
			nameOrderMap = buildDummyParameters(funcType.NumIn(), startPos)
		} else {
			nameOrderMap = buildNameOrderMap(paramOrder, startPos)
		}
		//check to see if the query is in a QueryMapper
		query, err = lookupQuery(query, mappers)
		if err != nil {
			logger.Log(c, logger.WARN, fmt.Sprintln("skipping function", curField.Name, "due to error:", err.Error()))
			outErr = multierr.Append(outErr, err)
			continue
		}
		implementation, err := makeImplementation(c, funcType, query, paramAdapter, nameOrderMap)
		if err != nil {
			logger.Log(c, logger.WARN, fmt.Sprintln("skipping function", curField.Name, "due to error:", err.Error()))
			outErr = multierr.Append(outErr, err)
			continue
		}
		fieldValue := daoValue.Field(i)
		fieldValue.Set(reflect.MakeFunc(funcType, implementation))
	}
	if outErr != nil {
		return outErr
	}
	return nil
}
// Reflected interface/context types used to classify DAO function
// signatures in validateFunction and makeImplementation.
var (
	exType      = reflect.TypeOf((*Executor)(nil)).Elem()
	qType       = reflect.TypeOf((*Querier)(nil)).Elem()
	contextType = reflect.TypeOf((*context.Context)(nil)).Elem()
	conExType   = reflect.TypeOf((*ContextExecutor)(nil)).Elem()
	conQType    = reflect.TypeOf((*ContextQuerier)(nil)).Elem()
)
// validateFunction checks that a DAO function signature is one Proteus can
// implement: the first parameter is a context.Context, Executor, or Querier
// (with a ContextExecutor/ContextQuerier second parameter in the context
// case), no input parameter is a channel, and the 0-2 return values follow
// the Executor/Querier rules. It reports whether the signature takes a
// context, and any validation error.
func validateFunction(funcType reflect.Type) (bool, error) {
	//first parameter is Executor
	if funcType.NumIn() == 0 {
		return false, stackerr.New("need to supply an Executor or Querier parameter")
	}
	var isExec bool
	var hasContext bool
	switch fType := funcType.In(0); {
	case fType.Implements(contextType):
		hasContext = true
	case fType.Implements(exType):
		isExec = true
	case fType.Implements(qType):
		//do nothing isExec is false
	default:
		return false, stackerr.New("first parameter must be of type context.Context, Executor, or Querier")
	}
	start := 1
	if hasContext {
		start = 2
		switch fType := funcType.In(1); {
		case fType.Implements(conExType):
			isExec = true
		case fType.Implements(conQType):
			//do nothing isExec is false
		default:
			// Fix: this branch is about the second parameter; the previous
			// message was copy-pasted from the first-parameter check.
			return false, stackerr.New("second parameter must be of type ContextExecutor or ContextQuerier")
		}
	}
	//no in parameter can be a channel
	for i := start; i < funcType.NumIn(); i++ {
		if funcType.In(i).Kind() == reflect.Chan {
			return false, stackerr.New("no input parameter can be a channel")
		}
	}
	//has 0, 1, or 2 return values
	if funcType.NumOut() > 2 {
		return false, stackerr.New("must return 0, 1, or 2 values")
	}
	//if 2 return values, second is error
	if funcType.NumOut() == 2 {
		errType := reflect.TypeOf((*error)(nil)).Elem()
		if !funcType.Out(1).Implements(errType) {
			return false, stackerr.New("2nd output parameter must be of type error")
		}
	}
	//if 1 or 2, 1st param is not a channel (handle map, I guess)
	if funcType.NumOut() > 0 {
		if funcType.Out(0).Kind() == reflect.Chan {
			return false, stackerr.New("1st output parameter cannot be a channel")
		}
		if isExec && funcType.Out(0).Kind() != reflect.Int64 &&
			funcType.Out(0) != sqlResultType {
			return false, stackerr.New("the 1st output parameter of an Executor must be int64 or sql.Result")
		}
		//sql.Result only useful with executor.
		if !isExec && funcType.Out(0) == sqlResultType {
			return false, stackerr.New("output parameters of type sql.Result must be combined with Executor")
		}
	}
	return hasContext, nil
}
// makeImplementation converts the tagged query into a positional SQL string
// plus a parameter-order mapping, then returns the reflect.MakeFunc body
// appropriate for the function's first (and, with context, second) parameter
// type.
func makeImplementation(c context.Context, funcType reflect.Type, query string, paramAdapter ParamAdapter, nameOrderMap map[string]int) (func([]reflect.Value) []reflect.Value, error) {
	fixedQuery, paramOrder, err := buildFixedQueryAndParamOrder(c, query, nameOrderMap, funcType, paramAdapter)
	if err != nil {
		return nil, err
	}
	switch fType := funcType.In(0); {
	case fType.Implements(contextType):
		switch fType2 := funcType.In(1); {
		case fType2.Implements(conExType):
			return makeContextExecutorImplementation(c, funcType, fixedQuery, paramOrder), nil
		case fType2.Implements(conQType):
			return makeContextQuerierImplementation(c, funcType, fixedQuery, paramOrder)
		}
	case fType.Implements(exType):
		return makeExecutorImplementation(c, funcType, fixedQuery, paramOrder), nil
	case fType.Implements(qType):
		return makeQuerierImplementation(c, funcType, fixedQuery, paramOrder)
	}
	//this should be impossible, since we already validated that the first parameter is either an executor or a querier
	return nil, stackerr.New("first parameter must be of type Executor or Querier")
}
// lookupQuery resolves a "q:NAME" query reference through the supplied
// mappers, returning the first non-empty mapping. Queries without the "q:"
// prefix are returned unchanged.
func lookupQuery(query string, mappers []QueryMapper) (string, error) {
	const prefix = "q:"
	if !strings.HasPrefix(query, prefix) {
		return query, nil
	}
	name := strings.TrimPrefix(query, prefix)
	for _, mapper := range mappers {
		if mapped := mapper.Map(name); mapped != "" {
			return mapped, nil
		}
	}
	return "", stackerr.Errorf("no query found for name %s", name)
}
|
package graylogger
// SendGELF sends GELF messages into Graylog instance.
// If the Graylog host is unreachable, it writes an error message to stdOut.
//
// NOTE(review): keysAndValues is forwarded to send as a single slice
// argument, not expanded with "..." — confirm that send's signature expects
// the slice itself rather than variadic values.
func (g *GrayLogger) SendGELF(level int, keysAndValues ...interface{}) {
	// Only attempt delivery when the arguments validate for this level.
	if g.validateGraylogArguments(level) {
		g.connect().send(level, keysAndValues)
	}
}
|
package main
import (
"bytes"
"context"
"encoding/json"
"io/ioutil"
"net/http"
"github.com/epsagon/epsagon-go/epsagon"
epsagonhttp "github.com/epsagon/epsagon-go/wrappers/net/http"
)
// doTask posts a JSON payload to an example endpoint through an
// Epsagon-traced HTTP client, labeling and reporting failures to Epsagon.
func doTask(ctx context.Context) {
	client := http.Client{Transport: epsagonhttp.NewTracingTransport(ctx)}
	// This password will be masked in the sent trace:
	payload, err := json.Marshal(map[string]string{"password": "abcde", "animal": "lion"})
	if err != nil {
		epsagon.Label("animal", "lion", ctx)
		epsagon.TypeError(err, "json decoding error", ctx)
		// Fix: without a valid payload there is nothing to send.
		return
	}
	resp, err := client.Post("http://example.com/upload", "application/json", bytes.NewReader(payload))
	if err != nil {
		epsagon.Label("animal", "lion", ctx)
		epsagon.TypeError(err, "post", ctx)
		// Fix: resp is nil when Post fails; dereferencing it below would panic.
		return
	}
	// Fix: close the body so the underlying connection is not leaked.
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		// Fix: the original discarded ReadAll's error and then tested the
		// stale (nil) error from Post, so the else branch was unreachable.
		body, readErr := ioutil.ReadAll(resp.Body)
		if readErr == nil {
			epsagon.TypeError(string(body), "post status code", ctx)
		} else {
			epsagon.TypeError(readErr, "post status code", ctx)
		}
		epsagon.Label("animal", "lion", ctx)
	}
}
// main configures the Epsagon tracer (masking the "password" key) and runs
// doTask under Epsagon instrumentation.
func main() {
	// With Epsagon instrumentation
	cfg := epsagon.NewTracerConfig("test-ignored-keys", "")
	cfg.Debug = true
	cfg.MetadataOnly = false
	cfg.IgnoredKeys = []string{"password"}
	epsagon.ConcurrentGoWrapper(cfg, doTask)()
}
|
// Copyright 2017 Baidu, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.
// This file provide basic configuration of BCE Server
package bceconf
import (
"fmt"
"strconv"
)
import (
"utils/util"
)
const (
	// Section and option names as they appear in the configuration file.
	// NOTE(review): "BREAKPIONT" is a typo of "BREAKPOINT"; the exported
	// names are kept as-is for compatibility with existing callers.
	SERVER_SECTION_NAME                    = "defaults"
	DOMAIN_OPTION_NAME                     = "domain"
	REGION_OPTION_NAME                     = "region"
	USE_AUTO_SWITCH_DOMAIN_OPTION_NAME     = "auto_switch_domain"
	BREAKPIONT_FILE_EXPIRATION_OPTION_NAME = "breakpoint_file_expiration"
	USE_HTTPS_OPTION_NAME                  = "https"
	MULTI_UPLOAD_THREAD_NUM_NAME           = "multi_upload_thread_num"
	// Default values used when the configuration file omits an option.
	DEFAULT_DOMAIN_SUFFIX              = ".bcebos.com"
	DEFAULT_REGION                     = "bj"
	DEFAULT_USE_AUTO_SWITCH_DOMAIN     = "yes"
	DEFAULT_BREAKPIONT_FILE_EXPIRATION = "7"
	DEFAULT_USE_HTTPS_PROTOCOL         = "no"
	DEFAULT_MULTI_UPLOAD_THREAD_NUM    = "10"
	DEFAULT_MULTI_UPLOAD_PART_SIZE     = "10"
	DEFAULT_SYNC_PROCESSING_NUM        = "10"
	WILL_USE_AUTO_SWTICH_DOMAIN        = "yes"
	DOMAINS_SECTION_NAME               = "domains"
)
var (
	// AOLLOWED_CONFIRM_OPTIONS maps accepted yes/no spellings to booleans.
	// NOTE(review): "AOLLOWED" is a typo of "ALLOWED"; the exported name is
	// kept for compatibility with existing callers.
	AOLLOWED_CONFIRM_OPTIONS = map[string]bool{
		"y":   true,
		"yes": true,
		"Yes": true,
		"YES": true,
		"n":   false,
		"no":  false,
		"No":  false,
		"NO":  false,
	}
	// BOOL_TO_STRING renders a boolean in the config file's yes/no form.
	BOOL_TO_STRING = map[bool]string{
		true:  "yes",
		false: "no",
	}
	// DEFAULT_DOMAINS maps known region codes to their default endpoints.
	DEFAULT_DOMAINS = map[string]string{
		"bj":   "bj.bcebos.com",
		"gz":   "gz.bcebos.com",
		"su":   "su.bcebos.com",
		"hk02": "hk-2.bcebos.com",
		"hkg":  "hkg.bcebos.com",
		"yq":   "bos.yq.baidubce.com",
	}
)
// Store the default configuration.
// The default value of int is zero, therefore each parameter is special as string .
// (Keeping each option as a string lets an unset option be distinguished
// from an explicit zero.)
type ServerDefaultsCfg struct {
	Domain                   string
	Region                   string
	AutoSwitchDomain         string
	BreakpointFileExpiration string
	Https                    string
	MultiUploadThreadNum     string
	SyncProcessingNum        string
	MultiUploadPartSize      string
}

// Store region => domain
type EndpointCfg struct {
	Endpoint string
}

// ServerConfig is the full on-disk server configuration: the defaults
// section plus the per-region domain overrides.
type ServerConfig struct {
	Defaults ServerDefaultsCfg
	Domains  map[string]*EndpointCfg
}
// checkConfig validates the numeric string-typed options in cfg, returning
// an error describing the first invalid value. A nil cfg is valid.
func checkConfig(cfg *ServerConfig) error {
	if cfg == nil {
		return nil
	}
	if cfg.Defaults.BreakpointFileExpiration != "" {
		// Fix: name the Atoi error "err" rather than "ok".
		val, err := strconv.Atoi(cfg.Defaults.BreakpointFileExpiration)
		if err != nil || val < -1 {
			// Fix: the original concatenation produced "equalor greater".
			return fmt.Errorf("BreakpointFileExpiration must be integer, and equal " +
				"or greater than -1")
		}
	}
	if cfg.Defaults.MultiUploadThreadNum != "" {
		val, err := strconv.Atoi(cfg.Defaults.MultiUploadThreadNum)
		if err != nil || val < 1 {
			return fmt.Errorf("Multi upload thread number must be integer and greater than zero!")
		}
	}
	if cfg.Defaults.SyncProcessingNum != "" {
		val, err := strconv.Atoi(cfg.Defaults.SyncProcessingNum)
		if err != nil || val < 1 {
			return fmt.Errorf("the number of sync processing must greater than zero!")
		}
	}
	if cfg.Defaults.MultiUploadPartSize != "" {
		// Fix: dropped the original "val%1 != 0" term — n%1 is always 0 for
		// integers, so the condition was dead code.
		val, err := strconv.Atoi(cfg.Defaults.MultiUploadPartSize)
		if err != nil || val < 1 {
			return fmt.Errorf("part size must greater than zero!")
		}
	}
	return nil
}
// ServerConfigProviderInterface is implemented by sources of server
// configuration. Each getter's second result reports whether the value was
// actually configured.
type ServerConfigProviderInterface interface {
	GetDomain() (string, bool)
	GetDomainByRegion(string) (string, bool)
	GetRegion() (string, bool)
	GetUseAutoSwitchDomain() (bool, bool)
	GetBreakpointFileExpiration() (int, bool)
	GetUseHttpsProtocol() (bool, bool)
	GetMultiUploadThreadNum() (int64, bool)
	GetSyncProcessingNum() (int, bool)
	GetMultiUploadPartSize() (int64, bool)
}
// NewFileServerConfigProvider builds a provider that reads server
// configuration from the file at configPath. It returns an error when the
// file exists but cannot be loaded or fails validation.
func NewFileServerConfigProvider(configPath string) (*FileServerConfigProvider, error) {
	// Fix: renamed the misspelled parameter "cofingPath" and inverted the
	// error flow so the error path returns early.
	n := &FileServerConfigProvider{
		configFilePath: configPath,
	}
	if err := n.loadConfigFromFile(); err != nil {
		return nil, err
	}
	return n, nil
}
// FileServerConfigProvider reads server configuration from a file on
// disk and tracks in-memory modifications via the dirty flag.
type FileServerConfigProvider struct {
	// configFilePath is the path of the backing configuration file.
	configFilePath string
	// dirty is set when the in-memory cfg differs from the file on disk.
	dirty bool
	cfg *ServerConfig
}
// loadConfigFromFile initialises f.cfg and, when the configuration file
// exists, loads and validates its contents. A missing file is not an
// error: the provider then simply holds an empty configuration.
func (f *FileServerConfigProvider) loadConfigFromFile() error {
	f.cfg = &ServerConfig{}
	if ok := util.DoesFileExist(f.configFilePath); ok {
		if err := LoadConfig(f.configFilePath, f.cfg); err != nil {
			return fmt.Errorf("load configuration error: %s", err)
		}
		// Reject files whose numeric fields fail validation.
		if err := checkConfig(f.cfg); err != nil {
			return err
		}
	}
	return nil
}
// GetDomainByRegion resolves region to an endpoint address, preferring
// the user-configured Domains table and falling back to the built-in
// DEFAULT_DOMAINS map. The second result reports success.
func (f *FileServerConfigProvider) GetDomainByRegion(region string) (string, bool) {
	if region == "" {
		return "", false
	}
	if info, found := f.cfg.Domains[region]; found && info.Endpoint != "" {
		return info.Endpoint, true
	}
	if domain, found := DEFAULT_DOMAINS[region]; found && domain != "" {
		return domain, true
	}
	return "", false
}
// GetDomain returns the configured default domain; false when unset.
func (f *FileServerConfigProvider) GetDomain() (string, bool) {
	domain := f.cfg.Defaults.Domain
	return domain, domain != ""
}
// GetRegion returns the configured default region; false when unset.
func (f *FileServerConfigProvider) GetRegion() (string, bool) {
	region := f.cfg.Defaults.Region
	return region, region != ""
}
// GetUseAutoSwitchDomain maps the configured "yes"/"no" auto-switch
// flag to a bool; false,false when unset or unrecognised.
func (f *FileServerConfigProvider) GetUseAutoSwitchDomain() (bool, bool) {
	if s := f.cfg.Defaults.AutoSwitchDomain; s != "" {
		if val, known := AOLLOWED_CONFIRM_OPTIONS[s]; known {
			return val, true
		}
	}
	return false, false
}
// GetBreakpointFileExpiration parses the configured breakpoint file
// expiration; non-numeric values and values below -1 yield 0,false.
func (f *FileServerConfigProvider) GetBreakpointFileExpiration() (int, bool) {
	s := f.cfg.Defaults.BreakpointFileExpiration
	if s == "" {
		return 0, false
	}
	val, err := strconv.Atoi(s)
	if err != nil || val < -1 {
		return 0, false
	}
	return val, true
}
// GetUseHttpsProtocol maps the configured "yes"/"no" HTTPS flag to a
// bool; false,false when unset or unrecognised.
func (f *FileServerConfigProvider) GetUseHttpsProtocol() (bool, bool) {
	if s := f.cfg.Defaults.Https; s != "" {
		if val, known := AOLLOWED_CONFIRM_OPTIONS[s]; known {
			return val, true
		}
	}
	return false, false
}
// GetMultiUploadThreadNum parses the configured multi-upload thread
// count; only values strictly greater than zero are accepted.
func (f *FileServerConfigProvider) GetMultiUploadThreadNum() (int64, bool) {
	s := f.cfg.Defaults.MultiUploadThreadNum
	if s == "" {
		return 0, false
	}
	val, err := strconv.ParseInt(s, 10, 64)
	if err != nil || val <= 0 {
		return 0, false
	}
	return val, true
}
// GetSyncProcessingNum parses the configured sync-processing count;
// non-numeric values and values below -1 yield 0,false.
func (f *FileServerConfigProvider) GetSyncProcessingNum() (int, bool) {
	s := f.cfg.Defaults.SyncProcessingNum
	if s == "" {
		return 0, false
	}
	val, err := strconv.Atoi(s)
	if err != nil || val < -1 {
		return 0, false
	}
	return val, true
}
// GetMultiUploadPartSize parses the configured multi-upload part size;
// non-numeric values and values below -1 yield 0,false.
// (The original comment wrongly described this as the sync processing
// number.)
func (f *FileServerConfigProvider) GetMultiUploadPartSize() (int64, bool) {
	s := f.cfg.Defaults.MultiUploadPartSize
	if s == "" {
		return 0, false
	}
	val, err := strconv.ParseInt(s, 10, 64)
	if err != nil || val < -1 {
		return 0, false
	}
	return val, true
}
// SetDomain records a new default domain (which may be empty); the
// configuration is marked dirty only when the value actually changes.
func (f *FileServerConfigProvider) SetDomain(domain string) {
	if domain == f.cfg.Defaults.Domain {
		return
	}
	f.cfg.Defaults.Domain = domain
	f.dirty = true
}
// SetDomains replaces the entire region -> endpoint table (which may be
// empty) and always marks the configuration dirty.
func (f *FileServerConfigProvider) SetDomains(domains map[string]*EndpointCfg) {
	f.cfg.Domains = domains
	f.dirty = true
}
// DelDomainInDomains removes region from the table. A non-empty region
// always marks the configuration dirty, even when it was absent.
func (f *FileServerConfigProvider) DelDomainInDomains(region string) {
	if region == "" {
		return
	}
	delete(f.cfg.Domains, region)
	f.dirty = true
}
// InsertDomainIntoDomains maps region to domain in the override table,
// allocating the table on first use. It returns false when either
// argument is empty, true otherwise; inserting an entry that is already
// present with the same endpoint is a no-op (and does not mark dirty).
func (f *FileServerConfigProvider) InsertDomainIntoDomains(region, domain string) bool {
	if region == "" || domain == "" {
		return false
	}
	if existing, found := f.cfg.Domains[region]; found && existing != nil && existing.Endpoint == domain {
		return true
	}
	if f.cfg.Domains == nil {
		f.cfg.Domains = make(map[string]*EndpointCfg)
	}
	f.cfg.Domains[region] = &EndpointCfg{Endpoint: domain}
	f.dirty = true
	return true
}
// SetRegion stores a new default region; the configuration is marked
// dirty only on an actual change. Always returns true.
func (f *FileServerConfigProvider) SetRegion(region string) bool {
	if region == f.cfg.Defaults.Region {
		return true
	}
	f.cfg.Defaults.Region = region
	f.dirty = true
	return true
}
// SetUseAutoSwitchDomain stores the auto-switch-domain flag ("yes" or
// "no"); dirty only on change. Always returns true.
func (f *FileServerConfigProvider) SetUseAutoSwitchDomain(useAutoSwitchDomain string) bool {
	if useAutoSwitchDomain == f.cfg.Defaults.AutoSwitchDomain {
		return true
	}
	f.cfg.Defaults.AutoSwitchDomain = useAutoSwitchDomain
	f.dirty = true
	return true
}
// SetBreakpointFileExpiration stores the breakpoint file expiration
// (may be empty); dirty only on change. Always returns true.
func (f *FileServerConfigProvider) SetBreakpointFileExpiration(num string) bool {
	if num == f.cfg.Defaults.BreakpointFileExpiration {
		return true
	}
	f.cfg.Defaults.BreakpointFileExpiration = num
	f.dirty = true
	return true
}
// SetUseHttpsProtocol stores the HTTPS flag; dirty only on change.
// Always returns true.
func (f *FileServerConfigProvider) SetUseHttpsProtocol(useHttpsProtocol string) bool {
	if useHttpsProtocol == f.cfg.Defaults.Https {
		return true
	}
	f.cfg.Defaults.Https = useHttpsProtocol
	f.dirty = true
	return true
}
// SetMultiUploadThreadNum stores the multi-upload thread count; dirty
// only on change. Always returns true.
func (f *FileServerConfigProvider) SetMultiUploadThreadNum(multiUploadThreadNum string) bool {
	if multiUploadThreadNum == f.cfg.Defaults.MultiUploadThreadNum {
		return true
	}
	f.cfg.Defaults.MultiUploadThreadNum = multiUploadThreadNum
	f.dirty = true
	return true
}
// SetSyncProcessingNum stores the sync-processing count; dirty only on
// change. Always returns true.
func (f *FileServerConfigProvider) SetSyncProcessingNum(syncProcessingNum string) bool {
	if syncProcessingNum == f.cfg.Defaults.SyncProcessingNum {
		return true
	}
	f.cfg.Defaults.SyncProcessingNum = syncProcessingNum
	f.dirty = true
	return true
}
// SetMultiUploadPartSize stores the multi-upload part size; dirty only
// on change. Always returns true.
func (f *FileServerConfigProvider) SetMultiUploadPartSize(multiUploadPartSize string) bool {
	if multiUploadPartSize == f.cfg.Defaults.MultiUploadPartSize {
		return true
	}
	f.cfg.Defaults.MultiUploadPartSize = multiUploadPartSize
	f.dirty = true
	return true
}
// save writes the configuration back to disk. It fails when no file
// path is configured and is a no-op when nothing changed. On success
// the dirty flag is cleared.
// (Fixes the "emtpy" typo in the error message and flattens the
// else-after-return chains into early returns.)
func (f *FileServerConfigProvider) save() error {
	if f.configFilePath == "" {
		return fmt.Errorf("The path of configuration file is empty")
	}
	if !f.dirty {
		return nil
	}
	if err := WriteConfig(f.configFilePath, f.cfg); err != nil {
		return err
	}
	f.dirty = false
	return nil
}
// DefaultServerConfigProvider supplies the built-in default values for
// the server configuration.
type DefaultServerConfigProvider struct{}
// NewDefaultServerConfigProvider returns a ready-to-use default
// provider; the error result is always nil and exists only to match the
// other constructors.
func NewDefaultServerConfigProvider() (*DefaultServerConfigProvider, error) {
	return &DefaultServerConfigProvider{}, nil
}
// GetDomain returns the built-in default domain, which is
// DEFAULT_REGION + DEFAULT_DOMAIN_SUFFIX. Always succeeds.
func (d *DefaultServerConfigProvider) GetDomain() (string, bool) {
	return DEFAULT_REGION + DEFAULT_DOMAIN_SUFFIX, true
}
// GetDomainByRegion resolves region to a domain. Known regions come
// from DEFAULT_DOMAINS; an unknown non-empty region is synthesised as
// region + DEFAULT_DOMAIN_SUFFIX; an empty region falls back to the
// default region. Always succeeds.
func (d *DefaultServerConfigProvider) GetDomainByRegion(region string) (string, bool) {
	if region == "" {
		return DEFAULT_REGION + DEFAULT_DOMAIN_SUFFIX, true
	}
	if domain, known := DEFAULT_DOMAINS[region]; known && domain != "" {
		return domain, true
	}
	return region + DEFAULT_DOMAIN_SUFFIX, true
}
// GetRegion returns the built-in default region. Always succeeds.
func (d *DefaultServerConfigProvider) GetRegion() (string, bool) {
	return DEFAULT_REGION, true
}
// GetUseAutoSwitchDomain maps the built-in default auto-switch flag
// through AOLLOWED_CONFIRM_OPTIONS; false,false when unrecognised.
func (d *DefaultServerConfigProvider) GetUseAutoSwitchDomain() (bool, bool) {
	if val, known := AOLLOWED_CONFIRM_OPTIONS[DEFAULT_USE_AUTO_SWITCH_DOMAIN]; known {
		return val, true
	}
	return false, false
}
// GetBreakpointFileExpiration parses the built-in default breakpoint
// file expiration; non-numeric defaults or values below -1 yield 0,false.
func (d *DefaultServerConfigProvider) GetBreakpointFileExpiration() (int, bool) {
	if DEFAULT_BREAKPIONT_FILE_EXPIRATION == "" {
		return 0, false
	}
	val, err := strconv.Atoi(DEFAULT_BREAKPIONT_FILE_EXPIRATION)
	if err != nil || val < -1 {
		return 0, false
	}
	return val, true
}
// GetUseHttpsProtocol maps the built-in default HTTPS flag through
// AOLLOWED_CONFIRM_OPTIONS; false,false when unrecognised.
func (d *DefaultServerConfigProvider) GetUseHttpsProtocol() (bool, bool) {
	if val, known := AOLLOWED_CONFIRM_OPTIONS[DEFAULT_USE_HTTPS_PROTOCOL]; known {
		return val, true
	}
	return false, false
}
// GetMultiUploadThreadNum parses the built-in default multi-upload
// thread count; non-numeric defaults or values below -1 yield 0,false.
func (d *DefaultServerConfigProvider) GetMultiUploadThreadNum() (int64, bool) {
	if DEFAULT_MULTI_UPLOAD_THREAD_NUM == "" {
		return 0, false
	}
	val, err := strconv.ParseInt(DEFAULT_MULTI_UPLOAD_THREAD_NUM, 10, 64)
	if err != nil || val < -1 {
		return 0, false
	}
	return val, true
}
// GetMultiUploadPartSize parses the built-in default multi-upload part
// size; non-numeric defaults or values below -1 yield 0,false.
func (d *DefaultServerConfigProvider) GetMultiUploadPartSize() (int64, bool) {
	if DEFAULT_MULTI_UPLOAD_PART_SIZE == "" {
		return 0, false
	}
	val, err := strconv.ParseInt(DEFAULT_MULTI_UPLOAD_PART_SIZE, 10, 64)
	if err != nil || val < -1 {
		return 0, false
	}
	return val, true
}
// GetSyncProcessingNum parses the built-in default sync-processing
// count; non-numeric defaults or values below -1 yield 0,false.
func (d *DefaultServerConfigProvider) GetSyncProcessingNum() (int, bool) {
	if DEFAULT_SYNC_PROCESSING_NUM == "" {
		return 0, false
	}
	val, err := strconv.Atoi(DEFAULT_SYNC_PROCESSING_NUM)
	if err != nil || val < -1 {
		return 0, false
	}
	return val, true
}
// NewChainServerConfigProvider builds a provider that queries each
// provider in chain, in order, until one reports success.
func NewChainServerConfigProvider(chain []ServerConfigProviderInterface) *ChainServerConfigProvider {
	return &ChainServerConfigProvider{chain: chain}
}
// ChainServerConfigProvider consults an ordered list of providers and
// returns the first successful answer; every getter panics when no
// provider in the chain can supply a value.
type ChainServerConfigProvider struct {
	chain []ServerConfigProviderInterface
}
// GetDomain returns the first domain supplied by the provider chain;
// it panics when no provider can supply one.
// (The unreachable "return" statements after panic — flagged by go vet —
// were removed throughout, and the "There if no" typo was fixed.)
func (c *ChainServerConfigProvider) GetDomain() (string, bool) {
	for _, provider := range c.chain {
		if val, ok := provider.GetDomain(); ok {
			return val, true
		}
	}
	panic("There is no domain found!")
}
// GetDomainByRegion returns the first domain resolved for region by the
// provider chain; panics when none succeeds.
func (c *ChainServerConfigProvider) GetDomainByRegion(region string) (string, bool) {
	for _, provider := range c.chain {
		if val, ok := provider.GetDomainByRegion(region); ok {
			return val, true
		}
	}
	panic("There is no domain found!")
}
// GetRegion returns the first region supplied by the provider chain;
// panics when none succeeds.
func (c *ChainServerConfigProvider) GetRegion() (string, bool) {
	for _, provider := range c.chain {
		if val, ok := provider.GetRegion(); ok {
			return val, true
		}
	}
	panic("There is no region found!")
}
// GetUseAutoSwitchDomain returns the first auto-switch flag supplied by
// the provider chain; panics when none succeeds.
func (c *ChainServerConfigProvider) GetUseAutoSwitchDomain() (bool, bool) {
	for _, provider := range c.chain {
		if val, ok := provider.GetUseAutoSwitchDomain(); ok {
			return val, true
		}
	}
	panic("There is no use_auto_switch_domain found!")
}
// GetBreakpointFileExpiration returns the first breakpoint file
// expiration supplied by the provider chain; panics when none succeeds.
func (c *ChainServerConfigProvider) GetBreakpointFileExpiration() (int, bool) {
	for _, provider := range c.chain {
		if val, ok := provider.GetBreakpointFileExpiration(); ok {
			return val, true
		}
	}
	panic("There is no breakpoint file expiration found!")
}
// GetUseHttpsProtocol returns the first HTTPS flag supplied by the
// provider chain; panics when none succeeds.
// (The unreachable "return" statements after panic — flagged by go vet —
// were removed throughout.)
func (c *ChainServerConfigProvider) GetUseHttpsProtocol() (bool, bool) {
	for _, provider := range c.chain {
		if val, ok := provider.GetUseHttpsProtocol(); ok {
			return val, true
		}
	}
	panic("There is no https protocol info found!")
}
// GetMultiUploadThreadNum returns the first multi-upload thread count
// supplied by the provider chain; panics when none succeeds.
func (c *ChainServerConfigProvider) GetMultiUploadThreadNum() (int64, bool) {
	for _, provider := range c.chain {
		if val, ok := provider.GetMultiUploadThreadNum(); ok {
			return val, true
		}
	}
	panic("There is no MultiUploadThreadNum found!")
}
// GetSyncProcessingNum returns the first sync-processing count supplied
// by the provider chain; panics when none succeeds.
func (c *ChainServerConfigProvider) GetSyncProcessingNum() (int, bool) {
	for _, provider := range c.chain {
		if val, ok := provider.GetSyncProcessingNum(); ok {
			return val, true
		}
	}
	panic("There is no info about sync processing num found!")
}
// GetMultiUploadPartSize returns the first multi-upload part size
// supplied by the provider chain; panics when none succeeds.
func (c *ChainServerConfigProvider) GetMultiUploadPartSize() (int64, bool) {
	for _, provider := range c.chain {
		if val, ok := provider.GetMultiUploadPartSize(); ok {
			return val, true
		}
	}
	panic("There is no MultiUploadPartSize found!")
}
|
package helper
import (
"QRcodeBillApi/database"
"QRcodeBillApi/models"
"strconv"
"time"
"github.com/gofiber/fiber/v2"
)
// Menu handles the "add a new menu item" form upload: it validates the
// required fields, accepts only jpg/png images, stores the upload under
// images/ and persists the new Food row.
func Menu(c *fiber.Ctx) error {
	// Validate required fields first. Bug fix: the 400 response was
	// previously not returned, so processing continued with empty fields.
	if c.FormValue("fname") == "" || c.FormValue("ftype") == "" || c.FormValue("fprice") == "" {
		return c.Status(400).JSON(fiber.Map{"error": "Name is empty"})
	}
	price, _ := strconv.ParseFloat(c.FormValue("fprice"), 64)
	path := key(5)
	file, err := c.FormFile("ffile")
	if err != nil {
		return err
	}
	// Crude extension check: the last three characters of the filename.
	// Guard against names shorter than that (would panic on the slice).
	if len(file.Filename) < 3 {
		return c.Status(400).SendString("error : File is not image")
	}
	ext := file.Filename
	ext = ext[len(ext)-3:]
	if ext != "jpg" && ext != "png" {
		return c.Status(400).SendString("error : File is not image")
	}
	menu := models.Food{
		Name: c.FormValue("fname"),
		Price: price,
		Type: c.FormValue("ftype"),
		Description: c.FormValue("fdescription"),
		Path: path,
		Created: time.Now(),
	}
	if err = c.SaveFile(file, "images/"+path+ext); err != nil {
		return err
	}
	database.DB.Create(&menu)
	return c.Status(200).SendString("Menu added")
}
|
package array_dyncon
import (
"testing"
"math/big"
)
// TestSetGet verifies that Push appends to each of the contract's
// arrays and that length and element queries stay consistent.
// (Fixes the failure messages, which previously reported the loop index
// x instead of the expected length, plus typos and stray semicolons.)
func TestSetGet(t *testing.T) {
	const arrayLength = 3
	myContract := NewArrayPush()
	// Every array must start empty.
	for i := 0; i < arrayLength; i++ {
		bigI := big.NewInt(int64(i))
		startLen := myContract.ArrayLength(bigI)
		if startLen.Cmp(big.NewInt(0)) != 0 {
			t.Fatalf("Initial array length should be zero not %d", startLen.Int64())
		}
	}
	for x := 0; x < arrayLength; x++ {
		bigX := big.NewInt(int64(x))
		for value := 0; value < 20; value++ {
			rtvLength := myContract.ArrayLength(bigX).Int64()
			if rtvLength != int64(value) {
				t.Fatalf("arrayLength before push returned %d instead of %d.", rtvLength, value)
			}
			bigValue := big.NewInt(int64(value))
			myContract.Push(bigX, bigValue)
			rtvLength = myContract.ArrayLength(bigX).Int64()
			if rtvLength != int64(value+1) {
				t.Fatalf("arrayLength after push returned %d instead of %d.", rtvLength, value+1)
			}
			rtv := myContract.MyArray(bigValue, bigX).Int64()
			if rtv != int64(value) {
				t.Fatalf("Returned wrong array element at position %d %d", value, x)
			}
		}
	}
}
|
package main
// p12928 returns the sum of all positive divisors of n.
// Divisors are enumerated in pairs (d, n/d) up to sqrt(n); when n is a
// perfect square the square root is counted only once.
func p12928(n int) int {
	sum := 0
	for d := 1; d*d <= n; d++ {
		if n%d != 0 {
			continue
		}
		if d*d == n {
			sum += d
		} else {
			sum += d + n/d
		}
	}
	return sum
}
|
package main
import (
"os"
)
// main creates the directory E:/test/ (mode bits are ignored on
// Windows). Bug fix: the error from os.Mkdir was previously discarded;
// it is now reported on stderr with a non-zero exit status.
func main() {
	if err := os.Mkdir("E:/test/", 0777); err != nil {
		os.Stderr.WriteString(err.Error() + "\n")
		os.Exit(1)
	}
}
|
package raft
// Entry is a single record of the replicated log.
type Entry struct {
	Term int
	Index int
	Command interface{}
}
// ApplyMsg carries a log command up to the service layer; CommandValid
// distinguishes real commands from other message kinds.
type ApplyMsg struct {
	CommandValid bool
	Command interface{}
	CommandIndex int
}
// RequestVoteArgs is the argument for the RequestVote RPC; the last-log
// fields let voters compare log recency.
type RequestVoteArgs struct {
	Term int
	CandidateId int
	LastLogIndex int
	LastLogTerm int
}
// RequestVoteReply is the reply for the RequestVote RPC.
// NOTE(review): LastLogIndex/LastLogTerm go beyond the Raft paper's
// reply shape; their use is not visible here — confirm against callers.
type RequestVoteReply struct {
	Term int
	VoteGranted bool
	LastLogIndex int
	LastLogTerm int
}
// RequestAppendEntriesArgs is the argument for the AppendEntries RPC
// (also used as the heartbeat when Entries is empty).
type RequestAppendEntriesArgs struct {
	Term int // leader’s term
	LeaderId int // so follower can redirect clients
	PrevLogIndex int // index of log entry immediately preceding new ones
	PrevLogTerm int // term of prevLogIndex entry
	Entries []Entry // log entries to store (empty for heartbeat; may send more than one for efficiency)
	LeaderCommitted int // leader’s committedIndex
}
// RequestAppendEntriesReply is the reply for the AppendEntries RPC;
// ConflictIndex supports the fast log-backtracking optimisation.
type RequestAppendEntriesReply struct {
	Term int // currentTerm, for leader to update itself
	Success bool // true if follower contained entry matching prevLogIndex and prevLogTerm
	ConflictIndex int // conflict index for follower log and leader RequestAppendEntries args
}
// RequestInstallSnapshotArgs is the argument for the InstallSnapshot
// RPC; chunking fields (Offset/Done) are commented out, so the whole
// snapshot is sent in one message.
type RequestInstallSnapshotArgs struct {
	Term int // leader’s term
	LeaderId int // so follower can redirect clients
	LastIncludedIndex int // the snapshot replaces all entries up through and including this index
	LastIncludedTerm int // term of lastIncludedIndex
	//Offset int // byte offset where chunk is positioned in the snapshot file
	Data []byte // raw bytes of the snapshot chunk, starting at offset
	//Done bool // true if this is the last chunk
}
// RequestInstallSnapshotReply is the reply for the InstallSnapshot RPC.
type RequestInstallSnapshotReply struct {
	Term int // currentTerm, for leader to update itself
}
|
package errors
import (
"fmt"
)
// InvalidError indicates a date string that is not a valid or supported
// SFO Museum representation.
type InvalidError struct{}
// Error implements the error interface.
func (e *InvalidError) Error() string {
	return fmt.Sprintf("Invalid or unsupported SFO Museum date string")
}
// Invalid returns a new InvalidError.
func Invalid() error {
	return &InvalidError{}
}
// IsInvalid reports whether e is an *InvalidError.
func IsInvalid(e error) bool {
	_, ok := e.(*InvalidError)
	return ok
}
|
package main
import (
"errors"
"github.com/smallfish/simpleyaml"
"io/ioutil"
"log"
"sort"
"strings"
)
// ReadYaml reads fileName and parses it as YAML via
// github.com/smallfish/simpleyaml.
// Bug fix: a read failure is now returned to the caller (as the
// signature promises) instead of terminating the whole process with
// log.Fatalf.
func ReadYaml(fileName string) (*simpleyaml.Yaml, error) {
	file, err := ioutil.ReadFile(fileName)
	if err != nil {
		return nil, err
	}
	return simpleyaml.NewYaml(file)
}
// ParseDefinitions returns table rows (API, Data_element, property,
// description) for every path -> definition pair: each definition's
// "properties" map is looked up in the YAML and one row is emitted per
// property, sorted by property name. The first row is the header.
func ParseDefinitions(definitions map[string]string, yaml *simpleyaml.Yaml) ([][]string, error) {
	rows := make([][]string, 1)
	rows[0] = []string{"API", "Data_element", "property", "description"}
	for path, definition := range definitions {
		// Definition references look like "#/definitions/Name".
		definition = strings.ReplaceAll(definition, "#/definitions/", "")
		log.Printf("%s - %s", path, definition)
		response, err := yaml.GetPath("definitions", definition, "properties").Map()
		if err != nil {
			return nil, err
		}
		// Sort the property names so the output order is deterministic.
		keys := make([]string, 0, len(response))
		for k := range response {
			keys = append(keys, k.(string))
		}
		sort.Strings(keys)
		for _, value := range keys {
			descriptors := response[value]
			// Pull the "description" attribute when present; empty otherwise.
			description := ""
			for key, value := range descriptors.(map[interface{}]interface{}) {
				if key == "description" {
					description = value.(string)
					break
				}
			}
			rows = append(rows, []string{path, definition, value, description})
		}
	}
	return rows, nil
}
// ParseGetPaths walks the swagger "paths" section and returns a map of
// path -> schema definition reference, taken from each method's
// 200-response schema. Methods without a 200 response or without a
// usable "$ref" (directly or under "items") are skipped.
func ParseGetPaths(yaml *simpleyaml.Yaml) (map[string]string, error) {
	if !yaml.Get("paths").IsFound() {
		return nil, errors.New("paths not found")
	}
	paths, err := yaml.Get("paths").Map()
	if err != nil {
		return nil, err
	}
	getMap := map[string]string{}
	for path, element := range paths {
		methods := element.(map[interface{}]interface{})
		for _, method := range methods {
			details := method.(map[interface{}]interface{})
			response := details["responses"]
			if response == nil {
				continue
			}
			// The 200 status may be keyed as an int or as a string,
			// depending on how the YAML was written.
			status := response.(map[interface{}]interface{})[200]
			if status == nil {
				status = response.(map[interface{}]interface{})["200"]
			}
			// Check for int or string
			if status == nil {
				continue
			}
			success := status.(map[interface{}]interface{})["schema"]
			if success == nil {
				continue
			}
			schema := success.(map[interface{}]interface{})
			// A direct "$ref" wins; an array schema nests it under "items".
			var definition string
			if schema["$ref"] == nil && schema["items"] != nil {
				items := schema["items"].(map[interface{}]interface{})["$ref"]
				definition = items.(string)
			} else if schema["$ref"] != nil {
				definition = schema["$ref"].(string)
			} else {
				continue
			}
			// NOTE(review): multiple methods on one path overwrite each
			// other; the last method iterated wins — confirm intended.
			getMap[path.(string)] = definition
		}
	}
	return getMap, nil
}
|
package usecase
import "github.com/jerolan/slack-poll/domain/entity"
// UpsertPollAnswer stores pollAnswer after verifying that the poll it
// refers to exists; a lookup failure is returned unchanged.
func (uc *UseCase) UpsertPollAnswer(pollAnswer *entity.PollAnswer) error {
	if _, err := uc.pollService.GetPollByID(pollAnswer.PollID); err != nil {
		return err
	}
	return uc.pollService.CreatePollAnswer(pollAnswer)
}
|
package main
import (
"github.com/go-flutter-desktop/go-flutter"
"github.com/go-flutter-desktop/plugins/go-plugin-example/battery"
"github.com/go-flutter-desktop/plugins/go-plugin-example/complex"
)
// options configures the go-flutter application: a 100x100 initial
// window, closing the app on SystemNavigator.pop(), and two example
// plugins (battery and complex).
var options = []flutter.Option{
	flutter.WindowInitialDimensions(100, 100),
	flutter.PopBehavior(flutter.PopBehaviorClose), // on SystemNavigator.pop() closes the app
	flutter.AddPlugin(&battery.MyBatteryPlugin{}), // our wiki plugin
	flutter.AddPlugin(&complex.Example{}), // another example
}
|
package handler
import (
"encoding/json"
"github.com/k1dan/string-encryptor/randomizer/encryptor"
string_creator "github.com/k1dan/string-encryptor/randomizer/string-creator"
"net/http"
"strconv"
)
// Number is the request payload: the count of strings to generate,
// carried as a string in field "n".
type Number struct {
	N string `json:"n"`
}
// Strings holds the generated random strings under field "s".
type Strings struct {
	S []string `json:"s"`
}
// CreateStrings handles requests carrying {"n": "<count>"}: it generates
// n random strings, encrypts them and writes the result as JSON.
// Bug fixes: decode and Atoi errors were previously discarded (any bad
// input silently produced an empty result) and the local variable
// shadowed the standard "strings" package name.
func CreateStrings(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	var number Number
	if err := json.NewDecoder(r.Body).Decode(&number); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	n, err := strconv.Atoi(number.N)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	var generated Strings
	for i := 0; i < n; i++ {
		generated.S = append(generated.S, string_creator.RandStringRunes())
	}
	encr := encryptor.Encrypt(generated.S)
	if err := json.NewEncoder(w).Encode(encr); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
|
package reload
import (
"github.com/wudiliujie/common/log"
"io/ioutil"
"yxlserver/services/tb"
"yxlserver/services/tb/tbArea"
)
// Reload re-reads conf/cfg_data.json and re-initialises the config
// tables. A read failure is logged and the previous data is left
// untouched.
func Reload() {
	data, err := ioutil.ReadFile("conf/cfg_data.json")
	if err != nil {
		log.Error("bReload %v", err)
		return
	}
	tb.InitCfgData(data)
	tbArea.Init()
}
|
package main
import "fmt"
// main demonstrates panic/recover: the deferred function intercepts the
// panic value and prints it, so the program exits normally.
func main() {
	// Run this before returning.
	defer func() {
		// recover returns the value passed to panic (nil if no panic).
		str := recover()
		fmt.Println(str)
	}()
	panic("should be recovered.")
	fmt.Println("Unreachable code.") // never executes: panic transfers control to the deferred func
}
|
package main
import (
"fmt"
"reflect"
)
// Person is a simple value type used to demonstrate embedding and
// reflection-based method calls.
type Person struct {
	Name string
	Age int
}
// Teacher embeds Person, inheriting its fields and methods.
type Teacher struct {
	Person
}
// GetName returns the person's name. The method name is significant:
// main looks it up dynamically via reflect MethodByName("GetName").
func (p *Person) GetName() string {
	return p.Name
}
// main embeds a Person into a Teacher, then invokes GetName dynamically
// through reflection on a *Person value.
func main() {
	t := &Teacher{}
	p := &Person{Name: "cuihang", Age: 10}
	t.Person = *p
	fmt.Println(t.Person.Name)
	// Look the method up by name; the method set of *Person includes
	// GetName because it has a pointer receiver.
	v := reflect.ValueOf(p)
	m := v.MethodByName("GetName")
	input := make([]reflect.Value, 0)
	output := m.Call(input)
	// output[0] is a reflect.Value wrapping the returned string.
	name := output[0]
	fmt.Println(name)
}
|
package golog
import (
"fmt"
"io"
"os"
)
// WriterLogger implements the Logger interface using different io.Writers for
// the different log levels. Construct instances via NewLogger or
// NewWriterLogger.
type WriterLogger struct {
	// BaseLogger is used to provide the basic functionality of a Logger
	*BaseLogger
	// FatalWriter is used to output FATAL level log messages
	FatalWriter io.Writer
	// ErrorWriter is used to output ERROR level log messages
	ErrorWriter io.Writer
	// WarnWriter is used to output WARN level log messages
	WarnWriter io.Writer
	// InfoWriter is used to output INFO level log messages
	InfoWriter io.Writer
	// DebugWriter is used to output DEBUG level log messages
	DebugWriter io.Writer
}
// NewLogger creates a WriterLogger that sends FATAL/ERROR/WARN output
// to stderr and INFO/DEBUG output to stdout.
func NewLogger(name string) *WriterLogger {
	l := &WriterLogger{BaseLogger: NewBaseLogger(name)}
	l.FatalWriter = os.Stderr
	l.ErrorWriter = os.Stderr
	l.WarnWriter = os.Stderr
	l.InfoWriter = os.Stdout
	l.DebugWriter = os.Stdout
	return l
}
// NewStdLogger is an alias for NewLogger, kept for compatibility with
// older callers.
func NewStdLogger(name string) *WriterLogger {
	return NewLogger(name)
}
// NewWriterLogger creates a WriterLogger with a caller-supplied writer
// for each log level.
func NewWriterLogger(name string, fatalWriter io.Writer, errorWriter io.Writer,
	warnWriter io.Writer, infoWriter io.Writer,
	debugWriter io.Writer) *WriterLogger {
	logger := &WriterLogger{BaseLogger: NewBaseLogger(name)}
	logger.FatalWriter = fatalWriter
	logger.ErrorWriter = errorWriter
	logger.WarnWriter = warnWriter
	logger.InfoWriter = infoWriter
	logger.DebugWriter = debugWriter
	return logger
}
// GetChild implements Logger.GetChild: the child logger shares this
// logger's writers and is named "<parent>.<child>".
func (l WriterLogger) GetChild(child string) Logger {
	childName := fmt.Sprintf("%s.%s", l.name, child)
	return NewWriterLogger(childName, l.FatalWriter, l.ErrorWriter,
		l.WarnWriter, l.InfoWriter, l.DebugWriter)
}
// Fatal logs data at FATAL level and terminates the process.
func (l WriterLogger) Fatal(data ...interface{}) {
	l.output(l.FatalWriter, FatalLevel, data...)
	os.Exit(1)
}
// Fatalf logs a formatted message at FATAL level and terminates the process.
func (l WriterLogger) Fatalf(format string, data ...interface{}) {
	l.outputf(l.FatalWriter, FatalLevel, format, data...)
	os.Exit(1)
}
// Error logs data at ERROR level.
func (l WriterLogger) Error(data ...interface{}) {
	l.output(l.ErrorWriter, ErrorLevel, data...)
}
// Errorf logs a formatted message at ERROR level.
func (l WriterLogger) Errorf(format string, data ...interface{}) {
	l.outputf(l.ErrorWriter, ErrorLevel, format, data...)
}
// Warn logs data at WARN level.
func (l WriterLogger) Warn(data ...interface{}) {
	l.output(l.WarnWriter, WarnLevel, data...)
}
// Warnf logs a formatted message at WARN level.
func (l WriterLogger) Warnf(format string, data ...interface{}) {
	l.outputf(l.WarnWriter, WarnLevel, format, data...)
}
// Info logs data at INFO level.
func (l WriterLogger) Info(data ...interface{}) {
	l.output(l.InfoWriter, InfoLevel, data...)
}
// Infof logs a formatted message at INFO level.
func (l WriterLogger) Infof(format string, data ...interface{}) {
	l.outputf(l.InfoWriter, InfoLevel, format, data...)
}
// Debug logs data at DEBUG level.
func (l WriterLogger) Debug(data ...interface{}) {
	l.output(l.DebugWriter, DebugLevel, data...)
}
// Debugf logs a formatted message at DEBUG level.
func (l WriterLogger) Debugf(format string, data ...interface{}) {
	l.outputf(l.DebugWriter, DebugLevel, format, data...)
}
|
package config
import (
"encoding/json"
"log"
"os"
)
// Database holds the database connection settings.
type Database struct {
	Type string `json:"type"`
	User string `json:"user"`
	Password string `json:"password"`
	Host string `json:"host"`
	Port string `json:"port"`
	Name string `json:"name"`
	TablePrefix string `json:"table_prefix"`
}
// Config is the top-level configuration object.
type Config struct {
	Database *Database `json:"database"`
}
var (
	// DatabaseSetting is the database section of the loaded configuration.
	DatabaseSetting = &Database{}
	// GlobalConfigSetting is the full loaded configuration instance.
	GlobalConfigSetting = &Config{}
)
// GetConfig reads config.json from the working directory into the
// package-level GlobalConfigSetting / DatabaseSetting instances.
// Any failure is fatal: the process cannot run without configuration.
func GetConfig() {
	confFile := "config.json"
	filePtr, err := os.Open(confFile) // location of the config file
	if err != nil {
		log.Fatalf("open config file from '%s' is error:%s\n", confFile, err.Error())
		return
	}
	defer filePtr.Close()
	// Decode the JSON document straight from the file stream.
	decoder := json.NewDecoder(filePtr)
	if err = decoder.Decode(GlobalConfigSetting); err != nil {
		log.Fatalf("decode config file error:%s\n", err.Error())
	}
	// Bug fix: publish the database section only after a successful
	// decode; previously it was read before the error was checked.
	DatabaseSetting = GlobalConfigSetting.Database
}
|
package main
import (
// "errors"
"fmt"
)
// main runs each control-flow demonstration in sequence.
func main() {
	ifController()
	forController()
	whileController()
	forCollectionIterator()
	switchController()
}
// ifController demonstrates conditional branching; with x fixed at 1
// only the first branch ever runs.
func ifController() {
	x := 1
	switch {
	case x == 1:
		fmt.Println("x == 1, ", x)
	case x == 2:
		fmt.Println("x == 2")
	default:
		fmt.Println("x != 1 and x != 2")
	}
}
// forController sums 1 + (1+2+...+99) with a three-clause for loop and
// prints the result (4951).
func forController() {
	total := 1
	for n := 1; n < 100; n++ {
		total += n
	}
	fmt.Println(total)
}
// whileController computes the same sum using Go's condition-only for
// loop (the "while" form) and prints it.
func whileController() {
	total := 1
	n := 1
	for n < 100 {
		total += n
		n++
	}
	fmt.Println(total)
}
// forCollectionIterator ranges over a map; note that map iteration
// order is not deterministic in Go.
func forCollectionIterator() {
	colors := map[string]string{"1": "a", "2": "b"}
	for key, val := range colors {
		fmt.Println(key, "=", val)
	}
}
// switchController demonstrates a value switch; without fallthrough
// only the matching case (here: 1) executes.
func switchController() {
	switch i := 1; i {
	case 0:
		fmt.Println(0)
	case 1:
		fmt.Println(1)
	default:
		fmt.Println(-1)
	}
}
|
package main
import (
"fmt"
"github.com/chidakiyo/benkyo/go-opai-codeegen-test/api"
"github.com/go-chi/chi/v5"
"net/http"
)
// Server implements the generated api.ServerInterface; every handler
// simply echoes its route for demonstration purposes.
type Server struct{}
// Returns all pets
// (GET /pets)
func (s Server) FindPets(w http.ResponseWriter, r *http.Request, params api.FindPetsParams) {
	fmt.Fprintf(w, "GET: /pets")
}
// Creates a new pet
// (POST /pets)
func (s Server) AddPet(w http.ResponseWriter, r *http.Request) {
	fmt.Fprintf(w, "POST: /pets")
}
// Deletes a pet by ID
// (DELETE /pets/{id})
func (s Server) DeletePet(w http.ResponseWriter, r *http.Request, id int64) {
	fmt.Fprintf(w, "DELETE: /pets/{id}")
}
// Returns a pet by ID
// (GET /pets/{id})
func (s Server) FindPetByID(w http.ResponseWriter, r *http.Request, id int64) {
	fmt.Fprintf(w, "GET: /pets/{id}")
}
// Compile-time check that Server satisfies the generated interface.
var _ api.ServerInterface = Server{}
// main wires the generated API handlers into a chi router and serves
// them on :8080. Bug fix: ListenAndServe only returns on failure, so
// that error is now surfaced instead of being silently discarded.
func main() {
	r := chi.NewRouter()
	sv := Server{}
	handle := api.HandlerFromMux(sv, r)
	if err := http.ListenAndServe(":8080", handle); err != nil {
		fmt.Println(err)
	}
}
|
/*
* Copyright IBM Corporation 2020, 2021
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package lib
import (
"io/ioutil"
"reflect"
"strings"
"github.com/konveyor/move2kube/internal/common"
"github.com/konveyor/move2kube/internal/k8sschema"
"github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/runtime/serializer"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// PrintValidate scans all yaml files under inputPath, decodes every
// YAML document against the known Kubernetes schema, and logs each
// annotation whose key carries the move2kube TODO prefix.
func PrintValidate(inputPath string) error {
	codecs := serializer.NewCodecFactory(k8sschema.GetSchema())
	filePaths, err := common.GetFilesByExt(inputPath, []string{".yml", ".yaml"})
	if err != nil {
		logrus.Errorf("Unable to fetch yaml files at path %q Error: %q", inputPath, err)
		return err
	}
	for _, filePath := range filePaths {
		// Per-file failures are logged at debug level and skipped so one
		// bad file does not abort the whole scan.
		data, err := ioutil.ReadFile(filePath)
		if err != nil {
			logrus.Debugf("Failed to read the yaml file at path %q Error: %q", filePath, err)
			continue
		}
		docs, err := common.SplitYAML(data)
		if err != nil {
			logrus.Debugf("Failed to split the file at path %q into YAML documents. Error: %q", filePath, err)
			continue
		}
		for _, doc := range docs {
			// Documents that do not decode against the schema are skipped.
			obj, _, err := codecs.UniversalDeserializer().Decode(doc, nil, nil)
			if err != nil {
				continue
			}
			// NOTE(review): assumes every decoded type has an ObjectMeta
			// field; a type without one would panic here — confirm the
			// schema only yields such types.
			objectMeta := reflect.ValueOf(obj).Elem().FieldByName("ObjectMeta").Interface().(metav1.ObjectMeta)
			for k, v := range objectMeta.Annotations {
				if strings.HasPrefix(k, common.TODOAnnotation) {
					logrus.Infof("%s : %s", k, v)
				}
			}
		}
	}
	return nil
}
|
package serve
import "net/http"
// dirWithIndexFallback wraps an http.Dir and serves index.html for any
// path that cannot be opened.
type dirWithIndexFallback struct {
	dir http.Dir
}
// CreateDirWithIndexFallback returns an http.FileSystem rooted at path
// that falls back to index.html for files that cannot be opened.
func CreateDirWithIndexFallback(path string) http.FileSystem {
	return dirWithIndexFallback{dir: http.Dir(path)}
}
// Open opens name from the underlying directory; on failure it opens
// index.html instead (whose own error, if any, is returned).
func (d dirWithIndexFallback) Open(name string) (http.File, error) {
	if file, err := d.dir.Open(name); err == nil {
		return file, nil
	}
	return d.dir.Open("index.html")
}
|
package operations
import (
"strconv"
"strings"
)
// FractionDestructured is a fraction split into its integer numerator
// and denominator.
type FractionDestructured struct {
	numerator int
	denominator int
}
// BasicOperations groups the fraction arithmetic methods; it carries no
// state.
type BasicOperations struct {
}
// IntegerToFraction normalises element into plain "a/b" notation.
// Accepted inputs: "a/b" (returned unchanged), "n_" (becomes "n/1") and
// the mixed form "n_a/b" (becomes "(n*b+a)/b").
func (bo BasicOperations) IntegerToFraction(element string) string {
	parts := strings.Split(element, "_")
	if len(parts) == 2 && parts[1] == "" {
		// Trailing underscore form such as "5_": drop the empty tail.
		parts = parts[:1]
	}
	if strings.Contains(parts[0], "/") {
		// Already a plain fraction such as "3/4".
		return element
	}
	if len(parts) == 1 {
		// Bare integer: "5" -> "5/1".
		return parts[0] + "/1"
	}
	// Mixed number such as "5_3/4".
	whole, _ := strconv.Atoi(parts[0])
	frac := strings.Split(parts[1], "/")
	num, _ := strconv.Atoi(frac[0])
	den, _ := strconv.Atoi(frac[1])
	num += whole * den
	return strconv.Itoa(num) + "/" + strconv.Itoa(den)
}
// Multiplication returns (a/b)*(c/d) = (a*c)/(b*d), unreduced.
func (bo BasicOperations) Multiplication(element1 string, element2 string) string {
	left := bo.fractionDestructuring(element1)
	right := bo.fractionDestructuring(element2)
	num := left.numerator * right.numerator
	den := left.denominator * right.denominator
	return strconv.Itoa(num) + "/" + strconv.Itoa(den)
}
// Division returns (a/b)/(c/d) = (a*d)/(b*c), unreduced.
func (bo BasicOperations) Division(element1 string, element2 string) string {
	left := bo.fractionDestructuring(element1)
	right := bo.fractionDestructuring(element2)
	num := left.numerator * right.denominator
	den := left.denominator * right.numerator
	return strconv.Itoa(num) + "/" + strconv.Itoa(den)
}
// Addition returns (a/b)+(c/d) over the common denominator b*d, unreduced.
func (bo BasicOperations) Addition(element1 string, element2 string) string {
	left := bo.fractionDestructuring(element1)
	right := bo.fractionDestructuring(element2)
	num := left.numerator*right.denominator + left.denominator*right.numerator
	den := left.denominator * right.denominator
	return strconv.Itoa(num) + "/" + strconv.Itoa(den)
}
// Subtraction returns (a/b)-(c/d) over the common denominator b*d, unreduced.
func (bo BasicOperations) Subtraction(element1 string, element2 string) string {
	left := bo.fractionDestructuring(element1)
	right := bo.fractionDestructuring(element2)
	num := left.numerator*right.denominator - left.denominator*right.numerator
	den := left.denominator * right.denominator
	return strconv.Itoa(num) + "/" + strconv.Itoa(den)
}
// fractionDestructuring splits "a/b" into its integer components.
// Parse errors are ignored: a non-numeric part simply becomes 0.
// When BOTH components are negative the signs cancel and both are made
// positive; a single negative component is left as-is.
// NOTE(review): assumes element contains "/"; a bare integer would
// panic on the second index — confirm callers normalise first
// (IntegerToFraction does).
func (bo BasicOperations) fractionDestructuring(element string) FractionDestructured {
	partsFraction := strings.Split(element, "/")
	numerator, _ := strconv.Atoi(partsFraction[0])
	denominator, _ := strconv.Atoi(partsFraction[1])
	if numerator < 0 && denominator < 0 {
		numerator = bo.AbsoluteValue(numerator)
		denominator = bo.AbsoluteValue(denominator)
	}
	return FractionDestructured{ numerator: numerator, denominator: denominator }
}
// Reduce converts "a/b" to mixed-number notation with a reduced
// fractional part: improper fractions are split into an integer prefix
// ("n_" form), an exact division returns just the integer, and the
// remaining fraction is divided by its greatest common divisor.
func (bo BasicOperations) Reduce(element string) string {
	integer := 0
	result := ""
	e := bo.fractionDestructuring(element)
	// Improper (or exactly divisible) fraction: extract the whole part.
	if (bo.AbsoluteValue(e.numerator) > bo.AbsoluteValue(e.denominator)) || (bo.AbsoluteValue(e.numerator) == bo.AbsoluteValue(e.denominator)) {
		integer = int(e.numerator / e.denominator)
		e.numerator = e.numerator - (integer * e.denominator)
		result = strconv.Itoa(integer)
		if e.numerator == 0 {
			// Nothing left over — the value is a whole number.
			return result
		}
		result = result + "_"
	}
	// Reduce the remaining proper fraction by its common divisor.
	divisor := bo.findDivisor(e)
	e.numerator = e.numerator / divisor
	e.denominator = e.denominator / divisor
	result = result + strconv.Itoa(e.numerator) + "/" + strconv.Itoa(e.denominator)
	return result
}
// AbsoluteValue returns the magnitude of number, dropping any negative sign.
func (bo BasicOperations) AbsoluteValue(number int) int {
	if number >= 0 {
		return number
	}
	return -number
}
// findDivisor returns the greatest common divisor of the fraction's
// numerator and denominator by scanning downward from the smaller of the
// two. If that smaller value is zero or negative the loop never runs and
// 1 is returned, leaving the fraction unchanged.
func (bo BasicOperations) findDivisor(element FractionDestructured) int {
	small := element.numerator
	if element.denominator < element.numerator {
		small = element.denominator
	}
	// The first common divisor found while counting down is the greatest,
	// so return immediately (the original set i = 0 before break — dead code).
	for i := small; i > 0; i-- {
		if element.numerator%i == 0 && element.denominator%i == 0 {
			return i
		}
	}
	return 1
}
|
package client
import (
"encoding/base64"
"fmt"
"github.com/arunvelsriram/sftp-exporter/pkg/constants/viperkeys"
log "github.com/sirupsen/logrus"
"github.com/spf13/viper"
"golang.org/x/crypto/ssh"
)
// parsePrivateKey turns a PEM-encoded SSH private key into an ssh.Signer,
// using keyPassphrase to decrypt the key when one is supplied.
func parsePrivateKey(key, keyPassphrase []byte) (ssh.Signer, error) {
	if len(keyPassphrase) == 0 {
		log.Debug("key has no passphrase")
		signer, err := ssh.ParsePrivateKey(key)
		if err != nil {
			log.WithField("when", "parsing ssh key").Error("failed to parse key")
			return nil, err
		}
		return signer, nil
	}
	log.Debug("key has passphrase")
	signer, err := ssh.ParsePrivateKeyWithPassphrase(key, keyPassphrase)
	if err != nil {
		log.WithField("when", "parsing encrypted ssh key").
			Error("failed to parse key with passphrase")
		return nil, err
	}
	return signer, nil
}
// sshAuthMethods builds the SSH authentication method list from the
// configured password and/or base64-encoded private key. Both may be used
// together; configuring neither is an error.
func sshAuthMethods() ([]ssh.AuthMethod, error) {
	password := viper.GetString(viperkeys.SFTPPassword)
	key, err := base64.StdEncoding.DecodeString(viper.GetString(viperkeys.SFTPKey))
	if err != nil {
		return nil, err
	}
	keyPassphrase := []byte(viper.GetString(viperkeys.SFTPKeyPassphrase))
	hasPassword := len(password) > 0
	hasKey := len(key) > 0
	switch {
	case hasPassword && hasKey:
		log.Debug("key and password are provided")
		parsedKey, err := parsePrivateKey(key, keyPassphrase)
		if err != nil {
			log.WithField("when", "determining SSH authentication methods").Error(err)
			return nil, err
		}
		return []ssh.AuthMethod{
			ssh.PublicKeys(parsedKey),
			ssh.Password(password),
		}, nil
	case hasPassword:
		log.Debug("password is provided")
		return []ssh.AuthMethod{
			ssh.Password(password),
		}, nil
	case hasKey:
		log.Debug("key is provided")
		parsedKey, err := parsePrivateKey(key, keyPassphrase)
		if err != nil {
			log.WithField("when", "determining SSH authentication methods").Error(err)
			return nil, err
		}
		return []ssh.AuthMethod{
			ssh.PublicKeys(parsedKey),
		}, nil
	}
	log.Debug("both password and key are not provided")
	return nil, fmt.Errorf("failed to determine the SSH authentication methods to use")
}
// NewSSHClient dials the configured SFTP host over TCP and returns an
// authenticated SSH client.
func NewSSHClient() (*ssh.Client, error) {
	addr := fmt.Sprintf("%s:%d", viper.GetString(viperkeys.SFTPHost), viper.GetInt(viperkeys.SFTPPort))
	auth, err := sshAuthMethods()
	if err != nil {
		log.WithField("when", "creating a SSH client").Error(err)
		return nil, err
	}
	clientConfig := &ssh.ClientConfig{
	User: viper.GetString(viperkeys.SFTPUser),
	Auth: auth,
	// NOTE(review): host key verification is disabled, which permits
	// man-in-the-middle attacks; consider ssh.FixedHostKey or a
	// known_hosts-backed callback for production use.
	HostKeyCallback: ssh.InsecureIgnoreHostKey(),
	}
	return ssh.Dial("tcp", addr, clientConfig)
}
|
package parsers
import (
"bufio"
. "github.com/ruxton/tracklist_parsers/data"
"github.com/ruxton/term"
"io"
"os"
"regexp"
)
// ParseBasicTracklist reads lines of the form "NN. Artist - Title" from
// bufReader and converts them into Track values. A line that does not match
// the expected pattern falls back to prompting the user on the terminal for
// the artist and the track name.
func ParseBasicTracklist(bufReader *bufio.Reader) []Track {
	var list []Track
	regex, err := regexp.Compile(`([0-9]+)(?:.{1}?\s)(.+)(?:\s-\s)(.+)`)
	if err != nil {
		term.OutputError("Error matching strings")
		os.Exit(2)
	}
	for line, _, err := bufReader.ReadLine(); err != io.EOF; line, _, err = bufReader.ReadLine() {
		// FindAllStringSubmatch returns nil when nothing matches; guard the
		// [0] access so an unparseable line triggers the manual fallback
		// below instead of panicking.
		var trackdata []string
		if matches := regex.FindAllStringSubmatch(string(line), 1); len(matches) > 0 {
			trackdata = matches[0]
		}
		if len(trackdata) != 4 {
			term.OutputError("Error parsing track " + string(line))
			term.OutputMessage("Please enter an artist for this track: ")
			artist, err := term.STD_IN.ReadString('\n')
			if err != nil {
				term.OutputError("Incorrect artist entry.")
				os.Exit(2)
			}
			term.OutputMessage("Please enter a name for this track: ")
			track, err := term.STD_IN.ReadString('\n')
			if err != nil {
				term.OutputError("Incorrect track name entry.")
				os.Exit(2)
			}
			// Slot 0 mirrors the regexp's full-match position and slot 1 the
			// track number, so the indexes below line up with submatches.
			// (The original built only 3 elements and then indexed [3].)
			// NOTE(review): the entered strings keep their trailing '\n' —
			// confirm whether they should be trimmed.
			trackdata = []string{string(line), "0", artist, track}
		}
		thistrack := new(Track)
		thistrack.Artist = trackdata[2]
		thistrack.Song = trackdata[3]
		list = append(list, *thistrack)
	}
	return list
}
|
package crypto
import (
"bytes"
"crypto/ecdsa"
"crypto/rsa"
"encoding/hex"
"encoding/json"
"errors"
"io"
"io/ioutil"
"regexp"
"strings"
"time"
"golang.org/x/crypto/openpgp"
"golang.org/x/crypto/openpgp/armor"
pgperrors "golang.org/x/crypto/openpgp/errors"
"golang.org/x/crypto/openpgp/packet"
xrsa "golang.org/x/crypto/rsa"
armorUtils "github.com/ProtonMail/gopenpgp/armor"
"github.com/ProtonMail/gopenpgp/constants"
"github.com/ProtonMail/gopenpgp/models"
)
// A keypair contains a private key and a public key.
// pgpKeyObject mirrors the JSON key object this package unmarshals
// (see KeyRing.UnmarshalJSON).
type pgpKeyObject struct {
	ID string
	Version int
	Flags int
	Fingerprint string
	PublicKey string `json:",omitempty"`
	PrivateKey string
	Primary int
}
// PrivateKeyReader returns a reader over the armored private key text.
func (ko *pgpKeyObject) PrivateKeyReader() io.Reader {
	return strings.NewReader(ko.PrivateKey)
}
// Identity contains the name and the email of a key holder.
type Identity struct {
	Name string
	Email string
}
// Signature is used to check a signature. Because the signature is checked
// when the reader is consumed, Signature must only be used after EOF has been
// seen. A signature is only valid if s.Err() returns nil, otherwise the
// sender's identity cannot be trusted.
type Signature struct {
	md *openpgp.MessageDetails
}
// SignedString wraps string with a Signature
type SignedString struct {
	String string
	Signed *Signature
}
// errKeyringNotUnlocked is returned by Encrypt when signing was requested
// but no decrypted private key is available in the signing keyring.
var errKeyringNotUnlocked = errors.New("gopenpgp: cannot sign message, key ring is not unlocked")
// Err returns a non-nil error if the signature is invalid.
// Only meaningful once the message reader has been fully consumed
// (see the Signature type documentation).
func (s *Signature) Err() error {
	return s.md.SignatureError
}
// KeyRing returns the key ring that was used to produce the signature, if
// available.
func (s *Signature) KeyRing() *KeyRing {
	signer := s.md.SignedBy
	if signer == nil {
		return nil
	}
	return &KeyRing{entities: openpgp.EntityList{signer.Entity}}
}
// IsBy returns true if the signature has been created by kr's owner.
func (s *Signature) IsBy(kr *KeyRing) bool {
	// Prefer the full fingerprint when the signing key was resolved.
	if signer := s.md.SignedBy; signer != nil {
		for _, entity := range kr.entities {
			if entity.PrimaryKey.Fingerprint == signer.PublicKey.Fingerprint {
				return true
			}
		}
		return false
	}
	// Otherwise fall back to the (shorter) key ID carried by the message.
	for _, entity := range kr.entities {
		if entity.PrimaryKey.KeyId == s.md.SignedByKeyId {
			return true
		}
	}
	return false
}
// KeyRing contains multiple private and public keys.
type KeyRing struct {
	// PGP entities in this keyring.
	entities openpgp.EntityList
	// FirstKeyID as obtained from API to match salt.
	// Populated by UnmarshalJSON from the first key object.
	FirstKeyID string
}
// GetEntities returns openpgp entities contained in this KeyRing.
// The returned slice is the keyring's internal list, not a copy.
func (kr *KeyRing) GetEntities() openpgp.EntityList {
	return kr.entities
}
// GetSigningEntity returns first private unlocked signing entity from keyring.
// Encrypted private keys are decrypted in place with the given passphrase;
// keys that fail to decrypt are skipped.
func (kr *KeyRing) GetSigningEntity(passphrase string) (*openpgp.Entity, error) {
	var signEntity *openpgp.Entity
	for _, e := range kr.entities {
		// Entity.PrivateKey must be a signing key
		if e.PrivateKey != nil {
			if e.PrivateKey.Encrypted {
				// Wrong passphrase for this key — try the next entity.
				if err := e.PrivateKey.Decrypt([]byte(passphrase)); err != nil {
					continue
				}
			}
			signEntity = e
			break
		}
	}
	if signEntity == nil {
		err := errors.New("gopenpgp: cannot sign message, unable to unlock signer key")
		return signEntity, err
	}
	return signEntity, nil
}
// Encrypt encrypts data to this keyring's owner. If sign is not nil, it also
// signs data with it. The keyring sign must be unlocked to be able to sign data,
// if not an error will be returned.
// The returned WriteCloser must be closed to finalize the message.
func (kr *KeyRing) Encrypt(w io.Writer, sign *KeyRing, filename string, canonicalizeText bool) (io.WriteCloser, error) {
	// The API returns keys sorted by descending priority
	// Only encrypt to the first one
	var encryptEntities []*openpgp.Entity
	for _, e := range kr.entities {
		encryptEntities = append(encryptEntities, e)
		break
	}
	var signEntity *openpgp.Entity
	if sign != nil {
		// To sign a message, the private key must be decrypted
		for _, e := range sign.entities {
			// Entity.PrivateKey must be a signing key
			if e.PrivateKey != nil && !e.PrivateKey.Encrypted {
				signEntity = e
				break
			}
		}
		if signEntity == nil {
			return nil, errKeyringNotUnlocked
		}
	}
	// Message timestamps come from the package-level clock so they can be
	// synchronized with server time.
	return EncryptCore(
		w,
		encryptEntities,
		signEntity,
		filename,
		canonicalizeText,
		func() time.Time { return GetGopenPGP().GetTime() })
}
// EncryptCore is lower-level encryption method used by KeyRing.Encrypt.
// It wires the cipher configuration and file hints into the openpgp
// encryption entry points, choosing text canonicalization when requested.
func EncryptCore(w io.Writer, encryptEntities []*openpgp.Entity, signEntity *openpgp.Entity, filename string,
	canonicalizeText bool, timeGenerator func() time.Time) (io.WriteCloser, error) {
	cfg := &packet.Config{DefaultCipher: packet.CipherAES256, Time: timeGenerator}
	fileHints := &openpgp.FileHints{FileName: filename, IsBinary: !canonicalizeText}
	if canonicalizeText {
		return openpgp.EncryptText(w, encryptEntities, signEntity, fileHints, cfg)
	}
	return openpgp.Encrypt(w, encryptEntities, signEntity, fileHints, cfg)
}
// An io.WriteCloser that both encrypts and armors data.
// Writes go to ew (which feeds aw); Close closes ew first, then aw.
type armorEncryptWriter struct {
	aw io.WriteCloser // Armored writer
	ew io.WriteCloser // Encrypted writer
}
// Write encrypted data.
// Bytes are passed to the encryption layer, which emits armored output.
func (w *armorEncryptWriter) Write(b []byte) (n int, err error) {
	return w.ew.Write(b)
}
// Close finishes the encryption stream first, then the armor stream,
// returning the first error encountered.
func (w *armorEncryptWriter) Close() error {
	if err := w.ew.Close(); err != nil {
		return err
	}
	return w.aw.Close()
}
// EncryptArmored encrypts and armors data to the keyring's owner.
// Wrapper of Encrypt.
func (kr *KeyRing) EncryptArmored(w io.Writer, sign *KeyRing) (wc io.WriteCloser, err error) {
	aw, err := armorUtils.ArmorWithTypeBuffered(w, constants.PGPMessageHeader)
	if err != nil {
		return
	}
	ew, err := kr.Encrypt(aw, sign, "", false)
	if err != nil {
		// Release the armor writer; its Close error is deliberately
		// ignored since the Encrypt error is what gets reported.
		aw.Close()
		return
	}
	wc = &armorEncryptWriter{aw: aw, ew: ew}
	return
}
// EncryptMessage encrypts and armors a string to the keyring's owner.
// Wrapper of Encrypt.
func (kr *KeyRing) EncryptMessage(s string, sign *KeyRing) (string, error) {
	var out bytes.Buffer
	wc, err := kr.EncryptArmored(&out, sign)
	if err != nil {
		return "", err
	}
	if _, err = wc.Write([]byte(s)); err != nil {
		return "", err
	}
	// Close finalizes both the encryption and the armor layers.
	if err = wc.Close(); err != nil {
		return "", err
	}
	return out.String(), nil
}
// EncryptSymmetric data using generated symmetric key encrypted with this KeyRing.
// Wrapper of Encrypt.
func (kr *KeyRing) EncryptSymmetric(textToEncrypt string, canonicalizeText bool) (outSplit *models.EncryptedSplit,
	err error) {
	var encryptedWriter io.WriteCloser
	buffer := &bytes.Buffer{}
	// The keyring encrypts to itself; the session key is then split out
	// from the data packets by SeparateKeyAndData below.
	if encryptedWriter, err = kr.Encrypt(buffer, kr, "msg.txt", canonicalizeText); err != nil {
		return
	}
	if _, err = io.Copy(encryptedWriter, bytes.NewBufferString(textToEncrypt)); err != nil {
		return
	}
	// NOTE(review): the Close error is ignored here; a failed close could
	// leave a truncated message — consider checking it.
	encryptedWriter.Close()
	if outSplit, err = SeparateKeyAndData(kr, buffer, len(textToEncrypt), -1); err != nil {
		return
	}
	return
}
// DecryptMessage decrypts an armored string sent to the keypair's owner.
// If error is errors.ErrSignatureExpired (from golang.org/x/crypto/openpgp/errors),
// contents are still provided if library clients wish to process this message further.
// On other errors the original encrypted input is echoed back in the result.
func (kr *KeyRing) DecryptMessage(encrypted string) (SignedString, error) {
	r, signed, err := kr.DecryptArmored(strings.NewReader(encrypted))
	if err != nil && err != pgperrors.ErrSignatureExpired {
		return SignedString{String: encrypted, Signed: nil}, err
	}
	b, err := ioutil.ReadAll(r)
	if err != nil && err != pgperrors.ErrSignatureExpired {
		return SignedString{String: encrypted, Signed: nil}, err
	}
	s := string(b)
	return SignedString{String: s, Signed: signed}, nil
}
// DecryptMessageIfNeeded data if has armored PGP message format, if not return original data.
// If error is errors.ErrSignatureExpired (from golang.org/x/crypto/openpgp/errors),
// contents are still provided if library clients wish to process this message further.
func (kr *KeyRing) DecryptMessageIfNeeded(data string) (decrypted string, err error) {
	// NOTE(review): the regexp is recompiled on every call; hoisting it to
	// a package-level var would avoid the repeated compilation cost.
	if re := regexp.MustCompile("^-----BEGIN " + constants.PGPMessageHeader + "-----(?s:.+)-----END " +
		constants.PGPMessageHeader + "-----"); re.MatchString(data) {
		var signed SignedString
		signed, err = kr.DecryptMessage(data)
		decrypted = signed.String
	} else {
		decrypted = data
	}
	return
}
// Unlock tries to unlock as many keys as possible with the following password. Note
// that keyrings can contain keys locked with different passwords, and thus
// err == nil does not mean that all keys have been successfully decrypted.
// If err != nil, the password is wrong for every key, and err is the last error
// encountered.
func (kr *KeyRing) Unlock(passphrase []byte) error {
	// Build a list of keys to decrypt
	var keys []*packet.PrivateKey
	for _, e := range kr.entities {
		// Entity.PrivateKey must be a signing key
		if e.PrivateKey != nil {
			keys = append(keys, e.PrivateKey)
		}
		// Entity.Subkeys can be used for encryption
		for _, subKey := range e.Subkeys {
			if subKey.PrivateKey != nil && (!subKey.Sig.FlagsValid || subKey.Sig.FlagEncryptStorage ||
				subKey.Sig.FlagEncryptCommunications) {
				keys = append(keys, subKey.PrivateKey)
			}
		}
	}
	if len(keys) == 0 {
		return errors.New("gopenpgp: cannot unlock key ring, no private key available")
	}
	var err error
	var n int
	for _, key := range keys {
		if !key.Encrypted {
			continue // Key already decrypted
		}
		if err = key.Decrypt(passphrase); err == nil {
			n++
		}
	}
	// If nothing was decrypted, report the last error (nil when no key was
	// actually encrypted to begin with).
	if n == 0 {
		return err
	}
	return nil
}
// Decrypt decrypts a message sent to the keypair's owner. If the message is not
// signed, signed will be nil.
// If error is errors.ErrSignatureExpired (from golang.org/x/crypto/openpgp/errors),
// contents are still provided if library clients wish to process this message further.
func (kr *KeyRing) Decrypt(r io.Reader) (decrypted io.Reader, signed *Signature, err error) {
	md, err := openpgp.ReadMessage(r, kr.entities, nil, nil)
	if err != nil && err != pgperrors.ErrSignatureExpired {
		return
	}
	// Expired-signature errors fall through: md is still usable and the
	// caller decides whether to accept the contents.
	decrypted = md.UnverifiedBody
	if md.IsSigned {
		signed = &Signature{md}
	}
	return
}
// DecryptArmored decrypts an armored message sent to the keypair's owner.
// If error is errors.ErrSignatureExpired (from golang.org/x/crypto/openpgp/errors),
// contents are still provided if library clients wish to process this message further.
func (kr *KeyRing) DecryptArmored(r io.Reader) (decrypted io.Reader, signed *Signature, err error) {
	block, err := armor.Decode(r)
	// NOTE(review): if armor.Decode ever returned ErrSignatureExpired,
	// block could be nil and the Type access below would panic — this
	// condition looks copied from the message-reading paths; confirm.
	if err != nil && err != pgperrors.ErrSignatureExpired {
		return
	}
	if block.Type != constants.PGPMessageHeader {
		err = errors.New("gopenpgp: not an armored PGP message")
		return
	}
	return kr.Decrypt(block.Body)
}
// WriteArmoredPublicKey outputs armored public keys from the keyring to w.
// All entities are serialized into a single armor block.
func (kr *KeyRing) WriteArmoredPublicKey(w io.Writer) (err error) {
	aw, err := armor.Encode(w, openpgp.PublicKeyType, nil)
	if err != nil {
		return
	}
	for _, e := range kr.entities {
		if err = e.Serialize(aw); err != nil {
			// Close the armor writer before surfacing the serialize error;
			// its own Close error is intentionally discarded.
			aw.Close()
			return
		}
	}
	err = aw.Close()
	return
}
// GetArmoredPublicKey returns the armored public keys from this keyring.
func (kr *KeyRing) GetArmoredPublicKey() (string, error) {
	var buf bytes.Buffer
	if err := kr.WriteArmoredPublicKey(&buf); err != nil {
		return "", err
	}
	return buf.String(), nil
}
// WritePublicKey outputs unarmored public keys from the keyring to w,
// stopping at the first serialization error.
func (kr *KeyRing) WritePublicKey(w io.Writer) error {
	for _, entity := range kr.entities {
		if err := entity.Serialize(w); err != nil {
			return err
		}
	}
	return nil
}
// GetPublicKey returns the unarmored public keys from this keyring.
func (kr *KeyRing) GetPublicKey() ([]byte, error) {
	var buf bytes.Buffer
	if err := kr.WritePublicKey(&buf); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
// GetFingerprint gets the fingerprint from the keyring.
// Only the first entity's primary-key fingerprint is reported; an empty
// keyring yields an error.
func (kr *KeyRing) GetFingerprint() (string, error) {
	for _, entity := range kr.entities {
		fingerprint := entity.PrimaryKey.Fingerprint
		return hex.EncodeToString(fingerprint[:]), nil
	}
	return "", errors.New("can't find public key")
}
// CheckPassphrase checks if private key passphrase is correct for every sub key.
// It returns true when at least one encrypted private key decrypts with the
// given passphrase (already-decrypted keys are skipped).
func (kr *KeyRing) CheckPassphrase(passphrase string) bool {
	var keys []*packet.PrivateKey
	for _, entity := range kr.entities {
		// Skip entities without a private key (public-only keyrings):
		// dereferencing a nil PrivateKey below would panic. Unlock applies
		// the same guard; this method previously did not.
		if entity.PrivateKey != nil {
			keys = append(keys, entity.PrivateKey)
		}
	}
	var n int
	for _, key := range keys {
		if !key.Encrypted {
			continue // Key already decrypted
		}
		if err := key.Decrypt([]byte(passphrase)); err == nil {
			n++
		}
	}
	return n != 0
}
// readFrom reads unarmored and armored keys from r and adds them to the keyring.
// Private-key entities get their public half rebuilt from the private key to
// work around a type mismatch introduced by a crypto library update.
func (kr *KeyRing) readFrom(r io.Reader, armored bool) error {
	var err error
	var entities openpgp.EntityList
	if armored {
		entities, err = openpgp.ReadArmoredKeyRing(r)
	} else {
		entities, err = openpgp.ReadKeyRing(r)
	}
	for _, entity := range entities {
		if entity.PrivateKey != nil {
			switch entity.PrivateKey.PrivateKey.(type) {
			// TODO: type mismatch after crypto lib update, fix this:
			case *rsa.PrivateKey:
				entity.PrimaryKey = packet.NewRSAPublicKey(
					time.Now(),
					entity.PrivateKey.PrivateKey.(*rsa.PrivateKey).Public().(*xrsa.PublicKey))
			case *ecdsa.PrivateKey:
				entity.PrimaryKey = packet.NewECDSAPublicKey(
					time.Now(),
					entity.PrivateKey.PrivateKey.(*ecdsa.PrivateKey).Public().(*ecdsa.PublicKey))
			}
		}
		// Iterate by index: entity.Subkeys is a slice of struct values, so
		// the previous range-by-value loop assigned PublicKey on a copy and
		// the fixup was silently discarded.
		for i := range entity.Subkeys {
			subkey := &entity.Subkeys[i]
			if subkey.PrivateKey != nil {
				switch subkey.PrivateKey.PrivateKey.(type) {
				case *rsa.PrivateKey:
					subkey.PublicKey = packet.NewRSAPublicKey(
						time.Now(),
						subkey.PrivateKey.PrivateKey.(*rsa.PrivateKey).Public().(*xrsa.PublicKey))
				case *ecdsa.PrivateKey:
					subkey.PublicKey = packet.NewECDSAPublicKey(
						time.Now(),
						subkey.PrivateKey.PrivateKey.(*ecdsa.PrivateKey).Public().(*ecdsa.PublicKey))
				}
			}
		}
	}
	if err != nil {
		return err
	}
	if len(entities) == 0 {
		return errors.New("gopenpgp: key ring doesn't contain any key")
	}
	kr.entities = append(kr.entities, entities...)
	return nil
}
// BuildKeyRing reads keyring from binary data.
func (pgp *GopenPGP) BuildKeyRing(binKeys []byte) (*KeyRing, error) {
	ring := &KeyRing{}
	err := ring.readFrom(bytes.NewReader(binKeys), false)
	return ring, err
}
// BuildKeyRingNoError builds a keyring from binary data, discarding any
// error (the result may be an empty keyring on failure).
func (pgp *GopenPGP) BuildKeyRingNoError(binKeys []byte) *KeyRing {
	ring, _ := pgp.BuildKeyRing(binKeys)
	return ring
}
// BuildKeyRingArmored reads armored string and returns keyring.
// It now goes through readFrom like every other constructor, so the
// RSA/ECDSA public-key fixup and the empty-keyring check are applied
// consistently (the previous implementation unarmored by hand and
// skipped both).
func (pgp *GopenPGP) BuildKeyRingArmored(key string) (kr *KeyRing, err error) {
	kr = &KeyRing{}
	err = kr.readFrom(strings.NewReader(key), true)
	return
}
// UnmarshalJSON implements encoding/json.Unmarshaler.
// The input is an array of key objects; each object's armored private key
// is read into the keyring, and FirstKeyID records the ID of the first
// object (used to match the salt).
func (kr *KeyRing) UnmarshalJSON(b []byte) (err error) {
	kr.entities = nil
	keyObjs := []pgpKeyObject{}
	if err = json.Unmarshal(b, &keyObjs); err != nil {
		return
	}
	if len(keyObjs) == 0 {
		return
	}
	for i, ko := range keyObjs {
		if i == 0 {
			kr.FirstKeyID = ko.ID
		}
		err = kr.readFrom(ko.PrivateKeyReader(), true)
		if err != nil {
			return err
		}
	}
	return nil
}
// Identities returns the list of identities associated with this key ring.
func (kr *KeyRing) Identities() []*Identity {
	var ids []*Identity
	for _, entity := range kr.entities {
		for _, identity := range entity.Identities {
			ids = append(ids, &Identity{
				Name:  identity.UserId.Name,
				Email: identity.UserId.Email,
			})
		}
	}
	return ids
}
// KeyIds returns array of IDs of keys in this KeyRing.
func (kr *KeyRing) KeyIds() []uint64 {
	ids := make([]uint64, 0, len(kr.entities))
	for _, entity := range kr.entities {
		ids = append(ids, entity.PrimaryKey.KeyId)
	}
	return ids
}
// ReadArmoredKeyRing reads armored data into a new keyring.
func ReadArmoredKeyRing(r io.Reader) (*KeyRing, error) {
	ring := &KeyRing{}
	return ring, ring.readFrom(r, true)
}
// ReadKeyRing reads binary data into a new keyring.
func ReadKeyRing(r io.Reader) (*KeyRing, error) {
	ring := &KeyRing{}
	return ring, ring.readFrom(r, false)
}
// FilterExpiredKeys takes a given KeyRing list and it returns only those
// KeyRings which contain at least, one unexpired Key. It returns only unexpired
// parts of these KeyRings.
// An error is returned only when every input keyring is fully expired.
func FilterExpiredKeys(contactKeys []*KeyRing) (filteredKeys []*KeyRing, err error) {
	now := time.Now()
	hasExpiredEntity := false
	filteredKeys = make([]*KeyRing, 0)
	for _, contactKeyRing := range contactKeys {
		keyRingHasUnexpiredEntity := false
		keyRingHasTotallyExpiredEntity := false
		for _, entity := range contactKeyRing.GetEntities() {
			hasExpired := false
			hasUnexpired := false
			// Expiry is judged per subkey; an entity counts as expired only
			// when every one of its subkeys has expired.
			for _, subkey := range entity.Subkeys {
				if subkey.PublicKey.KeyExpired(subkey.Sig, now) {
					hasExpired = true
				} else {
					hasUnexpired = true
				}
			}
			if hasExpired && !hasUnexpired {
				keyRingHasTotallyExpiredEntity = true
			} else if hasUnexpired {
				keyRingHasUnexpiredEntity = true
			}
		}
		if keyRingHasUnexpiredEntity {
			filteredKeys = append(filteredKeys, contactKeyRing)
		} else if keyRingHasTotallyExpiredEntity {
			hasExpiredEntity = true
		}
	}
	if len(filteredKeys) == 0 && hasExpiredEntity {
		return filteredKeys, errors.New("all contacts keys are expired")
	}
	return filteredKeys, nil
}
|
// Copyright © 2020. All rights reserved.
// Author: Ilya Stroy.
// Contacts: qioalice@gmail.com, https://github.com/qioalice
// License: https://opensource.org/licenses/MIT
package ekalog
// -----
// Integrator is the reason this package was called 'logintegro' earlier.
// The main idea is: "You integrate your log messages with your destination service".
// -----
// Integrator is an interface that each type which wants to convert log's
// Entry to some real output shall implement.
//
// E.g. If you want to use this package and write all log's entries to your
// own service, declare and define your type, implement the Integrator
// interface and register it later using 'Logger.Apply' method or 'ApplyThis' func.
// But you also can use any of predefined basic integrators which cover 99% cases.
type Integrator interface {
	// Write writes log entry to some destination (integrator determines
	// what it will be). Thus, Write does the main thing of Integrator:
	// "Integrates your log messages with your log destination service".
	Write(entry *Entry)
	// MinLevelEnabled returns minimum log's Level an integrator will handle
	// log entries with.
	// E.g. if minimum level is 'Warning', 'Debug' logs will be dropped.
	MinLevelEnabled() Level
	// MinLevelForStackTrace must return a minimum level starting with which a
	// stacktrace must be generated and added to the Logger's Entry, only if
	// it's not presented yet by an attached ekaerr.Error object.
	MinLevelForStackTrace() Level
	// Sync flushes all pending log entries to integrator destination.
	// It is useful when integrator does async work and sometimes you need to
	// make sure all pending entries are flushed.
	//
	// Logger type has the same name's method that just calls this method.
	Sync() error
	// IsAsync must return whether Integrator is async or not.
	// It's very important for internal parts (GC).
	//
	// If you are not sure, return 'true'. It's more secure but slower.
	IsAsync() bool
}
|
package integration
import (
"fmt"
"net/http"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// Integration spec: posts an event, waits for the sender to process it, and
// verifies it is reported back with status "Sent".
var _ = Describe("Sender", func() {
	Describe("Event", func() {
		It("should receive event", func() {
			startDate := time.Now()
			title := fmt.Sprintf("Test Send Event %d", startDate.Unix())
			payload := Event{
				Title:       title,
				Description: "Test description",
				UserID:      user.ID,
				StartDate:   startDate,
				EndDate:     startDate.Add(time.Hour),
				NotifiedFor: 1,
			}
			statusCode, event, err := ProcessEvent(EventsURL, http.MethodPost, payload)
			Expect(err).NotTo(HaveOccurred())
			Expect(statusCode).To(Equal(200))
			// Compare the created event's owner against the request user.
			// (The original asserted user.ID == user.ID, which always passes.)
			Expect(event.UserID).To(Equal(user.ID))
			Expect(event.Title).To(Equal(title))
			Expect(event.Status).To(Equal("New"))
			// waiting for event processing
			time.Sleep(10 * time.Second)
			statusCode, result, err := ProcessEvents(fmt.Sprintf("%s/1/%d", EventsURL, startDate.Unix()), http.MethodGet, nil)
			Expect(err).NotTo(HaveOccurred())
			Expect(statusCode).To(Equal(200))
			event.Status = "Sent"
			Expect(result.Events).To(ContainElement(event))
		})
	})
})
|
/*
GTVM is Golang Tools Version Manager
Manage Golang versions and LiteIDE versions install/uninstall
Basics
All configs, archives, installed tools stored in $HOME/.gtvm in Linux and %USERPROFILE%\.gtvm in Windows
*/
package main
import (
"database/sql"
"fmt"
"os"
"os/user"
"path/filepath"
"github.com/PuerkitoBio/goquery"
_ "github.com/mattn/go-sqlite3"
)
// Directory/file names under the working directory and the remote URLs the
// tool scrapes for Go and LiteIDE release listings.
const (
	gtvmDirName = ".gtvm"
	golang = "go"
	archives = "archives"
	liteide = "liteide"
	gtvmStorage = "gtvmStorage.db"
	urlSF = "http://sourceforge.net"
	urlLiteIDE = "http://sourceforge.net/projects/liteide/files/"
	urlLiteIDEDownload = "http://downloads.sourceforge.net/liteide/"
	urlGoLang = "https://golang.org/dl/"
)
// Package-wide state: parsed version listings, scraping/document state, the
// sqlite handle, and the resolved working-directory paths (filled by init).
var (
	ps = string(filepath.Separator) // Separator
	goVersions []goVer // Store Go versions info
	liteIDEVersions []liteIDEVer // Store LiteIDE versions info
	liteIDEfileList []liteIDEfile // Store LiteIDE files info
	doc *goquery.Document
	err error
	db *sql.DB
	stmt *sql.Stmt
	gtvmDir = "" // $HOME/.gtvm (or %USERPROFILE%\.gtvm on Windows)
	archivesDir = "" // gtvmDir/archives
	liteideDir = "" // gtvmDir/liteide
	golangDir = "" // gtvmDir/go
)
// init resolves the working-directory paths under the user's home, creates
// them, and opens (or bootstraps) the sqlite storage database.
func init() {
	usr, _ := user.Current()
	gtvmDir = usr.HomeDir + ps + gtvmDirName
	archivesDir = gtvmDir + ps + archives
	golangDir = gtvmDir + ps + golang
	liteideDir = gtvmDir + ps + liteide
	createWorkDirs()
	gvmwd, err := os.Stat(gtvmDir)
	if err != nil {
		// Exit instead of falling through: gvmwd is nil on error, and the
		// IsDir call below would panic on a nil FileInfo.
		fmt.Println(err)
		os.Exit(1)
	}
	if !gvmwd.IsDir() {
		fmt.Println("Go Tools Version Manager working destination is not a directory")
		os.Exit(1)
	}
	if _, err = os.Stat(gtvmDir + ps + gtvmStorage); os.IsNotExist(err) {
		firstStart()
	} else {
		// os.Remove(gtvmDir + ps + gtvmStorage)
		db = getDB()
	}
}
// main delegates to the command-line parser and closes the storage DB on
// normal return. NOTE(review): deferred closes do not run if a command path
// calls os.Exit — confirm whether that matters for sqlite here.
func main() {
	parseCmdLine()
	defer db.Close()
}
// getDB lazily opens the sqlite storage database, reusing the existing
// package-level handle on subsequent calls. It panics if the database
// cannot be opened.
func getDB() *sql.DB {
	if db != nil {
		return db
	}
	db, err = sql.Open("sqlite3", gtvmDir+ps+gtvmStorage)
	if err != nil {
		panic(err)
	}
	return db
}
|
package structil_test
import (
"fmt"
"strconv"
"testing"
"github.com/google/go-cmp/cmp"
. "github.com/goldeneggg/structil"
)
type (
	// FinderTestStruct is the root fixture covering every field kind the
	// Finder tests exercise: scalars, pointers, slices, maps, funcs,
	// channels, an unexported field, plus embedded and nested structs.
	FinderTestStruct struct {
		Byte byte
		Bytes []byte
		Int int
		Int64 int64
		Uint uint
		Uint64 uint64
		Float32 float32
		Float64 float64
		String string
		Stringptr *string
		Stringslice []string
		Bool bool
		Map map[string]interface{}
		Func func(string) interface{}
		ChInt chan int
		privateString string
		FinderTestStruct2
		FinderTestStruct2Ptr *FinderTestStruct2
		FinderTestStruct4Slice []FinderTestStruct4
		FinderTestStruct4PtrSlice []*FinderTestStruct4
	}
	// FinderTestStruct2 nests one level and embeds a struct pointer.
	FinderTestStruct2 struct {
		String string
		*FinderTestStruct3
	}
	// FinderTestStruct3 is the innermost nesting level.
	FinderTestStruct3 struct {
		String string
		Int int
	}
	// FinderTestStruct4 is used for slice-of-struct fields.
	FinderTestStruct4 struct {
		String string
		String2 string
	}
)
// Shared fixture values, referenced both when building the test struct and
// when asserting results (pointer/func/chan identity must match).
var (
	finderTestString2 = "test name2"
	finderTestFunc = func(s string) interface{} { return s + "-func" }
	finderTestChan = make(chan int)
)
// newFinderTestStruct builds the canonical fixture value used by the Finder
// tests; expected maps in the test tables mirror these exact values.
func newFinderTestStruct() FinderTestStruct {
	return FinderTestStruct{
		Byte: 0x61,
		Bytes: []byte{0x00, 0xFF},
		Int: int(-2),
		Int64: int64(-1),
		Uint: uint(2),
		Uint64: uint64(1),
		Float32: float32(-1.23),
		Float64: float64(-3.45),
		String: "test name",
		Stringptr: &finderTestString2,
		Stringslice: []string{"strslice1", "strslice2"},
		Bool: true,
		Map: map[string]interface{}{"k1": "v1", "k2": 2},
		Func: finderTestFunc,
		ChInt: finderTestChan,
		privateString: "unexported string",
		FinderTestStruct2: FinderTestStruct2{
			String: "struct2 string",
			FinderTestStruct3: &FinderTestStruct3{
				String: "struct3 string",
				Int: -123,
			},
		},
		FinderTestStruct2Ptr: &FinderTestStruct2{
			String: "struct2 string ptr",
			FinderTestStruct3: &FinderTestStruct3{
				String: "struct3 string ptr",
				Int: -456,
			},
		},
		FinderTestStruct4Slice: []FinderTestStruct4{
			{
				String: "key100",
				String2: "value100",
			},
			{
				String: "key200",
				String2: "value200",
			},
		},
		FinderTestStruct4PtrSlice: []*FinderTestStruct4{
			{
				String: "key991",
				String2: "value991",
			},
			{
				String: "key992",
				String2: "value992",
			},
		},
	}
}
// newFinderTestStructPtr returns a pointer to a freshly built fixture value.
func newFinderTestStructPtr() *FinderTestStruct {
	value := newFinderTestStruct()
	return &value
}
// TestNewFinder verifies that NewFinder accepts structs and struct pointers
// and rejects non-struct values such as strings and nil.
func TestNewFinder(t *testing.T) {
	t.Parallel()
	type args struct {
		i interface{}
	}
	tests := []struct {
		name string
		args args
		wantError bool
	}{
		{
			name: "NewFinder with valid struct",
			args: args{i: newFinderTestStruct()},
			wantError: false,
		},
		{
			name: "NewFinder with valid struct ptr",
			args: args{i: newFinderTestStructPtr()},
			wantError: false,
		},
		{
			name: "NewFinder with string",
			args: args{i: "string"},
			wantError: true,
		},
		{
			name: "NewFinder with nil",
			args: args{i: nil},
			wantError: true,
		},
	}
	for _, tt := range tests {
		tt := tt // See: https://gist.github.com/posener/92a55c4cd441fc5e5e85f27bca008721
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			got, err := NewFinder(tt.args.i)
			if err == nil {
				if tt.wantError {
					t.Errorf("NewFinder() error did not occur. got: %v", got)
					return
				}
			} else if !tt.wantError {
				t.Errorf("NewFinder() unexpected error [%v] occurred. wantError: %v", err, tt.wantError)
			}
		})
	}
}
// TestNewFinderWithGetterAndSep verifies that the constructor accepts a
// non-empty separator and rejects an empty one.
func TestNewFinderWithGetterAndSep(t *testing.T) {
	t.Parallel()
	g, err := NewGetter(newFinderTestStructPtr())
	if err != nil {
		t.Errorf("NewGetter() error = %v", err)
	}
	type args struct {
		g *Getter
		sep string
	}
	tests := []struct {
		name string
		args args
		wantError bool
	}{
		{
			name: "NewFinderWithGetterAndSep with valid sep",
			args: args{g: g, sep: ":"},
			wantError: false,
		},
		{
			name: "NewFinderWithGetterAndSep with empty sep",
			args: args{g: g, sep: ""},
			wantError: true,
		},
	}
	for _, tt := range tests {
		tt := tt // See: https://gist.github.com/posener/92a55c4cd441fc5e5e85f27bca008721
		t.Run(tt.name, func(t *testing.T) {
			t.Parallel()
			got, err := NewFinderWithGetterAndSep(tt.args.g, tt.args.sep)
			if err == nil {
				if tt.wantError {
					t.Errorf("NewFinderWithGetterAndSep() error did not occur. got: %v", got)
					return
				}
			} else if !tt.wantError {
				t.Errorf("NewFinderWithGetterAndSep() unexpected error [%v] occurred. wantError %v", err, tt.wantError)
			}
		})
	}
}
// TestFinderToMap exercises Finder's Into/Find/FindTop chains and verifies
// the flattened map produced by ToMap, including error cases and that
// Reset() clears chain errors. wantNestedMap is recorded but not yet
// asserted (ToNestedMap is not implemented).
func TestFinderToMap(t *testing.T) {
	t.Parallel()
	var f *Finder
	var err error
	fs := make([]*Finder, 10)
	for i := 0; i < len(fs); i++ {
		f, err = NewFinder(newFinderTestStructPtr())
		if err != nil {
			t.Errorf("NewFinder() error = %v", err)
			return
		}
		fs[i] = f
	}
	fsep, err := NewFinderWithSep(newFinderTestStructPtr(), ":")
	if err != nil {
		t.Errorf("NewFinderWithSep() error = %v", err)
		return
	}
	type args struct {
		chain *Finder
	}
	tests := []struct {
		name            string
		args            args
		wantError       bool
		wantErrorString string
		wantMap         map[string]interface{}
		wantNestedMap   map[string]interface{} // FIXME: not implemented ToNestedMap method yet
		cmpopts         []cmp.Option
	}{
		{
			name: "with toplevel find chain",
			args: args{
				chain: fs[0].
					Find(
						"Int64",
						"Float64",
						"String",
						"Stringptr",
						"Stringslice",
						"Bool",
						"Map",
						//"Func",
						"ChInt",
						"privateString",
						"FinderTestStruct2",
						"FinderTestStruct2Ptr",
						"FinderTestStruct4Slice",
						"FinderTestStruct4PtrSlice",
					),
			},
			wantMap: map[string]interface{}{
				"Int64":       int64(-1),
				"Float64":     float64(-3.45),
				"String":      "test name",
				"Stringptr":   finderTestString2,
				"Stringslice": []string{"strslice1", "strslice2"},
				"Bool":        true,
				"Map":         map[string]interface{}{"k1": "v1", "k2": 2},
				//"Func":          finderTestFunc, // TODO: func is fail
				"ChInt":         finderTestChan,
				"privateString": nil, // unexported field is nil
				"FinderTestStruct2": FinderTestStruct2{
					String:            "struct2 string",
					FinderTestStruct3: &FinderTestStruct3{String: "struct3 string", Int: -123},
				},
				"FinderTestStruct2Ptr": FinderTestStruct2{ // not ptr
					String:            "struct2 string ptr",
					FinderTestStruct3: &FinderTestStruct3{String: "struct3 string ptr", Int: -456},
				},
				"FinderTestStruct4Slice": []FinderTestStruct4{
					{String: "key100", String2: "value100"},
					{String: "key200", String2: "value200"},
				},
				"FinderTestStruct4PtrSlice": []*FinderTestStruct4{
					{String: "key991", String2: "value991"},
					{String: "key992", String2: "value992"},
				},
			},
		},
		{
			name: "with single-nest chain",
			args: args{
				chain: fs[1].
					Into("FinderTestStruct2").Find("String"),
			},
			wantMap: map[string]interface{}{
				"FinderTestStruct2.String": "struct2 string",
			},
			wantNestedMap: map[string]interface{}{
				"FinderTestStruct2": map[string]interface{}{
					"String": "struct2 string",
				},
			},
		},
		{
			name: "with two-nest chain",
			args: args{
				chain: fs[2].
					Into("FinderTestStruct2Ptr", "FinderTestStruct3").Find("String", "Int"),
			},
			wantMap: map[string]interface{}{
				"FinderTestStruct2Ptr.FinderTestStruct3.String": "struct3 string ptr",
				"FinderTestStruct2Ptr.FinderTestStruct3.Int":    int(-456),
			},
			wantNestedMap: map[string]interface{}{
				"FinderTestStruct2Ptr": map[string]interface{}{
					// BUG FIX: key was garbled ("FinderTestStruFinderTestStruct3ct2Ptr");
					// the nested key must be the inner struct name.
					"FinderTestStruct3": map[string]interface{}{
						"String": "struct3 string ptr",
						"Int":    int(-456),
					},
				},
			},
		},
		{
			name: "with multi nest chains",
			args: args{
				chain: fs[3].
					Into("FinderTestStruct2").Find("String").
					Into("FinderTestStruct2Ptr").Find("String").
					Into("FinderTestStruct2Ptr", "FinderTestStruct3").Find("String", "Int"),
			},
			wantMap: map[string]interface{}{
				"FinderTestStruct2.String":                      "struct2 string",
				"FinderTestStruct2Ptr.String":                   "struct2 string ptr",
				"FinderTestStruct2Ptr.FinderTestStruct3.String": "struct3 string ptr",
				"FinderTestStruct2Ptr.FinderTestStruct3.Int":    int(-456),
			},
			wantNestedMap: map[string]interface{}{
				"FinderTestStruct2": map[string]interface{}{
					"String": "struct2 string",
					"FinderTestStruct2Ptr": map[string]interface{}{
						"String": "struct2 string ptr",
						"FinderTestStruct3": map[string]interface{}{
							"String": "struct3 string ptr",
							"Int":    int(-456),
						},
					},
				},
			},
		},
		{
			name: "with Find with non-existed name",
			args: args{
				chain: fs[4].Find("NonExist"),
			},
			wantError:       true,
			wantErrorString: "field name [NonExist] does not exist in getter",
		},
		{
			name: "with Find with existed and non-existed names",
			args: args{
				chain: fs[5].Find("String", "NonExist"),
			},
			wantError:       true,
			wantErrorString: "field name [NonExist] does not exist in getter",
		},
		{
			name: "with Struct with non-existed name",
			args: args{
				chain: fs[6].Into("NonExist").Find("String"),
			},
			wantError:       true,
			wantErrorString: "error Into() key [NonExist]: name [NonExist] does not exist",
		},
		{
			name: "with Struct with existed name and Find with non-existed name",
			args: args{
				chain: fs[7].Into("FinderTestStruct2").Find("NonExist"),
			},
			wantError:       true,
			wantErrorString: "field name [NonExist] does not exist in getter",
		},
		{
			name: "with Struct with existed and non-existed name and Find",
			args: args{
				chain: fs[8].
					Into("FinderTestStruct2").Find("String").
					Into("FinderTestStruct2", "NonExist").Find("String"),
			},
			wantError:       true,
			wantErrorString: "error Into() key [FinderTestStruct2.NonExist]: name [NonExist] does not exist",
		},
		{
			name: "with multi nest chains separated by assigned sep",
			args: args{
				chain: fsep.
					Into("FinderTestStruct2").Find("String").
					Into("FinderTestStruct2Ptr").Find("String").
					Into("FinderTestStruct2Ptr", "FinderTestStruct3").Find("String", "Int"),
			},
			wantMap: map[string]interface{}{
				"FinderTestStruct2:String":                      "struct2 string",
				"FinderTestStruct2Ptr:String":                   "struct2 string ptr",
				"FinderTestStruct2Ptr:FinderTestStruct3:String": "struct3 string ptr",
				"FinderTestStruct2Ptr:FinderTestStruct3:Int":    int(-456),
			},
		},
		{
			name: "with toplevel and multi-nest find chain using FindTop",
			args: args{
				chain: fs[9].
					FindTop(
						"Int64",
						"Float64",
						"String",
						"Stringptr",
						"Stringslice",
						"Bool",
						"Map",
						//"Func",
						"ChInt",
						"privateString",
						"FinderTestStruct2",
						"FinderTestStruct2Ptr",
						"FinderTestStruct4Slice",
						"FinderTestStruct4PtrSlice",
					).
					Into("FinderTestStruct2Ptr").Find("String").
					Into("FinderTestStruct2Ptr", "FinderTestStruct3").Find("String", "Int"),
			},
			wantMap: map[string]interface{}{
				"Int64":       int64(-1),
				"Float64":     float64(-3.45),
				"String":      "test name",
				"Stringptr":   finderTestString2,
				"Stringslice": []string{"strslice1", "strslice2"},
				"Bool":        true,
				"Map":         map[string]interface{}{"k1": "v1", "k2": 2},
				//"Func":          finderTestFunc, // TODO: func is fail
				"ChInt":         finderTestChan,
				"privateString": nil, // unexported field is nil
				"FinderTestStruct2": FinderTestStruct2{
					String:            "struct2 string",
					FinderTestStruct3: &FinderTestStruct3{String: "struct3 string", Int: -123},
				},
				"FinderTestStruct2Ptr": FinderTestStruct2{ // not ptr
					String:            "struct2 string ptr",
					FinderTestStruct3: &FinderTestStruct3{String: "struct3 string ptr", Int: -456},
				},
				"FinderTestStruct4Slice": []FinderTestStruct4{
					{String: "key100", String2: "value100"},
					{String: "key200", String2: "value200"},
				},
				"FinderTestStruct4PtrSlice": []*FinderTestStruct4{
					{String: "key991", String2: "value991"},
					{String: "key992", String2: "value992"},
				},
				"FinderTestStruct2Ptr.String":                   "struct2 string ptr",
				"FinderTestStruct2Ptr.FinderTestStruct3.String": "struct3 string ptr",
				"FinderTestStruct2Ptr.FinderTestStruct3.Int":    int(-456),
			},
		},
	}
	for _, tt := range tests {
		tt := tt // See: https://gist.github.com/posener/92a55c4cd441fc5e5e85f27bca008721
		t.Run(tt.name, func(t *testing.T) {
			// FIXME: comment out t.Parallel() because of race condition
			//t.Parallel()
			got, err := tt.args.chain.ToMap()
			if err == nil {
				if tt.wantError {
					t.Errorf("error does not occur. got: %v", got)
					return
				}
				if got == nil {
					t.Errorf("result is nil %v", got)
					return
				}
				for k, wv := range tt.wantMap {
					gv, ok := got[k]
					if ok {
						if d := cmp.Diff(gv, wv, tt.cmpopts...); d != "" {
							t.Errorf("key: %s, gotMap: %+v, (-got +want)\n%s", k, got, d)
							return
						}
					} else {
						t.Errorf("ok: %v, key: %s, gotValue: [%v], wantValue: [%v], gotMap: %+v, ", ok, k, gv, wv, got)
						return
					}
				}
			} else {
				if tt.args.chain.HasError() && tt.wantError {
					if d := cmp.Diff(err.Error(), tt.wantErrorString); d != "" {
						t.Errorf("error string is unmatch. (-got +want)\n%s", d)
						return
					}
					// Reset must clear the accumulated chain errors.
					tt.args.chain.Reset()
					if tt.args.chain.HasError() {
						t.Errorf("Reset() does not work expectedly. Errors still remain.")
					}
				} else {
					t.Errorf("unexpected error = %v, HasError: %v, wantError: %v", err, tt.args.chain.HasError(), tt.wantError)
				}
			}
		})
	}
}
// TestFromKeys drives Finder chains built from FinderKeys configuration
// files (testdata/finder_from_conf/ex_test{1..5}_yml) via FromKeys and
// checks the flattened maps and errors produced by ToMap, plus that
// Reset() clears chain errors.
func TestFromKeys(t *testing.T) {
	// Note: This test should *NOT* be parallel because of race condition in NewFinderKeys func
	// t.Parallel()
	var f *Finder
	var fk *FinderKeys
	var err error
	fs := make([]*Finder, 5)
	fks := make([]*FinderKeys, 5)
	// Pair each Finder with the key set loaded from its numbered conf file.
	for i := 0; i < len(fs); i++ {
		fk, err = NewFinderKeys("testdata/finder_from_conf", fmt.Sprintf("ex_test%s_yml", strconv.Itoa(i+1)))
		if err != nil {
			t.Errorf("NewFinderKeys() error = %v", err)
			return
		}
		fks[i] = fk
		f, err = NewFinder(newFinderTestStructPtr())
		if err != nil {
			t.Errorf("NewFinder() error = %v", err)
			return
		}
		fs[i] = f
	}
	type args struct {
		chain *Finder
	}
	tests := []struct {
		name            string
		args            args
		wantError       bool
		wantErrorString string
		wantMap         map[string]interface{}
		cmpopts         []cmp.Option
	}{
		{
			name: "with toplevel find chain",
			args: args{
				chain: fs[0].FromKeys(fks[0]),
			},
			wantMap: map[string]interface{}{
				"Int64":       int64(-1),
				"Float64":     float64(-3.45),
				"String":      "test name",
				"Stringptr":   finderTestString2,
				"Stringslice": []string{"strslice1", "strslice2"},
				"Bool":        true,
				"Map":         map[string]interface{}{"k1": "v1", "k2": 2},
				"ChInt":         finderTestChan,
				"privateString": nil, // unexported field is nil
				"FinderTestStruct2": FinderTestStruct2{
					String:            "struct2 string",
					FinderTestStruct3: &FinderTestStruct3{String: "struct3 string", Int: -123},
				},
				"FinderTestStruct4Slice": []FinderTestStruct4{
					{String: "key100", String2: "value100"},
					{String: "key200", String2: "value200"},
				},
				"FinderTestStruct4PtrSlice": []*FinderTestStruct4{
					{String: "key991", String2: "value991"},
					{String: "key992", String2: "value992"},
				},
				"FinderTestStruct2Ptr.String":                   "struct2 string ptr",
				"FinderTestStruct2Ptr.FinderTestStruct3.String": "struct3 string ptr",
				"FinderTestStruct2Ptr.FinderTestStruct3.Int":    int(-456),
			},
		},
		{
			name: "with Find with non-existed name",
			args: args{
				chain: fs[1].FromKeys(fks[1]),
			},
			wantError:       true,
			wantErrorString: "field name [NonExist] does not exist in getter",
		},
		{
			name: "with Find with existed and non-existed names",
			args: args{
				chain: fs[2].FromKeys(fks[2]),
			},
			wantError:       true,
			wantErrorString: "field name [NonExist] does not exist in getter",
		},
		{
			name: "with Struct with non-existed name",
			args: args{
				chain: fs[3].FromKeys(fks[3]),
			},
			wantError:       true,
			wantErrorString: "error Into() key [NonExist]: name [NonExist] does not exist",
		},
		{
			name: "with Struct with existed name and Find with non-existed name",
			args: args{
				chain: fs[4].FromKeys(fks[4]),
			},
			wantError:       true,
			wantErrorString: "field name [NonExist] does not exist in getter",
		},
	}
	for _, tt := range tests {
		tt := tt // See: https://gist.github.com/posener/92a55c4cd441fc5e5e85f27bca008721
		t.Run(tt.name, func(t *testing.T) {
			// FIXME: comment out t.Parallel() because of race condition
			// t.Parallel()
			got, err := tt.args.chain.ToMap()
			if err == nil {
				if tt.wantError {
					t.Errorf("error does not occur. got: %v", got)
					return
				}
				if got == nil {
					t.Errorf("result is nil %v", got)
					return
				}
				for k, wv := range tt.wantMap {
					gv, ok := got[k]
					if ok {
						if d := cmp.Diff(gv, wv, tt.cmpopts...); d != "" {
							t.Errorf("key: %s, gotMap: %+v, (-got +want)\n%s", k, got, d)
							return
						}
					} else {
						t.Errorf("ok: %v, key: %s, gotValue: [%v], wantValue: [%v], gotMap: %+v, ", ok, k, gv, wv, got)
						return
					}
				}
			} else {
				if tt.args.chain.HasError() && tt.wantError {
					if d := cmp.Diff(err.Error(), tt.wantErrorString); d != "" {
						t.Errorf("error string is unmatch. (-got +want)\n%s", d)
						return
					}
					// Reset must clear the accumulated chain errors.
					tt.args.chain.Reset()
					if tt.args.chain.HasError() {
						t.Errorf("Reset() does not work expectedly. Errors still remain.")
					}
				} else {
					t.Errorf("unexpected error = %v, HasError: %v, wantError: %v", err, tt.args.chain.HasError(), tt.wantError)
				}
			}
		})
	}
}
// TestNewFinderKeys checks loading of key lists from yaml/json conf files
// under testdata/finder_from_conf: valid files yield the expected key set,
// while missing/empty/invalid files must return an error.
func TestNewFinderKeys(t *testing.T) {
	t.Parallel()
	type args struct {
		d string
		n string
	}
	tests := []struct {
		name      string
		args      args
		wantError bool
		wantLen   int
		wantKeys  []string
	}{
		{
			name:      "with valid yaml file",
			args:      args{d: "testdata/finder_from_conf", n: "ex_test1_yml"},
			wantError: false,
			wantLen:   15,
			wantKeys: []string{
				"Int64",
				"Float64",
				"String",
				"Stringptr",
				"Stringslice",
				"Bool",
				"Map",
				"ChInt",
				"privateString",
				"FinderTestStruct2",
				"FinderTestStruct4Slice",
				"FinderTestStruct4PtrSlice",
				"FinderTestStruct2Ptr.String",
				"FinderTestStruct2Ptr.FinderTestStruct3.String",
				"FinderTestStruct2Ptr.FinderTestStruct3.Int",
			},
		},
		{
			name:      "with valid json file",
			args:      args{d: "testdata/finder_from_conf", n: "ex_test1_json"},
			wantError: false,
			wantLen:   15,
			wantKeys: []string{
				"Int64",
				"Float64",
				"String",
				"Stringptr",
				"Stringslice",
				"Bool",
				"Map",
				"ChInt",
				"privateString",
				"FinderTestStruct2",
				"FinderTestStruct4Slice",
				"FinderTestStruct4PtrSlice",
				"FinderTestStruct2Ptr.String",
				"FinderTestStruct2Ptr.FinderTestStruct3.String",
				"FinderTestStruct2Ptr.FinderTestStruct3.Int",
			},
		},
		{
			name:      "with invalid conf file that Keys does not exist",
			args:      args{d: "testdata/finder_from_conf", n: "ex_test_nonkeys_yml"},
			wantError: true,
		},
		{
			name:      "with invalid conf file that is empty",
			args:      args{d: "testdata/finder_from_conf", n: "ex_test_empty_yml"},
			wantError: true,
		},
		{
			name:      "with invalid conf file",
			args:      args{d: "testdata/finder_from_conf", n: "ex_test_invalid_yml"},
			wantError: true,
		},
		{
			name:      "with conf file does not exist",
			args:      args{d: "testdata/finder_from_conf", n: "ex_test_notexist"},
			wantError: true,
		},
	}
	for _, tt := range tests {
		tt := tt // See: https://gist.github.com/posener/92a55c4cd441fc5e5e85f27bca008721
		t.Run(tt.name, func(t *testing.T) {
			// FIXME: comment out t.Parallel() because of race condition
			// t.Parallel()
			got, err := NewFinderKeys(tt.args.d, tt.args.n)
			if err == nil {
				if tt.wantError {
					t.Errorf("NewFinderKeys() error did not occur. got: %v", got)
					return
				}
				// Both length and exact key order are asserted.
				if got.Len() != tt.wantLen {
					t.Errorf("NewFinderKeys() unexpected len. got: %d, want: %d", got.Len(), tt.wantLen)
				}
				if d := cmp.Diff(got.Keys(), tt.wantKeys); d != "" {
					t.Errorf("NewFinderKeys() unexpected keys. (-got +want)\n%s", d)
				}
			} else if !tt.wantError {
				t.Errorf("NewFinderKeys() unexpected error [%v] occurred. wantError: %v", err, tt.wantError)
			}
		})
	}
}
|
package main
import fmt "fmt"
// main builds the sorted slice [0..99] and prints 1 if 11 is found in it,
// 0 otherwise.
func main() {
	values := make([]int, 100)
	for i := range values {
		values[i] = i
	}
	found := search(values, 11)
	fmt.Println(found)
}
// search performs a binary search for x in the sorted slice a and returns
// 1 when x is present, 0 otherwise.
//
// BUG FIX: the previous version looped only while left < right and never
// examined a[left] after the loop, so it could miss elements (e.g.
// search([]int{0, 1, 2}, 2) returned 0). Using an inclusive right bound
// and left <= right fixes this, and also handles an empty slice safely.
func search(a []int, x int) int {
	left, right := 0, len(a)-1
	for left <= right {
		// Overflow-safe midpoint computation.
		mid := left + (right-left)/2
		switch {
		case a[mid] < x:
			left = mid + 1
		case a[mid] > x:
			right = mid - 1
		default:
			return 1
		}
	}
	return 0
}
|
// Copyright 2015 The Vanadium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package profiles
import (
"fmt"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"v.io/jiri/project"
"v.io/jiri/tool"
"v.io/jiri/util"
"v.io/x/lib/envvar"
)
// GoFlags lists all of the Go environment variables and will be sorted in the
// init function for this package. UnsetGoEnvVars/UnsetGoEnvMap clear exactly
// these keys, and GoEnvironmentFromOS reports exactly these keys.
var GoFlags = []string{
	"CC",
	"CC_FOR_TARGET",
	"CGO_ENABLED",
	"CXX_FOR_TARGET",
	"GO15VENDOREXPERIMENT",
	"GOARCH",
	"GOBIN",
	"GOEXE",
	"GOGCCFLAGS",
	"GOHOSTARCH",
	"GOHOSTOS",
	"GOOS",
	"GOPATH",
	"GORACE",
	"GOROOT",
	"GOTOOLDIR",
}
// ProfilesMode selects whether profile information is used (UseProfiles)
// or ignored entirely (SkipProfiles). It implements the flag.Value and
// flag.Getter interfaces so it can be bound directly to a boolean flag.
type ProfilesMode bool

// Set parses s as a boolean and stores it, satisfying flag.Value.
func (pm *ProfilesMode) Set(s string) error {
	v, err := strconv.ParseBool(s)
	*pm = ProfilesMode(v)
	return err
}

// Get returns the underlying boolean value, satisfying flag.Getter.
func (pm *ProfilesMode) Get() interface{} { return bool(*pm) }

// String formats the current value as "true"/"false".
func (pm *ProfilesMode) String() string { return fmt.Sprintf("%v", *pm) }

// IsBoolFlag marks the flag as boolean so it may appear without a value.
func (pm *ProfilesMode) IsBoolFlag() bool { return true }

const (
	// UseProfiles enables profile handling (the zero value).
	UseProfiles ProfilesMode = false
	// SkipProfiles disables all profile handling.
	SkipProfiles ProfilesMode = true
)

func init() {
	// Keep GoFlags sorted for deterministic iteration/printing.
	sort.Strings(GoFlags)
}
// UnsetGoEnvVars removes every Go-related variable listed in GoFlags from
// the given environment.
func UnsetGoEnvVars(env *envvar.Vars) {
	for i := range GoFlags {
		env.Delete(GoFlags[i])
	}
}
// UnsetGoEnvMap removes every Go-related variable listed in GoFlags from
// the given environment map.
func UnsetGoEnvMap(env map[string]string) {
	for i := range GoFlags {
		delete(env, GoFlags[i])
	}
}
// GoEnvironmentFromOS returns the values of all Go environment variables
// as set via the OS; unset variables are omitted. Entries are returned in
// "KEY=VALUE" form, in GoFlags order.
func GoEnvironmentFromOS() []string {
	// Renamed the local from `os` to `osEnv`: the original shadowed the
	// "os" package for the remainder of the function body.
	osEnv := envvar.SliceToMap(os.Environ())
	vars := make([]string, 0, len(GoFlags))
	for _, k := range GoFlags {
		v, present := osEnv[k]
		if !present {
			continue
		}
		vars = append(vars, envvar.JoinKeyValue(k, v))
	}
	return vars
}
// ConfigHelper wraps the various sources of configuration and profile
// information to provide convenient methods for determining the environment
// variables to use for a given situation. It creates an initial copy of the OS
// environment that is mutated by its various methods.
type ConfigHelper struct {
	*envvar.Vars
	legacyMode   bool // true when old-style (pre-schema) profiles or JIRI_PROFILE are in effect
	profilesMode bool // true when profiles are skipped entirely (SkipProfiles)
	root         string // root of the jiri universe
	ctx          *tool.Context
	config       *util.Config
	projects     project.Projects
	tools        project.Tools
}
// NewConfigHelper creates a new config helper. If filename is of non-zero
// length then that file will be read as a profiles manifest file, if not, the
// existing, if any, in-memory profiles information will be used. If SkipProfiles
// is specified for profilesMode, then no profiles are used.
func NewConfigHelper(ctx *tool.Context, profilesMode ProfilesMode, filename string) (*ConfigHelper, error) {
	root, err := project.JiriRoot()
	if err != nil {
		return nil, err
	}
	config, err := util.LoadConfig(ctx)
	if err != nil {
		return nil, err
	}
	projects, tools, err := project.ReadManifest(ctx)
	if err != nil {
		return nil, err
	}
	// Load the profiles manifest only when profiles are in use and a
	// filename was supplied.
	if profilesMode == UseProfiles && len(filename) > 0 {
		if err := Read(ctx, filepath.Join(root, filename)); err != nil {
			return nil, err
		}
	}
	ch := &ConfigHelper{
		ctx:          ctx,
		root:         root,
		config:       config,
		projects:     projects,
		tools:        tools,
		profilesMode: bool(profilesMode),
	}
	// Start from a copy of the OS environment; the helper's methods
	// mutate this copy, never the real environment.
	ch.Vars = envvar.VarsFromOS()
	if profilesMode == SkipProfiles {
		return ch, nil
	}
	// Legacy mode applies when the manifest uses the original schema or
	// the JIRI_PROFILE environment variable is set; in that case the
	// environment comes from the legacy implementation instead.
	ch.legacyMode = (SchemaVersion() == Original) || (len(os.Getenv("JIRI_PROFILE")) > 0)
	if ch.legacyMode {
		vars, err := util.JiriLegacyEnvironment(ch.ctx)
		if err != nil {
			return nil, err
		}
		ch.Vars = vars
	}
	return ch, nil
}
// Root returns the root of the jiri universe.
func (ch *ConfigHelper) Root() string {
	return ch.root
}

// LegacyProfiles returns true if the old-style profiles are being used.
func (ch *ConfigHelper) LegacyProfiles() bool {
	return ch.legacyMode
}

// SkippingProfiles returns true if no profiles are being used, i.e.
// SkipProfiles was passed to NewConfigHelper.
func (ch *ConfigHelper) SkippingProfiles() bool {
	return ch.profilesMode == bool(SkipProfiles)
}
// CommonConcatVariables returns a map of variables that are commonly used
// for the concat parameter to SetEnvFromProfilesAndTarget. PATH entries
// are joined with ":"; the compiler/linker flag variables with a space.
func CommonConcatVariables() map[string]string {
	vars := make(map[string]string, 7)
	vars["PATH"] = ":"
	for _, flag := range []string{"CCFLAGS", "CXXFLAGS", "LDFLAGS", "CGO_CFLAGS", "CGO_CXXFLAGS", "CGO_LDFLAGS"} {
		vars[flag] = " "
	}
	return vars
}
// CommonIgnoreVariables returns a map of variables that are commonly used
// for the ignore parameter to SetEnvFromProfilesAndTarget.
func CommonIgnoreVariables() map[string]bool {
	ignore := make(map[string]bool, 3)
	for _, k := range []string{"GOPATH", "GOARCH", "GOOS"} {
		ignore[k] = true
	}
	return ignore
}
// SetEnvFromProfiles populates the embedded environment with the environment
// variables stored in the specified profiles for the specified target if
// new-style profiles are being used, otherwise it uses compiled in values as per
// the original profiles implementation.
// The profiles parameter contains a comma separated list of profile names; if the
// requested target does not exist for any of these profiles then those profiles
// will be ignored. The 'concat' parameter includes a map of variable names
// whose values are to concatenated with any existing ones rather than
// overwriting them (e.g. CFLAGS for example). The value of the concat map
// is the separator to use for that environment variable (e.g. space for
// CFLAGs or ':' for PATH-like ones).
func (ch *ConfigHelper) SetEnvFromProfiles(concat map[string]string, ignore map[string]bool, profiles string, target Target) {
	if ch.profilesMode || ch.legacyMode {
		return
	}
	for _, profile := range strings.Split(profiles, ",") {
		t := LookupProfileTarget(profile, target)
		if t == nil {
			// Profile has no matching target: silently skip, per contract.
			continue
		}
		for _, tmp := range t.Env.Vars {
			k, v := envvar.SplitKeyValue(tmp)
			if ignore[k] {
				continue
			}
			if sep := concat[k]; len(sep) > 0 {
				ov := ch.Vars.GetTokens(k, sep)
				nv := envvar.SplitTokens(v, sep)
				// BUG FIX: rejoin with the variable's own separator; the
				// previous code hard-coded " ", which corrupted
				// ":"-separated variables such as PATH.
				ch.Vars.SetTokens(k, append(ov, nv...), sep)
				continue
			}
			ch.Vars.Set(k, v)
		}
	}
}
// ValidateRequestedProfilesAndTarget checks that the supplied slice of profiles
// names is supported and that each has the specified target installed taking
// account if running in bootstrap mode or with old-style profiles.
func (ch *ConfigHelper) ValidateRequestedProfilesAndTarget(profileNames []string, target Target) error {
	// Nothing to validate when profiles are skipped or legacy mode is active.
	if ch.profilesMode || ch.legacyMode {
		return nil
	}
	for _, n := range profileNames {
		if LookupProfileTarget(n, target) == nil {
			return fmt.Errorf("%q for %q is not available or not installed, use the \"list\" command to see the installed/available profiles.", target, n)
		}
	}
	return nil
}
// PrependToPATH prepends its argument to the PATH environment variable.
func (ch *ConfigHelper) PrependToPATH(path string) {
	existing := ch.GetTokens("PATH", ":")
	ch.SetTokens("PATH", append([]string{path}, existing...), ":")
}
// SetGoPath computes and sets the GOPATH environment variable based on the
// current jiri configuration. It is a no-op in legacy profile mode.
func (ch *ConfigHelper) SetGoPath() {
	if !ch.legacyMode {
		ch.pathHelper("GOPATH", ch.root, ch.projects, ch.config.GoWorkspaces(), "")
	}
}

// SetVDLPath computes and sets the VDLPATH environment variable based on the
// current jiri configuration. It is a no-op in legacy profile mode.
func (ch *ConfigHelper) SetVDLPath() {
	if !ch.legacyMode {
		ch.pathHelper("VDLPATH", ch.root, ch.projects, ch.config.VDLWorkspaces(), "src")
	}
}
// pathHelper is a utility function for determining paths for project workspaces.
// It appends to the PATH-like variable <name> every <root>/<workspace>/<suffix>
// directory that exists locally and is associated with a known jiri project,
// then writes the ":"-joined result back to the environment.
func (ch *ConfigHelper) pathHelper(name, root string, projects project.Projects, workspaces []string, suffix string) {
	path := ch.GetTokens(name, ":")
	for _, workspace := range workspaces {
		absWorkspace := filepath.Join(root, workspace, suffix)
		// Only append an entry to the path if the workspace is rooted
		// under a jiri project that exists locally or vice versa.
		for _, project := range projects {
			// We check if <project.Path> is a prefix of <absWorkspace> to
			// account for Go workspaces nested under a single jiri project,
			// such as: $JIRI_ROOT/release/projects/chat/go.
			//
			// We check if <absWorkspace> is a prefix of <project.Path> to
			// account for Go workspaces that span multiple jiri projects,
			// such as: $JIRI_ROOT/release/go.
			if strings.HasPrefix(absWorkspace, project.Path) || strings.HasPrefix(project.Path, absWorkspace) {
				if _, err := ch.ctx.Run().Stat(filepath.Join(absWorkspace)); err == nil {
					path = append(path, absWorkspace)
					break
				}
			}
		}
	}
	ch.SetTokens(name, path, ":")
}
// MergeEnv merges vars with the variables in env taking care to concatenate
// values as per the concat and ignore parameters similarly to SetEnvFromProfiles.
func MergeEnv(concat map[string]string, ignore map[string]bool, env *envvar.Vars, vars ...[]string) {
	for _, ev := range vars {
		for _, tmp := range ev {
			k, v := envvar.SplitKeyValue(tmp)
			if ignore[k] {
				continue
			}
			if sep := concat[k]; len(sep) > 0 {
				ov := env.GetTokens(k, sep)
				nv := envvar.SplitTokens(v, sep)
				// BUG FIX: join with the variable's own separator instead of
				// the hard-coded " ", which corrupted ":"-separated
				// variables such as PATH.
				env.SetTokens(k, append(ov, nv...), sep)
				continue
			}
			env.Set(k, v)
		}
	}
}
// MergeEnvFromProfiles merges the environment variables stored in the specified
// profiles and target with the env parameter. It uses MergeEnv to do so.
// It returns an error if any named profile lacks the requested target.
func MergeEnvFromProfiles(concat map[string]string, ignore map[string]bool, env *envvar.Vars, target Target, profileNames ...string) ([]string, error) {
	vars := [][]string{}
	for _, name := range profileNames {
		t := LookupProfileTarget(name, target)
		if t == nil {
			return nil, fmt.Errorf("failed to lookup %v --target=%v", name, target)
		}
		vars = append(vars, t.Env.Vars)
	}
	MergeEnv(concat, ignore, env, vars...)
	return env.ToSlice(), nil
}
|
package main
import "fmt"
// set inserts the key/value pair (k, v) into m. Go maps are reference
// types, so the caller observes the mutation.
func set(m map[string]string, k string, v string) {
	m[k] = v
}
// print writes every key/value pair of m to stdout as "k <key> v <value>"
// (map iteration order is unspecified). Note: shadows the predeclared
// print built-in within this package.
func print(m map[string]string) {
	for k, v := range m {
		fmt.Printf("k %s v %s\n", k, v)
	}
}
// main demonstrates that mutations made via set are visible through the
// shared map reference: the second print includes the added "white" entry.
func main() {
	colors := make(map[string]string, 4)
	colors["red"] = "#ff0000"
	colors["green"] = "#00ff00"
	colors["blue"] = "#0000ff"
	print(colors)
	set(colors, "white", "#ffffff")
	print(colors)
}
|
package dtclient
import (
"net/http"
"testing"
"github.com/stretchr/testify/assert"
)
// TestNewClient checks client construction: valid URL/token combinations
// succeed (with or without SkipCertificateValidation), while empty tokens
// or an empty URL must be rejected.
func TestNewClient(t *testing.T) {
	{
		c, err := NewClient("https://aabb.live.dynatrace.com/api", "foo", "bar")
		if assert.NoError(t, err) {
			assert.NotNil(t, c)
		}
	}
	{
		c, err := NewClient("https://aabb.live.dynatrace.com/api", "foo", "bar", SkipCertificateValidation(false))
		if assert.NoError(t, err) {
			assert.NotNil(t, c)
		}
	}
	{
		c, err := NewClient("https://aabb.live.dynatrace.com/api", "foo", "bar", SkipCertificateValidation(true))
		if assert.NoError(t, err) {
			assert.NotNil(t, c)
		}
	}
	{
		// Both tokens empty -> constructor must fail.
		_, err := NewClient("https://aabb.live.dynatrace.com/api", "", "")
		assert.Error(t, err, "tokens are empty")
	}
	{
		// Empty URL -> constructor must fail.
		_, err := NewClient("", "foo", "bar")
		assert.Error(t, err, "empty URL")
	}
}
// TestProxy verifies that the Proxy option installs a proxy function on
// the client's HTTP transport: a well-formed raw URL must be returned by
// transport.Proxy, and applying a malformed URL must not panic.
func TestProxy(t *testing.T) {
	dynatraceServer, _ := createTestDynatraceClient(t, http.NotFoundHandler(), "")
	defer dynatraceServer.Close()
	dtc := dynatraceClient{
		url:        dynatraceServer.URL,
		apiToken:   apiToken,
		paasToken:  paasToken,
		httpClient: dynatraceServer.Client(),
		hostCache:  nil,
	}
	transport := dtc.httpClient.Transport.(*http.Transport)
	rawURL := "working.url"
	options := Proxy(rawURL)
	assert.NotNil(t, options)
	options(&dtc)
	url, err := transport.Proxy(&http.Request{})
	assert.NoError(t, err)
	assert.NotNil(t, url)
	assert.Equal(t, rawURL, url.Path)
	// Malformed proxy URL: the option must still be constructible and
	// applicable without panicking.
	options = Proxy("{!.*&%")
	assert.NotNil(t, options)
	options(&dtc)
}
// TestCerts verifies that the Certs option populates the transport's TLS
// root CA pool, even when invoked with a nil certificate list.
func TestCerts(t *testing.T) {
	dynatraceServer, _ := createTestDynatraceClient(t, http.NotFoundHandler(), "")
	defer dynatraceServer.Close()
	dtc := dynatraceClient{
		url:        dynatraceServer.URL,
		apiToken:   apiToken,
		paasToken:  paasToken,
		httpClient: dynatraceServer.Client(),
		hostCache:  nil,
	}
	transport := dtc.httpClient.Transport.(*http.Transport)
	certs := Certs(nil)
	assert.NotNil(t, certs)
	certs(&dtc)
	assert.NotNil(t, transport.TLSClientConfig.RootCAs)
}
|
package cmd
import (
"fmt"
"os"
"strings"
"text/tabwriter"
sunspec "github.com/andig/gosunspec"
bus "github.com/andig/gosunspec/modbus"
"github.com/andig/gosunspec/smdx"
"github.com/evcc-io/evcc/util"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/volkszaehler/mbmd/meters"
quirks "github.com/volkszaehler/mbmd/meters/sunspec"
)
// sunspecCmd represents the sunspec command: it dumps SunSpec model
// information for the modbus TCP device given as the single argument.
// (Previous comment said "charger command" — copy/paste leftover.)
var sunspecCmd = &cobra.Command{
	Use:   "sunspec <connection>",
	Short: "Dump SunSpec model information",
	Args:  cobra.ExactArgs(1),
	Run:   runSunspec,
}

// slaveID holds the value of the --id/-i flag (modbus slave id, default 1).
var slaveID *int

func init() {
	rootCmd.AddCommand(sunspecCmd)
	slaveID = sunspecCmd.Flags().IntP("id", "i", 1, "Slave id")
}
// pf prints the formatted message to stdout, normalizing the format so it
// always ends with exactly one newline.
func pf(format string, v ...interface{}) {
	fmt.Printf(strings.TrimSuffix(format, "\n")+"\n", v...)
}
// modelName resolves the human-readable name of a SunSpec model via the
// smdx registry; it returns "" for unknown model ids.
func modelName(m sunspec.Model) string {
	if model := smdx.GetModel(uint16(m.Id())); model != nil {
		return model.Name
	}
	return ""
}
// runSunspec connects to the modbus TCP device named in args[0], walks all
// SunSpec devices/models/blocks and prints every point in a tab-aligned
// table on stdout.
func runSunspec(cmd *cobra.Command, args []string) {
	util.LogLevel(viper.GetString("log"), nil)
	conn := meters.NewTCP(args[0])
	conn.Slave(uint8(*slaveID))
	conn.Logger(log.TRACE)
	// A non-nil device with a non-nil error means a partial scan; only a
	// nil device is fatal.
	in, err := bus.Open(conn.ModbusClient())
	if err != nil && in == nil {
		log.FATAL.Fatal(err)
	} else if err != nil {
		log.WARN.Printf("warning: device opened with partial result: %v", err) // log error but continue
	}
	tw := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0)
	in.Do(func(d sunspec.Device) {
		d.Do(func(m sunspec.Model) {
			pf("--------- Model %d %s ---------", m.Id(), modelName(m))
			blocknum := 0
			m.Do(func(b sunspec.Block) {
				// Repeated blocks after the first get a sub-header.
				if blocknum > 0 {
					fmt.Fprintf(tw, "-- Block %d --\n", blocknum)
				}
				blocknum++
				err = b.Read()
				if err != nil {
					log.INFO.Printf("skipping due to read error: %v", err)
					return
				}
				b.Do(func(p sunspec.Point) {
					// First three characters of the type classify it as
					// int/uint/acc (numeric, scalable) or other.
					t := p.Type()[0:3]
					v := p.Value()
					if p.NotImplemented() {
						v = "n/a"
					} else if t == "int" || t == "uin" || t == "acc" {
						// for time being, always to this
						quirks.FixKostal(p)
						v = p.ScaledValue()
						v = fmt.Sprintf("%.2f", v)
					}
					vs := fmt.Sprintf("%17v", v)
					fmt.Fprintf(tw, "%s\t%s\t %s\n", p.Id(), vs, p.Type())
				})
			})
			// Flush once per model so each table renders independently.
			tw.Flush()
		})
	})
}
|
package person_infrastructure
import (
"net/http"
person_application "github.com/MeliCGS/go-project-api-arq-hexa/api/person/application"
"github.com/gin-gonic/gin"
)
// PersonController exposes HTTP handlers backed by the application-layer
// PeopleSearcher use case.
type PersonController struct {
	PeopleSearcher *person_application.PeopleSearcher
}

// GetAllHandler responds with the full list of people as JSON (HTTP 200).
func (p *PersonController) GetAllHandler(ctx *gin.Context) {
	people := p.PeopleSearcher.SearchAll()
	ctx.JSON(http.StatusOK, people)
}
|
package rock
// makeW creates the buffered write channel and starts the client-side
// poster goroutine for this named Int. Intended to run exactly once via
// I.p.w.Do (see S).
func (I *Int) makeW() {
	I.p.w.c = make(chan []byte, I.Len)
	go postIfClient(I.p.w.c, Tint, I.Name)
}

// makeR creates the buffered read channel and starts the client-side
// getter goroutine for this named Int. Intended to run exactly once via
// I.p.r.Do (see R).
func (I *Int) makeR() {
	I.p.r.c = make(chan []byte, I.Len)
	go getIfClient(I.p.r.c, Tint, I.Name)
}
// makeNIfServer creates the unbuffered notification channel, but only on
// the server side; it is a no-op for clients.
func (I *Int) makeNIfServer() {
	if IsClient {
		return
	}
	I.p.n.c = make(chan int)
}
// add registers this Int in the global name dictionary, lazily creating
// the map and never overwriting an existing entry with the same name.
func (I *Int) add() {
	intDict.Lock()
	defer intDict.Unlock()
	if intDict.m == nil {
		intDict.m = make(map[string]*Int)
	}
	if _, ok := intDict.m[I.Name]; !ok {
		intDict.m[I.Name] = I
	}
}
// to sends i, encoded as bytes, on the write channel. A client sends
// exactly once; a server first waits for a notification on I.p.n.c and
// repeats while notifications remain queued.
//
// NOTE(review): I.p.n.c is created unbuffered (see makeNIfServer), so
// len(I.p.n.c) is always 0 and the loop runs at most once per call —
// confirm whether a buffered channel was intended.
func (I *Int) to(i int) {
	if IsClient {
		I.p.w.c <- int2bytes(i)
		return
	}
	for {
		<-I.p.n.c
		I.p.w.c <- int2bytes(i)
		if len(I.p.n.c) == 0 {
			break
		}
	}
}

// from receives one value from the read channel and decodes it.
func (I *Int) from() int {
	return bytes2int(<-I.p.r.c)
}
// S returns a send-only channel of capacity I.Len. It lazily performs the
// one-time setup (shared transport, dictionary entry, write/notify
// channels), then forwards the single value sent on the returned channel
// to the transport via to, preceded by a zero value.
func (I *Int) S() chan<- int {
	c := make(chan int, I.Len)
	go started.Do(getAndOrPostIfServer)
	I.add()
	I.p.w.Do(I.makeW)
	I.p.n.Do(I.makeNIfServer)
	go func() {
		// Send a leading zero, then relay the user's value.
		// NOTE(review): c is closed before I.to(i) completes — confirm
		// that ordering is intentional.
		I.to(0)
		i := <-c
		close(c)
		I.to(i)
	}()
	return c
}
// R returns a receive-only channel of capacity I.Len carrying a single
// value read from the transport; the channel is closed after that value
// is delivered. The first transport read is discarded (presumably the
// leading zero sent by S — confirm against the protocol).
func (I *Int) R() <-chan int {
	c := make(chan int, I.Len)
	go started.Do(getAndOrPostIfServer)
	I.add()
	I.p.r.Do(I.makeR)
	go func() {
		I.from()
		c <- I.from()
		close(c)
	}()
	return c
}
|
package adminApp
import (
"github.com/gin-gonic/gin"
"hd-mall-ed/packages/common/pkg/app"
)
// ApiInit wraps the gin request context in the admin application's
// ApiFunction helper so handlers can use the shared app utilities.
func ApiInit(c *gin.Context) *ApiFunction {
	return &ApiFunction{app.ApiFunction{C: c}}
}
|
package fs
// Defaults returns the default filesystem configuration, with DataDir set
// to the platform default reported by defaultDataDir.
func Defaults() FsConfig {
	return FsConfig{
		DataDir: defaultDataDir(),
	}
}
|
package k8s
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"github.com/tilt-dev/tilt/internal/container"
"github.com/tilt-dev/tilt/internal/k8s/testyaml"
)
// TestCRDImageObjectInjection checks that a JSONPath image-object locator
// (separate repo/tag fields) can both extract the image reference from a
// CRD and inject a new tag into it.
func TestCRDImageObjectInjection(t *testing.T) {
	entities, err := ParseYAMLFromString(testyaml.CRDImageObjectYAML)
	require.NoError(t, err)
	e := entities[0]
	selector := MustKindSelector("UselessMachine")
	locator := MustJSONPathImageObjectLocator(selector, "{.spec.imageObject}", "repo", "tag")
	images, err := locator.Extract(e)
	require.NoError(t, err)
	require.Equal(t, 1, len(images))
	assert.Equal(t, "docker.io/library/frontend", images[0].String())
	// Inject a tilt-built tag and confirm extraction now sees it.
	e, modified, err := locator.Inject(e, container.MustParseSelector("frontend"),
		container.MustParseNamed("frontend:tilt-123"), v1.PullNever)
	require.NoError(t, err)
	assert.True(t, modified)
	images, err = locator.Extract(e)
	require.NoError(t, err)
	require.Equal(t, 1, len(images))
	assert.Equal(t, "docker.io/library/frontend:tilt-123", images[0].String())
}
// TestCRDPullPolicyInjection checks that injecting an image into a CRD
// container spec also sets imagePullPolicy ("Never") alongside the new
// image reference.
func TestCRDPullPolicyInjection(t *testing.T) {
	entities, err := ParseYAMLFromString(testyaml.CRDContainerSpecYAML)
	require.NoError(t, err)
	e := entities[0]
	selector := MustKindSelector("UselessMachine")
	locator := MustJSONPathImageLocator(selector, "{.spec.containers[*].image}")
	images, err := locator.Extract(e)
	require.NoError(t, err)
	require.Equal(t, 1, len(images))
	assert.Equal(t, "docker.io/library/frontend", images[0].String())
	e, modified, err := locator.Inject(e, container.MustParseSelector("frontend"),
		container.MustParseNamed("frontend:tilt-123"), v1.PullNever)
	require.NoError(t, err)
	require.True(t, modified)
	// Inspect the raw unstructured object to verify both fields were set.
	spec := e.Obj.(*unstructured.Unstructured).Object["spec"].(map[string]interface{})
	c := spec["containers"].([]interface{})[0].(map[string]interface{})
	require.Equal(t, "frontend:tilt-123", c["image"])
	require.Equal(t, "Never", c["imagePullPolicy"].(string))
}
|
package bot
import (
	"fmt"
	"net/url"
	"regexp"
	"strings"

	"github.com/VG-Tech-Dojo/vg-1day-2017-05-20/hironomiu/env"
	"github.com/VG-Tech-Dojo/vg-1day-2017-05-20/hironomiu/model"
)
const (
	// keywordApiUrlFormat is the Yahoo! keyphrase-extraction API endpoint;
	// it is filled in with the app id and the sentence to analyze.
	keywordApiUrlFormat = "https://jlp.yahooapis.jp/KeyphraseService/V1/extract?appid=%s&sentence=%s&output=json"
)
type (
	// Processor receives a message and builds the message to be posted.
	Processor interface {
		Process(message *model.Message) *model.Message
	}
	// HelloWorldProcessor builds a reply with ", world!" appended to the
	// incoming body.
	HelloWorldProcessor struct{}
	// OmikujiProcessor randomly picks one of the fortunes "大吉", "吉",
	// "中吉", "小吉", "末吉" or "凶".
	OmikujiProcessor struct{}
	// KeywordProcessor extracts keywords from the message body.
	KeywordProcessor struct{}
)
// Process returns a pointer to a message whose body is the incoming body
// with ", world!" appended (e.g. "hello" -> "hello, world!").
func (p *HelloWorldProcessor) Process(msgIn *model.Message) *model.Message {
	return &model.Message{
		Body: msgIn.Body + ", world!",
	}
}
// Process returns a pointer to a message whose body is one of the
// fortunes "大吉", "吉", "中吉", "小吉", "末吉" or "凶", chosen at random.
func (p *OmikujiProcessor) Process(msgIn *model.Message) *model.Message {
	fortunes := []string{"大吉", "吉", "中吉", "小吉", "末吉", "凶"}
	return &model.Message{Body: fortunes[randIntn(len(fortunes))]}
}
// Process extracts keywords from the message body via the Yahoo!
// keyphrase API. The body must have the form "keyword <text>"; when it
// does not, an empty keyword list is returned instead of panicking.
func (p *KeywordProcessor) Process(msgIn *model.Message) *model.Message {
	r := regexp.MustCompile("\\Akeyword (.*)\\z")
	matchedStrings := r.FindStringSubmatch(msgIn.Body)
	// BUG FIX: FindStringSubmatch returns nil when there is no match; the
	// previous code indexed [1] unconditionally and panicked on any body
	// not matching "keyword ...".
	if len(matchedStrings) < 2 {
		return &model.Message{
			Body: "キーワード:",
		}
	}
	// BUG FIX: escape the user-supplied text so spaces and special
	// characters survive inside the query string.
	text := url.QueryEscape(matchedStrings[1])
	apiURL := fmt.Sprintf(keywordApiUrlFormat, env.KeywordApiAppId, text)
	// The API responds with a JSON object mapping keyword -> score.
	result := map[string]int{}
	get(apiURL, &result)
	keywords := []string{}
	for keyword := range result {
		keywords = append(keywords, keyword)
	}
	return &model.Message{
		Body: "キーワード:" + strings.Join(keywords, ", "),
	}
}
|
/**
* (C) Copyright IBM Corp. 2021.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package utils
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestErrorHandling(t *testing.T) {
// do a division with error handling support
result := divideWithErrorHandling(1, 0)
assert.Equal(t, 0, result)
// do a division with error handling support
assert.Panics(t, func() { divideWithoutErrorHandling(1, 0) }, "The code did not panic")
}
// divideWithErrorHandling divides m by n. Dividing by zero panics at
// runtime, but the deferred GracefullyHandleError recovers from it; in that
// case the function returns the zero value 0, because the panic occurs
// before the return value is ever assigned.
func divideWithErrorHandling(m int, n int) int {
	defer GracefullyHandleError()
	return m / n
}
// divideWithoutErrorHandling performs plain integer division m / n with no
// recovery: when n is zero it panics with a runtime divide-by-zero error.
func divideWithoutErrorHandling(m int, n int) int {
	quotient := m / n
	return quotient
}
|
package main
import (
"fmt"
)
// usage is a placeholder for printing command usage; it currently does
// nothing. (The original body contained a redundant bare return.)
func usage() {
}
// main is a small Go language demo: variable declaration forms, iota-based
// constants, numeric conversion, and byte- vs rune-wise string iteration.
func main() {
	var v1 int32 = 10
	var v2 = 10
	v3 := 10
	v4 := 9.9

	// Powers of two via iota; the expression repeats implicitly: a=1, b=2, c=4.
	const (
		a int = 1 << iota
		b
		c
	)

	fmt.Println("hello world!") // was "hello word!" — typo fixed
	fmt.Println(a, b, c)
	fmt.Println(1 << 0)

	// int(v4) truncates 9.9 to 9, so this branch does not fire (9 > 10 is false).
	if int(v4) > v2 {
		fmt.Println(int(v1) + v2 + v3)
	}
	fmt.Println(int(v4))

	usage()

	var str string = "hello world!"
	fmt.Println(str)

	// Byte-wise scan: prints every 'h' byte it finds.
	for i := 0; i < len(str); i++ {
		if str[i] == 'h' {
			fmt.Printf("%c.", str[i])
			fmt.Printf("equal.") // was "equel." — typo fixed
		}
	}

	// A rune literal is just its code point: 'h' == 104.
	if 104 == 'h' {
		fmt.Println("104==h")
	}

	// Rune-wise iteration: i is the byte offset, ch the code point.
	for i, ch := range str {
		fmt.Println(i, ch)
	}
}
|
// Copyright 2020 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
package backupccl
import (
"context"
gosql "database/sql"
"fmt"
"io/ioutil"
"net/url"
"path"
"regexp"
"sort"
"strconv"
"testing"
"time"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/ccl/utilccl"
"github.com/cockroachdb/cockroach/pkg/jobs"
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/jobs/jobstest"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/scheduledjobs"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sessiondata"
"github.com/cockroachdb/cockroach/pkg/testutils"
"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
"github.com/cockroachdb/cockroach/pkg/util/leaktest"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/errors"
pbtypes "github.com/gogo/protobuf/types"
"github.com/gorhill/cronexpr"
"github.com/stretchr/testify/require"
)
// allSchedules, passed as maxSchedules, tells the scheduling daemon to
// execute every eligible schedule rather than a bounded number.
const allSchedules = 0

// testHelper starts a server, and arranges for job scheduling daemon to
// use jobstest.JobSchedulerTestEnv.
// This helper also arranges for the manual override of scheduling logic
// via executeSchedules callback.
type execSchedulesFn = func(ctx context.Context, maxSchedules int64, txn *kv.Txn) error
type testHelper struct {
	iodir            string                            // external IO dir, root of backup destinations
	server           serverutils.TestServerInterface   // the running test server
	env              *jobstest.JobSchedulerTestEnv     // fake scheduler environment with controllable time
	cfg              *scheduledjobs.JobExecutionConfig // captured job execution config
	sqlDB            *sqlutils.SQLRunner               // SQL runner against the test server
	executeSchedules func() error                      // manually runs the scheduling daemon logic once
}
// newTestHelper creates and initializes appropriate state for a test,
// returning testHelper as well as a cleanup function.
func newTestHelper(t *testing.T) (*testHelper, func()) {
	dir, dirCleanupFn := testutils.TempDir(t)
	th := &testHelper{
		env:   jobstest.NewJobSchedulerTestEnv(jobstest.UseSystemTables, timeutil.Now()),
		iodir: dir,
	}

	knobs := &jobs.TestingKnobs{
		JobSchedulerEnv: th.env,
		// Take over the scheduling loop: instead of the daemon firing on its
		// own timer, tests trigger scheduling via th.executeSchedules.
		TakeOverJobsScheduling: func(fn execSchedulesFn) {
			th.executeSchedules = func() error {
				// Nudge the adoption queue afterwards so jobs created by the
				// schedules get adopted (and run) without waiting.
				defer th.server.JobRegistry().(*jobs.Registry).TestingNudgeAdoptionQueue()
				return th.cfg.DB.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error {
					return fn(ctx, allSchedules, txn)
				})
			}
		},
		// Capture the execution config so tests can reach the internal
		// executor, the DB handle, and the testing knobs later on.
		CaptureJobExecutionConfig: func(config *scheduledjobs.JobExecutionConfig) {
			th.cfg = config
		},
	}

	args := base.TestServerArgs{
		ExternalIODir: dir,
		Knobs: base.TestingKnobs{
			JobsTestingKnobs: knobs,
		},
	}
	s, db, _ := serverutils.StartServer(t, args)
	// The CaptureJobExecutionConfig knob must have fired during startup.
	require.NotNil(t, th.cfg)
	th.sqlDB = sqlutils.MakeSQLRunner(db)
	th.server = s

	return th, func() {
		dirCleanupFn()
		s.Stopper().Stop(context.Background())
	}
}
// clearSchedules removes every row from system.scheduled_jobs so test cases
// do not observe each other's schedules.
func (h *testHelper) clearSchedules(t *testing.T) {
	t.Helper()
	h.sqlDB.Exec(t, "DELETE FROM system.scheduled_jobs WHERE true")
}
// waitForSuccessfulScheduledJob blocks until the job created by the given
// schedule reaches the succeeded status (retrying via SucceedsSoon).
func (h *testHelper) waitForSuccessfulScheduledJob(t *testing.T, scheduleID int64) {
	query := "SELECT id FROM " + h.env.SystemJobsTableName() +
		" WHERE status=$1 AND created_by_type=$2 AND created_by_id=$3"

	testutils.SucceedsSoon(t, func() error {
		// Force newly created job to be adopted and verify it succeeds.
		h.server.JobRegistry().(*jobs.Registry).TestingNudgeAdoptionQueue()
		var unused int64
		// Scan fails (and SucceedsSoon retries) while no succeeded job exists.
		return h.sqlDB.DB.QueryRowContext(context.Background(),
			query, jobs.StatusSucceeded, jobs.CreatedByScheduledJobs, scheduleID).Scan(&unused)
	})
}
// createBackupSchedule executes specified "CREATE SCHEDULE FOR BACKUP" query, with
// the provided arguments. Returns the list of created schedules
func (h *testHelper) createBackupSchedule(
	t *testing.T, query string, args ...interface{},
) ([]*jobs.ScheduledJob, error) {
	// Execute statement and get the list of schedule IDs created by the query.
	ctx := context.Background()
	rows, err := h.sqlDB.DB.QueryContext(ctx, query, args...)
	if err != nil {
		return nil, err
	}

	var unusedStr string
	var unusedTS *time.Time
	var schedules []*jobs.ScheduledJob
	for rows.Next() {
		// Only the first column (the schedule ID) is of interest; the
		// remaining columns of the CREATE SCHEDULE output are discarded.
		var id int64
		require.NoError(t, rows.Scan(&id, &unusedStr, &unusedStr, &unusedTS, &unusedStr, &unusedStr))
		// Query system.scheduled_job table and load those schedules.
		datums, cols, err := h.cfg.InternalExecutor.QueryRowExWithCols(
			context.Background(), "sched-load", nil,
			sessiondata.InternalExecutorOverride{User: security.RootUserName()},
			"SELECT * FROM system.scheduled_jobs WHERE schedule_id = $1",
			id,
		)
		require.NoError(t, err)
		require.NotNil(t, datums)

		// Rehydrate the ScheduledJob from the raw datums.
		s := jobs.NewScheduledJob(h.env)
		require.NoError(t, s.InitFromDatums(datums, cols))
		schedules = append(schedules, s)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}

	return schedules, nil
}
// getScheduledBackupStatement unpacks the serialized execution arguments of
// a backup schedule and returns the BACKUP statement stored inside them.
func getScheduledBackupStatement(t *testing.T, arg *jobspb.ExecutionArguments) string {
	var args ScheduledBackupExecutionArgs
	require.NoError(t, pbtypes.UnmarshalAny(arg.Args, &args))
	return args.BackupStatement
}
// userType distinguishes free users from enterprise users in test cases.
type userType bool

const freeUser userType = false
const enterpriseUser userType = true

// String returns a human-readable label for the user type.
func (t userType) String() string {
	if t == enterpriseUser {
		return "enterprise user"
	}
	return "free user"
}
// This test examines serialized representation of backup schedule arguments
// when the scheduled backup statement executes. This test does not concern
// itself with the actual scheduling and the execution of those backups.
func TestSerializesScheduledBackupExecutionArgs(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	th, cleanup := newTestHelper(t)
	defer cleanup()

	type expectedSchedule struct {
		nameRe     string        // regexp the schedule label must match
		backupStmt string        // BACKUP statement serialized into the schedule
		period     time.Duration // expected recurrence period
		runsNow    bool          // schedule expected to have an immediate first run
		shownStmt  string        // redacted statement from SHOW SCHEDULES, if it differs
		paused     bool          // schedule expected to be created paused
	}

	testCases := []struct {
		name              string
		query             string
		queryArgs         []interface{}
		user              userType
		expectedSchedules []expectedSchedule
		errMsg            string
	}{
		{
			name:  "full-cluster",
			query: "CREATE SCHEDULE FOR BACKUP INTO 'nodelocal://0/backup?AWS_SECRET_ACCESS_KEY=neverappears' RECURRING '@hourly'",
			user:  freeUser,
			expectedSchedules: []expectedSchedule{
				{
					nameRe:     "BACKUP .+",
					backupStmt: "BACKUP INTO 'nodelocal://0/backup?AWS_SECRET_ACCESS_KEY=neverappears' WITH detached",
					shownStmt:  "BACKUP INTO 'nodelocal://0/backup?AWS_SECRET_ACCESS_KEY=redacted' WITH detached",
					period:     time.Hour,
				},
			},
		},
		{
			name:  "full-cluster-with-name",
			query: "CREATE SCHEDULE 'my-backup' FOR BACKUP INTO 'nodelocal://0/backup' RECURRING '@hourly'",
			user:  freeUser,
			expectedSchedules: []expectedSchedule{
				{
					nameRe:     "my-backup",
					backupStmt: "BACKUP INTO 'nodelocal://0/backup' WITH detached",
					period:     time.Hour,
				},
			},
		},
		{
			name:  "full-cluster-always",
			query: "CREATE SCHEDULE FOR BACKUP INTO 'nodelocal://0/backup' RECURRING '@hourly' FULL BACKUP ALWAYS",
			user:  freeUser,
			expectedSchedules: []expectedSchedule{
				{
					nameRe:     "BACKUP .+",
					backupStmt: "BACKUP INTO 'nodelocal://0/backup' WITH detached",
					period:     time.Hour,
				},
			},
		},
		// Enterprise users get incremental backups by default: an unpaused
		// daily full schedule plus a paused hourly incremental schedule.
		{
			name:  "full-cluster",
			query: "CREATE SCHEDULE FOR BACKUP INTO 'nodelocal://0/backup' RECURRING '@hourly'",
			user:  enterpriseUser,
			expectedSchedules: []expectedSchedule{
				{
					nameRe:     "BACKUP .*",
					backupStmt: "BACKUP INTO LATEST IN 'nodelocal://0/backup' WITH detached",
					period:     time.Hour,
					paused:     true,
				},
				{
					nameRe:     "BACKUP .+",
					backupStmt: "BACKUP INTO 'nodelocal://0/backup' WITH detached",
					period:     24 * time.Hour,
					runsNow:    true,
				},
			},
		},
		{
			name:  "full-cluster-with-name",
			query: "CREATE SCHEDULE 'my-backup' FOR BACKUP INTO 'nodelocal://0/backup' RECURRING '@hourly'",
			user:  enterpriseUser,
			expectedSchedules: []expectedSchedule{
				{
					nameRe:     "my-backup",
					backupStmt: "BACKUP INTO LATEST IN 'nodelocal://0/backup' WITH detached",
					period:     time.Hour,
					paused:     true,
				},
				{
					nameRe:     "my-backup",
					backupStmt: "BACKUP INTO 'nodelocal://0/backup' WITH detached",
					period:     24 * time.Hour,
					runsNow:    true,
				},
			},
		},
		{
			name:  "full-cluster-always",
			query: "CREATE SCHEDULE FOR BACKUP INTO 'nodelocal://0/backup' RECURRING '@hourly' FULL BACKUP ALWAYS",
			user:  enterpriseUser,
			expectedSchedules: []expectedSchedule{
				{
					nameRe:     "BACKUP .+",
					backupStmt: "BACKUP INTO 'nodelocal://0/backup' WITH detached",
					period:     time.Hour,
				},
			},
		},
		// Incremental backups, revision history, and encryption all require
		// an enterprise license.
		{
			name:   "enterprise-license-required-for-incremental",
			query:  "CREATE SCHEDULE FOR BACKUP INTO 'nodelocal://0/backup' RECURRING '@hourly' FULL BACKUP '@weekly'",
			user:   freeUser,
			errMsg: "use of BACKUP INTO LATEST requires an enterprise license",
		},
		{
			name:   "enterprise-license-required-for-revision-history",
			query:  "CREATE SCHEDULE FOR BACKUP INTO 'nodelocal://0/backup' WITH revision_history RECURRING '@hourly'",
			user:   freeUser,
			errMsg: "use of BACKUP with revision_history requires an enterprise license",
		},
		{
			name:   "enterprise-license-required-for-encryption",
			query:  "CREATE SCHEDULE FOR BACKUP INTO 'nodelocal://0/backup' WITH encryption_passphrase = 'secret' RECURRING '@hourly'",
			user:   freeUser,
			errMsg: "use of BACKUP with encryption requires an enterprise license",
		},
		{
			name:      "full-cluster-with-name-arg",
			query:     `CREATE SCHEDULE $1 FOR BACKUP INTO 'nodelocal://0/backup' WITH revision_history, detached RECURRING '@hourly'`,
			queryArgs: []interface{}{"my_backup_name"},
			user:      enterpriseUser,
			expectedSchedules: []expectedSchedule{
				{
					nameRe:     "my_backup_name",
					backupStmt: "BACKUP INTO LATEST IN 'nodelocal://0/backup' WITH revision_history, detached",
					period:     time.Hour,
					paused:     true,
				},
				{
					nameRe:     "my_backup_name",
					backupStmt: "BACKUP INTO 'nodelocal://0/backup' WITH revision_history, detached",
					period:     24 * time.Hour,
					runsNow:    true,
				},
			},
		},
		{
			name: "multiple-tables-with-encryption",
			user: enterpriseUser,
			query: `
CREATE SCHEDULE FOR BACKUP TABLE system.jobs, system.scheduled_jobs INTO 'nodelocal://0/backup'
WITH encryption_passphrase = 'secret' RECURRING '@weekly'`,
			expectedSchedules: []expectedSchedule{
				{
					nameRe:     "BACKUP .*",
					backupStmt: "BACKUP TABLE system.jobs, system.scheduled_jobs INTO 'nodelocal://0/backup' WITH encryption_passphrase = 'secret', detached",
					shownStmt:  "BACKUP TABLE system.jobs, system.scheduled_jobs INTO 'nodelocal://0/backup' WITH encryption_passphrase = '*****', detached",
					period:     7 * 24 * time.Hour,
				},
			},
		},
		{
			name: "partitioned-backup",
			user: enterpriseUser,
			query: `
CREATE SCHEDULE FOR BACKUP DATABASE system
INTO ('nodelocal://0/backup?COCKROACH_LOCALITY=x%3Dy', 'nodelocal://0/backup2?COCKROACH_LOCALITY=default')
WITH revision_history
RECURRING '1 2 * * *'
FULL BACKUP ALWAYS
WITH SCHEDULE OPTIONS first_run=$1
`,
			queryArgs: []interface{}{th.env.Now().Add(time.Minute)},
			expectedSchedules: []expectedSchedule{
				{
					nameRe: "BACKUP .+",
					backupStmt: "BACKUP DATABASE system INTO " +
						"('nodelocal://0/backup?COCKROACH_LOCALITY=x%3Dy', 'nodelocal://0/backup2?COCKROACH_LOCALITY=default') " +
						"WITH revision_history, detached",
					period: 24 * time.Hour,
				},
			},
		},
		{
			name:   "missing-destination-placeholder",
			query:  `CREATE SCHEDULE FOR BACKUP TABLE t INTO $1 RECURRING '@hourly'`,
			errMsg: "failed to evaluate backup destination paths",
		},
		{
			name:   "missing-encryption-placeholder",
			user:   enterpriseUser,
			query:  `CREATE SCHEDULE FOR BACKUP INTO 'foo' WITH encryption_passphrase=$1 RECURRING '@hourly'`,
			errMsg: "failed to evaluate backup encryption_passphrase",
		},
	}

	for _, tc := range testCases {
		t.Run(fmt.Sprintf("%s-%s", tc.name, tc.user), func(t *testing.T) {
			defer th.clearSchedules(t)

			// Toggle enterprise licensing to match the user type under test.
			if tc.user == freeUser {
				defer utilccl.TestingDisableEnterprise()()
			} else {
				defer utilccl.TestingEnableEnterprise()()
			}

			schedules, err := th.createBackupSchedule(t, tc.query, tc.queryArgs...)

			if len(tc.errMsg) > 0 {
				require.True(t, testutils.IsError(err, tc.errMsg),
					"expected error to match %q, found %q instead", tc.errMsg, err.Error())
				return
			}

			require.NoError(t, err)
			require.Equal(t, len(tc.expectedSchedules), len(schedules))

			// SHOW SCHEDULES must agree with what was created, and is where
			// redaction (shownStmt) is observable.
			shown := th.sqlDB.QueryStr(t, `SELECT id, command->'backup_statement' FROM [SHOW SCHEDULES]`)
			require.Equal(t, len(tc.expectedSchedules), len(shown))
			shownByID := map[int64]string{}
			for _, i := range shown {
				id, err := strconv.ParseInt(i[0], 10, 64)
				require.NoError(t, err)
				shownByID[id] = i[1]
			}

			// Build a map of expected backup statement to expected schedule.
			expectedByName := make(map[string]expectedSchedule)
			for _, s := range tc.expectedSchedules {
				expectedByName[s.backupStmt] = s
			}

			for _, s := range schedules {
				stmt := getScheduledBackupStatement(t, s.ExecutionArgs())
				expectedSchedule, ok := expectedByName[stmt]
				require.True(t, ok, "could not find matching name for %q", stmt)
				require.Regexp(t, regexp.MustCompile(expectedSchedule.nameRe), s.ScheduleLabel())

				expectedShown := fmt.Sprintf("%q", expectedSchedule.backupStmt)
				if expectedSchedule.shownStmt != "" {
					expectedShown = fmt.Sprintf("%q", expectedSchedule.shownStmt)
				}
				require.Equal(t, expectedShown, shownByID[s.ScheduleID()])

				frequency, err := s.Frequency()
				require.NoError(t, err)
				require.EqualValues(t, expectedSchedule.period, frequency, expectedSchedule)

				require.Equal(t, expectedSchedule.paused, s.IsPaused())
				if expectedSchedule.runsNow {
					require.EqualValues(t, th.env.Now().Round(time.Microsecond), s.ScheduledRunTime())
				}
			}
		})
	}
}
// TestScheduleBackup creates backup schedules, forces them to execute, and
// verifies that the produced backups contain exactly the expected tables.
func TestScheduleBackup(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	th, cleanup := newTestHelper(t)
	defer cleanup()

	th.sqlDB.Exec(t, `
CREATE DATABASE db;
USE db;

CREATE TABLE t1(a int);
INSERT INTO t1 values (1), (10), (100);

CREATE TABLE t2(b int);
INSERT INTO t2 VALUES (3), (2), (1);

CREATE TABLE t3(c int);
INSERT INTO t3 VALUES (5), (5), (7);

CREATE DATABASE other_db;
USE other_db;

CREATE TABLE t1(a int);
INSERT INTO t1 values (-1), (10), (-100);
`)

	// We'll be manipulating schedule time via th.env, but we can't fool actual backup
	// when it comes to AsOf time. So, override AsOf backup clause to be the current time.
	th.cfg.TestingKnobs.(*jobs.TestingKnobs).OverrideAsOfClause = func(clause *tree.AsOfClause) {
		expr, err := tree.MakeDTimestampTZ(th.cfg.DB.Clock().PhysicalTime(), time.Microsecond)
		require.NoError(t, err)
		clause.Expr = expr
	}

	type dbTables struct {
		db     string
		tables []string
	}
	// expectBackupTables flattens (db, tables) pairs into the sorted
	// [database, table] rows that SHOW BACKUP is expected to return.
	expectBackupTables := func(dbTbls ...dbTables) [][]string {
		sort.Slice(dbTbls, func(i, j int) bool { return dbTbls[i].db < dbTbls[j].db })
		var res [][]string
		for _, dbt := range dbTbls {
			sort.Strings(dbt.tables)
			for _, tbl := range dbt.tables {
				res = append(res, []string{dbt.db, tbl})
			}
		}
		return res
	}

	// Cluster backups also include a set of system tables.
	expectedSystemTables := make([]string, 0)
	for systemTableName := range GetSystemTablesToIncludeInClusterBackup() {
		expectedSystemTables = append(expectedSystemTables, systemTableName)
	}

	testCases := []struct {
		name         string
		schedule     string
		verifyTables [][]string
	}{
		{
			name:     "cluster-backup",
			schedule: "CREATE SCHEDULE FOR BACKUP INTO $1 RECURRING '@hourly'",
			verifyTables: expectBackupTables(
				dbTables{"db", []string{"t1", "t2", "t3"}},
				dbTables{"other_db", []string{"t1"}},
				dbTables{"system", expectedSystemTables},
			),
		},
		{
			name:         "tables-backup-with-history",
			schedule:     "CREATE SCHEDULE FOR BACKUP db.t2, db.t3 INTO $1 WITH revision_history RECURRING '@hourly' FULL BACKUP ALWAYS",
			verifyTables: expectBackupTables(dbTables{"db", []string{"t2", "t3"}}),
		},
		{
			name:     "table-backup-in-different-dbs",
			schedule: "CREATE SCHEDULE FOR BACKUP db.t1, other_db.t1, db.t3 INTO $1 RECURRING '@hourly' FULL BACKUP ALWAYS",
			verifyTables: expectBackupTables(
				dbTables{"db", []string{"t1", "t3"}},
				dbTables{"other_db", []string{"t1"}},
			),
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			destination := "nodelocal://0/backup/" + tc.name
			schedules, err := th.createBackupSchedule(t, tc.schedule, destination)
			require.NoError(t, err)
			require.LessOrEqual(t, 1, len(schedules))

			// Either 1 or two schedules will be created.
			// One of them (incremental) must be paused.
			var full, inc *jobs.ScheduledJob
			if len(schedules) == 1 {
				full = schedules[0]
			} else {
				require.Equal(t, 2, len(schedules))
				full, inc = schedules[0], schedules[1]
				if full.IsPaused() {
					full, inc = inc, full // Swap: inc should be paused.
				}
				require.True(t, inc.IsPaused())
				require.False(t, full.IsPaused())
				// The full should list incremental as a schedule to unpause.
				args := &ScheduledBackupExecutionArgs{}
				require.NoError(t, pbtypes.UnmarshalAny(full.ExecutionArgs().Args, args))
				require.EqualValues(t, inc.ScheduleID(), args.UnpauseOnSuccess)
			}

			defer func() {
				th.sqlDB.Exec(t, "DROP SCHEDULE $1", full.ScheduleID())
				if inc != nil {
					th.sqlDB.Exec(t, "DROP SCHEDULE $1", inc.ScheduleID())
				}
			}()

			// Force the schedule to execute.
			th.env.SetTime(full.NextRun().Add(time.Second))
			require.NoError(t, th.executeSchedules())

			// Wait for the backup complete.
			th.waitForSuccessfulScheduledJob(t, full.ScheduleID())

			if inc != nil {
				// Once the full backup completes, the incremental one should no longer be paused.
				loadedInc, err := jobs.LoadScheduledJob(
					context.Background(), th.env, inc.ScheduleID(), th.cfg.InternalExecutor, nil)
				require.NoError(t, err)
				require.False(t, loadedInc.IsPaused())
			}

			// Verify backup: resolve the latest backup path, then compare the
			// backed-up tables against the expectation.
			latest, err := ioutil.ReadFile(path.Join(th.iodir, "backup", tc.name, latestFileName))
			require.NoError(t, err)
			backedUp := th.sqlDB.QueryStr(t,
				`SELECT database_name, object_name FROM [SHOW BACKUP $1] WHERE object_type='table' ORDER BY database_name, object_name`,
				fmt.Sprintf("%s/%s", destination, string(latest)))
			require.Equal(t, tc.verifyTables, backedUp)
		})
	}
}
// TestCreateBackupScheduleRequiresAdminRole verifies that a non-admin SQL
// user cannot create backup schedules.
func TestCreateBackupScheduleRequiresAdminRole(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	th, cleanup := newTestHelper(t)
	defer cleanup()

	// Connect as a fresh, unprivileged user.
	th.sqlDB.Exec(t, `CREATE USER testuser`)
	pgURL, cleanupFunc := sqlutils.PGUrl(
		t, th.server.ServingSQLAddr(),
		"TestCreateSchedule-testuser", url.User("testuser"),
	)
	defer cleanupFunc()
	testuser, err := gosql.Open("postgres", pgURL.String())
	require.NoError(t, err)
	defer func() {
		require.NoError(t, testuser.Close())
	}()

	// Creating a schedule as the non-admin user must fail.
	_, err = testuser.Exec("CREATE SCHEDULE FOR BACKUP INTO 'somewhere' RECURRING '@daily'")
	require.Error(t, err)
}
// TestCreateBackupScheduleCollectionOverwrite verifies that scheduling a
// backup into a collection location that already contains backups is
// rejected unless ignore_existing_backups is specified.
func TestCreateBackupScheduleCollectionOverwrite(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	th, cleanup := newTestHelper(t)
	defer cleanup()

	const collectionLocation = "nodelocal://1/collection"

	// Seed the collection location with an existing backup.
	th.sqlDB.Exec(t, `BACKUP INTO $1`, collectionLocation)

	// Expect that trying to normally create a scheduled backup to this location
	// fails.
	th.sqlDB.ExpectErr(t, "backups already created in",
		"CREATE SCHEDULE FOR BACKUP INTO 'nodelocal://1/collection' RECURRING '@daily';")

	// Expect that we can override this option with the ignore_existing_backups
	// flag.
	th.sqlDB.Exec(t, "CREATE SCHEDULE FOR BACKUP INTO 'nodelocal://1/collection' "+
		"RECURRING '@daily' WITH SCHEDULE OPTIONS ignore_existing_backups;")
}
// TestCreateBackupScheduleInExplicitTxnRollback verifies that a CREATE
// SCHEDULE issued inside an explicit transaction is undone by ROLLBACK.
func TestCreateBackupScheduleInExplicitTxnRollback(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	th, cleanup := newTestHelper(t)
	defer cleanup()

	// Sanity check: no schedules exist up front.
	res := th.sqlDB.Query(t, "SELECT id FROM [SHOW SCHEDULES];")
	require.False(t, res.Next())
	require.NoError(t, res.Err())

	th.sqlDB.Exec(t, "BEGIN;")
	th.sqlDB.Exec(t, "CREATE SCHEDULE FOR BACKUP INTO 'nodelocal://1/collection' RECURRING '@daily';")
	th.sqlDB.Exec(t, "ROLLBACK;")

	// After the rollback the schedule must not exist.
	res = th.sqlDB.Query(t, "SELECT id FROM [SHOW SCHEDULES];")
	require.False(t, res.Next())
	require.NoError(t, res.Err())
}
// Normally, we issue backups with AOST set to be the scheduled nextRun.
// But if the schedule time is way in the past, the backup will fail.
// This test verifies that scheduled backups will start working
// (eventually), even after the cluster has been down for a long period.
func TestScheduleBackupRecoversFromClusterDown(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)
	th, cleanup := newTestHelper(t)
	defer cleanup()

	th.sqlDB.Exec(t, `
CREATE DATABASE db;
USE db;
CREATE TABLE t(a int);
INSERT INTO t values (1), (10), (100);
`)

	// loadSchedule reloads a schedule by ID from the system table.
	loadSchedule := func(t *testing.T, id int64) *jobs.ScheduledJob {
		loaded, err := jobs.LoadScheduledJob(
			context.Background(), th.env, id, th.cfg.InternalExecutor, nil)
		require.NoError(t, err)
		return loaded
	}

	advanceNextRun := func(t *testing.T, id int64, delta time.Duration) {
		// Adjust next run by the specified delta (which maybe negative).
		s := loadSchedule(t, id)
		s.SetNextRun(th.env.Now().Add(delta))
		require.NoError(t, s.Update(context.Background(), th.cfg.InternalExecutor, nil))
	}

	// We'll be manipulating schedule time via th.env, but we can't fool actual backup
	// when it comes to AsOf time. So, override AsOf backup clause to be the current time.
	// The returned function removes the override again.
	useRealTimeAOST := func() func() {
		knobs := th.cfg.TestingKnobs.(*jobs.TestingKnobs)
		knobs.OverrideAsOfClause = func(clause *tree.AsOfClause) {
			expr, err := tree.MakeDTimestampTZ(th.cfg.DB.Clock().PhysicalTime(), time.Microsecond)
			require.NoError(t, err)
			clause.Expr = expr
		}
		return func() {
			knobs.OverrideAsOfClause = nil
		}
	}

	// Create backup schedules for this test.
	// Returns schedule IDs for full and incremental schedules, plus a cleanup function.
	createSchedules := func(t *testing.T, name string) (int64, int64, func()) {
		schedules, err := th.createBackupSchedule(t,
			"CREATE SCHEDULE FOR BACKUP INTO $1 RECURRING '*/5 * * * *'",
			"nodelocal://0/backup/"+name)
		require.NoError(t, err)

		// We expect full & incremental schedule to be created.
		require.Equal(t, 2, len(schedules))

		// Order schedules so that the full schedule is the first one
		fullID, incID := schedules[0].ScheduleID(), schedules[1].ScheduleID()
		if schedules[0].IsPaused() {
			fullID, incID = incID, fullID
		}

		// For the initial backup, we need to ensure that AOST is the current time.
		defer useRealTimeAOST()()

		// Force full backup to execute (this unpauses incremental).
		advanceNextRun(t, fullID, -1*time.Minute)
		require.NoError(t, th.executeSchedules())
		th.waitForSuccessfulScheduledJob(t, fullID)

		// Do the same for the incremental.
		advanceNextRun(t, incID, -1*time.Minute)
		require.NoError(t, th.executeSchedules())
		th.waitForSuccessfulScheduledJob(t, incID)

		return fullID,
			incID,
			func() {
				th.sqlDB.Exec(t, "DROP SCHEDULE $1", schedules[0].ScheduleID())
				th.sqlDB.Exec(t, "DROP SCHEDULE $1", schedules[1].ScheduleID())
			}
	}

	// markOldAndSetSchedulesPolicy backdates both schedules by a year and
	// installs the requested on-error policy, simulating a long outage.
	markOldAndSetSchedulesPolicy := func(
		t *testing.T,
		fullID, incID int64,
		onError jobspb.ScheduleDetails_ErrorHandlingBehavior,
	) {
		for _, id := range []int64{fullID, incID} {
			// Pretend we were down for a year.
			s := loadSchedule(t, id)
			s.SetNextRun(s.NextRun().Add(-365 * 24 * time.Hour))
			// Set onError policy to the specified value.
			s.SetScheduleDetails(jobspb.ScheduleDetails{
				OnError: onError,
			})
			require.NoError(t, s.Update(context.Background(), th.cfg.InternalExecutor, nil))
		}
	}

	t.Run("pause", func(t *testing.T) {
		fullID, incID, cleanup := createSchedules(t, "pause")
		defer cleanup()

		markOldAndSetSchedulesPolicy(t, fullID, incID, jobspb.ScheduleDetails_PAUSE_SCHED)
		require.NoError(t, th.executeSchedules())

		// AOST way in the past causes backup planning to fail. We don't need
		// to wait for any jobs, and the schedules should now be paused.
		for _, id := range []int64{fullID, incID} {
			require.True(t, loadSchedule(t, id).IsPaused())
		}
	})

	// metrics exposes the scheduled-backup executor metrics used to count
	// successful executions in the retry/reschedule subtests.
	metrics := func() *jobs.ExecutorMetrics {
		ex, _, err := jobs.GetScheduledJobExecutor(tree.ScheduledBackupExecutor.InternalName())
		require.NoError(t, err)
		require.NotNil(t, ex.Metrics())
		return ex.Metrics().(*backupMetrics).ExecutorMetrics
	}()

	t.Run("retry", func(t *testing.T) {
		fullID, incID, cleanup := createSchedules(t, "retry")
		defer cleanup()

		markOldAndSetSchedulesPolicy(t, fullID, incID, jobspb.ScheduleDetails_RETRY_SOON)
		require.NoError(t, th.executeSchedules())

		// AOST way in the past causes backup planning to fail. We don't need
		// to wait for any jobs, and the schedule nextRun should be advanced
		// a bit in the future.
		for _, id := range []int64{fullID, incID} {
			require.True(t, loadSchedule(t, id).NextRun().Sub(th.env.Now()) > 0)
		}

		// We expect that, eventually, both backups would succeed.
		defer useRealTimeAOST()()
		th.env.AdvanceTime(time.Hour)
		initialSucceeded := metrics.NumSucceeded.Count()
		require.NoError(t, th.executeSchedules())
		testutils.SucceedsSoon(t, func() error {
			delta := metrics.NumSucceeded.Count() - initialSucceeded
			if delta == 2 {
				return nil
			}
			return errors.Newf("expected 2 backup to succeed, got %d", delta)
		})
	})

	t.Run("reschedule", func(t *testing.T) {
		fullID, incID, cleanup := createSchedules(t, "reschedule")
		defer cleanup()

		markOldAndSetSchedulesPolicy(t, fullID, incID, jobspb.ScheduleDetails_RETRY_SCHED)
		require.NoError(t, th.executeSchedules())

		// AOST way in the past causes backup planning to fail. We don't need
		// to wait for any jobs, and the schedule nextRun should be advanced
		// to the next scheduled recurrence.
		for _, id := range []int64{fullID, incID} {
			s := loadSchedule(t, id)
			require.EqualValues(t,
				cronexpr.MustParse(s.ScheduleExpr()).Next(th.env.Now()).Round(time.Microsecond),
				s.NextRun())
		}

		// We expect that, eventually, both backups would succeed.
		defer useRealTimeAOST()()
		th.env.AdvanceTime(25 * time.Hour) // Go to next day to guarantee daily triggers.
		initialSucceeded := metrics.NumSucceeded.Count()
		require.NoError(t, th.executeSchedules())
		testutils.SucceedsSoon(t, func() error {
			delta := metrics.NumSucceeded.Count() - initialSucceeded
			if delta == 2 {
				return nil
			}
			return errors.Newf("expected 2 backup to succeed, got %d", delta)
		})
	})
}
|
/*
* Flow CLI
*
* Copyright 2019-2021 Dapper Labs, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package services
import (
"testing"
"github.com/onflow/flow-go-sdk/crypto"
"github.com/stretchr/testify/assert"
"github.com/onflow/flow-cli/pkg/flowcli/output"
"github.com/onflow/flow-cli/pkg/flowcli/project"
"github.com/onflow/flow-cli/tests"
)
// TestKeys exercises the key-generation service: random generation,
// deterministic generation from a seed, and the error paths for a too-short
// seed and an unknown signature algorithm.
func TestKeys(t *testing.T) {
	mock := &tests.MockGateway{}

	proj, err := project.Init(crypto.ECDSA_P256, crypto.SHA3_256)
	assert.NoError(t, err)

	keys := NewKeys(mock, proj, output.NewStdoutLogger(output.InfoLog))

	t.Run("Generate Keys", func(t *testing.T) {
		key, err := keys.Generate("", "ECDSA_P256")
		assert.NoError(t, err)

		// A generated P-256 private key renders as "0x" + 64 hex chars = 66 bytes.
		assert.Equal(t, len(key.PrivateKey.String()), 66)
	})

	t.Run("Generate Keys with seed", func(t *testing.T) {
		key, err := keys.Generate("aaaaaaaaaaaaaaaaaaaaaaannndddddd_its_gone", "ECDSA_P256")

		assert.NoError(t, err)
		// The same seed must always derive the same private key.
		assert.Equal(t, key.PrivateKey.String(), "0x134f702d0872dba9c7aea15498aab9b2ffedd5aeebfd8ac3cf47c591f0d7ce52")
	})

	t.Run("Fail generate keys, too short seed", func(t *testing.T) {
		_, err := keys.Generate("im_short", "ECDSA_P256")

		assert.Equal(t, err.Error(), "failed to generate private key: crypto: insufficient seed length 8, must be at least 32 bytes for ECDSA_P256")
	})

	t.Run("Fail generate keys, invalid sig algo", func(t *testing.T) {
		_, err := keys.Generate("", "JUSTNO")

		assert.Equal(t, err.Error(), "invalid signature algorithm: JUSTNO")
	})
}
|
package main
import (
"fmt"
"time"
"github.com/aliyun/aliyun-datahub-sdk-go/datahub"
)
// listProjects fetches all projects visible to the client and prints them;
// failures are printed instead of returned.
func listProjects(dh datahub.DataHubApi) {
	result, listErr := dh.ListProject()
	if listErr != nil {
		fmt.Println(listErr)
		return
	}
	fmt.Println(result)
}
// getProject prints the named project's details along with its
// last-modification time; failures are printed instead of returned.
func getProject(name string, dh datahub.DataHubApi) {
	proj, getErr := dh.GetProject(name)
	if getErr != nil {
		fmt.Println(getErr)
		return
	}
	fmt.Println(proj)
	fmt.Println("last modify time ", proj.LastModifyTime)
}
// createProject creates a project with the given name and comment; a
// failure, if any, is printed.
func createProject(projectName, comment string, dh datahub.DataHubApi) {
	_, createErr := dh.CreateProject(projectName, comment)
	if createErr != nil {
		fmt.Println(createErr.Error())
	}
}
// updateProject updates the project's comment. The SDK call's error is now
// checked and reported — the original discarded it silently, which was
// inconsistent with every other helper in this file.
func updateProject(projectName, comment string, dh datahub.DataHubApi) {
	if _, err := dh.UpdateProject(projectName, comment); err != nil {
		fmt.Println(err.Error())
		return
	}
}
// deleteProject deletes the named project and prints the outcome. The
// success message is printed only when the deletion actually succeeded —
// the original fell through and reported success even after printing an
// error.
func deleteProject(projectName string, dh datahub.DataHubApi) {
	if _, err := dh.DeleteProject(projectName); err != nil {
		fmt.Println(err.Error())
		return
	}
	fmt.Println("del " + projectName + " suc")
}
// createTupleTopic creates a TUPLE topic with a five-field schema (bigint,
// timestamp, string, double, boolean), 3 shards and a 7 day lifecycle, then
// waits for the shards to become ready (timeout argument is 1 — unit per
// SDK, presumably seconds; confirm against SDK docs).
func createTupleTopic(projectName, topicName string, dh datahub.DataHubApi) {
	recordSchema := datahub.NewRecordSchema()
	recordSchema.AddField(datahub.Field{Name: "bigint_field", Type: datahub.BIGINT}).
		AddField(datahub.Field{Name: "timestamp_field", Type: datahub.TIMESTAMP}).
		AddField(datahub.Field{Name: "string_field", Type: datahub.STRING}).
		AddField(datahub.Field{Name: "double_field", Type: datahub.DOUBLE}).
		AddField(datahub.Field{Name: "boolean_field", Type: datahub.BOOLEAN})

	_, err := dh.CreateTupleTopic(projectName, topicName, "go sdk test topic", 3, 7, recordSchema)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("create topic [" + topicName + "] suc")

	// Shards take a moment to come online after topic creation.
	if ready := dh.WaitAllShardsReadyWithTime(projectName, topicName, 1); ready {
		fmt.Printf("all shard ready? %v\n", ready)
	}
}
// createBlobTopic creates a BLOB topic with 3 shards and a 7 day lifecycle;
// failures are printed instead of returned.
func createBlobTopic(projectName, topicName string, dh datahub.DataHubApi) {
	if _, err := dh.CreateBlobTopic(projectName, topicName, "go sdk test topic", 3, 7); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("create topic [" + topicName + "] suc")
}
// getTopic prints the details of the named topic; failures are printed
// instead of returned.
func getTopic(projectName, topicName string, dh datahub.DataHubApi) {
	topicInfo, getErr := dh.GetTopic(projectName, topicName)
	if getErr != nil {
		fmt.Println(getErr)
		return
	}
	fmt.Println(topicInfo)
}
// listTopics prints every topic belonging to the project; failures are
// printed instead of returned.
func listTopics(projectName string, dh datahub.DataHubApi) {
	topicList, listErr := dh.ListTopic(projectName)
	if listErr != nil {
		fmt.Println(listErr)
		return
	}
	fmt.Println(topicList)
}
// updateTopic updates the topic's comment and prints the outcome.
// NOTE(review): the lifecycle parameter is accepted but never used — the
// SDK call here only updates the comment; confirm whether a lifecycle
// update was intended.
func updateTopic(projectName, topicName string, lifecycle int, comment string, dh datahub.DataHubApi) {
	_, err := dh.UpdateTopic(projectName, topicName, comment)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("update %s suc\n", topicName)
}
// deleteTopic removes a topic and reports the outcome.
func deleteTopic(projectName, topicName string, dh datahub.DataHubApi) {
	if _, err := dh.DeleteTopic(projectName, topicName); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("del %s suc\n", topicName)
}
// listShards prints every shard of the topic, one per line.
func listShards(projectName, topicName string, dh datahub.DataHubApi) {
	resp, err := dh.ListShard(projectName, topicName)
	if err != nil {
		fmt.Println(err)
		return
	}
	for i := range resp.Shards {
		fmt.Println(resp.Shards[i])
	}
}
// mergeShard merges two adjacent shards and prints the resulting shard info.
func mergeShard(projectName, topicName, shardId, adjShardId string, dh datahub.DataHubApi) {
	merged, err := dh.MergeShard(projectName, topicName, shardId, adjShardId)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(merged)
}
// splitShard splits one shard at the given key and prints the new shards.
func splitShard(projectName, topicName, shardId, splitKey string, dh datahub.DataHubApi) {
	resp, err := dh.SplitShardBySplitKey(projectName, topicName, shardId, splitKey)
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, s := range resp.NewShards {
		fmt.Println(s)
	}
}
// getCursor fetches a cursor of the given type (anchored at systemTime when
// the cursor type requires one) for a shard and prints it.
func getCursor(projectName, topicName, shardId string, ct datahub.CursorType, systemTime int64, dh datahub.DataHubApi) {
	c, err := dh.GetCursor(projectName, topicName, shardId, ct, systemTime)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(c)
}
// putTupleRecords writes three demo tuple records, one to each of shards
// 0-2, using the topic's registered schema. record1 sets values with raw Go
// types while record2/record3 use the SDK wrapper types (datahub.Bigint
// etc.) — presumably both forms are accepted by SetValue*; confirm against
// the SDK documentation.
func putTupleRecords(projectName, topicName string, dh datahub.DataHubApi) {
	// The topic's schema is required to construct tuple records.
	topic, err := dh.GetTopic(projectName, topicName)
	if err != nil {
		fmt.Println(err)
		return
	}
	records := make([]datahub.IRecord, 3)
	record1 := datahub.NewTupleRecord(topic.RecordSchema, 0)
	record1.ShardId = "0"
	record1.SetValueByIdx(0, 1)
	// NOTE(review): a uint is written into the TIMESTAMP field here, while
	// the other records use datahub.Timestamp — verify both are equivalent.
	record1.SetValueByIdx(1, uint(123456))
	record1.SetValueByName("string_field", "TEST")
	record1.SetValueByName("double_field", 1.0)
	record1.SetValueByIdx(4, true)
	records[0] = record1
	record2 := datahub.NewTupleRecord(topic.RecordSchema, 0)
	record2.ShardId = "1"
	record2.SetValueByIdx(0, datahub.Bigint(2))
	record2.SetValueByIdx(1, datahub.Timestamp(123456))
	record2.SetValueByName("string_field", datahub.String("TEST2"))
	record2.SetValueByName("double_field", datahub.Double(1.0))
	record2.SetValueByIdx(4, datahub.Boolean(true))
	records[1] = record2
	record3 := datahub.NewTupleRecord(topic.RecordSchema, 0)
	record3.ShardId = "2"
	record3.SetValueByIdx(0, datahub.Bigint(3))
	record3.SetValueByIdx(1, datahub.Timestamp(133456))
	record3.SetValueByName("string_field", datahub.String("TEST3"))
	record3.SetValueByName("double_field", datahub.Double(1.0))
	record3.SetValueByIdx(4, datahub.Boolean(true))
	records[2] = record3
	result, err := dh.PutRecords(projectName, topicName, records)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(result)
}
// getTupleRecords reads up to 10 tuple records from one shard, starting at
// the OLDEST cursor, and prints each one.
func getTupleRecords(projectName, topicName, shardId string, dh datahub.DataHubApi) {
	topic, err := dh.GetTopic(projectName, topicName)
	if err != nil {
		fmt.Println(err)
		return
	}
	cursor, err := dh.GetCursor(projectName, topicName, shardId, datahub.OLDEST)
	if err != nil {
		fmt.Println(err)
		return
	}
	resp, err := dh.GetTupleRecords(projectName, topicName, shardId, cursor.Cursor, 10, topic.RecordSchema)
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, rec := range resp.Records {
		if tuple, ok := rec.(*datahub.TupleRecord); ok {
			// process the tuple record
			fmt.Println(tuple)
		}
	}
}
// putBlobRecords writes three demo blob records, one to each of shards 0-2.
func putBlobRecords(projectName, topicName string, dh datahub.DataHubApi) {
	payloads := []string{"blob test1", "blob test2", "blob test3"}
	records := make([]datahub.IRecord, 0, len(payloads))
	for i, p := range payloads {
		r := datahub.NewBlobRecord([]byte(p), 0)
		r.ShardId = fmt.Sprintf("%d", i)
		records = append(records, r)
	}
	result, err := dh.PutRecords(projectName, topicName, records)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(result)
}
// getBlobRecords reads up to 10 blob records from one shard, starting at the
// OLDEST cursor, and prints each one.
func getBlobRecords(projectName, topicName, shardId string, dh datahub.DataHubApi) {
	cursor, err := dh.GetCursor(projectName, topicName, shardId, datahub.OLDEST)
	if err != nil {
		fmt.Println(err)
		return
	}
	resp, err := dh.GetBlobRecords(projectName, topicName, shardId, cursor.Cursor, 10)
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, rec := range resp.Records {
		if blob, ok := rec.(*datahub.BlobRecord); ok {
			// process the blob record
			fmt.Println(blob)
		}
	}
}
// createSubscription creates a subscription on the topic and returns its id,
// or "" when creation fails. The original dereferenced subId.SubId on the
// error path as well, which panics if the SDK returns a nil result on error.
func createSubscription(projectName, topicName, comment string, dh datahub.DataHubApi) string {
	subId, err := dh.CreateSubscription(projectName, topicName, comment)
	if err != nil {
		fmt.Println(err)
		return ""
	}
	fmt.Println("create subscription, id: " + subId.SubId)
	return subId.SubId
}
// updateSubscription changes a subscription's comment and reports the result.
func updateSubscription(projectName, topicName, subId, comment string, dh datahub.DataHubApi) {
	if _, err := dh.UpdateSubscription(projectName, topicName, subId, comment); err != nil {
		fmt.Println("update subscription error: " + err.Error())
		return
	}
	fmt.Println("update subscription, id: " + subId)
}
// updateSubscriptionState sets a subscription's state and reports the result.
func updateSubscriptionState(projectName, topicName, subId string, state datahub.SubscriptionState, dh datahub.DataHubApi) {
	if _, err := dh.UpdateSubscriptionState(projectName, topicName, subId, state); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("update subscription state, id: " + subId)
}
// getSubscription fetches and prints one subscription, or the error.
func getSubscription(projectName, topicName, subId string, dh datahub.DataHubApi) {
	if sub, err := dh.GetSubscription(projectName, topicName, subId); err != nil {
		fmt.Println(err)
	} else {
		fmt.Println(sub)
	}
}
// deleteSubscription removes a subscription and reports the outcome.
func deleteSubscription(projectName, topicName, subId string, dh datahub.DataHubApi) {
	if _, err := dh.DeleteSubscription(projectName, topicName, subId); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("del subscription [" + subId + "] suc")
}
// listSubscriptions prints the first page (up to 5) of the topic's
// subscriptions.
func listSubscriptions(projectName, topicName string, dh datahub.DataHubApi) {
	resp, err := dh.ListSubscription(projectName, topicName, 1, 5)
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, sub := range resp.Subscriptions {
		fmt.Println(sub)
	}
}
// main walks the whole DataHub workflow end-to-end: project CRUD, tuple and
// blob topic CRUD, record put/get, shard split/merge, and the subscription
// lifecycle, then tears everything down. Replace the placeholder credentials
// and endpoint before running. Statement order matters: topics must exist
// before records are written, and shards need time to settle before
// split/merge (hence the Sleeps).
func main() {
	accessId := "your access id"
	accessKey := "your access key"
	endpoint := "the datahub server endpoint"
	projectName := "your project name"
	dh := datahub.New(accessId, accessKey, endpoint)
	// list projects
	listProjects(dh)
	// create project
	createProject(projectName, "comment", dh)
	// get project
	getProject(projectName, dh)
	// update project
	updateProject(projectName, "new comment", dh)
	// get project
	getProject(projectName, dh)
	// create tuple topic
	createTupleTopic(projectName, "go_sdk_tuple_topic_test_v2", dh)
	// create blob topic
	createBlobTopic(projectName, "go_sdk_blob_topic_test_v2", dh)
	// list topics
	listTopics(projectName, dh)
	// get topic
	getTopic(projectName, "go_sdk_tuple_topic_test_v2", dh)
	// list shards
	listShards(projectName, "go_sdk_tuple_topic_test_v2", dh)
	// update topic
	updateTopic(projectName, "go_sdk_tuple_topic_test_v2", 5, "update test", dh)
	getTopic(projectName, "go_sdk_tuple_topic_test_v2", dh)
	// put records
	putTupleRecords(projectName, "go_sdk_tuple_topic_test_v2", dh)
	putBlobRecords(projectName, "go_sdk_blob_topic_test_v2", dh)
	// get records
	fmt.Println("=======================")
	getTupleRecords(projectName, "go_sdk_tuple_topic_test_v2", "0", dh)
	fmt.Println("=======================")
	getBlobRecords(projectName, "go_sdk_blob_topic_test_v2", "0", dh)
	fmt.Println("=======================")
	// split shard (give the service a moment before shard management calls)
	time.Sleep(time.Second * 5)
	splitShard(projectName, "go_sdk_tuple_topic_test_v2", "1", "88888888888888888888888888888888", dh)
	listShards(projectName, "go_sdk_tuple_topic_test_v2", dh)
	// merge shard
	time.Sleep(time.Second * 5)
	mergeShard(projectName, "go_sdk_tuple_topic_test_v2", "3", "4", dh)
	listShards(projectName, "go_sdk_tuple_topic_test_v2", dh)
	// create subscription
	subId := createSubscription(projectName, "go_sdk_tuple_topic_test_v2", "comment", dh)
	// get subscription
	getSubscription(projectName, "go_sdk_tuple_topic_test_v2", subId, dh)
	// update subscription
	updateSubscription(projectName, "go_sdk_tuple_topic_test_v2", subId, "new comment", dh)
	// list subscriptions
	listSubscriptions(projectName, "go_sdk_tuple_topic_test_v2", dh)
	// update subscription state
	updateSubscriptionState(projectName, "go_sdk_tuple_topic_test_v2", subId, datahub.SUB_OFFLINE, dh)
	// get subscription
	getSubscription(projectName, "go_sdk_tuple_topic_test_v2", subId, dh)
	//delete subscription
	deleteSubscription(projectName, "go_sdk_tuple_topic_test_v2", subId, dh)
	// delete topic
	deleteTopic(projectName, "go_sdk_tuple_topic_test_v2", dh)
	deleteTopic(projectName, "go_sdk_blob_topic_test_v2", dh)
	//delete project
	deleteProject(projectName, dh)
}
|
package main
import (
	"log"
	"net/http"

	"github.com/gorilla/mux"
	"github.com/muslim-teachings/api-orchestrator/src/main/controllers"
	// "github.com/muslim-teachings/api-orchestrator/src/main/controllers"
	// "github.com/muslim-teachings/api-orchestrator/src/main/middleware"
)
// HTTP method names used when registering routes on the router.
const (
	// POST handlers
	POST = "POST"
	// PUT handlers
	PUT = "PUT"
	// GET handlers
	GET = "GET"
	// DELETE handlers
	DELETE = "DELETE"
)
// main registers the API routes on a gorilla/mux router and serves them on
// port 9000. (An earlier comment claimed the default Gin router; this is
// gorilla/mux.)
func main() {
	router := mux.NewRouter()
	// router.NotFoundHandler = Handle404()
	router.HandleFunc("/teachings", controllers.GetTeachings).Methods(GET)
	router.HandleFunc("/quran", controllers.GetQuran).Methods(GET)
	// ListenAndServe only returns on failure; the original silently
	// discarded that error.
	log.Fatal(http.ListenAndServe(":9000", router))
}
|
package docs
import (
"github.com/gregoryv/draw"
"github.com/gregoryv/draw/design"
"github.com/gregoryv/draw/shape"
)
// ExampleClassDiagram assembles the class diagram of the draw, design and
// shape packages. The call order is significant: every shape must be Placed
// before it can serve as an anchor for RightOf/Below or for the alignment
// helpers, and shape.Move adjusts a shape after alignment.
func ExampleClassDiagram() *design.ClassDiagram {
	var (
		d        = design.NewClassDiagram()
		record   = d.Struct(shape.Record{})
		arrow    = d.Struct(shape.Arrow{})
		line     = d.Struct(shape.Line{})
		circle   = d.Struct(shape.Circle{})
		diaarrow = d.Struct(shape.Diamond{})
		triangle = d.Struct(shape.Triangle{})
		shapE    = d.Interface((*shape.Shape)(nil))
	)
	d.HideRealizations()
	var (
		fnt      = d.Struct(shape.Font{})
		style    = d.Struct(draw.Style{})
		seqdia   = d.Struct(design.SequenceDiagram{})
		classdia = d.Struct(design.ClassDiagram{})
		dia      = d.Struct(design.Diagram{})
		aligner  = d.Struct(shape.Aligner{})
		adj      = d.Struct(shape.Adjuster{})
		rel      = d.Struct(design.Relation{})
	)
	// NOTE(review): HideRealizations was already called above; presumably
	// the repeat covers the structs added since — confirm against the draw
	// package documentation.
	d.HideRealizations()
	d.Place(shapE).At(220, 20)
	d.Place(record).At(20, 120)
	d.Place(line).Below(shapE, 90)
	d.VAlignCenter(shapE, line)
	d.Place(arrow).RightOf(line, 90)
	d.Place(circle).RightOf(shapE, 280)
	d.Place(diaarrow).Below(circle)
	d.Place(triangle).Below(diaarrow)
	d.HAlignBottom(record, arrow, line)
	shape.Move(line, 30, 30)
	d.Place(fnt).Below(record, 170)
	d.Place(style).RightOf(fnt, 90)
	d.VAlignCenter(shapE, line, style)
	d.VAlignCenter(record, fnt)
	d.Place(rel).Below(line, 80)
	d.Place(dia).RightOf(style, 90)
	d.Place(aligner).RightOf(dia, 80)
	d.HAlignCenter(fnt, style, dia, aligner)
	d.Place(adj).Below(fnt, 70)
	d.Place(seqdia).Below(aligner, 90)
	d.Place(classdia).Below(dia, 90)
	d.VAlignCenter(dia, classdia)
	d.HAlignBottom(classdia, seqdia)
	d.SetCaption("Figure 1. Class diagram of design and design.shape packages")
	return d
}
|
package scaffold
import (
"bytes"
"fmt"
"github.com/shurcooL/httpfs/vfsutil"
log "github.com/sirupsen/logrus"
tmpl "github.com/snowdrop/generator/pkg/template"
"io/ioutil"
"os"
"path"
"path/filepath"
"regexp"
"strings"
"text/template"
)
// Constants controlling template placement and version selection.
const (
	// dummyDirName is the placeholder directory in template paths that gets
	// replaced by the project's package path at generation time.
	dummyDirName = "dummy"
	// allVersionsSelector registers a template for every Spring Boot version.
	allVersionsSelector = "_all_"
)
var (
	// templates stores parsed templates keyed by name, then by version. A
	// template registered without a version applies to all versions.
	templates = make(templateRegistry)
	// simplifiedVersionRegexp captures "major.minor(.patch)" from a version
	// string; group 1 is the simplified version, group 2 the major version.
	// NOTE(review): the dots between the digit groups are unescaped, so
	// they match any character — confirm whether "\\." was intended.
	simplifiedVersionRegexp = regexp.MustCompile("^((\\d+.\\d+)(.\\d+)?)")
)
// versionRegistry maps a version string to the templates registered under it.
type versionRegistry map[string][]*template.Template
// templateRegistry maps a template name to its per-version template sets.
type templateRegistry map[string]versionRegistry
// getTemplatesFor returns the templates registered under the given version,
// or nil when none exist.
func (vr versionRegistry) getTemplatesFor(version string) []*template.Template {
	tmpls := vr[version]
	return tmpls
}
// addTemplate parses the asset at path and registers the resulting template
// under the given version. The asset path doubles as the template's name.
func (vr versionRegistry) addTemplate(version, path string) error {
	// Read the template's content from the embedded filesystem.
	data, err := vfsutil.ReadFile(tmpl.Assets, path)
	if err != nil {
		return err
	}
	// string(data) replaces the original's needless bytes.Buffer round-trip.
	t, err := template.New(path).Parse(string(data))
	if err != nil {
		return err
	}
	// Only touch the registry once parsing has succeeded.
	templates := vr.getTemplatesFor(version)
	if templates == nil {
		templates = make([]*template.Template, 0, 20)
	}
	vr[version] = append(templates, t)
	return nil
}
// getTemplatesFor resolves the template set for a template name and Spring
// Boot version, trying in order: the exact simplified "major.minor.patch"
// version, the "major.minor" major version, then the catch-all default. The
// second return value is the version that was actually matched ("" when the
// default was used or the name is unknown).
func (tr templateRegistry) getTemplatesFor(name, version string) ([]*template.Template, string) {
	log.Infof("Retrieving templates for project template '%s' with version '%s'", name, version)
	// extract simplified Spring Boot version from project
	simplifiedVersion := allVersionsSelector
	majorVersion := ""
	matches := simplifiedVersionRegexp.FindStringSubmatch(version)
	if matches != nil {
		// group 1 is "major.minor(.patch)", group 2 is "major.minor"
		simplifiedVersion = matches[1]
		majorVersion = matches[2]
	}
	// first check if we have templates for this version
	effectiveVersion := simplifiedVersion
	if versions, ok := tr[name]; ok {
		// NOTE: this local shadows the package-level `templates` registry.
		templates := versions.getTemplatesFor(simplifiedVersion)
		if templates == nil {
			log.Infof("No templates were found for exact version '%s' (converted to simplified version: '%s'), attempting major version '%s'", version, simplifiedVersion, majorVersion)
			templates = versions.getTemplatesFor(majorVersion)
			effectiveVersion = majorVersion
			if templates == nil {
				log.Infof("No templates were found for major version '%s', attempting default version", majorVersion)
				templates = versions.getTemplatesFor(allVersionsSelector)
				effectiveVersion = "" // if we used the default version selector, return empty string
			}
		}
		return templates, effectiveVersion
	}
	return nil, ""
}
// addTemplate registers the asset at path under the name and version encoded
// in the path itself.
func (tr templateRegistry) addTemplate(path string) error {
	// first, extract name and version from path
	name, version := extractNameAndVersion(path)
	// Look up (or lazily create) the version map for this template name.
	// The original indexed the package-level `templates` variable here; a
	// method on tr must operate on its own receiver (they happen to be the
	// same map today, which masked the bug).
	versions, ok := tr[name]
	if !ok {
		versions = make(versionRegistry)
		tr[name] = versions
	}
	log.Debugf("Adding template %s, version: %s, path: %s", name, version, path)
	return versions.addTemplate(version, path)
}
// extractNameAndVersion derives the template name and version from an asset
// path of the form /<name>/<version-or-file>/... . When the second path
// element does not look like a version, the template applies to all versions.
// NOTE(review): the path is split on filepath.Separator, but paths produced
// by vfsutil.Walk are '/'-separated — on Windows this would split wrongly;
// confirm the intended platforms.
func extractNameAndVersion(path string) (name, version string) {
	split := strings.Split(path, string(filepath.Separator))
	name = split[1] // split[0] is empty because path starts with a separator
	potentialVersion := split[2]
	// check if the second hierarchy level match a version
	if simplifiedVersionRegexp.MatchString(potentialVersion) {
		version = potentialVersion
	} else {
		// otherwise, use the all version selector as version
		version = allVersionsSelector
	}
	return name, version
}
func CollectVfsTemplates() {
walkFn := func(path string, fi os.FileInfo, err error) error {
if err != nil {
log.Printf("can't stat file %s: %v\n", path, err)
return err
}
if fi.IsDir() {
return nil
}
return templates.addTemplate(path)
}
errW := vfsutil.Walk(tmpl.Assets, "/", walkFn)
if errW != nil {
panic(errW)
}
}
// ParseSelectedTemplate renders every template registered for the project's
// template name and Spring Boot version into dir/outDir, substituting the
// "dummy" path placeholder with the project's package path. It returns the
// effective template version that was used.
// Fixes over the original: the converted path (not the pre-conversion one)
// is logged, a file is no longer written when template execution failed, and
// MkdirAll errors are reported instead of ignored.
func ParseSelectedTemplate(project *Project, dir string, outDir string) (string, error) {
	templatesFor, effectiveVersion := templates.getTemplatesFor(project.Template, project.SpringBootVersion)
	if templatesFor == nil {
		return effectiveVersion, fmt.Errorf("'%s' template is not supported for '%s' Spring Boot version", project.Template, project.SpringBootVersion)
	}
	for _, t := range templatesFor {
		log.Infof("Processed template : %s", t.Name())
		var b bytes.Buffer
		// Enrich project with dependencies if they exist
		if strings.Contains(t.Name(), "pom.xml") && project.Modules != nil {
			addDependenciesToModule(config.Modules, project)
		}
		// Remove duplicate's dependencies from modules
		project.Dependencies = RemoveDuplicates(project.Modules)
		if log.GetLevel() == log.InfoLevel {
			for _, dep := range project.Dependencies {
				log.Infof("Dependency : %s-%s-%s", dep.GroupId, dep.ArtifactId, dep.Version)
			}
		}
		// Use template to generate the content
		if err := t.Execute(&b, project); err != nil {
			// Skip the write: the original wrote the (empty/partial) buffer
			// even after a failed Execute.
			log.Error(err.Error())
			continue
		}
		// Convert Path: replace the "dummy" placeholder with the package path
		tFileName := t.Name()
		pathF := strings.Join([]string{dir, outDir, path.Dir(tFileName)}, "/")
		log.Debugf("## Path : %s", pathF)
		pathConverted := strings.Replace(pathF, dummyDirName, convertPackageToPath(project.PackageName), -1)
		log.Debugf("Path converted: %s", pathConverted)
		// Convert FileName
		fileName := strings.Join([]string{dir, outDir, tFileName}, "/")
		log.Debugf("## File name : %s", fileName)
		fileNameConverted := strings.Replace(fileName, dummyDirName, convertPackageToPath(project.PackageName), -1)
		log.Debugf("File name converted : %s", fileNameConverted)
		// Create missing folders
		log.Debugf("Path to generated file : %s", pathConverted)
		if err := os.MkdirAll(pathConverted, os.ModePerm); err != nil {
			log.Error(err.Error())
			continue
		}
		// Content generated
		log.Debugf("Content generated : %s", b.Bytes())
		if err := ioutil.WriteFile(fileNameConverted, b.Bytes(), 0644); err != nil {
			log.Error(err.Error())
		}
	}
	log.Infof("Enriched project %+v", project)
	return effectiveVersion, nil
}
// RemoveDuplicates flattens the dependencies of all modules into one slice,
// keeping only the first occurrence of each groupId-artifactId-version.
func RemoveDuplicates(mods []Module) []Dependency {
	seen := make(map[string]bool)
	deduped := []Dependency{}
	for _, mod := range mods {
		for _, dep := range mod.Dependencies {
			gav := strings.Join([]string{dep.GroupId, dep.ArtifactId, dep.Version}, "-")
			if seen[gav] {
				continue
			}
			seen[gav] = true
			deduped = append(deduped, dep)
		}
	}
	return deduped
}
// addDependenciesToModule copies the dependency lists of each configured
// module onto the matching project module, provided the configured module is
// available for the project's requested Spring Boot version.
func addDependenciesToModule(configModules []Module, project *Project) {
	sbVersion := project.SpringBootVersion
	for _, cfg := range configModules {
		for i := range project.Modules {
			pm := &project.Modules[i]
			if cfg.Name != pm.Name {
				continue
			}
			// check if the module is available for the project's requested BOM
			if !cfg.IsAvailableFor(sbVersion) {
				log.Infof("Ignoring module %s matching an existing module not available for SB version %s", pm.Name, sbVersion)
				continue
			}
			log.Infof("Match found for project's module %s and modules %+v ", pm.Name, cfg)
			pm.Dependencies = cfg.Dependencies
			pm.DependencyManagement = cfg.DependencyManagement
		}
	}
}
// convertPackageToPath turns a dotted package name into a slash path.
func convertPackageToPath(p string) string {
	converted := strings.Replace(p, ".", "/", -1)
	log.Debugf("Converted path : %s", converted)
	return converted
}
|
package main
import (
"fmt"
//"time"
// mysql connector
_ "github.com/go-sql-driver/mysql"
sqlx "github.com/jmoiron/sqlx"
)
// Connection and bootstrap constants for the assignment database.
// NOTE(review): credentials are hard-coded for this coursework sample; they
// must not be used in any real deployment.
const (
	StudentID = "18307130112"
	User = "root"
	Password = "123456"
	DBName = "ass3"
	// AdminInit / AdminInitPassword form the activation account required at
	// startup (see main).
	AdminInit = "1"
	AdminInitPassword = "a"
)
// Library wraps the MySQL connection used by the library management system.
type Library struct {
	// db is opened by ConnectDB and used by every other method.
	db *sqlx.DB
}
// ConnectDB opens the MySQL connection for this library; it panics when the
// driver rejects the DSN.
func (lib *Library) ConnectDB() {
	dsn := fmt.Sprintf("%s:%s@tcp(127.0.0.1:3306)/%s", User, Password, DBName)
	conn, err := sqlx.Open("mysql", dsn)
	if err != nil {
		panic(err)
	}
	lib.db = conn
}
// mustExecute runs each SQL statement in order, panicking on the first error.
func mustExecute(db *sqlx.DB, SQLs []string) {
	for _, stmt := range SQLs {
		if _, err := db.Exec(stmt); err != nil {
			panic(err)
		}
	}
}
// CreateTables drops any existing tables and recreates the five tables used
// by the system (Admins, Student, Book, BorrowedBook, BorrowHistory).
func (lib *Library) CreateTables() error {
	drops := []string{
		"DROP TABLE IF EXISTS Admins",
		"DROP TABLE IF EXISTS Student",
		"DROP TABLE IF EXISTS Book",
		"DROP TABLE IF EXISTS BorrowedBook",
		"DROP TABLE IF EXISTS BorrowHistory",
	}
	creates := []string{
		"CREATE TABLE IF NOT EXISTS Admins (id CHAR(11) NOT NULL,password CHAR(15) NOT NULL, primary key(id));",
		"CREATE TABLE IF NOT EXISTS Student (id CHAR(11), password CHAR(15), borrowright INT, primary key(id));",
		"CREATE TABLE IF NOT EXISTS Book (title CHAR(32), author CHAR(20), isbn CHAR(13), bookid CHAR(15), borrowflag INT, primary key(bookid));",
		"CREATE TABLE IF NOT EXISTS BorrowedBook (isbn CHAR(13), studentid CHAR(11), bookid CHAR(15), rettime DATE, extendtimes INT, primary key(bookid));",
		"CREATE TABLE IF NOT EXISTS BorrowHistory (isbn CHAR(13), studentid CHAR(11), bookid CHAR(15), rettime DATE, brwtime DATE);",
	}
	mustExecute(lib.db, drops)
	mustExecute(lib.db, creates)
	return nil
}
// init wipes every table and seeds a small demo data set: 7 book copies,
// 4 students, 2 admins (in addition to the activation account), a set of
// current loans and a borrow history. Insert errors are deliberately
// discarded — this is a best-effort test fixture.
func (lib *Library) init() {
	mustExecute(lib.db,[]string{"DELETE FROM Admins","DELETE FROM Student","DELETE FROM Book","DELETE FROM BorrowedBook","DELETE FROM BorrowHistory"});
	_, _ = lib.db.Exec("INSERT INTO Book(isbn, author, title, bookid, borrowflag) " +
		"VALUES(\"isbn0001\", \"author01\", \"title001\", \"isbn000101\", 1), " +
		"(\"isbn0001\", \"author01\", \"title001\", \"isbn000102\", 0), " +
		"(\"isbn0002\", \"author02\", \"title002\", \"isbn000201\", 1), " +
		"(\"isbn0003\", \"author01\", \"title003\", \"isbn000301\", 0), " +
		"(\"isbn0004\", \"author04\", \"title003\", \"isbn000401\", 1), " +
		"(\"isbn0005\", \"author05\", \"title003\", \"isbn000501\", 1), " +
		"(\"isbn0006\", \"author06\", \"title003\", \"isbn000601\", 1)")
	_, _ = lib.db.Exec("INSERT INTO Student(id, password, borrowright) " +
		"VALUES(\"stu01\", \"123456\", 1), (\"stu02\", \"147258\", 1), (\"stu03\", \"147258\", 0), (\"stu04\", \"147258\", 1)")
	_, _ = lib.db.Exec(fmt.Sprintf("INSERT INTO Admins(id, password) " +
		"VALUES(\"12345678\", \"asdfghjk\"), (\"%s\", \"%s\")", AdminInit, AdminInitPassword),)
	_, _ = lib.db.Exec("INSERT INTO BorrowedBook(isbn, studentid, bookid, rettime, extendtimes) " +
		"VALUES(\"isbn0001\", \"stu01\", \"isbn000101\", '2020-06-01', 3), " +
		"(\"isbn0002\", \"stu02\", \"isbn000201\", '2020-06-01', 0)," +
		" (\"isbn0004\", \"stu03\", \"isbn000401\", '2020-05-01', 0), " +
		"(\"isbn0005\", \"stu03\", \"isbn000501\", '2020-05-01', 0), " +
		"(\"isbn0006\", \"stu03\", \"isbn000601\", '2020-05-01', 0)")
	_, _ = lib.db.Exec("INSERT INTO BorrowHistory(isbn, studentid, bookid, rettime, brwtime) " +
		"VALUES(\"isbn0001\", \"stu01\", \"isbn000101\", '2020-06-01', '2020-02-01')")
	_, _ = lib.db.Exec("INSERT INTO BorrowHistory(isbn, studentid, bookid, rettime, brwtime) " +
		"VALUES(\"isbn0002\", \"stu02\", \"isbn000201\", '2020-06-01', '2020-05-01')")
	_, _ = lib.db.Exec("INSERT INTO BorrowHistory(isbn, studentid, bookid, rettime, brwtime) " +
		"VALUES(\"isbn0002\", \"stu01\", \"isbn000201\", '2020-04-01', '2020-03-01')")
	// Note: "VALUE" (singular) below is a valid MySQL synonym for "VALUES".
	_, _ = lib.db.Exec("INSERT INTO BorrowHistory(isbn, studentid, bookid, rettime, brwtime) " +
		"VALUE(\"isbn0004\", \"stu03\", \"isbn000401\", '2020-05-01', '2020-04-01'), " +
		"(\"isbn0005\", \"stu03\", \"isbn000501\", '2020-05-01', '2020-04-01'), " +
		"(\"isbn0006\", \"stu03\", \"isbn000601\", '2020-05-01', '2020-04-01')")
}
// QueryALLUser prints every administrator and then every student account.
// Fixes over the original: rows.Close() is deferred only after the query
// error is checked (deferring first panics on a nil rows), and the first
// result set is explicitly closed before the variable is reused (the
// original leaked it).
func (lib *Library) QueryALLUser() error {
	rows, err := lib.db.Query("Select * From Admins order by id asc")
	if err != nil {
		panic(err)
	}
	count := 0
	for rows.Next() {
		var id, pass string
		rows.Scan(&id, &pass)
		count += 1
		fmt.Printf("Administrator %d: %s\n", count, id)
	}
	rows.Close()
	rows, err = lib.db.Query("Select * From Student order by id asc")
	if err != nil {
		panic(err)
	}
	defer rows.Close()
	count = 0
	for rows.Next() {
		var id, pass string
		var flag int
		rows.Scan(&id, &pass, &flag)
		count += 1
		stat := "can"
		if flag == 0 {
			stat = "cannot"
		}
		fmt.Printf("Student %d: %s, he/she %s borrow books.\n", count, id, stat)
	}
	return nil
}
// QueryALLBook prints every book copy with its borrow status.
// Fixed: rows.Close() is deferred only after the error check (the original
// deferred on a possibly-nil rows).
func (lib *Library) QueryALLBook() error {
	rows, err := lib.db.Query("Select * From Book order by bookid asc")
	if err != nil {
		panic(err)
	}
	defer rows.Close()
	for rows.Next() {
		var tit, aut, isb, bid string
		var bfg int
		rows.Scan(&tit, &aut, &isb, &bid, &bfg)
		stat := "is"
		if bfg == 0 {
			stat = "isn`t"
		}
		fmt.Printf("title: %s, author: %s, ISBN: %s, bookid: %s, the book %s borrowed.\n", tit, aut, isb, bid, stat)
	}
	return nil
}
// QueryALLBorrowing prints every currently borrowed book.
// Fixed: rows.Close() is deferred only after the error check.
func (lib *Library) QueryALLBorrowing() error {
	rows, err := lib.db.Query("Select * From BorrowedBook order by bookid asc")
	if err != nil {
		panic(err)
	}
	defer rows.Close()
	for rows.Next() {
		var isb, sid, bid, d1 string
		var ett int
		rows.Scan(&isb, &sid, &bid, &d1, &ett)
		fmt.Printf("ISBN: %s, studentid: %s, bookid: %s, extending times: %d, expected return date: %s\n", isb, sid, bid, ett, d1)
	}
	return nil
}
// QueryALLBorrowHis prints the complete borrow history.
// Fixed: rows.Close() is deferred only after the error check.
func (lib *Library) QueryALLBorrowHis() error {
	rows, err := lib.db.Query("Select * From BorrowHistory ORDER BY BOOKID ASC")
	if err != nil {
		panic(err)
	}
	defer rows.Close()
	for rows.Next() {
		var isb, sid, bid, d1, d2 string
		rows.Scan(&isb, &sid, &bid, &d1, &d2)
		fmt.Printf("ISBN: %s, studentid: %s, bookid: %s, borrow date: %s, expected return/returned date: %s\n", isb, sid, bid, d2, d1)
	}
	return nil
}
// CheckAdmin reports whether the admin id/password pair matches a stored
// account. Fixed: the result set is now closed — the original leaked the
// connection on every call.
// NOTE(review): passwords are compared in plaintext; this matches how the
// rest of the assignment stores them.
func (lib *Library) CheckAdmin(id, password string) bool {
	rows, err := lib.db.Query("SELECT password FROM Admins WHERE id = ?", id)
	if err != nil {
		panic(err)
	}
	defer rows.Close()
	if rows.Next() {
		var k string
		rows.Scan(&k)
		if password == k {
			return true
		}
	}
	return false
}
// CheckStudent reports whether the student id/password pair matches a stored
// account. Fixed: the result set is now closed — the original leaked the
// connection on every call.
func (lib *Library) CheckStudent(id, password string) bool {
	rows, err := lib.db.Query("SELECT password FROM Student WHERE id = ?", id)
	if err != nil {
		panic(err)
	}
	defer rows.Close()
	if rows.Next() {
		var k string
		rows.Scan(&k)
		if password == k {
			return true
		}
	}
	return false
}
// AddAdm creates a new administrator account, refusing duplicate ids.
// Returns 1 on success and 0 when the id is already registered.
// Fixes: the error is checked before deferring rows.Close(), and the INSERT
// is parameterized — the original built it with Sprintf, an SQL-injection
// risk for user-supplied ids/passwords.
func (lib *Library) AddAdm(id, password string) (int, error) {
	rows, err := lib.db.Query("select * from Admins where id = ?", id)
	if err != nil {
		panic(err)
	}
	defer rows.Close()
	if rows.Next() {
		fmt.Println("This administrator id has already had its account.")
		return 0, nil
	}
	_, _ = lib.db.Exec("INSERT INTO Admins(id, password) value (?, ?)", id, password)
	return 1, nil
}
// AddStu creates a new student account (borrowright enabled), refusing
// duplicate ids. Returns 1 on success and 0 when the id already exists.
// Fixes: the error is checked before deferring rows.Close(), and the INSERT
// is parameterized instead of built with Sprintf (SQL-injection risk).
func (lib *Library) AddStu(id, password string) (int, error) {
	rows, err := lib.db.Query("select * from Student where id = ?", id)
	if err != nil {
		panic(err)
	}
	defer rows.Close()
	if rows.Next() {
		fmt.Println("This student id has already had its account.")
		return 0, nil
	}
	_, _ = lib.db.Exec("INSERT INTO Student(id, password, borrowright) value (?, ?, ?)", id, password, 1)
	return 1, nil
}
// AddBook registers a new copy of a book; the bookid is the ISBN followed by
// a zero-padded copy counter.
// Fixes: the error is checked before deferring rows.Close(), and the counter
// is formatted with %02d so copy #10 becomes "10" rather than the original's
// three-character "010" (which broke the otherwise two-digit id scheme).
// NOTE(review): the counter is derived from the current row count, so ids can
// collide after copies are removed — confirm whether that matters here.
func (lib *Library) AddBook(title, author, ISBN string) error {
	rows, err := lib.db.Query("SELECT * From Book where isbn = ?", ISBN)
	if err != nil {
		panic(err)
	}
	defer rows.Close()
	count := 1
	for rows.Next() {
		count += 1
	}
	bookid := fmt.Sprintf("%s%02d", ISBN, count)
	_, _ = lib.db.Exec("INSERT INTO BOOK(title, author, isbn, bookid, borrowflag) value (?,?,?,?,?)", title, author, ISBN, bookid, 0)
	return nil
}
// RemoveBook deletes a copy from the catalogue and from the borrowed list
// (e.g. when a book is lost). It now returns the first database error
// encountered instead of always returning nil with the errors discarded.
func (lib *Library) RemoveBook(bookid string) error {
	if _, err := lib.db.Exec("DELETE From Book where bookid = ?", bookid); err != nil {
		return err
	}
	if _, err := lib.db.Exec("DELETE From BorrowedBook where bookid = ?", bookid); err != nil {
		return err
	}
	return nil
}
// Checkborrow returns true when the student has fewer than 3 overdue books;
// otherwise it suspends the account's borrow right and returns false.
// Fixes: the error is checked before deferring rows.Close(), and the
// suspension UPDATE now filters on Student.id — the Student table has no
// studentid column, so the original statement always failed (and the error
// was discarded), meaning suspension never actually happened.
func (lib *Library) Checkborrow(studentid string) bool {
	rows, err := lib.db.Query("Select * From BorrowedBook where studentid = ? and rettime < CURRENT_DATE()", studentid)
	if err != nil {
		panic(err)
	}
	defer rows.Close()
	count := 0
	for rows.Next() {
		count += 1
	}
	if count < 3 {
		return true
	}
	_, _ = lib.db.Exec("Update Student set borrowright = 0 where id = ?", studentid)
	return false
}
// BorrowBook lends an available copy of the given ISBN to the student.
// Return codes: 0 success, 1 too many overdue books, 2 already borrowed,
// 3 no available copy / unknown ISBN.
// Fixes: both result sets are closed (the first was leaked when the rows
// variable was reused) and errors are checked before deferring Close.
func (lib *Library) BorrowBook(isbn, studentid string) (int, error) {
	if !lib.Checkborrow(studentid) {
		fmt.Println("You need to return the overdue books before borrowing new one!")
		return 1, nil
	}
	borrowed, err := lib.db.Query("Select * From BorrowedBook where studentid = ?", studentid)
	if err != nil {
		panic(err)
	}
	defer borrowed.Close()
	for borrowed.Next() {
		var isb, sid, bid, rtt string
		var ett int
		borrowed.Scan(&isb, &sid, &bid, &rtt, &ett)
		if isb == isbn {
			fmt.Printf("You have already borrowed this book!\n")
			return 2, nil
		}
	}
	borrowed.Close()
	copies, err := lib.db.Query("Select * From Book where isbn = ?", isbn)
	if err != nil {
		panic(err)
	}
	defer copies.Close()
	for copies.Next() {
		var ttl, aut, isb, bid string
		var bfg int
		copies.Scan(&ttl, &aut, &isb, &bid, &bfg)
		if bfg == 1 {
			// this copy is already lent out; try the next one
			continue
		}
		_, _ = lib.db.Exec("INSERT INTO BorrowedBook(isbn, studentid, bookid, rettime, extendtimes) VALUES(?, ?, ?, date_add(CURRENT_DATE(), interval 1 month), 0)", isb, studentid, bid)
		_, _ = lib.db.Exec("INSERT INTO BorrowHistory(isbn, studentid, bookid, rettime, brwtime) VALUES(?, ?, ?, date_add(CURRENT_DATE(), interval 1 month), CURRENT_DATE())", isb, studentid, bid)
		_, _ = lib.db.Exec("update Book set borrowflag = 1 where bookid = ?", bid)
		fmt.Printf("You have Succeeded Borrowing this book!\n")
		return 0, nil
	}
	fmt.Printf("Fail Borrowing! Book doesn`t exist or all the same books have been borrowed.\n")
	return 3, nil
}
// QueryBook searches books by ISBN (default), title (swit "0") or author
// (swit "1") and prints each match. Returns 0 when at least one book was
// found, 1 otherwise.
// Fixes: only the requested query is executed — the original always ran the
// ISBN query first and leaked its result set — and the error is checked
// before deferring rows.Close().
func (lib *Library) QueryBook(bookinfo, swit string) (int, error) {
	query := "Select * From Book where isbn = ?"
	switch swit {
	case "0":
		query = "Select * From Book where title = ?"
	case "1":
		query = "Select * From Book where author = ?"
	}
	rows, err := lib.db.Query(query, bookinfo)
	if err != nil {
		panic(err)
	}
	defer rows.Close()
	count := 0
	for rows.Next() {
		var ttl, aut, isb, bid string
		var bfg int
		rows.Scan(&ttl, &aut, &isb, &bid, &bfg)
		state := "is"
		if bfg == 0 {
			state = "isn`t"
		}
		fmt.Printf("title: %s, author: %s, ISBN: %s, bookid: %s, the book %s borrowed.\n", ttl, aut, isb, bid, state)
		count += 1
	}
	if count != 0 {
		fmt.Printf("The number of valid infomation is %d.\n", count)
		return 0, nil
	}
	fmt.Println("The book you search is inexistent.")
	return 1, nil
}
// QueryHis prints the complete borrow history of one student.
// Return codes: 0 history printed, 1 no history at all.
// Fixed: rows.Close() is deferred only after the error check.
func (lib *Library) QueryHis(studentid string) (int, error) {
	rows, err := lib.db.Query("Select * From BorrowHistory where studentid = ?", studentid)
	if err != nil {
		panic(err)
	}
	defer rows.Close()
	count := 0
	for rows.Next() {
		count += 1
		var isb, sid, bid, d1, d2 string
		rows.Scan(&isb, &sid, &bid, &d1, &d2)
		fmt.Printf("ISBN: %s, studentid: %s, bookid: %s, borrow date: %s, expected return/returned date: %s\n", isb, sid, bid, d2, d1)
	}
	if count == 0 {
		fmt.Printf("The student %s hasn`t borrowed any book yet.\n", studentid)
		return 1, nil
	}
	fmt.Println("All information is as above.")
	return 0, nil
}
// QueryBorrowing prints every book the student has borrowed and not yet
// returned. Return codes: 0 loans printed, 1 nothing outstanding.
// Fixed: rows.Close() is deferred only after the error check.
func (lib *Library) QueryBorrowing(studentid string) (int, error) {
	rows, err := lib.db.Query("Select * From BorrowedBook where studentid = ?", studentid)
	if err != nil {
		panic(err)
	}
	defer rows.Close()
	count := 0
	for rows.Next() {
		count += 1
		var isb, sid, bid, d1 string
		var ett int
		rows.Scan(&isb, &sid, &bid, &d1, &ett)
		fmt.Printf("ISBN: %s, studentid: %s, bookid: %s, extending times: %d, expected return date: %s\n", isb, sid, bid, ett, d1)
	}
	if count == 0 {
		fmt.Printf("The student %s isn`t borrowing any book now.\n", studentid)
		return 1, nil
	}
	fmt.Println("All information is as above.")
	return 0, nil
}
// CheckDDL prints the return deadline of one borrowed book.
// Return codes: 0 found, 1 the student did not borrow that ISBN.
// Fixes: the error is checked before deferring rows.Close(), and Printf is
// given the format string directly instead of Printf(Sprintf(...)).
func (lib *Library) CheckDDL(studentid, isbn string) (int, error) {
	rows, err := lib.db.Query("Select * From BorrowedBook where studentid = ?", studentid)
	if err != nil {
		panic(err)
	}
	defer rows.Close()
	for rows.Next() {
		var isb, sid, bid, d1 string
		var ett int
		rows.Scan(&isb, &sid, &bid, &d1, &ett)
		if isb == isbn {
			fmt.Printf("Deadline of returning book(ISBN code: %s) is %s.\n", isb, d1)
			return 0, nil
		}
	}
	fmt.Printf("You didn`t borrow this book.\n")
	return 1, nil
}
// ExtendDDL extends the return deadline of one borrowed book by a month, at
// most 3 times. Return codes: 0 extended, 1 not borrowed, 2 limit reached.
// SQL fixes: "extendtimes += 1" is not valid MySQL (now
// "extendtimes = extendtimes + 1"), the history table is BorrowHistory (the
// original updated the nonexistent "BorrowedHistory"), and the doubled
// "rettime = rettime =" assignment is removed. Also, the error is checked
// before deferring rows.Close().
// NOTE(review): the history UPDATE matches every history row for this
// student+ISBN, including already-returned loans — confirm intent.
func (lib *Library) ExtendDDL(studentid, isbn string) (int, error) {
	rows, err := lib.db.Query("Select * From BorrowedBook where studentid = ?", studentid)
	if err != nil {
		panic(err)
	}
	defer rows.Close()
	var isb, sid, bid, d1 string
	var ett int
	for rows.Next() {
		rows.Scan(&isb, &sid, &bid, &d1, &ett)
		if isb == isbn {
			break
		}
	}
	if isb != isbn {
		fmt.Printf("You didn`t borrow this book!\n")
		return 1, nil
	}
	if ett >= 3 {
		fmt.Printf("You have extend this book for 3 times!\n")
		return 2, nil
	}
	_, _ = lib.db.Exec("Update BorrowedBook SET rettime = date_add(rettime, interval 1 month), extendtimes = extendtimes + 1 where studentid = ? and isbn = ?", studentid, isbn)
	_, _ = lib.db.Exec("Update BorrowHistory SET rettime = date_add(rettime, interval 1 month) where studentid = ? and isbn = ?", studentid, isbn)
	fmt.Printf("Deadline of returning book is extended.\n")
	return 0, nil
}
// CheckOver prints every overdue book of the student.
// Return codes: 0 at least one overdue book, 1 none.
// Fixed: rows.Close() is deferred only after the error check.
func (lib *Library) CheckOver(studentid string) (int, error) {
	rows, err := lib.db.Query("Select * From BorrowedBook where studentid = ? and rettime < CURRENT_DATE()", studentid)
	if err != nil {
		panic(err)
	}
	defer rows.Close()
	count := 0
	for rows.Next() {
		var isb, sid, bid, d1 string
		var ett int
		rows.Scan(&isb, &sid, &bid, &d1, &ett)
		fmt.Printf("Book which ISBN = %s is overdue.\n", isb)
		count += 1
	}
	if count == 0 {
		fmt.Printf("The student %s doesn`t have overdue books.\n", studentid)
		return 1, nil
	}
	return 0, nil
}
// RetBook returns a borrowed book (identified by ISBN) for the given student:
// it deletes the loan row, clears the book's borrowed flag and stamps the
// return time into the history table. It returns 0 on success, 1 when the
// student does not have that book on loan, and a non-nil error on database
// failure (the original panicked and discarded every Exec error).
func (lib *Library) RetBook(isbn, studentid string) (int, error) {
	rows, err := lib.db.Query("Select * From BorrowedBook where studentid = ?", studentid)
	if err != nil {
		// Check the error before deferring Close: rows is nil on failure.
		return 0, err
	}
	defer rows.Close()
	for rows.Next() {
		var isb, sid, bid, d1 string
		var ett int
		if err := rows.Scan(&isb, &sid, &bid, &d1, &ett); err != nil {
			return 0, err
		}
		if isb != isbn {
			continue
		}
		if _, err := lib.db.Exec("delete from BorrowedBook where bookid = ?", bid); err != nil {
			return 0, err
		}
		if _, err := lib.db.Exec("update Book set borrowflag = 0 where bookid = ?", bid); err != nil {
			return 0, err
		}
		// NOTE(review): this targets table "BorrowHistory" while ExtendDDL
		// updates "BorrowedHistory" — confirm which name the schema uses.
		if _, err := lib.db.Exec("update BorrowHistory set rettime = CURRENT_DATE() where bookid = ?", bid); err != nil {
			return 0, err
		}
		fmt.Printf("Succeed Returning\n")
		return 0, nil
	}
	fmt.Printf("The student did not borrow this book!\n")
	return 1, nil
}
// main drives the interactive console UI of the library management system:
// it connects to the database, creates and seeds the tables, gates startup
// behind the hard-coded activation account, then loops over login sessions
// (student mode "0" or administrator mode "1") and, per session, over a
// numbered function menu until the user exits.
func main() {
	fmt.Println("Welcome to the Library Management System!")
	var lib Library
	lib.ConnectDB()
	lib.CreateTables()
	lib.init()
	var initid, initpass string
	// Menu entries shown once per session iteration; the leading number is
	// what the user types to pick a function below.
	Menu:= []string{"1: Add Administrator (Admin Only)",
		"2: Add Student (Admin Only)",
		"3: Add Book (Admin Only)",
		"4: Remove Book (Admin Only)",
		"5: Borrow Book (Student Only)",
		"6: Query Book",
		"7: Query Student`s Borrowing History",
		"8: Query Student`s Borrowing Books",
		"9: Check the Deadline of Returing Book (Student Only)",
		"10: Extend the Deadline of Returing Book (Student Only)",
		"11: Query If a Student Has Overdue Books",
		"12: Return Book (Student Only)",
		"13: Check and Suspend Student's Account (Admin Only)",
		"14: Check all the administrators and students (Admin Only)",
		"15: Check all the book infomation (Admin Only)",
		"16: Check all the borrowing book infomation (Admin Only)",
		"17: Check all the borrow history (Admin Only)",
		"0: Exit Account"}
	fmt.Println("Please enter the activation account:")
	fmt.Scanln(&initid)
	fmt.Println("Please enter the activation password:")
	fmt.Scanln(&initpass)
	// Whole program is gated behind the activation credentials.
	if initid != AdminInit || initpass != AdminInitPassword{
		fmt.Println("The account or password is invalid, please restart the program.")
	} else {
		// Outer loop: one iteration per login attempt / session.
		for true {
			var modetest, mode string
			flag := false // set true only after a successful login
			fmt.Println("Please enter the usage mode:\n 0: Student mode 1: Administrator mode 2: Exit")
			fmt.Scanln(&modetest)
			if modetest == "2" {
				fmt.Println("Thank you for using the Library Management System of Fairy Union of Defense and Attack Nebula University!")
				break
			}else if modetest == "1"{
				fmt.Println("Please enter the account:")
				fmt.Scanln(&initid)
				fmt.Println("Please enter the password:")
				fmt.Scanln(&initpass)
				if lib.CheckAdmin(initid, initpass) == false{
					fmt.Println("The account or password is invalid.")
				}else{
					flag = true
				}
			}else if modetest == "0"{
				fmt.Println("Please enter the account:")
				fmt.Scanln(&initid)
				fmt.Println("Please enter the password:")
				fmt.Scanln(&initpass)
				if lib.CheckStudent(initid, initpass) == false {
					fmt.Println("The account or password is invalid.")
				}else{
					flag = true
				}
			}else{
				fmt.Println("Mode is inexistent.")
			}
			// Session loop: show the menu and dispatch one function per
			// iteration; "0" logs the account out. modetest ("0"/"1")
			// doubles as the permission check inside each case.
			for flag {
				for _, putstring := range Menu {
					fmt.Println(putstring)
				}
				fmt.Println("Please enter the function mode:")
				fmt.Scanln(&mode)
				if mode == "0" {
					break
				}
				switch mode {
					case "1":
						// Add administrator (admin only).
						if modetest == "0" {
							fmt.Println("You do not have permission to execute this command.")
						} else {
							// Loop until both password entries match.
							for true {
								var id, pass, repass string
								fmt.Println("Please enter the account number (11 digits):")
								fmt.Scanln(&id)
								fmt.Println("Please enter the password (no more than 15 characters):")
								fmt.Scanln(&pass)
								fmt.Println("Please enter the password again:")
								fmt.Scanln(&repass)
								if pass != repass {
									fmt.Println("The password entered twice is different, please re-enter:")
								} else {
									lib.AddAdm(id, pass)
									fmt.Println("The account is created!")
									break
								}
							}
						}
					case "2":
						// Add student (admin only); same confirm-password loop.
						if modetest == "0" {
							fmt.Println("You do not have permission to execute this command.")
						} else {
							for true {
								var id, pass, repass string
								fmt.Println("Please enter the account number (11 digits):")
								fmt.Scanln(&id)
								fmt.Println("Please enter the password (no more than 15 characters):")
								fmt.Scanln(&pass)
								fmt.Println("Please enter the password again:")
								fmt.Scanln(&repass)
								if pass != repass {
									fmt.Println("The password entered twice is different, please re-enter:")
								} else {
									lib.AddStu(id, pass)
									fmt.Println("The account is created!")
									break
								}
							}
						}
					case "3":
						// Add a book (admin only).
						if modetest == "0" {
							fmt.Println("You do not have permission to execute this command.")
						} else {
							var title, author, isbn string
							fmt.Println("Please enter the book title, author, and isbn code, separated by newlines:")
							fmt.Scanln(&title)
							fmt.Scanln(&author)
							fmt.Scanln(&isbn)
							lib.AddBook(title, author, isbn)
						}
					case "4":
						// Remove a book by its book id (admin only).
						if modetest == "0" {
							fmt.Println("You do not have permission to execute this command.")
						} else {
							var bookid string
							fmt.Println("Please enter the bookid code:")
							fmt.Scanln(&bookid)
							lib.RemoveBook(bookid)
						}
					case "5":
						// Borrow a book by ISBN (student only; initid is the
						// logged-in student account).
						if modetest == "1" {
							fmt.Println("You do not have permission to execute this command.")
						} else {
							var bookinfo string
							fmt.Println("Please enter ISBN code of the book that you want:")
							fmt.Scanln(&bookinfo)
							lib.BorrowBook(bookinfo, initid)
						}
					case "6":
						// Query books by title/author/ISBN; available to all.
						var searchcode, bookinfo string
						fmt.Println("You can search the infomation you want by title(0), author(1) or ISBN code(2):")
						for true {
							fmt.Scanln(&searchcode)
							if searchcode == "0" || searchcode == "1" || searchcode == "2"{
								break
							}
							fmt.Println("Mode is inexistent.")
						}
						fmt.Println("Please enter the book information that matches your options:")
						fmt.Scanln(&bookinfo)
						lib.QueryBook(bookinfo, searchcode)
					case "7":
						// Borrowing history: admins pick a student, students
						// see their own.
						var stuid string
						if modetest == "1"{
							fmt.Println("Please enter the student id :")
							fmt.Scanln(&stuid)
						}else{
							stuid = initid
						}
						lib.QueryHis(stuid)
					case "8":
						// Currently-borrowed books; same admin/student split.
						var stuid string
						if modetest == "1"{
							fmt.Println("Please enter the student id :")
							fmt.Scanln(&stuid)
						}else{
							stuid = initid
						}
						lib.QueryBorrowing(stuid)
					case "9":
						// Check a loan's return deadline (student only).
						if modetest == "1" {
							fmt.Println("You do not have permission to execute this command.")
						} else {
							var bookinfo string
							fmt.Println("Please enter ISBN code of the book that you want to check:")
							fmt.Scanln(&bookinfo)
							lib.CheckDDL(initid, bookinfo)
						}
					case "10":
						// Extend a loan's return deadline (student only).
						if modetest == "1" {
							fmt.Println("You do not have permission to execute this command.")
						} else {
							var bookinfo string
							fmt.Println("Please enter ISBN code of the book that you want to extend return deadline:")
							fmt.Scanln(&bookinfo)
							lib.ExtendDDL(initid, bookinfo)
						}
					case "11":
						// Overdue-book check; admins pick a student.
						var stuid string
						if modetest == "1"{
							fmt.Println("Please enter the student id :")
							fmt.Scanln(&stuid)
						}else{
							stuid = initid
						}
						lib.CheckOver(stuid)
					case "12":
						// Return a borrowed book (student only).
						if modetest == "1" {
							fmt.Println("You do not have permission to execute this command.")
						} else {
							var bookinfo string
							fmt.Println("Please enter ISBN code of the book that you want to return:")
							fmt.Scanln(&bookinfo)
							lib.RetBook(bookinfo, initid)
						}
					case "13":
						// Suspension check based on the overdue count (admin only).
						if modetest == "0" {
							fmt.Println("You do not have permission to execute this command.")
						} else {
							var studentid string
							fmt.Println("Please enter the student id:")
							fmt.Scanln(&studentid)
							borrowflag := lib.Checkborrow(studentid)
							if borrowflag == false{
								fmt.Println("This student has over 3 overdue books, and his/her account is suspended.")
							}else{
								fmt.Println("This student has less than 3 overdue books, and his/her account is normal.")
							}
						}
					case "14":
						// Admin-only listing commands (users, books, loans, history).
						if modetest == "0" {
							fmt.Println("You do not have permission to execute this command.")
						} else {
							lib.QueryALLUser()
						}
					case "15":
						if modetest == "0" {
							fmt.Println("You do not have permission to execute this command.")
						} else {
							lib.QueryALLBook()
						}
					case "16":
						if modetest == "0" {
							fmt.Println("You do not have permission to execute this command.")
						} else {
							lib.QueryALLBorrowing()
						}
					case "17":
						if modetest == "0" {
							fmt.Println("You do not have permission to execute this command.")
						} else {
							lib.QueryALLBorrowHis()
						}
					default:
						fmt.Println("Mode is inexistent.")
				}
			}
		}
	}
}
|
// Copyright (C) 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package vulkan
import (
"context"
"fmt"
"strconv"
"github.com/google/gapid/core/data/id"
"github.com/google/gapid/gapis/api"
"github.com/google/gapid/gapis/service/path"
)
// Interface compliance test: fails to compile if API stops implementing
// api.MemoryBreakdownProvider.
var (
	_ = api.MemoryBreakdownProvider(API{})
)

// sparseBindingMap accumulates sparse memory bindings, keyed by the
// VkDeviceMemory each binding binds into, so they can later be attached to
// the matching allocation in MemoryBreakdown.
type sparseBindingMap map[VkDeviceMemory][]*api.MemoryBinding
// Implements api.MemoryBreakdownProvider
// MemoryBreakdown serializes every Vulkan device-memory allocation in the
// captured state, together with the buffer/image bindings (normal and
// sparse) that point into it.
func (a API) MemoryBreakdown(st *api.GlobalState) (*api.MemoryBreakdown, error) {
	s := GetState(st)
	// Iterate over images and buffers looking for sparsely-bound objects.
	// They are not listed on the VkDeviceMemoryObject and so we need to
	// collect them beforehand.
	sparseBindings := sparseBindingMap{}
	for _, info := range s.Buffers().All() {
		// Skip buffers created without the sparse-binding flag.
		if (info.Info().CreateFlags() & VkBufferCreateFlags(
			VkBufferCreateFlagBits_VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) ==
			VkBufferCreateFlags(0) {
			continue
		}
		if err := sparseBindings.getBufferSparseBindings(info); err != nil {
			return nil, err
		}
	}
	for _, info := range s.Images().All() {
		// Skip images created without the sparse-binding flag.
		if (info.Info().Flags() & VkImageCreateFlags(
			VkImageCreateFlagBits_VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) ==
			VkImageCreateFlags(0) {
			continue
		}
		if err := sparseBindings.getImageSparseBindings(info); err != nil {
			return nil, err
		}
	}
	allocations := make([]*api.MemoryAllocation, 0, len(s.DeviceMemories().All()))
	// Serialize data on all allocations into protobufs
	for handle, info := range s.DeviceMemories().All() {
		device := info.Device()
		typ := info.MemoryTypeIndex()
		flags, err := s.getMemoryTypeFlags(device, typ)
		if err != nil {
			return nil, err
		}
		// Regular (non-sparse) bindings recorded on the allocation itself.
		bindings, err := s.getAllocationBindings(info.Get(), st)
		if err != nil {
			return nil, err
		}
		// Attach any sparse bindings collected above for this memory handle.
		if binds, ok := sparseBindings[handle]; ok {
			bindings = append(bindings, binds...)
		}
		// Current host-visible mapping of the allocation (zero values when
		// the memory is not mapped).
		mapping := api.MemoryMapping{
			Size: uint64(info.MappedSize()),
			Offset: uint64(info.MappedOffset()),
			MappedAddress: uint64(info.MappedLocation()),
		}
		alloc := api.MemoryAllocation{
			Device: uint64(info.Device()),
			MemoryType: uint32(typ),
			Flags: uint32(flags),
			Handle: uint64(handle),
			Name: strconv.FormatUint(uint64(handle), 10),
			Size: uint64(info.AllocationSize()),
			Mapping: &mapping,
			Bindings: bindings,
		}
		allocations = append(allocations, &alloc)
	}
	return &api.MemoryBreakdown{
		API: path.NewAPI(id.ID(ID)),
		Allocations: allocations,
		AllocationFlagsIndex: int32(VkMemoryPropertyFlagBitsConstants()),
	}, nil
}
// getBufferSparseBindings records one api.MemoryBinding for every non-null
// sparse memory bind of the given buffer, keyed in s by the VkDeviceMemory
// the bind targets. Binds to the null memory handle are skipped.
func (s sparseBindingMap) getBufferSparseBindings(info BufferObjectʳ) error {
	handle := uint64(info.VulkanHandle())
	name := strconv.FormatUint(handle, 10)
	for _, bind := range info.SparseMemoryBindings().All() {
		mem := bind.Memory()
		if mem == VkDeviceMemory(0) {
			continue
		}
		// Appending to a missing key appends to a nil slice, which is
		// equivalent to creating the entry first.
		s[mem] = append(s[mem], &api.MemoryBinding{
			Handle: handle,
			Name:   name,
			Size:   uint64(bind.Size()),
			Offset: uint64(bind.MemoryOffset()),
			Type: &api.MemoryBinding_SparseBufferBlock{
				&api.SparseBinding{
					Offset: uint64(bind.ResourceOffset()),
				},
			},
		})
	}
	return nil
}
// getAspects translates a Vulkan image-aspect bitmask into the list of
// API-level aspect types it contains, in color/depth/stencil order. The
// result is an empty (non-nil) slice when no known bit is set.
func getAspects(aspects uint32) []api.AspectType {
	aspectTypes := []api.AspectType{}
	for _, entry := range []struct {
		bit VkImageAspectFlagBits
		typ api.AspectType
	}{
		{VkImageAspectFlagBits_VK_IMAGE_ASPECT_COLOR_BIT, api.AspectType_COLOR},
		{VkImageAspectFlagBits_VK_IMAGE_ASPECT_DEPTH_BIT, api.AspectType_DEPTH},
		{VkImageAspectFlagBits_VK_IMAGE_ASPECT_STENCIL_BIT, api.AspectType_STENCIL},
	} {
		if aspects&uint32(entry.bit) != 0 {
			aspectTypes = append(aspectTypes, entry.typ)
		}
	}
	return aspectTypes
}
// getImageSparseBindings records memory bindings for all sparse binds of an
// image: opaque binds (classified as opaque block, mip-tail or metadata
// mip-tail) and per-block image binds. Results are keyed in s by the
// VkDeviceMemory each bind targets; binds to the null handle are skipped.
func (s sparseBindingMap) getImageSparseBindings(img ImageObjectʳ) error {
	// Without the sparse-residency flag every opaque bind is a plain opaque
	// block; with it, binds may also land in mip tails or metadata.
	opaque := (img.Info().Flags() & VkImageCreateFlags(
		VkImageCreateFlagBits_VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT)) ==
		VkImageCreateFlags(0)
	handle := uint64(img.VulkanHandle())
	name := strconv.FormatUint(handle, 10)
	for _, bind := range img.OpaqueSparseMemoryBindings().All() {
		if bind.Memory() == VkDeviceMemory(0) {
			continue
		}
		binding := api.MemoryBinding{
			Handle: handle,
			Name: name,
			Size: uint64(bind.Size()),
			Offset: uint64(bind.MemoryOffset()),
		}
		if opaque {
			binding.Type = &api.MemoryBinding_SparseOpaqueImageBlock{
				&api.SparseBinding{
					Offset: uint64(bind.ResourceOffset()),
				},
			}
		} else {
			// Need to determine which type it is
			// Determine if it's in a mip tail
			// checkMipTail maps bind's resource offset into (offset within
			// the mip tail, array layer) for the given aspect requirements;
			// ok is false when the bind lies outside that aspect's mip tail.
			checkMipTail := func(reqs VkSparseImageMemoryRequirements) (uint64, uint32, bool) {
				offset := bind.ResourceOffset()
				if offset < reqs.ImageMipTailOffset() {
					return 0, 0, false
				}
				offset -= reqs.ImageMipTailOffset()
				arrayLayer := uint32(0)
				// With per-layer mip tails the stride separates the tails of
				// consecutive array layers.
				if reqs.FormatProperties().Flags()&(VkSparseImageFormatFlags(
					VkSparseImageFormatFlagBits_VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT)) ==
					VkSparseImageFormatFlags(0) {
					arrayLayer = uint32(offset / reqs.ImageMipTailStride())
					offset %= reqs.ImageMipTailStride()
				}
				if offset >= reqs.ImageMipTailSize() || arrayLayer >= img.Info().ArrayLayers() {
					return 0, 0, false
				}
				return uint64(offset), arrayLayer, true
			}
			if (bind.Flags() & VkSparseMemoryBindFlags(
				VkSparseMemoryBindFlagBits_VK_SPARSE_MEMORY_BIND_METADATA_BIT)) !=
				VkSparseMemoryBindFlags(0) {
				// Metadata bind: must fall inside the metadata aspect's
				// mip tail.
				reqs, ok := img.SparseMemoryRequirements().Lookup(
					VkImageAspectFlagBits_VK_IMAGE_ASPECT_METADATA_BIT)
				if !ok {
					return fmt.Errorf("Metadata binding present but no metadata sparse memory requirements for image %v", handle)
				}
				offset, arrayLayer, ok := checkMipTail(reqs)
				if !ok {
					return fmt.Errorf("Binding has metadata flag setbut isn't in metadata mip tail")
				}
				binding.Type = &api.MemoryBinding_SparseImageMetadata{
					&api.SparseImageMetadataMipTail{
						ArrayLayer: arrayLayer,
						Offset: offset,
					},
				}
			} else {
				// Non-metadata bind: try each aspect's mip tail; if none
				// matches, classify as an opaque image block.
				inMip := false
				for aspects, reqs := range img.SparseMemoryRequirements().All() {
					offset, arrayLayer, ok := checkMipTail(reqs)
					if !ok {
						continue
					}
					inMip = true
					binding.Type = &api.MemoryBinding_SparseImageMipTail{
						&api.SparseImageMetadataMipTail{
							ArrayLayer: arrayLayer,
							Offset: offset,
							Aspects: getAspects(uint32(aspects)),
						},
					}
					break
				}
				if !inMip {
					binding.Type = &api.MemoryBinding_SparseOpaqueImageBlock{
						&api.SparseBinding{
							Offset: uint64(bind.ResourceOffset()),
						},
					}
				}
			}
		}
		v, ok := s[bind.Memory()]
		if !ok {
			v = []*api.MemoryBinding{}
		}
		s[bind.Memory()] = append(v, &binding)
	}
	// Non-opaque sparse image binds: one binding per bound block, carrying
	// its (x, y) offset, extent, mip level, array layer and aspects.
	for aspects, layers := range img.SparseImageMemoryBindings().All() {
		aspectTypes := getAspects(uint32(aspects))
		for layer, levels := range layers.Layers().All() {
			for level, blocks := range levels.Levels().All() {
				for _, block := range blocks.Blocks().All() {
					if block.Memory() == VkDeviceMemory(0) {
						continue
					}
					binding := api.MemoryBinding{
						Handle: handle,
						Name: name,
						Size: uint64(block.Size()),
						Offset: uint64(block.MemoryOffset()),
						Type: &api.MemoryBinding_SparseImageBlock{
							&api.SparseImageBlock{
								XOffset: block.Offset().X(),
								YOffset: block.Offset().Y(),
								Width: block.Extent().Width(),
								Height: block.Extent().Height(),
								MipLevel: level,
								ArrayLayer: layer,
								Aspects: aspectTypes,
							},
						},
					}
					v, ok := s[block.Memory()]
					if !ok {
						v = []*api.MemoryBinding{}
					}
					s[block.Memory()] = append(v, &binding)
				}
			}
		}
	}
	return nil
}
// getMemoryTypeFlags returns the property flags of memory type typeIndex on
// the physical device that backs the given logical device. It fails when the
// device or its physical device is unknown, or when the index is out of
// range for the device's memory types.
func (s *State) getMemoryTypeFlags(device VkDevice, typeIndex uint32) (VkMemoryPropertyFlags, error) {
	dev := s.Devices().Get(device)
	if dev.IsNil() {
		return VkMemoryPropertyFlags(0), fmt.Errorf("Failed to find device %v", device)
	}
	phys := dev.PhysicalDevice()
	physObj := s.PhysicalDevices().Get(phys)
	if physObj.IsNil() {
		return VkMemoryPropertyFlags(0), fmt.Errorf("Failed to find physical device %v", phys)
	}
	memProps := physObj.MemoryProperties()
	if typeIndex >= memProps.MemoryTypeCount() {
		return VkMemoryPropertyFlags(0), fmt.Errorf("Memory type %v is larger than physical device %v's number of memory types (%v)",
			typeIndex, phys, memProps.MemoryTypeCount())
	}
	return memProps.MemoryTypes().Get(int(typeIndex)).PropertyFlags(), nil
}
// getAllocationBindings serializes the regular (non-sparse) buffer and image
// bindings recorded on a single device-memory allocation. Each bound handle
// must resolve to a known buffer or image, otherwise an error is returned.
func (s *State) getAllocationBindings(allocation DeviceMemoryObject, st *api.GlobalState) ([]*api.MemoryBinding, error) {
	bindings := []*api.MemoryBinding{}
	for handle, offset := range allocation.BoundObjects().All() {
		binding := api.MemoryBinding{
			Handle: uint64(handle),
			Name: strconv.FormatUint(handle, 10),
			Offset: uint64(offset),
		}
		if buffer, ok := s.Buffers().Lookup(VkBuffer(handle)); ok {
			// Buffers report their created size directly.
			binding.Size = uint64(buffer.Info().Size())
			binding.Type = &api.MemoryBinding_Buffer{&api.NormalBinding{}}
		} else if image, ok := s.Images().Lookup(VkImage(handle)); ok {
			// Image sizes come from the subroutine's memory requirements.
			ctx := context.Background()
			// NOTE(review): the subroutine's error is discarded; confirm it
			// cannot fail (and return a nil memInfo) for an image that is
			// already bound to memory.
			memInfo, _ := subGetImagePlaneMemoryInfo(ctx, nil, api.CmdNoID, nil, st, s, 0, nil, nil, image, VkImageAspectFlagBits(0))
			memRequirement := memInfo.MemoryRequirements()
			binding.Size = uint64(memRequirement.Size())
			binding.Type = &api.MemoryBinding_Image{&api.NormalBinding{}}
		} else {
			return nil, fmt.Errorf("Bound object %v is not a buffer or an image", handle)
		}
		bindings = append(bindings, &binding)
	}
	return bindings, nil
}
|
package chain
import (
"github.com/ethereum/go-ethereum/common"
"math/big"
)
// Output represents a transaction output: who owns it, the amount it holds
// and, for outputs created by a deposit, the deposit's nonce.
type Output struct {
	// Owner is the address entitled to spend this output.
	Owner common.Address
	// Denom is the denomination (amount) held by the output.
	Denom *big.Int
	// DepositNonce is non-zero only for deposit outputs; see IsDeposit.
	DepositNonce *big.Int
}
// NewOutput constructs an Output owned by newOwner with the given amount and
// deposit nonce. The big.Int arguments are deep-copied so later mutation by
// the caller cannot alter the output.
func NewOutput(newOwner common.Address, amount, depositNonce *big.Int) *Output {
	return &Output{
		Owner: common.BytesToAddress(newOwner.Bytes()),
		// new(big.Int).Set copies the full value; the previous
		// big.NewInt(amount.Int64()) silently truncated any amount or nonce
		// that does not fit in an int64.
		Denom:        new(big.Int).Set(amount),
		DepositNonce: new(big.Int).Set(depositNonce),
	}
}
// ZeroOutput returns an output with the zero address, a zero denomination
// and a zero deposit nonce.
func ZeroOutput() *Output {
	// The zero value of common.Address is the all-zero 20-byte address, and
	// new(big.Int) is zero, so no explicit byte slice is needed.
	var zeroAddr common.Address
	return &Output{
		Owner:        zeroAddr,
		Denom:        new(big.Int),
		DepositNonce: new(big.Int),
	}
}
// IsExit reports whether this output's owner is the designated exit address
// (the owner of ExitOutput()). A nil receiver is never an exit.
func (out *Output) IsExit() bool {
	if out == nil {
		return false
	}
	// common.Address is a fixed-size byte array, so == performs the same
	// element-wise comparison as the original loop.
	return out.Owner == ExitOutput().Owner
}
// IsDeposit reports whether this output originates from a deposit, i.e. its
// deposit nonce is set and non-zero. A nil receiver is not a deposit.
func (out *Output) IsDeposit() bool {
	if out == nil {
		return false
	}
	if out.DepositNonce == nil {
		return false
	}
	return out.DepositNonce.Cmp(Zero()) != 0
}
// IsZeroOutput reports whether the output is entirely zero: a zero owner
// address, a nil-or-zero denomination and a nil-or-zero deposit nonce.
// A nil receiver counts as zero.
func (out *Output) IsZeroOutput() bool {
	if out == nil {
		return true
	}
	for _, b := range out.Owner.Bytes() {
		if b != 0 {
			return false
		}
	}
	if out.Denom != nil && out.Denom.Cmp(Zero()) != 0 {
		return false
	}
	return out.DepositNonce == nil || out.DepositNonce.Cmp(Zero()) == 0
}
|
// +build js,wasm
package main
import "syscall/js"
// document caches the browser's global DOM document object.
var document js.Value

// init resolves the document once at startup so main can use it directly.
func init() {
	document = js.Global().Get("document")
}
// main creates a <div> containing "Hello World" and appends it to the DOM
// element with id "target".
func main() {
	target := document.Call("getElementById", "target")
	child := document.Call("createElement", "div")
	child.Set("innerText", "Hello World")
	target.Call("appendChild", child)
}
|
package main
// main runs the scheduling pipeline end to end: build the initial state for
// the "Engineering" department, derive constraints and candidate
// assignments, solve, then export the result as an XLSX workbook.
func main() {
	state := initState("Engineering")
	state.generateConstraints()
	state.generateCandidates()
	state.solve()
	state.generateXLSX()
}
|
package main
import(
"fmt"
)
// main counts, over all unordered pairs (i, n) with 0 <= i <= n <= 9999, the
// products i*n whose first decimal digit equals their last decimal digit,
// and prints that count.
func main() {
	count := 0
	for i := 0; i <= 9999; i++ {
		for n := i; n <= 9999; n++ { // n starts at i so each pair is counted once
			q := i * n
			// Compare the first and last decimal digits arithmetically;
			// formatting ~50M products with fmt.Sprintf allocated a string
			// per iteration for the same comparison.
			last := q % 10
			first := q
			for first >= 10 {
				first /= 10
			}
			if first == last {
				count++
			}
		}
	}
	fmt.Println(count)
}
|
package packet
import (
"fmt"
"github.com/google/gopacket"
"github.com/google/gopacket/layers"
"github.com/pkg/errors"
"golang.org/x/net/icmp"
)
// FindProtocol returns the transport protocol of a raw IP packet: the
// Protocol field for IPv4, the Next Header field for IPv6. It fails when the
// packet is shorter than the minimum header for its version, or when the
// version nibble is neither 4 nor 6.
func FindProtocol(p []byte) (layers.IPProtocol, error) {
	version, err := FindIPVersion(p)
	if err != nil {
		return 0, err
	}
	switch version {
	case 4:
		if len(p) < ipv4MinHeaderLen {
			return 0, fmt.Errorf("IPv4 packet should have at least %d bytes, got %d bytes", ipv4MinHeaderLen, len(p))
		}
		// Protocol is in the 10th byte of IPv4 header
		return layers.IPProtocol(p[9]), nil
	case 6:
		if len(p) < ipv6HeaderLen {
			return 0, fmt.Errorf("IPv6 packet should have at least %d bytes, got %d bytes", ipv6HeaderLen, len(p))
		}
		// Next header is in the 7th byte of IPv6 header
		return layers.IPProtocol(p[6]), nil
	default:
		// Fixed typo in the error message: "unknow" -> "unknown".
		return 0, fmt.Errorf("unknown ip version %d", version)
	}
}
// FindIPVersion returns the IP version carried in the high nibble of the
// packet's first byte. It fails on an empty packet.
func FindIPVersion(p []byte) (uint8, error) {
	if len(p) == 0 {
		return 0, fmt.Errorf("packet length is 0")
	}
	version := p[0] >> 4
	return version, nil
}
// IPDecoder decodes raw packets into IP. It can process packets sequentially without allocating
// memory for the layers, so it cannot be called concurrently.
type IPDecoder struct {
	ipv4 *layers.IPv4 // reused storage the v4 parser decodes into
	ipv6 *layers.IPv6 // reused storage the v6 parser decodes into
	// layers is the number of decoding layers registered per parser; used to
	// pre-size the decoded-layer-type slice in decodeByVersion.
	layers uint8
	v4parser *gopacket.DecodingLayerParser
	v6parser *gopacket.DecodingLayerParser
}
// NewIPDecoder builds a decoder with one reusable DecodingLayerParser per IP
// version. Both parsers decode into preallocated layer structs, which is why
// the decoder is not safe for concurrent use.
func NewIPDecoder() *IPDecoder {
	var (
		ipv4 layers.IPv4
		ipv6 layers.IPv6
	)
	dlpv4 := gopacket.NewDecodingLayerParser(layers.LayerTypeIPv4)
	dlpv4.SetDecodingLayerContainer(gopacket.DecodingLayerSparse(nil))
	dlpv4.AddDecodingLayer(&ipv4)
	// Stop parsing when it encounter a layer that it doesn't have a parser
	dlpv4.IgnoreUnsupported = true
	dlpv6 := gopacket.NewDecodingLayerParser(layers.LayerTypeIPv6)
	dlpv6.SetDecodingLayerContainer(gopacket.DecodingLayerSparse(nil))
	dlpv6.AddDecodingLayer(&ipv6)
	dlpv6.IgnoreUnsupported = true
	return &IPDecoder{
		ipv4: &ipv4,
		ipv6: &ipv6,
		layers: 1, // only the IP layer is registered so far
		v4parser: dlpv4,
		v6parser: dlpv6,
	}
}
// Decode parses a raw packet and returns its IP layer. It fails when the
// packet cannot be parsed or when no IP layer was among the decoded layers.
func (pd *IPDecoder) Decode(packet RawPacket) (*IP, error) {
	decoded, err := pd.decodeByVersion(packet.Data)
	if err != nil {
		return nil, err
	}
	for _, lt := range decoded {
		if lt == layers.LayerTypeIPv4 {
			return newIPv4(pd.ipv4)
		}
		if lt == layers.LayerTypeIPv6 {
			return newIPv6(pd.ipv6)
		}
	}
	return nil, fmt.Errorf("no ip layer is decoded")
}
// decodeByVersion runs the parser that matches the packet's IP version and
// returns the layer types that were successfully decoded.
func (pd *IPDecoder) decodeByVersion(packet []byte) ([]gopacket.LayerType, error) {
	version, err := FindIPVersion(packet)
	if err != nil {
		return nil, err
	}
	// pd.layers is the number of registered decoding layers, so the result
	// slice never needs to grow.
	decoded := make([]gopacket.LayerType, 0, pd.layers)
	switch version {
	case 4:
		err = pd.v4parser.DecodeLayers(packet, &decoded)
	case 6:
		err = pd.v6parser.DecodeLayers(packet, &decoded)
	default:
		// Fixed typo in the error message: "unknow" -> "unknown".
		err = fmt.Errorf("unknown ip version %d", version)
	}
	if err != nil {
		return nil, err
	}
	return decoded, nil
}
// ICMPDecoder decodes raw packets into IP and ICMP. It can process packets sequentially without allocating
// memory for the layers, so it cannot be called concurrently.
type ICMPDecoder struct {
	*IPDecoder
	icmpv4 *layers.ICMPv4 // reused storage for a decoded ICMPv4 layer
	icmpv6 *layers.ICMPv6 // reused storage for a decoded ICMPv6 layer
}
// NewICMPDecoder extends a fresh IPDecoder with an ICMP decoding layer per
// IP version, so Decode can surface ICMP messages in addition to IP headers.
func NewICMPDecoder() *ICMPDecoder {
	base := NewIPDecoder()
	d := &ICMPDecoder{
		IPDecoder: base,
		icmpv4:    &layers.ICMPv4{},
		icmpv6:    &layers.ICMPv6{},
	}
	// One extra layer (ICMP) may now be decoded on top of IP.
	base.layers++
	base.v4parser.AddDecodingLayer(d.icmpv4)
	base.v6parser.AddDecodingLayer(d.icmpv6)
	return d
}
// Decode parses a raw packet into its IP header plus a parsed ICMP message.
// It fails when the packet cannot be parsed or when no ICMP layer was among
// the decoded layers (the error lists what was decoded instead).
func (pd *ICMPDecoder) Decode(packet RawPacket) (*ICMP, error) {
	// Should decode to IP and optionally ICMP layer
	decoded, err := pd.decodeByVersion(packet.Data)
	if err != nil {
		return nil, err
	}
	for _, layerType := range decoded {
		switch layerType {
		case layers.LayerTypeICMPv4:
			ipv4, err := newIPv4(pd.ipv4)
			if err != nil {
				return nil, err
			}
			// Re-parse header+payload with x/net/icmp to get a typed Message.
			msg, err := icmp.ParseMessage(int(layers.IPProtocolICMPv4), append(pd.icmpv4.Contents, pd.icmpv4.Payload...))
			if err != nil {
				return nil, errors.Wrap(err, "failed to parse ICMPv4 message")
			}
			return &ICMP{
				IP:      ipv4,
				Message: msg,
			}, nil
		case layers.LayerTypeICMPv6:
			ipv6, err := newIPv6(pd.ipv6)
			if err != nil {
				return nil, err
			}
			msg, err := icmp.ParseMessage(int(layers.IPProtocolICMPv6), append(pd.icmpv6.Contents, pd.icmpv6.Payload...))
			if err != nil {
				return nil, errors.Wrap(err, "failed to parse ICMPv6")
			}
			return &ICMP{
				IP:      ipv6,
				Message: msg,
			}, nil
		}
	}
	// Renamed from "layers": the original local shadowed the imported
	// gopacket "layers" package for the rest of the function.
	layerNames := make([]string, len(decoded))
	for i, l := range decoded {
		layerNames[i] = l.String()
	}
	return nil, fmt.Errorf("Expect to decode IP and ICMP layers, got %s", layerNames)
}
|
package scraper
import (
"bytes"
"fmt"
"io/ioutil"
"net/http"
_ "regexp"
"strconv"
"strings"
"sync"
r "github.com/mohakkataria/kfit-scraper/retriever"
"github.com/PuerkitoBio/goquery"
)
// Partner stores details of a single partner
type Partner struct {
	Name string `csv:"name"`
	City string `csv:"city"`
	Address string `csv:"address"`
	Latitude float64 `csv:"latitude"`
	Longitude float64 `csv:"longitude"`
	Rating float64 `csv:"rating"`
	Phone string `csv:"phone"`
}
// Result stores details of the scraped partners
type Result struct {
	Partners []Partner `json:"results"`
}
// extendedDocument pairs a parsed goquery document with a human-readable
// response size string (e.g. "12kb").
type extendedDocument struct {
	Size string
	Document *goquery.Document
}
// ch carries one Partner per scraped URL from the getPartner goroutines back
// to Scrape; wg tracks those goroutines.
var ch chan Partner
var wg sync.WaitGroup
// host is the base URL the relative partner paths are appended to.
const host = "https://access.kfit.com"
// Scrape fetches every partner page concurrently (one goroutine per URL,
// relative to host) and collects the parsed partners into a Result.
func Scrape(urls []string) Result {
	// Buffer one slot per URL so every goroutine can send without blocking.
	ch = make(chan Partner, len(urls))
	for _, u := range urls {
		wg.Add(1)
		go getPartner(host + u)
	}
	wg.Wait()
	close(ch)
	var result Result
	for p := range ch {
		result.Partners = append(result.Partners, p)
	}
	return result
}
// extendDocument fetches url and returns the parsed goquery document together
// with a human-readable response size such as "12kb".
func extendDocument(url string) (extendedDocument, error) {
	res, err := http.Get(url)
	if err != nil {
		return extendedDocument{}, err
	}
	body, err := ioutil.ReadAll(res.Body)
	// Close the network body as soon as it is consumed: it is replaced by a
	// NopCloser below, so nothing downstream could ever close the original
	// stream, which leaked the underlying connection.
	res.Body.Close()
	if err != nil {
		return extendedDocument{}, err
	}
	size := strconv.Itoa(len(body)/1000) + "kb"
	// Rewind response body so it can be re-read by goquery
	res.Body = ioutil.NopCloser(bytes.NewReader(body))
	doc, err := goquery.NewDocumentFromResponse(res)
	if err != nil {
		return extendedDocument{}, err
	}
	return extendedDocument{size, doc}, nil
}
// getPartner scrapes a single partner page: it reads the rating from the
// DOM, parses the "outlet_details" object embedded in an inline script for
// name/address/city/coordinates, optionally resolves the phone number, and
// sends the resulting Partner on ch. Must run as a goroutine registered
// with wg.
var getPartner = func(url string) {
	defer wg.Done()
	d, err := extendDocument(url)
	if err != nil {
		fmt.Println(
			fmt.Errorf("Unable to create a new document: %s", err.Error()),
		)
		// Bail out: the original fell through here and dereferenced the nil
		// d.Document, panicking the goroutine.
		return
	}
	partner := Partner{}
	rating := d.Document.Find("span .rating").Text()
	partner.Rating, _ = strconv.ParseFloat(rating, 64)
	// Locate the inline script that defines the outlet details object.
	text := ""
	d.Document.Find("script").Each(func(i int, s *goquery.Selection) {
		if strings.Contains(s.Text(), "var outlet_details = ") {
			text = s.Text()
		}
	})
	if len(text) > 0 {
		//parse and remove remaining data
		firstOpeningBracketIndex := strings.IndexRune(text, '{')
		firstClosingBracketIndex := strings.IndexRune(text, '}')
		partnerDataJsonString := text[firstOpeningBracketIndex : firstClosingBracketIndex+1]
		// Field values are sliced out between the known key positions and
		// then trimmed of whitespace, trailing commas and quotes.
		nameIndex := strings.Index(partnerDataJsonString, "name:")
		addressIndex := strings.Index(partnerDataJsonString, "address:")
		positionIndex := strings.Index(partnerDataJsonString, "position:")
		cityIndex := strings.Index(partnerDataJsonString, "city:")
		iconIndex := strings.Index(text, "icon:")
		partner.Name = strings.Trim(strings.Trim(strings.TrimSpace(partnerDataJsonString[nameIndex+5:addressIndex-1]), ","), "'")
		partner.Address = strings.Trim(strings.Trim(strings.TrimSpace(partnerDataJsonString[addressIndex+8:cityIndex-2]), ","), "'")
		partner.City = strings.Trim(strings.Trim(strings.TrimSpace(strings.ToTitle(strings.Replace(partnerDataJsonString[cityIndex+5:positionIndex-1], "-", " ", -1))), ","), "'")
		// The coordinates live inside "position: ...(lat, lng)...".
		positionString := partnerDataJsonString[positionIndex+9 : iconIndex]
		positionString = positionString[strings.IndexRune(positionString, '(')+1 : strings.IndexRune(positionString, ')')]
		positionCoordinates := strings.Split(positionString, ",")
		latitude, _ := strconv.ParseFloat(strings.Trim(strings.TrimSpace(positionCoordinates[0]), "'"), 64)
		longitude, _ := strconv.ParseFloat(strings.Trim(strings.TrimSpace(positionCoordinates[1]), "'"), 64)
		partner.Latitude = latitude
		partner.Longitude = longitude
	}
	// Only partners with a bookable class table expose a reservation link
	// from which the phone number can be retrieved.
	if d.Document.Find(".classtable .emptytable").Length() == 0 {
		if v, exists := d.Document.Find(".classtable .reserve-col .btn").First().Attr("href"); exists {
			partner.Phone, _ = r.RetrievePartnerPhone(host+v, goquery.NewDocument)
		}
	}
	ch <- partner
}
// GetHeaders returns the CSV column names, in the same order as
// GetSerializedData emits the corresponding values.
func (Partner) GetHeaders() []string {
	headers := []string{"name", "address", "city", "latitude", "longitude", "rating", "phone"}
	return headers
}
// GetSerializedData returns the partner's fields as CSV-ready strings:
// coordinates with six decimal places, the rating with one.
func (p *Partner) GetSerializedData() []string {
	coord := func(f float64) string { return strconv.FormatFloat(f, 'f', 6, 64) }
	return []string{
		p.Name,
		p.Address,
		p.City,
		coord(p.Latitude),
		coord(p.Longitude),
		strconv.FormatFloat(p.Rating, 'f', 1, 64),
		p.Phone,
	}
}
|
package k8s
import (
"fmt"
"go.starlark.net/starlark"
"github.com/pkg/errors"
"github.com/tilt-dev/tilt/internal/k8s"
"github.com/tilt-dev/tilt/internal/tiltfile/value"
)
// Deserializing locators from starlark values.

// JSONPathImageLocatorListSpec collects the JSON-path image locator specs
// supplied from starlark (either a single value or a sequence of values).
type JSONPathImageLocatorListSpec struct {
	Specs []JSONPathImageLocatorSpec
}
// IsEmpty reports whether no locator specs were supplied.
func (s JSONPathImageLocatorListSpec) IsEmpty() bool {
	count := len(s.Specs)
	return count == 0
}
// Unpack fills s from a starlark value that is either a single spec or a
// sequence of specs, appending each successfully unpacked spec.
func (s *JSONPathImageLocatorListSpec) Unpack(v starlark.Value) error {
	for _, item := range value.ValueOrSequenceToSlice(v) {
		var spec JSONPathImageLocatorSpec
		if err := spec.Unpack(item); err != nil {
			return err
		}
		s.Specs = append(s.Specs, spec)
	}
	return nil
}
// ToImageLocators converts every spec into a k8s.ImageLocator scoped to the
// given object selector, failing on the first invalid spec.
func (s JSONPathImageLocatorListSpec) ToImageLocators(selector k8s.ObjectSelector) ([]k8s.ImageLocator, error) {
	locators := make([]k8s.ImageLocator, 0, len(s.Specs))
	for _, spec := range s.Specs {
		l, err := spec.ToImageLocator(selector)
		if err != nil {
			return nil, err
		}
		locators = append(locators, l)
	}
	return locators, nil
}
// JSONPathImageLocatorSpec holds the JSON path at which a whole image
// reference can be found inside a Kubernetes object.
type JSONPathImageLocatorSpec struct {
	jsonPath string
}
// Unpack fills s from a starlark value, which must be a string holding the
// JSON path.
func (s *JSONPathImageLocatorSpec) Unpack(v starlark.Value) error {
	str, ok := starlark.AsString(v)
	if !ok {
		return fmt.Errorf("Expected string, got: %s", v)
	}
	s.jsonPath = str
	return nil
}
// ToImageLocator converts the spec into a k8s.ImageLocator that finds image
// references at the configured JSON path in objects matched by selector.
func (s JSONPathImageLocatorSpec) ToImageLocator(selector k8s.ObjectSelector) (k8s.ImageLocator, error) {
	return k8s.NewJSONPathImageLocator(selector, s.jsonPath)
}
// JSONPathImageObjectLocatorSpec locates an image that is split across an
// object at jsonPath: the repository lives in repoField and the tag in
// tagField of that object.
type JSONPathImageObjectLocatorSpec struct {
	jsonPath string
	repoField string
	tagField string
}
// IsEmpty reports whether the spec is entirely unset (its zero value).
func (s JSONPathImageObjectLocatorSpec) IsEmpty() bool {
	var zero JSONPathImageObjectLocatorSpec
	return s == zero
}
// Unpack fills s from a starlark dict of the exact form
// {'json_path': str, 'repo_field': str, 'tag_field': str}.
func (s *JSONPathImageObjectLocatorSpec) Unpack(v starlark.Value) error {
	dict, ok := v.(*starlark.Dict)
	if !ok {
		return fmt.Errorf("Expected dict of the form {'json_path': str, 'repo_field': str, 'tag_field': str}. Actual: %s", v)
	}
	fields, err := validateStringDict(dict, []string{"json_path", "repo_field", "tag_field"})
	if err != nil {
		return errors.Wrap(err, "Expected dict of the form {'json_path': str, 'repo_field': str, 'tag_field': str}")
	}
	s.jsonPath = fields[0]
	s.repoField = fields[1]
	s.tagField = fields[2]
	return nil
}
// ToImageLocator converts the spec into a k8s.ImageLocator that reads the
// repo and tag fields of the object found at the configured JSON path.
func (s JSONPathImageObjectLocatorSpec) ToImageLocator(selector k8s.ObjectSelector) (k8s.ImageLocator, error) {
	return k8s.NewJSONPathImageObjectLocator(selector, s.jsonPath, s.repoField, s.tagField)
}
// validateStringDict checks that d contains exactly the keys listed in
// expectedFields, each mapped to a string, and returns the values in
// expectedFields order. Unknown keys, non-string keys or values, and missing
// keys all produce errors.
func validateStringDict(d *starlark.Dict, expectedFields []string) ([]string, error) {
	result := make([]string, len(expectedFields))
	indices := make(map[string]int, len(expectedFields))
	for i, field := range expectedFields {
		indices[field] = i
	}
	items := d.Items()
	for _, kv := range items {
		key, val := kv[0], kv[1]
		keyString, ok := starlark.AsString(key)
		if !ok {
			return nil, fmt.Errorf("Unexpected key: %s", key)
		}
		pos, ok := indices[keyString]
		if !ok {
			return nil, fmt.Errorf("Unexpected key: %s", key)
		}
		valString, ok := starlark.AsString(val)
		if !ok {
			return nil, fmt.Errorf("Expected string at key %q. Got: %s", key, val.Type())
		}
		result[pos] = valString
	}
	// Dict keys are unique, so an item count below the expected number means
	// at least one expected key is absent.
	if len(items) != len(expectedFields) {
		return nil, fmt.Errorf("Missing keys. Actual keys: %s", d.Keys())
	}
	return result, nil
}
|
package logconsts
// Log verbosity levels used by callers of this package.
const (
	// INFO selects informational logging.
	INFO = 1
	// DEBUG selects more verbose debugging output.
	DEBUG = 2
)
|
// -*- Mode: Go; indent-tabs-mode: t -*-
package main
import (
	"fmt"
	"io"
	"log"
	"net"
	"os"
	"path/filepath"
)
func echoServer(c net.Conn) {
for {
buf := make([]byte, 512)
nr, err := c.Read(buf)
if err != nil {
log.Fatal("Read: ", err)
}
data := buf[0:nr]
fmt.Println("Server got: ", string(data))
_, err = c.Write(data)
if err != nil {
log.Fatal("Write: ", err)
}
}
}
// main listens on a unix-domain socket at <socketdir>/sockets/socket and
// serves each accepted connection with echoServer in its own goroutine.
func main() {
	if len(os.Args) <= 1 {
		log.Fatal("Usage unix-domain-socket <socketdir>")
	}
	dir := os.Args[1]
	// filepath.Join builds an OS-correct path instead of raw concatenation.
	path := filepath.Join(dir, "sockets", "socket")
	fmt.Println("socket path is ", path)
	l, err := net.Listen("unix", path)
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()
	for {
		// wait for connection
		conn, err := l.Accept()
		if err != nil {
			log.Fatal("Accept error: ", err)
		}
		// handle the connection in a new goroutine; the loop then returns
		// to accepting, so multiple connections are served concurrently.
		go echoServer(conn)
	}
	// The original had an unreachable fmt.Println after this infinite
	// accept loop (flagged by go vet); it has been removed.
}
|
package "atomic_stream";
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <fcntl.h>
import stream from "../deps/stream/stream.module.c";
// Cached stream-type id for this module; 0 until first registered.
static int _type;
// type returns the stream type id for "atomic" streams, registering the
// name with the stream module on first use. Lazy init; presumably called
// from a single thread — TODO confirm.
export int type() {
	if (_type == 0) {
		_type = stream.register("atomic");
	}
	return _type;
}
// Per-stream state for an atomic write stream. temp and dest are
// heap-allocated and freed by atomic_close/abort.
typedef struct {
	int fd;       // open descriptor of the temp file being written
	char * temp;  // path of the temp file
	char * dest;  // final destination path, installed on close
} context_t;
// atomic_write writes nbyte bytes from buf to the temp file, recording errno
// in *error (when error is non-NULL) on failure, and returns write()'s
// result. NOTE(review): the result is held in an int before being returned
// as ssize_t — confirm writes larger than INT_MAX are not expected.
static ssize_t atomic_write(void * _ctx, const void * buf, size_t nbyte, stream.error_t * error) {
	context_t * ctx = (context_t*) _ctx;
	int e = global.write(ctx->fd, buf, nbyte);
	if (e < 0 && error != NULL) {
		error->code = errno;
		error->message = strerror(error->code);
	}
	return e;
}
// atomic_close closes the temp file and, on success, atomically publishes it
// by renaming it over the destination path. The context and its paths are
// freed on every exit path; on close failure the temp file is unlinked.
// NOTE(review): when close() fails but error == NULL, this branch is skipped
// and the rename still runs on a file whose close failed — confirm intended.
static ssize_t atomic_close(void * _ctx, stream.error_t * error) {
	context_t * ctx = (context_t*) _ctx;
	int e = global.close(ctx->fd);
	if (e < 0 && error != NULL) {
		error->code = errno;
		error->message = strerror(error->code);
		global.unlink(ctx->temp);
		global.free(ctx->temp);
		global.free(ctx->dest);
		global.free(ctx);
		return e;
	}
	// Publish: rename is atomic on POSIX filesystems.
	e = global.rename(ctx->temp, ctx->dest);
	if (e < 0 && error != NULL) {
		error->code = errno;
		error->message = strerror(error->code);
	}
	global.free(ctx->temp);
	global.free(ctx->dest);
	global.free(ctx);
	return e;
}
// get_temp builds a salted sibling filename for base: "name-<hex>.ext".
// When base has no extension the salt is simply appended ("name-<hex>");
// previously strrchr() returning NULL made the "%.*s" length computation
// read from a NULL-derived pointer difference (undefined behavior).
// Returns a heap string the caller must free, or NULL if asprintf fails.
static char * get_temp(char * base) {
	int salt = rand();
	char * temp = NULL;
	char * ext = strrchr(base, '.');
	if (ext == NULL) {
		asprintf(&temp, "%s-%x", base, salt);
	} else {
		asprintf(&temp, "%.*s-%x%s", (int)(ext - base), base, salt, ext);
	}
	return temp;
}
// open creates an atomic-write stream for _dest: bytes are written to a
// salted temp file beside the destination and renamed over it on close.
// Temp-name collisions (O_EXCL + EEXIST) are retried with a fresh salt;
// any other open failure is returned as an error stream.
export stream.t * open(const char * _dest) {
	int fd = 0;
	char * dest = strdup(_dest);
	char * temp = NULL;
	do {
		temp = get_temp(dest);
		fd = global.open(temp, O_WRONLY | O_CREAT | O_EXCL, 0666);
		// Collision with an existing file: discard this candidate name.
		if (fd == -1 && errno == EEXIST) global.free(temp);
	} while(fd == -1 && errno == EEXIST);
	if ( fd < 0 ) {
		global.free(dest);
		global.free(temp);
		return stream.error(NULL, errno, strerror(errno));
	}
	context_t * ctx = malloc(sizeof(context_t));
	ctx->fd = fd;
	ctx->temp = temp;
	ctx->dest = dest;
	// Write-only stream: read and pipe are unsupported.
	stream.t * s = malloc(sizeof(stream.t));
	s->ctx = ctx;
	s->read = NULL;
	s->write = atomic_write;
	s->pipe = NULL;
	s->close = atomic_close;
	s->type = type();
	s->error.code = 0;
	s->error.message = NULL;
	return s;
}
// abort discards an in-progress atomic stream without committing: closes
// the temp fd, unlinks the temp file (recording any unlink error on the
// stream) and frees all context memory. Streams of any other type are
// delegated to stream.close instead.
export ssize_t abort(stream.t * s) {
	if (s->type != type()) return stream.close(s);
	context_t * ctx = (context_t*) s->ctx;
	global.close(ctx->fd);
	int e = global.unlink(ctx->temp);
	if (e < 0) {
		s->error.code = errno;
		s->error.message = strerror(s->error.code);
	}
	global.free(ctx->temp);
	global.free(ctx->dest);
	global.free(ctx);
	return e;
}
|
package management
import (
"context"
"github.com/caos/zitadel/pkg/grpc/management"
"github.com/golang/protobuf/ptypes/empty"
)
// GetZitadelDocs returns the OIDC issuer and discovery endpoint taken from
// the server's system defaults; the empty request body is ignored.
func (s *Server) GetZitadelDocs(ctx context.Context, _ *empty.Empty) (*management.ZitadelDocs, error) {
	return &management.ZitadelDocs{
		Issuer: s.systemDefaults.ZitadelDocs.Issuer,
		DiscoveryEndpoint: s.systemDefaults.ZitadelDocs.DiscoveryEndpoint,
	}, nil
}
|
package config_test
import (
"encoding/json"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/gojek/fiber/config"
)
// durCfgTestSuite is one table-driven case for Duration JSON decoding:
// the raw JSON input, the expected parsed value, and whether decoding
// should succeed.
type durCfgTestSuite struct {
	input string
	duration time.Duration
	success bool
}
// TestDurationUnmarshalJSON table-drives config.Duration.UnmarshalJSON
// over plain, quoted, and invalid duration strings.
func TestDurationUnmarshalJSON(t *testing.T) {
	cases := map[string]durCfgTestSuite{
		"valid_seconds":     {input: "2s", duration: time.Second * 2, success: true},
		"valid_minute":      {input: "1m", duration: time.Minute, success: true},
		"valid_quoted_time": {input: "\"2s\"", duration: time.Second * 2, success: true},
		"invalid_input":     {input: "xyz", duration: 0, success: false},
	}
	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			var parsed config.Duration
			parseErr := parsed.UnmarshalJSON([]byte(tc.input))
			assert.Equal(t, tc.duration, time.Duration(parsed))
			assert.Equal(t, tc.success, parseErr == nil)
		})
	}
}
// TestDurationMarshalJSON checks that a Duration encodes as a quoted Go
// duration string.
func TestDurationMarshalJSON(t *testing.T) {
	encoded, err := json.Marshal(config.Duration(2 * time.Second))
	assert.Equal(t, `"2s"`, string(encoded))
	assert.NoError(t, err)
}
|
package main
import(
"image"
)
// Challenge016 solves pythonchallenge #16: download mozart.gif (basic auth
// huge/file), realign each image row on its magenta marker via
// straightImage, save the result as a GIF, and display it.
// NOTE(review): the hard-coded "\\" separators make this Windows-only;
// filepath.Join would be portable — confirm the target platform first.
func (c *Challenge) Challenge016() {
	path:=".\\Data\\016"
	EnsureDir(path)
	gif:="http://www.pythonchallenge.com/pc/return/mozart.gif"
	filename:=path+"\\mozart.gif"
	DownloadWithBasicAuth(gif, filename, "huge", "file")
	im:=OpenImage(filename)
	newImg:=straightImage(im)
	giffile := path+"\\mozart.go.gif"
	SaveImage(giffile, newImg, "gif")
	ShowImage(giffile)
}
func straightImage(im image.Image) *image.RGBA {
bounds:=im.Bounds()
newImg:=image.NewRGBA(bounds)
for y := bounds.Min.Y; y < bounds.Max.Y; y++ {
for x := bounds.Min.X; x < bounds.Max.X; x++ {
r,g,b,_:=im.At(x,y).RGBA()
if r==65535 && g==0 && b==65535 {
for xx := bounds.Min.X; xx < bounds.Max.X; xx++ {
newImg.Set(xx,y,im.At((xx+x)%bounds.Dx(),y))
}
continue
}
}
}
return newImg
}
|
package notifier
import (
"sync"
"time"
"github.com/void616/gm.mint.sender/internal/sender/db/types"
"github.com/void616/gotask"
)
// Task is the notifier's main loop, run under gotask. It starts two
// symmetric worker goroutines — one delivering "approvement" events, one
// delivering "sending" events — then polls the token until stopped,
// closes the stopper channel, and waits for both workers to drain.
// NOTE(review): the two goroutine bodies are copy-paste twins differing
// only in the DAO list/update calls and the publish method; a shared
// helper would remove the duplication, but the exact statement ordering
// (mark-notified before publish, re-arm on failure) is deliberate, so the
// code is left untouched here.
func (n *Notifier) Task(token *gotask.Token) {
	var wg sync.WaitGroup
	var stopper = make(chan struct{})
	// sleep pauses for d but wakes immediately once stopper is closed.
	var sleep = func(d time.Duration) {
		select {
		case <-stopper:
		case <-time.After(d):
		}
	}
	// approvements
	wg.Add(1)
	go func() {
		defer wg.Done()
		logger := n.logger.WithField("approvements", "")
		for {
			// Non-blocking shutdown check at the top of each pass.
			select {
			case <-stopper:
				return
			default:
			}
			// get list
			list, err := n.dao.ListUnnotifiedApprovements(itemsPerShot)
			if err != nil {
				logger.WithError(err).Error("Failed to get unnotified requests")
				sleep(time.Second * 30)
				continue
			}
			// nothing
			if len(list) == 0 {
				sleep(time.Second * 30)
				continue
			}
			out := false
			for _, snd := range list {
				if out {
					break
				}
				// Mark as notified BEFORE publishing so a crash cannot
				// double-send; failures below re-arm NotifyAt instead.
				{
					now := time.Now().UTC()
					if snd.FirstNotifyAt == nil {
						snd.FirstNotifyAt = &now
					}
					snd.NotifyAt = &now
					snd.Notified = true
				}
				if err := n.dao.UpdateApprovement(snd); err != nil {
					logger.
						WithError(err).
						WithField("id", snd.ID).
						Error("Failed to update request")
					sleep(time.Second * 30)
					out = true
					continue
				}
				// Empty description signals success to subscribers.
				notiErrorDesc := "Transaction failed"
				if snd.Status == types.SendingConfirmed {
					notiErrorDesc = ""
				}
				// notify
				var notiErr error
				switch snd.Transport {
				case types.SendingNats:
					if n.natsTransporter != nil {
						notiErr = n.natsTransporter.PublishApprovedEvent(
							snd.Status == types.SendingConfirmed,
							notiErrorDesc,
							snd.Service, snd.RequestID,
							snd.To, snd.Digest,
						)
					} else {
						logger.Warn("Nats transport is disabled, skipping notification")
						continue
					}
				case types.SendingHTTP:
					if n.httpTransporter != nil {
						if snd.CallbackURL != "" {
							notiErr = n.httpTransporter.PublishApprovedEvent(
								snd.Status == types.SendingConfirmed,
								notiErrorDesc,
								snd.Service, snd.RequestID, snd.CallbackURL,
								snd.To, snd.Digest,
							)
						}
					} else {
						logger.Warn("HTTP transport is disabled, skipping notification")
						continue
					}
				default:
					logger.Errorf("Transport %v is not implemented", snd.Transport)
					continue
				}
				if notiErr != nil {
					logger.WithField("id", snd.ID).WithError(notiErr).Error("Failed to notify")
					// notify next time: back off on a schedule keyed to how
					// long this request has been failing.
					when := time.Now().UTC()
					if snd.FirstNotifyAt != nil {
						mikes := time.Now().UTC().Sub(*snd.FirstNotifyAt).Minutes()
						switch {
						// for 5m: every 1m
						case mikes < 5:
							when = when.Add(time.Minute)
						// then for 30m: every 5m
						case mikes < 35:
							when = when.Add(time.Minute * 5)
						// then for 60m: every 10m
						case mikes < 95:
							when = when.Add(time.Minute * 10)
						// then every 120m
						default:
							when = when.Add(time.Minute * 120)
						}
					} else {
						// No first-notify timestamp: effectively park it.
						when = when.Add(time.Hour * 24 * 365)
					}
					// mark as unnotified
					snd.NotifyAt = &when
					snd.Notified = false
					if err := n.dao.UpdateApprovement(snd); err != nil {
						logger.
							WithError(err).
							WithField("id", snd.ID).
							Error("Failed to update request")
						sleep(time.Second * 30)
						out = true
						continue
					}
				} else {
					logger.WithField("id", snd.ID).Debug("Notified")
				}
			}
		}
	}()
	// sendings: identical flow, but over sendings and PublishSentEvent.
	wg.Add(1)
	go func() {
		defer wg.Done()
		logger := n.logger.WithField("sendings", "")
		for {
			select {
			case <-stopper:
				return
			default:
			}
			// get list
			list, err := n.dao.ListUnnotifiedSendings(itemsPerShot)
			if err != nil {
				logger.WithError(err).Error("Failed to get unnotified requests")
				sleep(time.Second * 30)
				continue
			}
			// nothing
			if len(list) == 0 {
				sleep(time.Second * 30)
				continue
			}
			out := false
			for _, snd := range list {
				if out {
					break
				}
				// mark as notified
				{
					now := time.Now().UTC()
					if snd.FirstNotifyAt == nil {
						snd.FirstNotifyAt = &now
					}
					snd.NotifyAt = &now
					snd.Notified = true
				}
				if err := n.dao.UpdateSending(snd); err != nil {
					logger.
						WithError(err).
						WithField("id", snd.ID).
						Error("Failed to update request")
					sleep(time.Second * 30)
					out = true
					continue
				}
				notiErrorDesc := "Transaction failed"
				if snd.Status == types.SendingConfirmed {
					notiErrorDesc = ""
				}
				// notify
				var notiErr error
				switch snd.Transport {
				case types.SendingNats:
					if n.natsTransporter != nil {
						notiErr = n.natsTransporter.PublishSentEvent(
							snd.Status == types.SendingConfirmed,
							notiErrorDesc,
							snd.Service, snd.RequestID,
							snd.To, snd.Token, snd.Amount, snd.Digest,
						)
					} else {
						logger.Warn("Nats transport is disabled, skipping notification")
						continue
					}
				case types.SendingHTTP:
					if n.httpTransporter != nil {
						if snd.CallbackURL != "" {
							notiErr = n.httpTransporter.PublishSentEvent(
								snd.Status == types.SendingConfirmed,
								notiErrorDesc,
								snd.Service, snd.RequestID, snd.CallbackURL,
								snd.To, snd.Token, snd.Amount, snd.Digest,
							)
						}
					} else {
						logger.Warn("HTTP transport is disabled, skipping notification")
						continue
					}
				default:
					logger.Errorf("Transport %v is not implemented", snd.Transport)
					continue
				}
				if notiErr != nil {
					logger.WithField("id", snd.ID).WithError(notiErr).Error("Failed to notify")
					// notify next time
					when := time.Now().UTC()
					if snd.FirstNotifyAt != nil {
						mikes := time.Now().UTC().Sub(*snd.FirstNotifyAt).Minutes()
						switch {
						// for 5m: every 1m
						case mikes < 5:
							when = when.Add(time.Minute)
						// then for 30m: every 5m
						case mikes < 35:
							when = when.Add(time.Minute * 5)
						// then for 60m: every 10m
						case mikes < 95:
							when = when.Add(time.Minute * 10)
						// then every 120m
						default:
							when = when.Add(time.Minute * 120)
						}
					} else {
						when = when.Add(time.Hour * 24 * 365)
					}
					// mark as unnotified
					snd.NotifyAt = &when
					snd.Notified = false
					if err := n.dao.UpdateSending(snd); err != nil {
						logger.
							WithError(err).
							WithField("id", snd.ID).
							Error("Failed to update request")
						sleep(time.Second * 30)
						out = true
						continue
					}
				} else {
					logger.WithField("id", snd.ID).Debug("Notified")
				}
			}
		}
	}()
	// wait interruption
	for !token.Stopped() {
		time.Sleep(time.Second)
	}
	close(stopper)
	wg.Wait()
}
|
package config
import (
"log"
"time"
"github.com/BurntSushi/toml"
)
// ConfStruct is used to unmarshal the config.toml
// ConfStruct is used to unmarshal the config.toml. Field tags map the
// snake_case TOML keys onto the struct; duration fields accept Go
// duration strings (see duration.UnmarshalText).
type ConfStruct struct {
	Prefix []string `toml:"Prefix"`
	BotToken string `toml:"Bot_Token"`
	MongoURL string `toml:"Mongo_URL"`
	BotStatus string `toml:"Bot_Status"`
	MaxCharacterRoll uint64 `toml:"Max_Character_Roll"`
	MaxCharacterDrop uint `toml:"Max_Character_Drop"`
	DropsOnInteract uint64 `toml:"Drops_On_Interact"`
	ListLen int `toml:"List_Len"`
	ListMaxUpdateTime duration `toml:"List_Max_Update_Time"`
	TimeBetweenRolls duration `toml:"Time_Between_Rolls"`
}
type duration struct {
time.Duration
}
func (d *duration) UnmarshalText(text []byte) (err error) {
d.Duration, err = time.ParseDuration(string(text))
return
}
// Retrieve retrieves the config from the file
func Retrieve(filename string) (config ConfStruct) {
if _, err := toml.DecodeFile(filename, &config); err != nil {
log.Fatalln("Couldn't read configuration : ", err)
}
return
}
|
package analytics
import (
"context"
"fmt"
"strconv"
"time"
"github.com/tilt-dev/clusterid"
"github.com/tilt-dev/tilt/internal/analytics"
"github.com/tilt-dev/tilt/internal/container"
"github.com/tilt-dev/tilt/internal/controllers/apis/liveupdate"
"github.com/tilt-dev/tilt/internal/feature"
"github.com/tilt-dev/tilt/internal/k8s"
"github.com/tilt-dev/tilt/internal/store"
"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
)
// How often to periodically report data for analytics while Tilt is running
const analyticsReportingInterval = time.Minute * 15

// AnalyticsReporter periodically gathers usage stats from the store and
// sends them through TiltAnalytics. `started` guards against launching the
// reporting goroutine more than once.
type AnalyticsReporter struct {
	a *analytics.TiltAnalytics
	store store.RStore
	kClient k8s.Client
	env clusterid.Product
	featureDefaults feature.Defaults
	started bool
}
// OnChange starts the background reporting loop the first time it observes
// an initialized, error-free state (non-zero start time, no Tiltfile
// error). Subsequent calls are no-ops. The goroutine reports once ~10s
// after startup and then every analyticsReportingInterval until ctx ends.
func (ar *AnalyticsReporter) OnChange(ctx context.Context, st store.RStore, _ store.ChangeSummary) error {
	if ar.started {
		return nil
	}
	state := st.RLockState()
	defer st.RUnlockState()
	// wait until state has been kinda initialized
	if !state.TiltStartTime.IsZero() && state.LastMainTiltfileError() == nil {
		ar.started = true
		go func() {
			select {
			case <-time.After(10 * time.Second):
				ar.report(ctx) // report once pretty soon after startup...
			case <-ctx.Done():
				return
			}
			for {
				select {
				case <-time.After(analyticsReportingInterval):
					// and once every <interval> thereafter
					ar.report(ctx)
				case <-ctx.Done():
					return
				}
			}
		}()
	}
	return nil
}
// Compile-time check that AnalyticsReporter satisfies store.Subscriber.
var _ store.Subscriber = &AnalyticsReporter{}

// ProvideAnalyticsReporter wires up an AnalyticsReporter; the reporting
// loop itself only starts on the first qualifying OnChange call.
func ProvideAnalyticsReporter(
	a *analytics.TiltAnalytics,
	st store.RStore,
	kClient k8s.Client,
	env clusterid.Product,
	fDefaults feature.Defaults) *AnalyticsReporter {
	return &AnalyticsReporter{
		a: a,
		store: st,
		kClient: kClient,
		env: env,
		featureDefaults: fDefaults,
		started: false,
	}
}
// report takes a read-locked snapshot of the store, tallies per-manifest
// counters (local/k8s/docker-compose resources, live-update usage, labels,
// enabled resources), and emits them as a single "up.running" analytics
// event with string-valued stats.
func (ar *AnalyticsReporter) report(ctx context.Context) {
	st := ar.store.RLockState()
	defer ar.store.RUnlockState()
	var dcCount, k8sCount, liveUpdateCount, unbuiltCount,
		sameImgMultiContainerLiveUpdate, multiImgLiveUpdate,
		localCount, localServeCount, enabledCount int
	labelKeySet := make(map[string]bool)
	for _, mt := range st.ManifestTargets {
		m := mt.Manifest
		for key := range m.Labels {
			labelKeySet[key] = true
		}
		if mt.State.DisableState == v1alpha1.DisableStateEnabled {
			enabledCount++
		}
		if m.IsLocal() {
			localCount++
			lt := m.LocalTarget()
			if !lt.ServeCmd.Empty() {
				localServeCount++
			}
		}
		var refInjectCounts map[string]int
		if m.IsK8s() {
			k8sCount++
			refInjectCounts = m.K8sTarget().RefInjectCounts()
			if len(m.ImageTargets) == 0 {
				unbuiltCount++
			}
		}
		if m.IsDC() {
			dcCount++
		}
		// Track, per manifest: any live update at all, live updates across
		// multiple images, and multiple containers sharing one image.
		var seenLU, multiImgLU, multiContainerLU bool
		for _, it := range m.ImageTargets {
			if !liveupdate.IsEmptySpec(it.LiveUpdateSpec) {
				if !seenLU {
					seenLU = true
					liveUpdateCount++
				} else if !multiImgLU {
					multiImgLU = true
				}
				multiContainerLU = multiContainerLU ||
					refInjectCounts[it.ImageMapSpec.Selector] > 0
			}
		}
		if multiContainerLU {
			sameImgMultiContainerLiveUpdate++
		}
		if multiImgLU {
			multiImgLiveUpdate++
		}
	}
	stats := map[string]string{
		"up.starttime": st.TiltStartTime.Format(time.RFC3339),
		"builds.completed_count": strconv.Itoa(st.CompletedBuildCount),
		// env should really be a global tag, but there's a circular dependency
		// between the global tags and env initialization, so we add it manually.
		"env": k8s.AnalyticsEnv(ar.env),
		"term_mode": strconv.Itoa(int(st.TerminalMode)),
	}
	// NOTE(review): `> 1` skips registry/runtime stats for a single k8s
	// resource — looks like it was meant to be `> 0`; confirm intent.
	if k8sCount > 1 {
		registry := ar.kClient.LocalRegistry(ctx)
		if !container.IsEmptyRegistry(registry) {
			if registry.Host != "" {
				stats["k8s.registry.host"] = "1"
			}
			if registry.HostFromContainerRuntime != registry.Host {
				stats["k8s.registry.hostFromCluster"] = "1"
			}
		}
		stats["k8s.runtime"] = string(ar.kClient.ContainerRuntime(ctx))
	}
	tiltfileIsInError := "false"
	if st.LastMainTiltfileError() != nil {
		tiltfileIsInError = "true"
	} else {
		// only report when there's no tiltfile error, to avoid polluting aggregations
		stats["resource.count"] = strconv.Itoa(len(st.ManifestDefinitionOrder))
		stats["resource.local.count"] = strconv.Itoa(localCount)
		stats["resource.localserve.count"] = strconv.Itoa(localServeCount)
		stats["resource.dockercompose.count"] = strconv.Itoa(dcCount)
		stats["resource.k8s.count"] = strconv.Itoa(k8sCount)
		stats["resource.liveupdate.count"] = strconv.Itoa(liveUpdateCount)
		stats["resource.unbuiltresources.count"] = strconv.Itoa(unbuiltCount)
		stats["resource.sameimagemultiplecontainerliveupdate.count"] = strconv.Itoa(sameImgMultiContainerLiveUpdate)
		stats["resource.multipleimageliveupdate.count"] = strconv.Itoa(multiImgLiveUpdate)
		stats["label.count"] = strconv.Itoa(len(labelKeySet))
		stats["resource.enabled.count"] = strconv.Itoa(enabledCount)
	}
	stats["tiltfile.error"] = tiltfileIsInError
	// Only surface features that are active by default and enabled.
	for k, v := range st.Features {
		if ar.featureDefaults[k].Status == feature.Active && v {
			stats[fmt.Sprintf("feature.%s.enabled", k)] = strconv.FormatBool(v)
		}
	}
	ar.a.Incr("up.running", stats)
}
|
package anchors
import (
"net/rpc"
)
// NewResonanceTask carries a resonance trade request; all values travel as
// strings. NOTE(review): field semantics (units of Amount, signature
// scheme) are not visible here — confirm against the trade server.
type NewResonanceTask struct {
	Address string
	Amount string
	Signature string
}
// SigResonance mirrors NewResonanceTask field-for-field; presumably the
// signed counterpart — confirm with the server-side handler.
type SigResonance struct {
	Address string
	Amount string
	Signature string
}
// Client wraps an RPC connection to the trade server plus the URL it was
// dialed with.
type Client struct {
	client *rpc.Client
	path string
}
// NewClient dials the trade server at url over HTTP RPC and returns a
// connected Client, or the dial error.
func NewClient(url string) (*Client, error) {
	conn, err := rpc.DialHTTP("tcp", url)
	if err != nil {
		return nil, err
	}
	c := &Client{client: conn, path: url}
	return c, nil
}
// ResonanceTrade invokes TradeServer.ResonanceTrade with body and returns
// the call error. NOTE(review): the reply argument is nil, which net/rpc
// treats specially — verify the server method tolerates a discarded reply.
func (c *Client) ResonanceTrade(body *ResonanceMsg) error {
	return c.client.Call("TradeServer.ResonanceTrade", body, nil)
}
|
package main
import (
"strconv"
"fmt"
)
// main prints the 3-digit palindrome candidates, then the count of
// superpalindromes in [4, 1000].
func main() {
	fmt.Println(genPalindrome(3))
	fmt.Println(superpalindromesInRange("4", "1000"))
}
// superpalindromesInRange counts integers p in [L, R] (decimal strings)
// such that both p and its square root are palindromes. Candidates are
// square roots drawn from genPalindrome by digit length; since each list
// is ascending and longer lists only contain larger true palindromes, the
// scan stops as soon as a candidate's square exceeds R.
// Changes from the original: the leftover debug print of each match was
// removed, and ParseInt now uses bitSize 64 so large bounds don't
// overflow on 32-bit platforms. Parse errors are still ignored —
// malformed input behaves as 0, matching LeetCode-style usage.
func superpalindromesInRange(L string, R string) int {
	lo, _ := strconv.ParseInt(L, 10, 64)
	hi, _ := strconv.ParseInt(R, 10, 64)
	count := 0
	for length := 1; length < 20; length++ {
		for _, root := range genPalindrome(length) {
			sq := root * root
			if sq > hi {
				// All remaining candidates are larger; done.
				return count
			}
			if sq < lo {
				continue
			}
			if ispalin(strconv.FormatInt(sq, 10)) {
				count += 1
			}
		}
	}
	return count
}
// ispalin reports whether r reads the same forwards and backwards,
// comparing bytes from both ends toward the middle. Empty and one-byte
// strings are palindromes.
func ispalin(r string) bool {
	for i, j := 0, len(r)-1; i < j; i, j = i+1, j-1 {
		if r[i] != r[j] {
			return false
		}
	}
	return true
}
// genPalindrome returns every n-digit decimal palindrome in ascending
// order (including values with a leading zero digit, which callers must
// filter), built recursively by wrapping each (n-2)-digit palindrome in a
// matching outer digit pair.
func genPalindrome(n int) []int64 {
	switch n {
	case 0:
		return nil
	case 1:
		return []int64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
	case 2:
		return []int64{0, 11, 22, 33, 44, 55, 66, 77, 88, 99}
	}
	inner := genPalindrome(n - 2)
	out := make([]int64, 0, 10*len(inner))
	for digit := int64(0); digit <= 9; digit++ {
		// The outer digit shifted to the highest decimal place.
		lead := digit
		for p := 1; p < n; p++ {
			lead *= 10
		}
		for _, mid := range inner {
			out = append(out, lead+mid*10+digit)
		}
	}
	return out
}
|
package main
import (
"fmt"
)
// main runs canJump on the classic example input and prints the result.
func main() {
	nums := []int{2, 3, 1, 1, 4}
	fmt.Println(canJump(nums))
}
// 从前先后
// func canJump(nums []int) bool {
// start, end := 0, 0
// for start <= end && end < len(nums)-1 {
// end = int(math.Max(float64(end), float64(nums[start]+start)))
// start++
// }
// return end >= len(nums)-1
// }
// 从后向前
// canJump reports whether the last index is reachable from index 0 when
// nums[i] is the maximum jump length from i. Greedy backward scan: walk
// from the tail, tracking the leftmost index known to reach the end; the
// answer is whether that index retreats all the way to 0.
func canJump(nums []int) bool {
	lastReachable := len(nums) - 1
	for i := len(nums) - 2; i >= 0; i-- {
		if i+nums[i] >= lastReachable {
			lastReachable = i
		}
	}
	return lastReachable <= 0
}
|
/*
@Time : 2018/10/24 15:02
@Author : zhaoxiaoqiang
@File : mong.go
@Software: GoLand
*/
package main
import (
"fmt"
mgo "gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
)
// Person is the demo document stored in the "people" collection: a name
// and a phone number.
type Person struct {
	Name string
	Phone string
}
// main is a minimal mgo walkthrough: connect to a local MongoDB, insert
// two Person documents, query one back by name, and print it.
func main() {
	session, err := mgo.Dial("localhost:27017")
	if err != nil {
		// The original used Println with a literal "%v" in the output and
		// then fell through to defer session.Close() on a nil session;
		// report the error properly and stop.
		fmt.Printf("mgo.Dial error: %v\n", err)
		return
	}
	defer session.Close()
	// Optional. Switch the session to a monotonic behavior.
	session.SetMode(mgo.Monotonic, true)
	c := session.DB("test").C("people")
	err = c.Insert(&Person{"superWang", "13478808311"},
		&Person{"David", "15040268074"})
	if err != nil {
		fmt.Printf("c.insert error: %v\n", err)
	}
	result := Person{}
	err = c.Find(bson.M{"name": "superWang"}).One(&result)
	if err != nil {
		fmt.Printf("c.find error: %v\n", err)
	}
	fmt.Println("Name:", result.Name)
	fmt.Println("Phone:", result.Phone)
	fmt.Println("test1")
	fmt.Println("test2")
	fmt.Println("test3")
	fmt.Println("test4")
	fmt.Println("test5")
	fmt.Println("test6")
	fmt.Println("test7")
}
|
package main
import (
"fmt"
"strconv"
)
// main demonstrates Go's explicit type conversions between numeric types,
// strings, and booleans. (Comments translated from Portuguese.)
func main() {
	// int to float64
	x := 2.4
	y := 2
	fmt.Println(x / float64(y))
	// explicit conversions are required so the compiler can perform the math
	// float64 to int (truncates the fractional part)
	nota := 6.9
	notaFinal := int(nota)
	fmt.Println(notaFinal)
	// int to string
	fmt.Println("Teste " + strconv.Itoa(123))
	// string to int
	// Note: Atoi returns two values: the converted value and an error for
	// inputs that are not valid integers
	value, _ := strconv.Atoi("123")
	fmt.Println(value - 122)
	// string to boolean
	// Note: ParseBool accepts <1,t,T,true,TRUE,True> as true and
	// <0,f,F,false,FALSE,False> as false; anything else returns an error
	aux, _ := strconv.ParseBool("f")
	fmt.Println(aux)
}
|
/*
trader API Engine
*/
package chbtc
import (
. "common"
. "config"
"encoding/json"
"errors"
"logger"
"util"
)
// ChTicker is the top-level ticker JSON envelope from the exchange.
type ChTicker struct {
	Ticker ChTickerPrice
}
// ChTickerPrice holds the ticker price fields; the exchange delivers all
// values as strings.
type ChTickerPrice struct {
	Buy string
	High string
	Last string
	Low string
	Sell string
	Vol string
}
// getTicker fetches the configured ch_ticker_url and decodes the JSON
// body into a ChTicker. The symbol parameter is currently unused — the
// URL comes entirely from config.
func (w *Chbtc) getTicker(symbol string) (ticker ChTicker, err error) {
	ticker_url := Config["ch_ticker_url"]
	body, err := util.HttpGet(ticker_url)
	if err != nil {
		logger.Errorln(err)
		return
	}
	// NOTE(review): decode failures log at Info level while HTTP failures
	// use Error — confirm the inconsistency is intentional.
	if err = json.Unmarshal([]byte(body), &ticker); err != nil {
		logger.Infoln(err)
		return
	}
	return
}
// getDepth fetches the configured ch_depth_url and converts the JSON
// order book ({"asks": [[price, amount], ...], "bids": ...}) into an
// OrderBook, keeping at most DEPTH levels per side. The symbol parameter
// is currently unused.
// NOTE(review): the asks/bids extraction uses unchecked type assertions —
// a response missing either key, or with a non-array value, panics
// instead of returning an error. The inner per-element assertions are
// checked; consider checking these two as well.
func (w *Chbtc) getDepth(symbol string) (orderBook OrderBook, err error) {
	depth_url := Config["ch_depth_url"]
	body, err := util.HttpGet(depth_url)
	if err != nil {
		logger.Errorln(err)
		return
	}
	defaultstruct := make(map[string]interface{})
	err = json.Unmarshal([]byte(body), &defaultstruct)
	if err != nil {
		// NOTE(review): this logs the (empty) decoded map rather than the
		// decode error itself — probably meant to log err.
		logger.Infoln("defaultstruct", defaultstruct)
		return
	}
	asks := defaultstruct["asks"].([]interface{})
	bids := defaultstruct["bids"].([]interface{})
	for i, ask := range asks {
		_ask := ask.([]interface{})
		price, ret := _ask[0].(float64)
		if !ret {
			err = errors.New("data wrong")
			return orderBook, err
		}
		amount, ret := _ask[1].(float64)
		if !ret {
			err = errors.New("data wrong")
			return orderBook, err
		}
		order := MarketOrder{
			Price: price,
			Amount: amount,
		}
		// Only the first DEPTH levels are kept.
		if i < DEPTH {
			orderBook.Asks[i] = order
		} else {
			break
		}
	}
	for i, bid := range bids {
		_bid := bid.([]interface{})
		price, ret := _bid[0].(float64)
		if !ret {
			err = errors.New("data wrong")
			return orderBook, err
		}
		amount, ret := _bid[1].(float64)
		if !ret {
			err = errors.New("data wrong")
			return orderBook, err
		}
		order := MarketOrder{
			Price: price,
			Amount: amount,
		}
		if i < DEPTH {
			orderBook.Bids[i] = order
		} else {
			break
		}
	}
	return
}
|
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build amd64
// +build amd64
package kvm
import (
"fmt"
"unsafe"
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
)
// loadSegments copies the current segments.
//
// The host thread's FS and GS base registers are read via
// arch_prctl(ARCH_GET_FS/GS) directly into the vCPU's register file, then
// tid is recorded as the thread the segments came from.
//
// This may be called from within the signal context and throws on error.
//
//go:nosplit
func (c *vCPU) loadSegments(tid uint64) {
	if _, _, errno := unix.RawSyscall(
		unix.SYS_ARCH_PRCTL,
		linux.ARCH_GET_FS,
		uintptr(unsafe.Pointer(&c.CPU.Registers().Fs_base)),
		0); errno != 0 {
		throw("getting FS segment")
	}
	if _, _, errno := unix.RawSyscall(
		unix.SYS_ARCH_PRCTL,
		linux.ARCH_GET_GS,
		uintptr(unsafe.Pointer(&c.CPU.Registers().Gs_base)),
		0); errno != 0 {
		throw("getting GS segment")
	}
	c.tid.Store(tid)
}
// setCPUID sets the CPUID to be used by the guest, installing the
// package-level cpuidSupported table via KVM_SET_CPUID2 on the vCPU fd.
func (c *vCPU) setCPUID() error {
	if _, _, errno := unix.RawSyscall(
		unix.SYS_IOCTL,
		uintptr(c.fd),
		_KVM_SET_CPUID2,
		uintptr(unsafe.Pointer(&cpuidSupported))); errno != 0 {
		return fmt.Errorf("error setting CPUID: %v", errno)
	}
	return nil
}
// getTSCFreq gets the TSC frequency in kHz via KVM_GET_TSC_KHZ.
//
// If mustSucceed is true, then this function panics on error.
func (c *vCPU) getTSCFreq() (uintptr, error) {
	rawFreq, _, errno := unix.RawSyscall(
		unix.SYS_IOCTL,
		uintptr(c.fd),
		_KVM_GET_TSC_KHZ,
		0 /* ignored */)
	if errno != 0 {
		return 0, errno
	}
	return rawFreq, nil
}
// setTSCFreq sets the TSC frequency (freq is in kHz) via KVM_SET_TSC_KHZ.
func (c *vCPU) setTSCFreq(freq uintptr) error {
	if _, _, errno := unix.RawSyscall(
		unix.SYS_IOCTL,
		uintptr(c.fd),
		_KVM_SET_TSC_KHZ,
		freq /* khz */); errno != 0 {
		return fmt.Errorf("error setting TSC frequency: %v", errno)
	}
	return nil
}
// setTSCOffset sets the TSC offset to zero using the per-vCPU device
// attribute interface (KVM_SET_DEVICE_ATTR with KVM_VCPU_TSC_OFFSET).
func (c *vCPU) setTSCOffset() error {
	offset := uint64(0)
	// Mirrors struct kvm_device_attr from the KVM UAPI.
	da := struct {
		flags uint32
		group uint32
		attr uint64
		addr unsafe.Pointer
	}{
		group: _KVM_VCPU_TSC_CTRL,
		attr: _KVM_VCPU_TSC_OFFSET,
		addr: unsafe.Pointer(&offset),
	}
	if _, _, errno := unix.RawSyscall(
		unix.SYS_IOCTL,
		uintptr(c.fd),
		_KVM_SET_DEVICE_ATTR,
		uintptr(unsafe.Pointer(&da))); errno != 0 {
		return fmt.Errorf("error setting tsc offset: %v", errno)
	}
	return nil
}
// setTSC sets the TSC value by writing MSR_IA32_TSC through KVM_SET_MSRS.
func (c *vCPU) setTSC(value uint64) error {
	const _MSR_IA32_TSC = 0x00000010
	registers := modelControlRegisters{
		nmsrs: 1,
	}
	registers.entries[0].index = _MSR_IA32_TSC
	registers.entries[0].data = value
	if _, _, errno := unix.RawSyscall(
		unix.SYS_IOCTL,
		uintptr(c.fd),
		_KVM_SET_MSRS,
		uintptr(unsafe.Pointer(&registers))); errno != 0 {
		return fmt.Errorf("error setting tsc: %v", errno)
	}
	return nil
}
// setUserRegisters sets user registers in the vCPU via KVM_SET_REGS,
// returning the raw errno (0 on success).
//
//go:nosplit
func (c *vCPU) setUserRegisters(uregs *userRegs) unix.Errno {
	if _, _, errno := unix.RawSyscall(
		unix.SYS_IOCTL,
		uintptr(c.fd),
		_KVM_SET_REGS,
		uintptr(unsafe.Pointer(uregs))); errno != 0 {
		return errno
	}
	return 0
}
// getUserRegisters reloads user registers in the vCPU via KVM_GET_REGS.
//
// This is safe to call from a nosplit context.
//
//go:nosplit
func (c *vCPU) getUserRegisters(uregs *userRegs) unix.Errno {
	if _, _, errno := unix.RawSyscall( // escapes: no.
		unix.SYS_IOCTL,
		uintptr(c.fd),
		_KVM_GET_REGS,
		uintptr(unsafe.Pointer(uregs))); errno != 0 {
		return errno
	}
	return 0
}
// setSystemRegisters sets system registers via KVM_SET_SREGS.
func (c *vCPU) setSystemRegisters(sregs *systemRegs) error {
	if _, _, errno := unix.RawSyscall(
		unix.SYS_IOCTL,
		uintptr(c.fd),
		_KVM_SET_SREGS,
		uintptr(unsafe.Pointer(sregs))); errno != 0 {
		return fmt.Errorf("error setting system registers: %v", errno)
	}
	return nil
}
// getSystemRegisters reads system registers via KVM_GET_SREGS, returning
// the raw errno (0 on success).
//
//go:nosplit
func (c *vCPU) getSystemRegisters(sregs *systemRegs) unix.Errno {
	if _, _, errno := unix.RawSyscall(
		unix.SYS_IOCTL,
		uintptr(c.fd),
		_KVM_GET_SREGS,
		uintptr(unsafe.Pointer(sregs))); errno != 0 {
		return errno
	}
	return 0
}
// seccompMmapSyscall re-issues the mmap whose arguments are captured in
// the trapped signal context, stores the result back into the context's
// RAX, and returns (addr, length, errno) for the caller.
//
//go:nosplit
func seccompMmapSyscall(context unsafe.Pointer) (uintptr, uintptr, unix.Errno) {
	ctx := bluepillArchContext(context)
	// MAP_DENYWRITE is deprecated and ignored by kernel. We use it only for seccomp filters.
	addr, _, e := unix.RawSyscall6(uintptr(ctx.Rax), uintptr(ctx.Rdi), uintptr(ctx.Rsi),
		uintptr(ctx.Rdx), uintptr(ctx.R10)|unix.MAP_DENYWRITE, uintptr(ctx.R8), uintptr(ctx.R9))
	ctx.Rax = uint64(addr)
	return addr, uintptr(ctx.Rsi), e
}
|
package main
import "math"
// MinNumberOfCoinsForChange returns the fewest coins from denoms that sum
// to exactly n, or -1 when no combination works. Classic bottom-up DP:
// counts[a] holds the best coin count for amount a, with math.MaxInt32 as
// the "unreachable" sentinel.
func MinNumberOfCoinsForChange(n int, denoms []int) int {
	const unreachable = math.MaxInt32
	counts := make([]int, n+1)
	// counts[0] stays 0: zero coins make amount zero.
	for amount := 1; amount <= n; amount++ {
		counts[amount] = unreachable
	}
	for _, denom := range denoms {
		for amount := denom; amount <= n; amount++ {
			counts[amount] = min(counts[amount], counts[amount-denom]+1)
		}
	}
	if counts[n] == unreachable {
		return -1
	}
	return counts[n]
}

// min returns the smallest of its integer arguments.
func min(arg1 int, rest ...int) int {
	smallest := arg1
	for _, candidate := range rest {
		if candidate < smallest {
			smallest = candidate
		}
	}
	return smallest
}
|
package main
import (
"fmt"
"reflect"
)
// User is the demo struct whose fields and methods are inspected via
// reflection in main/Info.
type User struct {
	Id int
	Name string
	Age int
}
// Hello ignores its argument and prints a fixed greeting; it exists so
// main can invoke it by name through reflect.Value.MethodByName.
func (user User) Hello(name string) {
	fmt.Println("hello world")
}
// main demonstrates reflection: dumping a struct with Info, mutating an
// int through a reflect.Value pointer, and calling a method by name.
func main() {
	u := User{1, "ok", 12}
	Info(u)
	x:=123
	// Must reflect on &x: only an addressable Elem can be set.
	v:=reflect.ValueOf(&x)
	v.Elem().SetInt(99)
	fmt.Println(x)
	v2:=reflect.ValueOf(u)
	mv:=v2.MethodByName("Hello")
	args:=[]reflect.Value{reflect.ValueOf("jon")}
	mv.Call(args)
}
// Info prints o's type name, each field's name/type/value, and the method
// set, all via reflection. Non-struct kinds print "xxx" and return early.
func Info(o interface{}) {
	t := reflect.TypeOf(o)
	// NOTE(review): Print (no newline) leaves the type name and the first
	// field on the same output line — Println may have been intended.
	fmt.Print("type:", t.Name())
	if k := t.Kind(); k != reflect.Struct {
		fmt.Println("xxx")
		return
	}
	v := reflect.ValueOf(o)
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		val := v.Field(i).Interface()
		fmt.Println(f.Name, f.Type, val)
	}
	for i := 0; i < t.NumMethod(); i++ {
		m := t.Method(i)
		fmt.Println(m.Name, m.Type)
	}
}
|
package bcio
import (
"debug/elf"
"log"
"os"
"sort"
)
// memSegment is one loadable segment: its virtual address, file offset,
// page-rounded memory size, the bytes read from the file, and a map of
// in-memory edits (byte offset -> new value) not yet flushed by Save.
type memSegment struct {
	Vaddr uint64
	Offset int64
	Memsz uint64
	Data []uint8
	changes map[int]byte
}
// codeSection records the address and size of one executable section.
type codeSection struct {
	Addr uint64
	Size uint64
}
// Binary type stores information about how to load a file to memory.
type Binary struct {
	filename string
	memory []memSegment
	Symbol2Addr map[string]uint64
	Addr2Symbol map[uint64]string
	CodeSections []codeSection
	Entry uint64
	MachineType string
}
// Save overwrites the changes into binary: every recorded byte edit is
// written back to the on-disk file at segment offset + byte index.
// NOTE(review): the panic message "No such file" is misleading — OpenFile
// can fail for other reasons (permissions, read-only fs) — and WriteAt
// errors are silently ignored; consider surfacing both.
func (b *Binary) Save() {
	f, err := os.OpenFile(b.filename, os.O_RDWR, 0644)
	if err != nil {
		panic("No such file")
	}
	defer f.Close()
	for _, m := range b.memory {
		for idx, val := range m.changes {
			f.WriteAt([]byte{val}, m.Offset+int64(idx))
		}
	}
}
// ReadMemory reads size bytes starting at virtual address addr. A read
// that starts inside one segment but extends past its file-backed data
// recurses into the next segment and concatenates the results. Returns
// nil when addr falls in no segment.
func (b *Binary) ReadMemory(addr uint64, size uint64) []uint8 {
	for _, m := range b.memory {
		if addr >= m.Vaddr && addr < m.Vaddr+m.Memsz {
			if addr-m.Vaddr+size <= uint64(len(m.Data)) {
				return m.Data[addr-m.Vaddr : addr-m.Vaddr+size]
			}
			// Partial hit: take what this segment has, then continue the
			// read at the following address.
			data := m.Data[addr-m.Vaddr:]
			sz := uint64(len(data))
			return append(data, b.ReadMemory(addr+sz, size-sz)...)
		}
	}
	return nil
}
// WriteMemory write memory bytes into binary. If it tries to write multiple
// memory segments, only update the first segment. Each written byte is
// mirrored into the segment's changes map so Save can flush it to disk.
// Returns the number of bytes written, or -1 if addr hits no segment.
func (b *Binary) WriteMemory(addr uint64, data []byte) int {
	for _, m := range b.memory {
		if addr >= m.Vaddr && addr < m.Vaddr+m.Memsz {
			base := int(addr - m.Vaddr)
			if base+len(data) <= len(m.Data) {
				for i := 0; i < len(data); i++ {
					m.changes[base+i] = data[i]
					m.Data[base+i] = data[i]
				}
				return len(data)
			}
			// Truncate at the end of this segment's file-backed data.
			size := len(m.Data) - base
			for i := 0; i < size; i++ {
				m.changes[base+i] = data[i]
				m.Data[base+i] = data[i]
			}
			return size
		}
	}
	return -1
}
// loadCodeSegments reads every PT_LOAD program segment of _elf from f into
// memory. Each segment is aligned down to a 4 KiB page boundary (the
// in-page offset is folded into the sizes) and its memory size is rounded
// up to the next page. Panics via log.Panicln on read failure.
func loadCodeSegments(f *os.File, _elf *elf.File) []memSegment {
	memory := make([]memSegment, 0, len(_elf.Progs))
	for _, prog := range _elf.Progs {
		if prog.Type != elf.PT_LOAD {
			continue
		}
		align := uint64(0x1000)
		pageoffset := prog.Vaddr & (align - 1)
		memsz := prog.Memsz + pageoffset
		offset := int64(prog.Off - pageoffset)
		filesz := prog.Filesz + pageoffset
		vaddr := prog.Vaddr - pageoffset
		memsz = (memsz + align) & ^(align - 1)
		buf := make([]uint8, filesz)
		// ReadAt fills buf completely or returns an error, unlike the
		// original Seek (with deprecated os.SEEK_SET) + single Read, which
		// could silently short-read a large segment.
		if _, err := f.ReadAt(buf, offset); err != nil {
			log.Panicln(err)
		}
		memory = append(memory, memSegment{
			Vaddr: vaddr,
			Offset: offset,
			Memsz: memsz,
			Data: buf,
			changes: make(map[int]byte),
		})
	}
	return memory
}
// loadSymbols builds bidirectional name<->address maps from the ELF symbol
// table, keeping only STT_FUNC and STT_OBJECT entries. Binaries without a
// symbol table yield two empty maps.
func loadSymbols(_elf *elf.File) (map[string]uint64, map[uint64]string) {
	symbol2addr := make(map[string]uint64)
	addr2symbol := make(map[uint64]string)
	symbols, err := _elf.Symbols()
	if err != nil {
		return symbol2addr, addr2symbol
	}
	for _, symbol := range symbols {
		// Low nibble of st_info is the symbol type.
		infoType := symbol.Info & 0xf
		if infoType == 1 || infoType == 2 { // Symbol Type is STT_FUNC or STT_OBJECT
			symbol2addr[symbol.Name] = symbol.Value
			addr2symbol[symbol.Value] = symbol.Name
		}
		// TODO: Check Thumb or ARM type for ARM ELFs.
	}
	return symbol2addr, addr2symbol
}
// findCodeSection collects every executable section of _elf — we simply
// treat any SHF_EXECINSTR section (.text, .init, .plt, …) as code — and
// returns them sorted by address, breaking ties by size.
func findCodeSection(_elf *elf.File) []codeSection {
	sections := make([]codeSection, 0, len(_elf.Sections)/3)
	for _, sec := range _elf.Sections {
		if sec.Flags&elf.SHF_EXECINSTR != elf.SHF_EXECINSTR {
			continue
		}
		sections = append(sections, codeSection{
			Addr: sec.Addr,
			Size: sec.Size,
		})
	}
	sort.Slice(sections, func(i, j int) bool {
		if sections[i].Addr != sections[j].Addr {
			return sections[i].Addr < sections[j].Addr
		}
		return sections[i].Size < sections[j].Size
	})
	return sections
}
// ReadElf loads a ELF binary: its loadable segments, symbol maps,
// executable sections, entry point and machine type. Panics if the file
// cannot be opened or parsed as ELF.
func ReadElf(filename string) *Binary {
	f, err := os.Open(filename)
	if err != nil {
		panic("No such file")
	}
	defer f.Close()
	_elf, err := elf.NewFile(f)
	if err != nil {
		panic("Failed to load the ELF file")
	}
	memory := loadCodeSegments(f, _elf)
	symbol2addr, addr2symbol := loadSymbols(_elf)
	codeSections := findCodeSection(_elf)
	return &Binary{
		filename: filename,
		memory: memory,
		Symbol2Addr: symbol2addr,
		Addr2Symbol: addr2symbol,
		CodeSections: codeSections,
		Entry: _elf.Entry,
		MachineType: _elf.Machine.String(),
	}
}
|
package console
import (
"fmt"
"io/ioutil"
"os"
"strings"
)
// commands maps the upper-cased command word to its implementation;
// DIR and LS are aliases for the same command.
var commands = map[string]Command{
	"HELP": NewHelpCommand(),
	"DIR": NewDirCommand(),
	"LS": NewDirCommand(),
	"CD": NewCDCommand(),
	"MKDIR": NewMkDirCommand(),
	"RUN": NewRunCommand(),
}
// command holds shared metadata for a console command: its name, a short
// description, and a usage/help string.
type command struct {
	Name string
	Desc string
	Help string
}
// Command is implemented by every console command; Exec receives the full
// input statement (command word plus arguments) and the pixel buffer to
// render output into.
type Command interface {
	Exec(pb PixelBuffer, statement string) error
}
/*
Implemented commands.

Each type embeds the shared command metadata struct and provides its own
Exec implementation.
*/
type helpCommand struct {
	command
}
type dirCommand struct {
	command
}
type cdCommand struct {
	command
}
type mkdirCommand struct {
	command
}
type runCommand struct {
	command
}
// NewHelpCommand returns the HELP command, which prints the static help
// screen.
func NewHelpCommand() Command {
	c := &helpCommand{
		command{
			Name: "HELP",
			Desc: "Display help",
		},
	}
	return c
}
// Exec prints the static help screen — available commands, key bindings,
// and pointers to further documentation — color-coded into the pixel
// buffer. The statement argument is ignored.
func (h *helpCommand) Exec(pb PixelBuffer, statement string) error {
	pb.Color(PICO8_BLUE)
	pb.Print("COMMANDS")
	pb.Print("")
	pb.Color(PICO8_LIGHT_GRAY)
	pb.Print("LOAD <FILENAME> SAVE <FILENAME>")
	pb.Print("RUN RESUME")
	pb.Print("SHUTDOWN REBOOT")
	pb.Print("CD <DIRNAME> MKDIR <DIRNAME>")
	pb.Print("CD .. TO GO UP A DIRECTORY")
	pb.Print("KEYCONFIG TO CHOOSE BUTTONS")
	pb.Color(PICO8_PINK)
	// Fixed user-visible typo: "CARTRIGDES" -> "CARTRIDGES".
	pb.Print("SPLORE TO EXPLORE CARTRIDGES")
	pb.Print("")
	pb.Color(PICO8_LIGHT_GRAY)
	pb.Print("PRESS ESC TO TOGGLE EDITOR VIEW")
	pb.Print("ALT+ENTER TO TOGGLE FULLSCREEN")
	pb.Print("CTRL-Q TO FASTQUIT")
	pb.Color(PICO8_BLUE)
	pb.Print("SEE PICOGO.TXT FOR MORE INFO")
	pb.Print("OR VISIT WWW.??????.COM")
	return nil
}
// NewDirCommand builds the DIR command (also registered as LS), which lists
// the current directory.
func NewDirCommand() Command {
	return &dirCommand{
		command{
			Name: "DIR",
			Desc: "List directory",
		},
	}
}
// pwd prints the console's current directory to pb in blue.
func pwd(pb PixelBuffer) {
	pb.Color(PICO8_BLUE)
	pb.Print(fmt.Sprintf("DIRECTORY: %s", _console.currentDir))
}
// Exec lists the working directory: a header line first, then one entry per
// line with names upper-cased — directories in pink, regular files in blue.
// The statement argument is ignored.
func (d *dirCommand) Exec(pb PixelBuffer, statement string) error {
	pwd(pb)
	wd, err := os.Getwd()
	if err != nil {
		return err
	}
	// NOTE(review): ioutil.ReadDir is deprecated since Go 1.16 in favor of
	// os.ReadDir; kept to match this file's existing imports.
	entries, err := ioutil.ReadDir(wd)
	if err != nil {
		return err
	}
	for _, entry := range entries {
		color := PICO8_BLUE
		if entry.IsDir() {
			color = PICO8_PINK
		}
		pb.Color(color)
		pb.Print(strings.ToUpper(entry.Name()))
	}
	return nil
}
// NewCDCommand builds the CD command, which changes the console's current
// directory.
func NewCDCommand() Command {
	return &cdCommand{
		command{
			Name: "CD",
			Desc: "Change directory",
		},
	}
}
// Exec changes the console's current directory.
//
// Usage: CD <DIRNAME>. With no argument the command is a silent no-op.
// "CD .." moves up one level but is rolled back if it would escape the
// console's base directory. On success the new directory is printed;
// user-facing failures print a message and still return nil.
func (c *cdCommand) Exec(pb PixelBuffer, statement string) error {
	tokens := strings.Split(statement, " ")
	if len(tokens) < 2 {
		// No target directory given; nothing to do.
		return nil
	}
	newDir := strings.ToLower(tokens[1])
	newDir = _console.currentDir + "/" + newDir + "/"
	// TODO get code working for case insensitive dir names
	if err := os.Chdir(_console.baseDir + newDir); err != nil {
		pb.Color(PICO8_WHITE)
		pb.Print("DIRECTORY NOT FOUND")
		return nil
	}
	// Resolve the absolute path so ".." components are normalized before
	// the base-directory containment check below.
	fullDir, err := os.Getwd()
	if err != nil {
		return err
	}
	if len(fullDir) < len(_console.baseDir) {
		// The change escaped above the base directory; roll back to the
		// previous directory. The Chdir error is deliberately ignored:
		// this is a best-effort return to a directory we were just in.
		os.Chdir(_console.baseDir + _console.currentDir)
		pb.Color(PICO8_WHITE)
		pb.Print("CD: FAILED")
		return nil
	}
	if fullDir == _console.baseDir {
		_console.currentDir = "/"
	} else {
		parts := strings.Split(fullDir, "/")
		_console.currentDir = "/" + parts[len(parts)-1] + "/"
	}
	pwd(pb)
	return nil
}
// NewMkDirCommand builds the MKDIR command, which creates a directory under
// the console's current directory.
func NewMkDirCommand() Command {
	c := &mkdirCommand{
		command{
			Name: "MKDIR",
			Desc: "Make directory",
			// Fixed: usage text previously read "MAKE [NAME]", which does
			// not match the actual command keyword.
			Help: "MKDIR [NAME]",
		},
	}
	return c
}
// Exec creates a directory under the console's current directory.
//
// Usage: MKDIR <NAME>. Without an argument it prints the usage string. The
// directory name is lower-cased on disk and created with mode 0700. A
// creation failure prints "MKDIR: FAILED" and returns nil, matching the
// console's interactive error style.
func (m *mkdirCommand) Exec(pb PixelBuffer, statement string) error {
	tokens := strings.Split(statement, " ")
	if len(tokens) < 2 {
		pb.Color(PICO8_LIGHT_GRAY)
		pb.Print(m.Help)
		return nil
	}
	newDir := strings.ToLower(tokens[1])
	newDir = _console.baseDir + _console.currentDir + newDir
	// TODO get code working for case insensitive dir names
	if err := os.Mkdir(newDir, 0700); err != nil {
		pb.Color(PICO8_WHITE)
		pb.Print("MKDIR: FAILED")
		return nil
	}
	return nil
}
// NewRunCommand builds the RUN command, which starts cart execution.
func NewRunCommand() Command {
	return &runCommand{
		command{
			Name: "RUN",
			Desc: "Run code",
		},
	}
}
// Exec announces "RUNNING...", initializes the RUNTIME mode and switches
// the console into it. Panics if no RUNTIME mode is registered, since the
// console cannot function without one.
func (r *runCommand) Exec(pb PixelBuffer, statement string) error {
	pb.Color(PICO8_GREEN)
	pb.Print("RUNNING...")
	runMode, ok := _console.modes[RUNTIME]
	if !ok {
		panic("Unable to fetch RUNTIME mode!")
	}
	runMode.Init()
	_console.SetMode(RUNTIME)
	return nil
}
|
package mappers
import (
"RBStask/app/models/entity"
"database/sql"
"fmt"
)
// PersonMapper provides CRUD access to the public.persons table.
type PersonMapper struct {
	db *sql.DB // shared connection pool, supplied via Init
}

// Init stores the database handle used by all subsequent operations.
// It never fails; the error return exists to satisfy the caller's
// mapper-initialization convention.
func (m *PersonMapper) Init(db *sql.DB) error {
	m.db = db
	return nil
}
// Add inserts receivedPerson as a new row in public."persons".
//
// The database-generated id is not read back, and receivedPerson is not
// modified. On failure the driver error is returned wrapped with context
// (previously it was also printed here — log-and-return removed so the
// caller decides how to report).
func (m *PersonMapper) Add(receivedPerson *entity.Person) error {
	const sqlInsert = `
	INSERT INTO public."persons"(name, surname, old, id_group, position)
	VALUES ($1, $2, $3, $4, $5);`
	_, err := m.db.Exec(sqlInsert,
		receivedPerson.Name,
		receivedPerson.Surname,
		receivedPerson.Old,
		receivedPerson.IdGroup,
		receivedPerson.Position)
	if err != nil {
		return fmt.Errorf("persons insert: %w", err)
	}
	return nil
}
// GetPerson returns all persons whose id_group equals GroupId.
//
// NULL columns map to Go zero values via the sql.Null* scan targets.
// Returns an empty (non-nil) slice when no rows match.
func (m *PersonMapper) GetPerson(GroupId int64) ([]entity.Person, error) {
	// NOTE(review): SELECT * relies on the table's column order matching
	// the Scan call below; an explicit column list would be safer.
	const sqlSelect = `SELECT * FROM public.persons
	WHERE id_group = $1`
	rows, err := m.db.Query(sqlSelect, GroupId)
	if err != nil {
		return nil, fmt.Errorf("persons select: %w", err)
	}
	// Fixed: the rows handle was never closed, leaking a pooled connection
	// on every call that errored mid-iteration.
	defer rows.Close()
	persons := make([]entity.Person, 0)
	for rows.Next() {
		var (
			dbId       sql.NullInt64
			dbName     sql.NullString
			dbSurname  sql.NullString
			dbOld      sql.NullInt64
			dbIdGroup  sql.NullInt64
			dbPosition sql.NullString
		)
		if err := rows.Scan(&dbId, &dbName, &dbSurname, &dbOld, &dbIdGroup, &dbPosition); err != nil {
			return nil, fmt.Errorf("persons scan: %w", err)
		}
		persons = append(persons, entity.Person{
			Id:       dbId.Int64,
			Name:     dbName.String,
			Surname:  dbSurname.String,
			Old:      dbOld.Int64,
			IdGroup:  dbIdGroup.Int64,
			Position: dbPosition.String,
		})
	}
	// Fixed: iteration errors were silently dropped before.
	if err := rows.Err(); err != nil {
		return nil, fmt.Errorf("persons iterate: %w", err)
	}
	return persons, nil
}
// Edit overwrites every mutable column of the persons row whose id matches
// receivedPerson.Id.
//
// Updating a non-existent id is not reported as an error (the UPDATE simply
// affects zero rows). On failure the driver error is returned wrapped with
// context (the previous debug print and log-and-return were removed).
func (m *PersonMapper) Edit(receivedPerson *entity.Person) error {
	const sqlUpdate = `
	UPDATE persons SET
		name = $2,
		surname = $3,
		old = $4,
		id_group = $5,
		position = $6
	WHERE id = $1`
	_, err := m.db.Exec(sqlUpdate,
		receivedPerson.Id,
		receivedPerson.Name,
		receivedPerson.Surname,
		receivedPerson.Old,
		receivedPerson.IdGroup,
		receivedPerson.Position)
	if err != nil {
		return fmt.Errorf("persons update: %w", err)
	}
	return nil
}
// Delete removes the public.persons row with the given id.
//
// PersonId is dereferenced by database/sql's default parameter converter
// (a nil pointer is sent as SQL NULL and matches nothing). Deleting a
// non-existent id is not an error. On failure the driver error is returned
// wrapped with context instead of being printed here.
func (m *PersonMapper) Delete(PersonId *int64) error {
	const sqlDelete = `DELETE FROM public.persons WHERE id = $1 `
	if _, err := m.db.Exec(sqlDelete, PersonId); err != nil {
		return fmt.Errorf("persons delete: %w", err)
	}
	return nil
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.