text stringlengths 11 4.05M |
|---|
// Copyright (c) 2019, Sylabs Inc. All rights reserved.
// This software is licensed under a 3-clause BSD license. Please consult the LICENSE.md file
// distributed with the sources of this project regarding your rights to use or distribute this
// software.
package client
import (
"io/ioutil"
"net/http"
"strings"
"testing"
)
// TestNewClient verifies that NewClient applies sane defaults for a nil
// Config (default base URL, http.DefaultClient, empty token/agent) and that
// explicit BaseURL, AuthToken, UserAgent and HTTPClient values are honored.
// Malformed or unsupported base URLs must produce an error.
func TestNewClient(t *testing.T) {
	httpClient := &http.Client{}

	tests := []struct {
		name           string
		cfg            *Config
		wantErr        bool
		wantURL        string
		wantAuthToken  string
		wantUserAgent  string
		wantHTTPClient *http.Client
	}{
		{"NilConfig", nil, false, defaultBaseURL, "", "", http.DefaultClient},
		{"HTTPBaseURL", &Config{
			BaseURL: "http://library.staging.sylabs.io",
		}, false, "http://library.staging.sylabs.io", "", "", http.DefaultClient},
		{"HTTPSBaseURL", &Config{
			BaseURL: "https://library.staging.sylabs.io",
		}, false, "https://library.staging.sylabs.io", "", "", http.DefaultClient},
		// "bad:" parses but has an unsupported scheme; ":" does not parse at all.
		{"UnsupportedBaseURL", &Config{
			BaseURL: "bad:",
		}, true, "", "", "", nil},
		{"BadBaseURL", &Config{
			BaseURL: ":",
		}, true, "", "", "", nil},
		{"AuthToken", &Config{
			AuthToken: "blah",
		}, false, defaultBaseURL, "blah", "", http.DefaultClient},
		{"UserAgent", &Config{
			UserAgent: "Secret Agent Man",
		}, false, defaultBaseURL, "", "Secret Agent Man", http.DefaultClient},
		{"HTTPClient", &Config{
			HTTPClient: httpClient,
		}, false, defaultBaseURL, "", "", httpClient},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c, err := NewClient(tt.cfg)
			if (err != nil) != tt.wantErr {
				t.Fatalf("got err %v, want %v", err, tt.wantErr)
			}
			// Field checks only make sense when construction succeeded.
			if err == nil {
				if got, want := c.BaseURL.String(), tt.wantURL; got != want {
					t.Errorf("got host %v, want %v", got, want)
				}
				if got, want := c.AuthToken, tt.wantAuthToken; got != want {
					t.Errorf("got auth token %v, want %v", got, want)
				}
				if got, want := c.UserAgent, tt.wantUserAgent; got != want {
					t.Errorf("got user agent %v, want %v", got, want)
				}
				// Pointer comparison: the client must reuse the supplied *http.Client.
				if got, want := c.HTTPClient, tt.wantHTTPClient; got != want {
					t.Errorf("got HTTP client %v, want %v", got, want)
				}
			}
		})
	}
}
// TestNewRequest verifies that Client.newRequest builds an *http.Request with
// the expected method, URL (base + path + raw query), body, Authorization
// bearer header and User-Agent header for a variety of client configurations.
func TestNewRequest(t *testing.T) {
	tests := []struct {
		name           string
		cfg            *Config
		method         string
		path           string
		rawQuery       string
		body           string
		wantErr        bool
		wantURL        string
		wantAuthBearer string
		wantUserAgent  string
	}{
		// "b@d " is not a valid HTTP method token.
		{"BadMethod", nil, "b@d ", "", "", "", true, "", "", ""},
		{"NilConfigGet", nil, http.MethodGet, "/path", "", "", false, "https://library.sylabs.io/path", "", ""},
		{"NilConfigPost", nil, http.MethodPost, "/path", "", "", false, "https://library.sylabs.io/path", "", ""},
		{"NilConfigPostRawQuery", nil, http.MethodPost, "/path", "a=b", "", false, "https://library.sylabs.io/path?a=b", "", ""},
		{"NilConfigPostBody", nil, http.MethodPost, "/path", "", "body", false, "https://library.sylabs.io/path", "", ""},
		{"HTTPBaseURL", &Config{
			BaseURL: "http://library.staging.sylabs.io",
		}, http.MethodGet, "/path", "", "", false, "http://library.staging.sylabs.io/path", "", ""},
		{"HTTPSBaseURL", &Config{
			BaseURL: "https://library.staging.sylabs.io",
		}, http.MethodGet, "/path", "", "", false, "https://library.staging.sylabs.io/path", "", ""},
		{"AuthToken", &Config{
			AuthToken: "blah",
		}, http.MethodGet, "/path", "", "", false, "https://library.sylabs.io/path", "BEARER blah", ""},
		{"UserAgent", &Config{
			UserAgent: "Secret Agent Man",
		}, http.MethodGet, "/path", "", "", false, "https://library.sylabs.io/path", "", "Secret Agent Man"},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c, err := NewClient(tt.cfg)
			if err != nil {
				t.Fatalf("failed to create client: %v", err)
			}
			r, err := c.newRequest(tt.method, tt.path, tt.rawQuery, strings.NewReader(tt.body))
			if (err != nil) != tt.wantErr {
				t.Fatalf("got err %v, wantErr %v", err, tt.wantErr)
			}
			if err == nil {
				if got, want := r.Method, tt.method; got != want {
					t.Errorf("got method %v, want %v", got, want)
				}
				if got, want := r.URL.String(), tt.wantURL; got != want {
					t.Errorf("got URL %v, want %v", got, want)
				}
				// The request body must round-trip the supplied reader content.
				b, err := ioutil.ReadAll(r.Body)
				if err != nil {
					t.Errorf("failed to read body: %v", err)
				}
				if got, want := string(b), tt.body; got != want {
					t.Errorf("got body %v, want %v", got, want)
				}
				// Header presence must match whether a value was expected;
				// when present, exactly one value is allowed.
				authBearer, ok := r.Header["Authorization"]
				if got, want := ok, (tt.wantAuthBearer != ""); got != want {
					t.Fatalf("presence of auth bearer %v, want %v", got, want)
				}
				if ok {
					if got, want := len(authBearer), 1; got != want {
						t.Fatalf("got %v auth bearer(s), want %v", got, want)
					}
					if got, want := authBearer[0], tt.wantAuthBearer; got != want {
						t.Errorf("got auth bearer %v, want %v", got, want)
					}
				}
				userAgent, ok := r.Header["User-Agent"]
				if got, want := ok, (tt.wantUserAgent != ""); got != want {
					t.Fatalf("presence of user agent %v, want %v", got, want)
				}
				if ok {
					if got, want := len(userAgent), 1; got != want {
						t.Fatalf("got %v user agent(s), want %v", got, want)
					}
					if got, want := userAgent[0], tt.wantUserAgent; got != want {
						t.Errorf("got user agent %v, want %v", got, want)
					}
				}
			}
		})
	}
}
|
package management
import (
"context"
"github.com/golang/protobuf/ptypes/empty"
"github.com/caos/zitadel/pkg/grpc/management"
)
// GetOrgMemberRoles returns the set of roles that can be assigned to an
// organization member. The request carries no parameters (empty.Empty).
func (s *Server) GetOrgMemberRoles(ctx context.Context, _ *empty.Empty) (*management.OrgMemberRoles, error) {
	return &management.OrgMemberRoles{Roles: s.org.GetOrgMemberRoles()}, nil
}
// SearchMyOrgMembers searches the members of the caller's organization,
// translating the API request to the domain model and the result back to the
// API representation.
func (s *Server) SearchMyOrgMembers(ctx context.Context, in *management.OrgMemberSearchRequest) (*management.OrgMemberSearchResponse, error) {
	members, err := s.org.SearchMyOrgMembers(ctx, orgMemberSearchRequestToModel(in))
	if err != nil {
		return nil, err
	}
	return orgMemberSearchResponseFromModel(members), nil
}
// AddMyOrgMember adds a user as a member of the caller's organization and
// returns the created member.
func (s *Server) AddMyOrgMember(ctx context.Context, member *management.AddOrgMemberRequest) (*management.OrgMember, error) {
	addedMember, err := s.org.AddMyOrgMember(ctx, addOrgMemberToModel(member))
	if err != nil {
		return nil, err
	}
	return orgMemberFromModel(addedMember), nil
}
// ChangeMyOrgMember updates an existing member of the caller's organization
// (e.g. its role assignments) and returns the updated member.
func (s *Server) ChangeMyOrgMember(ctx context.Context, member *management.ChangeOrgMemberRequest) (*management.OrgMember, error) {
	changedMember, err := s.org.ChangeMyOrgMember(ctx, changeOrgMemberToModel(member))
	if err != nil {
		return nil, err
	}
	return orgMemberFromModel(changedMember), nil
}
// RemoveMyOrgMember removes the user identified by member.UserId from the
// caller's organization.
//
// Fix: the original returned a non-nil *empty.Empty together with a non-nil
// error. Return (nil, err) on failure instead, matching the convention used
// by every other handler in this file.
func (s *Server) RemoveMyOrgMember(ctx context.Context, member *management.RemoveOrgMemberRequest) (*empty.Empty, error) {
	if err := s.org.RemoveMyOrgMember(ctx, member.UserId); err != nil {
		return nil, err
	}
	return &empty.Empty{}, nil
}
|
package hud
import (
"encoding/json"
"fmt"
"strings"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/tilt-dev/tilt/internal/container"
"github.com/tilt-dev/tilt/internal/hud/view"
"github.com/tilt-dev/tilt/internal/rty"
"github.com/tilt-dev/tilt/internal/store"
"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
"github.com/tilt-dev/tilt/pkg/logger"
"github.com/tilt-dev/tilt/pkg/model"
"github.com/tilt-dev/tilt/pkg/model/logstore"
"github.com/gdamore/tcell"
)
// testCID is a fixed container ID used by the docker-compose render tests.
const testCID = container.ID("beep-boop")

// clockForTest pins the renderer's clock to a fixed instant so that golden
// snapshot output does not depend on the wall clock.
var clockForTest = func() time.Time { return time.Date(2017, 1, 1, 12, 0, 0, 0, time.UTC) }
// newView constructs a view.View holding the given resources and an empty
// log reader.
func newView(resources ...view.Resource) view.View {
	v := view.View{LogReader: newLogReader("")}
	v.Resources = resources
	return v
}
// newSpanLogReader returns a log reader whose store contains a single message
// attributed to the given manifest and span.
func newSpanLogReader(mn model.ManifestName, spanID logstore.SpanID, msg string) logstore.Reader {
	ls := logstore.NewLogStore()
	action := testLogAction{mn: mn, spanID: spanID, time: time.Now(), msg: msg}
	ls.Append(action, nil)
	return logstore.NewReader(&sync.RWMutex{}, ls)
}
// newWarningLogReader returns a log reader whose store contains each of the
// given messages at warning level, attributed to the manifest and span.
func newWarningLogReader(mn model.ManifestName, spanID logstore.SpanID, warnings []string) logstore.Reader {
	ls := logstore.NewLogStore()
	for _, w := range warnings {
		action := testLogAction{
			mn:     mn,
			spanID: spanID,
			time:   time.Now(),
			msg:    w,
			level:  logger.WarnLvl,
		}
		ls.Append(action, nil)
	}
	return logstore.NewReader(&sync.RWMutex{}, ls)
}
// appendSpanLog appends a single info-level message for the given manifest and
// span to an existing log store (used when a test needs logs on several spans).
func appendSpanLog(logStore *logstore.LogStore, mn model.ManifestName, spanID logstore.SpanID, msg string) {
	logStore.Append(testLogAction{mn: mn, spanID: spanID, time: time.Now(), msg: msg}, nil)
}
// TestRender drives the terminal renderer through a sequence of golden
// snapshot scenarios: undeployed resources, inline build/pod logs, line
// wrapping at several terminal widths, pending/in-progress builds, and
// unresourced YAML manifests. Each rtf.run call compares against a stored
// snapshot keyed by the scenario name.
func TestRender(t *testing.T) {
	rtf := newRendererTestFixture(t)

	// A resource that has never been deployed.
	v := newView(view.Resource{
		Name:         "foo",
		ResourceInfo: view.K8sResourceInfo{},
	})
	plainVs := fakeViewState(1, view.CollapseNo)
	rtf.run("one undeployed resource", 70, 20, v, plainVs)

	// A failed build shows its log inline.
	v = newView(view.Resource{
		Name: "a-a-a-aaaaabe vigoda",
		BuildHistory: []model.BuildRecord{{
			FinishTime: time.Now(),
			Error:      fmt.Errorf("oh no the build failed"),
			SpanID:     "vigoda:1",
		}},
		ResourceInfo: view.K8sResourceInfo{},
	})
	v.LogReader = newSpanLogReader("a-a-a-aaaaabe vigoda", "vigoda:1",
		"1\n2\n3\nthe compiler did not understand!\n5\n6\n7\n8\n")
	rtf.run("inline build log", 70, 20, v, plainVs)

	// Same failure with a realistic multi-line build log that must wrap.
	v = newView(view.Resource{
		Name: "a-a-a-aaaaabe vigoda",
		BuildHistory: []model.BuildRecord{{
			FinishTime: time.Now(),
			Error:      fmt.Errorf("oh no the build failed"),
			SpanID:     "vigoda:1",
		}},
		ResourceInfo: view.K8sResourceInfo{},
	})
	v.LogReader = newSpanLogReader("a-a-a-aaaaabe vigoda", "vigoda:1",
		`STEP 1/2 — Building Dockerfile: [gcr.io/windmill-public-containers/servantes/snack]
│ Tarring context…
│ Applying via kubectl
╎ Created tarball (size: 11 kB)
│ Building image
╎ RUNNING: go install github.com/tilt-dev/servantes/snack
╎ ERROR IN: go install github.com/tilt-dev/servantes/snack
╎ → # github.com/tilt-dev/servantes/snack
src/github.com/tilt-dev/servantes/snack/main.go:16:36: syntax error: unexpected newline, expecting comma or }
ERROR: ImageBuild: executor failed running [/bin/sh -c go install github.com/tilt-dev/servantes/snack]: exit code 2`)
	rtf.run("inline build log with wrapping", 117, 20, v, plainVs)

	// A running pod with restarts shows its pod log inline.
	v = newView(view.Resource{
		Name:      "a-a-a-aaaaabe vigoda",
		Endpoints: []string{"1.2.3.4:8080"},
		ResourceInfo: view.K8sResourceInfo{
			PodName:     "vigoda-pod",
			PodStatus:   "Running",
			PodRestarts: 1,
			SpanID:      "vigoda:pod",
			RunStatus:   v1alpha1.RuntimeStatusOK,
		},
	})
	v.LogReader = newSpanLogReader("a-a-a-aaaaabe vigoda", "vigoda:pod",
		"1\n2\n3\n4\nabe vigoda is now dead\n5\n6\n7\n8\n")
	rtf.run("pod log displayed inline", 70, 20, v, plainVs)

	// Build error without a finish time.
	v = newView(view.Resource{
		Name: "a-a-a-aaaaabe vigoda",
		BuildHistory: []model.BuildRecord{{
			Error:  fmt.Errorf("broken go code!"),
			SpanID: "vigoda:1",
		}},
		ResourceInfo: view.K8sResourceInfo{},
	})
	v.LogReader = newSpanLogReader("a-a-a-aaaaabe vigoda", "vigoda:1",
		"mashing keys is not a good way to generate code")
	rtf.run("manifest error and build error", 70, 20, v, plainVs)

	// Everything populated at once, rendered at three widths.
	ts := time.Now().Add(-5 * time.Minute)
	v = newView(view.Resource{
		Name:           "a-a-a-aaaaabe vigoda",
		LastDeployTime: ts,
		BuildHistory: []model.BuildRecord{{
			Edits:      []string{"main.go", "cli.go"},
			Error:      fmt.Errorf("the build failed!"),
			FinishTime: ts,
			StartTime:  ts.Add(-1400 * time.Millisecond),
		}},
		PendingBuildEdits: []string{"main.go", "cli.go", "vigoda.go"},
		PendingBuildSince: ts,
		CurrentBuild: model.BuildRecord{
			Edits:     []string{"main.go"},
			StartTime: ts,
		},
		Endpoints: []string{"1.2.3.4:8080"},
		ResourceInfo: view.K8sResourceInfo{
			PodName:         "vigoda-pod",
			PodCreationTime: ts,
			PodStatus:       "Running",
			RunStatus:       v1alpha1.RuntimeStatusOK,
			PodRestarts:     1,
			SpanID:          "vigoda:pod",
		},
	})
	v.LogReader = newSpanLogReader("a-a-a-aaaaabe vigoda", "vigoda:pod",
		"1\n2\n3\n4\nabe vigoda is now dead\n5\n6\n7\n8\n")
	rtf.run("all the data at once", 70, 20, v, plainVs)
	rtf.run("all the data at once 50w", 50, 20, v, plainVs)
	rtf.run("all the data at once 10w", 10, 20, v, plainVs)

	// Very long pod log lines must wrap inline.
	v = newView(view.Resource{
		Name:           "vigoda",
		LastDeployTime: ts,
		BuildHistory: []model.BuildRecord{{
			Edits:      []string{"main.go", "cli.go"},
			FinishTime: ts,
			StartTime:  ts.Add(-1400 * time.Millisecond),
		}},
		ResourceInfo: view.K8sResourceInfo{
			PodName:         "vigoda-pod",
			PodCreationTime: ts,
			PodStatus:       "Running",
			RunStatus:       v1alpha1.RuntimeStatusOK,
			PodRestarts:     1,
			SpanID:          "vigoda:pod",
		},
		Endpoints: []string{"1.2.3.4:8080"},
	})
	v.LogReader = newSpanLogReader("vigoda", "vigoda:pod",
		`abe vigoda is crashing
oh noooooooooooooooooo nooooooooooo noooooooooooo nooooooooooo
oh noooooooooooooooooo nooooooooooo noooooooooooo nooooooooooo nooooooooooo noooooooooooo nooooooooooo
oh noooooooooooooooooo nooooooooooo noooooooooooo nooooooooooo
oh noooooooooooooooooo nooooooooooo noooooooooooo nooooooooooo nooooooooooo noooooooooooo nooooooooooo nooooooooooo noooooooooooo nooooooooooo
oh noooooooooooooooooo nooooooooooo noooooooooooo nooooooooooo`)
	rtf.run("pod log with inline wrapping", 70, 20, v, plainVs)

	// Unresourced YAML manifest in both explicit and auto collapse states.
	v = newView(view.Resource{
		Name: model.UnresourcedYAMLManifestName,
		BuildHistory: []model.BuildRecord{{
			FinishTime: ts,
			StartTime:  ts.Add(-1400 * time.Millisecond),
		}},
		LastDeployTime: ts,
		ResourceInfo: view.YAMLResourceInfo{
			K8sDisplayNames: []string{"sancho:deployment"},
		},
	})
	rtf.run("no collapse unresourced yaml manifest", 70, 20, v, plainVs)
	rtf.run("default collapse unresourced yaml manifest", 70, 20, v, fakeViewState(1, view.CollapseAuto))

	// A view-level alert banner.
	alertVs := plainVs
	alertVs.AlertMessage = "this is only a test"
	rtf.run("alert message", 70, 20, v, alertVs)

	// Build currently running.
	v = newView(view.Resource{
		Name: "vigoda",
		CurrentBuild: model.BuildRecord{
			StartTime: ts.Add(-5 * time.Second),
			Edits:     []string{"main.go"},
		},
		ResourceInfo: view.K8sResourceInfo{},
	})
	rtf.run("build in progress", 70, 20, v, plainVs)

	// Build queued but not started.
	v = newView(view.Resource{
		Name:              "vigoda",
		PendingBuildSince: ts.Add(-5 * time.Second),
		PendingBuildEdits: []string{"main.go"},
		ResourceInfo: view.K8sResourceInfo{
			RunStatus: v1alpha1.RuntimeStatusPending,
		},
	})
	rtf.run("pending build", 70, 20, v, plainVs)

	// Edited-file lists truncate differently per terminal width.
	v = newView(view.Resource{
		Name:           "vigoda",
		LastDeployTime: ts.Add(-5 * time.Second),
		BuildHistory: []model.BuildRecord{{
			Edits: []string{"abbot.go", "costello.go", "harold.go"},
		}},
		ResourceInfo: view.K8sResourceInfo{
			RunStatus: v1alpha1.RuntimeStatusPending,
		},
	})
	rtf.run("edited files narrow term", 60, 20, v, plainVs)
	rtf.run("edited files normal term", 80, 20, v, plainVs)
	rtf.run("edited files wide term", 120, 20, v, plainVs)
}
// TestRenderTiltLog checks the global Tilt log pane in its three display
// states: default, half screen and full screen.
func TestRenderTiltLog(t *testing.T) {
	rtf := newRendererTestFixture(t)

	v := newView()
	// A long single-line log to exercise wrapping in the log pane.
	v.LogReader = newLogReader(strings.Repeat("abcdefg", 30))
	vs := fakeViewState(0, view.CollapseNo)
	rtf.run("tilt log", 70, 20, v, vs)
	vs.TiltLogState = view.TiltLogHalfScreen
	rtf.run("tilt log half screen", 70, 20, v, vs)
	vs.TiltLogState = view.TiltLogFullScreen
	rtf.run("tilt log full screen", 70, 20, v, vs)
}
// TestRenderNarrationMessage checks that an enabled narration overlay renders
// its message.
func TestRenderNarrationMessage(t *testing.T) {
	rtf := newRendererTestFixture(t)

	v := newView()
	vs := view.ViewState{
		ShowNarration:    true,
		NarrationMessage: "hi mom",
	}
	rtf.run("narration message", 60, 20, v, vs)
}
// TestAutoCollapseModes renders a healthy and a failing resource under the
// auto/yes/no collapse states; CollapseAuto should expand only the failing
// resource.
func TestAutoCollapseModes(t *testing.T) {
	rtf := newRendererTestFixture(t)

	goodView := newView(view.Resource{
		Name:         "vigoda",
		ResourceInfo: view.K8sResourceInfo{},
	})
	badView := newView(view.Resource{
		Name: "vigoda",
		BuildHistory: []model.BuildRecord{{
			FinishTime: time.Now(),
			Error:      fmt.Errorf("oh no the build failed"),
			SpanID:     "vigoda:1",
		}},
		ResourceInfo: view.K8sResourceInfo{},
	})
	badView.LogReader = newSpanLogReader("vigoda", "vigoda:1",
		"1\n2\n3\nthe compiler did not understand!\n5\n6\n7\n8\n")

	autoVS := fakeViewState(1, view.CollapseAuto)
	collapseYesVS := fakeViewState(1, view.CollapseYes)
	collapseNoVS := fakeViewState(1, view.CollapseNo)
	rtf.run("collapse-auto-good", 70, 20, goodView, autoVS)
	rtf.run("collapse-auto-bad", 70, 20, badView, autoVS)
	rtf.run("collapse-no-good", 70, 20, goodView, collapseNoVS)
	rtf.run("collapse-yes-bad", 70, 20, badView, collapseYesVS)
}
// TestPodPending checks rendering and combined status for a pod with no
// status yet, then with an explicit Pending status (which adds a spinner).
func TestPodPending(t *testing.T) {
	rtf := newRendererTestFixture(t)
	ts := time.Now().Add(-30 * time.Second)

	v := newView(view.Resource{
		Name: "vigoda",
		BuildHistory: []model.BuildRecord{{
			StartTime:  ts,
			FinishTime: ts,
			SpanID:     "vigoda:1",
		}},
		ResourceInfo: view.K8sResourceInfo{
			PodName:   "vigoda-pod",
			SpanID:    "vigoda:pod",
			PodStatus: "",
		},
		LastDeployTime: ts,
	})
	// Separate spans for the build log and the pod log.
	logStore := logstore.NewLogStore()
	appendSpanLog(logStore, "vigoda", "vigoda:1", `STEP 1/2 — Building Dockerfile: [gcr.io/windmill-public-containers/servantes/snack]
│ Tarring context…
│ Applying via kubectl
╎ Created tarball (size: 11 kB)
│ Building image
`)
	appendSpanLog(logStore, "vigoda", "vigoda:pod", "serving on 8080")
	v.LogReader = logstore.NewReader(&sync.RWMutex{}, logStore)
	vs := fakeViewState(1, view.CollapseAuto)

	rtf.run("pending pod no status", 80, 20, v, vs)
	assert.Equal(t, statusDisplay{color: cPending},
		combinedStatus(v.Resources[0]))

	v.Resources[0].ResourceInfo = view.K8sResourceInfo{
		PodCreationTime: ts,
		PodStatus:       "Pending",
		RunStatus:       v1alpha1.RuntimeStatusPending,
	}
	rtf.run("pending pod pending status", 80, 20, v, vs)
	// An explicitly Pending pod additionally shows a spinner.
	assert.Equal(t, statusDisplay{color: cPending, spinner: true},
		combinedStatus(v.Resources[0]))
}
// TestNonCrashingPodNoInlineCrashLog verifies that a healthy (Running/OK) pod
// does not surface its pod log inline even when suspicious log text exists.
func TestNonCrashingPodNoInlineCrashLog(t *testing.T) {
	rtf := newRendererTestFixture(t)
	ts := time.Now().Add(-30 * time.Second)

	v := newView(view.Resource{
		Name:      "vigoda",
		Endpoints: []string{"1.2.3.4:8080"},
		BuildHistory: []model.BuildRecord{{
			SpanID:     "vigoda:1",
			StartTime:  ts,
			FinishTime: ts,
		}},
		ResourceInfo: view.K8sResourceInfo{
			PodName:            "vigoda-pod",
			PodStatus:          "Running",
			RunStatus:          v1alpha1.RuntimeStatusOK,
			SpanID:             "vigoda:pod",
			PodUpdateStartTime: ts,
			PodCreationTime:    ts.Add(-time.Minute),
		},
		LastDeployTime: ts,
	})
	logStore := logstore.NewLogStore()
	appendSpanLog(logStore, "vigoda", "vigoda:1",
		"Building (1/2)\nBuilding (2/2)\n")
	appendSpanLog(logStore, "vigoda", "vigoda:pod",
		"Something's maybe wrong idk")
	v.LogReader = logstore.NewReader(&sync.RWMutex{}, logStore)
	vs := fakeViewState(1, view.CollapseAuto)

	rtf.run("non-crashing pod displays no logs inline even if crash log if present", 70, 20, v, vs)
}
// TestCompletedPod verifies that the "Completed" pod phase is treated as a
// good (non-error) status.
func TestCompletedPod(t *testing.T) {
	rtf := newRendererTestFixture(t)
	ts := time.Now().Add(-30 * time.Second)

	v := newView(view.Resource{
		Name:      "vigoda",
		Endpoints: []string{"1.2.3.4:8080"},
		BuildHistory: []model.BuildRecord{{
			SpanID:     "vigoda:1",
			StartTime:  ts,
			FinishTime: ts,
		}},
		ResourceInfo: view.K8sResourceInfo{
			PodName:            "vigoda-pod",
			PodStatus:          "Completed",
			RunStatus:          v1alpha1.RuntimeStatusOK,
			PodUpdateStartTime: ts,
			PodCreationTime:    ts.Add(-time.Minute),
		},
		LastDeployTime: ts,
	})
	v.LogReader = newSpanLogReader("vigoda", "vigoda:1",
		"Building (1/2)\nBuilding (2/2)\n")
	vs := fakeViewState(1, view.CollapseAuto)

	rtf.run("Completed is a good status", 70, 20, v, vs)
}
// TestBrackets checks that bracketed text in resource names and log prefixes
// renders literally and is not misinterpreted as markup.
func TestBrackets(t *testing.T) {
	rtf := newRendererTestFixture(t)
	ts := time.Now().Add(-30 * time.Second)

	v := newView(view.Resource{
		Name: "[vigoda]",
		BuildHistory: []model.BuildRecord{{
			StartTime:  ts,
			FinishTime: ts,
		}},
		ResourceInfo: view.K8sResourceInfo{
			PodName:         "vigoda-pod",
			PodStatus:       "Running",
			RunStatus:       v1alpha1.RuntimeStatusOK,
			PodCreationTime: ts,
		},
		LastDeployTime: ts,
	})
	v.LogReader = newLogReader(`[build] This line should be prefixed with 'build'
[hello world] This line should be prefixed with [hello world]
[hello world] this line too
`)
	vs := fakeViewState(1, view.CollapseNo)

	rtf.run("text in brackets", 80, 20, v, vs)
}
// TestPendingBuildInManualTriggerMode checks that a pending build on a
// manual-trigger resource is rendered as awaiting a trigger rather than as
// imminently building.
func TestPendingBuildInManualTriggerMode(t *testing.T) {
	rtf := newRendererTestFixture(t)
	ts := time.Now().Add(-30 * time.Second)
	v := newView(view.Resource{
		Name:              "vigoda",
		PendingBuildSince: ts.Add(-5 * time.Second),
		PendingBuildEdits: []string{"main.go"},
		TriggerMode:       model.TriggerModeManualWithAutoInit,
		ResourceInfo:      view.K8sResourceInfo{},
	})
	vs := fakeViewState(1, view.CollapseNo)
	rtf.run("pending build with manual trigger", 80, 20, v, vs)
}
// TestBuildHistory renders a resource with two completed builds (a file-edit
// build and the initial build) to check multi-entry history display.
func TestBuildHistory(t *testing.T) {
	rtf := newRendererTestFixture(t)
	ts := time.Now().Add(-30 * time.Second)

	v := newView(view.Resource{
		Name: "vigoda",
		BuildHistory: []model.BuildRecord{
			{
				Edits:      []string{"main.go"},
				StartTime:  ts.Add(-10 * time.Second),
				FinishTime: ts,
			},
			{
				Reason:     model.BuildReasonFlagInit,
				StartTime:  ts.Add(-2 * time.Minute),
				FinishTime: ts.Add(-2 * time.Minute).Add(5 * time.Second),
			},
		},
		ResourceInfo: view.K8sResourceInfo{
			PodName:            "vigoda-pod",
			PodStatus:          "Running",
			RunStatus:          v1alpha1.RuntimeStatusOK,
			PodUpdateStartTime: ts,
			PodCreationTime:    ts.Add(-time.Minute),
		},
		LastDeployTime: ts,
	})
	vs := fakeViewState(1, view.CollapseNo)

	rtf.run("multiple build history entries", 80, 20, v, vs)
}
// TestDockerComposeUpExpanded renders a running docker-compose resource with
// an in-progress rebuild, fully expanded.
func TestDockerComposeUpExpanded(t *testing.T) {
	rtf := newRendererTestFixture(t)
	now := time.Now()
	v := newView(view.Resource{
		Name:         "snack",
		ResourceInfo: view.NewDCResourceInfo("running", testCID, "snack:dc", now.Add(-5*time.Second), v1alpha1.RuntimeStatusOK),
		Endpoints:    []string{"http://localhost:3000"},
		CurrentBuild: model.BuildRecord{
			StartTime: now.Add(-5 * time.Second),
			Reason:    model.BuildReasonFlagChangedFiles,
		},
	})
	v.LogReader = newSpanLogReader("snack", "snack:dc", "hellllo")
	vs := fakeViewState(1, view.CollapseNo)
	rtf.run("docker-compose up expanded", 80, 20, v, vs)
}
// TestStatusBarDCRebuild checks the status bar when an exited docker-compose
// container is being intentionally rebuilt (collapsed view).
func TestStatusBarDCRebuild(t *testing.T) {
	rtf := newRendererTestFixture(t)
	now := time.Now()
	v := newView(view.Resource{
		Name:         "snack",
		ResourceInfo: view.NewDCResourceInfo("exited", testCID, "snack:dc", now.Add(-5*time.Second), v1alpha1.RuntimeStatusError),
		CurrentBuild: model.BuildRecord{
			StartTime: now.Add(-5 * time.Second),
			Reason:    model.BuildReasonFlagChangedFiles,
		},
	})
	v.LogReader = newSpanLogReader("snack", "snack:dc", "hellllo")
	vs := fakeViewState(1, view.CollapseYes)
	rtf.run("status bar after intentional DC restart", 60, 20, v, vs)
}
// TestDetectDCCrashExpanded renders a crashed docker-compose resource with
// the resource expanded (CollapseNo).
func TestDetectDCCrashExpanded(t *testing.T) {
	rtf := newRendererTestFixture(t)
	now := time.Now()
	v := newView(view.Resource{
		Name:         "snack",
		ResourceInfo: view.NewDCResourceInfo("exited", testCID, "snack:dc", now.Add(-5*time.Second), v1alpha1.RuntimeStatusError),
	})
	v.LogReader = newSpanLogReader("snack", "snack:dc", "hi im a crash")
	vs := fakeViewState(1, view.CollapseNo)
	rtf.run("detected docker compose build crash expanded", 80, 20, v, vs)
}
// TestDetectDCCrashNotExpanded renders the same crashed docker-compose
// resource collapsed (CollapseYes).
func TestDetectDCCrashNotExpanded(t *testing.T) {
	rtf := newRendererTestFixture(t)
	now := time.Now()
	v := newView(view.Resource{
		Name:         "snack",
		ResourceInfo: view.NewDCResourceInfo("exited", testCID, "snack:dc", now.Add(-5*time.Second), v1alpha1.RuntimeStatusError),
	})
	v.LogReader = newSpanLogReader("snack", "snack:dc", "hi im a crash")
	vs := fakeViewState(1, view.CollapseYes)
	rtf.run("detected docker compose build crash not expanded", 80, 20, v, vs)
}
// TestDetectDCCrashAutoExpand checks that CollapseAuto expands a crashed
// docker-compose resource automatically.
func TestDetectDCCrashAutoExpand(t *testing.T) {
	rtf := newRendererTestFixture(t)
	now := time.Now()
	v := newView(view.Resource{
		Name:         "snack",
		ResourceInfo: view.NewDCResourceInfo("exited", testCID, "snack:dc", now.Add(-5*time.Second), v1alpha1.RuntimeStatusError),
	})
	v.LogReader = newSpanLogReader("snack", "snack:dc", "hi im a crash")
	vs := fakeViewState(1, view.CollapseAuto)
	rtf.run("detected docker compose build crash auto expand", 80, 20, v, vs)
}
// TestTiltfileResource renders the synthetic Tiltfile resource both before
// any execution and after its first completed run.
func TestTiltfileResource(t *testing.T) {
	rtf := newRendererTestFixture(t)
	v := newView(view.Resource{
		Name:         store.MainTiltfileManifestName,
		IsTiltfile:   true,
		ResourceInfo: view.TiltfileResourceInfo{},
	})
	vs := fakeViewState(1, view.CollapseNo)

	rtf.run("Tiltfile resource no run", 80, 20, v, vs)

	now := time.Now()
	v = newView(view.Resource{
		Name:         store.MainTiltfileManifestName,
		IsTiltfile:   true,
		ResourceInfo: view.TiltfileResourceInfo{},
		BuildHistory: []model.BuildRecord{
			{
				StartTime:  now.Add(-5 * time.Second),
				FinishTime: now.Add(-4 * time.Second),
				Reason:     model.BuildReasonFlagInit,
				SpanID:     "tiltfile:1",
			},
		},
	})
	rtf.run("Tiltfile resource first run", 80, 20, v, vs)
}
// TestTiltfileResourceWithWarning renders a Tiltfile run that completed with
// warnings; the warning-level log lines should surface in the view.
func TestTiltfileResourceWithWarning(t *testing.T) {
	rtf := newRendererTestFixture(t)
	now := time.Now()
	v := newView(view.Resource{
		Name:         store.MainTiltfileManifestName,
		IsTiltfile:   true,
		ResourceInfo: view.TiltfileResourceInfo{},
		BuildHistory: []model.BuildRecord{
			{
				Edits:        []string{"Tiltfile"},
				StartTime:    now.Add(-5 * time.Second),
				FinishTime:   now.Add(-4 * time.Second),
				Reason:       model.BuildReasonFlagConfig,
				WarningCount: 2,
				SpanID:       "tiltfile:1",
			},
		},
	})
	v.LogReader = newWarningLogReader(
		store.MainTiltfileManifestName,
		"tiltfile:1",
		[]string{"I am warning you\n", "Something is alarming here\n"})
	vs := fakeViewState(1, view.CollapseNo)

	rtf.run("Tiltfile resource with warning", 80, 20, v, vs)
}
// TestTiltfileResourcePending renders the Tiltfile resource while a re-execution
// is still in progress.
func TestTiltfileResourcePending(t *testing.T) {
	rtf := newRendererTestFixture(t)
	now := time.Now()
	v := newView(view.Resource{
		Name:         store.MainTiltfileManifestName,
		IsTiltfile:   true,
		ResourceInfo: view.TiltfileResourceInfo{},
		CurrentBuild: model.BuildRecord{
			Edits:     []string{"Tiltfile"},
			StartTime: now.Add(-5 * time.Second),
			Reason:    model.BuildReasonFlagConfig,
			SpanID:    "tiltfile:1",
		},
	})
	v.LogReader = newSpanLogReader(store.MainTiltfileManifestName, "tiltfile:1", "Building...")
	vs := fakeViewState(1, view.CollapseNo)

	rtf.run("Tiltfile resource pending", 80, 20, v, vs)
}
// TestRenderEscapedNbsp checks that a raw 0xA0 byte (non-breaking space in
// Latin-1, invalid as standalone UTF-8) in the log is escaped safely.
func TestRenderEscapedNbsp(t *testing.T) {
	rtf := newRendererTestFixture(t)
	plainVs := fakeViewState(1, view.CollapseNo)
	v := newView(view.Resource{
		Name: "vigoda",
		BuildHistory: []model.BuildRecord{{
			FinishTime: time.Now(),
			Error:      fmt.Errorf("oh no the build failed"),
			SpanID:     "vigoda:1",
		}},
		ResourceInfo: view.K8sResourceInfo{},
	})
	v.LogReader = newSpanLogReader("vigoda", "vigoda:1", "\xa0 NBSP!")
	rtf.run("escaped nbsp", 70, 20, v, plainVs)
}
// TestLineWrappingInInlineError renders an inline build-error log made of ten
// long lines to exercise wrapping inside the error pane.
func TestLineWrappingInInlineError(t *testing.T) {
	rtf := newRendererTestFixture(t)
	vs := fakeViewState(1, view.CollapseNo)
	lines := []string{}
	for i := 0; i < 10; i++ {
		// Each line is ~88 characters, wider than the 80-column terminal.
		lines = append(lines, fmt.Sprintf("line %d: %s", i, strings.Repeat("xxx ", 20)))
	}
	v := newView(view.Resource{
		Name: "vigoda",
		BuildHistory: []model.BuildRecord{{
			FinishTime: time.Now(),
			Error:      fmt.Errorf("failure"),
			SpanID:     "vigoda:1",
		}},
		ResourceInfo: view.K8sResourceInfo{},
	})
	v.LogReader = newSpanLogReader("vigoda", "vigoda:1", strings.Join(lines, "\n"))
	rtf.run("line wrapping in inline error", 80, 40, v, vs)
}
// TestRenderTabView renders the tabbed log pane in its three tab states:
// combined (default), build log only, and runtime (pod) log only.
func TestRenderTabView(t *testing.T) {
	rtf := newRendererTestFixture(t)

	vs := fakeViewState(1, view.CollapseAuto)
	now := time.Now()
	v := newView(view.Resource{
		Name: "vigoda",
		BuildHistory: []model.BuildRecord{{
			StartTime:  now.Add(-time.Minute),
			FinishTime: now,
			SpanID:     "vigoda:1",
		}},
		ResourceInfo: view.K8sResourceInfo{
			PodName:         "vigoda-pod",
			PodCreationTime: now,
			PodStatus:       "Running",
			RunStatus:       v1alpha1.RuntimeStatusOK,
			SpanID:          "vigoda:pod",
		},
		LastDeployTime: now,
	})
	// Build log and pod log live on separate spans so the tabs can filter.
	logStore := logstore.NewLogStore()
	appendSpanLog(logStore, "vigoda", "vigoda:1",
		`STEP 1/2 — Building Dockerfile: [gcr.io/windmill-public-containers/servantes/snack]
│ Tarring context…
│ Applying via kubectl
╎ Created tarball (size: 11 kB)
│ Building image
`)
	appendSpanLog(logStore, "vigoda", "vigoda:pod", "serving on 8080")
	v.LogReader = logstore.NewReader(&sync.RWMutex{}, logStore)

	rtf.run("log tab default", 117, 20, v, vs)
	vs.TabState = view.TabBuildLog
	rtf.run("log tab build", 117, 20, v, vs)
	vs.TabState = view.TabRuntimeLog
	rtf.run("log tab pod", 117, 20, v, vs)
}
// TestPendingLocalResource renders a local resource whose build is still in
// progress and whose runtime status is pending.
func TestPendingLocalResource(t *testing.T) {
	rtf := newRendererTestFixture(t)
	ts := time.Now().Add(-5 * time.Minute)
	v := newView(view.Resource{
		Name: "yarn-add",
		CurrentBuild: model.BuildRecord{
			StartTime: ts.Add(-5 * time.Second),
			Edits:     []string{"node.json"},
		},
		ResourceInfo: view.NewLocalResourceInfo(v1alpha1.RuntimeStatusPending, 0, model.LogSpanID("rt1")),
	})
	vs := fakeViewState(1, view.CollapseAuto)
	rtf.run("unfinished local resource", 80, 20, v, vs)
}
// TestFinishedLocalResource renders a local resource with one completed build
// and no serve command (RuntimeStatusNotApplicable).
//
// Idiom: drop the redundant `model.BuildRecord` element type inside the
// composite literal (gofmt -s simplification).
func TestFinishedLocalResource(t *testing.T) {
	rtf := newRendererTestFixture(t)
	v := newView(view.Resource{
		Name: "yarn-add",
		BuildHistory: []model.BuildRecord{
			{FinishTime: time.Now()},
		},
		ResourceInfo: view.NewLocalResourceInfo(v1alpha1.RuntimeStatusNotApplicable, 0, model.LogSpanID("rt1")),
	})
	vs := fakeViewState(1, view.CollapseAuto)
	rtf.run("finished local resource", 80, 20, v, vs)
}
// TestFailedBuildLocalResource renders a local resource whose most recent
// build failed; the build log should appear inline.
//
// Idiom: drop the redundant `model.BuildRecord` element type inside the
// composite literal (gofmt -s simplification).
func TestFailedBuildLocalResource(t *testing.T) {
	rtf := newRendererTestFixture(t)
	v := newView(view.Resource{
		Name: "yarn-add",
		BuildHistory: []model.BuildRecord{
			{
				FinishTime: time.Now(),
				Error:      fmt.Errorf("help i'm trapped in an error factory"),
				SpanID:     "build:1",
			},
		},
		ResourceInfo: view.LocalResourceInfo{},
	})
	v.LogReader = newSpanLogReader("yarn-add", "build:1",
		"1\n2\n3\nthe compiler did not understand!\n5\n6\n7\n8\n")
	vs := fakeViewState(1, view.CollapseAuto)
	rtf.run("failed build local resource", 80, 20, v, vs)
}
// TestLocalResourceErroredServe renders a local resource that built
// successfully but whose serve command errored at runtime.
//
// Idiom: drop the redundant `model.BuildRecord` element type inside the
// composite literal (gofmt -s simplification).
func TestLocalResourceErroredServe(t *testing.T) {
	rtf := newRendererTestFixture(t)
	v := newView(view.Resource{
		Name: "yarn-add",
		BuildHistory: []model.BuildRecord{
			{FinishTime: time.Now()},
		},
		ResourceInfo: view.NewLocalResourceInfo(v1alpha1.RuntimeStatusError, 0, model.LogSpanID("rt1")),
	})
	vs := fakeViewState(1, view.CollapseAuto)
	rtf.run("local resource errored serve", 80, 20, v, vs)
}
// rendererTestFixture wraps the rty interactive golden-snapshot tester used
// by every render test in this file.
type rendererTestFixture struct {
	i rty.InteractiveTester
}

// newRendererTestFixture builds a fixture bound to the package-level
// simulation screen (initialized by TestMain).
func newRendererTestFixture(t rty.ErrorReporter) rendererTestFixture {
	return rendererTestFixture{
		i: rty.NewInteractiveTester(t, screen),
	}
}
// run renders view v with view state vs at the given terminal size (w x h)
// and compares the result against the golden snapshot named name. It also
// sanity-checks that the view serializes to valid JSON.
func (rtf rendererTestFixture) run(name string, w int, h int, v view.View, vs view.ViewState) {
	rtf.i.T().Helper()

	// Assert that the view is serializable.
	serialized, err := json.Marshal(v)
	if err != nil {
		rtf.i.T().Errorf("Malformed view: not serializable: %v\nView: %+q\n", err, v)
	}

	// Sanity-check that the serialized bytes are well-formed JSON.
	if !json.Valid(serialized) {
		rtf.i.T().Errorf("Malformed view: bad serialization: %s", string(serialized))
	}

	// Render into a fresh simulation screen with the deterministic test clock.
	r := NewRenderer(clockForTest)
	r.rty = rty.NewRTY(tcell.NewSimulationScreen(""), rtf.i.T())
	c := r.layout(v, vs)
	rtf.i.Run(name, w, h, c)
}
// screen is the shared tcell screen used by all interactive render tests;
// it is initialized once in TestMain.
var screen tcell.Screen

// TestMain sets up the tcell screen before running the package's tests.
func TestMain(m *testing.M) {
	rty.InitScreenAndRun(m, &screen)
}
// fakeViewState builds a ViewState holding count resource view states, each
// with the given collapse setting. With count == 0 the Resources slice stays
// nil, matching the zero value.
func fakeViewState(count int, collapse view.CollapseState) view.ViewState {
	var states []view.ResourceViewState
	for i := 0; i < count; i++ {
		states = append(states, view.ResourceViewState{CollapseState: collapse})
	}
	return view.ViewState{Resources: states}
}
// newLogReader returns a reader over a log store pre-seeded with msg.
func newLogReader(msg string) logstore.Reader {
	var mu sync.RWMutex
	backing := logstore.NewLogStoreForTesting(msg)
	return logstore.NewReader(&mu, backing)
}
// testLogAction is a minimal logstore action used to seed log stores in
// tests. The zero value of level means "info" (see Level).
type testLogAction struct {
	mn     model.ManifestName
	spanID logstore.SpanID
	time   time.Time
	msg    string
	level  logger.Level
	fields logger.Fields
}

// Fields returns the structured log fields, if any.
func (e testLogAction) Fields() logger.Fields {
	return e.fields
}

// Message returns the log text as bytes.
func (e testLogAction) Message() []byte {
	return []byte(e.msg)
}

// Level returns the log level, defaulting the zero value to InfoLvl so tests
// can omit it.
func (e testLogAction) Level() logger.Level {
	if e.level == (logger.Level{}) {
		return logger.InfoLvl
	}
	return e.level
}

// Time returns the timestamp of the log entry.
func (e testLogAction) Time() time.Time {
	return e.time
}

// ManifestName returns the manifest this entry is attributed to.
func (e testLogAction) ManifestName() model.ManifestName {
	return e.mn
}

// SpanID returns the span this entry is attributed to.
func (e testLogAction) SpanID() logstore.SpanID {
	return e.spanID
}
|
package storage
import (
"github.com/mradile/rssfeeder"
"github.com/stretchr/testify/assert"
"testing"
)
// Test_userStorage exercises the user storage end to end against a real test
// database: add, unique-login constraint, get (hit and miss), update
// (including a missing-ID failure), and delete (including double delete).
func Test_userStorage(t *testing.T) {
	db, err := getDB()
	assert.Nil(t, err)
	defer db.Close()
	s := NewUserStorage(db)

	// Add user1.
	user1 := &rssfeeder.User{
		Login:    "bla",
		Password: "blub",
	}
	assert.NoError(t, s.Add(user1))

	// Add user2.
	user2 := &rssfeeder.User{
		Login:    "bla2",
		Password: "blub2",
	}
	assert.NoError(t, s.Add(user2))

	// Adding a second user with user1's login must violate the unique
	// constraint on Login.
	user1Duplicate := &rssfeeder.User{
		Login: "bla",
	}
	assert.Error(t, s.Add(user1Duplicate))

	// Get an existing user.
	getUser1, err := s.Get(user1.Login)
	assert.Nil(t, err)
	assert.Equal(t, user1, getUser1)

	// Getting a non-existent user returns (nil, nil), not an error.
	notExists, err := s.Get("asdsad")
	assert.Nil(t, err)
	assert.Nil(t, notExists)

	// Update an existing user and read the change back.
	user1.Password = "changed"
	assert.NoError(t, s.Update(user1))
	updated, err := s.Get(user1.Login)
	assert.Nil(t, err)
	assert.Equal(t, user1, updated)

	// Updating a user value without its ID must fail.
	assert.Error(t, s.Update(&rssfeeder.User{
		Login:    user2.Login,
		Password: "changed",
	}))

	// Delete user1 and verify it is gone.
	assert.NoError(t, s.Delete(user1.Login))
	deleted, err := s.Get(user1.Login)
	assert.Nil(t, err)
	assert.Nil(t, deleted)

	// Deleting an already-deleted user must error.
	assert.Error(t, s.Delete(user1.Login))
}
|
package config
import (
"excho-job/migration"
"fmt"
"os"
"gorm.io/driver/mysql"
"gorm.io/gorm"
)
// Connection opens a GORM MySQL connection configured from the DB_* environment
// variables (DB_USERNAME, DB_PASSWORD, DB_HOST, DB_PORT, DB_NAME) and runs
// schema auto-migration for all application models. It panics if either the
// connection or the migration fails.
//
// Fixes: the original ignored the error returned by every AutoMigrate call
// (silently skipping failed migrations) and flattened the connection error
// with panic(err.Error()), discarding the error value.
func Connection() *gorm.DB {
	dbUser := os.Getenv("DB_USERNAME")
	dbPass := os.Getenv("DB_PASSWORD")
	dbHost := os.Getenv("DB_HOST")
	dbPort := os.Getenv("DB_PORT")
	dbName := os.Getenv("DB_NAME")

	// charset/parseTime/loc: utf8mb4 data, scan DATETIME into time.Time,
	// interpreted in the local time zone.
	dsn := fmt.Sprintf("%s:%s@tcp(%s:%s)/%s?charset=utf8mb4&parseTime=True&loc=Local", dbUser, dbPass, dbHost, dbPort, dbName)
	// dsn := "root:@tcp(localhost)/excho_job"

	db, err := gorm.Open(mysql.Open(dsn), &gorm.Config{})
	if err != nil {
		panic(err)
	}

	// GORM v2's AutoMigrate is variadic: migrate all models in one call and
	// surface any failure instead of ignoring it.
	if err := db.AutoMigrate(
		&migration.JobSeeker{},
		&migration.JobSeekerDetails{},
		&migration.Hire{},
		&migration.Job{},
		&migration.Resume{},
		&migration.JobProfile{},
		&migration.JobSeekerProfile{},
	); err != nil {
		panic(err)
	}
	return db
}
|
package main
import (
"fmt"
"os"
"strings"
)
// main parses the fixed expression "9-5+2", writing parser output to
// stdout. On a parse failure the error goes to stderr and the process
// exits with status 1; otherwise a trailing newline is printed.
func main() {
	parser := NewParser(os.Stdout, strings.NewReader("9-5+2"))
	err := parser.Parse()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println()
}
|
package main
import (
"os"
"log"
"github.com/kniren/gota/dataframe"
"gonum.org/v1/plot"
"gonum.org/v1/plot/plotter"
"gonum.org/v1/plot/vg"
)
// main renders one scatter plot per column of the Advertising data set,
// plotting each column on the x-axis against "Sales" on the y-axis, and
// saves each plot as linearregression/data/<column>_scatter.png.
// (Note: the "Sales" column itself is also plotted, against itself.)
func main() {
	advertisingCsv, err := os.Open("linearregression/data/Advertising.csv")
	if err != nil {
		log.Fatal(err)
	}
	defer advertisingCsv.Close()
	advertisingDF := dataframe.ReadCSV(advertisingCsv)
	yVal := advertisingDF.Col("Sales").Float()
	for _, colName := range advertisingDF.Names() {
		pts := make(plotter.XYs, advertisingDF.Nrow())
		for i, floatVal := range advertisingDF.Col(colName).Float() {
			pts[i].Y = yVal[i]
			pts[i].X = floatVal
		}
		// Renamed from "plot": the local variable used to shadow the
		// imported plot package.
		p, err := plot.New()
		if err != nil {
			log.Fatal(err)
		}
		graph, err := plotter.NewScatter(pts)
		if err != nil {
			log.Fatal(err)
		}
		p.Y.Label.Text = "Sales"
		p.X.Label.Text = colName
		p.Add(plotter.NewGrid())
		graph.GlyphStyle.Radius = vg.Points(3)
		p.Add(graph)
		if err = p.Save(4*vg.Inch, 4*vg.Inch, "linearregression/data/"+colName+"_scatter.png"); err != nil {
			log.Fatal(err)
		}
	}
}
|
/*
* Lean tool - hypothesis testing application
*
* https://github.com/MikaelLazarev/willie/
* Copyright (c) 2020. Mikhail Lazarev
*
*/
package middlewares
import (
"context"
"github.com/MikaelLazarev/willie/server/core"
"github.com/MikaelLazarev/willie/server/errors/sentry"
"github.com/gin-gonic/gin"
uuid "github.com/satori/go.uuid"
"log"
"strings"
)
// CookieAuthHandler returns a gin middleware that identifies the visitor
// via a "user_id" cookie. When the cookie is absent it mints a fresh
// UUIDv4, records a lead via the marketing service (best-effort), and
// sets the cookie. The resolved id is stored in the gin context under
// the key "userId".
func CookieAuthHandler(us core.MarketingServiceI) gin.HandlerFunc {
	return func(c *gin.Context) {
		var userID string
		var err error
		// When browser asks for manifest.json, it doesn't provide cookie!
		if strings.Contains(c.Request.URL.Path, "manifest.json") {
			return
		}
		userID, err = c.Cookie("user_id")
		if err != nil {
			// No cookie yet: create an anonymous identity.
			userID = uuid.NewV4().String()
			// Deliberately shadows the outer err — lead-creation failures
			// are logged and reported but never block the request.
			err := us.CreateLead(context.TODO(), userID, c.Request.URL.Path, c.Request.UserAgent())
			if err != nil {
				log.Println("Cant create user")
				sentry.ReportError(err)
			}
			SetCookieID(c, userID)
		}
		// Set UserID and Role to context
		c.Set("userId", userID)
	}
}
// SetCookieID stores cookieID in the "user_id" cookie.
// MaxAge is 3600*24*30*12 seconds — twelve 30-day months (~360 days),
// not a calendar year. Path and domain are left empty (gin defaults).
// NOTE(review): secure=false and httpOnly=false mean the cookie travels
// over plain HTTP and is readable from JavaScript — confirm this is
// intentional for a tracking cookie.
func SetCookieID(c *gin.Context, cookieID string) {
	c.SetCookie("user_id", cookieID, 3600*24*30*12, "", "", false, false)
}
|
package core
import (
"encoding/json"
"reflect"
"testing"
"github.com/davecgh/go-spew/spew"
)
// TestVariationMatrixMarshalJSON checks that a VariationMatrix carrying
// both direct Products entries and nested Children marshals into a single
// flat JSON object: product keys map to strings, child keys map to nested
// objects, merged at the same level.
func TestVariationMatrixMarshalJSON(t *testing.T) {
	for _, test := range []struct {
		name     string
		matrix   *VariationMatrix
		expected string // golden JSON output
	}{
		{
			"recursive",
			&VariationMatrix{
				Children: map[string]*VariationMatrix{
					"f2f501ad-5829-4eec-bcb6-b35a48a2ea93": &VariationMatrix{
						Products: map[string]string{
							"e9af3f88-1bae-4069-a3e0-51b0abe24931": "751b294d-1b1b-4de2-9d91-6f240eb921c4",
						},
					},
				},
				Products: map[string]string{
					"90c1dd19-ff71-4eeb-b1a0-2e12a82ba357": "19f9672c-5c37-4f63-abe6-c06ccb71d999",
				},
			},
			`{"90c1dd19-ff71-4eeb-b1a0-2e12a82ba357":"19f9672c-5c37-4f63-abe6-c06ccb71d999","f2f501ad-5829-4eec-bcb6-b35a48a2ea93":{"e9af3f88-1bae-4069-a3e0-51b0abe24931":"751b294d-1b1b-4de2-9d91-6f240eb921c4"}}`,
		},
	} {
		t.Run(test.name, func(t *testing.T) {
			data, err := json.Marshal(test.matrix)
			if err != nil {
				t.Fatal(err)
			}
			// Exact string comparison against the golden output.
			if string(data) != test.expected {
				t.Errorf(
					"\nexpected: %s\ngot: %s\n",
					test.expected,
					string(data),
				)
			}
		})
	}
}

// TestVariationMatrixUnmarshalJSON is the inverse of the marshal test: a
// flat JSON object whose values are strings or nested objects must decode
// into Products and Children respectively.
func TestVariationMatrixUnmarshalJSON(t *testing.T) {
	for _, test := range []struct {
		name     string
		json     string // input document
		expected *VariationMatrix
	}{
		{
			"recursive",
			`{"90c1dd19-ff71-4eeb-b1a0-2e12a82ba357":"19f9672c-5c37-4f63-abe6-c06ccb71d999","f2f501ad-5829-4eec-bcb6-b35a48a2ea93":{"e9af3f88-1bae-4069-a3e0-51b0abe24931":"751b294d-1b1b-4de2-9d91-6f240eb921c4"}}`,
			&VariationMatrix{
				Children: map[string]*VariationMatrix{
					"f2f501ad-5829-4eec-bcb6-b35a48a2ea93": &VariationMatrix{
						Products: map[string]string{
							"e9af3f88-1bae-4069-a3e0-51b0abe24931": "751b294d-1b1b-4de2-9d91-6f240eb921c4",
						},
					},
				},
				Products: map[string]string{
					"90c1dd19-ff71-4eeb-b1a0-2e12a82ba357": "19f9672c-5c37-4f63-abe6-c06ccb71d999",
				},
			},
		},
	} {
		t.Run(test.name, func(t *testing.T) {
			var matrix *VariationMatrix
			err := json.Unmarshal([]byte(test.json), &matrix)
			if err != nil {
				t.Fatal(err)
			}
			if !reflect.DeepEqual(matrix, test.expected) {
				// spew renders the full nested structure for the failure message.
				expectedS := spew.Sprintf("%#v", test.expected)
				matrixS := spew.Sprintf("%#v", matrix)
				t.Errorf(
					"\nexpected: %#v\ngot: %#v\n",
					expectedS,
					matrixS,
				)
			}
		})
	}
}
|
package ravendb
import "reflect"
// ConcurrencyCheckMode describes concurrency check
type ConcurrencyCheckMode int

const (
	// ConcurrencyCheckAuto is automatic optimistic concurrency check depending on UseOptimisticConcurrency setting or provided Change Vector
	ConcurrencyCheckAuto ConcurrencyCheckMode = iota
	// ConcurrencyCheckForced forces optimistic concurrency check even if UseOptimisticConcurrency is not set
	ConcurrencyCheckForced
	// ConcurrencyCheckDisabled disables optimistic concurrency check even if UseOptimisticConcurrency is set
	ConcurrencyCheckDisabled
)

// documentInfo stores information about entity in a session
// TODO: maybe route all places where we compare enity for equality via
// documentInfo.Equal(other interface{}), so that we can catch
// mismatches of *struct and **struct
type documentInfo struct {
	id                   string                 // document id
	changeVector         *string                // change vector from the document metadata
	concurrencyCheckMode ConcurrencyCheckMode
	ignoreChanges        bool
	metadata             map[string]interface{} // parsed @metadata object
	document             map[string]interface{} // raw document as a map
	metadataInstance     *MetadataAsDictionary
	entity               interface{} // always set via setEntity to keep the *struct invariant
	newDocument          bool
	collection           string
}
// we want to route assignments to entity through this functions
// so that we can maintain invariant that entity is *struct (and
// not, e.g., **struct). It's hard to track the difference between
// *struct and **struct otherwise
func (d *documentInfo) setEntity(value interface{}) {
	// TODO: maybe also support value of type *map[string]interface{}?
	if _, ok := value.(map[string]interface{}); ok {
		d.entity = value
		return
	}
	tp := reflect.TypeOf(value)
	if tp.Kind() == reflect.Struct {
		// A bare struct value is rejected. Assuming panicIf(true, ...)
		// always panics, the assignment below is unreachable dead code.
		panicIf(true, "trying to set struct %T", value)
		d.entity = value
	}
	// NOTE(review): this branch re-evaluates its own condition inside
	// panicIf, so when entered it should always panic; the assignment and
	// return below look like dead code kept for safety.
	if tp.Kind() != reflect.Ptr || tp.Elem() == nil {
		panicIf(tp.Kind() != reflect.Ptr || tp.Elem() == nil, "expected value to be *struct or **struct, is %T", value)
		d.entity = value
		return
	}
	tp = tp.Elem()
	if tp.Kind() == reflect.Struct {
		// if it's *struct, just assign
		d.entity = value
		return
	}
	if tp.Kind() != reflect.Ptr || tp.Elem() == nil || tp.Elem().Kind() != reflect.Struct {
		//panicIf(tp.Kind() != reflect.Ptr || tp.Elem() == nil || tp.Elem().Kind() != reflect.Struct, "expected value to be *struct or **struct, is %T", value)
		//TODO: re-enable this panic and fix places that trigger it
		d.entity = value
		return
	}
	// it's **struct, so extract *struct
	rv := reflect.ValueOf(value)
	rv = rv.Elem() // it's *struct now
	d.entity = rv.Interface()
}
// getNewDocumentInfo builds a documentInfo from a raw document map.
// The document must carry a metadata object containing a non-empty id and
// a change vector; violations are reported via panicIf (the original TODOs
// about converting these panics to errors still apply).
func getNewDocumentInfo(document map[string]interface{}) *documentInfo {
	metadataV, ok := document[MetadataKey]
	panicIf(!ok, "Document must have a metadata")
	metadata, ok := metadataV.(map[string]interface{})
	panicIf(!ok, "Document metadata is not a valid type %T", metadataV)
	id, ok := jsonGetAsText(metadata, MetadataID)
	panicIf(!ok || id == "", "Document must have an id")
	changeVector := jsonGetAsTextPointer(metadata, MetadataChangeVector)
	panicIf(changeVector == nil, "Document must have a Change Vector")
	return &documentInfo{
		id:           id,
		document:     document,
		metadata:     metadata,
		changeVector: changeVector,
	}
}
|
package leetcode
// isValid reports whether s consists of correctly matched and nested
// bracket pairs: (), [], {}.
//
// An odd-length string can never be fully matched, so it is rejected up
// front. Every byte that is not a closing bracket is pushed onto a stack;
// each closing bracket must match the most recently pushed unmatched byte.
func isValid(s string) bool {
	if len(s)%2 == 1 {
		return false
	}
	pairs := map[byte]byte{
		')': '(',
		']': '[',
		'}': '{',
	}
	var stack []byte
	for i := 0; i < len(s); i++ {
		c := s[i]
		open, isCloser := pairs[c]
		if !isCloser {
			stack = append(stack, c)
			continue
		}
		if len(stack) == 0 || stack[len(stack)-1] != open {
			return false
		}
		stack = stack[:len(stack)-1]
	}
	return len(stack) == 0
}
|
/*-------------------------------------------------------------------------
*
* export_runner.go
* Export Runner
*
*
* Copyright (c) 2021, Alibaba Group Holding Limited
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* IDENTIFICATION
* internal/gather/export_runner.go
*-------------------------------------------------------------------------
*/
package gather
import (
"fmt"
"github.com/ApsaraDB/PolarDB-NodeAgent/common/consts"
"github.com/ApsaraDB/PolarDB-NodeAgent/common/log"
"sync"
"time"
)
// ExportRunner a runner only for export functions
type ExportRunner struct {
	Runner        // embedded generic runner state and helpers
	firstRun bool // true until the first gather tick has fired (see runModule)
}

// NewExportRunner new a docker runner object, one plugin conf will run only one runner and one instance in this runner
func NewExportRunner() RunnerService {
	er := &ExportRunner{firstRun: true}
	return er
}

// RunnerInit init this runner
// It delegates to the embedded Runner's runnerInit.
func (er *ExportRunner) RunnerInit(mInfoMap *sync.Map, pInfo *PluginInfo, pCtx *PluginCtx) error {
	return er.runnerInit(mInfoMap, pInfo, pCtx)
}
// RunnerRun export runner run
// It lazily loads the plugin module, registers a single unnamed Instance
// for the plugin, and launches the gather loop in a goroutine.
func (er *ExportRunner) RunnerRun(wg *sync.WaitGroup) error {
	er.running = true
	// NOTE(review): runSingleExportRunner also calls wg.Add(1), but from
	// inside the spawned goroutine; a caller that Waits right after
	// RunnerRun returns can race with that Add. Moving the Add before the
	// `go` statement would be the conventional fix — confirm with callers
	// and errorHandler's Done accounting before changing.
	wg.Add(1)
	defer wg.Done()
	log.Info("[export_runner] Run", log.String("module", er.pInfo.Name))
	ins := &Instance{
		running:    true,
		insName:    "",
		pInfo:      er.pInfo,
		stop:       make(chan bool, 1),
		dataLogger: log.NewDataLogger(er.pInfo.Name, consts.DataJsonConf),
	}
	if err := er.lazyLoadModule(); err != nil {
		log.Error("[export_runner] lazyLoadModule failed", log.String("module", er.pInfo.Name), log.String("err", err.Error()))
		return fmt.Errorf("export_runner lazyLoadModule failed,module:%s,err:%v", er.pInfo.Name, err.Error())
	}
	info, ok := er.mInfoMap.Load(er.pInfo.Name)
	if !ok {
		log.Error("[export_runner] module info not found", log.String("module", er.pInfo.Name))
		return fmt.Errorf("export_runner module not found:%s", er.pInfo.Name)
	}
	ins.mInfo, _ = info.(*ModuleInfo)
	log.Info("[export_runner] find a new instance",
		log.String("runner", er.pInfo.Name),
		log.String("target", er.pInfo.Target))
	er.instances[er.pInfo.Name] = ins
	go er.runSingleExportRunner(ins, wg)
	return nil
}
// RunnerStatus get status
func (er *ExportRunner) RunnerStatus() interface{} {
	return er.status
}

// RunnerNotify notify a runner
func (er *ExportRunner) RunnerNotify() {
	// set all runner instance `notify` from false to true
	for _, v := range er.instances {
		v.notify = true
	}
}

// RunnerStop stop runner
// It clears running before signalling each stop channel; runModule
// re-checks ins.running after draining the channel, so the order matters.
func (er *ExportRunner) RunnerStop() {
	log.Info("[export_runner] Stop", log.String("module", er.pInfo.Name))
	for _, v := range er.instances {
		v.running = false
		v.stop <- true
	}
	return
}
// runSingleExportRunner is the per-instance gather loop: it repeatedly
// initializes the plugin, runs it via runModule until it stops or fails,
// exits the module, then waits before re-initializing — until the
// instance is stopped.
func (er *ExportRunner) runSingleExportRunner(ins *Instance, wg *sync.WaitGroup) {
	wg.Add(1)
	defer er.errorHandler(ins, wg)
	var initCtx interface{}
	var err error
	var stop bool
	// invoke plugin for gathering
	for {
		if !ins.running {
			log.Info("[export_runner] not running",
				log.String("runner", er.pInfo.Name),
				log.String("target", er.pInfo.Target))
			break
		}
		// init plugin; on failure skip straight to the retry wait
		initCtx, err = er.initModule(ins)
		if err != nil {
			goto SLEEP_INTERVAL
		}
		// store init context
		ins.mInfo.Contexts.Store(ins.pInfo.Name, initCtx)
		// run module (blocks until the module stops or errors out)
		er.runModule(initCtx, ins)
		err = er.exitModule(initCtx, ins)
		if err != nil {
			log.Error("[export_runner] exit module failed",
				log.String("name", er.pInfo.Name),
				log.String("insName", ins.insName))
		}
		// if module crashed, wait 1min and reinit
		// NOTE(review): the comment above says 1min, but the code waits
		// 3 seconds — confirm which is intended.
	SLEEP_INTERVAL:
		select {
		case <-ins.stop:
			stop = true
			break
		case <-time.After(3 * time.Second):
		}
		if stop {
			break
		}
	}
}
// backendCtx not used yet, always nil
// runModule repeatedly invokes the plugin's Run entry point for ins until
// the instance is stopped or the plugin has failed more than 3 times
// (cumulative, not consecutive). The first run happens after a fixed 60s
// warm-up tick; afterwards the ticker is re-created with the configured
// per-plugin interval (see the er.firstRun handling below).
func (er *ExportRunner) runModule(initCtx interface{}, ins *Instance) {
	errCount := 0
	var costMillis int64
	mainRun := ins.mInfo.PluginABI.Run
	collectContentMap := make(map[string]interface{}, 8)
	// interval configured in SECOND
	ticker := time.NewTicker(time.Duration(60) * time.Second)
	// Stop whichever ticker is current when we return. Previously the
	// ticker was only stopped on the ins.stop path, so the errCount>3 and
	// !ins.running exits leaked a running ticker. The closure is needed
	// because ticker is re-assigned after the first tick.
	defer func() { ticker.Stop() }()
	for {
		if ins.notify {
			ins.notify = false
			// TODO notify action
		}
		if !ins.running {
			break
		}
		collectStartTimeMillis := time.Now().UnixNano() / 1e6
		// 2. invoke PluginRun in plugin.so
		err := mainRun(initCtx, collectContentMap)
		if err != nil {
			errCount++
			if errCount > 3 {
				log.Error("[export_runner] collector plugin run error, reinit it",
					log.String("name", er.pInfo.Name),
					log.Int("errCount", errCount))
				break
			}
			log.Error("[export_runner] collector plugin run error",
				log.String("name", er.pInfo.Name),
				log.String("err", err.Error()))
			goto SLEEP_INTERVAL
		}
		// post process: record how long this collection took
		costMillis = time.Now().UnixNano()/1e6 - collectStartTimeMillis
		log.Info("[export_runner] all costs ms",
			log.Int64("cost", costMillis), log.String("name", er.pInfo.Name))
	SLEEP_INTERVAL:
		select {
		case <-ins.stop:
			ticker.Stop()
			// re-signal stop so runSingleExportRunner's select also sees it
			ins.stop <- true
			// This break only leaves the select; the loop then exits via the
			// !ins.running check (RunnerStop clears running before signalling).
			break
		case <-ticker.C:
			if er.firstRun {
				// Special-case the first run: after an agent restart,
				// disk-related data takes a while to become available, so the
				// first interval is a fixed 60s before switching to the
				// configured interval.
				er.firstRun = false
				ticker.Stop()
				ticker = time.NewTicker(time.Duration(er.pInfo.Interval) * time.Second)
				log.Info("[export_runner] new ticker",
					log.Int("interval", er.pInfo.Interval), log.String("name", er.pInfo.Name))
			}
			break
		}
	}
}
|
package main
import (
"log"
"main/utils"
)
/**
给定一个排序数组,你需要在原地删除重复出现的元素,使得每个元素只出现一次,返回移除后数组的新长度。
不要使用额外的数组空间,你必须在原地修改输入数组并在使用 O(1) 额外空间的条件下完成。
示例 1:
给定数组 nums = [1,1,2],
函数应该返回新的长度 2, 并且原数组 nums 的前两个元素被修改为 1, 2。
你不需要考虑数组中超出新长度后面的元素。
示例 2:
给定 nums = [0,0,1,1,1,2,2,3,3,4],
函数应该返回新的长度 5, 并且原数组 nums 的前五个元素被修改为 0, 1, 2, 3, 4。
你不需要考虑数组中超出新长度后面的元素。
说明:
为什么返回数值是整数,但输出的答案是数组呢?
请注意,输入数组是以“引用”方式传递的,这意味着在函数里修改输入数组对于调用者是可见的。
你可以想象内部操作如下:
// nums 是以“引用”方式传递的。也就是说,不对实参做任何拷贝
int len = removeDuplicates(nums);
// 在函数里修改输入数组对于调用者是可见的。
// 根据你的函数返回的长度, 它会打印出数组中该长度范围内的所有元素。
for (int i = 0; i < len; i++) {
print(nums[i]);
}
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/remove-duplicates-from-sorted-array
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
*/
// removeDuplicates returns the number of elements left after delegating
// in-place deduplication to utils.RemoveDuplicateArrayWithChange.
// NOTE(review): presumably that helper shrinks the slice through the
// passed pointer (hence the returned len) — confirm in the utils package.
func removeDuplicates(nums []int) int {
	if len(nums) == 0 {
		return 0
	}
	utils.RemoveDuplicateArrayWithChange(&nums)
	return len(nums)
}
// removeDuplicates2 compacts the slice in place so that runs of equal
// adjacent values are collapsed to a single element within the first k
// positions, and returns k. Elements past the returned length are left
// unspecified — callers only inspect the first k entries (sorted-array
// LeetCode contract).
func removeDuplicates2(nums []int) int {
	if len(nums) == 0 {
		return 0
	}
	// write is the next free slot; nums[write-1] is the last kept value.
	write := 1
	for read := 1; read < len(nums); read++ {
		if nums[read] != nums[write-1] {
			nums[write] = nums[read]
			write++
		}
	}
	return write
}
// main exercises removeDuplicates and removeDuplicates2 against a few
// fixed cases, logging the first unexpected length (and stopping) or
// "success" when all cases pass.
func main() {
	cases := []struct {
		fn   func([]int) int
		in   []int
		want int
	}{
		{removeDuplicates, []int{1, 1, 2, 2, 3}, 3},
		{removeDuplicates, []int{1, 1, 2}, 2},
		{removeDuplicates2, []int{1, 1, 2}, 2},
		{removeDuplicates, []int{}, 0},
	}
	for _, c := range cases {
		if got := c.fn(c.in); got != c.want {
			log.Printf("error: %v", got)
			return
		}
	}
	log.Printf("success")
}
|
package main
import (
"bufio"
"fmt"
"math"
"os"
"strconv"
)
// in tokenizes stdin; the split mode is switched to words in init.
var in = bufio.NewScanner(os.Stdin)

// ab accumulates, per time slot 0..3000, the b-values of all input pairs
// whose a-value maps to that slot (see main).
var ab [3001]int

func init() {
	// read whitespace-separated tokens instead of whole lines
	in.Split(bufio.ScanWords)
}
// main reads n (a, b) pairs: b units become available at 1-based time a,
// and at most v units can be consumed per time step. Unconsumed leftovers
// from a step carry over exactly one step (the formula below only carries
// the portion of ab[i] that could not be consumed). It prints the total
// consumed. Appears to be a competitive-programming task — the exact
// problem statement is not available here.
func main() {
	n, v := readInt(), readInt()
	for i := 0; i < n; i++ {
		a, b := readInt(), readInt()
		// bucket by time; input a is 1-based, the array is 0-based
		ab[a-1] += b
	}
	remain, col := 0, 0
	for i := 0; i < 3001; i++ {
		// consume at most v from the carry-over plus this step's arrivals
		col += int(math.Min(float64(v), float64(remain+ab[i])))
		// carry over only the part of ab[i] not covered by the capacity
		// left after serving the previous carry-over
		remain = int(math.Max(float64(ab[i])-math.Max(float64(v-remain), 0), 0))
	}
	fmt.Printf("%d\n", col)
}
// readInt scans the next whitespace-separated token from stdin and
// returns it as an int. The conversion error is deliberately ignored
// (competitive-programming style); malformed input yields 0.
func readInt() int {
	in.Scan()
	n, _ := strconv.Atoi(in.Text())
	return n
}
|
package core
import (
"reflect"
"testing"
)
// TestSpotifyID_ToBase62 checks that a raw 16-byte SpotifyID encodes to
// the expected 22-character base62 string.
func TestSpotifyID_ToBase62(t *testing.T) {
	tests := []struct {
		name string
		s    SpotifyID
		want string
	}{
		{
			s:    SpotifyID([]byte{0x00, 0x0d, 0x53, 0x65, 0x35, 0x86, 0x4e, 0x0f, 0x99, 0x76, 0x1f, 0x9d, 0xa9, 0x00, 0xb1, 0xc1}),
			want: "0065zxtT6XKaQww7cLne0h",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := tt.s.ToBase62(); got != tt.want {
				t.Errorf("SpotifyID.ToBase62() = %v, want %v", got, tt.want)
			}
		})
	}
}

// TestNewSpotifyIDFromBase62 checks decoding of a valid base62 id back to
// raw bytes and that an id containing non-base62 characters is rejected.
func TestNewSpotifyIDFromBase62(t *testing.T) {
	type args struct {
		id string
	}
	tests := []struct {
		name    string
		args    args
		want    SpotifyID
		wantErr bool
	}{
		{
			args:    args{id: "0065zxtT6XKaQww7cLne0h"},
			want:    SpotifyID([]byte{0x00, 0x0d, 0x53, 0x65, 0x35, 0x86, 0x4e, 0x0f, 0x99, 0x76, 0x1f, 0x9d, 0xa9, 0x00, 0xb1, 0xc1}),
			wantErr: false,
		},
		{
			// invalid characters must produce an error
			args:    args{id: "0065z*(&}{}}||^^**&***&*&*&*tT6XKaQww7cLne0h"},
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := NewSpotifyIDFromBase62(tt.args.id)
			if (err != nil) != tt.wantErr {
				t.Errorf("NewSpotifyIDFromBase62() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("NewSpotifyIDFromBase62() = %v, want %v", got, tt.want)
			}
		})
	}
}

// TestNewSpotifyIDFromRaw checks that exactly 16 raw bytes are accepted
// verbatim and that any other length (here 18 bytes) is rejected.
func TestNewSpotifyIDFromRaw(t *testing.T) {
	type args struct {
		data []byte
	}
	tests := []struct {
		name    string
		args    args
		want    SpotifyID
		wantErr bool
	}{
		{
			args:    args{data: []byte{0x00, 0x0d, 0x53, 0x65, 0x35, 0x86, 0x4e, 0x0f, 0x99, 0x76, 0x1f, 0x9d, 0xa9, 0x00, 0xb1, 0xc1}},
			want:    SpotifyID([]byte{0x00, 0x0d, 0x53, 0x65, 0x35, 0x86, 0x4e, 0x0f, 0x99, 0x76, 0x1f, 0x9d, 0xa9, 0x00, 0xb1, 0xc1}),
			wantErr: false,
		},
		{
			// wrong length (18 bytes) must produce an error
			args:    args{data: []byte{0x00, 0x00, 0x00, 0x0d, 0x53, 0x65, 0x35, 0x86, 0x4e, 0x0f, 0x99, 0x76, 0x1f, 0x9d, 0xa9, 0x00, 0xb1, 0xc1}},
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := NewSpotifyIDFromRaw(tt.args.data)
			if (err != nil) != tt.wantErr {
				t.Errorf("NewSpotifyIDFromRaw() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("NewSpotifyIDFromRaw() = %v, want %v", got, tt.want)
			}
		})
	}
}
// Test_reverse checks reverse against fixed input/expected pairs.
func Test_reverse(t *testing.T) {
	cases := []struct {
		name string
		in   string
		want string
	}{
		{in: "abcdefg", want: "gfedcba"},
		{in: "123456", want: "654321"},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			got := reverse(c.in)
			if got != c.want {
				t.Errorf("reverse() = %v, want %v", got, c.want)
			}
		})
	}
}
|
package main
import (
"fmt"
"math"
)
// main interprets the decimal literal 1011 as a string of binary digits
// and prints its decimal value (11): each decimal digit, taken from the
// least significant end, is weighted by the matching power of two.
// Assumes every digit of n is 0 or 1.
func main() {
	n := 1011
	v := 0
	i := 0
	for n > 0 {
		digit := n % 10 // least-significant "binary" digit
		v += int(math.Pow(2.0, float64(i))) * digit
		n = n / 10
		i++
	}
	fmt.Println(v)
}
|
package zk
import (
"net"
"sync"
"sync/atomic"
)
const (
	DefaultPort    = 2181     // default ZooKeeper port
	RecvTimeout    = 1        // receive timeout, in seconds
	SessionTimeout = 4000     // client session timeout, in milliseconds
	PingInterval   = 2000     // ping interval, in milliseconds
	BufferSize     = 2 * 1024 // 2 KiB (the original comment said "1K", which did not match the value)
	SentChanSize   = 16       // capacity of the outgoing request queue
	RecvChanSize   = 16       // capacity of the incoming response queue
)
// ZkCli is a ZooKeeper client handle.
type ZkCli struct {
	xid             int32              // request sequence number, used to match requests to responses
	reqMap          map[int32]*request // in-flight requests keyed by xid
	reqLock         sync.Mutex         // guards operations on reqMap
	protocolversion int32
	sessiontimeout  int32
	sessionid       int64
	password        []byte // session password (16 bytes)
	conn            *net.TCPConn
	state           int32
	sentchan        chan *request // queue of requests awaiting transmission
}

// request tracks a single client request and its eventual response.
type request struct {
	xid       int32           // request id (see ZkCli.getNextXid)
	opcode    int32           // operation code
	reqbuf    []byte          // raw bytes to send on the wire
	resheader *responseHeader // decoded response header
	resbuf    []byte          // raw bytes received from the wire
	err       error           // error, if any, for this request
	done      chan bool       // signalled when the request has been processed
}
// New creates a disconnected ZkCli with a zeroed 16-byte session
// password, an empty request map, and a buffered send queue. Numeric
// fields and the connection start at their zero values.
func New() *ZkCli {
	cli := &ZkCli{
		reqMap:   make(map[int32]*request),
		password: make([]byte, 16),
		state:    stateDisconnect,
		sentchan: make(chan *request, SentChanSize),
	}
	return cli
}
// getNextXid atomically increments and returns the next request id.
// Safe for concurrent use.
func (zkCli *ZkCli) getNextXid() int32 {
	return atomic.AddInt32(&zkCli.xid, 1)
}
|
package api
import (
"net/http"
"sort"
"github.com/gin-gonic/gin"
"github.com/pegasus-cloud/iam_client/iam"
"github.com/pegasus-cloud/iam_client/utility"
)
// listPermissionActions responds with HTTP 200 and the full list of IAM
// permission actions, wrapped in an actions payload via
// utility.ResponseWithType.
func listPermissionActions(c *gin.Context) {
	getActions := iam.Actions.GetActions()
	// sort in place for a stable, deterministic response order
	sort.Strings(getActions)
	utility.ResponseWithType(c, http.StatusOK, &actions{
		Actions: getActions,
	})
}
|
package user
import (
log "github.com/sirupsen/logrus"
)
// Name logs a fixed informational message at Info level; it appears to
// exist only to demonstrate logging from the user package.
func Name() {
	log.Info("Logging from user package")
}
// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package utils holds common testing utilities for tcpip.
package utils
import (
"testing"
"gvisor.dev/gvisor/pkg/buffer"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/checksum"
"gvisor.dev/gvisor/pkg/tcpip/header"
"gvisor.dev/gvisor/pkg/tcpip/link/channel"
"gvisor.dev/gvisor/pkg/tcpip/link/ethernet"
"gvisor.dev/gvisor/pkg/tcpip/link/nested"
"gvisor.dev/gvisor/pkg/tcpip/link/pipe"
"gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
"gvisor.dev/gvisor/pkg/tcpip/network/ipv6"
"gvisor.dev/gvisor/pkg/tcpip/prependable"
"gvisor.dev/gvisor/pkg/tcpip/stack"
"gvisor.dev/gvisor/pkg/tcpip/testutil"
"gvisor.dev/gvisor/pkg/tcpip/transport/icmp"
)
// Common NIC IDs used by tests.
const (
	Host1NICID   = 1
	RouterNICID1 = 2
	RouterNICID2 = 3
	Host2NICID   = 4
)

// Common NIC names used by tests.
const (
	Host1NICName   = "host1NIC"
	RouterNIC1Name = "routerNIC1"
	RouterNIC2Name = "routerNIC2"
	Host2NICName   = "host2NIC"
)

// Common link addresses used by tests.
const (
	LinkAddr1 = tcpip.LinkAddress("\x02\x03\x03\x04\x05\x06")
	LinkAddr2 = tcpip.LinkAddress("\x02\x03\x03\x04\x05\x07")
	LinkAddr3 = tcpip.LinkAddress("\x02\x03\x03\x04\x05\x08")
	LinkAddr4 = tcpip.LinkAddress("\x02\x03\x03\x04\x05\x09")
)

// Common IP addresses used by tests.
var (
	Ipv4Addr = tcpip.AddressWithPrefix{
		Address:   testutil.MustParse4("192.168.1.58"),
		PrefixLen: 24,
	}
	Ipv4Subnet      = Ipv4Addr.Subnet()
	Ipv4SubnetBcast = Ipv4Subnet.Broadcast()

	Ipv6Addr = tcpip.AddressWithPrefix{
		Address:   testutil.MustParse6("200a::1"),
		PrefixLen: 64,
	}
	Ipv6Subnet      = Ipv6Addr.Subnet()
	Ipv6SubnetBcast = Ipv6Subnet.Broadcast()

	Ipv4Addr1 = tcpip.ProtocolAddress{
		Protocol: ipv4.ProtocolNumber,
		AddressWithPrefix: tcpip.AddressWithPrefix{
			Address:   testutil.MustParse4("192.168.0.1"),
			PrefixLen: 24,
		},
	}
	// Ipv4Addr2 and Ipv4Addr3 deliberately use a /8 prefix, unlike
	// Ipv4Addr1's /24.
	Ipv4Addr2 = tcpip.ProtocolAddress{
		Protocol: ipv4.ProtocolNumber,
		AddressWithPrefix: tcpip.AddressWithPrefix{
			Address:   testutil.MustParse4("192.168.0.2"),
			PrefixLen: 8,
		},
	}
	Ipv4Addr3 = tcpip.ProtocolAddress{
		Protocol: ipv4.ProtocolNumber,
		AddressWithPrefix: tcpip.AddressWithPrefix{
			Address:   testutil.MustParse4("192.168.0.3"),
			PrefixLen: 8,
		},
	}
	Ipv6Addr1 = tcpip.ProtocolAddress{
		Protocol: ipv6.ProtocolNumber,
		AddressWithPrefix: tcpip.AddressWithPrefix{
			Address:   testutil.MustParse6("a::1"),
			PrefixLen: 64,
		},
	}
	Ipv6Addr2 = tcpip.ProtocolAddress{
		Protocol: ipv6.ProtocolNumber,
		AddressWithPrefix: tcpip.AddressWithPrefix{
			Address:   testutil.MustParse6("a::2"),
			PrefixLen: 64,
		},
	}
	Ipv6Addr3 = tcpip.ProtocolAddress{
		Protocol: ipv6.ProtocolNumber,
		AddressWithPrefix: tcpip.AddressWithPrefix{
			Address:   testutil.MustParse6("a::3"),
			PrefixLen: 64,
		},
	}

	// Remote addrs.
	RemoteIPv4Addr = testutil.MustParse4("10.0.0.1")
	RemoteIPv6Addr = testutil.MustParse6("200b::1")
)

// Common ports for testing.
const (
	RemotePort = 5555
	LocalPort  = 80
)

// Common IP addresses used for testing.
// Topology: host1 (192.168.0.0/24, a::/64) <-> router <-> host2
// (10.0.0.0/8, b::/64); see SetupRoutedStacks.
var (
	Host1IPv4Addr = tcpip.ProtocolAddress{
		Protocol: ipv4.ProtocolNumber,
		AddressWithPrefix: tcpip.AddressWithPrefix{
			Address:   testutil.MustParse4("192.168.0.2"),
			PrefixLen: 24,
		},
	}
	RouterNIC1IPv4Addr = tcpip.ProtocolAddress{
		Protocol: ipv4.ProtocolNumber,
		AddressWithPrefix: tcpip.AddressWithPrefix{
			Address:   testutil.MustParse4("192.168.0.1"),
			PrefixLen: 24,
		},
	}
	RouterNIC2IPv4Addr = tcpip.ProtocolAddress{
		Protocol: ipv4.ProtocolNumber,
		AddressWithPrefix: tcpip.AddressWithPrefix{
			Address:   testutil.MustParse4("10.0.0.3"),
			PrefixLen: 8,
		},
	}
	Host2IPv4Addr = tcpip.ProtocolAddress{
		Protocol: ipv4.ProtocolNumber,
		AddressWithPrefix: tcpip.AddressWithPrefix{
			Address:   testutil.MustParse4("10.0.0.2"),
			PrefixLen: 8,
		},
	}
	Host1IPv6Addr = tcpip.ProtocolAddress{
		Protocol: ipv6.ProtocolNumber,
		AddressWithPrefix: tcpip.AddressWithPrefix{
			Address:   testutil.MustParse6("a::2"),
			PrefixLen: 64,
		},
	}
	RouterNIC1IPv6Addr = tcpip.ProtocolAddress{
		Protocol: ipv6.ProtocolNumber,
		AddressWithPrefix: tcpip.AddressWithPrefix{
			Address:   testutil.MustParse6("a::1"),
			PrefixLen: 64,
		},
	}
	RouterNIC2IPv6Addr = tcpip.ProtocolAddress{
		Protocol: ipv6.ProtocolNumber,
		AddressWithPrefix: tcpip.AddressWithPrefix{
			Address:   testutil.MustParse6("b::1"),
			PrefixLen: 64,
		},
	}
	Host2IPv6Addr = tcpip.ProtocolAddress{
		Protocol: ipv6.ProtocolNumber,
		AddressWithPrefix: tcpip.AddressWithPrefix{
			Address:   testutil.MustParse6("b::2"),
			PrefixLen: 64,
		},
	}
)
// NewEthernetEndpoint returns an ethernet link endpoint that wraps an inner
// link endpoint and checks the destination link address before delivering
// network packets to the network dispatcher.
//
// See ethernet.Endpoint for more details.
func NewEthernetEndpoint(ep stack.LinkEndpoint) *EndpointWithDestinationCheck {
	var e EndpointWithDestinationCheck
	// Wire the nested endpoint so e observes — and can filter — packets
	// delivered by the wrapped ethernet endpoint.
	e.Endpoint.Init(ethernet.New(ep), &e)
	return &e
}
// EndpointWithDestinationCheck is a link endpoint that checks the destination
// link address before delivering network packets to the network dispatcher.
type EndpointWithDestinationCheck struct {
	nested.Endpoint
}

// Compile-time interface conformance checks.
var _ stack.NetworkDispatcher = (*EndpointWithDestinationCheck)(nil)
var _ stack.LinkEndpoint = (*EndpointWithDestinationCheck)(nil)

// DeliverNetworkPacket implements stack.NetworkDispatcher.
// Packets are forwarded only when addressed to this endpoint's own link
// address, the ethernet broadcast address, or a multicast address; all
// other packets are silently dropped.
func (e *EndpointWithDestinationCheck) DeliverNetworkPacket(proto tcpip.NetworkProtocolNumber, pkt stack.PacketBufferPtr) {
	if dst := header.Ethernet(pkt.LinkHeader().Slice()).DestinationAddress(); dst == e.Endpoint.LinkAddress() || dst == header.EthernetBroadcastAddress || header.IsMulticastEthernetAddress(dst) {
		e.Endpoint.DeliverNetworkPacket(proto, pkt)
	}
}
// SetupRouterStack creates the NICs, sets forwarding, adds addresses and sets
// the route table for a stack that should operate as a router.
func SetupRouterStack(t *testing.T, s *stack.Stack, ep1, ep2 stack.LinkEndpoint) {
	// Enable forwarding for both IP protocols on all NICs.
	if err := s.SetForwardingDefaultAndAllNICs(ipv4.ProtocolNumber, true); err != nil {
		t.Fatalf("s.SetForwardingDefaultAndAllNICs(%d): %s", ipv4.ProtocolNumber, err)
	}
	if err := s.SetForwardingDefaultAndAllNICs(ipv6.ProtocolNumber, true); err != nil {
		t.Fatalf("s.SetForwardingDefaultAndAllNICs(%d): %s", ipv6.ProtocolNumber, err)
	}
	// Create both router NICs and assign their IPv4/IPv6 addresses.
	for _, setup := range []struct {
		nicID     tcpip.NICID
		nicName   string
		ep        stack.LinkEndpoint
		addresses [2]tcpip.ProtocolAddress
	}{
		{
			nicID:     RouterNICID1,
			nicName:   RouterNIC1Name,
			ep:        ep1,
			addresses: [2]tcpip.ProtocolAddress{RouterNIC1IPv4Addr, RouterNIC1IPv6Addr},
		},
		{
			nicID:     RouterNICID2,
			nicName:   RouterNIC2Name,
			ep:        ep2,
			addresses: [2]tcpip.ProtocolAddress{RouterNIC2IPv4Addr, RouterNIC2IPv6Addr},
		},
	} {
		opts := stack.NICOptions{Name: setup.nicName}
		if err := s.CreateNICWithOptions(setup.nicID, setup.ep, opts); err != nil {
			t.Fatalf("s.CreateNICWithOptions(%d, _, %#v): %s", setup.nicID, opts, err)
		}
		for _, addr := range setup.addresses {
			if err := s.AddProtocolAddress(setup.nicID, addr, stack.AddressProperties{}); err != nil {
				t.Fatalf("s.AddProtocolAddress(%d, %#v, {}): %s", setup.nicID, addr, err)
			}
		}
	}
	// Directly-connected routes only: each NIC reaches its own subnet.
	s.SetRouteTable([]tcpip.Route{
		{
			Destination: RouterNIC1IPv4Addr.AddressWithPrefix.Subnet(),
			NIC:         RouterNICID1,
		},
		{
			Destination: RouterNIC1IPv6Addr.AddressWithPrefix.Subnet(),
			NIC:         RouterNICID1,
		},
		{
			Destination: RouterNIC2IPv4Addr.AddressWithPrefix.Subnet(),
			NIC:         RouterNICID2,
		},
		{
			Destination: RouterNIC2IPv6Addr.AddressWithPrefix.Subnet(),
			NIC:         RouterNICID2,
		},
	})
}
// SetupRoutedStacks creates the NICs, sets forwarding, adds addresses and sets
// the route tables for the passed stacks.
//
// Topology: host1 <-pipe-> routerNIC1 | routerNIC2 <-pipe-> host2, with
// each link wrapped in an ethernet endpoint that filters on destination
// link address.
func SetupRoutedStacks(t *testing.T, host1Stack, routerStack, host2Stack *stack.Stack) {
	const maxFrameSize = header.IPv6MinimumMTU + header.EthernetMinimumSize
	host1NIC, routerNIC1 := pipe.New(LinkAddr1, LinkAddr2, maxFrameSize)
	routerNIC2, host2NIC := pipe.New(LinkAddr3, LinkAddr4, maxFrameSize)
	SetupRouterStack(t, routerStack, NewEthernetEndpoint(routerNIC1), NewEthernetEndpoint(routerNIC2))
	{
		opts := stack.NICOptions{Name: Host1NICName}
		if err := host1Stack.CreateNICWithOptions(Host1NICID, NewEthernetEndpoint(host1NIC), opts); err != nil {
			t.Fatalf("host1Stack.CreateNICWithOptions(%d, _, %#v): %s", Host1NICID, opts, err)
		}
	}
	{
		opts := stack.NICOptions{Name: Host2NICName}
		if err := host2Stack.CreateNICWithOptions(Host2NICID, NewEthernetEndpoint(host2NIC), opts); err != nil {
			t.Fatalf("host2Stack.CreateNICWithOptions(%d, _, %#v): %s", Host2NICID, opts, err)
		}
	}
	if err := host1Stack.AddProtocolAddress(Host1NICID, Host1IPv4Addr, stack.AddressProperties{}); err != nil {
		t.Fatalf("host1Stack.AddProtocolAddress(%d, %+v, {}): %s", Host1NICID, Host1IPv4Addr, err)
	}
	if err := host2Stack.AddProtocolAddress(Host2NICID, Host2IPv4Addr, stack.AddressProperties{}); err != nil {
		t.Fatalf("host2Stack.AddProtocolAddress(%d, %+v, {}): %s", Host2NICID, Host2IPv4Addr, err)
	}
	if err := host1Stack.AddProtocolAddress(Host1NICID, Host1IPv6Addr, stack.AddressProperties{}); err != nil {
		t.Fatalf("host1Stack.AddProtocolAddress(%d, %+v, {}): %s", Host1NICID, Host1IPv6Addr, err)
	}
	if err := host2Stack.AddProtocolAddress(Host2NICID, Host2IPv6Addr, stack.AddressProperties{}); err != nil {
		t.Fatalf("host2Stack.AddProtocolAddress(%d, %+v, {}): %s", Host2NICID, Host2IPv6Addr, err)
	}
	// host1: its own subnet directly, host2's subnets via the router.
	host1Stack.SetRouteTable([]tcpip.Route{
		{
			Destination: Host1IPv4Addr.AddressWithPrefix.Subnet(),
			NIC:         Host1NICID,
		},
		{
			Destination: Host1IPv6Addr.AddressWithPrefix.Subnet(),
			NIC:         Host1NICID,
		},
		{
			Destination: Host2IPv4Addr.AddressWithPrefix.Subnet(),
			Gateway:     RouterNIC1IPv4Addr.AddressWithPrefix.Address,
			NIC:         Host1NICID,
		},
		{
			Destination: Host2IPv6Addr.AddressWithPrefix.Subnet(),
			Gateway:     RouterNIC1IPv6Addr.AddressWithPrefix.Address,
			NIC:         Host1NICID,
		},
	})
	// host2: mirror image of host1's table.
	host2Stack.SetRouteTable([]tcpip.Route{
		{
			Destination: Host2IPv4Addr.AddressWithPrefix.Subnet(),
			NIC:         Host2NICID,
		},
		{
			Destination: Host2IPv6Addr.AddressWithPrefix.Subnet(),
			NIC:         Host2NICID,
		},
		{
			Destination: Host1IPv4Addr.AddressWithPrefix.Subnet(),
			Gateway:     RouterNIC2IPv4Addr.AddressWithPrefix.Address,
			NIC:         Host2NICID,
		},
		{
			Destination: Host1IPv6Addr.AddressWithPrefix.Subnet(),
			Gateway:     RouterNIC2IPv6Addr.AddressWithPrefix.Address,
			NIC:         Host2NICID,
		},
	})
}
// ICMPv4Echo returns an ICMPv4 echo packet.
// The packet is built back-to-front: the ICMP portion is prepended and
// checksummed first, then the IPv4 header is prepended and checksummed.
func ICMPv4Echo(src, dst tcpip.Address, ttl uint8, ty header.ICMPv4Type) []byte {
	totalLen := header.IPv4MinimumSize + header.ICMPv4MinimumSize
	hdr := prependable.New(totalLen)
	pkt := header.ICMPv4(hdr.Prepend(header.ICMPv4MinimumSize))
	pkt.SetType(ty)
	pkt.SetCode(header.ICMPv4UnusedCode)
	// Zero the checksum field before computing the checksum over the packet.
	pkt.SetChecksum(0)
	pkt.SetChecksum(^checksum.Checksum(pkt, 0))
	ip := header.IPv4(hdr.Prepend(header.IPv4MinimumSize))
	ip.Encode(&header.IPv4Fields{
		TotalLength: uint16(totalLen),
		Protocol:    uint8(icmp.ProtocolNumber4),
		TTL:         ttl,
		SrcAddr:     src,
		DstAddr:     dst,
	})
	ip.SetChecksum(^ip.CalculateChecksum())
	return hdr.View()
}
// RxICMPv4EchoRequest constructs and injects an ICMPv4 echo request packet on
// the provided endpoint.
func RxICMPv4EchoRequest(e *channel.Endpoint, src, dst tcpip.Address, ttl uint8) {
	newPkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
		Payload: buffer.MakeWithData(ICMPv4Echo(src, dst, ttl, header.ICMPv4Echo)),
	})
	// Drop our reference once the endpoint has taken the packet.
	defer newPkt.DecRef()
	e.InjectInbound(header.IPv4ProtocolNumber, newPkt)
}

// RxICMPv4EchoReply constructs and injects an ICMPv4 echo reply packet on
// the provided endpoint.
func RxICMPv4EchoReply(e *channel.Endpoint, src, dst tcpip.Address, ttl uint8) {
	newPkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
		Payload: buffer.MakeWithData(ICMPv4Echo(src, dst, ttl, header.ICMPv4EchoReply)),
	})
	// Drop our reference once the endpoint has taken the packet.
	defer newPkt.DecRef()
	e.InjectInbound(header.IPv4ProtocolNumber, newPkt)
}
// ICMPv6Echo returns an ICMPv6 echo packet.
// Unlike the v4 variant, the ICMPv6 checksum incorporates the src/dst
// addresses (pseudo-header), and the IPv6 header carries no checksum of
// its own.
func ICMPv6Echo(src, dst tcpip.Address, ttl uint8, ty header.ICMPv6Type) []byte {
	totalLen := header.IPv6MinimumSize + header.ICMPv6MinimumSize
	hdr := prependable.New(totalLen)
	pkt := header.ICMPv6(hdr.Prepend(header.ICMPv6MinimumSize))
	pkt.SetType(ty)
	pkt.SetCode(header.ICMPv6UnusedCode)
	// Zero the checksum field before computing the checksum.
	pkt.SetChecksum(0)
	pkt.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
		Header: pkt,
		Src:    src,
		Dst:    dst,
	}))
	ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))
	ip.Encode(&header.IPv6Fields{
		PayloadLength:     header.ICMPv6MinimumSize,
		TransportProtocol: icmp.ProtocolNumber6,
		HopLimit:          ttl,
		SrcAddr:           src,
		DstAddr:           dst,
	})
	return hdr.View()
}
// RxICMPv6EchoRequest constructs and injects an ICMPv6 echo request packet on
// the provided endpoint.
func RxICMPv6EchoRequest(e *channel.Endpoint, src, dst tcpip.Address, ttl uint8) {
	echo := ICMPv6Echo(src, dst, ttl, header.ICMPv6EchoRequest)
	pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
		Payload: buffer.MakeWithData(echo),
	})
	defer pkt.DecRef()
	e.InjectInbound(header.IPv6ProtocolNumber, pkt)
}
// RxICMPv6EchoReply constructs and injects an ICMPv6 echo reply packet on
// the provided endpoint.
func RxICMPv6EchoReply(e *channel.Endpoint, src, dst tcpip.Address, ttl uint8) {
	reply := ICMPv6Echo(src, dst, ttl, header.ICMPv6EchoReply)
	pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
		Payload: buffer.MakeWithData(reply),
	})
	defer pkt.DecRef()
	e.InjectInbound(header.IPv6ProtocolNumber, pkt)
}
|
package server
import (
"net"
"time"
"github.com/iotaledger/hive.go/autopeering/peer"
"github.com/iotaledger/hive.go/crypto/identity"
)
const (
	// packetExpiration is the maximum age after which an incoming packet's
	// timestamp is considered stale; see Protocol.IsExpired.
	packetExpiration = 20 * time.Second
)
// Protocol provides a basis for server protocols handling incoming messages.
type Protocol struct {
	// Sender is the interface used to transmit this protocol's own requests.
	Sender Sender
}
// Send sends the data to the given peer, addressed via the peer's own
// advertised address.
func (p *Protocol) Send(to *peer.Peer, data []byte) {
	p.Sender.Send(to.Address(), data)
}
// SendExpectingReply sends request data to a peer and expects a response of the given type.
// On an incoming matching request the callback is executed to perform additional verification steps.
// The returned channel yields the outcome (nil on a verified reply).
func (p *Protocol) SendExpectingReply(dstAddr *net.UDPAddr, toID identity.ID, data []byte, replyType MType, callback func(Message) bool) <-chan error {
	return p.Sender.SendExpectingReply(dstAddr, toID, data, replyType, callback)
}
// IsExpired reports whether the given UNIX time stamp is too far in the past
// (older than packetExpiration).
func (p *Protocol) IsExpired(ts int64) bool {
	age := time.Since(time.Unix(ts, 0))
	return age >= packetExpiration
}
|
package cli
import (
"bytes"
"testing"
"github.com/stretchr/testify/assert"
"k8s.io/klog/v2"
)
// TestResourceVersionTooOldWarningsSilenced checks that at the default klog
// level V(4) messages are suppressed while warnings still reach the output.
func TestResourceVersionTooOldWarningsSilenced(t *testing.T) {
	out := bytes.NewBuffer(nil)
	initKlog(out)
	PrintWatchEndedV4()
	klog.Flush()
	// Nothing should have been written at verbosity 4.
	assert.Equal(t, "", out.String())
	PrintWatchEndedWarning()
	klog.Flush()
	// Warnings are emitted and include the caller's file name.
	assert.Contains(t, out.String(), "klog_test.go")
	assert.Contains(t, out.String(), "watch ended")
}
// TestResourceVersionTooOldWarningsPrinted checks that raising klogLevel to 5
// lets V(4) messages through; the defer restores the package-level default.
func TestResourceVersionTooOldWarningsPrinted(t *testing.T) {
	klogLevel = 5
	defer func() {
		klogLevel = 0
	}()
	out := bytes.NewBuffer(nil)
	initKlog(out)
	PrintWatchEndedV4()
	klog.Flush()
	assert.Contains(t, out.String(), "watch ended")
}
// TestEmptyGroupVersionErrorsSilenced checks that the known-noisy
// "empty response" aggregated-API error is filtered out entirely.
func TestEmptyGroupVersionErrorsSilenced(t *testing.T) {
	out := bytes.NewBuffer(nil)
	initKlog(out)
	klog.Error("couldn't get resource list for external.metrics.k8s.io/v1beta1: Got empty response for: external.metrics.k8s.io/v1beta1")
	klog.Flush()
	assert.Empty(t, out.String())
}
// PrintWatchEndedV4 logs "watch ended" at verbosity 4 (test helper).
func PrintWatchEndedV4() {
	klog.V(4).Infof("watch ended")
}

// PrintWatchEndedWarning logs "watch ended" at warning level (test helper).
func PrintWatchEndedWarning() {
	klog.Warningf("watch ended")
}
|
package zermelo
import (
"github.com/shawnsmithdev/zermelo/v2/internal"
"slices"
"testing"
)
const (
	// intSize is the width of int in bits (32 or 64): the shift amount is 5
	// on 32-bit platforms and 6 on 64-bit ones, because ^uint(0)>>32 is
	// non-zero only when uint is 64 bits wide.
	// Const int size thanks to kostya-sh@github
	intSize uint = 1 << (5 + (^uint(0))>>32&1)
	// testSize covers slice lengths on both sides of the comparison-sort
	// cutoff, so both radix and fallback paths are exercised.
	testSize = 2 * compSortCutoff64
)
// TestSort exercises Sort for every supported integer type.
func TestSort(t *testing.T) {
	testSort[int8](t, internal.RandInteger[int8](), false)
	testSort[int16](t, internal.RandInteger[int16](), false)
	testSort[int32](t, internal.RandInteger[int32](), false)
	testSort[int64](t, internal.RandInteger[int64](), false)
	testSort[int](t, internal.RandInteger[int](), false)
	testSort[uint8](t, internal.RandInteger[uint8](), false)
	testSort[uint16](t, internal.RandInteger[uint16](), false)
	testSort[uint32](t, internal.RandInteger[uint32](), false)
	testSort[uint64](t, internal.RandInteger[uint64](), false)
	testSort[uintptr](t, internal.RandInteger[uintptr](), false)
	testSort[uint](t, internal.RandInteger[uint](), false)
}
// TestSortBYOB exercises SortBYOB (caller-supplied scratch buffer) for every
// supported integer type.
func TestSortBYOB(t *testing.T) {
	testSort[int8](t, internal.RandInteger[int8](), true)
	testSort[int16](t, internal.RandInteger[int16](), true)
	testSort[int32](t, internal.RandInteger[int32](), true)
	testSort[int64](t, internal.RandInteger[int64](), true)
	testSort[int](t, internal.RandInteger[int](), true)
	testSort[uint8](t, internal.RandInteger[uint8](), true)
	testSort[uint16](t, internal.RandInteger[uint16](), true)
	testSort[uint32](t, internal.RandInteger[uint32](), true)
	testSort[uint64](t, internal.RandInteger[uint64](), true)
	testSort[uintptr](t, internal.RandInteger[uintptr](), true)
	testSort[uint](t, internal.RandInteger[uint](), true)
}
// testSort sorts randomly filled slices of every length from 0 to testSize
// and compares each result against the standard library sort.
func testSort[N Integer](t *testing.T, rng func() N, byob bool) {
	for size := 0; size <= testSize; size++ {
		got := make([]N, size)
		internal.FillSlice(got, rng)
		want := slices.Clone(got)
		slices.Sort(want)
		if byob {
			SortBYOB(got, make([]N, size))
		} else {
			Sort(got)
		}
		if !slices.Equal(want, got) {
			t.Fatal(want, got)
		}
	}
}
|
package repositories
import "gopkg.in/go-playground/validator.v9"
type (
Taxes struct {
ID string `json:"-"`
TaxName string `json:"tax_name", valid:"required"`
TaxCode string `json:"tax_code", valid:"required,min:1,max:3"`
Amount float64 `json:"amount", valid:"required,numeric"`
}
CustomValidator struct {
validator *validator.Validate
}
TaxCode struct {
ID string `json:"code_id"`
Name string `json:"code_name"`
}
CalculatedTaxes struct {
TaxName string `json:"tax_name"`
Amount float64 `json:"amount"`
TaxCode int64 `json:"tax_code"`
TaxType string `json:"tax_type"`
TaxAmount float64 `json:"tax_amount"`
TotalAmount float64 `json:"total_amount"`
}
) |
// +build ignore
/*
只能发送的通道类型为chan<-,只能接收的通道类型为<-chan
单向通道有利于代码接口的严谨性
*/
package main
// main demonstrates directional channel types: chan<- int can only be sent
// to, <-chan int can only be received from. Restricting direction makes
// function interfaces stricter and safer.
func main() {
	ch := make(chan int)
	// A send-only view of ch.
	var chSendOnly chan<- int = ch
	// A receive-only view of ch.
	var chReadOnly <-chan int = ch
	// FIX: the original only declared the two views, which does not compile
	// ("declared and not used"). Exercise them instead: send through the
	// send-only view from another goroutine and receive through the
	// receive-only view.
	go func() { chSendOnly <- 1 }()
	<-chReadOnly
	// Note: a channel created directly as make(<-chan int) could never be
	// written to, so such a channel is rarely useful.
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package power
import (
"bufio"
"context"
"fmt"
"regexp"
"strings"
"time"
"chromiumos/tast/common/servo"
"chromiumos/tast/common/usbutils"
"chromiumos/tast/ctxutil"
"chromiumos/tast/dut"
"chromiumos/tast/errors"
"chromiumos/tast/remote/powercontrol"
"chromiumos/tast/ssh/linuxssh"
"chromiumos/tast/testing"
"chromiumos/tast/testing/hwdep"
)
// peripheralsPowerMode selects which power operation the test exercises.
type peripheralsPowerMode int

const (
	// suspendTest suspends and wakes the DUT each iteration.
	suspendTest peripheralsPowerMode = iota
	// coldbootTest shuts the DUT down to S5 and powers it back on.
	coldbootTest
)

// peripheralsTestParams parameterizes one test variant.
type peripheralsTestParams struct {
	powerMode peripheralsPowerMode // power operation to perform
	iter      int                  // number of suspend/coldboot cycles
}
// init registers the test and its parameterized variants: *_quick runs a
// single iteration; bronze/silver/gold scale the cycle count (and timeout)
// to 20/50/100.
func init() {
	testing.AddTest(&testing.Test{
		Func:         SystemPeripheralsFunctionalityCheck,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Verifies connected peripherals detection before and after power operations",
		Contacts:     []string{"ambalavanan.m.m@intel.com", "intel-chrome-system-automation-team@intel.com"},
		SoftwareDeps: []string{"chrome", "reboot"},
		ServiceDeps:  []string{"tast.cros.security.BootLockboxService"},
		VarDeps:      []string{"servo"},
		HardwareDeps: hwdep.D(hwdep.ChromeEC(), hwdep.InternalDisplay()),
		Params: []testing.Param{{
			Name:    "suspend_quick",
			Val:     peripheralsTestParams{powerMode: suspendTest, iter: 1},
			Timeout: 5 * time.Minute,
		}, {
			Name:    "suspend_bronze",
			Val:     peripheralsTestParams{powerMode: suspendTest, iter: 20},
			Timeout: 8 * time.Minute,
		}, {
			Name:    "suspend_silver",
			Val:     peripheralsTestParams{powerMode: suspendTest, iter: 50},
			Timeout: 13 * time.Minute,
		}, {
			Name:    "suspend_gold",
			Val:     peripheralsTestParams{powerMode: suspendTest, iter: 100},
			Timeout: 20 * time.Minute,
		}, {
			Name:    "coldboot_quick",
			Val:     peripheralsTestParams{powerMode: coldbootTest, iter: 1},
			Timeout: 5 * time.Minute,
		}, {
			Name:    "coldboot_bronze",
			Val:     peripheralsTestParams{powerMode: coldbootTest, iter: 20},
			Timeout: 25 * time.Minute,
		}, {
			Name:    "coldboot_silver",
			Val:     peripheralsTestParams{powerMode: coldbootTest, iter: 50},
			Timeout: 45 * time.Minute,
		}, {
			Name:    "coldboot_gold",
			Val:     peripheralsTestParams{powerMode: coldbootTest, iter: 100},
			Timeout: 85 * time.Minute,
		},
	}})
}
// SystemPeripheralsFunctionalityCheck verifies connected peripherals is detected
// or not while performing power mode operations.
// Pre-requisite: Below devices need to be connected to DUT before executing test.
// 1. USB2.0
// 2. USB3.0
// 3. SD_CARD
// 4. External HDMI display
func SystemPeripheralsFunctionalityCheck(ctx context.Context, s *testing.State) {
	// Reserve two minutes of the deadline for cleanup work.
	ctxForCleanUp := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 2*time.Minute)
	defer cancel()
	dut := s.DUT()
	testParam := s.Param().(peripheralsTestParams)
	servoSpec := s.RequiredVar("servo")
	pxy, err := servo.NewProxy(ctx, servoSpec, dut.KeyFile(), dut.KeyDir())
	if err != nil {
		s.Fatal("Failed to connect to servo: ", err)
	}
	defer pxy.Close(ctxForCleanUp)
	// Make sure the DUT is left powered on even if an iteration failed
	// mid-shutdown.
	defer func(ctx context.Context) {
		if !dut.Connected(ctx) {
			if err := powercontrol.PowerOntoDUT(ctx, pxy, dut); err != nil {
				s.Fatal("Failed to power-on DUT at cleanup: ", err)
			}
		}
	}(ctxForCleanUp)
	// Perform initial Chrome login.
	if err := powercontrol.ChromeOSLogin(ctx, dut, s.RPCHint()); err != nil {
		s.Fatal("Failed to log in to Chrome: ", err)
	}
	// Check for all peripheral devices detection before suspend/cold boot.
	if err := connectedPeripheralsDetection(ctx, dut); err != nil {
		s.Fatal("Failed to detect connected peripherals devices before cold boot: ", err)
	}
	iter := testParam.iter
	switch testParam.powerMode {
	case suspendTest:
		for i := 1; i <= iter; i++ {
			s.Logf("Suspend test iteration: %d/%d", i, iter)
			if err := performDUTSuspend(ctx, pxy, dut); err != nil {
				s.Fatal("Failed to perform suspend and wake DUT: ", err)
			}
			// Check for all peripheral devices detection after suspend.
			if err := connectedPeripheralsDetection(ctx, dut); err != nil {
				s.Fatal("Failed to detect connected peripherals devices during suspend test: ", err)
			}
		}
	case coldbootTest:
		for i := 1; i <= iter; i++ {
			s.Logf("Cold boot test iteration: %d/%d", i, iter)
			powerState := "S5"
			if err := powercontrol.ShutdownAndWaitForPowerState(ctx, pxy, dut, powerState); err != nil {
				s.Fatalf("Failed to shutdown and wait for %q powerstate: %v", powerState, err)
			}
			if err := powercontrol.PowerOntoDUT(ctx, pxy, dut); err != nil {
				s.Fatal("Failed to power on DUT: ", err)
			}
			// Performing chrome login after powering on DUT from cold boot.
			if err := powercontrol.ChromeOSLogin(ctx, dut, s.RPCHint()); err != nil {
				s.Fatal("Failed to log in to Chrome after cold boot: ", err)
			}
			// Check for all peripheral devices detection after cold boot.
			if err := connectedPeripheralsDetection(ctx, dut); err != nil {
				s.Fatal("Failed to detect connected peripherals devices after cold boot: ", err)
			}
			// Performing prev_sleep_state check (5 == cold boot / S5).
			expectedPrevSleepState := 5
			if err := powercontrol.ValidatePrevSleepState(ctx, dut, expectedPrevSleepState); err != nil {
				s.Fatal("Failed to validate previous sleep state: ", err)
			}
		}
	}
}
// sdCardDetection polls until an SD card is present and its MMC timing spec
// is exposed under /sys/kernel/debug/mmc0/ios.
func sdCardDetection(ctx context.Context, dut *dut.DUT) error {
	const sdMmcSpecFile = "/sys/kernel/debug/mmc0/ios"
	// Matches lines such as "timing spec: 2 (sd high-speed)".
	sdCardSpecRe := regexp.MustCompile(`timing spec:.[0-9]+.\((?:sd|mmc).*`)
	return testing.Poll(ctx, func(ctx context.Context) error {
		if !sdCardConnected(ctx, dut) {
			return errors.New("failed to find SD card")
		}
		sdCardSpecOut, err := linuxssh.ReadFile(ctx, dut.Conn(), sdMmcSpecFile)
		if err != nil {
			// FIX: the original error message was garbled
			// ("…sd card /sys/kernel' command").
			return errors.Wrapf(err, "failed to read %q", sdMmcSpecFile)
		}
		if got := string(sdCardSpecOut); !sdCardSpecRe.MatchString(got) {
			return errors.Errorf("failed to get MMC spec info in /sys/kernel/ = got %q, want match %q", got, sdCardSpecRe)
		}
		return nil
	}, &testing.PollOptions{Timeout: 10 * time.Second})
}
// usbStorageDevicesDetection verifies that exactly one USB2 (480M) and one
// USB3 (5000M) mass-storage device are enumerated on the DUT.
func usbStorageDevicesDetection(ctx context.Context, dut *dut.DUT) error {
	usbStorageClassName := "Mass Storage"
	usb2DeviceSpeed := "480M"
	usb3DeviceSpeed := "5000M"
	// Check USB device(s) detection (used both before and after the power
	// operation; the original comment claimed cold boot only).
	usbDevicesList, err := usbutils.ListDevicesInfo(ctx, dut)
	if err != nil {
		return errors.Wrap(err, "failed to get USB devices list")
	}
	// FIX: renamed the misspelled local "deviceSpeeed".
	for _, speed := range []string{usb2DeviceSpeed, usb3DeviceSpeed} {
		got := usbutils.NumberOfUSBDevicesConnected(usbDevicesList, usbStorageClassName, speed)
		if want := 1; got != want {
			return errors.Errorf("unexpected number of USB devices connected with %q speed: got %d, want %d", speed, got, want)
		}
	}
	return nil
}
// connectedPeripheralsDetection verifies whether all connected peripheral
// devices (USB storage, SD card, external HDMI display) are detected.
func connectedPeripheralsDetection(ctx context.Context, dut *dut.DUT) error {
	var (
		// The display may appear either as a native HDMI connector or as a
		// Type-C (DP-alt-mode) HDMI sink.
		nativeHDMIRe = regexp.MustCompile(`\[CONNECTOR:\d+:HDMI.*status: connected`)
		typeCHDMIRe  = regexp.MustCompile(`Type: HDMI`)
	)
	if err := usbStorageDevicesDetection(ctx, dut); err != nil {
		return errors.Wrap(err, "failed to detect connected USB storage devices")
	}
	if err := sdCardDetection(ctx, dut); err != nil {
		return errors.Wrap(err, "failed to detect connected SD Card")
	}
	numberOfDisplays := 1
	nativeDisplayInfoPatterns := []*regexp.Regexp{nativeHDMIRe}
	typeCDisplayInfoPatterns := []*regexp.Regexp{typeCHDMIRe}
	// Try the native connector pattern first, then fall back to Type-C.
	if err := usbutils.ExternalDisplayDetectionForRemote(ctx, dut, numberOfDisplays, nativeDisplayInfoPatterns); err != nil {
		if err := usbutils.ExternalDisplayDetectionForRemote(ctx, dut, numberOfDisplays, typeCDisplayInfoPatterns); err != nil {
			return errors.Wrap(err, "failed to detect external HDMI display")
		}
	}
	return nil
}
// sdCardConnected reports whether any block device under /sys/block has
// device type "SD".
func sdCardConnected(ctx context.Context, dut *dut.DUT) bool {
	out, err := dut.Conn().CommandContext(ctx, "ls", "/sys/block").Output()
	if err != nil {
		return false
	}
	sc := bufio.NewScanner(strings.NewReader(strings.TrimSpace(string(out))))
	for sc.Scan() {
		typeFile := fmt.Sprintf("/sys/block/%s/device/type", sc.Text())
		devType, err := dut.Conn().CommandContext(ctx, "cat", typeFile).Output()
		if err != nil {
			continue
		}
		if strings.TrimSpace(string(devType)) == "SD" {
			return true
		}
	}
	return false
}
// performDUTSuspend performs DUT suspend with 'powerd_dbus_suspend' command and
// wakes DUT with servo power key press.
func performDUTSuspend(ctx context.Context, pxy *servo.Proxy, dut *dut.DUT) error {
	// The 5s timeout deliberately cuts the suspend command short once the
	// DUT starts going down; a DeadlineExceeded here is expected and ignored.
	powerOffCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	if err := dut.Conn().CommandContext(powerOffCtx, "powerd_dbus_suspend").Run(); err != nil && !errors.Is(err, context.DeadlineExceeded) {
		return errors.Wrap(err, "failed to power off DUT")
	}
	// Wait for the SSH connection to drop, confirming the suspend happened.
	sdCtx, cancel := context.WithTimeout(ctx, 40*time.Second)
	defer cancel()
	if err := dut.WaitUnreachable(sdCtx); err != nil {
		return errors.Wrap(err, "failed to wait for unreachable")
	}
	if err := powercontrol.PowerOntoDUT(ctx, pxy, dut); err != nil {
		return errors.Wrap(err, "failed to power on DUT")
	}
	return nil
}
|
package grpchandler
import (
pbwallet "WalletPOC/apidoc/grpc/gen"
"WalletPOC/internal/core/application"
"context"
)
// walletHandler exposes an application.WalletService over the generated
// WalletServer gRPC interface.
type walletHandler struct {
	walletSvc application.WalletService
}

// NewWalletHandler returns a WalletServer backed by the given wallet service.
func NewWalletHandler(walletSvc application.WalletService) pbwallet.WalletServer {
	return walletHandler{
		walletSvc: walletSvc,
	}
}
// GenSeed generates a fresh wallet seed mnemonic via the wallet service.
func (w walletHandler) GenSeed(ctx context.Context, request *pbwallet.GenSeedRequest) (
	*pbwallet.GenSeedReply,
	error,
) {
	seed, err := w.walletSvc.GenSeed(ctx)
	if err != nil {
		return nil, err
	}
	return &pbwallet.GenSeedReply{SeedMnemonic: seed}, nil
}
// InitWallet is not yet implemented.
func (w walletHandler) InitWallet(ctx context.Context, request *pbwallet.InitWalletRequest) (*pbwallet.InitWalletReply, error) {
	panic("implement me")
}

// UnlockWallet is not yet implemented.
func (w walletHandler) UnlockWallet(ctx context.Context, request *pbwallet.UnlockWalletRequest) (*pbwallet.UnlockWalletReply, error) {
	panic("implement me")
}

// ChangePassword is not yet implemented.
func (w walletHandler) ChangePassword(ctx context.Context, request *pbwallet.ChangePasswordRequest) (*pbwallet.ChangePasswordReply, error) {
	panic("implement me")
}

// WalletAddress is not yet implemented.
func (w walletHandler) WalletAddress(ctx context.Context, request *pbwallet.WalletAddressRequest) (*pbwallet.WalletAddressReply, error) {
	panic("implement me")
}

// WalletBalance is not yet implemented.
func (w walletHandler) WalletBalance(ctx context.Context, request *pbwallet.WalletBalanceRequest) (*pbwallet.WalletBalanceReply, error) {
	panic("implement me")
}

// SendToMany is not yet implemented.
func (w walletHandler) SendToMany(ctx context.Context, request *pbwallet.SendToManyRequest) (*pbwallet.SendToManyReply, error) {
	panic("implement me")
}
|
// Copyright (c) 2016-2019 Uber Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package webhdfs
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"path"
"testing"
"github.com/uber/kraken/lib/backend/backenderrors"
"github.com/uber/kraken/utils/randutil"
"github.com/uber/kraken/utils/rwutil"
"github.com/uber/kraken/utils/testutil"
"github.com/go-chi/chi"
"github.com/stretchr/testify/require"
)
// _testFile is the HDFS path used by the tests in this file.
const _testFile = "/root/test"

// testServer stubs the name-node and data-node HTTP endpoints.
type testServer struct {
	getName, getData, putName, putData http.HandlerFunc
}
// handler wires the four stubbed name/data-node endpoints into one router.
func (s *testServer) handler() http.Handler {
	mux := chi.NewRouter()
	mux.Get("/webhdfs/v1*", s.getName)
	mux.Put("/webhdfs/v1*", s.putName)
	mux.Get("/datanode/webhdfs/v1*", s.getData)
	mux.Put("/datanode/webhdfs/v1*", s.putData)
	return mux
}
// redirectToDataNode 307-redirects a name-node request to the equivalent
// data-node URL on the same host, preserving the query string.
func redirectToDataNode(w http.ResponseWriter, r *http.Request) {
	target := fmt.Sprintf(
		"http://%s/%s?%s",
		r.Host, path.Join("datanode", r.URL.Path), r.URL.Query().Encode())
	http.Redirect(w, r, target, http.StatusTemporaryRedirect)
}
// writeResponse returns a handler that replies with a fixed status and body.
func writeResponse(status int, body []byte) http.HandlerFunc {
	return func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(status)
		_, _ = w.Write(body)
	}
}
// checkBody returns a handler asserting that the request body equals
// expected, replying 201 on match.
func checkBody(t *testing.T, expected []byte) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		b, err := ioutil.ReadAll(r.Body)
		require.NoError(t, err)
		require.Equal(t, string(expected), string(b))
		w.WriteHeader(http.StatusCreated)
	}
}
// newClient builds a Client for the given name-node addresses, panicking on
// construction failure (test helper).
func newClient(nodes ...string) Client {
	c, err := NewClient(Config{}, nodes, "")
	if err != nil {
		panic(err)
	}
	return c
}
// TestNewClientError verifies that constructing a client with no name nodes
// fails.
func TestNewClientError(t *testing.T) {
	require := require.New(t)
	_, err := NewClient(Config{}, nil, "")
	require.Error(err)
}

// TestClientOpen verifies the happy read path: the name node redirects to
// the data node, which serves the blob contents.
func TestClientOpen(t *testing.T) {
	require := require.New(t)
	data := randutil.Text(64)
	server := &testServer{
		getName: redirectToDataNode,
		getData: writeResponse(http.StatusOK, data),
	}
	addr, stop := testutil.StartServer(server.handler())
	defer stop()
	client := newClient(addr)
	var b bytes.Buffer
	require.NoError(client.Open(_testFile, &b))
	require.Equal(data, b.Bytes())
}
// TestClientOpenRetriesNextNameNode verifies that when the first name node's
// data node rejects the read, the client falls back to the second name node.
func TestClientOpenRetriesNextNameNode(t *testing.T) {
	require := require.New(t)
	data := randutil.Text(64)
	// First node redirects but its data node returns 403.
	server1 := &testServer{
		getName: redirectToDataNode,
		getData: writeResponse(http.StatusForbidden, nil),
	}
	addr1, stop := testutil.StartServer(server1.handler())
	defer stop()
	// Second node serves the data successfully.
	server2 := &testServer{
		getName: redirectToDataNode,
		getData: writeResponse(http.StatusOK, data),
	}
	addr2, stop := testutil.StartServer(server2.handler())
	defer stop()
	client := newClient(addr1, addr2)
	var b bytes.Buffer
	require.NoError(client.Open(_testFile, &b))
	require.Equal(data, b.Bytes())
}
// TestClientOpenErrBlobNotFound verifies a 404 from the name node is mapped
// to backenderrors.ErrBlobNotFound.
func TestClientOpenErrBlobNotFound(t *testing.T) {
	require := require.New(t)
	server := &testServer{
		getName: writeResponse(http.StatusNotFound, []byte("file not found")),
	}
	addr, stop := testutil.StartServer(server.handler())
	defer stop()
	client := newClient(addr)
	// NOTE(review): this temp file is never used by the assertions and looks
	// vestigial; kept for now, but FIX: it was never closed, leaking the
	// descriptor for the life of the test binary.
	f, err := ioutil.TempFile("", "hdfs3test")
	require.NoError(err)
	defer os.Remove(f.Name())
	defer f.Close()
	var b bytes.Buffer
	require.Equal(backenderrors.ErrBlobNotFound, client.Open(_testFile, &b))
}
// TestClientCreate verifies the happy write path: redirect to the data node,
// which receives exactly the uploaded bytes.
func TestClientCreate(t *testing.T) {
	require := require.New(t)
	data := randutil.Text(64)
	server := &testServer{
		putName: redirectToDataNode,
		putData: checkBody(t, data),
	}
	addr, stop := testutil.StartServer(server.handler())
	defer stop()
	client := newClient(addr)
	require.NoError(client.Create(_testFile, bytes.NewReader(data)))
}

// TestClientCreateUnknownFailure verifies a 500 from the data node surfaces
// as an error.
func TestClientCreateUnknownFailure(t *testing.T) {
	require := require.New(t)
	server := &testServer{
		putName: redirectToDataNode,
		putData: writeResponse(http.StatusInternalServerError, []byte("unknown error")),
	}
	addr, stop := testutil.StartServer(server.handler())
	defer stop()
	client := newClient(addr)
	data := randutil.Text(64)
	require.Error(client.Create(_testFile, bytes.NewReader(data)))
}
// TestClientCreateRetriesNextNameNode verifies that writes retry against the
// next name node whether the first rejects at the name-node or data-node
// stage, and that the request body can be replayed for the retry.
func TestClientCreateRetriesNextNameNode(t *testing.T) {
	tests := []struct {
		desc    string
		server1 *testServer
	}{
		{
			"name node forbidden",
			&testServer{
				putName: writeResponse(http.StatusForbidden, nil),
			},
		}, {
			"data node forbidden",
			&testServer{
				putName: redirectToDataNode,
				putData: writeResponse(http.StatusForbidden, nil),
			},
		},
	}
	for _, test := range tests {
		t.Run(test.desc, func(t *testing.T) {
			require := require.New(t)
			data := randutil.Text(64)
			addr1, stop := testutil.StartServer(test.server1.handler())
			defer stop()
			server2 := &testServer{
				putName: redirectToDataNode,
				putData: checkBody(t, data),
			}
			addr2, stop := testutil.StartServer(server2.handler())
			defer stop()
			client := newClient(addr1, addr2)
			require.NoError(client.Create(_testFile, bytes.NewReader(data)))
			// Ensure bytes.Buffer can replay data.
			require.NoError(client.Create(_testFile, bytes.NewBuffer(data)))
			// Ensure non-buffer non-seekers can replay data.
			require.NoError(client.Create(_testFile, rwutil.PlainReader(data)))
		})
	}
}
// TestClientCreateErrorsWhenExceedsBufferGuard verifies that uploading a
// non-seekable reader larger than BufferGuard fails with exceededCapError.
func TestClientCreateErrorsWhenExceedsBufferGuard(t *testing.T) {
	require := require.New(t)
	client, err := NewClient(Config{BufferGuard: 50}, []string{"dummy-addr"}, "")
	require.NoError(err)
	// Exceeds BufferGuard.
	data := randutil.Text(100)
	err = client.Create(_testFile, rwutil.PlainReader(data))
	require.Error(err)
	_, ok := err.(drainSrcError).err.(exceededCapError)
	require.True(ok)
}
// TestClientRename verifies the rename request hits the data node with the
// source path and a "destination" query parameter.
func TestClientRename(t *testing.T) {
	require := require.New(t)
	from := "/root/from"
	to := "/root/to"
	called := false
	server := &testServer{
		putName: redirectToDataNode,
		putData: func(w http.ResponseWriter, r *http.Request) {
			called = true
			require.Equal("/datanode/webhdfs/v1"+from, r.URL.Path)
			require.Equal(to, r.URL.Query().Get("destination"))
		},
	}
	addr, stop := testutil.StartServer(server.handler())
	defer stop()
	client := newClient(addr)
	require.NoError(client.Rename(from, to))
	require.True(called)
}

// TestClientMkdirs verifies the mkdirs request reaches the data node with
// the target path.
func TestClientMkdirs(t *testing.T) {
	require := require.New(t)
	called := false
	server := &testServer{
		putName: redirectToDataNode,
		putData: func(w http.ResponseWriter, r *http.Request) {
			called = true
			require.Equal("/datanode/webhdfs/v1"+_testFile, r.URL.Path)
		},
	}
	addr, stop := testutil.StartServer(server.handler())
	defer stop()
	client := newClient(addr)
	require.NoError(client.Mkdirs(_testFile))
	require.True(called)
}
// TestClientGetFileStatus verifies a file-status response round-trips
// through the client.
func TestClientGetFileStatus(t *testing.T) {
	require := require.New(t)
	var resp fileStatusResponse
	resp.FileStatus.Length = 32
	b, err := json.Marshal(resp)
	require.NoError(err)
	server := &testServer{
		getName: redirectToDataNode,
		getData: writeResponse(http.StatusOK, b),
	}
	addr, stop := testutil.StartServer(server.handler())
	defer stop()
	client := newClient(addr)
	fs, err := client.GetFileStatus(_testFile)
	require.NoError(err)
	require.Equal(resp.FileStatus, fs)
}

// TestClientGetFileStatusErrBlobNotFound verifies a 404 on file status is
// mapped to backenderrors.ErrBlobNotFound.
func TestClientGetFileStatusErrBlobNotFound(t *testing.T) {
	require := require.New(t)
	server := &testServer{
		getName: redirectToDataNode,
		getData: writeResponse(http.StatusNotFound, nil),
	}
	addr, stop := testutil.StartServer(server.handler())
	defer stop()
	client := newClient(addr)
	_, err := client.GetFileStatus(_testFile)
	require.Equal(backenderrors.ErrBlobNotFound, err)
}
// TestClientListFileStatus verifies directory listings are parsed from the
// WebHDFS FileStatuses JSON envelope.
func TestClientListFileStatus(t *testing.T) {
	require := require.New(t)
	// Canonical WebHDFS LISTSTATUS payload with one entry.
	data := fmt.Sprintf(`
		{
		  "FileStatuses": {
		    "FileStatus": [{
		      "accessTime"      : 1320171722771,
		      "blockSize"       : 33554432,
		      "group"           : "supergroup",
		      "length"          : 24930,
		      "modificationTime": 1320171722771,
		      "owner"           : "webuser",
		      "pathSuffix"      : %q,
		      "permission"      : "644",
		      "replication"     : 1,
		      "type"            : "FILE"
		    }]
		  }
		}
	`, _testFile)
	server := &testServer{
		getName: redirectToDataNode,
		getData: writeResponse(http.StatusOK, []byte(data)),
	}
	addr, stop := testutil.StartServer(server.handler())
	defer stop()
	client := newClient(addr)
	result, err := client.ListFileStatus("/root")
	require.NoError(err)
	require.Equal([]FileStatus{{
		PathSuffix: _testFile,
		Type:       "FILE",
		Length:     24930,
	}}, result)
}
|
package wyrand
import (
"testing"
"time"
)
// BenchmarkNext measures the cost of one wyrand state advance, seeded from
// the current time.
func BenchmarkNext(b *testing.B) {
	b.ReportAllocs()
	w := New(uint64(time.Now().UnixNano()))
	for i := 0; i < b.N; i++ {
		w.Next()
	}
}
|
package pack
import (
"bytes"
"math/rand"
"testing"
"unsafe"
"github.com/stretchr/testify/assert"
)
// makeIndex builds a 1000-record index filled with random data (test helper).
func makeIndex() IndexFile {
	idx := make(IndexFile, 1000)
	for i := 0; i < len(idx); i++ {
		rec := &idx[i]
		rec.Offset = rand.Uint32()
		rec.Length = rand.Uint32()
		rec.Type = rand.Uint32()
		for j := 0; j < len(rec.Sum); j++ {
			rec.Sum[j] = byte(rand.Intn(256))
		}
	}
	return idx
}
// TestIndex round-trips an index through WriteTo/ReadFrom and checks
// equality.
func TestIndex(t *testing.T) {
	buf := new(bytes.Buffer)
	idx := makeIndex()
	_, err := idx.WriteTo(buf)
	assert.Nil(t, err)
	var newIdx IndexFile
	_, err = (&newIdx).ReadFrom(bytes.NewReader(buf.Bytes()))
	assert.Nil(t, err)
	assert.Equal(t, idx, newIdx)
}

// TestIndexSize logs the in-memory size of one index record (informational).
func TestIndexSize(t *testing.T) {
	rec := IndexRecord{}
	t.Logf("index record size: %d", unsafe.Sizeof(rec))
}
|
package models
// Tree is a hierarchical catalog/entity node; Children nests further Trees.
type Tree struct {
	Id            string   `json:"id"`
	ParentId      string   `json:"parentId"`
	Name          string   `json:"name"`
	Type          string   `json:"type"`
	CatalogItemId string   `json:"catalogItemId"`
	Children      []Tree   `json:"children"`
	GroupIds      []string `json:"groupIds"`
	Members       []string `json:"members"`
}

// TaskSummary mirrors the server's task summary payload, including timing,
// status, linked tasks, and output streams.
type TaskSummary struct {
	SubmitTimeUtc     int64                              `json:"submitTimeUtc"`
	EndTimeUtc        int64                              `json:"endTimeUtc"`
	IsCancelled       bool                               `json:"isCancelled"`
	CurrentStatus     string                             `json:"currentStatus"`
	BlockingTask      LinkTaskWithMetadata               `json:"blockingTask"`
	DisplayName       string                             `json:"displayName"`
	Streams           map[string]LinkStreamsWithMetadata `json:"streams"`
	Description       string                             `json:"description"`
	EntityId          string                             `json:"entityId"`
	EntityDisplayName string                             `json:"entityDisplayName"`
	Error             bool                               `json:"error"`
	SubmittedByTask   LinkTaskWithMetadata               `json:"submittedByTask"`
	Result            interface{}                        `json:"result"`
	IsError           bool                               `json:"isError"`
	DetailedStatus    string                             `json:"detailedStatus"`
	Children          []LinkTaskWithMetadata             `json:"children"`
	BlockingDetails   string                             `json:"blockingDetails"`
	Cancelled         bool                               `json:"cancelled"`
	Links             map[string]URI                     `json:"links"`
	Id                string                             `json:"id"`
	StartTimeUtc      int64                              `json:"startTimeUtc"`
}

// ApplicationSummary is the summary view of a deployed application.
type ApplicationSummary struct {
	Links  map[string]URI  `json:"links"`
	Id     string          `json:"id"`
	Spec   ApplicationSpec `json:"spec"`
	Status Status          `json:"status"`
}

// ApplicationSpec describes an application's name, type, and target
// locations.
type ApplicationSpec struct {
	Name      string   `json:"name"`
	Type      string   `json:"type"`
	Locations []string `json:"locations"`
}

// Status is an application lifecycle state string.
type Status string

// LinkWithMetadata is a placeholder for a generic link payload.
type LinkWithMetadata struct {
}

// LinkStreamsWithMetadata is a link to a task output stream plus metadata.
type LinkStreamsWithMetadata struct {
	Link     string             `json:"link"`
	Metadata LinkStreamMetadata `json:"metadata"`
}

// LinkStreamMetadata describes a stream's name and size.
type LinkStreamMetadata struct {
	Name     string `json:"name"`
	Size     int64  `json:"size"`
	SizeText string `json:"sizeText"`
}

// LinkTaskWithMetadata is a link to a related task plus metadata.
type LinkTaskWithMetadata struct {
	Link     string           `json:"link"`
	Metadata LinkTaskMetadata `json:"metadata"`
}

// LinkTaskMetadata identifies a linked task and its owning entity.
type LinkTaskMetadata struct {
	Id                string `json:"id"`
	TaskName          string `json:"taskName"`
	EntityId          string `json:"entityId"`
	EntityDisplayName string `json:"entityDisplayName"`
}

// URI is a server-relative or absolute link string.
type URI string
|
// Copyright © 2019 Kerem Karatal
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package lib
import (
"context"
"fmt"
"net/http"
homedir "github.com/mitchellh/go-homedir"
"golang.org/x/oauth2"
)
// GenerateChannelFolderPath returns the per-user folder used to store
// coding-challenge data (~/.coding-challenges).
func GenerateChannelFolderPath() (string, error) {
	home, err := homedir.Dir()
	if err != nil {
		return "", err
	}
	// FIX: err is necessarily nil here; return an explicit nil instead of
	// re-returning the variable.
	return home + "/.coding-challenges", nil
}
// getTokenClient returns an *http.Client that attaches the given OAuth2
// bearer token to every request.
func getTokenClient(token string) *http.Client {
	// FIX: the local was previously named "context", shadowing the imported
	// context package within this function.
	ctx := context.Background()
	tokenService := oauth2.StaticTokenSource(
		&oauth2.Token{AccessToken: token},
	)
	return oauth2.NewClient(ctx, tokenService)
}
// generateChallengeRepositoryName builds the repository name for a
// candidate's challenge: "test_<discipline>_<candidateName>".
func generateChallengeRepositoryName(candidateName string, discipline string) string {
	return fmt.Sprintf("test_%s_%s", discipline, candidateName)
}
// generateTemplateRepositoryURL builds the clone URL of the template
// repository, preferring the organization account when one is set (the
// owner/organization choice is inlined here).
func generateTemplateRepositoryURL(owner string, organization string, templateRepoName string) string {
	account := owner
	if organization != "" {
		account = organization
	}
	return fmt.Sprintf("https://github.com/%v/%v.git", account, templateRepoName)
}
// ownerOrOrganization picks the account name: the organization when
// non-empty, otherwise the owner.
func ownerOrOrganization(owner string, organization string) string {
	if organization == "" {
		return owner
	}
	return organization
}
// generateTaskDescriptionFilePath resolves relativePath inside the
// issue-templates directory of the challenge folder.
func generateTaskDescriptionFilePath(relativePath string) (string, error) {
	base, err := GenerateChannelFolderPath()
	if err != nil {
		return "", err
	}
	return base + "/issue-templates/" + relativePath, nil
}
|
package glubcms
import (
"net/http"
"path/filepath"
)
// The StaticHandler behaves like http.ServeContent without directory listings.
// It also implements the http.FileSystem interface.
type StaticHandler struct {
	fs     http.FileSystem // underlying filesystem
	prefix string          // root prefix joined onto every requested name
}
// ServeHTTP serves the file named by r.URL.Path. Missing files and
// directories yield 404; stat failures yield 500.
func (sh StaticHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	file, err := sh.Open(r.URL.Path)
	if err != nil {
		http.Error(w, r.URL.Path, http.StatusNotFound)
		return
	}
	defer file.Close()
	info, err := file.Stat()
	switch {
	case err != nil:
		http.Error(w, r.URL.Path, http.StatusInternalServerError)
	case info.IsDir():
		// Directory listings are deliberately not supported.
		http.Error(w, r.URL.Path, http.StatusNotFound)
	default:
		http.ServeContent(w, r, info.Name(), info.ModTime(), file)
	}
}
// Cd returns a copy of the handler whose root is path, resolved relative to
// the current prefix.
func (sh StaticHandler) Cd(path string) StaticHandler {
	sh.prefix = filepath.Join(sh.prefix, filepath.Clean(path))
	return sh
}
// Open implements http.FileSystem, resolving name beneath the prefix.
func (sh StaticHandler) Open(name string) (http.File, error) {
	full := filepath.Join(sh.prefix, filepath.Clean(name))
	return sh.fs.Open(filepath.Clean(full))
}
// NewStaticHandler returns a StaticHandler serving all files from fs with an
// empty prefix.
func NewStaticHandler(fs http.FileSystem) StaticHandler {
	return StaticHandler{fs: fs}
}
|
package todo
import (
"context"
"database/sql"
"fmt"
"time"
pb "github.com/qclaogui/golang-api-server/pkg/api/todopb/v1"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/timestamppb"
)
// MysqlRepository fulfills the Repository interface
type MysqlRepository struct {
	db *sql.DB // shared connection pool
}

// NewMysqlRepository is a factory function to generate a new repository
// backed by the given database pool.
func NewMysqlRepository(db *sql.DB) (*MysqlRepository, error) {
	repo := &MysqlRepository{db: db}
	return repo, nil
}
// connect obtains a dedicated connection from the pool; the caller is
// responsible for closing it.
func (m *MysqlRepository) connect(ctx context.Context) (*sql.Conn, error) {
	c, err := m.db.Conn(ctx)
	if err != nil {
		// FIX: wrap with %w (instead of concatenating err.Error()) so
		// callers can unwrap the driver error with errors.Is/As.
		return nil, fmt.Errorf("failed to connect to database-> %w", err)
	}
	return c, nil
}
// Read fetches a single ToDo by ID.
//
// BUG fixes vs the original:
//   - when no row matched, execution fell through to rows.Scan on an empty
//     result set; it now returns codes.NotFound instead.
//   - err.Error() is no longer routed through fmt.Sprintf as a format
//     string (a '%' in a driver error would corrupt the message).
//   - the Scan failure now returns a gRPC status like every other branch,
//     instead of a bare fmt.Errorf.
func (m *MysqlRepository) Read(ctx context.Context, req *pb.ReadRequest) (*pb.ReadResponse, error) {
	// get SQL connection from pool
	c, err := m.connect(ctx)
	if err != nil {
		return nil, err
	}
	defer func() { _ = c.Close() }()
	id := req.GetId()
	rows, err := c.QueryContext(ctx, "SELECT `ID`, `Title`, `Description`, `Reminder` FROM ToDo WHERE `ID`=?", id)
	if err != nil {
		return nil, status.Error(codes.Unknown, "failed to select from ToDo-> "+err.Error())
	}
	defer func() { _ = rows.Close() }()
	if !rows.Next() {
		if err = rows.Err(); err != nil {
			return nil, status.Error(codes.Unknown, "failed to select from ToDo-> "+err.Error())
		}
		// no row and no error: the ID simply does not exist
		return nil, status.Errorf(codes.NotFound, "ToDo with ID='%s' is not found", id)
	}
	todo := &pb.ToDo{}
	var reminder time.Time
	if err = rows.Scan(&todo.Id, &todo.Title, &todo.Description, &reminder); err != nil {
		return nil, status.Error(codes.Unknown, "failed to retrieve field values from ToDo row-> "+err.Error())
	}
	todo.Reminder = timestamppb.New(reminder)
	if rows.Next() {
		return nil, status.Errorf(codes.Unknown, "found multiple ToDo rows with ID='%s'", id)
	}
	return &pb.ReadResponse{Api: apiVersion, ToDo: todo}, nil
}
// Create inserts a new ToDo row and echoes its ID.
//
// BUG fix: err.Error() was previously interpolated into fmt.Sprintf's
// format string; any '%' in the driver error would corrupt the message.
// Plain concatenation is safe.
func (m *MysqlRepository) Create(ctx context.Context, req *pb.CreateRequest) (*pb.CreateResponse, error) {
	// get SQL connection from pool
	c, err := m.connect(ctx)
	if err != nil {
		return nil, err
	}
	defer func() { _ = c.Close() }()
	todo := req.GetToDo()
	_, err = c.ExecContext(ctx, "INSERT INTO ToDo(`ID`, `Title`, `Description`, `Reminder`) VALUES(?, ?, ?, ?)",
		todo.GetId(), todo.GetTitle(), todo.GetDescription(), todo.GetReminder().AsTime())
	if err != nil {
		return nil, status.Error(codes.Unknown, "failed to insert into ToDo-> "+err.Error())
	}
	return &pb.CreateResponse{Api: apiVersion, Id: todo.GetId()}, nil
}
// Update rewrites the mutable fields of an existing ToDo and reports the
// number of rows affected.
//
// BUG fix: err.Error() is no longer passed through fmt.Sprintf as a
// format string (a '%' in the driver error would corrupt the message).
// NOTE(review): the zero-rows case arguably deserves codes.NotFound; kept
// as codes.Unknown to avoid changing the status clients may match on.
func (m *MysqlRepository) Update(ctx context.Context, req *pb.UpdateRequest) (*pb.UpdateResponse, error) {
	// get SQL connection from pool
	c, err := m.connect(ctx)
	if err != nil {
		return nil, err
	}
	defer func() { _ = c.Close() }()
	todo := req.GetToDo()
	res, err := c.ExecContext(ctx, "UPDATE ToDo SET `Title`=?, `Description`=?, `Reminder`=? WHERE `ID`=?",
		todo.Title, todo.Description, todo.Reminder.AsTime(), todo.Id)
	if err != nil {
		return nil, status.Error(codes.Unknown, "failed to update ToDo-> "+err.Error())
	}
	rows, err := res.RowsAffected()
	if err != nil {
		return nil, status.Error(codes.Unknown, "failed to retrieve rows affected value-> "+err.Error())
	}
	if rows == 0 {
		return nil, status.Errorf(codes.Unknown, "ToDo with ID='%s' is not found", todo.Id)
	}
	return &pb.UpdateResponse{Api: apiVersion, Updated: rows}, nil
}
// Delete removes the ToDo with the given ID and reports how many rows
// were deleted.
//
// BUG fix: err.Error() is no longer passed through fmt.Sprintf as a
// format string (a '%' in the driver error would corrupt the message).
func (m *MysqlRepository) Delete(ctx context.Context, req *pb.DeleteRequest) (*pb.DeleteResponse, error) {
	// get SQL connection from pool
	c, err := m.connect(ctx)
	if err != nil {
		return nil, err
	}
	defer func() { _ = c.Close() }()
	id := req.GetId()
	res, err := c.ExecContext(ctx, "DELETE FROM ToDo WHERE `ID`=?", id)
	if err != nil {
		return nil, status.Error(codes.Unknown, "failed to delete ToDo-> "+err.Error())
	}
	rows, err := res.RowsAffected()
	if err != nil {
		return nil, status.Error(codes.Unknown, "failed to retrieve rows affected value-> "+err.Error())
	}
	if rows == 0 {
		return nil, status.Errorf(codes.Unknown, "ToDo with ID='%s' is not found", id)
	}
	return &pb.DeleteResponse{Api: apiVersion, Deleted: rows}, nil
}
// ReadAll returns every ToDo row.
//
// BUG fix: err.Error() is no longer passed through fmt.Sprintf as a
// format string (a '%' in the driver error would corrupt the message).
func (m *MysqlRepository) ReadAll(ctx context.Context, _ *pb.ReadAllRequest) (*pb.ReadAllResponse, error) {
	// get SQL connection from pool
	c, err := m.connect(ctx)
	if err != nil {
		return nil, err
	}
	defer func() { _ = c.Close() }()
	rows, err := c.QueryContext(ctx, "SELECT `ID`, `Title`, `Description`, `Reminder` FROM ToDo")
	if err != nil {
		return nil, status.Error(codes.Unknown, "failed to select from ToDo-> "+err.Error())
	}
	defer func() { _ = rows.Close() }()
	var todos []*pb.ToDo
	for rows.Next() {
		var todo = &pb.ToDo{}
		var reminder time.Time
		if err = rows.Scan(&todo.Id, &todo.Title, &todo.Description, &reminder); err != nil {
			return nil, status.Error(codes.Unknown, "failed to retrieve field values from ToDo row-> "+err.Error())
		}
		todo.Reminder = timestamppb.New(reminder)
		todos = append(todos, todo)
	}
	// surface any iteration error that ended the loop early
	if err = rows.Err(); err != nil {
		return nil, status.Error(codes.Unknown, "failed to retrieve data from ToDo-> "+err.Error())
	}
	return &pb.ReadAllResponse{Api: apiVersion, ToDos: todos}, nil
}
|
package main
//Valid
//Checks that the append builtin returns a slice of the same type as is given to the LHS variables in short declarations
// f demonstrates that append returns a slice of the same type as its
// first argument, both in plain assignment and in a short declaration.
func f() {
	var a1 []int
	a1 = append(a1, 2)
	// BUG fix: the original declared a2 and never used it — a compile
	// error in Go. Keep the declaration but explicitly discard a2.
	a2, _ := append(a1, 1), append(a1, 3)
	_ = a2
}
package util
// Filter returns the elements of ss for which predicate reports true,
// preserving their relative order. The predicate receives each element's
// index and value.
//
// (Fixed vs the original: the doc comment claimed matches are removed —
// they are kept — and the parameter `strings` shadowed the standard
// library package of the same name.)
func Filter(ss []string, predicate func(int, string) bool) (ret []string) {
	for i, s := range ss {
		if predicate(i, s) {
			ret = append(ret, s)
		}
	}
	return
}
|
package main
import (
"fmt"
"strconv"
)
// processMain2 runs the waypoint-based navigation variant (presumably an
// Advent of Code 2020 day-12-part-2 style puzzle — TODO confirm): N/S/E/W
// translate the waypoint, L/R rotate it in 90° steps about the origin,
// and F moves the ship toward it, then prints the Manhattan distance.
func processMain2(codes [][]string) {
	ship := &Robot{
		RestingChar: "S",
	}
	ship.Init()
	waypoint := &Robot{
		RestingChar: "W",
	}
	waypoint.Init()
	// waypoint starts 10 east, 1 up relative to the ship
	waypoint.X = 10
	waypoint.Y = 1
	m := &Map{}
	m.Init()
	m.AddRobot(ship)
	m.AddRobot(waypoint)
	fmt.Println("---")
	fmt.Println("Ship", ship.X, ship.Y)
	fmt.Println("Waypoint", waypoint.X, waypoint.Y)
	for _, c := range codes {
		// c[0] is the action letter, c[1] its numeric argument
		v, _ := strconv.Atoi(c[1])
		switch c[0] {
		case "N":
			waypoint.MoveUp(false, v)
		case "S":
			waypoint.MoveDown(false, v)
		case "E":
			waypoint.MoveRight(false, v)
		case "W":
			waypoint.MoveLeft(false, v)
		case "L":
			// v is a multiple of 90: apply v/90 quarter-turns
			for range seq(v / 90) {
				waypoint.TurnLeftAround(0, 0)
			}
		case "R":
			for range seq(v / 90) {
				waypoint.TurnRightAround(0, 0)
			}
		case "F":
			// move the ship v times the waypoint offset
			ship.X += v * waypoint.X
			ship.Y += v * waypoint.Y
		}
		m.SetXY(ship.X, ship.Y, CASE_UNKNOW)
		m.SetXY(waypoint.X, waypoint.Y, CASE_UNKNOW)
	}
	fmt.Printf("Bounds: %v\n", m.BoundsList())
	fmt.Println("Ship", ship.X, ship.Y)
	fmt.Println("Waypoint", waypoint.X, waypoint.Y)
	// answer = Manhattan distance of the ship from the origin
	fmt.Printf("Result: %d\n", AbsInt(ship.X)+AbsInt(ship.Y))
}
// main2 runs the part-2 solver first on the sample input, then on the
// real puzzle input.
func main2() {
	processMain2(parseFileText2DFirstChar("list.test.txt"))
	processMain2(parseFileText2DFirstChar("list.txt"))
}
|
package main
import "fmt"
// User struct; since it is capitalized it is exported from this package.
// A struct is a custom type: every variable of type User carries the
// member fields declared below.
type User struct {
	ID        int
	FirstName string
	LastName  string
	Email     string
}
// Group struct: a role plus a collection of users. Lowercase fields are
// unexported and visible only within this package.
type Group struct {
	ID         int
	role       string
	users      []User // collection of items of type User
	latestUser User
	// isFull: note that Go passes structs by value, so setting g.isFull
	// inside a function mutates only the copy; pass a *Group (a memory
	// reference, like JS object passing) to change the original.
	isFull bool
}
// func sampleOfPassingAnObjectOfTypeStruct(u User) string{
// }
func main() {
u := User{ID: 1, FirstName: "Thitla"}
g := Group{
ID: 1,
role: "shady",
users: []User{u},
latestUser u,
isFull false // If this value needs to be changed programatically based on some condition, simply setting g.isFull = true will work in the fucntion (because the value is passed by value and not by reference)
// where the operation is happening but the value will remain unchanged in memory... hence, pointers (hold memory refernce) in JS it is passed by reference
}
// fmt.Print(u.LastName) // nil
fmt.Print(u) // Prints ugly ass object like thing.. with no keys
}
|
package cmd
import (
"database/sql"
"encoding/json"
"fmt"
"github.com/gorilla/mux"
"log"
"net/http"
"os"
_ "github.com/lib/pq"
"github.com/spf13/cobra"
)
// Post is one stored text post as exposed over the JSON API.
type Post struct {
	ID   string `json:"post-id"`   // UUID primary key ("uri" column)
	Body string `json:"post-body"` // post text ("posts" column)
	Ts   string `json:"time-stamp"` // creation timestamp ("date" column)
}
var (
	globalDB    *sql.DB                     // opened in rootCmd.Run; shared by all handlers
	serverPort  = os.Getenv("PORT")         // HTTP listen port
	databaseURL = os.Getenv("DATABASE_URL") // postgres connection string
)
// getPost handles GET /post/{id}: it looks a post up by its URI and
// writes it as JSON. A missing row or query failure answers 400.
//
// SECURITY fix: the original interpolated the URL path segment into the
// SQL text with fmt.Sprintf — an SQL-injection vector. The query is now
// parameterized ($1, lib/pq placeholder syntax).
func getPost(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Access-Control-Allow-Origin", "*")
	w.Header().Set("Access-Control-Allow-Headers", "*")
	w.Header().Set("Content-Type", "application/json")
	var post Post
	postID := mux.Vars(r)["id"]
	log.Println("[getPost] - Fetching ID: " + postID)
	const sqlStatement = `
select uri, posts, date
from posts
where uri = $1`
	err := globalDB.QueryRow(sqlStatement, postID).Scan(&post.ID, &post.Body, &post.Ts)
	if err != nil {
		log.Printf("[getPost] - err: %v\n", err)
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	fmt.Printf("[getPost] - Found record {ID: %s, TimeStamep: %s }\n", post.ID, post.Ts)
	if err = json.NewEncoder(w).Encode(post); err != nil {
		log.Printf("[getPost] - err: %v\n", err)
		http.Error(w, "Internal Server Error", http.StatusInternalServerError)
		return
	}
}
// uploadPost handles POST /post: it decodes a Post body, inserts it and
// echoes the generated URI and timestamp as JSON.
//
// Fixes vs the original:
//   - SECURITY: the post body was interpolated into the SQL text with
//     fmt.Sprintf (SQL injection); the insert is now parameterized.
//   - the JSON decode error was silently discarded; malformed input now
//     answers 400 instead of inserting an empty row.
func uploadPost(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Access-Control-Allow-Origin", "*")
	w.Header().Set("Access-Control-Allow-Headers", "*")
	w.Header().Set("Content-Type", "application/json")
	var post Post
	if err := json.NewDecoder(r.Body).Decode(&post); err != nil {
		log.Printf("[uploadPost] - decode err: %v\n", err)
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	const sqlStatement = `
INSERT INTO posts (posts)
VALUES ($1)
RETURNING uri, date`
	err := globalDB.QueryRow(sqlStatement, post.Body).Scan(&post.ID, &post.Ts)
	if err != nil {
		log.Printf("[uploadPost] - err: %v\n", err)
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	fmt.Printf("[uploadPost] - New record {ID: %s, TimeStamep: %s }\n", post.ID, post.Ts)
	if err = json.NewEncoder(w).Encode(post); err != nil {
		log.Printf("[uploadPost] - err: %v\n", err)
		http.Error(w, "Internal Server Error", http.StatusInternalServerError)
		return
	}
}
// rootCmd connects to Postgres, ensures the schema exists, and serves the
// JSON API plus the static frontend from ./dist/scuffed-bin.
var rootCmd = &cobra.Command{
	Use:   "server",
	Short: "Webserver for scuffed-bin project",
	Long:  `Webserver for scuffed-bin project`,
	Run: func(cmd *cobra.Command, args []string) {
		var err error
		globalDB, err = sql.Open("postgres", databaseURL)
		if err != nil {
			panic(err)
		}
		defer globalDB.Close()
		// sql.Open does not dial; Ping verifies the DSN actually works
		err = globalDB.Ping()
		if err != nil {
			log.Printf("Error on pinging database: %v\n", err)
			os.Exit(1)
		}
		fmt.Println("Successfully connected!")
		// uuid-ossp provides uuid_generate_v1() used by the posts table
		if _, err = globalDB.Exec(`
CREATE EXTENSION IF NOT EXISTS "uuid-ossp"`); err != nil {
			log.Printf("Error on creating EXTENSION: %v\n", err)
			os.Exit(1)
		}
		if _, err = globalDB.Exec(`
CREATE TABLE IF NOT EXISTS posts
(
uri UUID NOT NULL PRIMARY KEY DEFAULT uuid_generate_v1() ,
posts text,
date timestamp NOT NULL DEFAULT NOW()
)`); err != nil {
			log.Printf("Error on creating TABLE: %v\n", err)
			os.Exit(1)
		}
		router := mux.NewRouter()
		router.HandleFunc("/post/{id}", getPost).Methods("GET")
		router.HandleFunc("/post", uploadPost).Methods("POST", "OPTIONS")
		// everything not matched above is served as a static frontend asset
		fs := http.FileServer(http.Dir("./dist/scuffed-bin"))
		router.PathPrefix("").Handler(fs)
		log.Fatal(http.ListenAndServe(":"+serverPort, router))
	},
}
// Execute runs the root command, printing any error and exiting with
// status 1 on failure.
func Execute() {
	err := rootCmd.Execute()
	if err == nil {
		return
	}
	fmt.Println(err)
	os.Exit(1)
}
// init is intentionally empty; command flags/config may be registered
// here later.
func init() {
}
|
// Copyright 2016 Granitic. All rights reserved.
// Use of this source code is governed by an Apache 2.0 license that can be found in the LICENSE file at the root of this project.
package rdbms
// A function able to execute an insert statement and return an RDBMS
// generated ID as an int64, stored via the *int64 out-parameter.
// If your implementation requires access to the context, it is available on the *RdbmsClient
type InsertWithReturnedId func(string, *RdbmsClient, *int64) error
// DefaultInsertWithReturnedId is an implementation of InsertWithReturnedId
// that works with any Go database driver implementing LastInsertId.
// It executes query via client, stores the generated ID in *target, and
// returns any execution or ID-retrieval error.
//
// Rewritten with early returns: the original nested the happy path inside
// two `else` branches after terminating `if`s, which go lint flags.
func DefaultInsertWithReturnedId(query string, client *RdbmsClient, target *int64) error {
	r, err := client.Exec(query)
	if err != nil {
		return err
	}
	id, err := r.LastInsertId()
	if err != nil {
		return err
	}
	*target = id
	return nil
}
|
package main
import "fmt"
// main demonstrates Go's expression-switch forms. Only the first switch is
// live; the remaining examples are kept commented out for reference.
func main() {
	//missing switch expression defaults to 'true'
	switch {
	case false:
		fmt.Println("the false case")
	case true:
		fmt.Println("the true case")
	} //END - switch
	//* -------------------
	//switch on value:
	//And multiple cases per case
	//v := "Bond"
	//switch v {
	//case "Moneypenny", "Bond", "Dr No":
	// fmt.Println("miss money or Bond or Dr No")
	//case "M":
	// fmt.Println("This is M")
	//case "Q":
	// fmt.Println("This is Q")
	//default:
	// fmt.Println("The default case")
	//} //END - switch
	//switch on value:
	//Can either use literal or variable
	//n := "Bond"
	//switch n {
	//case "Moneypenny":
	// fmt.Println("miss money")
	//case "Bond":
	// fmt.Println("James Bond")
	//case "Q":
	// fmt.Println("This is Q")
	//default:
	// fmt.Println("The default case")
	//} //END - switch
	//switch using literal
	//switch "Bond" {
	//case "Moneypenny":
	// fmt.Println("miss money")
	//case "Bond":
	// fmt.Println("James Bond")
	//case "Q":
	// fmt.Println("This is Q")
	//default:
	// fmt.Println("The default case")
	//} //END - switch
	//* -------------------
	//default case basic:
	//switch {
	//case false:
	// fmt.Println("this should not print")
	//case (2 == 4):
	// fmt.Println("this should not print2")
	//case (3 == 5):
	// fmt.Println("this should not print3")
	//default:
	// fmt.Println("The default case")
	//} //END - switch
	//* -------------------
	//Funky switch with fallthrough
	//it prints when case is false
	//switch {
	//case false:
	// fmt.Println("this should not print")
	//case (2 == 4):
	// fmt.Println("this should not print2")
	//case (3 == 3):
	// fmt.Println("print") //Without fallthrough case 3 prints only
	// fallthrough
	//case (4 == 4):
	// fmt.Println("also true, and does print")
	// fallthrough
	//case (7 == 9):
	// fmt.Println("not true 1")
	// fallthrough
	//case (11 == 14):
	// fmt.Println("not true 2")
	// fallthrough
	//case (15 == 15):
	// fmt.Println("not true 15")
	//} //END - switch
	//* -------------------
	//fallthrough example on the first true case
	//switch {
	//case false:
	// fmt.Println("this should not print")
	//case (2 == 4):
	// fmt.Println("this should not print2")
	//case (3 == 3):
	// fmt.Println("print") //Without fallthrough case 3 prints only
	// fallthrough
	// case (4 == 4):
	// fmt.Println("also true, and does print")
	//} //END - switch
	//* -------------------
	//No default fall through case. prints first true case only
	//switch {
	//case false:
	// fmt.Println("this should not print")
	//case (2 == 4):
	// fmt.Println("this should not print2")
	//case (3 == 3):
	// fmt.Println("print") //==> this case will only get printed
	//case (4 == 4):
	// fmt.Println("also true, but should not print")
	//} //END - switch
} //END - main
//DOCUMENTATION FOR switch
//There are two forms: expression switches and type switches
//the examples above are ALL expression switches
//In type switches the cases contain the types that are
//compared against the type of a specially annotated
//switch expression. (Conversion aka in C as casting)
//A missing switch expression is equivalent to a boolean value true
//default can occur anywhere in a switch statement
//the first case that equals the switch expression
//is executed--the other cases are skipped
//ExprSwitchStmt = "switch" [ SimpleStmt ";" ] [ Expression ] "{" { ExprCaseClause } "}" . **The [] brackets mean OPTIONAL
|
package scaler
import (
"strconv"
"strings"
"time"
)
// Expression is a six-field, space-separated cron-like schedule pattern.
type Expression string

// Match reports whether t satisfies the expression. Field order is:
// minute hour day month year weekday — note year before weekday (assumed
// from the converters applied per field; confirm against the expressions
// this package is actually fed).
func (e Expression) Match(t time.Time) bool {
	a := strings.Split(string(e), " ")
	if len(a) != 6 {
		// malformed expressions never match
		return false
	}
	minute := pattern(a[0])
	hour := pattern(a[1])
	day := pattern(a[2])
	month := pattern(a[3])
	year := pattern(a[4])
	weekday := pattern(a[5])
	return minute.Match(t.Minute(), convert) &&
		hour.Match(t.Hour(), convert) &&
		day.Match(t.Day(), convert) &&
		month.Match(int(t.Month()), convertMonth) &&
		year.Match(t.Year(), convert) &&
		weekday.Match(int(t.Weekday()), convertWeekday)
}
// pattern is one field of an Expression: "*", or comma-separated items,
// each a single value or a "lo-hi" range (open ends allowed).
type pattern string

// convertFunc parses one item of a field into its numeric value.
type convertFunc func(s string) (int, error)
// convert parses a plain decimal field item.
func convert(s string) (int, error) {
	n, err := strconv.Atoi(s)
	return n, err
}
// months maps three-letter month names to their 1-based numbers.
var months = map[string]int{
	"Jan": 1,
	"Feb": 2,
	"Mar": 3,
	"Apr": 4,
	"May": 5,
	"Jun": 6,
	"Jul": 7,
	"Aug": 8,
	"Sep": 9,
	"Oct": 10,
	"Nov": 11,
	"Dec": 12,
}
// convertMonth resolves a three-letter month name (e.g. "Mar") or a plain
// numeric string to its month number.
func convertMonth(s string) (int, error) {
	if n, ok := months[s]; ok {
		return n, nil
	}
	return strconv.Atoi(s)
}
// weekdays maps three-letter weekday names to their numbers, matching
// time.Weekday (Sunday == 0).
var weekdays = map[string]int{
	"Sun": 0,
	"Mon": 1,
	"Tue": 2,
	"Wed": 3,
	"Thu": 4,
	"Fri": 5,
	"Sat": 6,
}
// convertWeekday resolves a three-letter weekday name (e.g. "Mon") or a
// plain numeric string to its weekday number.
func convertWeekday(s string) (int, error) {
	if n, ok := weekdays[s]; ok {
		return n, nil
	}
	return strconv.Atoi(s)
}
// Match reports whether n is accepted by the pattern. cf parses one item
// of text (plain numbers, or month/weekday names, depending on the field).
//
// Grammar per comma-separated item:
//   "*"   (whole pattern) matches everything
//   "x"   matches n == x
//   "x-y" matches x <= n <= y
//   "x-"  matches n >= x
//   "-y"  matches n <= y
//
// Unparsable items are skipped rather than failing the whole pattern —
// except the "-y" form, whose parse failure returns false immediately.
// NOTE(review): that asymmetry looks unintentional; confirm.
func (p pattern) Match(n int, cf convertFunc) bool {
	if p == "*" {
		return true
	}
	a := strings.Split(string(p), ",")
	for _, b := range a {
		c := strings.Split(b, "-")
		if len(c) == 0 || len(c) >= 3 {
			// more than one '-' (or impossible empty split): skip item
			continue
		}
		if len(c) == 2 {
			// range form: d is the low bound text, e the high bound text
			d, e := c[0], c[1]
			if len(d) >= 1 && len(e) >= 1 {
				f, err := cf(d)
				if err != nil {
					continue
				}
				g, err := cf(e)
				if err != nil {
					continue
				}
				if n >= f && n <= g {
					return true
				}
				continue
			}
			if len(d) >= 1 && len(e) == 0 {
				// open-ended upward range "x-"
				f, err := cf(d)
				if err != nil {
					continue
				}
				if n >= f {
					return true
				}
				continue
			}
			if len(d) == 0 && len(e) >= 1 {
				// open-ended downward range "-y"
				f, err := cf(e)
				if err != nil {
					return false
				}
				if n <= f {
					return true
				}
				continue
			}
			continue
		}
		// single-value form
		d, err := cf(c[0])
		if err != nil {
			continue
		}
		if n == d {
			return true
		}
	}
	return false
}
|
package config
import (
"fmt"
"github.com/google/logger"
"github.com/spf13/viper"
)
// DbConfig exposes validated database connection settings.
type DbConfig interface {
	GetUser() string
	GetDatabase() string
	GetPort() string
	GetHost() string
	GetPassword() string
	// GetConnectionString assembles a libpq-style DSN from the parts above.
	GetConnectionString() string
}
// dbConfig is the viper-backed DbConfig implementation. Fields are
// validated lazily: each getter calls logger.Fatal on an empty value.
type dbConfig struct {
	user     string
	database string
	port     string
	password string
	host     string
}
// GetPort returns the configured port, aborting the process when it is empty.
func (d *dbConfig) GetPort() string {
	port := d.port
	if port == "" {
		logger.Fatal("port is empty")
	}
	return port
}
// GetUser returns the configured user, aborting the process when it is empty.
func (d *dbConfig) GetUser() string {
	user := d.user
	if user == "" {
		logger.Fatal("user is empty")
	}
	return user
}
// GetDatabase returns the configured database name, aborting the process
// when it is empty.
func (d *dbConfig) GetDatabase() string {
	db := d.database
	if db == "" {
		logger.Fatal("database is empty")
	}
	return db
}
// GetPassword returns the configured password, aborting the process when
// it is empty.
func (d *dbConfig) GetPassword() string {
	pw := d.password
	if pw == "" {
		logger.Fatal("password is empty")
	}
	return pw
}
// GetHost returns the configured host, aborting the process when it is empty.
func (d *dbConfig) GetHost() string {
	host := d.host
	if host == "" {
		logger.Fatal("host is empty")
	}
	return host
}
// GetConnectionString assembles a libpq-style connection string from the
// individual settings; each getter fatals if its value is missing.
func (d *dbConfig) GetConnectionString() string {
	return fmt.Sprintf(
		"user=%v dbname=%v sslmode=disable port=%v password=%v host=%v",
		d.GetUser(), d.GetDatabase(), d.GetPort(), d.GetPassword(), d.GetHost(),
	)
}
// NewDbConfig builds a DbConfig from the viper keys "<prefix>host",
// "<prefix>port", "<prefix>user", "<prefix>database" and
// "<prefix>password". Each key gets a development default and may be
// overridden by the environment variable "<prefix>HOST" etc.
//
// BUG fix: viper.BindEnv errors were silently discarded; they are now
// returned through the (previously always-nil) error result.
func NewDbConfig(v *viper.Viper, prefix string) (DbConfig, error) {
	v.SetDefault(prefix+"host", "localhost")
	if err := v.BindEnv(prefix+"host", prefix+"HOST"); err != nil {
		return nil, err
	}
	v.SetDefault(prefix+"port", "5434")
	if err := v.BindEnv(prefix+"port", prefix+"PORT"); err != nil {
		return nil, err
	}
	v.SetDefault(prefix+"user", "dev")
	if err := v.BindEnv(prefix+"user", prefix+"USER"); err != nil {
		return nil, err
	}
	v.SetDefault(prefix+"database", "dev")
	if err := v.BindEnv(prefix+"database", prefix+"DATABASE"); err != nil {
		return nil, err
	}
	v.SetDefault(prefix+"password", "dev")
	if err := v.BindEnv(prefix+"password", prefix+"PASSWORD"); err != nil {
		return nil, err
	}
	return &dbConfig{
		user:     v.GetString(prefix + "user"),
		database: v.GetString(prefix + "database"),
		host:     v.GetString(prefix + "host"),
		port:     v.GetString(prefix + "port"),
		password: v.GetString(prefix + "password"),
	}, nil
}
|
package actions
import (
"errors"
"github.com/LiveSocket/bot/command-service/models"
"github.com/LiveSocket/bot/conv"
"github.com/LiveSocket/bot/service"
"github.com/LiveSocket/bot/service/socket"
"github.com/gammazero/nexus/v3/wamp"
)
// getInput is the decoded argument set for public.command.get.
type getInput struct {
	Channel string // channel whose commands are listed
}
// Get Get a list of commands for a channel
//
// public.command.get
// {channel string}
//
// Returns [Command...]
//
// NOTE(review): the original returned a closure typed
// func(*wamp.Invocation) socket.Result while the declared return type is
// func(*socket.Invocation) socket.Result — that only compiles if
// socket.Invocation aliases wamp.Invocation. The closure parameter is
// aligned with the declared return type here; confirm socket.Invocation
// exposes ArgumentsKw. The parameter was also renamed from `service`,
// which shadowed the imported service package.
func Get(svc *service.Service) func(*socket.Invocation) socket.Result {
	return func(invocation *socket.Invocation) socket.Result {
		// Get input args from call
		input, err := getGetInput(invocation.ArgumentsKw)
		if err != nil {
			return socket.Error(err)
		}
		// Find all commands for channel
		commands, err := models.GetCommands(svc, input.Channel)
		if err != nil {
			return socket.Error(err)
		}
		// Make generic list of commands for the variadic success result
		list := make([]interface{}, len(commands))
		for i, command := range commands {
			list[i] = command
		}
		// Return list of commands
		return socket.Success(list...)
	}
}
// getGetInput extracts the required "channel" argument from kwargs,
// erring when it is absent.
func getGetInput(kwargs wamp.Dict) (*getInput, error) {
	ch := kwargs["channel"]
	if ch == nil {
		return nil, errors.New("Missing channel")
	}
	return &getInput{Channel: conv.ToString(ch)}, nil
}
|
package errorsx
import (
"context"
"net"
)
// Dialer establishes network connections.
type Dialer interface {
	// DialContext behaves like net.Dialer.DialContext.
	DialContext(ctx context.Context, network, address string) (net.Conn, error)
}
// ErrorWrapperDialer is a dialer that performs error wrapping. The connection
// returned by the DialContext function will also perform error wrapping.
type ErrorWrapperDialer struct {
	// Dialer is the underlying dialer (embedded so other Dialer methods,
	// if any, pass through untouched).
	Dialer
}
// DialContext implements Dialer.DialContext: it delegates to the wrapped
// dialer, wraps dial failures in *ErrWrapper, and wraps successful
// connections so their subsequent I/O errors are wrapped too.
func (d *ErrorWrapperDialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
	conn, err := d.Dialer.DialContext(ctx, network, address)
	if err == nil {
		return &errorWrapperConn{Conn: conn}, nil
	}
	return nil, &ErrWrapper{
		Failure:    toFailureString(err),
		Operation:  ConnectOperation,
		WrappedErr: err,
	}
}
// errorWrapperConn is a net.Conn that performs error wrapping.
type errorWrapperConn struct {
	// Conn is the underlying connection (embedded so unwrapped methods
	// such as deadlines and addresses pass through).
	net.Conn
}
// Read implements net.Conn.Read, wrapping any error in an *ErrWrapper.
//
// BUG fix: the io.Reader contract allows a reader to return n > 0
// together with an error; the original returned 0 in that case, hiding
// bytes already copied into b from the caller. The real count is now
// propagated alongside the wrapped error.
func (c *errorWrapperConn) Read(b []byte) (int, error) {
	count, err := c.Conn.Read(b)
	if err != nil {
		return count, &ErrWrapper{
			Failure:    toFailureString(err),
			Operation:  ReadOperation,
			WrappedErr: err,
		}
	}
	return count, nil
}
// Write implements net.Conn.Write, wrapping any error in an *ErrWrapper.
//
// BUG fix: on a partial write the original returned 0, misreporting how
// many bytes actually reached the connection; the real count is now
// propagated alongside the wrapped error.
func (c *errorWrapperConn) Write(b []byte) (int, error) {
	count, err := c.Conn.Write(b)
	if err != nil {
		return count, &ErrWrapper{
			Failure:    toFailureString(err),
			Operation:  WriteOperation,
			WrappedErr: err,
		}
	}
	return count, nil
}
// Close implements net.Conn.Close, wrapping any error in an *ErrWrapper.
func (c *errorWrapperConn) Close() error {
	if err := c.Conn.Close(); err != nil {
		return &ErrWrapper{
			Failure:    toFailureString(err),
			Operation:  CloseOperation,
			WrappedErr: err,
		}
	}
	return nil
}
|
package com
import (
"regexp"
"strconv"
"strings"
)
// Media classification codes for Item.Type.
const (
	// Movie type
	Movie byte = 0
	// SeasonTV is TV has season
	SeasonTV byte = 1
	// NoSeasonTV is TV has no season
	NoSeasonTV byte = 2
	// UnknownType marks an unclassified item
	UnknownType byte = 255
)

// Minimum byte lengths for usable keywords.
const (
	minTargetKeywordSize  = 3
	minPrimaryKeywordSize = 3
)
// MyKeyword is keyword in my system, lower case except @OrgKeywords.
type MyKeyword struct {
	OrgKeywords    []string // from user input, verbatim
	SearchKeywords []string // search keywords of name, for DouBan search
	NameKeywords   []string // keywords of name based on user input
	Season         int      // 0: all seasons for TV and movies; -1: unspecified
}
// Item is a media entry obtained from the internet, e.g. DouBan.
type Item struct {
	Type        byte   // one of Movie/SeasonTV/NoSeasonTV/UnknownType
	OrgName     string // original (often English) title
	ChName      string // Chinese title
	OtherChName string // alternative Chinese title
}
// MyKeywordStruct is used for KAD search with multiple target keywords.
type MyKeywordStruct struct {
	TargetKeywords []string   // For KAD search
	MyKeyword      *MyKeyword // from user
	Items          []*Item    // from Internet or database
}
// theWords lists English stop-words that never become primary keywords.
var theWords = map[string]bool{"the": true, "these": true, "that": true, "a": true, "this": true,
	"he": true, "she": true, "we": true, "you": true, "us": true, "his": true, "her": true, "it": true, "my": true, "our": true,
	"no": true, "yes": true, "not": true, "is": true, "are": true,
	"in": true, "on": true, "of": true}

// theChDigits maps single Chinese numerals to their values; 十 (ten) is
// handled separately by parseDigit.
var theChDigits = map[string]int{"零": 0, "一": 1, "二": 2, "三": 3, "四": 4, "五": 5, "六": 6, "七": 7, "八": 8, "九": 9}
// parseDigit parses s as an integer, either in ASCII digits or — when bCh
// is true — in simple Chinese numerals below one hundred (e.g. "十" = 10,
// "二十三" = 23). @s is a keyword, so it contains no spaces. It returns
// the value and true on success, or (-1, false) otherwise.
//
// BUG fix: an empty string previously slipped through the rune loop and
// was reported as the "valid" value (-1, true).
func parseDigit(s string, bCh bool) (int, bool) {
	if v, err := strconv.Atoi(s); err == nil {
		return v, true
	}
	if !bCh || s == "" {
		return -1, false
	}
	// think it as Chinese; v == -1 means "nothing accumulated yet"
	v := -1
	for _, c := range s {
		if c == '十' {
			if v == -1 {
				v = 1 // bare leading 十 means ten
			}
			v *= 10
			continue
		}
		d, ok := theChDigits[string(c)]
		if !ok {
			return -1, false
		}
		if v == -1 {
			v = 0
		}
		v += d
	}
	return v, true
}
// seasonPattern matches keywords like "2x" and captures the season number.
// Compiled once at package level instead of on every NewMyKeyword call.
var seasonPattern = regexp.MustCompile(`^(?P<season>\d+)x$`)

// NewMyKeyword converts keywords from the user to the internal format,
// extracting season information ("season N", "Nx", "sN") so it is not
// treated as a name keyword.
//
// Fixes vs the original: the regexp is hoisted out of the function, a
// single FindStringSubmatch replaces the MatchString+FindStringSubmatch
// double scan, and an empty keyword no longer panics on text[0].
func NewMyKeyword(keywords []string) *MyKeyword {
	ignoreI := -1
	myKeyword := MyKeyword{Season: -1}
	myKeyword.OrgKeywords = keywords // keywords from user
	for i, key := range keywords {
		if i == ignoreI || key == "" {
			continue
		}
		// check if season or episode keyword; if so, only extract the
		// season information, without treating it as a name keyword.
		if key == "season" {
			// assume next keyword is the specific season
			if i < len(keywords)-1 {
				if season, ok := parseDigit(keywords[i+1], true); ok {
					myKeyword.Season = season
					ignoreI = i + 1
					continue
				}
			}
		} else {
			var seasonStr string
			if m := seasonPattern.FindStringSubmatch(key); m != nil {
				seasonStr = m[index(seasonPattern.SubexpNames(), "season")]
			} else {
				// "sN" form: strip the leading 's'
				text := []rune(key)
				switch text[0] {
				case 's':
					seasonStr = string(text[1:])
				}
			}
			if seasonStr != "" {
				if season, ok := parseDigit(seasonStr, true); ok {
					myKeyword.Season = season
					continue
				}
			}
		}
		// name keyword
		myKeyword.NameKeywords = append(myKeyword.NameKeywords, key)
		// search keyword, used for DouBan search
		myKeyword.SearchKeywords = append(myKeyword.SearchKeywords, GetPrimaryKeywordsByKeyword(key)...)
	}
	return &myKeyword
}
// index reports the position of item in slice, or -1 when absent.
func index(slice []string, item string) int {
	for i, s := range slice {
		if s == item {
			return i
		}
	}
	return -1
}
// FilterItems is checking items from internet/database if satisfying user
// search keywords. We use NameKeywords, not PrimaryKeywords, for accurate
// matching: every name keyword must appear in the lowercased original
// name, the Chinese name, or the alternative Chinese name.
// The result is ordered TV items first, then movies, then unknown types,
// for later file-name classification.
//
// (Idiom: strings.Contains replaces the strings.Index(...) == -1 tests.)
func FilterItems(m []*Item, myKeyword *MyKeyword) []*Item {
	// filter
	var items []*Item
	for _, item := range m {
		orgName := strings.ToLower(item.OrgName)
		bSatisfied := true
		for _, key := range myKeyword.NameKeywords {
			if !strings.Contains(orgName, key) &&
				!strings.Contains(item.ChName, key) &&
				!strings.Contains(item.OtherChName, key) {
				bSatisfied = false
				break
			}
		}
		if bSatisfied {
			items = append(items, item)
		}
	}
	// sort it for later file name classification: TV first
	var tvItems, movieItems, otherItems []*Item
	for _, item := range items {
		switch item.Type {
		case SeasonTV, NoSeasonTV:
			tvItems = append(tvItems, item)
		case Movie:
			movieItems = append(movieItems, item)
		default: // Unknown type
			otherItems = append(otherItems, item)
		}
	}
	newItems := append(tvItems, movieItems...)
	newItems = append(newItems, otherItems...)
	return newItems
}
// GetPrimaryKeywords derives the primary-keyword slice and membership map
// for name s; used for KAD search. It returns (nil, nil) when no primary
// keywords can be derived.
func GetPrimaryKeywords(s string) ([]string, map[string]bool) {
	seen := make(map[string]bool)
	var ordered []string
	for _, key := range Split2PrimaryKeywords(s) {
		for _, pk := range GetPrimaryKeywordsByKeyword(key) {
			if seen[pk] {
				continue
			}
			seen[pk] = true
			ordered = append(ordered, pk)
		}
	}
	if len(ordered) == 0 {
		return nil, nil
	}
	return ordered, seen
}
// GetPrimaryKeywordsByKeyword maps a native keyword to its primary
// keywords: stop-words and keywords shorter than minPrimaryKeywordSize
// bytes yield nil; anything else passes through as a one-element slice.
func GetPrimaryKeywordsByKeyword(keyword string) []string {
	switch {
	case theWords[keyword], len(keyword) < minPrimaryKeywordSize:
		return nil
	default:
		return []string{keyword}
	}
}
// NewMyKeywordStruct is created for KAD search: it pairs the user's
// MyKeyword with its found items and the derived target keywords.
// It returns nil (and logs a warning) when no target keywords can be
// derived from the items.
func NewMyKeywordStruct(myKeyword *MyKeyword, items []*Item) *MyKeywordStruct {
	targetKeywords := getTargetKeywords(items)
	if targetKeywords == nil {
		HhjLog.Warningf("No target keywords for MyKeyword: %+v", myKeyword)
		return nil
	}
	return &MyKeywordStruct{TargetKeywords: targetKeywords, MyKeyword: myKeyword, Items: items}
}
// get target keywords for KAD: one representative keyword per group of
// items. An item whose primary keywords overlap an already-chosen target
// is considered covered; otherwise its own target keyword is added.
// Iteration order of the final map is random, so the result order is
// nondeterministic.
func getTargetKeywords(items []*Item) []string {
	// get target keyword map
	targetKeywordMap := make(map[string]bool)
	for _, item := range items {
		// check if target keyword existing or not
		keywordSlice, keywordMap := GetPrimaryKeywords(item.OrgName)
		existing := false
		for keyword := range keywordMap {
			if targetKeywordMap[keyword] {
				existing = true
				break
			}
		}
		// get new target keyword
		// (keywordSlice may be nil when the item yields no primary
		// keywords; getTargetKeyword must tolerate that)
		if !existing {
			targetKeyword := getTargetKeyword(keywordSlice, targetKeywordMap)
			if targetKeyword != "" {
				targetKeywordMap[targetKeyword] = true
			}
		}
	}
	// convert to slice
	var targetKeywords []string
	for keyword := range targetKeywordMap {
		targetKeywords = append(targetKeywords, keyword)
	}
	return targetKeywords
}
// getTargetKeyword picks the longest keyword (by byte length) from
// primaryKeywords as the KAD target keyword; the first of equally long
// candidates wins. It returns "" for an empty or nil slice.
//
// BUG fix: the original indexed primaryKeywords[0] unconditionally and
// panicked when getTargetKeywords passed the nil slice produced by
// GetPrimaryKeywords for items without primary keywords.
//
// targetKeywordMap is currently unused; kept to preserve the call
// signature (callers pass the accumulated target set).
func getTargetKeyword(primaryKeywords []string, targetKeywordMap map[string]bool) string {
	word := ""
	for _, key := range primaryKeywords {
		if len(key) > len(word) {
			word = key
		}
	}
	return word
}
|
package handle
import (
"github.com/valyala/fasthttp"
"mygo/model"
"mygo/service"
"strconv"
)
// OrderList decodes an order-list request body, queries the service layer
// and writes the result.
//
// BUG fix: the decode error was ignored; malformed JSON now returns an
// error response instead of querying with a zero-valued order.
// NOTE(review): `resp` is a package-level variable shared by every handler
// here — concurrent requests race on it; it should become a local, but its
// type is not visible in this file section, so it is left as-is.
func OrderList(ctx *fasthttp.RequestCtx) {
	var order model.OrderList
	if err := order.UnmarshalJSON(ctx.Request.Body()); err != nil {
		resp.Msg = "参数错误"
		CommonWriteError(ctx, resp)
		return
	}
	result := service.GetOrderList(order)
	resp.Data = result
	CommonWriteSuccess(ctx, resp)
}
// OrderDo creates an order (生成订单) from the JSON request body, using the
// user ID carried in the "uid" request header.
//
// BUG fix: after writing the error response for a bad body the original
// fell through and created the order anyway; it now returns.
func OrderDo(ctx *fasthttp.RequestCtx) {
	var order model.CreateOrder
	if err := order.UnmarshalJSON(ctx.Request.Body()); err != nil {
		resp.Msg = "参数错误"
		CommonWriteError(ctx, resp)
		return
	}
	// a missing/invalid header yields uid == 0 (parse error deliberately
	// ignored, preserving the original behavior)
	uid, _ := strconv.ParseInt(string(ctx.Request.Header.Peek("uid")), 10, 64)
	result := service.CreateOrder(order.CartId, order.AddressId, uid)
	resp.StatusCode = result.StatusCode
	resp.Msg = result.Msg
	resp.Data = result.Data
	CommonWrite(ctx, resp)
}
|
package list
import ("store"
)
// ListAllEmployees returns every employee currently present (There set).
func ListAllEmployees(employees *([]store.Employee)) []store.Employee {
	present := make([]store.Employee, 0)
	for _, e := range *employees {
		if e.There {
			present = append(present, e)
		}
	}
	return present
}
// ListEmployeesByDept returns the present employees of dept. An unknown
// department yields an empty (non-nil) slice.
//
// (Idiom: a single comma-ok map access replaces the original
// check-then-fetch double lookup, and `== true` is dropped.)
func ListEmployeesByDept(dept string, deptEmpMap *(map[string]*([]store.Employee))) []store.Employee {
	list := make([]store.Employee, 0)
	if employees, ok := (*deptEmpMap)[dept]; ok {
		for _, empl := range *employees {
			if empl.There {
				list = append(list, empl)
			}
		}
	}
	return list
}
// func ListEmployeesByLocPrint(loc int, locEmpMap *map[int]*([]store.Employee)) []store.Employee {
// list := make([]store.Employee, 0)
// _, ok := (*locEmpMap)[loc]
// if(ok){
// employees := (*locEmpMap)[loc]
// for _, empl:= range *employees{
// {if empl.There == true {list = append(list, empl)}}
// }
// }
// }
// ListEmployeesByLoc returns the present employees at location loc. An
// unknown location yields an empty (non-nil) slice.
//
// (Idiom: a single comma-ok map access replaces the original
// check-then-fetch double lookup, and `== true` is dropped.)
func ListEmployeesByLoc(loc int, locEmpMap *map[int]*([]store.Employee)) []store.Employee {
	list := make([]store.Employee, 0)
	if employees, ok := (*locEmpMap)[loc]; ok {
		for _, empl := range *employees {
			if empl.There {
				list = append(list, empl)
			}
		}
	}
	return list
}
// ListEmployeesByDoorNoAtLoc returns the present employees at this
// location whose door numbers include doorno. As before, an employee with
// duplicate matching door numbers is appended once per match.
func ListEmployeesByDoorNoAtLoc(doorno int, employeesAtLoc *([]store.Employee)) []store.Employee {
	matched := make([]store.Employee, 0)
	for _, e := range *employeesAtLoc {
		for _, dn := range e.GetDoorNos() {
			if dn == doorno && e.There {
				matched = append(matched, e)
			}
		}
	}
	return matched
}
// ListEmployeesByStreetAtLoc returns the present employees at this
// location whose streets include street. As before, an employee with
// duplicate matching streets is appended once per match.
func ListEmployeesByStreetAtLoc(street string, employeesAtLoc *([]store.Employee)) []store.Employee {
	matched := make([]store.Employee, 0)
	for _, e := range *employeesAtLoc {
		for _, st := range e.GetStreets() {
			if st == street && e.There {
				matched = append(matched, e)
			}
		}
	}
	return matched
}
// ListEmployeesByLocalityAtLoc returns the present employees at this
// location whose localities include locality. As before, an employee with
// duplicate matching localities is appended once per match.
func ListEmployeesByLocalityAtLoc(locality string, employeesAtLoc *([]store.Employee)) []store.Employee {
	matched := make([]store.Employee, 0)
	for _, e := range *employeesAtLoc {
		for _, lc := range e.GetLocalities() {
			if lc == locality && e.There {
				matched = append(matched, e)
			}
		}
	}
	return matched
}
|
package main
import "time"
import (
"encoding/json"
"fmt"
"net/http"
"net/url"
"strings"
)
// IssuesURL is the GitHub issue-search endpoint.
const IssuesURL = "https://api.github.com/search/issues"

// IssuesSearchResult mirrors the top level of the search response.
type IssuesSearchResult struct {
	TotalCount int `json:"total_count"`
	Items      []*Issue
}
// Issue is one issue from the GitHub search response.
type Issue struct {
	Number int
	// BUG fix: the tag was `json:html_url` — an unquoted tag value makes
	// the whole tag unparsable, so the "html_url" key never decoded.
	HTMLURL string `json:"html_url"`
	Title   string
	State   string
	User    *User
	// BUG fix: the API key is "created_at"; without a tag the field name
	// "CreateAt" never matches (case-insensitive matching does not bridge
	// the underscore), so the timestamp stayed zero.
	CreateAt time.Time `json:"created_at"`
	Body     string
}
// User is the issue author as returned by the GitHub API.
type User struct {
	Login   string
	HTMLURL string `json:"html_url"`
}
// main is a stub; the search/print logic has not been written yet.
func main() {
}
|
// Copyright 2019 Yunion
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package compute
import (
"yunion.io/x/onecloud/cmd/climc/shell"
"yunion.io/x/onecloud/pkg/mcclient"
"yunion.io/x/onecloud/pkg/mcclient/modulebase"
"yunion.io/x/onecloud/pkg/mcclient/modules"
"yunion.io/x/onecloud/pkg/mcclient/options"
"yunion.io/x/onecloud/pkg/mcclient/options/compute"
)
// init registers the climc sub-commands for the DBInstance (RDS) resource
// family: standard CRUD operations, lifecycle/maintenance actions, and list
// commands for the related networks / parameters / privileges resources.
func init() {
	cmd := shell.NewResourceCmd(&modules.DBInstance) //.WithContextManager(&modules.Networks)
	cmd.List(&compute.DBInstanceListOptions{})
	cmd.Create(&compute.DBInstanceCreateOptions{})
	cmd.Update(&compute.DBInstanceUpdateOptions{})
	cmd.Show(&compute.DBInstanceIdOptions{})
	cmd.Delete(&compute.DBInstanceDeleteOptions{})
	// Lifecycle and maintenance actions exposed as `dbinstance-<action>`.
	cmd.Perform("renew", &compute.DBInstanceRenewOptions{})
	cmd.Perform("change-config", &compute.DBInstanceChangeConfigOptions{})
	cmd.Perform("public-connection", &compute.DBInstancePublicConnectionOptions{})
	cmd.Perform("recovery", &compute.DBInstanceRecoveryOptions{})
	cmd.Perform("reboot", &compute.DBInstanceIdOptions{})
	cmd.Perform("purge", &compute.DBInstanceIdOptions{})
	cmd.Perform("syncstatus", &compute.DBInstanceIdOptions{})
	cmd.Perform("sync", &compute.DBInstanceIdOptions{})
	cmd.Perform("change-owner", &compute.DBInstanceChangeOwnerOptions{})
	cmd.PerformWithKeyword("add-tag", "user-metadata", &options.ResourceMetadataOptions{})
	cmd.PerformWithKeyword("set-tag", "set-user-metadata", &options.ResourceMetadataOptions{})
	cmd.Perform("remote-update", &compute.DBInstanceRemoteUpdateOptions{})
	cmd.Perform("set-secgroup", &compute.DBInstanceSetSecgroupOptions{})

	// DBInstanceNetworkListOptions lists networks attached to DB instances,
	// optionally scoped to a single instance or a single network.
	type DBInstanceNetworkListOptions struct {
		options.BaseListOptions
		DBInstance string `help:"ID or Name of DBInstance" json:"dbinstance"`
		Network    string `help:"Network ID or name"`
	}
	R(&DBInstanceNetworkListOptions{}, "dbinstance-network-list", "List DB instance networks", func(s *mcclient.ClientSession, opts *DBInstanceNetworkListOptions) error {
		params, err := options.ListStructToParams(opts)
		if err != nil {
			return err
		}
		var result *modulebase.ListResult
		// Scope the listing: by instance first, then by network, else global.
		if len(opts.DBInstance) > 0 {
			result, err = modules.DBInstanceNetworks.ListDescendent(s, opts.DBInstance, params)
		} else if len(opts.Network) > 0 {
			result, err = modules.DBInstanceNetworks.ListDescendent2(s, opts.Network, params)
		} else {
			result, err = modules.DBInstanceNetworks.List(s, params)
		}
		if err != nil {
			return err
		}
		printList(result, modules.DBInstanceNetworks.GetColumns(s))
		return nil
	})

	// DBInstanceParameterListOptions lists configuration parameters of a
	// DB instance.
	type DBInstanceParameterListOptions struct {
		options.BaseListOptions
		DBInstance string `help:"ID or Name of DBInstance" json:"dbinstance"`
	}
	R(&DBInstanceParameterListOptions{}, "dbinstance-parameter-list", "List DB instance parameters", func(s *mcclient.ClientSession, opts *DBInstanceParameterListOptions) error {
		params, err := options.ListStructToParams(opts)
		if err != nil {
			return err
		}
		result, err := modules.DBInstanceParameters.List(s, params)
		if err != nil {
			return err
		}
		printList(result, modules.DBInstanceParameters.GetColumns(s))
		return nil
	})

	// DBInstancePrivilegeListOptions lists account/database privilege
	// bindings of DB instances.
	type DBInstancePrivilegeListOptions struct {
		options.BaseListOptions
		DBInstanceaccount  string `help:"ID or Name of DBInstanceaccount" json:"dbinstanceaccount"`
		DBInstancedatabase string `help:"ID or Name of DBInstancedatabase" json:"dbinstancedatabase"`
	}
	R(&DBInstancePrivilegeListOptions{}, "dbinstance-privilege-list", "List DB instance accounts", func(s *mcclient.ClientSession, opts *DBInstancePrivilegeListOptions) error {
		params, err := options.ListStructToParams(opts)
		if err != nil {
			return err
		}
		result, err := modules.DBInstancePrivileges.List(s, params)
		if err != nil {
			return err
		}
		printList(result, modules.DBInstancePrivileges.GetColumns(s))
		return nil
	})
}
|
package main
import (
"Round2_two/yep"
"log"
"net/http"
)
// main registers the HTTP routes (each path is served by the corresponding
// handler in package yep) and starts the server on port 8080.
func main() {
	http.HandleFunc("/", yep.Interesting)
	http.HandleFunc("/init", yep.Build_up)
	http.HandleFunc("/delete", yep.Delete_one)
	http.HandleFunc("/query", yep.Query_one)
	http.HandleFunc("/response", yep.Response)
	// ListenAndServe blocks; a returned error means the server failed.
	// Fixes: misspelled variable `errr` and log prefix "ListenAndserve:".
	err := http.ListenAndServe(":8080", nil)
	if err != nil {
		log.Fatal("ListenAndServe:", err)
	}
}
// dsn := fmt.Sprintf("%s:%s@%s(%s:%d)/%s", USERNAME, PASSWORD, NETWORK, SERVER, PORT, DATABASE)
// db, err := sql.Open("mysql", dsn)
// if err != nil {
// fmt.Printf("Open mysql failed,err:%v\n", err)
// return
// }
// db.SetConnMaxLifetime(100 * time.Second) //最大连接周期,超过时间的连接就close
// db.SetMaxOpenConns(100) //设置最大连接数
// db.SetMaxIdleConns(16) //设置闲置连接数
//多行查询
// rows, err := db.Query("select id,name,story from withwater")
// if err != nil {
// fmt.Println(err)
// }
// for rows.Next() {
// rows.Scan(&id, &name, &story)
// fmt.Println(name, story)
// }
// defer rows.Close()
//单行查询
// var id int
// var name string
// var story string
// roww := db.QueryRow("SELECT * FROM withwater ORDER BY RAND() limit 1")
// roww.Scan(&id, &name, &story)
// fmt.Println(name, story)
//插入元素
// ret, _ := db.Exec("insert into withwater(id,name,story) values(3,'秋','我自关山点酒')")
// //LastInsertId返回一个数据库生成的回应命令的整数。
// //返回插入的ID
// insID, _ := ret.LastInsertId()
// fmt.Println(insID)
//删除元素
// ret3, _ := db.Exec("delete from withwater where id = ?", 3)
// delNums, _ := ret3.RowsAffected()
// fmt.Println(delNums)
// //关闭数据库
// defer db.Close()
|
package _2_defer
import (
"sync"
)
var (
	// mutex guards counter in the examples below.
	mutex = &sync.Mutex{}
	// counter is the shared state the examples read under the lock.
	counter int
)
// LockWithDefer demonstrates the idiomatic pattern: the deferred Unlock runs
// on every exit path, including a panic inside doSomething. Note that the
// lock is still held during doSomethingElse.
func LockWithDefer() {
	mutex.Lock()
	defer mutex.Unlock()
	doSomething(counter)
	doSomethingElse()
}
// LockNoDefer unlocks explicitly instead of with defer, releasing the lock
// before doSomethingElse runs. CAUTION: if doSomething panics, Unlock is
// skipped — this variant exists to contrast with LockWithDefer.
func LockNoDefer(mutex *sync.Mutex) {
	mutex.Lock()
	doSomething(counter)
	mutex.Unlock()
	doSomethingElse()
}
// UseCopy keeps the critical section minimal: it takes a snapshot of the
// shared counter under the lock and does all further work on the copy.
func UseCopy(mutex *sync.Mutex) {
	mutex.Lock()
	snapshot := counter
	mutex.Unlock()

	doSomething(snapshot)
	doSomethingElse()
}
// doSomething is a placeholder for work that consumes the counter value.
func doSomething(val int) {
	// not implemented
}
// doSomethingElse is a placeholder for work that does not touch shared state.
func doSomethingElse() {
	// not implemented
}
|
package number
// BitNumber is a 32-bit bitmask with helpers to set, test and count bits.
type BitNumber uint32

// NewBitNumber returns a zeroed bitmask.
func NewBitNumber() *BitNumber {
	return new(BitNumber)
}

// Get returns the raw uint32 value of the mask.
func (n *BitNumber) Get() uint32 {
	return uint32(*n)
}

// Set replaces the whole mask with i.
func (n *BitNumber) Set(i uint32) {
	*n = BitNumber(i)
}

// Clear resets every bit to zero.
func (n *BitNumber) Clear() {
	n.Set(0)
}

// Mark sets bit i. Callers should check IsLimited(i) first: a negative i
// panics at the shift, and i >= 32 is a no-op because the shifted bit falls
// outside the 32-bit mask.
func (n *BitNumber) Mark(i int) {
	*n |= 1 << i
}

// IsMarked reports whether bit i is set. The same index caveats as Mark apply.
func (n *BitNumber) IsMarked(i int) bool {
	return *n&(1<<i) != 0
}

// IsLimited reports whether i is a valid bit index for a 32-bit mask.
// Fix: negative indexes were previously reported as valid even though they
// panic in Mark/IsMarked.
func (n *BitNumber) IsLimited(i int) bool {
	return i >= 0 && i < 32
}

// Counts returns the number of set bits (population count).
func (n *BitNumber) Counts() int {
	count := 0
	for i := uint8(0); i < 32; i++ {
		if *n&(1<<i) != 0 {
			count++
		}
	}
	return count
}
|
package domain
// ConfigurationRequest is the payload for submitting a batch of
// configuration entries for an application within a namespace.
type ConfigurationRequest struct {
	AppName   string          `json:"appName"`   // target application name
	Namespace string          `json:"namespace"` // namespace the entries belong to
	Data      []Configuration `json:"data"`      // the configuration entries themselves
}
|
package main
import (
"crypto/aes"
"crypto/sha256"
"flag"
"io/ioutil"
"log"
"os"
)
// Command-line flags. Encrypt is the default mode; -d switches to decrypt.
var key = flag.String("k", "", "key: the key used to encrypt/decrypt the message")
var encrypt = flag.Bool("e", false, "encrypt: ecrypt mode (default)")
var decrypt = flag.Bool("d", false, "decrypt: decrypt mode")
var verbose = flag.Bool("v", false, "verbose: print extraneous debug info (to stderr)")
var raw = flag.Bool("r", false, "raw: do not append a newline to the output")

// out logs diagnostics to stderr with a time and file:line prefix.
var out = log.New(os.Stderr, "", log.Ltime|log.Lshortfile)
// main reads a message from stdin and encrypts or decrypts it with AES,
// deriving the cipher key from the SHA-256 hash of the -k flag, and writes
// the result to stdout.
//
// SECURITY NOTE(review): each block is processed independently, i.e. ECB
// mode, which leaks plaintext patterns; real use should switch to a
// crypto/cipher mode such as CTR or GCM. Zero-padding is also ambiguous on
// decryption.
func main() {
	// parse flags, ensure we have a key
	flag.Parse()
	if *key == "" {
		out.Fatalf("The `-k` option is mandatory. See `-h` for help.\n")
	}
	// Read from stdin. Fix: the original unconditionally dropped the last
	// byte, which panicked on empty input and corrupted input that had no
	// trailing newline; only strip a single trailing '\n' if present.
	msg, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		out.Fatalf("ReadAll(): %v\n", err)
	}
	if len(msg) > 0 && msg[len(msg)-1] == '\n' {
		msg = msg[:len(msg)-1]
	}
	// derive the AES key from the passphrase & create the cipher
	saltyKey := sha256.Sum256([]byte(*key))
	block, err := aes.NewCipher(saltyKey[:])
	if err != nil {
		out.Fatalf("NewCipher(): %v\n", err)
	}
	blockSize := block.BlockSize()
	// zero-pad the message so its length is a multiple of the block size
	if rem := len(msg) % blockSize; rem != 0 {
		for i := 0; i < blockSize-rem; i++ {
			msg = append(msg, 0)
		}
	}
	// process each block independently (see security note above)
	var result []byte
	for i := 0; i < len(msg)/blockSize; i++ {
		chunk := make([]byte, blockSize)
		src := msg[i*blockSize:]
		if *decrypt {
			if *encrypt {
				out.Printf("What are you doing?")
			}
			block.Decrypt(chunk, src)
		} else {
			block.Encrypt(chunk, src)
		}
		result = append(result, chunk...)
	}
	// output
	if !*raw {
		result = append(result, '\n')
	}
	os.Stdout.Write(result)
}
|
// Package kafka provides producer and consumer to work with kafka topics
package kafka
import (
"sync"
"testing"
"github.com/stretchr/testify/assert"
)
// TestIntegrationNewConsumer will be passed only if kafka broker is started on localhost:9092
func TestIntegrationNewConsumer(t *testing.T) {
	// A fully valid config: NewConsumer should connect, wire up the close
	// channel, and keep a reference to the config it was given.
	config := &Config{
		Host:            "localhost",
		Port:            "9092",
		Version:         "2.4.1",
		ConsumerTopic:   "testTopic",
		ConsumerGroupID: "1",
	}
	consumer, err := NewConsumer(config)
	assert.NotNil(t, consumer)
	assert.NotNil(t, consumer.closeChan)
	assert.Equal(t, config, consumer.Config)
	assert.NoError(t, err)
}
// TestNewConsumerIncorrectVersion verifies that an unparsable Kafka version
// string is rejected before any connection attempt.
func TestNewConsumerIncorrectVersion(t *testing.T) {
	config := &Config{
		Host:            "localhost",
		Port:            "9092",
		Version:         "11111", // not a valid Kafka version
		ConsumerGroupID: "1",
	}
	consumer, err := NewConsumer(config)
	assert.EqualError(t, err, "invalid version `11111`")
	assert.Nil(t, consumer)
}
// TestNewConsumerIncorrectHost verifies that an unreachable broker address
// surfaces sarama's "out of available brokers" error and yields no consumer.
func TestNewConsumerIncorrectHost(t *testing.T) {
	config := &Config{
		Host:            "localghost", // deliberately unresolvable host
		Port:            "0",
		Version:         "2.4.1",
		ConsumerGroupID: "1",
	}
	consumer, err := NewConsumer(config)
	assert.EqualError(t, err, "kafka: client has run out of available brokers to talk to (Is your cluster reachable?)")
	assert.Nil(t, consumer)
}
// TestConsumerClose verifies that Close succeeds and actually closes the
// consumer's close channel.
func TestConsumerClose(t *testing.T) {
	c := &Consumer{
		closeChan: make(chan bool),
		once:      sync.Once{},
	}
	assert.NoError(t, c.Close())
	// A non-blocking receive distinguishes "closed" (zero value, ok=false)
	// from "still open" (would block, so default fires).
	select {
	case _, open := <-c.closeChan:
		assert.False(t, open)
	default:
		t.Error("Channel is not closed")
	}
}
// TestConsumerDoubleClose verifies that Close is idempotent: a second call
// must not error or panic (the close is guarded by sync.Once).
func TestConsumerDoubleClose(t *testing.T) {
	c := &Consumer{
		closeChan: make(chan bool),
		once:      sync.Once{},
	}
	assert.NoError(t, c.Close())
	assert.NoError(t, c.Close())
	select {
	case _, open := <-c.closeChan:
		assert.False(t, open)
	default:
		t.Error("Channel is not closed")
	}
}
// TestGetKafkaAddr verifies that the broker address list is built as a
// single "host:port" entry from the config.
func TestGetKafkaAddr(t *testing.T) {
	cfg := &Config{
		Host: "localhost",
		Port: "80",
	}
	assert.Equal(t, []string{"localhost:80"}, getKafkaAddr(cfg))
}
|
package main
import (
"fmt"
"testing"
)
/*Our assert library*/
// AssertEqual passes when item1 and item2 compare equal, printing PASS to
// stdout; otherwise it records a test failure showing both values.
// Note: comparison uses ==, so uncomparable types (slices, maps) panic.
func AssertEqual(t *testing.T, message string, item1, item2 interface{}) {
	if item1 == item2 {
		fmt.Println("PASS:", message)
		return
	}
	t.Error("FAILED:", message, "- item1:", item1, "item2:", item2)
}
// AssertNotEqual passes when item1 and item2 differ, printing PASS to
// stdout; otherwise it records a test failure showing both values.
func AssertNotEqual(t *testing.T, message string, item1, item2 interface{}) {
	if item1 != item2 {
		fmt.Println("PASS:", message)
		return
	}
	t.Error("FAILED:", message, "- item1:", item1, "item2:", item2)
}
// AssertTrue passes when item is true, printing PASS to stdout; otherwise
// it records a test failure showing the value.
func AssertTrue(t *testing.T, message string, item bool) {
	if !item {
		t.Error("FAILED:", message, "- item:", item)
		return
	}
	fmt.Println("PASS:", message)
}
// AssertFalse passes when item is false. The original delegated to
// AssertTrue with the negated value; that logic is inlined here, keeping
// the exact same output (the failure message shows the negated value, as
// before).
func AssertFalse(t *testing.T, message string, item bool) {
	negated := !item
	if negated {
		fmt.Println("PASS:", message)
	} else {
		t.Error("FAILED:", message, "- item:", negated)
	}
}
|
package plug
import (
"mqtts/core"
"mqtts/utils"
"strings"
)
// token errors: paho.mqtt.golang@v1.3.4/packets/packets.go
// error types
// unacceptable protocol version
// identifier rejected
// server Unavailable
// bad user name or password
// not Authorized
// network Error
// protocol Violation
// ClientIdCheck reports whether the clientId configured in opts is accepted
// by the target broker, logging the outcome.
//
// Only an explicit "identifier rejected" CONNACK marks the clientId as
// unavailable; any other connect error (network failure, bad credentials,
// ...) is still reported as available, matching the original behavior —
// NOTE(review): callers may want to distinguish those cases.
func ClientIdCheck(opts *core.TargetOptions) bool {
	utils.OutputInfoMessage(opts.Host, opts.Port, "Check if current clientId available...")
	client := core.GetMQTTClient(opts)
	err := client.Connect()
	// Idiom fix: early return instead of else-after-return (golint).
	if err != nil && strings.EqualFold(err.Error(), "identifier rejected") {
		utils.OutputInfoMessage(opts.Host, opts.Port, "ClientId "+client.ClientOptions.ClientID+" unavailable")
		return false
	}
	utils.OutputInfoMessage(opts.Host, opts.Port, "ClientId "+client.ClientOptions.ClientID+" available")
	return true
}
|
/*
Implements a console logger for stdout/stderr.
*/
package console
import (
"fmt"
"github.com/rightscale/rlog/common"
"os"
)
// Console logger (type exported for deprecated stdout module but fields are private).
type ConsoleLogger struct {
	removeNewlines bool     // true to replace newlines in each message before printing
	outputFile     *os.File // destination stream (os.Stdout or os.Stderr)
}
// Creates a logger that writes to stdout.
//
// removeNewlines: true to replace newlines
//
// return: instance of console logger
func NewStdoutLogger(removeNewlines bool) *ConsoleLogger {
	return &ConsoleLogger{
		removeNewlines: removeNewlines,
		outputFile:     os.Stdout,
	}
}
// Creates a logger that writes to stderr.
//
// removeNewlines: true to replace newlines
//
// return: instance of console logger
func NewStderrLogger(removeNewlines bool) *ConsoleLogger {
	return &ConsoleLogger{
		removeNewlines: removeNewlines,
		outputFile:     os.Stderr,
	}
}
// Intended to run in a separate goroutine. It prints log messages to console.
// The loop runs forever; there is no shutdown path — NOTE(review): the
// goroutine can only end with its process.
//
// dataChan: receives log messages.
//
// flushChan: receives flush command; the channel sent on it is answered with
// true once all pending messages have been drained.
func (conf *ConsoleLogger) LaunchModule(dataChan <-chan (*common.RlogMsg), flushChan chan (chan (bool))) {
	prefix := common.SyslogHeader()
	// wait forever on data and flush channel
	for {
		select {
		case logMsg := <-dataChan:
			// received log message, print it
			conf.printMsg(logMsg, prefix)
		case ret := <-flushChan:
			// flush and return success
			conf.flush(dataChan, prefix)
			ret <- true
		}
	}
}
// Prints a single formatted message to the logger's output stream.
//
// rawRlogMsg: log message received from channel.
//
// prefix: log prefix
func (conf *ConsoleLogger) printMsg(rawRlogMsg *common.RlogMsg, prefix string) {
	fmt.Fprintln(conf.outputFile, common.FormatMessage(rawRlogMsg, prefix, conf.removeNewlines))
}
// Flushes pending messages to console. Drains dataChan with non-blocking
// reads and returns as soon as the channel is empty.
//
// dataChan: data channel to access all pending messages
//
// prefix: log prefix
func (conf *ConsoleLogger) flush(dataChan <-chan (*common.RlogMsg), prefix string) {
	for {
		// perform non blocking read until the channel is empty
		select {
		case logMsg := <-dataChan:
			conf.printMsg(logMsg, prefix)
		default:
			// channel empty: everything pending has been printed
			return
		}
	}
}
|
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package invertedidx_test
import (
"math"
"strconv"
"testing"
"github.com/cockroachdb/cockroach/pkg/geo"
"github.com/cockroachdb/cockroach/pkg/geo/geoindex"
"github.com/cockroachdb/cockroach/pkg/geo/geopb"
"github.com/cockroachdb/cockroach/pkg/sql/inverted"
"github.com/cockroachdb/cockroach/pkg/sql/opt"
"github.com/cockroachdb/cockroach/pkg/sql/opt/invertedidx"
"github.com/cockroachdb/cockroach/pkg/sql/opt/norm"
"github.com/cockroachdb/cockroach/pkg/sql/opt/testutils"
"github.com/cockroachdb/cockroach/pkg/sql/opt/testutils/testcat"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/encoding"
"github.com/stretchr/testify/require"
)
// TestTryJoinGeoIndex verifies which join filters TryJoinInvertedIndex can
// translate into an inverted-index join condition on table t2's geometry /
// geography indexes, and what the resulting (possibly commuted) expression
// looks like. An empty invertedExpr means the join is expected to be
// rejected for that index.
func TestTryJoinGeoIndex(t *testing.T) {
	semaCtx := tree.MakeSemaContext()
	evalCtx := tree.NewTestingEvalContext(nil /* st */)
	tc := testcat.New()
	// Create the input table.
	if _, err := tc.ExecuteDDL(
		"CREATE TABLE t1 (geom1 GEOMETRY, geog1 GEOGRAPHY, geom11 GEOMETRY, geog11 GEOGRAPHY, " +
			"inet1 INET, bbox1 box2d)",
	); err != nil {
		t.Fatal(err)
	}
	// Create the indexed table.
	if _, err := tc.ExecuteDDL(
		"CREATE TABLE t2 (geom2 GEOMETRY, geog2 GEOGRAPHY, inet2 INET, bbox2 box2d, " +
			"INVERTED INDEX (geom2), INVERTED INDEX (geog2))",
	); err != nil {
		t.Fatal(err)
	}
	var f norm.Factory
	f.Init(evalCtx, tc)
	md := f.Metadata()
	tn1 := tree.NewUnqualifiedTableName("t1")
	tn2 := tree.NewUnqualifiedTableName("t2")
	tab1 := md.AddTable(tc.Table(tn1), tn1)
	tab2 := md.AddTable(tc.Table(tn2), tn2)
	// Ordinals of the two inverted indexes on t2.
	geomOrd, geogOrd := 1, 2
	testCases := []struct {
		filters      string
		indexOrd     int
		invertedExpr string
	}{
		{
			filters:      "st_covers(geom1, geom2)",
			indexOrd:     geomOrd,
			invertedExpr: "st_covers(geom1, geom2)",
		},
		{
			// When the indexed column is the first argument, the function is
			// commuted to its inverse so the index can be used.
			filters:      "st_covers(geom2, geom1)",
			indexOrd:     geomOrd,
			invertedExpr: "st_coveredby(geom1, geom2)",
		},
		{
			filters:      "st_coveredby(geog1, geog2)",
			indexOrd:     geogOrd,
			invertedExpr: "st_coveredby(geog1, geog2)",
		},
		{
			filters:      "st_coveredby(geog2, geog1)",
			indexOrd:     geogOrd,
			invertedExpr: "st_covers(geog1, geog2)",
		},
		{
			filters:      "st_containsproperly(geom2, geom1)",
			indexOrd:     geomOrd,
			invertedExpr: "st_coveredby(geom1, geom2)",
		},
		{
			filters:      "st_dwithin(geog2, geog1, 1)",
			indexOrd:     geogOrd,
			invertedExpr: "st_dwithin(geog1, geog2, 1)",
		},
		{
			filters:      "st_dfullywithin(geom2, geom1, 1)",
			indexOrd:     geomOrd,
			invertedExpr: "st_dfullywithin(geom1, geom2, 1)",
		},
		{
			filters:      "st_intersects(geom1, geom2)",
			indexOrd:     geomOrd,
			invertedExpr: "st_intersects(geom1, geom2)",
		},
		{
			filters:      "st_overlaps(geom2, geom1)",
			indexOrd:     geomOrd,
			invertedExpr: "st_intersects(geom1, geom2)",
		},
		{
			// Wrong index ordinal.
			filters:      "st_covers(geom1, geom2)",
			indexOrd:     geogOrd,
			invertedExpr: "",
		},
		{
			// We can perform a join using two geospatial functions on the same
			// indexed column, even if the input columns are different.
			filters:      "st_covers(geom1, geom2) AND st_covers(geom2, geom11)",
			indexOrd:     geomOrd,
			invertedExpr: "st_covers(geom1, geom2) AND st_coveredby(geom11, geom2)",
		},
		{
			// We can perform a join using two geospatial functions on the same
			// indexed column, even if the input columns are different.
			filters:      "st_covers(geog2, geog1) AND st_dwithin(geog11, geog2, 10)",
			indexOrd:     geogOrd,
			invertedExpr: "st_coveredby(geog1, geog2) AND st_dwithin(geog11, geog2, 10)",
		},
		{
			// We can perform a join using two geospatial functions on the same
			// indexed column, even if the input columns are different.
			filters:      "st_covers(geom1, geom2) OR st_covers(geom2, geom11)",
			indexOrd:     geomOrd,
			invertedExpr: "st_covers(geom1, geom2) OR st_coveredby(geom11, geom2)",
		},
		{
			// When functions affecting two different geospatial variables are OR-ed,
			// we cannot perform an inverted join.
			filters:      "st_covers(geom1, geom2) OR st_covers(geog1, geog2)",
			indexOrd:     geomOrd,
			invertedExpr: "",
		},
		{
			// We can constrain either index when the functions are AND-ed.
			filters:      "st_covers(geom1, geom2) AND st_covers(geog1, geog2)",
			indexOrd:     geomOrd,
			invertedExpr: "st_covers(geom1, geom2)",
		},
		{
			// We can constrain either index when the functions are AND-ed.
			filters:      "st_covers(geom1, geom2) AND st_covers(geog1, geog2)",
			indexOrd:     geogOrd,
			invertedExpr: "st_covers(geog1, geog2)",
		},
		{
			// Join conditions can be combined with index constraints.
			filters: "st_covers(geom1, geom2) AND " +
				"st_covers(geom2, 'LINESTRING ( 0 0, 0 2 )'::geometry)",
			indexOrd: geomOrd,
			invertedExpr: "st_covers(geom1, geom2) AND " +
				"st_coveredby('LINESTRING ( 0 0, 0 2 )'::geometry, geom2)",
		},
		{
			// Join conditions can be combined with index constraints.
			filters: "st_covers(geom1, geom2) AND " +
				"st_covers('LINESTRING ( 0 0, 0 2 )'::geometry, geom2) AND " +
				"st_covers('LINESTRING ( 0 0, 0 2 )'::geometry, geom1)",
			indexOrd: geomOrd,
			invertedExpr: "st_covers(geom1, geom2) AND " +
				"st_covers('LINESTRING ( 0 0, 0 2 )'::geometry, geom2)",
		},
		{
			// At least one column from the input is required.
			filters:      "st_covers(geom2, 'LINESTRING ( 0 0, 0 2 )'::geometry)",
			indexOrd:     geomOrd,
			invertedExpr: "",
		},
		{
			// AND with a non-geospatial function.
			filters:      "st_covers(geom1, geom2) AND inet_same_family(inet1, inet2)",
			indexOrd:     geomOrd,
			invertedExpr: "st_covers(geom1, geom2)",
		},
		{
			// OR with a non-geospatial function.
			filters:      "st_covers(geom1, geom2) OR inet_same_family(inet1, inet2)",
			indexOrd:     geomOrd,
			invertedExpr: "",
		},
		{
			// Arbitrarily complex join condition.
			filters: "st_covers(geog2, geog1) OR (" +
				"st_dwithin(geog11, geog2, 100) AND st_covers(geom1, geom2) AND " +
				"st_covers(geog2, 'SRID=4326;POINT(-40.23456 70.456772)'::geography)) AND " +
				"st_overlaps(geom2, geom1) AND " +
				"st_covers('SRID=4326;POINT(-42.89456 75.938299)'::geography, geog2)",
			indexOrd: geogOrd,
			invertedExpr: "st_coveredby(geog1, geog2) OR (" +
				"st_dwithin(geog11, geog2, 100) AND " +
				"st_coveredby('SRID=4326;POINT(-40.23456 70.456772)'::geography, geog2)) AND " +
				"st_covers('SRID=4326;POINT(-42.89456 75.938299)'::geography, geog2)",
		},
		// Bounding box operators.
		{
			filters:      "bbox1 ~ geom2",
			indexOrd:     geomOrd,
			invertedExpr: "st_covers(bbox1::geometry, geom2)",
		},
		{
			filters:      "geom2 ~ bbox1",
			indexOrd:     geomOrd,
			invertedExpr: "st_coveredby(bbox1::geometry, geom2)",
		},
		{
			filters:      "geom1 ~ geom2",
			indexOrd:     geomOrd,
			invertedExpr: "st_covers(geom1, geom2)",
		},
		{
			filters:      "geom2 ~ geom1",
			indexOrd:     geomOrd,
			invertedExpr: "st_coveredby(geom1, geom2)",
		},
		{
			filters:      "bbox1 && geom2",
			indexOrd:     geomOrd,
			invertedExpr: "st_intersects(bbox1::geometry, geom2)",
		},
		{
			filters:      "geom2 && bbox1",
			indexOrd:     geomOrd,
			invertedExpr: "st_intersects(bbox1::geometry, geom2)",
		},
		{
			filters:      "geom1 && geom2",
			indexOrd:     geomOrd,
			invertedExpr: "st_intersects(geom1, geom2)",
		},
		{
			filters:      "geom2 && geom1",
			indexOrd:     geomOrd,
			invertedExpr: "st_intersects(geom1, geom2)",
		},
		{
			filters:  "geom2 && geom1 AND 'BOX(1 2, 3 4)'::box2d ~ geom2",
			indexOrd: geomOrd,
			invertedExpr: "st_intersects(geom1, geom2) AND " +
				"st_covers('BOX(1 2, 3 4)'::box2d::geometry, geom2)",
		},
		{
			// Wrong index ordinal.
			filters:      "bbox1 ~ geom2",
			indexOrd:     geogOrd,
			invertedExpr: "",
		},
		{
			// At least one column from the input is required.
			filters:      "bbox2 ~ geom2",
			indexOrd:     geomOrd,
			invertedExpr: "",
		},
		{
			// At least one column from the input is required.
			filters:      "'BOX(1 2, 3 4)'::box2d ~ geom2",
			indexOrd:     geomOrd,
			invertedExpr: "",
		},
		{
			// Wrong types.
			filters:      "geom1::string ~ geom2::string",
			indexOrd:     geomOrd,
			invertedExpr: "",
		},
	}
	for _, tc := range testCases {
		t.Logf("test case: %v", tc)
		filters := testutils.BuildFilters(t, &f, &semaCtx, evalCtx, tc.filters)
		// The join's input columns are all the columns of t1.
		var inputCols opt.ColSet
		for i, n := 0, md.Table(tab1).ColumnCount(); i < n; i++ {
			inputCols.Add(tab1.ColumnID(i))
		}
		actInvertedExpr := invertedidx.TryJoinInvertedIndex(
			evalCtx.Context, &f, filters, tab2, md.Table(tab2).Index(tc.indexOrd), inputCols,
		)
		// nil result means the index cannot be used for this join.
		if actInvertedExpr == nil {
			if tc.invertedExpr != "" {
				t.Fatalf("expected %s, got <nil>", tc.invertedExpr)
			}
			continue
		}
		if tc.invertedExpr == "" {
			t.Fatalf("expected <nil>, got %v", actInvertedExpr)
		}
		// Compare by string form against the expected expression built through
		// the same factory.
		expInvertedExpr := testutils.BuildScalar(t, &f, &semaCtx, evalCtx, tc.invertedExpr)
		if actInvertedExpr.String() != expInvertedExpr.String() {
			t.Errorf("expected %v, got %v", expInvertedExpr, actInvertedExpr)
		}
	}
}
// TestTryFilterGeoIndex verifies which single-table filters
// TryFilterInvertedIndex accepts for the geometry / geography inverted
// indexes, and the pre-filter expression/column/type it derives. ok=false
// means the filter is expected not to constrain the index.
func TestTryFilterGeoIndex(t *testing.T) {
	semaCtx := tree.MakeSemaContext()
	evalCtx := tree.NewTestingEvalContext(nil /* st */)
	tc := testcat.New()
	if _, err := tc.ExecuteDDL(
		"CREATE TABLE t (geom GEOMETRY, geog GEOGRAPHY, INVERTED INDEX (geom), INVERTED INDEX (geog))",
	); err != nil {
		t.Fatal(err)
	}
	var f norm.Factory
	f.Init(evalCtx, tc)
	md := f.Metadata()
	tn := tree.NewUnqualifiedTableName("t")
	tab := md.AddTable(tc.Table(tn), tn)
	// Ordinals of the two inverted indexes on t.
	geomOrd, geogOrd := 1, 2
	testCases := []struct {
		filters             string
		indexOrd            int
		ok                  bool
		preFilterExpr       string
		preFilterCol        opt.ColumnID
		preFilterTypeFamily types.Family
	}{
		{
			filters:             "st_intersects('LINESTRING ( 0 0, 0 2 )'::geometry, geom)",
			indexOrd:            geomOrd,
			ok:                  true,
			preFilterExpr:       "st_intersects('LINESTRING ( 0 0, 0 2 )'::geometry, geom)",
			preFilterCol:        1,
			preFilterTypeFamily: types.GeometryFamily,
		},
		{
			// Still works with arguments commuted.
			filters:             "st_intersects(geom, 'LINESTRING ( 0 0, 0 2 )'::geometry)",
			indexOrd:            geomOrd,
			ok:                  true,
			preFilterExpr:       "st_intersects('LINESTRING ( 0 0, 0 2 )'::geometry, geom)",
			preFilterCol:        1,
			preFilterTypeFamily: types.GeometryFamily,
		},
		{
			filters:             "st_covers('SRID=4326;POINT(-40.23456 70.4567772)'::geography, geog)",
			indexOrd:            geogOrd,
			ok:                  true,
			preFilterExpr:       "st_covers('SRID=4326;POINT(-40.23456 70.4567772)'::geography, geog)",
			preFilterCol:        2,
			preFilterTypeFamily: types.GeographyFamily,
		},
		{
			// Still works with arguments commuted.
			filters:             "st_covers(geog, 'SRID=4326;POINT(-40.23456 70.4567772)'::geography)",
			indexOrd:            geogOrd,
			ok:                  true,
			preFilterExpr:       "st_coveredby('SRID=4326;POINT(-40.23456 70.4567772)'::geography, geog)",
			preFilterCol:        2,
			preFilterTypeFamily: types.GeographyFamily,
		},
		{
			// Wrong index ordinal.
			filters:  "st_covers('SRID=4326;POINT(-40.23456 70.4567772)'::geography, geog)",
			indexOrd: geomOrd,
			ok:       false,
		},
		{
			// Wrong index ordinal.
			filters:  "st_covers('LINESTRING ( 0 0, 0 2 )'::geometry, geom)",
			indexOrd: geogOrd,
			ok:       false,
		},
		{
			// When functions affecting two different geospatial variables are OR-ed,
			// we cannot constrain either index.
			filters: "st_equals('LINESTRING ( 0 0, 0 2 )'::geometry, geom) OR " +
				"st_coveredby(geog, 'SRID=4326;POINT(-40.23456 70.4567772)'::geography)",
			indexOrd: geomOrd,
			ok:       false,
		},
		{
			// We can constrain either index when the functions are AND-ed.
			filters: "st_equals('LINESTRING ( 0 0, 0 2 )'::geometry, geom) AND " +
				"st_coveredby(geog, 'SRID=4326;POINT(-40.23456 70.4567772)'::geography)",
			indexOrd:            geomOrd,
			ok:                  true,
			preFilterExpr:       "st_equals('LINESTRING ( 0 0, 0 2 )'::geometry, geom)",
			preFilterCol:        1,
			preFilterTypeFamily: types.GeometryFamily,
		},
		{
			// We can constrain either index when the functions are AND-ed.
			filters: "st_equals('LINESTRING ( 0 0, 0 2 )'::geometry, geom) AND " +
				"st_coveredby(geog, 'SRID=4326;POINT(-40.23456 70.4567772)'::geography)",
			indexOrd:            geogOrd,
			ok:                  true,
			preFilterExpr:       "st_covers('SRID=4326;POINT(-40.23456 70.4567772)'::geography, geog)",
			preFilterCol:        2,
			preFilterTypeFamily: types.GeographyFamily,
		},
		// Bounding box operators.
		{
			filters:             "'BOX(1 2, 3 4)'::box2d ~ geom",
			indexOrd:            geomOrd,
			ok:                  true,
			preFilterExpr:       "st_covers('POLYGON (( 1 2, 1 4, 3 4, 3 2, 1 2))'::geometry, geom)",
			preFilterCol:        1,
			preFilterTypeFamily: types.GeometryFamily,
		},
		{
			filters:             "geom ~ 'BOX(1 2, 3 4)'::box2d",
			indexOrd:            geomOrd,
			ok:                  true,
			preFilterExpr:       "st_coveredby('POLYGON (( 1 2, 1 4, 3 4, 3 2, 1 2))'::geometry, geom)",
			preFilterCol:        1,
			preFilterTypeFamily: types.GeometryFamily,
		},
		{
			filters:             "'LINESTRING ( 0 0, 0 2 )'::geometry ~ geom",
			indexOrd:            geomOrd,
			ok:                  true,
			preFilterExpr:       "st_covers('LINESTRING ( 0 0, 0 2 )'::geometry, geom)",
			preFilterCol:        1,
			preFilterTypeFamily: types.GeometryFamily,
		},
		{
			filters:             "geom ~ 'LINESTRING ( 0 0, 0 2 )'::geometry",
			indexOrd:            geomOrd,
			ok:                  true,
			preFilterExpr:       "st_coveredby('LINESTRING ( 0 0, 0 2 )'::geometry, geom)",
			preFilterCol:        1,
			preFilterTypeFamily: types.GeometryFamily,
		},
		{
			filters:             "'BOX(1 2, 3 4)'::box2d && geom",
			indexOrd:            geomOrd,
			ok:                  true,
			preFilterExpr:       "st_intersects('POLYGON (( 1 2, 1 4, 3 4, 3 2, 1 2))'::geometry, geom)",
			preFilterCol:        1,
			preFilterTypeFamily: types.GeometryFamily,
		},
		{
			filters:             "geom && 'BOX(1 2, 3 4)'::box2d",
			indexOrd:            geomOrd,
			ok:                  true,
			preFilterExpr:       "st_intersects('POLYGON (( 1 2, 1 4, 3 4, 3 2, 1 2))'::geometry, geom)",
			preFilterCol:        1,
			preFilterTypeFamily: types.GeometryFamily,
		},
		{
			filters:             "'LINESTRING ( 0 0, 0 2 )'::geometry && geom",
			indexOrd:            geomOrd,
			ok:                  true,
			preFilterExpr:       "st_intersects('LINESTRING ( 0 0, 0 2 )'::geometry, geom)",
			preFilterCol:        1,
			preFilterTypeFamily: types.GeometryFamily,
		},
		{
			filters:             "geom && 'LINESTRING ( 0 0, 0 2 )'::geometry",
			indexOrd:            geomOrd,
			preFilterExpr:       "st_intersects('LINESTRING ( 0 0, 0 2 )'::geometry, geom)",
			preFilterCol:        1,
			preFilterTypeFamily: types.GeometryFamily,
			ok:                  true,
		},
		{
			// Wrong index ordinal.
			filters:  "'BOX(1 2, 3 4)'::box2d ~ geom",
			indexOrd: geogOrd,
			ok:       false,
		},
	}
	for _, tc := range testCases {
		t.Logf("test case: %v", tc)
		filters := testutils.BuildFilters(t, &f, &semaCtx, evalCtx, tc.filters)
		// We're not testing that the correct SpanExpression is returned here;
		// that is tested elsewhere. This is just testing that we are constraining
		// the index when we expect to.
		spanExpr, _, remainingFilters, pfState, ok := invertedidx.TryFilterInvertedIndex(
			evalCtx,
			&f,
			filters,
			nil, /* optionalFilters */
			tab,
			md.Table(tab).Index(tc.indexOrd),
			nil, /* computedColumns */
		)
		if tc.ok != ok {
			t.Fatalf("expected %v, got %v", tc.ok, ok)
		}
		if ok {
			// Geospatial index scans are always inexact, so the span expression
			// must be neither unique nor tight, and the full original filter
			// must remain to be re-applied.
			if spanExpr.Unique {
				t.Fatalf("span expressions for geospatial indexes should never have Unique=true")
			}
			if spanExpr.Tight {
				t.Fatalf("span expressions for geospatial indexes should never have Tight=true")
			}
			if remainingFilters.String() != filters.String() {
				t.Errorf("expected remainingFilters=%v, got %v", filters, remainingFilters)
			}
			if len(tc.preFilterExpr) == 0 {
				require.Nil(t, pfState)
			} else {
				require.NotNil(t, pfState)
				pfExpr := testutils.BuildScalar(t, &f, &semaCtx, evalCtx, tc.preFilterExpr)
				require.Equal(t, pfExpr.String(), pfState.Expr.String())
				require.Equal(t, tc.preFilterCol, pfState.Col)
				require.Equal(t, tc.preFilterTypeFamily, pfState.Typ.Family())
			}
		}
	}
}
// TestPreFilterer checks PreFilterer.Bind/PreFilter against every ordered
// pair of the shapes below: expected[i][j] is whether shape i's bounding box
// can satisfy the relationship against bound shape j.
func TestPreFilterer(t *testing.T) {
	// Test cases do pre-filtering for (geoShapes[i], geoShapes[j]) for all i,
	// j.
	geoShapes := []string{
		"SRID=4326;POINT(0 0)",
		"SRID=4326;POINT(5 5)",
		"SRID=4326;LINESTRING(8 8, 9 9)",
		"SRID=4326;POLYGON((0 0, 5 0, 5 5, 0 5, 0 0))",
	}
	testCases := []struct {
		// The typ, relationship and relationshipParams determine how the
		// PreFilterer works.
		typ                *types.T
		relationship       geoindex.RelationshipType
		relationshipParams []tree.Datum
		shapes             []string
		expected           [][]bool
		// excludeFromPreFilters excludes shapes at the given indexes from being
		// used in Bind calls.
		excludeFromPreFilters []int
	}{
		{
			typ:          types.Geometry,
			relationship: geoindex.Intersects,
			shapes:       geoShapes,
			expected: [][]bool{
				{true, false, false, true},
				{false, true, false, true},
				{false, false, true, false},
				{true, true, false, true},
			},
		},
		{
			typ:          types.Geometry,
			relationship: geoindex.Covers,
			shapes:       geoShapes,
			expected: [][]bool{
				{true, false, false, true},
				{false, true, false, true},
				{false, false, true, false},
				{false, false, false, true},
			},
		},
		{
			typ:          types.Geometry,
			relationship: geoindex.CoveredBy,
			shapes:       geoShapes,
			expected: [][]bool{
				{true, false, false, false},
				{false, true, false, false},
				{false, false, true, false},
				{true, true, false, true},
			},
		},
		{
			typ:                types.Geometry,
			relationship:       geoindex.DWithin,
			relationshipParams: []tree.Datum{tree.NewDFloat(3)},
			shapes:             geoShapes,
			expected: [][]bool{
				{true, false, false, true},
				{false, true, true, true},
				{false, true, true, true},
				{true, true, true, true},
			},
		},
		{
			typ:                types.Geometry,
			relationship:       geoindex.DFullyWithin,
			relationshipParams: []tree.Datum{tree.NewDFloat(3)},
			shapes:             geoShapes,
			expected: [][]bool{
				{true, false, false, false},
				{false, true, false, false},
				{false, true, true, false},
				{true, true, false, true},
			},
		},
		{
			typ:          types.Geography,
			relationship: geoindex.Intersects,
			shapes:       geoShapes,
			expected: [][]bool{
				{true, false, false, true},
				{false, true, false, true},
				{false, false, true, false},
				{true, true, false, true},
			},
		},
		{
			typ:          types.Geography,
			relationship: geoindex.Covers,
			shapes:       geoShapes,
			expected: [][]bool{
				{true, false, false, true},
				{false, true, false, true},
				{false, false, true, false},
				{false, false, false, true},
			},
		},
		{
			typ:          types.Geography,
			relationship: geoindex.CoveredBy,
			shapes:       geoShapes,
			expected: [][]bool{
				{true, false, false, false},
				{false, true, false, false},
				{false, false, true, false},
				{true, true, false, true},
			},
		},
		{
			typ:                types.Geography,
			relationship:       geoindex.DWithin,
			relationshipParams: []tree.Datum{tree.NewDFloat(3)},
			shapes:             geoShapes,
			expected: [][]bool{
				{true, false, false, true},
				{false, true, false, true},
				{false, false, true, false},
				{true, true, false, true},
			},
		},
		{
			typ:                   types.Geography,
			relationship:          geoindex.DWithin,
			relationshipParams:    []tree.Datum{tree.NewDFloat(3)},
			shapes:                geoShapes,
			excludeFromPreFilters: []int{2},
			expected: [][]bool{
				{true, false, true},
				{false, true, true},
				{false, false, false},
				{true, true, true},
			},
		},
	}
	// encodeInv builds the inverted-index key encoding of a bounding box,
	// the same form PreFilter receives at runtime.
	encodeInv := func(bbox geopb.BoundingBox) inverted.EncVal {
		var b []byte
		b = encoding.EncodeGeoInvertedAscending(b)
		// Arbitrary cellid
		b = encoding.EncodeUvarintAscending(b, math.MaxUint32)
		b = encoding.EncodeGeoInvertedBBox(b, bbox.LoX, bbox.LoY, bbox.HiX, bbox.HiY)
		return b
	}
	for i, tc := range testCases {
		t.Run(strconv.Itoa(i+1), func(t *testing.T) {
			filterer := invertedidx.NewPreFilterer(tc.typ, tc.relationship, tc.relationshipParams)
			var toBind []tree.Datum
			var toPreFilter []inverted.EncVal
			// includeBind reports whether shape `index` participates in Bind.
			includeBind := func(index int) bool {
				for _, exclude := range tc.excludeFromPreFilters {
					if exclude == index {
						return false
					}
				}
				return true
			}
			// Parse every shape once into (a) a datum to bind and (b) an
			// encoded bounding box to pre-filter with.
			for i, shape := range tc.shapes {
				switch tc.typ {
				case types.Geometry:
					g, err := geo.ParseGeometry(shape)
					require.NoError(t, err)
					if includeBind(i) {
						toBind = append(toBind, tree.NewDGeometry(g))
					}
					toPreFilter = append(toPreFilter, encodeInv(*g.BoundingBoxRef()))
				case types.Geography:
					g, err := geo.ParseGeography(shape)
					require.NoError(t, err)
					if includeBind(i) {
						toBind = append(toBind, tree.NewDGeography(g))
					}
					rect := g.BoundingRect()
					toPreFilter = append(toPreFilter,
						encodeInv(geopb.BoundingBox{
							LoX: rect.Lng.Lo,
							HiX: rect.Lng.Hi,
							LoY: rect.Lat.Lo,
							HiY: rect.Lat.Hi,
						}))
				}
			}
			var preFilterState []interface{}
			for _, d := range toBind {
				preFilterState = append(preFilterState, filterer.Bind(d))
			}
			result := make([]bool, len(preFilterState))
			for i, enc := range toPreFilter {
				res, err := filterer.PreFilter(enc, preFilterState, result)
				require.NoError(t, err)
				// PreFilter's boolean result must agree with the OR of the
				// per-bound-datum results.
				expectedRes := false
				for _, b := range result {
					expectedRes = expectedRes || b
				}
				require.Equal(t, expectedRes, res)
				require.Equal(t, tc.expected[i], result)
			}
		})
	}
}
// TODO(sumeer): test for NewGeoDatumsToInvertedExpr, geoDatumsToInvertedExpr.
|
package lib
import (
"database/sql"
"fmt"
"strings"
"gylib/common"
"gylib/common/datatype"
)
// Mysqlcon is a string-concatenation SQL builder bound to the package-level
// mysqldb connection. Typical use: Dbinit, then chain Tbname/Where/Order/...
// and finish with Find/Select/Count/Insert/Update/Delete.
//
// NOTE(review): every statement is built by string concatenation; inputs
// must be trusted or pre-escaped (see checkstr / set_in_where).
type Mysqlcon struct {
	Tablename string // prefixed table name, set by Tbname
	Sql_where string // accumulated WHERE clause, without the "where" keyword
	Sql_order string // ORDER BY clause, without the keyword
	Sql_fields string // select list (not consumed by set_sql as written)
	Sql_limit string // LIMIT clause, without the keyword
	Db_perfix string // table-name prefix ("perfix" [sic] = prefix)
	Join_arr map[string]string // "tbname"/"fields" fragments built by Join
	LastSqltext string // last statement built or executed, for debugging
}
// Dbinit resets the builder so the same Mysqlcon can be reused for a new
// statement: all clause strings are cleared, the prefix is re-read from the
// package-level Db_perfix, and the join map is re-allocated.
func (mc *Mysqlcon) Dbinit() {
	mc.Tablename = ""
	mc.Sql_where = ""
	mc.Sql_order = ""
	mc.Sql_fields = ""
	mc.Sql_limit = ""
	mc.Db_perfix = Db_perfix
	mc.Join_arr = map[string]string{}
}
// Tbname selects the target table; the configured prefix is prepended to
// the bare table name.
func (mc *Mysqlcon) Tbname(name string) Querybuilder {
	mc.Tablename = mc.Db_perfix + name
	return mc
}
// Where adds a condition; subsequent calls are AND-ed onto the existing
// clause (each new condition parenthesized).
// NOTE(review): the condition is embedded verbatim in the SQL — never pass
// unsanitized user input.
func (mc *Mysqlcon) Where(where string) Querybuilder {
	if mc.Sql_where != "" {
		mc.Sql_where += " and (" + where + ")"
		return mc
	}
	mc.Sql_where = where
	return mc
}
// Order sets the ORDER BY clause (replacing any previous one).
func (mc *Mysqlcon) Order(orderstr string) Querybuilder {
	mc.Sql_order = orderstr
	return mc
}
// Limit sets the LIMIT clause (replacing any previous one).
func (mc *Mysqlcon) Limit(limitstr string) Querybuilder {
	mc.Sql_limit = limitstr
	return mc
}
// MapContains reports whether key is present in src.
func (mc *Mysqlcon) MapContains(src map[string]interface{}, key string) bool {
	_, ok := src[key]
	return ok
}
// MapContains_str reports whether key is present in the string map src.
func (mc *Mysqlcon) MapContains_str(src map[string]string, key string) bool {
	_, ok := src[key]
	return ok
}
// Start_tran begins a transaction and returns it, or nil when Begin fails.
// NOTE(review): the Begin error is discarded here, so callers cannot tell
// why a nil transaction was returned.
func (mc *Mysqlcon) Start_tran() *sql.Tx {
	tx, err := mysqldb.Begin()
	if err == nil {
		return tx
	}
	return nil
}
// Begin_tran executes the given statements inside a single transaction.
// It returns 1 on success and 0 on any failure; the transaction is rolled
// back when a statement fails.
func (this *Mysqlcon) Begin_tran(sqlstr []string) int {
	tx, err := mysqldb.Begin()
	if err != nil {
		return 0
	}
	for _, sqltext := range sqlstr {
		if _, err = tx.Exec(sqltext); err != nil {
			tx.Rollback()
			return 0
		}
	}
	// BUG FIX: the Commit error was previously ignored, so a failed commit
	// still reported success (1).
	if err = tx.Commit(); err != nil {
		return 0
	}
	return 1
}
// Get_key_eq_value builds a "<pk>=<value>" fragment for the current table,
// using the cached column metadata in G_dbtables when available and falling
// back to SHOW FULL COLUMNS otherwise. Returns "" when no primary-key
// column can be determined.
func (this *Mysqlcon) Get_key_eq_value(id string) string {
	tbname := this.Tablename
	result := ""
	if fd_list, ok := G_dbtables[tbname]; ok {
		for _, record := range fd_list.([]map[string]string) {
			if record["key"] == "PRI" {
				result = record["field"] + "=" + this.checkstr(record["type"], id)
				break
			}
		}
		return result
	}
	this.Update_redis(tbname)
	rows, err := mysqldb.Query("SHOW full COLUMNS FROM " + tbname)
	if err != nil {
		// BUG FIX: execution previously continued with a nil rows value,
		// panicking on rows.Close()/rows.Columns(). Log and bail out.
		fmt.Println("SHOW full COLUMNS FROM " + tbname)
		return result
	}
	defer rows.Close()
	columns, _ := rows.Columns()
	scanArgs := make([]interface{}, len(columns))
	values := make([]interface{}, len(columns))
	for i := range values {
		scanArgs[i] = &values[i]
	}
	for rows.Next() {
		// Copy the row into a lower-cased column-name -> string map.
		record := make(map[string]string)
		_ = rows.Scan(scanArgs...)
		for i, col := range values {
			if col != nil {
				record[strings.ToLower(columns[i])] = string(col.([]byte))
			}
		}
		if record["key"] == "PRI" {
			result = record["field"] + "=" + this.checkstr(record["type"], id)
			break
		}
	}
	return result
}
// Get_key_in_value builds "<pk> in (<values>)" for the current table from a
// comma-separated id list, using cached column metadata when available.
// Returns "" when the primary key cannot be determined.
func (this *Mysqlcon) Get_key_in_value(id string) string {
	result := ""
	if fd_list, ok := G_dbtables[this.Tablename]; ok {
		for _, record := range fd_list.([]map[string]string) {
			if record["key"] == "PRI" {
				result = record["field"] + " in (" + this.set_in_where(record["type"], id) + ")"
				break
			}
		}
		return result
	}
	this.Update_redis(this.Tablename)
	rows, err := mysqldb.Query("SHOW full COLUMNS FROM " + this.Tablename)
	if err != nil {
		// BUG FIX: the query error was previously discarded entirely and the
		// nil rows value dereferenced (panic on rows.Close()).
		return result
	}
	defer rows.Close()
	columns, _ := rows.Columns()
	scanArgs := make([]interface{}, len(columns))
	values := make([]interface{}, len(columns))
	for i := range values {
		scanArgs[i] = &values[i]
	}
	for rows.Next() {
		// Copy the row into a lower-cased column-name -> string map.
		record := make(map[string]string)
		_ = rows.Scan(scanArgs...)
		for i, col := range values {
			if col != nil {
				record[strings.ToLower(columns[i])] = string(col.([]byte))
			}
		}
		if record["key"] == "PRI" {
			result = record["field"] + " in (" + this.set_in_where(record["type"], id) + ")"
			break
		}
	}
	return result
}
// get_insert_sql builds the backtick-quoted column list and the matching
// value list for an INSERT from postdata, skipping the auto-increment
// primary key and any column absent from postdata. Column metadata comes
// from the G_dbtables cache, falling back to SHOW FULL COLUMNS.
func (this *Mysqlcon) get_insert_sql(postdata map[string]interface{}) (result string, val string) {
	if fd_list, ok := G_dbtables[this.Tablename]; ok {
		for _, record := range fd_list.([]map[string]string) {
			if record["key"] == "PRI" && record["extra"] == "auto_increment" {
				continue
			}
			if !this.MapContains(postdata, record["field"]) {
				continue
			}
			val_str := this.Type2str(postdata[record["field"]])
			if result == "" {
				result = "`" + record["field"] + "`"
				val = this.checkstr(record["type"], val_str)
			} else {
				result += ",`" + record["field"] + "`"
				val += "," + this.checkstr(record["type"], val_str)
			}
		}
		return result, val
	}
	this.Update_redis(this.Tablename)
	rows, err := mysqldb.Query("SHOW full COLUMNS FROM " + this.Tablename)
	if err != nil {
		// BUG FIX: the query error was ignored and the nil rows value used,
		// panicking on rows.Close()/rows.Columns().
		return result, val
	}
	defer rows.Close()
	columns, _ := rows.Columns()
	scanArgs := make([]interface{}, len(columns))
	values := make([]interface{}, len(columns))
	for i := range values {
		scanArgs[i] = &values[i]
	}
	for rows.Next() {
		// Copy the row into a lower-cased column-name -> string map.
		record := make(map[string]string)
		_ = rows.Scan(scanArgs...)
		for i, col := range values {
			if col != nil {
				record[strings.ToLower(columns[i])] = string(col.([]byte))
			}
		}
		if record["key"] == "PRI" && record["extra"] == "auto_increment" {
			continue
		}
		if !this.MapContains(postdata, record["field"]) {
			continue
		}
		val_str := this.Type2str(postdata[record["field"]])
		if result == "" {
			result = "`" + record["field"] + "`"
			val = this.checkstr(record["type"], val_str)
		} else {
			result += ",`" + record["field"] + "`"
			val += "," + this.checkstr(record["type"], val_str)
		}
	}
	return result, val
}
// Type2str renders an arbitrary value as a string: string slices are
// concatenated, byte slices converted directly, everything else via %v.
func (mc *Mysqlcon) Type2str(val interface{}) string {
	switch v := val.(type) {
	case []string:
		return strings.Join(v, "")
	case []uint8:
		return string(v)
	default:
		return fmt.Sprintf("%v", v)
	}
}
// Get_fields_sql builds "`field`=value" for fd_name from the cached column
// metadata of the current table; returns "" when the table is not cached or
// the field is unknown.
func (mc *Mysqlcon) Get_fields_sql(fd_name, val_name string) (result string) {
	fd_list, ok := G_dbtables[mc.Tablename]
	if !ok {
		return result
	}
	for _, record := range fd_list.([]map[string]string) {
		if record["field"] == fd_name {
			result = "`" + record["field"] + "`=" + mc.checkstr(record["type"], val_name)
			break
		}
	}
	return result
}
// get_update_sql builds the "`f`=v,`f2`=v2" SET clause for an UPDATE from
// postdata, skipping the auto-increment primary key and any column absent
// from postdata. Column metadata comes from the G_dbtables cache, falling
// back to SHOW FULL COLUMNS.
func (this *Mysqlcon) get_update_sql(postdata map[string]interface{}) (result string) {
	if fd_list, ok := G_dbtables[this.Tablename]; ok {
		for _, record := range fd_list.([]map[string]string) {
			if record["key"] == "PRI" && record["extra"] == "auto_increment" {
				continue
			}
			if !this.MapContains(postdata, record["field"]) {
				continue
			}
			val_str := this.Type2str(postdata[record["field"]])
			if result == "" {
				result = "`" + record["field"] + "`=" + this.checkstr(record["type"], val_str)
			} else {
				result += ",`" + record["field"] + "`=" + this.checkstr(record["type"], val_str)
			}
		}
		return result
	}
	this.Update_redis(this.Tablename)
	rows, err := mysqldb.Query("SHOW full COLUMNS FROM " + this.Tablename)
	if err != nil {
		// BUG FIX: the query error was ignored and the nil rows value used,
		// panicking on rows.Close()/rows.Columns().
		return result
	}
	defer rows.Close()
	columns, _ := rows.Columns()
	scanArgs := make([]interface{}, len(columns))
	values := make([]interface{}, len(columns))
	for i := range values {
		scanArgs[i] = &values[i]
	}
	for rows.Next() {
		// Copy the row into a lower-cased column-name -> string map.
		record := make(map[string]string)
		_ = rows.Scan(scanArgs...)
		for i, col := range values {
			if col != nil {
				record[strings.ToLower(columns[i])] = string(col.([]byte))
			}
		}
		if record["key"] == "PRI" && record["extra"] == "auto_increment" {
			continue
		}
		if !this.MapContains(postdata, record["field"]) {
			continue
		}
		val_str := this.Type2str(postdata[record["field"]])
		if result == "" {
			result = "`" + record["field"] + "`=" + this.checkstr(record["type"], val_str)
		} else {
			result += ",`" + record["field"] + "`=" + this.checkstr(record["type"], val_str)
		}
	}
	return result
}
// checkstr formats fdvalue for embedding in SQL based on the column type:
// numeric types pass through unquoted, everything else is single-quoted
// with backslash/quote escaping; "" becomes SQL null.
//
// SECURITY NOTE: this is hand-rolled escaping for string-concatenated SQL;
// prefer parameterized queries where the call sites allow it.
func (this *Mysqlcon) checkstr(fdtype string, fdvalue string) string {
	if fdvalue == "" {
		return "null"
	}
	if strings.Contains(fdtype, "tinyint") ||
		strings.Contains(fdtype, "double") ||
		strings.Contains(fdtype, "float") ||
		strings.Contains(fdtype, "int") ||
		strings.Contains(fdtype, "decimal") {
		return fdvalue
	}
	// BUG FIX: backslashes must be escaped before quotes (as the previously
	// commented-out code did); otherwise a value ending in `\` produces
	// `'...\'`, neutralizing the closing quote — an injection vector.
	result := strings.Replace(fdvalue, "\\", "\\\\", -1)
	return "'" + strings.Replace(result, "'", "\\'", -1) + "'"
}
// set_in_where formats a comma-separated id list for use inside "IN (...)":
// numeric column types pass through raw, other types get each element
// single-quoted and escaped. "" becomes SQL null.
func (this *Mysqlcon) set_in_where(fdtype string, fdvalue string) string {
	if fdvalue == "" {
		return "null"
	}
	if strings.Contains(fdtype, "tinyint") ||
		strings.Contains(fdtype, "double") ||
		strings.Contains(fdtype, "float") ||
		strings.Contains(fdtype, "int") ||
		strings.Contains(fdtype, "decimal") {
		return fdvalue
	}
	arr := strings.Split(fdvalue, ",")
	result := ""
	for _, v := range arr {
		// BUG FIX: escape backslashes before quotes; a trailing `\` could
		// otherwise neutralize the closing quote (injection vector).
		esc := strings.Replace(v, "\\", "\\\\", -1)
		esc = strings.Replace(esc, "'", "\\'", -1)
		if result == "" {
			result = "'" + esc + "'"
		} else {
			result += ",'" + esc + "'"
		}
	}
	return result
}
// Insert builds and executes an INSERT for the current table from postdata.
func (mc *Mysqlcon) Insert(postdata map[string]interface{}) (sql.Result, error) {
	fields, value := mc.get_insert_sql(postdata)
	sqltext := fmt.Sprintf("insert into %v (%v) values (%v) ", mc.Tablename, fields, value)
	mc.LastSqltext = sqltext
	return mysqldb.Exec(sqltext)
}
// Delete removes the rows matching the current where clause.
// NOTE(review): an empty Sql_where yields invalid SQL ("... where "), which
// at least prevents an accidental full-table delete.
func (mc *Mysqlcon) Delete() (sql.Result, error) {
	sqltext := fmt.Sprintf(" delete from %v where %v", mc.Tablename, mc.Sql_where)
	mc.LastSqltext = sqltext
	return mysqldb.Exec(sqltext)
}
// SetDec decrements column fdname by quantity for rows matching the current
// where clause.
func (mc *Mysqlcon) SetDec(fdname string, quantity int) (sql.Result, error) {
	sqltext := fmt.Sprintf("update %v set %v=%v-%v where %v", mc.Tablename, fdname, fdname, quantity, mc.Sql_where)
	mc.LastSqltext = sqltext
	return mysqldb.Exec(sqltext)
}
// SetInc increments column fdname by quantity for rows matching the current
// where clause.
func (mc *Mysqlcon) SetInc(fdname string, quantity int) (sql.Result, error) {
	sqltext := fmt.Sprintf("update %v set %v=%v+%v where %v", mc.Tablename, fdname, fdname, quantity, mc.Sql_where)
	mc.LastSqltext = sqltext
	return mysqldb.Exec(sqltext)
}
// Update builds a SET clause from postdata and runs the UPDATE against the
// current table and where clause.
func (mc *Mysqlcon) Update(postdata map[string]interface{}) (sql.Result, error) {
	setClause := mc.get_update_sql(postdata)
	sqltext := fmt.Sprintf("update %v set %v where %v", mc.Tablename, setClause, mc.Sql_where)
	mc.LastSqltext = sqltext
	return mysqldb.Exec(sqltext)
}
// Get_Insert builds the INSERT statement for postdata without executing it.
func (mc *Mysqlcon) Get_Insert(postdata map[string]interface{}) string {
	fields, value := mc.get_insert_sql(postdata)
	sqltext := fmt.Sprintf("insert into %v (%v) values (%v) ", mc.Tablename, fields, value)
	mc.LastSqltext = sqltext
	return sqltext
}
// Get_Update builds the UPDATE statement for postdata without executing it.
func (mc *Mysqlcon) Get_Update(postdata map[string]interface{}) string {
	setClause := mc.get_update_sql(postdata)
	sqltext := fmt.Sprintf("update %v set %v where %v", mc.Tablename, setClause, mc.Sql_where)
	mc.LastSqltext = sqltext
	return sqltext
}
// Query runs sqltext and returns each row as a column-name -> string map;
// nil is returned on query error or when no rows matched. NULL columns map
// to "".
func (mc *Mysqlcon) Query(sqltext string) []map[string]string {
	mc.LastSqltext = sqltext
	rows, err := mysqldb.Query(sqltext)
	if err != nil {
		return nil
	}
	defer rows.Close()
	// scanArgs[i] points at values[i] so Scan can fill every column as a
	// raw driver value.
	columns, _ := rows.Columns()
	values := make([]interface{}, len(columns))
	scanArgs := make([]interface{}, len(columns))
	for i := range values {
		scanArgs[i] = &values[i]
	}
	var result []map[string]string
	for rows.Next() {
		_ = rows.Scan(scanArgs...)
		record := make(map[string]string, len(columns))
		for i, col := range values {
			if col == nil {
				record[columns[i]] = ""
			} else {
				record[columns[i]] = string(col.([]byte))
			}
		}
		result = append(result, record)
	}
	if len(result) == 0 {
		return nil
	}
	return result
}
// Excute runs an arbitrary statement and records it as the last SQL.
// (Name kept for backward compatibility; "Execute" was intended.)
func (mc *Mysqlcon) Excute(sqltext string) (sql.Result, error) {
	mc.LastSqltext = sqltext
	return mysqldb.Exec(sqltext)
}
//
//func (this *Mysqlcon) delete(fields ...string) Querybuilder {
// this.Tokens = append(this.Tokens, "SELECT", strings.Join(fields,","))
// return qb
//}
// Join registers a JOIN clause. The first call seeds the from-clause with
// the current table and initializes the select list; later calls append.
// "fileds" [sic] lists extra columns to select from the joined table.
func (mc *Mysqlcon) Join(tbname string, jointype string, where string, fileds string) Querybuilder {
	clause := " " + jointype + " " + Db_perfix + tbname + " on " + where
	if mc.Join_arr["tbname"] == "" {
		mc.Join_arr["tbname"] = mc.Tablename + clause
		mc.Join_arr["fields"] = mc.Tablename + ".*"
		if fileds != "" {
			mc.Join_arr["fields"] += "," + fileds
		}
	} else {
		mc.Join_arr["tbname"] += clause
		if fileds != "" {
			mc.Join_arr["fields"] += "," + fileds
		}
	}
	return mc
}
// set_sql assembles the head of a SELECT: flag 0 produces a column select,
// any other flag a count(...) select; join information is honoured when
// present in Join_arr.
func (mc *Mysqlcon) set_sql(flag int) string {
	joined := mc.MapContains_str(mc.Join_arr, "tbname")
	if flag != 0 {
		if joined {
			return "select count(" + mc.Tablename + ".*) as ct " + " from " + mc.Join_arr["tbname"]
		}
		return "select count(*) as ct from " + mc.Tablename
	}
	if !joined {
		return "select * from " + mc.Tablename
	}
	if mc.Join_arr["fields"] != "" {
		return "select " + mc.Join_arr["fields"] + " from " + mc.Join_arr["tbname"]
	}
	return "select " + mc.Tablename + ".* from " + mc.Tablename
}
// Find runs the built select with "limit 1" and returns the single row as
// a column -> string map, or nil on error / no row. NULL columns map to "".
func (mc *Mysqlcon) Find() map[string]string {
	sqltext := mc.set_sql(0)
	if mc.Sql_where != "" {
		sqltext += " where " + mc.Sql_where
	}
	if mc.Sql_order != "" {
		sqltext += " order by " + mc.Sql_order
	}
	sqltext += " limit 1"
	mc.LastSqltext = sqltext
	rows, err := mysqldb.Query(sqltext)
	if err != nil || rows == nil {
		return nil
	}
	defer rows.Close()
	// scanArgs[i] points at values[i] so Scan can fill every column as a
	// raw driver value.
	columns, _ := rows.Columns()
	values := make([]interface{}, len(columns))
	scanArgs := make([]interface{}, len(columns))
	for i := range values {
		scanArgs[i] = &values[i]
	}
	record := make(map[string]string, len(columns))
	for rows.Next() {
		_ = rows.Scan(scanArgs...)
		for i, col := range values {
			if col == nil {
				record[columns[i]] = ""
			} else {
				record[columns[i]] = string(col.([]byte))
			}
		}
	}
	if len(record) == 0 {
		return nil
	}
	return record
}
// Count runs the count-select for the current table/where and returns the
// row count (0 when the scan fails; the Scan error is deliberately ignored,
// matching the builder's best-effort style).
func (mc *Mysqlcon) Count() int64 {
	sqltext := mc.set_sql(1)
	if mc.Sql_where != "" {
		sqltext += " where " + mc.Sql_where
	}
	var ct int64
	mysqldb.QueryRow(sqltext).Scan(&ct)
	return ct
}
// Sum replaces "count(*)" with "sum(fd)" in the count-select and returns
// the scanned total (0 on scan failure).
func (mc *Mysqlcon) Sum(fd string) float64 {
	sqltext := strings.Replace(mc.set_sql(1), "count(*)", "sum("+fd+")", -1)
	if mc.Sql_where != "" {
		sqltext += " where " + mc.Sql_where
	}
	var total float64
	mysqldb.QueryRow(sqltext).Scan(&total)
	return total
}
// Select runs the built select (where/order/limit applied) and returns all
// rows as column -> string maps; nil on error or empty result. NULL columns
// map to "".
func (mc *Mysqlcon) Select() []map[string]string {
	sqltext := mc.set_sql(0)
	if mc.Sql_where != "" {
		sqltext += " where " + mc.Sql_where
	}
	if mc.Sql_order != "" {
		sqltext += " order by " + mc.Sql_order
	}
	if mc.Sql_limit != "" {
		sqltext += " limit " + mc.Sql_limit
	}
	mc.LastSqltext = sqltext
	rows, err := mysqldb.Query(sqltext)
	if err != nil || rows == nil {
		return nil
	}
	defer rows.Close()
	// scanArgs[i] points at values[i] so Scan can fill every column as a
	// raw driver value.
	columns, _ := rows.Columns()
	values := make([]interface{}, len(columns))
	scanArgs := make([]interface{}, len(columns))
	for i := range values {
		scanArgs[i] = &values[i]
	}
	var result []map[string]string
	for rows.Next() {
		_ = rows.Scan(scanArgs...)
		record := make(map[string]string, len(columns))
		for i, col := range values {
			if col == nil {
				record[columns[i]] = ""
			} else {
				record[columns[i]] = string(col.([]byte))
			}
		}
		result = append(result, record)
	}
	if len(result) == 0 {
		return nil
	}
	return result
}
// GetLastSql returns the most recently built or executed SQL statement,
// useful for debugging.
func (this *Mysqlcon) GetLastSql() string {
	return this.LastSqltext
}
// Get_where_data converts a form-style map into an AND-joined WHERE
// fragment: keys containing "S_" become exact matches (built through
// get_update_sql on the stripped key) and keys containing "I_" become
// substring matches via MySQL locate(). Blank values are skipped.
//
// NOTE(review): strings.Contains matches "S_"/"I_" anywhere in the key,
// not just as a prefix, and strings.Replace strips every occurrence —
// confirm callers only pass "S_x"/"I_x" style keys. Map iteration order is
// random, so the order of the AND terms varies between calls.
func (this *Mysqlcon) Get_where_data(postdata map[string]interface{}) string {
	var result string
	for key, val := range postdata {
		val_str := strings.TrimSpace(this.Type2str(val))
		if val_str != "" {
			if strings.Contains(key, "S_") {
				key1 := strings.Replace(key, "S_", "", -1)
				// Single-entry map so get_update_sql renders `key1`=value.
				postdatakey := make(map[string]interface{})
				postdatakey[key1] = val_str
				if result == "" {
					result = this.get_update_sql(postdatakey)
				} else {
					result += " and " + this.get_update_sql(postdatakey)
				}
			}
			if strings.Contains(key, "I_") {
				// Escape quotes for direct embedding inside locate('...').
				val_str = strings.Replace(val_str, "'", "\\'", -1)
				key1 := strings.Replace(key, "I_", "", -1)
				if result == "" {
					result = "locate('" + val_str + "'," + this.Tablename + "." + key1 + ")>0"
				} else {
					result += " and locate('" + val_str + "'," + this.Tablename + "." + key1 + ")>0"
				}
			}
		}
	}
	return (result)
}
// Get_new_add returns a blank "record" template: a map with one
// empty-string entry per column of the current table, using cached column
// metadata when available and SHOW FULL COLUMNS otherwise.
func (this *Mysqlcon) Get_new_add() map[string]string {
	result := make(map[string]string)
	if fd_list, ok := G_dbtables[this.Tablename]; ok {
		for _, v := range fd_list.([]map[string]string) {
			result[v["field"]] = ""
		}
		return result
	}
	this.Update_redis(this.Tablename)
	rows, err := mysqldb.Query("SHOW full COLUMNS FROM " + this.Tablename)
	if err != nil {
		// BUG FIX: the query error was discarded and the nil rows value
		// dereferenced; return the (empty) template instead of panicking.
		return result
	}
	defer rows.Close()
	columns, _ := rows.Columns()
	scanArgs := make([]interface{}, len(columns))
	values := make([]interface{}, len(columns))
	for i := range values {
		scanArgs[i] = &values[i]
	}
	for rows.Next() {
		record := make(map[string]string)
		_ = rows.Scan(scanArgs...)
		for i, col := range values {
			if col != nil {
				record[strings.ToLower(columns[i])] = string(col.([]byte))
				// NOTE(review): preserved original quirk — result is keyed on
				// record["field"] before the whole row is scanned, so a stray
				// "" key can appear until the Field column is reached.
				result[record["field"]] = ""
			}
		}
	}
	return result
}
// Update_redis refreshes the cached column metadata for tbname in
// G_dbtables (despite the name, the cache is an in-process map), with all
// column-name keys lower-cased via common.Tolow_map_name.
func (mc *Mysqlcon) Update_redis(tbname string) {
	list := mc.Query("SHOW full COLUMNS FROM " + tbname)
	if list == nil {
		return
	}
	data_list := make([]map[string]string, 0, len(list))
	for _, val := range list {
		col := make(map[string]string, len(val))
		for key, value := range val {
			col[common.Tolow_map_name(key)] = value
		}
		data_list = append(data_list, col)
	}
	G_dbtables[tbname] = data_list
}
// Get_select_data fills in display names for the lookup fields of one row:
// for every lookup descriptor configured under G_fd_list[masterdb] it
// queries the referenced list table and stores the chosen display column in
// d_data under "<field>_name" ("" when no row matches). The builder state
// is reset (Dbinit) around each lookup, so any in-progress query on this
// receiver is discarded.
func (this *Mysqlcon) Get_select_data(d_data map[string]string,masterdb string) (map[string]string) {
	data,ok := G_fd_list[masterdb]
	if (ok) {
		for _, v := range data.([]map[string]string) {
			// Strip the table prefix to get the bare list-table name.
			listname := strings.Replace(v["list_tb_name"], this.Db_perfix, "", -1)
			tbname := strings.Replace(v["list_tb_name"], this.Db_perfix, "", -1)
			listname = strings.Replace(listname, "_", "", -1)
			// NOTE(review): listname is computed but never used below.
			this.Dbinit()
			where := v["list_where"]
			list_val := v["list_val"]
			list_display := datatype.Type2str(v["list_display"])
			// Combine the configured filter with "<list_val>=<current value>".
			if (where != "") {
				where += " and " + this.Tbname(tbname).Get_fields_sql(list_val, d_data[v["name"]])
			} else {
				where = this.Tbname(tbname).Get_fields_sql(list_val, d_data[v["name"]])
			}
			// Reset builder state again before running the actual lookup.
			this.Dbinit()
			list_data := this.Tbname(tbname).Where(where).Find()
			if (list_data != nil) {
				d_data[v["name"]+"_name"] = list_data[list_display]
			} else {
				d_data[v["name"]+"_name"] = ""
			}
		}
	}
	return d_data
}
//func (this *Mysqlcon)Table_json(){
// rows:=this.Query("SHOW full COLUMNS FROM " + this.Tablename)
// json_str,_:=json.Marshal(rows)
// DataTable[this.Tablename]=string(json_str)
//}
//
//func (this *Mysqlcon) Get_new_str()map[string]string{
// table_str,ok:=DataTable[this.Tablename]
// if(!ok){
// this.Table_json()
// table_str=DataTable[this.Tablename]
// }
// rows:=make([]map[string]string,0)
// json.Unmarshal([]byte(table_str),&rows)
// record := make(map[string]string)
// result := make(map[string]string)
// for _,val:=range rows{
// for i, col := range val {
// if(strings.Trim(col,"")==""){
// continue
// }
// if i!="" && col != "" {
// //fmt.Print("%h",col)
// record[strings.ToLower(i)] = col
// result[record["field"]] = "0"
// }
// }
// }
// return result
//}
|
package stars
import (
"github.com/faiface/pixel"
"github.com/faiface/pixel/imdraw"
"golang.org/x/image/colornames"
"math"
)
const (
	// Seed is the fixed hash seed fed to mix so the star field is
	// deterministic across frames and runs.
	Seed = 0x9d2c5681
)
// Static draws a static star field centered on the origin in three passes:
// a dense dark-gray layer (starscale 4 => smallest cells, most stars), a
// medium gray layer, and a sparse bright white layer.
func Static(imd *imdraw.IMDraw, bounds pixel.Rect) {
	imd.Color = colornames.Darkgray
	Draw(imd, pixel.ZV, bounds, 4)
	imd.Color = colornames.Gray
	Draw(imd, pixel.ZV, bounds, 2)
	imd.Color = colornames.White
	Draw(imd, pixel.ZV, bounds, 1)
}
// Draw scatters three hash-placed points into every size×size cell of a
// grid covering the camera view, so stars stay fixed in world space as the
// camera moves. starscale divides the larger view dimension to get the cell
// size (a larger starscale means smaller cells, hence more stars).
func Draw(imd *imdraw.IMDraw, cam pixel.Vec, bounds pixel.Rect, starscale int) {
	w := int(bounds.W())
	h := int(bounds.H())
	size := int(math.Max(bounds.W(), bounds.H())) / starscale
	// Bottom-left corner of the view in world coordinates.
	xoff := int(cam.X) - w/2
	yoff := int(cam.Y) - h/2
	// Snap the start position to the cell grid, one cell of margin.
	sx := xoff/size*size - size
	sy := yoff/size*size - size
	for i := sx; i <= xoff+w+size; i += size {
		for j := sy; j <= yoff+h+size; j += size {
			// Deterministic per-cell hash; 3 bits are consumed per coordinate.
			hash := mix(Seed, i, j)
			for n := 0; n < 3; n++ {
				// NOTE(review): hash can be negative, making hash%size negative
				// and shifting a point one cell down/left — visually harmless,
				// but confirm it is intended.
				px := (hash % size) + i
				hash >>= 3
				py := (hash % size) + j
				hash >>= 3
				imd.Push(pixel.Vec{float64(px), float64(py)})
				imd.Circle(1, 0)
			}
		}
	}
}
// mix is Bob Jenkins' 96-bit mix function: three rounds of
// subtract-and-xor-shift that diffuse the three inputs into the returned
// value. Used here as a cheap deterministic per-cell hash.
func mix(a, b, c int) int {
	// Round 1.
	a -= b + c
	a ^= c >> 13
	b -= c + a
	b ^= a << 8
	c -= a + b
	c ^= b >> 13
	// Round 2.
	a -= b + c
	a ^= c >> 12
	b -= c + a
	b ^= a << 16
	c -= a + b
	c ^= b >> 5
	// Round 3.
	a -= b + c
	a ^= c >> 3
	b -= c + a
	b ^= a << 10
	c -= a + b
	c ^= b >> 15
	return c
}
|
package main
import (
"fmt"
"io/ioutil"
"log"
"net/http"
)
// SendGetRequest forwards the incoming request's URI and headers to the
// configured API backend as a GET and returns the upstream headers/body.
// NOTE(review): a fresh http.Client without a timeout is created per call;
// consider sharing one client with a timeout.
func SendGetRequest(r *http.Request) (APIResponse, error) {
	if CONFIGS.Debug {
		log.Printf("SendGetRequest Host:%s,Header:%v,URI:%v\n", r.Host, r.Header, r.URL.RequestURI())
	}
	data := APIResponse{Type: "origin"}
	target := fmt.Sprintf("http://%s%s", CONFIGS.API.Server, r.URL.RequestURI())
	req, err := http.NewRequest("GET", target, nil)
	if err != nil {
		log.Println("Generate Request Failed:", err)
		return data, err
	}
	req.Header = r.Header
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		log.Println("SendGetRequest Send Error:", err)
		return data, err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Println("SendGetRequest Body Error:", err)
		return data, err
	}
	data.Header = resp.Header
	data.Body = body
	return data, nil
}
// APIResponse is the payload relayed back to our caller after proxying a
// request upstream.
type APIResponse struct {
	Type string // response kind; the Send* helpers always set "origin"
	Header http.Header // upstream response headers
	Body interface{} // raw upstream body ([]byte when set by the Send* helpers)
}
// SendPostRequest forwards the incoming request's URI, headers and body to
// the configured API backend as a POST and returns the upstream
// headers/body.
// NOTE(review): a fresh http.Client without a timeout is created per call;
// consider sharing one client with a timeout.
func SendPostRequest(r *http.Request) (APIResponse, error) {
	if CONFIGS.Debug {
		log.Printf("SendPostRequest Host:%s,Header:%v,URI:%v\n", r.Host, r.Header, r.URL.RequestURI())
	}
	data := APIResponse{Type: "origin"}
	target := fmt.Sprintf("http://%s%s", CONFIGS.API.Server, r.URL.RequestURI())
	req, err := http.NewRequest("POST", target, r.Body)
	if err != nil {
		log.Println("SendPostRequest Generate Request Failed:", err)
		return data, err
	}
	req.Header = r.Header
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		log.Println("SendPostRequest Send Error:", err)
		return data, err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Println("SendPostRequest Body Error:", err)
		return data, err
	}
	data.Header = resp.Header
	data.Body = body
	return data, nil
}
// SendCustomRequest forwards the incoming request to the configured API
// backend using the caller's original HTTP method, headers and body, and
// returns the upstream headers/body.
// NOTE(review): a fresh http.Client without a timeout is created per call;
// consider sharing one client with a timeout.
func SendCustomRequest(r *http.Request) (APIResponse, error) {
	if CONFIGS.Debug {
		log.Printf("SendCustomRequest Host:%s,Header:%v,URI:%v\n", r.Host, r.Header, r.URL.RequestURI())
	}
	data := APIResponse{Type: "origin"}
	target := fmt.Sprintf("http://%s%s", CONFIGS.API.Server, r.URL.RequestURI())
	req, err := http.NewRequest(r.Method, target, r.Body)
	if err != nil {
		log.Println("SendCustomRequest Generate Request Failed:", err)
		return data, err
	}
	req.Header = r.Header
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		log.Println("SendCustomRequest Send Error:", err)
		return data, err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Println("SendCustomRequest Body Error:", err)
		return data, err
	}
	data.Header = resp.Header
	data.Body = body
	return data, nil
}
|
package graph
import (
"fmt"
"io/ioutil"
"os"
"testing"
)
// TestLargeGraph builds a graph over nodes 0..79 where j links to i when
// gcd(i, j) != 1, then renders the normalized layout before and after
// minimization to SVG files.
func TestLargeGraph(t *testing.T) {
	g := NewWithLossCombined(NewGraph())
	g.Iter = 10000
	g.Lambda = 1e-8
	g.L2 = 1e-3
	g.AnnW = 1e4
	g.Repw = 1e3
	g.DistTargt = 0.1
	g.DistTargtW = 10.
	for i := 0; i < 80; i++ {
		g.Add(0, 0, fmt.Sprintf("%d", i))
		for j := 1; j < i; j++ {
			if GCD(i, j) != 1 {
				g.Link(j, i)
			}
		}
	}
	g.Shuffle()
	// writeSVG renders the current layout to name, failing the test on
	// any write error.
	writeSVG := func(name string) {
		if err := ioutil.WriteFile(name, []byte(g.ToSVG()), os.ModePerm); err != nil {
			t.Fatal(err)
		}
	}
	g.Normalize()
	writeSVG("ex_large_normalized.svg")
	fmt.Println("\n-----------------")
	g.Minimize()
	g.Normalize()
	writeSVG("ex_large_minimized_normalized.svg")
	fmt.Println("\n-----------------")
}
// TestGCD checks GCD on coprime, divisor and unit argument pairs, in both
// argument orders.
func TestGCD(t *testing.T) {
	cases := []struct{ a, b, want int }{
		{3, 8, 1},
		{8, 3, 1},
		{3, 15, 3},
		{15, 3, 3},
		{1, 15, 1},
		{15, 1, 1},
	}
	for _, c := range cases {
		if GCD(c.a, c.b) != c.want {
			t.Fatal("GCD calculation error")
		}
	}
}
// GCD returns the greatest common divisor computed with the Euclidean
// algorithm, written here in its tail-recursive form.
func GCD(a, b int) int {
	if b == 0 {
		return a
	}
	return GCD(b, a%b)
}
|
package iteration
// repeatCount is the fixed number of repetitions Repeat produces.
const repeatCount int = 5

// Repeat returns character repeated repeatCount times.
func Repeat(character string) (repeated string) {
	for i := 0; i < repeatCount; i++ {
		// BUG FIX: the loop previously appended the literal "a" instead of
		// the character argument, so the parameter was ignored entirely.
		repeated += character
	}
	return repeated
}
|
package dist
import (
"fmt"
"testing"
)
// Test_Erlang draws ~1e5 Erlang(k=1, lambda=2) variates, prints the
// theoretical mean and variance next to the sample mean, and renders a
// histogram of the draws. (Purely observational; no assertions.)
func Test_Erlang(t *testing.T) {
	lambda := float64(2)
	k := 1
	d := ErlangDistribution{
		DistributionType: "Erlang",
	}
	numbers := make([]float64, 0, 99999)
	for i := 1; i < 100000; i++ {
		// RandVar's error is intentionally ignored; bad draws just skew the
		// sample mean slightly.
		n, _ := d.RandVar(k, lambda)
		numbers = append(numbers, n)
	}
	m := ArrayMean(numbers)
	ev := d.ExpectedValue(k, lambda)
	v := d.Variance(k, lambda)
	fmt.Printf("%v Distribution Expected value for lambda %v, k %v = %v", d.DistributionType, lambda, k, ev)
	fmt.Println("")
	fmt.Printf("%v Distribution Variance %v", d.DistributionType, v)
	fmt.Println("")
	fmt.Printf("Actual mean of array values %v", m)
	fmt.Println("")
	HistPlot(numbers)
}
|
// Copyright 2019 Yunion
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package shell
import (
"yunion.io/x/onecloud/pkg/multicloud/ecloud"
"yunion.io/x/onecloud/pkg/util/shellutils"
)
func init() {
	// disk-list takes no arguments.
	type VDiskListOptions struct {
	}
	shellutils.R(&VDiskListOptions{}, "disk-list", "List disks", func(cli *ecloud.SRegion, args *VDiskListOptions) error {
		disks, err := cli.GetDisks()
		if err != nil {
			return err
		}
		printList(disks, 0, 0, 0, nil)
		return nil
	})
}
|
package main
// Leetcode 457. (medium)
// circularArrayLoop reports whether nums contains a cycle of length > 1
// whose moves all share one direction (sign). Each walked same-direction
// chain is zeroed out afterwards so every element is visited only a
// constant number of times — O(n) overall. The input slice is mutated.
func circularArrayLoop(nums []int) bool {
	for start, dir := range nums {
		if dir == 0 {
			// Already erased by a previous walk.
			continue
		}
		// Floyd's tortoise/hare, constrained to moves matching dir's sign.
		slow := start
		fast := nextCircularArrayLoop(start, nums)
		for dir*nums[fast] > 0 && dir*nums[nextCircularArrayLoop(fast, nums)] > 0 {
			if slow == fast {
				// A self-loop (cycle of length 1) does not count.
				if slow == nextCircularArrayLoop(slow, nums) {
					break
				}
				return true
			}
			slow = nextCircularArrayLoop(slow, nums)
			fast = nextCircularArrayLoop(nextCircularArrayLoop(fast, nums), nums)
		}
		// Erase the chain just walked so it is never re-examined.
		for cur := start; dir*nums[cur] > 0; {
			next := nextCircularArrayLoop(cur, nums)
			nums[cur] = 0
			cur = next
		}
	}
	return false
}

// nextCircularArrayLoop returns the index reached from i, wrapping the
// array in both directions (handles negative steps).
func nextCircularArrayLoop(i int, nums []int) int {
	n := len(nums)
	return ((i+nums[i])%n + n) % n
}
|
package storage
import (
"strings"
"testing"
_ "github.com/lib/pq"
"github.com/spf13/viper"
)
// Viper configuration keys for the PostgreSQL connection settings (the "."
// separator is swapped for "_" when values come from environment variables;
// see the SetEnvKeyReplacer call in TestConnect).
const (
	postgresHost = "postgres.HOST"
	postgresPort = "postgres.PORT"
	postgresUser = "postgres.USER"
	postgresPass = "postgres.PASS"
	postgresDB = "postgres.DB"
)
// TestConnect verifies Connect succeeds with the viper-provided settings
// and fails with an obviously bogus configuration.
func TestConnect(t *testing.T) {
	v := viper.New()
	v.AddConfigPath("../config/")
	v.SetConfigName("viper.config")
	v.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
	if err := v.ReadInConfig(); err != nil {
		t.Error(err)
	}
	validConf := &DBConfig{
		Host: v.GetString(postgresHost),
		Port: v.GetString(postgresPort),
		User: v.GetString(postgresUser),
		Pass: v.GetString(postgresPass),
		DB:   v.GetString(postgresDB),
	}
	bogusConf := &DBConfig{
		Host: "localhouston",
		Port: "we",
		User: "have",
		Pass: "a",
		DB:   "problem",
	}
	cases := []struct {
		name    string
		config  *DBConfig
		wantErr bool
	}{
		{name: "TestWithCorrectInput", config: validConf, wantErr: false},
		{name: "TestWithIncorrectInput", config: bogusConf, wantErr: true},
	}
	for _, tc := range cases {
		tc := tc // capture per-iteration copy (pre-Go 1.22 safety)
		t.Run(tc.name, func(t *testing.T) {
			_, err := Connect(tc.config)
			if (err != nil) != tc.wantErr {
				t.Errorf("Connect() error = %v, wantErr %v", err, tc.wantErr)
			}
		})
	}
}
|
package traceconfig
import (
"fmt"
"io"
"github.com/opentracing/opentracing-go"
"github.com/uber/jaeger-client-go"
"github.com/uber/jaeger-client-go/config"
)
// TraceInit builds a Jaeger-backed opentracing tracer for serviceName with
// the given sampler type/parameter, reporting spans to the local agent on
// port 6831. It panics when the tracer cannot be constructed, so call it
// only during process startup.
func TraceInit(serviceName string, samplerType string, samplerParam float64) (opentracing.Tracer, io.Closer) {
	sampler := &config.SamplerConfig{
		Type:  samplerType,
		Param: samplerParam,
	}
	reporter := &config.ReporterConfig{
		LocalAgentHostPort: "localhost:6831",
		LogSpans:           true,
	}
	cfg := &config.Configuration{
		ServiceName: serviceName,
		Sampler:     sampler,
		Reporter:    reporter,
	}
	tracer, closer, err := cfg.NewTracer(config.Logger(jaeger.StdLogger))
	if err != nil {
		panic(fmt.Sprintf("Init failed: %v\n", err))
	}
	return tracer, closer
}
/*
Available values for SamplerConfig.Type:
const: sample everything; Param 1 or 0 turns sampling on or off respectively.
probabilistic: probabilistic sampling; Param is the sampling probability in [0, 1] (default 1/10000).
rateLimiting: rate-limited sampling; Param is the number of samples per second.
remote: dynamic sampling policy; Param has the same meaning as for probabilistic and is the
initial sampling rate used before the actual value is received. It can also be set via the
JAEGER_SAMPLER_PARAM environment variable.
*/
|
package field
import (
"fmt"
"reflect"
"github.com/spf13/cast"
)
// ToInt64SliceE casts an interface to a []int64 type. A []int64 passes
// through unchanged; any other slice or array is converted element-wise via
// cast.ToInt64E. Every other input yields an error.
func ToInt64SliceE(i interface{}) ([]int64, error) {
	if i == nil {
		return nil, fmt.Errorf("unable to cast %#v of type %T to []int64", i, i)
	}
	if v, ok := i.([]int64); ok {
		return v, nil
	}
	switch reflect.TypeOf(i).Kind() {
	case reflect.Slice, reflect.Array:
		s := reflect.ValueOf(i)
		out := make([]int64, s.Len())
		for idx := range out {
			elem, err := cast.ToInt64E(s.Index(idx).Interface())
			if err != nil {
				return nil, fmt.Errorf("unable to cast %#v of type %T to []int64", i, i)
			}
			out[idx] = elem
		}
		return out, nil
	default:
		return nil, fmt.Errorf("unable to cast %#v of type %T to []int64", i, i)
	}
}
// ToUint64SliceE casts an interface to a []uint64 type. A []uint64 passes
// through unchanged; any other slice or array is converted element-wise via
// cast.ToUint64E. Every other input yields an error.
func ToUint64SliceE(i interface{}) ([]uint64, error) {
	if i == nil {
		return nil, fmt.Errorf("unable to cast %#v of type %T to []uint64", i, i)
	}
	if v, ok := i.([]uint64); ok {
		return v, nil
	}
	switch reflect.TypeOf(i).Kind() {
	case reflect.Slice, reflect.Array:
		s := reflect.ValueOf(i)
		out := make([]uint64, s.Len())
		for idx := range out {
			elem, err := cast.ToUint64E(s.Index(idx).Interface())
			if err != nil {
				return nil, fmt.Errorf("unable to cast %#v of type %T to []uint64", i, i)
			}
			out[idx] = elem
		}
		return out, nil
	default:
		return nil, fmt.Errorf("unable to cast %#v of type %T to []uint64", i, i)
	}
}
// ToFloat64SliceE casts an interface to a []float64 type. A []float64
// passes through unchanged; any other slice or array is converted
// element-wise via cast.ToFloat64E. Every other input yields an error.
func ToFloat64SliceE(i interface{}) ([]float64, error) {
	if i == nil {
		return nil, fmt.Errorf("unable to cast %#v of type %T to []float64", i, i)
	}
	if v, ok := i.([]float64); ok {
		return v, nil
	}
	switch reflect.TypeOf(i).Kind() {
	case reflect.Slice, reflect.Array:
		s := reflect.ValueOf(i)
		out := make([]float64, s.Len())
		for idx := range out {
			elem, err := cast.ToFloat64E(s.Index(idx).Interface())
			if err != nil {
				return nil, fmt.Errorf("unable to cast %#v of type %T to []float64", i, i)
			}
			out[idx] = elem
		}
		return out, nil
	default:
		return nil, fmt.Errorf("unable to cast %#v of type %T to []float64", i, i)
	}
}
|
package app
import (
"github.com/fatmalabidi/bookstore_users_api/controllers/ping"
"github.com/fatmalabidi/bookstore_users_api/controllers/users"
)
// mapUrls registers every HTTP route on the package-level router.
func mapUrls() {
	router.GET("/ping", ping.Ping)
	router.POST("/users", users.CreateUser)
	// Bug fix: the routes were registered as "/users:userID" (no "/" before
	// the parameter), which does not declare a proper gin path-parameter
	// segment. The conventional REST form is "/users/:userID".
	router.GET("/users/:userID", users.GetUser)
	router.PUT("/users/:userID", users.UpdateUser)
	router.PATCH("/users/:userID", users.UpdateUser)
	router.DELETE("/users/:userID", users.DeleteUser)
	router.GET("/internal/users/search", users.Search)
}
|
package internal
import (
"framework/cluster"
"framework/gate"
"framework/log"
"proto/gameproto"
"github.com/golang/protobuf/proto"
)
// GameMsgHandler is the signature of a message handler; it receives the raw
// handler arguments (in this file: an agent followed by the protobuf payload).
type GameMsgHandler func([]interface{})

var (
	// gameMsgHandlers maps each message ID to its registered handler for the
	// Game module.
	gameMsgHandlers = make(map[gameproto.MsgID]GameMsgHandler)
)
// init wires up the built-in message handlers.
func init() {
	// Cluster-related messages.
	registerHandler(gameproto.MsgID_CLUSTER_UPDATEINFO_NOTIFY, handleClusterInfo)
	// Gate login messages.
	registerHandler(gameproto.MsgID_LOGINSVR_LOGIN_REQ, handleLogin)
}
// registerHandler binds a handler to a message ID. Duplicate registrations
// are rejected: the first handler wins and an error is logged.
func registerHandler(id gameproto.MsgID, gameMsgHandler GameMsgHandler) {
	if _, ok := gameMsgHandlers[id]; ok {
		// Already registered. Bug fix: %u is not a valid Go format verb
		// (it would print "%!u(...)"); use %d for the numeric message ID.
		log.Error("Failed to register msg handler %d\n", id)
		return
	}
	gameMsgHandlers[id] = gameMsgHandler
}
// handleClusterInfo processes a Cluster_UpdateInfoNotify message and records
// the sending agent under its reported endpoint ID.
//
// args[0] is the cluster.Agent the message arrived on; args[1] is the raw
// protobuf payload.
func handleClusterInfo(args []interface{}) {
	agent := args[0].(cluster.Agent)
	payload := args[1].([]byte)
	// Decode the notification.
	notify := &gameproto.Cluster_UpdateInfoNotify{}
	if err := proto.Unmarshal(payload, notify); err != nil {
		log.Error("Failed to parse clustinfo msg, err %v\n", err)
		return
	}
	endpointID := notify.GetLocalendID()
	// Reject duplicate registrations for the same endpoint.
	if _, registered := clusterAgent[endpointID]; registered {
		log.Error("failed to update endpointid %d, already registered.\n", endpointID)
		return
	}
	// Record the agent and stamp its user data with the endpoint ID.
	clusterAgent[endpointID] = agent
	agent.UserData().(*ClusterAgentInfo).endpointID = endpointID
	log.Debug("update cluster info, endpointid %d\n", endpointID)
}
// handleLogin processes a LoginSvr_LoginReq from a gate agent and replies with
// a LoginSvr_LoginResp wrapped in a ProtoMsg envelope.
//
// args[0] is the gate.Agent the request arrived on; args[1] is the raw
// protobuf payload.
func handleLogin(args []interface{}) {
	a := args[0].(gate.Agent)
	data := args[1].([]byte)
	// Decode the request.
	reqMsg := &gameproto.LoginSvr_LoginReq{}
	err := proto.Unmarshal(data, reqMsg)
	if err != nil {
		log.Error("Failed to parse login msg, err %v\n", err)
		return
	}
	// TODO(jasonxiong): flesh out the real login logic once confirmed.
	// Bug fix: %u is not a Go format verb; %d prints the uin correctly.
	log.Debug("success to process login request, uin %d\n", reqMsg.GetUin())
	// Build the response payload (IResult 0 appears to mean success —
	// NOTE(review): confirm against the proto definition).
	respLogin, err := proto.Marshal(&gameproto.LoginSvr_LoginResp{IResult: proto.Int32(0)})
	if err != nil {
		log.Error("Failed to encode resp login msg, err %v\n", err)
		return
	}
	respMsg := &gameproto.ProtoMsg{
		Msgid:   gameproto.MsgID_LOGINSVR_LOGIN_RESP.Enum(),
		Uin:     proto.Uint32(reqMsg.GetUin()),
		Msgdata: respLogin,
	}
	// Serialize the envelope and send it back through the agent.
	respData, err := proto.Marshal(respMsg)
	if err != nil {
		log.Error("Failed to marshal resp msg, err %v\n", err)
		return
	}
	a.WriteMsg(respData)
}
|
package box
import (
"cloud-box-backend/source/meta/models"
"github.com/jmoiron/sqlx"
)
// SQL statements for the box table. addBoxQuery is an upsert keyed on uuid,
// so re-registering an existing box simply refreshes its tunnel domain.
const (
	getAccountBoxesQuery = `select trim(tunnel_domain) tunnel_domain, trim(uuid) uuid from box where account_hash = $1`
	setAccountHashToBoxQuery = `update box set account_hash = $1 where uuid = $2`
	addBoxQuery = `
insert into box(tunnel_domain, uuid)
values(:tunnel_domain, :uuid)
on conflict (uuid)
do update
set tunnel_domain = :tunnel_domain`
)
// Repository provides database access for box records.
type Repository struct {
	db *sqlx.DB
}

// New builds a Repository on top of the given database handle.
func New(db *sqlx.DB) Repository {
	return Repository{db}
}
// GetBoxes lists every box bound to the given account hash.
func (r Repository) GetBoxes(accountHash string) ([]models.Box, error) {
	var found []models.Box
	err := r.db.Select(&found, getAccountBoxesQuery, accountHash)
	return found, err
}
// BindBoxWithAccount attaches the box identified by boxUUID to the account.
func (r Repository) BindBoxWithAccount(accountHash, boxUUID string) error {
	if _, err := r.db.Exec(setAccountHashToBoxQuery, accountHash, boxUUID); err != nil {
		return err
	}
	return nil
}
// Register inserts the box, or refreshes its tunnel domain when the uuid
// already exists (upsert semantics of addBoxQuery).
func (r Repository) Register(box models.BoxRegistration) error {
	if _, err := r.db.NamedExec(addBoxQuery, box); err != nil {
		return err
	}
	return nil
}
|
package main
import (
"fmt"
)
// main demonstrates basic map operations: insert, lookup, delete, the
// two-value "comma ok" lookup, map literals, and iteration.
func main() {
	scores := make(map[string]int)
	scores["x"] = 1
	scores["y"] = 2
	scores["z"] = 3
	fmt.Println(scores)
	fmt.Println(scores["x"])
	delete(scores, "y")
	fmt.Println(scores)
	// Two-value lookup: present is false because "y" was just deleted.
	_, present := scores["y"]
	fmt.Println(present)
	pairs := map[string]int{
		"aa": 2,
		"bb": 3,
	}
	fmt.Println(pairs["bb"])
	// Note: map iteration order is intentionally randomized in Go.
	for key, value := range pairs {
		fmt.Println(key, value)
	}
}
|
package roleplay
import (
"bytes"
"fmt"
)
// GetCities replies to the context with the configured list of cities,
// formatted as a Markdown-style code block ("Cidades disponíveis").
func GetCities(ctx Context) {
	out := bytes.NewBufferString("Cidades disponíveis: \n")
	out.WriteString("```")
	for _, name := range ctx.Config.GetEnvConfStringSlice("cities") {
		fmt.Fprintf(out, "- %s \n", name)
	}
	out.WriteString("```")
	ctx.Reply(out.String())
}
|
package util
import (
"fmt"
"os"
"k8s.io/apimachinery/pkg/util/yaml"
)
// BindJsonOrYaml reads the file at filePath and decodes its contents
// (YAML or JSON) into obj.
//
// Bug fix: the opened file was never closed, leaking a file descriptor on
// every call; it is now closed via defer.
func BindJsonOrYaml(filePath string, obj interface{}) error {
	reader, err := os.Open(filePath)
	if err != nil {
		return fmt.Errorf("Failed opening file %s due to %s", filePath, err)
	}
	defer reader.Close()
	// 128 is the decoder's sniffing buffer size (bytes read to decide
	// whether the input is JSON or YAML).
	err = yaml.NewYAMLOrJSONDecoder(reader, 128).Decode(obj)
	if err != nil {
		return fmt.Errorf("Failed to parse file %s because: %v", filePath, err)
	}
	return nil
}
|
// Copyright (C) 2019 Michael J. Fromberger. All Rights Reserved.
package otp_test
import (
"testing"
"github.com/creachadair/otp"
)
// googleTests holds HOTP/TOTP compatibility vectors: key is the Google-style
// space-grouped base32 secret, counter is the HOTP counter (or TOTP timestep),
// and otp is the expected 6-digit code.
var googleTests = []struct {
	key     string
	counter uint64
	otp     string
}{
	// Manually generated compatibility test vectors for Google authenticator.
	//
	// To verify these test vectors, or to generate new ones, manually enter the
	// key and set "time-based" to off. The first key shown is for index 1, and
	// refreshing increments the index sequentially.
	{"aaaa aaaa aaaa aaaa", 1, "812658"},
	{"aaaa aaaa aaaa aaaa", 2, "073348"},
	{"aaaa aaaa aaaa aaaa", 3, "887919"},
	{"aaaa aaaa aaaa aaaa", 4, "320986"},
	{"aaaa aaaa aaaa aaaa", 5, "435986"},
	{"abcd efgh ijkl mnop", 1, "317963"},
	{"abcd efgh ijkl mnop", 2, "625848"},
	{"abcd efgh ijkl mnop", 3, "281014"},
	{"abcd efgh ijkl mnop", 4, "709708"},
	{"abcd efgh ijkl mnop", 5, "522086"},
	// These are time-based codes. Enter the key in the authenticator app and
	// select "time-based". Copy a code and use "date +%s" to get the time in
	// seconds. The default timestep is based on a 30-second window.
	{"aaaa bbbb cccc dddd", 1642868750 / 30, "349451"},
	{"aaaa bbbb cccc dddd", 1642868800 / 30, "349712"},
	{"aaaa bbbb cccc dddd", 1642868822 / 30, "367384"},
	{"aaaa bbbb cccc dddd", 1642869021 / 30, "436225"},
}
// TestDefaultHOTP checks DefaultHOTP against the Google-authenticator
// compatibility vectors in googleTests.
//
// Fix: the original logged the call context whenever t.Failed() was true,
// but t.Failed() stays true once any case fails, so every subsequent
// (passing) case was also logged with misleading context. The context is
// now embedded directly in each failure message.
func TestDefaultHOTP(t *testing.T) {
	for _, test := range googleTests {
		got, err := otp.DefaultHOTP(test.key, test.counter)
		if err != nil {
			t.Errorf("DefaultHOTP(%q, %v): invalid key: %v", test.key, test.counter, err)
		} else if got != test.otp {
			t.Errorf("DefaultHOTP(%q, %v): got %q, want %q", test.key, test.counter, got, test.otp)
		}
	}
}
// TestConfig_Next walks the vectors for a single key and verifies that each
// call to cfg.Next yields the next expected code and advances cfg.Counter in
// lockstep with the vector counters. The test depends on iterating the
// matching vectors in order, since Next mutates cfg.
func TestConfig_Next(t *testing.T) {
	const testKey = "aaaa aaaa aaaa aaaa"
	var cfg otp.Config
	if err := cfg.ParseKey(testKey); err != nil {
		t.Fatalf("ParseKey %q failed: %v", testKey, err)
	}
	// nrun guards against the vector table silently losing all cases for testKey.
	var nrun int
	for _, test := range googleTests {
		if test.key != testKey {
			continue
		}
		nrun++
		got := cfg.Next()
		if got != test.otp {
			t.Errorf("Next [counter=%d]: got %q, want %q", cfg.Counter, got, test.otp)
		}
		if cfg.Counter != test.counter {
			t.Errorf("Next counter: got %d, want %d", cfg.Counter, test.counter)
		}
	}
	if nrun == 0 {
		t.Fatal("Found no matching test cases")
	}
}
// TestGoogleAuthCompat runs every vector through HOTP twice: once with the
// standard decimal-digit formatter, and once with a custom formatter that
// maps each digit to a letter (0=a ... 9=j), checked against digitsToLetters.
func TestGoogleAuthCompat(t *testing.T) {
	for _, test := range googleTests {
		key, err := otp.ParseKey(test.key)
		if err != nil {
			t.Errorf("ParseKey(%q) failed: %v", test.key, err)
			continue
		}
		t.Run("Standard-"+test.otp, func(t *testing.T) {
			cfg := otp.Config{Key: string(key)}
			got := cfg.HOTP(test.counter)
			if got != test.otp {
				t.Errorf("Key %q HOTP(%d) got %q, want %q", test.key, test.counter, got, test.otp)
			}
		})
		t.Run("Custom-"+test.otp, func(t *testing.T) {
			cfg := otp.Config{
				Key: string(key),
				// Map digits to corresponding letters 0=a, 1=b, etc.
				Format: func(hash []byte, nd int) string {
					v := otp.Truncate(hash)
					buf := make([]byte, nd)
					// Emit least-significant digit last, mirroring decimal formatting.
					for i := nd - 1; i >= 0; i-- {
						buf[i] = byte(v%10) + byte('a')
						v /= 10
					}
					return string(buf)
				},
			}
			got := cfg.HOTP(test.counter)
			want := digitsToLetters(test.otp)
			if got != want {
				t.Errorf("Key %q HOTP(%d) got %q, want %q", test.key, test.counter, got, want)
			}
		})
	}
}
// TestFormatBounds verifies that code generation panics when the Format
// callback returns a string whose length differs from the configured Digits.
func TestFormatBounds(t *testing.T) {
	cfg := otp.Config{
		Key:      "whatever",
		TimeStep: func() uint64 { return 1 },
		// Request 5 digits, but generate 8.
		// This should cause code generation to panic.
		Digits: 5,
		Format: func(_ []byte, nd int) string {
			return "12345678" // N.B. not 5
		},
	}
	t.Run("Panic", func(t *testing.T) {
		var code string
		defer func() {
			// recover() is non-nil only if TOTP panicked as expected.
			p := recover()
			if p == nil {
				t.Fatalf("Expected failure; got %q", code)
			}
			t.Logf("Got expected panic: %v", p)
		}()
		code = cfg.TOTP()
	})
}
// TestFormatAlphabet checks FormatAlphabet-based formatting: the truncated
// HOTP value is rendered using the caller-supplied alphabet. Note the
// "0123456789" case doubles as a sanity check against standard digits.
func TestFormatAlphabet(t *testing.T) {
	tests := []struct {
		alphabet string
		want     string
	}{
		{"XYZPDQ", "PQXPP"},
		{"0123456789", "43645"},
	}
	for _, test := range tests {
		cfg := otp.Config{
			Key:    "whatever",
			Digits: 5,
			Format: otp.FormatAlphabet(test.alphabet),
		}
		got := cfg.HOTP(1)
		if got != test.want {
			t.Errorf("[%q].HOTP(1) failed: got %q, want %q", test.alphabet, got, test.want)
		}
	}
}
// digitsToLetters maps each decimal digit in s to the corresponding letter in
// the range a..j. It will panic for any value outside this range.
func digitsToLetters(s string) string {
	out := []byte(s)
	for idx, c := range out {
		if c < '0' || c > '9' {
			panic("invalid digit")
		}
		out[idx] = c - '0' + 'a'
	}
	return string(out)
}
/*
[RFC 4226] Appendix D - HOTP Algorithm: Test Values
The following test data uses the ASCII string "12345678901234567890" for the
secret:
Secret = 0x3132333435363738393031323334353637383930
Table 1 details for each count, the intermediate HMAC value.
Count Hexadecimal HMAC-SHA-1(secret, count)
0 cc93cf18508d94934c64b65d8ba7667fb7cde4b0
1 75a48a19d4cbe100644e8ac1397eea747a2d33ab
2 0bacb7fa082fef30782211938bc1c5e70416ff44
3 66c28227d03a2d5529262ff016a1e6ef76557ece
4 a904c900a64b35909874b33e61c5938a8e15ed1c
5 a37e783d7b7233c083d4f62926c7a25f238d0316
6 bc9cd28561042c83f219324d3c607256c03272ae
7 a4fb960c0bc06e1eabb804e5b397cdc4b45596fa
8 1b3c89f65e6c9e883012052823443f048b4332db
9 1637409809a679dc698207310c8c7fc07290d9e5
Table 2 details for each count the truncated values (both in hexadecimal and
decimal) and then the HOTP value.
Truncated
Count Hexadecimal Decimal HOTP
0 4c93cf18 1284755224 755224
1 41397eea 1094287082 287082
2 82fef30 137359152 359152
3 66ef7655 1726969429 969429
4 61c5938a 1640338314 338314
5 33c083d4 868254676 254676
6 7256c032 1918287922 287922
7 4e5b397 82162583 162583
8 2823443f 673399871 399871
9 2679dc69 645520489 520489
[RFC 6238] Appendix B. Test Vectors
This section provides test values that can be used for the HOTP time-based
variant algorithm interoperability test.
The test token shared secret uses the ASCII string value
"12345678901234567890". With Time Step X = 30, and the Unix epoch as the
initial value to count time steps, where T0 = 0, the TOTP algorithm will
display the following values for specified modes and timestamps.
+-------------+--------------+------------------+----------+--------+
| Time (sec) | UTC Time | Value of T (hex) | TOTP | Mode |
+-------------+--------------+------------------+----------+--------+
| 59 | 1970-01-01 | 0000000000000001 | 94287082 | SHA1 |
| | 00:00:59 | | | |
| 1111111109 | 2005-03-18 | 00000000023523EC | 07081804 | SHA1 |
| | 01:58:29 | | | |
| 1111111111 | 2005-03-18 | 00000000023523ED | 14050471 | SHA1 |
| | 01:58:31 | | | |
| 1234567890 | 2009-02-13 | 000000000273EF07 | 89005924 | SHA1 |
| | 23:31:30 | | | |
| 2000000000 | 2033-05-18 | 0000000003F940AA | 69279037 | SHA1 |
| | 03:33:20 | | | |
| 20000000000 | 2603-10-11 | 0000000027BC86AA | 65353130 | SHA1 |
| | 11:33:20 | | | |
+-------------+--------------+------------------+----------+--------+
*/
|
package tcp
import (
"sync"
)
// MsgQueue is a mutex+condvar protected FIFO of arbitrary messages.
// Producers call Add; a consumer blocks in Pick until messages arrive.
// Construct instances with NewMsgQueue so listCond shares listGuard.
type MsgQueue struct {
	list      []interface{} // pending messages, drained in batches by Pick
	listGuard sync.Mutex    // protects list
	listCond  *sync.Cond    // signaled by Add when list becomes non-empty
}
// Add appends msg to the queue and wakes one consumer blocked in Pick.
// A nil msg acts as the shutdown sentinel recognized by Pick.
func (self *MsgQueue) Add(msg interface{}) {
	self.listGuard.Lock()
	defer self.listGuard.Unlock()
	self.list = append(self.list, msg)
	self.listCond.Signal()
}
// Reset discards all queued messages while keeping the slice's capacity
// for reuse.
//
// NOTE(review): Reset does not acquire listGuard itself — Pick calls it
// with the lock already held (the mutex is not reentrant). Any external
// caller must hold listGuard first, or it races with Add/Pick.
func (self *MsgQueue) Reset() {
	self.list = self.list[0:0]
}
// Pick blocks until the queue is non-empty, then drains all queued messages
// into retList. A nil message is the shutdown sentinel: it stops the drain
// and makes Pick report exit=true.
//
// Bug fix: the original released listGuard after the condition-wait loop and
// re-acquired it before draining. In that unlocked window another consumer
// could drain the queue, so this call could copy an empty list even though
// it had just observed a non-empty one. The lock is now held continuously
// from the wait through the drain.
func (self *MsgQueue) Pick(retList *[]interface{}) (exit bool) {
	self.listGuard.Lock()
	defer self.listGuard.Unlock()
	// listCond.Wait atomically releases listGuard while sleeping and
	// re-acquires it before returning, so the condition is always checked
	// under the lock.
	for len(self.list) == 0 {
		self.listCond.Wait()
	}
	// Copy messages out of the queue.
	for _, ev := range self.list {
		if ev == nil {
			exit = true
			break
		}
		*retList = append(*retList, ev)
	}
	self.Reset()
	return
}
// NewMsgQueue builds an empty queue whose condition variable shares the
// queue's own mutex, as required by Pick's wait loop.
func NewMsgQueue() *MsgQueue {
	q := &MsgQueue{}
	q.listCond = sync.NewCond(&q.listGuard)
	return q
}
|
package main
import "fmt"
// func main() {
// done := make(chan bool)
// values := []string{"a", "b", "c"}
// for _, v := range values {
// go func() {
// fmt.Println(v)
// done <- true
// }()
// }
// // wait for all goroutines to complete before exiting
// for _ = range values {
// <-done
// }
// }
// type inter interface {
// Value() int
// }
// type face interface {
// Value() int
// }
// type X struct {
// x int
// }
// func (inst X) Value() int {
// return inst.x
// }
// func f1(n inter) {
// fmt.Println("f1 Value: ", n.Value())
// }
// func f2(n face) {
// fmt.Println("f2 Value: ", n.Value())
// }
// type Parent struct {
// Val int
// c Child
// }
// type Child struct {
// Val int
// }
// func main() {
// var myX X
// f1(myX)
// f2(myX)
// var p Parent
// p.Val = 5
// p.c.Val = 3
// fmt.Println("P Val: ", p.Val)
// fmt.Println("P C Val: ", p.c.Val)
// fmt.Println("Done.")
// }
// main runs the panic/recover demonstration in f and g.
func main() {
	f()
	fmt.Println("Done.")
}
// f calls g and reports whether it returned normally. Because g recovers its
// own panic chain in a deferred function, execution continues past the call.
func f() {
	// defer func() {
	// 	for r := recover(); r != nil; r = recover() {
	// 		fmt.Println("Recovered in f", r)
	// 	}
	// }()
	fmt.Println("Calling g.")
	g(0)
	fmt.Println("Returned normally from g.")
}
// g demonstrates how panics interact with stacked deferred functions: the
// initial panic is successively replaced by panics raised in the third and
// second deferred functions, and the first (outermost) deferred function
// finally recovers, so g returns normally to its caller.
//
// Fix: removed the statements that followed a panic in the same block —
// they were unreachable and flagged by go vet.
func g(i int) {
	defer func() {
		fmt.Println("First deferred function, recovering")
		// recover() yields the most recent panic value
		// ("Second Deferred Panic") and stops the unwinding.
		fmt.Println("Recover: ", recover())
	}()
	defer func() {
		fmt.Println("Second deferred function, panicking...")
		panic("Second Deferred Panic")
	}()
	defer func() {
		// r := recover()
		// fmt.Println("Recovered in third deferred func:", r)
		panic("Third Deferred Panic")
	}()
	fmt.Println("About to hit the first panic....")
	panic("First Panic")
}
|
package main
import (
"fmt"
"os"
"github.com/spf13/cobra"
)
// rootCmd is the top-level "eic" command (ensure import comment).
var rootCmd = &cobra.Command{
	Use:   "eic",
	Short: "Ensure import comment",
	Run:   rootRun,
}

// Flag storage for the root command.
var (
	_dirpath  string // directory to process (-d/--dir)
	_filepath string // single file to process (-f/--file)
	_dryrun   bool   // report-only mode (-n/--dryrun)
)
// init registers the command-line flags on rootCmd.
func init() {
	rootCmd.Flags().StringVarP(&_dirpath, "dir", "d", "", "transfer directory")
	rootCmd.Flags().StringVarP(&_filepath, "file", "f", "", "transfer a file")
	rootCmd.Flags().BoolVarP(&_dryrun, "dryrun", "n", false, "show what would have been transferred")
}
// rootRun dispatches on the provided flags: a directory takes precedence
// over a single file; with neither, the help text is printed. Any worker
// failure is reported and exits with status 1.
func rootRun(cmd *cobra.Command, args []string) {
	worker := &Worker{
		DryRun: _dryrun,
	}
	var err error
	switch {
	case _dirpath != "":
		err = worker.WorkDir(_dirpath)
	case _filepath != "":
		err = worker.WorkFile(_filepath)
	default:
		cmd.Help()
		os.Exit(1)
	}
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
}
|
package main
import "fmt"
// main demonstrates the declaration forms for strings plus a short-variable
// declaration.
//
// Fix: z and h were declared but never used, which is a compile error in Go
// ("declared and not used"); both are now printed.
func main() {
	var x string = "Hello, world"
	var y string
	y = "Hello, world"
	fmt.Println(x == y)
	z := "Hello, world"
	fmt.Println(z)
	var h = 5
	fmt.Println(h)
}
|
package manifestsecrets
import (
"log"
"github.com/pivotal-cf/on-demand-service-broker/boshdirector"
"github.com/pivotal-cf/on-demand-service-broker/broker"
)
// NoopSecretManager is the disabled-mode implementation used when secure
// manifests are turned off: it resolves nothing and deletes nothing,
// always succeeding.
type NoopSecretManager struct{}

// ResolveManifestSecrets is a no-op; it reports no secrets and no error.
func (r *NoopSecretManager) ResolveManifestSecrets(manifest []byte, deploymentVariables []boshdirector.Variable, logger *log.Logger) (map[string]string, error) {
	return nil, nil
}

// DeleteSecretsForInstance is a no-op; it always succeeds.
func (r *NoopSecretManager) DeleteSecretsForInstance(instanceID string, logger *log.Logger) error {
	return nil
}
// BoshCredHubSecretManager resolves and deletes manifest secrets through
// CredHub: matcher locates secret references in manifests, operator performs
// the CredHub operations.
type BoshCredHubSecretManager struct {
	matcher  Matcher
	operator CredhubOperator
}
// BuildManager selects the manifest-secret implementation: the CredHub-backed
// manager when secure manifests are enabled, otherwise the no-op manager.
func BuildManager(enableSecureManifests bool, matcher Matcher, secretsFetcher CredhubOperator) broker.ManifestSecretManager {
	if enableSecureManifests {
		return &BoshCredHubSecretManager{
			matcher:  matcher,
			operator: secretsFetcher,
		}
	}
	return new(NoopSecretManager)
}
// ResolveManifestSecrets finds secret references in the manifest (using the
// deployment variables) and fetches their values from CredHub in bulk.
func (r *BoshCredHubSecretManager) ResolveManifestSecrets(manifest []byte, deploymentVariables []boshdirector.Variable, logger *log.Logger) (map[string]string, error) {
	matches, err := r.matcher.Match(manifest, deploymentVariables)
	if err != nil {
		return nil, err
	}
	return r.operator.BulkGet(matches, logger)
}
// DeleteSecretsForInstance removes every CredHub secret whose name matches
// the given service-instance ID.
func (r *BoshCredHubSecretManager) DeleteSecretsForInstance(instanceID string, logger *log.Logger) error {
	paths, err := r.operator.FindNameLike(instanceID, logger)
	if err == nil {
		err = r.operator.BulkDelete(paths, logger)
	}
	return err
}
|
package Sieve
import (
"reflect"
"testing"
)
// TestSieve checks Sieve against known lists of primes up to n (inclusive,
// per the {3, [2 3]} vector).
//
// Fix: the n=9 expectation listed {2, 3, 5, 7, 8}, but 8 is not prime
// (8 = 2^3); the correct expectation is {2, 3, 5, 7}.
func TestSieve(t *testing.T) {
	var tests = []struct {
		n    int
		want []int
	}{
		{25, []int{2, 3, 5, 7, 11, 13, 17, 19, 23}},
		{3, []int{2, 3}},
		{9, []int{2, 3, 5, 7}},
	}
	for _, test := range tests {
		got := Sieve(test.n)
		if !reflect.DeepEqual(got, test.want) {
			t.Errorf("Sieve(%d) => %v, want %v", test.n, got, test.want)
		}
	}
}
|
/*
* Insert an image to a PDF file.
*
* Adds image to a specific page of a PDF. xPos and yPos define the upper left corner of the image location, and width
* is the width of the image in PDF coordinates (height/width ratio is maintained).
*
* Example go run pdf_add_image_to_page.go /tmp/input.pdf 1 /tmp/image.jpg 0 0 100 /tmp/output.pdf
* adds the image to the upper left corner of the page (0,0). The width is 100 (typical page width 612 with defaults).
*
* Syntax: go run pdf_add_image_to_page.go input.pdf <page> image.jpg <xpos> <ypos> <width> output.pdf
*/
package main
import (
"fmt"
"os"
"strconv"
"github.com/unidoc/unipdf/v3/common/license"
"github.com/unidoc/unipdf/v3/creator"
"github.com/unidoc/unipdf/v3/model"
)
// licenseKey holds the UniDoc license text passed to license.SetLicenseKey;
// the placeholder below must be replaced with a real key before use
// (free trial keys: https://unidoc.io/).
const licenseKey = `
-----BEGIN UNIDOC LICENSE KEY-----
Free trial license keys are available at: https://unidoc.io/
-----END UNIDOC LICENSE KEY-----
`
// init installs the UniDoc license key; the process aborts immediately if
// the key is invalid, since nothing else can work without it.
func init() {
	// Enable debug-level logging.
	// unicommon.SetLogger(unicommon.NewConsoleLogger(unicommon.LogLevelDebug))
	err := license.SetLicenseKey(licenseKey, `Company Name`)
	if err != nil {
		panic(err)
	}
}
// main parses the command-line arguments (input PDF, page number, image
// path, x/y position, width, output PDF) and applies the image.
//
// Fix: xPos and yPos are float64, but were printed with %d, producing
// "%!d(float64=...)" output; %v prints them correctly.
func main() {
	if len(os.Args) < 8 {
		fmt.Printf("Usage: go run pdf_add_image_to_page.go input.pdf <page> image.jpg <xpos> <ypos> <width> output.pdf\n")
		os.Exit(1)
	}
	inputPath := os.Args[1]
	pageNumStr := os.Args[2]
	imagePath := os.Args[3]
	xPos, err := strconv.ParseFloat(os.Args[4], 64)
	if err != nil {
		fmt.Printf("Error: %v\n", err)
		os.Exit(1)
	}
	yPos, err := strconv.ParseFloat(os.Args[5], 64)
	if err != nil {
		fmt.Printf("Error: %v\n", err)
		os.Exit(1)
	}
	iwidth, err := strconv.ParseFloat(os.Args[6], 64)
	if err != nil {
		fmt.Printf("Error: %v\n", err)
		os.Exit(1)
	}
	outputPath := os.Args[7]
	fmt.Printf("xPos: %v, yPos: %v\n", xPos, yPos)
	pageNum, err := strconv.Atoi(pageNumStr)
	if err != nil {
		fmt.Printf("Error: %v\n", err)
		os.Exit(1)
	}
	err = addImageToPdf(inputPath, outputPath, imagePath, pageNum, xPos, yPos, iwidth)
	if err != nil {
		fmt.Printf("Error: %v\n", err)
		os.Exit(1)
	}
	fmt.Printf("Complete, see output file: %s\n", outputPath)
}
// Add image to a specific page of a PDF. xPos and yPos define the upper left corner of the image location, and iwidth
// is the width of the image in PDF document dimensions (height/width ratio is maintained).
//
// A pageNum of -1 applies the image to every page.
//
// Fix: the error returned by c.Draw was silently discarded (`_ = c.Draw(img)`);
// a failed draw is now propagated to the caller.
func addImageToPdf(inputPath string, outputPath string, imagePath string, pageNum int, xPos float64, yPos float64, iwidth float64) error {
	c := creator.New()
	// Prepare the image.
	img, err := c.NewImageFromFile(imagePath)
	if err != nil {
		return err
	}
	img.ScaleToWidth(iwidth)
	img.SetPos(xPos, yPos)
	// Read the input pdf file.
	f, err := os.Open(inputPath)
	if err != nil {
		return err
	}
	defer f.Close()
	pdfReader, err := model.NewPdfReader(f)
	if err != nil {
		return err
	}
	numPages, err := pdfReader.GetNumPages()
	if err != nil {
		return err
	}
	// Load the pages.
	for i := 0; i < numPages; i++ {
		page, err := pdfReader.GetPage(i + 1)
		if err != nil {
			return err
		}
		// Add the page.
		err = c.AddPage(page)
		if err != nil {
			return err
		}
		// If the specified page, or -1, apply the image to the page.
		if i+1 == pageNum || pageNum == -1 {
			if err = c.Draw(img); err != nil {
				return err
			}
		}
	}
	err = c.WriteToFile(outputPath)
	return err
}
|
package algorithms
import "math/big"
// Sieve holds the factorization target both as its original base-10 string
// and as the parsed arbitrary-precision integer.
type Sieve struct {
	internalBaseStringRef string
	internalBigIntRef     *big.Int
}

// Takes a new base 10 string of integers to sift through to factorize. Will use all available processors.
func NewQuadraticSieve(s string) *Sieve {
	target := new(big.Int)
	// NOTE(review): SetString's ok result is not checked here, so an
	// unparsable string yields an unusable big.Int — confirm callers
	// always pass valid base-10 input.
	target.SetString(s, 10)
	return &Sieve{
		internalBaseStringRef: s,
		internalBigIntRef:     target,
	}
}
//func (s *Sieve) Sift() string {
// var microBase uint64
// var nbPrimes int64
// var i, j uint64
// sqrtN, rem := new(big.Int), new(big.Int)
//
//} |
package control
import (
"encoding/json"
"github.com/PuerkitoBio/goquery"
"github.com/playgrunge/monicore/core/api"
"github.com/playgrunge/monicore/core/scrape"
"log"
"net/http"
"regexp"
"strings"
)
// HydroApi exposes the Hydro-Québec outage report both as a JSON endpoint
// and as an HTML scraper; it embeds the shared API/scrape request helpers.
type HydroApi struct {
	api.ApiRequest
	scrape.ScrapeRequest
}
// Scrape extracts the outage table from the service-interruption report page.
// Each body row becomes an entry keyed by region name; the footer row becomes
// the aggregate entry keyed "all". The client counts come as strings like
// "12 345" ("interrupted total"), so the leading run of digits/spaces is the
// interrupted count and the trailing run is the total, with spaces stripped.
//
// Improvement: the tbody and tfoot handlers were duplicated line-for-line
// except for the region key; the shared logic is now a single helper closure.
func (h *HydroApi) Scrape(doc *goquery.Document) map[string]interface{} {
	var intrClientsREGEX = regexp.MustCompile(`^[0-9]+[0-9 ]*[0-9]*`)
	var totalClientsREGEX = regexp.MustCompile(`[0-9]+[0-9 ]*[0-9]*$`)
	var data = map[string]interface{}{}
	// record captures one table row under the given region key.
	record := func(region string, s *goquery.Selection) {
		interruptions := s.Find("td:nth-child(2)").Text()
		clients := s.Find("td:nth-child(3)").Text()
		intrClients := strings.Replace(intrClientsREGEX.FindString(clients), " ", "", -1)
		totalClients := strings.Replace(totalClientsREGEX.FindString(clients), " ", "", -1)
		data[region] = map[string]interface{}{
			"interruptions":      interruptions,
			"clientsInterrupted": intrClients,
			"totalClients":       totalClients,
		}
	}
	doc.Find("div.service-on table tbody tr").Each(func(i int, s *goquery.Selection) {
		record(s.Find("td[scope=row] a").Text(), s)
	})
	doc.Find("div.service-on table tfoot tr").Each(func(i int, s *goquery.Selection) {
		record("all", s)
	})
	return data
}
// HydroName identifies this data source (e.g. as a registry/route key).
const HydroName = "hydro"
// SendApi writes the scraped outage report as a JSON HTTP response.
//
// Fix: on failure the handler previously returned without writing anything,
// which the client saw as an empty 200 OK; it now responds with 500.
func (h *HydroApi) SendApi(w http.ResponseWriter, r *http.Request) {
	res, err := h.GetApi()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.Write(res)
}
// GetApi fetches the Hydro-Québec outage report page, scrapes it, and
// returns the result marshalled as JSON.
//
// Fix: a fetch failure previously called log.Fatal, terminating the whole
// process from inside a method whose signature already returns an error.
// Failures are now logged and returned to the caller.
func (h *HydroApi) GetApi() ([]byte, error) {
	doc, err := goquery.NewDocument("http://poweroutages.hydroquebec.com/poweroutages/service-interruption-report")
	if err != nil {
		log.Print(err)
		return nil, err
	}
	data := h.Scrape(doc)
	robots, err := json.Marshal(&data)
	if err != nil {
		log.Print(err)
		return nil, err
	}
	return robots, nil
}
|
/**
* Copyright (c) 2018-present, MultiVAC Foundation.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
package sync
import (
"testing"
"time"
"github.com/multivactech/MultiVAC/configs/config"
"github.com/multivactech/MultiVAC/model/chaincfg/chainhash"
"github.com/multivactech/MultiVAC/model/shard"
"github.com/multivactech/MultiVAC/model/wire"
"github.com/multivactech/MultiVAC/processor/shared/message"
)
// TestBaseWorker_ShouldStartSync exercises shouldStartSync across
// combinations of worker status (IDLE/RUNNING), the presence of an outbound
// storage sync peer, and staleness of an in-progress sync.
func TestBaseWorker_ShouldStartSync(t *testing.T) {
	tests := []struct {
		desc            string
		shouldStartSync bool
		status          syncStatus
		p               *syncPeer
		isStaleSync     bool
	}{
		{
			desc:            "IDLE status without peer",
			status:          IDLE,
			shouldStartSync: false,
		},
		{
			desc:            "RUNNING status without peer",
			shouldStartSync: false,
			status:          RUNNING,
		},
		{
			desc:            "IDLE status with outbound storage peer",
			p:               createPeerForTest(true, config.StorageNode),
			shouldStartSync: true,
		},
		{
			desc:            "RUNNING status with outbound storage peer",
			p:               createPeerForTest(true, config.StorageNode),
			status:          RUNNING,
			shouldStartSync: false,
			isStaleSync:     false,
		},
		{
			desc:            "RUNNING status with outbound storage peer but sync is stale",
			p:               createPeerForTest(true, config.StorageNode),
			status:          RUNNING,
			shouldStartSync: true,
			isStaleSync:     true,
		},
	}
	for _, tt := range tests {
		w := newBaseWorker(shard.IDToShardIndex(TestCurrentShard))
		w.status = tt.status
		// A stale sync is simulated by back-dating the start time just past
		// the syncStaleMin threshold.
		if tt.isStaleSync {
			w.syncStartTime = time.Now().Add(time.Duration(-(syncStaleMin + 1) * time.Minute))
		} else {
			w.syncStartTime = time.Now()
		}
		if tt.p != nil {
			w.addSyncPeer(tt.p.cp, tt.p.nodeType)
		}
		if s := w.shouldStartSync(); s != tt.shouldStartSync {
			t.Errorf("BaseWorker added peer when [%s], expected: %v, got: %v", tt.desc, tt.shouldStartSync, s)
		}
	}
}
// TestBaseWorker_HandleAddPeer verifies handleAddPeer + handleSyncReq for
// both storage and miner workers: an outbound peer should trigger a sync
// when the worker is IDLE or when a RUNNING sync has gone stale; inbound
// peers and fresh RUNNING syncs should not. The fake manager records
// whether a sync was actually started.
func TestBaseWorker_HandleAddPeer(t *testing.T) {
	tests := []struct {
		desc            string
		status          syncStatus
		syncStartTime   time.Time
		p               *syncPeer
		sn              storageSyncContract
		abc             minerSyncContract
		shouldStartSync bool
	}{
		{
			desc:            "RUNNING status with outbound storage peer",
			p:               createPeerForTest(true, config.StorageNode),
			sn:              createFakeStorageContract(),
			abc:             nil,
			status:          RUNNING,
			syncStartTime:   time.Now().Add(-time.Minute * 1),
			shouldStartSync: false,
		},
		{
			desc:            "RUNNING status with outbound storage peer",
			p:               createPeerForTest(true, config.StorageNode),
			sn:              createFakeStorageContract(),
			abc:             nil,
			status:          RUNNING,
			syncStartTime:   time.Now().Add(-time.Minute * 10),
			shouldStartSync: true,
		},
		{
			desc:            "IDLE status with outbound storage peer",
			p:               createPeerForTest(true, config.StorageNode),
			sn:              createFakeStorageContract(),
			abc:             nil,
			status:          IDLE,
			shouldStartSync: true,
		},
		{
			desc:            "RUNNING status with inbound storage peer",
			p:               createPeerForTest(false, config.StorageNode),
			sn:              createFakeStorageContract(),
			abc:             nil,
			status:          RUNNING,
			syncStartTime:   time.Now().Add(-time.Minute * 10),
			shouldStartSync: false,
		},
		{
			desc:            "RUNNING status with outbound miner peer",
			p:               createPeerForTest(true, config.MinerNode),
			sn:              nil,
			abc:             &syncContract{},
			status:          RUNNING,
			syncStartTime:   time.Now().Add(-time.Minute * 10),
			shouldStartSync: true,
		},
		{
			desc:            "IDLE status with outbound miner peer",
			p:               createPeerForTest(true, config.MinerNode),
			sn:              nil,
			abc:             &syncContract{},
			status:          IDLE,
			shouldStartSync: true,
		},
	}
	for _, tt := range tests {
		mgr := createFakeMgr()
		if tt.p.nodeType == config.StorageNode {
			w := newStorageWorker(shard.IDToShardIndex(TestCurrentShard), createFakeStorageContract())
			w.setManager(mgr)
			// A zero syncStartTime in the vector means "sync just started".
			if tt.status == RUNNING {
				if tt.syncStartTime.IsZero() {
					w.syncStartTime = time.Now()
				} else {
					w.syncStartTime = tt.syncStartTime
				}
			}
			w.status = tt.status
			if tt.p != nil {
				w.handleAddPeer(tt.p.cp, tt.p.nodeType)
				w.handleSyncReq()
			}
			if s := mgr.syncing; s != tt.shouldStartSync {
				t.Errorf("StorageWorker is syncing [%s], expected: %v, got: %v", tt.desc, tt.shouldStartSync, s)
			}
		} else {
			// Miner branch mirrors the storage branch with a miner worker.
			w := newMinerWorker(shard.IDToShardIndex(TestCurrentShard), tt.abc)
			w.setManager(mgr)
			if tt.status == RUNNING {
				if tt.syncStartTime.IsZero() {
					w.syncStartTime = time.Now()
				} else {
					w.syncStartTime = tt.syncStartTime
				}
			}
			w.status = tt.status
			if tt.p != nil {
				w.handleAddPeer(tt.p.cp, tt.p.nodeType)
				w.handleSyncReq()
			}
			if s := mgr.syncing; s != tt.shouldStartSync {
				t.Errorf("MinerWorker is syncing [%s], expected: %v, got: %v", tt.desc, tt.shouldStartSync, s)
			}
		}
	}
}
// TestBaseWorker_HandlePeerDone verifies that losing the active outbound
// sync peer stops a RUNNING sync, while losing an inbound peer or a peer of
// an IDLE worker does not. The peer is first registered and selected as the
// current sync peer so that handlePeerDone sees it as active.
func TestBaseWorker_HandlePeerDone(t *testing.T) {
	tests := []struct {
		desc           string
		status         syncStatus
		p              *syncPeer
		sn             storageSyncContract
		abc            minerSyncContract
		shouldStopSync bool
	}{
		{
			desc:           "RUNNING status with outbound storage peer",
			p:              createPeerForTest(true, config.StorageNode),
			sn:             createFakeStorageContract(),
			abc:            nil,
			status:         RUNNING,
			shouldStopSync: true,
		},
		{
			desc:           "IDLE status with outbound storage peer",
			p:              createPeerForTest(true, config.StorageNode),
			sn:             createFakeStorageContract(),
			abc:            nil,
			status:         IDLE,
			shouldStopSync: false,
		},
		{
			desc:           "RUNNING status with inbound storage peer",
			p:              createPeerForTest(false, config.StorageNode),
			sn:             createFakeStorageContract(),
			abc:            nil,
			status:         RUNNING,
			shouldStopSync: false,
		},
		{
			desc:           "RUNNING status with outbound miner peer",
			p:              createPeerForTest(true, config.MinerNode),
			sn:             nil,
			abc:            &syncContract{},
			status:         RUNNING,
			shouldStopSync: true,
		},
		{
			desc:           "IDLE status with outbound miner peer",
			p:              createPeerForTest(true, config.MinerNode),
			sn:             nil,
			abc:            &syncContract{},
			status:         IDLE,
			shouldStopSync: false,
		},
	}
	for _, tt := range tests {
		mgr := createFakeMgr()
		if tt.p.nodeType == config.StorageNode {
			w := newStorageWorker(shard.IDToShardIndex(TestCurrentShard), createFakeStorageContract())
			w.setManager(mgr)
			w.status = tt.status
			// Register the peer and make it the active sync peer before
			// reporting it done.
			w.peerMgr.addSyncPeerCandidate(tt.p.cp, tt.p.nodeType)
			w.peerMgr.getSyncPeer()
			w.handlePeerDone(tt.p.cp, tt.p.nodeType)
			if s := mgr.stop; s != tt.shouldStopSync {
				t.Errorf("StorageWorker stop [%s], expected: %v, got: %v", tt.desc, tt.shouldStopSync, s)
			}
		} else {
			w := newMinerWorker(shard.IDToShardIndex(TestCurrentShard), tt.abc)
			w.setManager(mgr)
			w.status = tt.status
			w.peerMgr.addSyncPeerCandidate(tt.p.cp, tt.p.nodeType)
			w.peerMgr.getSyncPeer()
			w.handlePeerDone(tt.p.cp, tt.p.nodeType)
			if s := mgr.stop; s != tt.shouldStopSync {
				t.Errorf("MinerWorker stop [%s], expected: %v, got: %v", tt.desc, tt.shouldStopSync, s)
			}
		}
	}
}
// TestBaseWorker_IsSyncStale checks isSyncStale for sync start times of
// increasing age. Per these vectors a sync started 4+ minutes ago is stale
// and one started 1 minute ago is not — presumably syncStaleMin lies between
// those bounds; confirm against the constant's definition.
func TestBaseWorker_IsSyncStale(t *testing.T) {
	bw := newBaseWorker(shard.IDToShardIndex(TestCurrentShard))
	tests := []struct {
		time time.Time
		res  bool
	}{
		{
			time: time.Now().Add(-time.Minute * 1),
			res:  false,
		},
		{
			time: time.Now().Add(-time.Minute * 5),
			res:  true,
		},
		{
			time: time.Now().Add(-time.Minute * 4),
			res:  true,
		},
		{
			time: time.Now().Add(-time.Minute * 10),
			res:  true,
		},
	}
	for _, tt := range tests {
		bw.syncStartTime = tt.time
		if bw.isSyncStale() != tt.res {
			t.Errorf("Last sync time is:%v, now time is:%v ,the IsSyncStale result should be:%v,but get:%v", tt.time, time.Now(), tt.res, bw.isSyncStale())
		}
	}
}
// TestBaseWork_ProcessMail drives the worker's Act mailbox entry point with
// each mail category: add-peer and peer-done mails (asserting the fake
// manager's sync/stop flags), message mails (sync inv/req), and a default
// mail. The message and default paths are asserted in their dedicated tests;
// here they are exercised for mailbox-dispatch coverage only.
func TestBaseWork_ProcessMail(t *testing.T) {
	mgr := createFakeMgr()
	sp := createPeerForTest(true, config.StorageNode)
	sw := newStorageWorker(shard.IDToShardIndex(TestCurrentShard), createFakeStorageContract())
	sw.setManager(mgr)
	msgAddPeer := &peerMail{connPeer: sp.cp, nodeType: sp.nodeType}
	sw.status = IDLE
	sw.Act(message.NewEvent(evtAddPeer, msgAddPeer), nil)
	sw.handleSyncReq()
	if s := mgr.syncing; s != true {
		t.Errorf("Process add peer mail,the peer's sync status, expected: %v, got: %v", true, s)
	}
	msgPeerDone := &peerMail{connPeer: sp.cp, nodeType: sp.nodeType}
	sw.status = RUNNING
	sw.Act(message.NewEvent(evtDonePeer, msgPeerDone), nil)
	if s := mgr.stop; s != true {
		t.Errorf("Process peer done mail,the peer's stop status, expected: %v, got: %v", true, s)
	}
	//process message type e.g. msgSyncInv, msgSyncReq was tested in handleMag test
	msgSyncInv := wire.NewMsgSyncInv(shard.IDToShardIndex(TestCurrentShard), wire.SyncInvTypeGetData)
	sw.Act(message.NewEvent(evtMsg, msgSyncInv), nil)
	msgSyncReq := wire.NewMsgSyncReq(shard.IDToShardIndex(TestCurrentShard))
	sw.Act(message.NewEvent(evtMsg, msgSyncReq), nil)
	//process default mail e.g. storageMissInfo was tested in handleCustomReq test
	msgDefault := &storageMissInfo{}
	sw.Act(message.NewEvent(-1, msgDefault), nil)
}
// TestBaseWork_HandleSyncInv checks that handleSyncInv only accumulates
// inventory while the worker is RUNNING: in IDLE state curInvs must stay
// untouched; in RUNNING state it grows by one entry per inventory group.
// A SyncInvTypeGetData message is also fed through for dispatch coverage.
//
// Fix: the RUNNING-case failure message passed (after, before) as its
// (want, got) arguments; it now reports the computed expected length and
// the actual length.
func TestBaseWork_HandleSyncInv(t *testing.T) {
	msgSyncInvFullList := wire.NewMsgSyncInv(shard.IDToShardIndex(TestCurrentShard), wire.SyncInvTypeFullList)
	msgSyncInvFullList.AddInvGroup(shard.IDToShardIndex(TestCurrentShard), *testHash0)
	sw := newStorageWorker(shard.IDToShardIndex(TestCurrentShard), createFakeStorageContract())
	p := createPeerForTest(true, config.StorageNode)
	sw.peerMgr.addSyncPeerCandidate(p.cp, p.nodeType)
	sw.status = IDLE
	lenCurInvBeforeSync0 := len(sw.curInvs)
	sw.handleSyncInv(msgSyncInvFullList)
	lenCurInvAfterSync0 := len(sw.curInvs)
	if lenCurInvAfterSync0 != lenCurInvBeforeSync0 {
		t.Errorf("If the status of worker is IDLE, the SyncInv, the curInv will not work")
	}
	sw.status = RUNNING
	lenCurInvBeforeSync := len(sw.curInvs)
	sw.handleSyncInv(msgSyncInvFullList)
	lenCurInvAfterSync := len(sw.curInvs)
	if lenCurInvAfterSync != lenCurInvBeforeSync+len(msgSyncInvFullList.InvGroups) {
		t.Errorf("After SyncInv, the curInv length of worker should add to: %v, got: %v ", lenCurInvBeforeSync+len(msgSyncInvFullList.InvGroups), lenCurInvAfterSync)
	}
	msgSyncInvGetData := wire.NewMsgSyncInv(shard.IDToShardIndex(TestCurrentShard), wire.SyncInvTypeGetData)
	msgSyncInvGetData.AddInvGroup(shard.IDToShardIndex(TestCurrentShard), *testHash0)
	sw.handleSyncInv(msgSyncInvGetData)
}
// Fixed hash fixtures used by the hashSet tests below.
var (
	testHashStr0 = "14a0810ac680a3eb3f82edc878cea25ec41d6b790744e5daeef"
	testHash0, _ = chainhash.NewHashFromStr(testHashStr0)
	testkHashStr1 = "3264bc2ac36a60840790ba1d475d01367e7c723da941069e9dc"
	testHash1, _ = chainhash.NewHashFromStr(testkHashStr1)
)
// TestHashSet_Add verifies that adding a hash grows the set by one.
//
// Fix: corrected the "wo should get" typo in the failure message.
func TestHashSet_Add(t *testing.T) {
	hs := &hashSet{
		hashes: make(map[chainhash.Hash]bool),
	}
	lenBeforeAdd := len(hs.hashes)
	hs.add(*testHash0)
	lenAfterAdd := len(hs.hashes)
	if lenAfterAdd != lenBeforeAdd+1 {
		t.Errorf("After add a hash, we should get: %v, but got: %v", lenBeforeAdd+1, lenAfterAdd)
	}
}
// TestHashSet_Remove verifies that removing an absent hash is a no-op and
// that removing a present hash shrinks the set by one.
func TestHashSet_Remove(t *testing.T) {
	hs := &hashSet{
		hashes: make(map[chainhash.Hash]bool),
	}
	hs.add(*testHash0)
	// Use hs.size() consistently (the original mixed len(hs.hashes) and
	// hs.size() for the same measurement).
	lenBeforeRem0 := hs.size()
	hs.remove(*testHash1)
	lenAfterRem0 := hs.size()
	// testHash1 is not in the set, so removing it must not change the size.
	if lenAfterRem0 != lenBeforeRem0 {
		t.Errorf("After remove a hash which is not in, we should get: %v, but got: %v", lenBeforeRem0, lenAfterRem0)
	}
	lenBeforeRem1 := hs.size()
	hs.remove(*testHash0)
	lenAfterRem1 := hs.size()
	if lenAfterRem1 != lenBeforeRem1-1 {
		t.Errorf("After remove a hash, we should get: %v, but got: %v", lenBeforeRem1-1, lenAfterRem1)
	}
}
// TestHashSet_Contains verifies membership reporting for a present and an
// absent hash.
func TestHashSet_Contains(t *testing.T) {
	hs := &hashSet{
		hashes: make(map[chainhash.Hash]bool),
	}
	hs.add(*testHash0)
	// Use the boolean directly instead of comparing against false/true.
	if hs.contains(*testHash1) {
		t.Errorf("There is not a testHash1, we should get: %v, but got: %v", false, hs.contains(*testHash1))
	}
	if !hs.contains(*testHash0) {
		t.Errorf("There is a testHash0, we should get: %v, but got: %v", true, hs.contains(*testHash0))
	}
}
|
package common
import (
"fmt"
"image"
)
// Bus is a memory-bus endpoint addressed with 16-bit addresses carrying
// 8-bit data.
type Bus interface {
	Read(uint16) uint8
	Write(uint16, uint8)
}
// Ticker is notified once per tick via OnTick.
type Ticker interface {
	OnTick()
}
// Router registers address mappings onto a Bus.
// NOTE(review): the meaning of the two uint16 parameters (start/end vs.
// start/size) and the bool flag is not visible here — confirm against the
// implementation before relying on this description.
type Router interface {
	AddMapping(uint16, uint16, Bus, bool)
}
// Cartridge is a pluggable cartridge: it is wired onto the CPU and PPU
// address routers and exposes IRQ triggering plus registration of an IRQ
// callback.
type Cartridge interface {
	SetCPURouter(Router)
	SetPPURouter(Router)
	IRQ()
	SetIRQ(func())
}
// ComplexBus exposes separate PPU and CPU read/write address spaces on one
// component.
type ComplexBus interface {
	PPURead(uint16) uint8
	PPUWrite(uint16, uint8)
	CPURead(uint16) uint8
	CPUWrite(uint16, uint8)
}
// Screen consumes rendered frames as images.
type Screen interface {
	AddFrameBuffer(image.Image)
}
// Hex returns the lowercase hexadecimal (base-16) string form of a.
// Negative values are rendered with a leading minus sign.
func Hex(a int) string {
	formatted := fmt.Sprintf("%x", a)
	return formatted
}
// Echo is a package-level debug flag (semantics of what is echoed are not
// visible here — confirm at the usage sites).
// NOTE(review): mutable package-level state; confirm single-threaded access.
var Echo = false

// Terminate is a channel used to signal shutdown (presumably sent on or
// closed by the owner — verify against the consumers).
var Terminate = make(chan bool)
|
package mapper
import (
"bytes"
"fmt"
"github.com/kataras/golog"
"goiris/admin/app/web/vo"
"goiris/common"
"goiris/common/model"
"goiris/common/storage"
)
// RoleMapper provides database access for roles (casbin rules) and their
// associated menu bindings. It is stateless; the zero value is ready to use.
type RoleMapper struct{}
// Insert creates a new role and its menu bindings by delegating to
// createOrUpdate in create mode.
func (rm *RoleMapper) Insert(vo *vo.AcceptRoleVO) error {
	return rm.createOrUpdate(vo, true)
}
// FindOne returns the first casbin rule matching the given condition, or an
// error when the query fails (including gorm's record-not-found error).
func (rm *RoleMapper) FindOne(cond *model.CasbinRule) (*model.CasbinRule, error) {
	var (
		err    error
		result = model.CasbinRule{}
	)
	// cond is already a *model.CasbinRule; the original passed &cond, which
	// hands gorm a **CasbinRule that it does not interpret as conditions.
	if err = storage.G_DB.Where(cond).First(&result).Error; err != nil {
		return nil, err
	}
	return &result, nil
}
// UpdateOne updates an existing role and synchronizes its menu bindings by
// delegating to createOrUpdate in update mode.
func (rm *RoleMapper) UpdateOne(vo *vo.AcceptRoleVO) error {
	return rm.createOrUpdate(vo, false)
}
// Delete removes the given roles inside a single transaction: their casbin
// "g" rules within the configured domain and all of their role-menu
// bindings. Any failure rolls the whole transaction back.
func (rm *RoleMapper) Delete(roleKeys []string) error {
	tx := storage.G_DB.Begin()
	if err := tx.Delete(model.CasbinRule{}, "p_type='g' AND v1 in (?) AND v2=?", roleKeys, common.G_AppConfig.Domain).Error; err != nil {
		tx.Rollback()
		return err
	}
	if err := tx.Delete(model.RoleMenu{}, "role_key in (?)", roleKeys).Error; err != nil {
		tx.Rollback()
		return err
	}
	tx.Commit()
	return nil
}
// DomainList returns the distinct domains (column v2) present among casbin
// "g" rules.
func (rm *RoleMapper) DomainList() (result []string, err error) {
	rows, err := storage.G_DB.Raw("SELECT v2 FROM casbin_rule WHERE p_type='g' GROUP BY v2").Rows()
	if err != nil {
		// Check the error before touching rows: on failure rows may be nil,
		// and the original code deferred rows.Close() first, which would
		// panic with a nil pointer dereference.
		return nil, err
	}
	defer rows.Close()
	for rows.Next() {
		var domain string
		// Propagate scan errors instead of silently dropping them.
		if err = rows.Scan(&domain); err != nil {
			return nil, err
		}
		result = append(result, domain)
	}
	// Surface any iteration error encountered by rows.Next.
	return result, rows.Err()
}
// Group returns one casbin "g" rule per distinct role (grouped by v1)
// within the configured domain.
func (rm *RoleMapper) Group() ([]*model.CasbinRule, error) {
	rules := make([]*model.CasbinRule, 0)
	queryErr := storage.G_DB.Group("v1").Find(&rules, "p_type='g' AND v2=?", common.G_AppConfig.Domain).Error
	if queryErr != nil {
		return nil, queryErr
	}
	return rules, nil
}
// Table returns the role list ("g" rules grouped by role key) for the given
// domain (empty or "all" means every domain), populating each role's menu
// list and policy list. Any query failure rolls back and returns the error.
func (rm *RoleMapper) Table(vo *vo.RoleVO) ([]*model.CasbinRule, error) {
	var (
		err error
		menuList []*model.RoleMenu
		policyList []*model.CasbinRule
		result = make([]*model.CasbinRule, 0)
		tx = storage.G_DB.Begin()
	)
	if vo.Domain == "" || vo.Domain == "all" {
		if err = tx.Group("v1").Find(&result, "p_type='g'").Error; err != nil {
			goto ERR
		}
	} else {
		if err = tx.Group("v1").Find(&result, "p_type='g' AND v2=?", vo.Domain).Error; err != nil {
			goto ERR
		}
	}
	// Populate the per-role associations.
	for _, v := range result {
		// Menus bound to this role.
		// NOTE(review): menuList/policyList are reused across iterations —
		// this relies on Find replacing (not appending to) the slice; verify
		// against the gorm version in use.
		if err = tx.Table("role_menu").Find(&menuList, "role_key=?", v.V1).Error; err != nil {
			goto ERR
		}
		v.MenuList = menuList
		// Policies ("p" rules) attached to this role in its domain.
		if err = tx.Find(&policyList, "p_type='p' AND v0=? AND v1=?", v.V1, v.V2).Error; err != nil {
			goto ERR
		}
		v.PolicyList = policyList
	}
	tx.Commit()
	return result, nil
ERR:
	tx.Rollback()
	return nil, err
}
// RoleOfMenus returns every role-menu binding recorded for the given role
// key.
func (rm *RoleMapper) RoleOfMenus(roleKey string) ([]*model.RoleMenu, error) {
	var menus []*model.RoleMenu
	if err := storage.G_DB.Table("role_menu").Find(&menus, "role_key=?", roleKey).Error; err != nil {
		return nil, err
	}
	return menus, nil
}
// RoleOfPolicys returns every "p" casbin rule bound to roleKey within the
// given domain.
func (rm *RoleMapper) RoleOfPolicys(roleKey, domain string) ([]*model.CasbinRule, error) {
	var policies []*model.CasbinRule
	if err := storage.G_DB.Find(&policies, "p_type='p' AND v0=? AND v1=?", roleKey, domain).Error; err != nil {
		return nil, err
	}
	return policies, nil
}
// PolicyTable returns casbin "p" rules, filtered by domain unless the domain
// is empty or "all".
func (rm *RoleMapper) PolicyTable(vo *vo.RoleVO) ([]*model.CasbinRule, error) {
	var (
		err    error
		result = make([]*model.CasbinRule, 0)
	)
	// No transaction is involved here, so a plain error return replaces the
	// original `goto ERR`, whose label only returned the error anyway.
	if vo.Domain == "" || vo.Domain == "all" {
		err = storage.G_DB.Find(&result, "p_type='p'").Error
	} else {
		err = storage.G_DB.Find(&result, "p_type='p' AND v1=?", vo.Domain).Error
	}
	if err != nil {
		return nil, err
	}
	return result, nil
}
// createOrUpdate writes the role row (casbin_rule) and synchronizes the
// role-menu table — deleting the bindings in vo.DeleteMids and inserting the
// ones in vo.AddMids — all inside one transaction.
func (rm *RoleMapper) createOrUpdate(vo *vo.AcceptRoleVO, isCreate bool) error {
	tx := storage.G_DB.Begin()
	roleKey := vo.Role.V1
	// Role table.
	if isCreate {
		if err := tx.Model(&model.CasbinRule{}).Create(&vo.Role).Error; err != nil {
			tx.Rollback()
			return err
		}
		roleKey = vo.Role.V1
	} else {
		// Use tx here (the original used storage.G_DB), so the role update
		// is rolled back together with the menu changes on failure.
		if err := tx.Model(&model.CasbinRule{}).Update(&vo.Role).Error; err != nil {
			tx.Rollback()
			return err
		}
	}
	// Removed menu bindings (len() handles a nil slice, so no nil check).
	if len(vo.DeleteMids) > 0 {
		if err := tx.Delete(&model.RoleMenu{}, "role_key=? AND mid in (?)", roleKey, vo.DeleteMids).Error; err != nil {
			tx.Rollback()
			return err
		}
	}
	// Added menu bindings, inserted in one multi-row statement.
	if len(vo.AddMids) > 0 {
		var buffer bytes.Buffer
		buffer.WriteString("insert into `role_menu` (`role_key`,`mid`) values")
		for i, v := range vo.AddMids {
			// SECURITY(review): roleKey is interpolated into raw SQL; if it
			// can ever contain quotes this is injectable — prefer `(?,?)`
			// placeholders with tx.Exec args.
			if i == len(vo.AddMids)-1 {
				buffer.WriteString(fmt.Sprintf("('%s',%d);", roleKey, v))
			} else {
				buffer.WriteString(fmt.Sprintf("('%s',%d),", roleKey, v))
			}
		}
		if err := tx.Exec(buffer.String()).Error; err != nil {
			// The original logged this error but committed anyway, silently
			// losing the failure; now the transaction is rolled back and the
			// error propagated to the caller.
			golog.Errorf("更新角色菜单表信息失败。错误:%s", err)
			tx.Rollback()
			return err
		}
	}
	tx.Commit()
	return nil
}
|
package main
import (
"fmt"
"github.com/FactomProject/factom"
"time"
)
// ESKey is an entry-credit private key used to obtain the EC address below.
// SECURITY(review): hard-coded private keys in source — confirm these are
// well-known test/sandbox keys and not production secrets.
var ESKey string = "Es3gZoQbNd2p2nDDRtULkUaneoSJY1WTCQ7LSyNqHWZ2UkttuS1o"

// FSKey is a factoid private key (declared but not used in the visible code).
var FSKey string = "Fs2DNirmGDtnAZGXqca3XHkukTNMxoMGFFQxFA3bAjJnKzzsZBMH"
func main() {
factom.SetFactomdServer("localhost:8088")
factom.SetWalletServer("localhost:8098")
ec, err := factom.GetECAddress(ESKey)
if err != nil {
panic(err)
}
// chainID
exIds := make([][]byte, 2)
exIds[0] = []byte("hello")
exIds[6] = []byte(12)
contents := []byte("start")
firstEntry := CreateEntry(exIds, message)
chain := factom.NewChain(firstEntry)
|
/**
 * @description: slice initialization demo
 * @author Administrator
 * @date 2020/7/11 0011 10:44
 */
package main
import "fmt"
// main demonstrates the three ways to declare a slice and how they differ
// with respect to nil-ness and comparability.
func main() {
	// Declare slice variables.
	var a []string //declare a string slice (nil: no backing array allocated)
	var b = []int{} //declare and initialize an int slice with an empty literal (non-nil)
	var c = []bool{true, false} //declare and initialize a bool slice with elements
	//var d = []bool{true,false} //another bool slice, left commented out
	// Print the values and their nil-ness.
	fmt.Println(a)
	fmt.Println(b)
	fmt.Println(c)
	fmt.Println(a == nil) //true: declared but never initialized
	fmt.Println(b == nil) //false: the empty literal allocates a non-nil slice
	fmt.Println(c == nil) //false
	//fmt.Println(c == d) //slices are reference types and cannot be compared with ==
}
|
package main
import (
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"net/http"
)
// File paths of the CA bundle, certificate, and private key shared by both
// the TLS server and the TLS client below.
var (
	ca = "/tmp/myCA.pem"
	cert = "/tmp/myCA.cert"
	key = "/tmp/myCA.key"
)
// main starts the HTTPS server in a background goroutine and immediately
// runs the client against it.
func main() {
	// NOTE(review): there is no synchronization here — the client may run
	// before the server goroutine is listening; confirm this race is
	// acceptable for this demo.
	go HttpServer()
	HttpClient()
}
// HttpServer serves /hello over TLS on port 9191 using the cert/key file
// pair; any listen error is printed to stdout.
func HttpServer() {
	http.HandleFunc("/hello", HelloServer)
	if err := http.ListenAndServeTLS(":9191", cert, key, nil); err != nil {
		fmt.Println("ERR:", err)
	}
}
// HelloServer answers every request with a plain-text "Hello World." line.
func HelloServer(w http.ResponseWriter, req *http.Request) {
	w.Header().Set("Content-Type", "text/plain")
	fmt.Fprint(w, "Hello World.\n")
}
// HttpClient performs a GET against the local TLS server, verifying the
// server with the CA bundle and presenting the cert/key pair as the client
// certificate, then prints the response body. Errors are printed and abort
// the function.
func HttpClient() {
	clientCACert, err := ioutil.ReadFile(ca)
	if err != nil {
		fmt.Println("ERR", err)
		return
	}
	clientCertPool := x509.NewCertPool()
	clientCertPool.AppendCertsFromPEM(clientCACert)
	clientKeyPair, err := tls.LoadX509KeyPair(cert, key)
	if err != nil {
		// The original silently discarded this error and went on to use a
		// zero-value certificate.
		fmt.Println("ERR:", err)
		return
	}
	tlsConfig := &tls.Config{
		Certificates: []tls.Certificate{clientKeyPair},
		RootCAs:      clientCertPool,
	}
	transport := &http.Transport{
		TLSClientConfig: tlsConfig,
	}
	client := &http.Client{Transport: transport}
	req, err := http.NewRequest("GET", "https://localhost:9191/hello", nil)
	if err != nil {
		fmt.Println("ERR:", err)
		return
	}
	resp, err := client.Do(req)
	if err != nil {
		// Returning here also prevents the nil-pointer dereference the
		// original hit by reading resp.Body after a failed request.
		fmt.Println("ERR:", err)
		return
	}
	// Close the body so the transport can reuse the connection.
	defer resp.Body.Close()
	htmlData, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(string(htmlData))
}
|
package main
import (
"net/http"
"fmt"
"os"
)
// main serves the local ./www directory over HTTP on port 8000, exiting via
// checkError if the listener fails.
func main() {
	fileServer := http.FileServer(http.Dir("./www"))
	err := http.ListenAndServe(":8000", fileServer)
	checkError(1, err)
}
// checkError prints the error and exits the process with the given code when
// err is non-nil; a nil error is a no-op.
func checkError(code int, err error) {
	if err != nil {
		// Print the actual error instead of the bare word "error" so the
		// failure cause is visible to the operator.
		fmt.Println("error:", err)
		os.Exit(code)
	}
}
|
package views
import (
"net/http"
"satellity/internal/models"
"time"
"github.com/decred/base58"
"github.com/gofrs/uuid"
)
// ProductView is the JSON response body for a product.
type ProductView struct {
	Type string `json:"type"`
	ProductID string `json:"product_id"`
	// ShortID is a base58 encoding of the product UUID (see buildProduct).
	ShortID string `json:"short_id"`
	Name string `json:"name"`
	Body string `json:"body"`
	CoverURL string `json:"cover_url"`
	Source string `json:"source"`
	Tags []string `json:"tags"`
	ViewsCount int64 `json:"views_count"`
	UserID string `json:"user_id"`
	CreatedAt time.Time `json:"created_at"`
	UpdatedAt time.Time `json:"updated_at"`
	User UserView `json:"user"`
}
// buildProduct maps a models.Product onto its ProductView representation,
// deriving ShortID from the product UUID and attaching the user view when
// the product carries a user.
func buildProduct(p *models.Product) ProductView {
	view := ProductView{
		Type: "product",
		ProductID: p.ProductID,
		Name: p.Name,
		Body: p.Body,
		CoverURL: p.CoverURL,
		Source: p.Source,
		Tags: p.Tags,
		ViewsCount: p.ViewsCount,
		UserID: p.UserID,
		CreatedAt: p.CreatedAt,
		UpdatedAt: p.UpdatedAt,
	}
	// NOTE(review): the parse error is discarded — an invalid ProductID
	// yields the base58 encoding of the zero UUID rather than an error;
	// confirm this is intended.
	id, _ := uuid.FromString(p.ProductID)
	view.ShortID = base58.Encode(id.Bytes())
	if p.User != nil {
		view.User = buildUser(p.User)
	}
	return view
}
// RenderProduct responds with a single product rendered as a ProductView.
func RenderProduct(w http.ResponseWriter, r *http.Request, product *models.Product) {
	RenderResponse(w, r, buildProduct(product))
}
// RenderProducts responds with a slice of products, each rendered as a
// ProductView.
func RenderProducts(w http.ResponseWriter, r *http.Request, products []*models.Product) {
	views := make([]ProductView, 0, len(products))
	for _, product := range products {
		views = append(views, buildProduct(product))
	}
	RenderResponse(w, r, views)
}
|
// Copyright 2018 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package example
import (
"context"
"time"
"chromiumos/tast/local/a11y"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/browser"
"chromiumos/tast/local/chrome/browser/browserfixt"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/faillog"
"chromiumos/tast/local/chrome/uiauto/nodewith"
"chromiumos/tast/local/chrome/uiauto/role"
"chromiumos/tast/local/chrome/uiauto/state"
"chromiumos/tast/local/input"
"chromiumos/tast/testing"
)
// init registers the Keyboard test with two parameterized variants: Ash
// (default fixture) and Lacros.
func init() {
	testing.AddTest(&testing.Test{
		Func: Keyboard,
		LacrosStatus: testing.LacrosVariantExists,
		Desc: "Demonstrates injecting keyboard events",
		Contacts: []string{"ricardoq@chromium.org", "tast-owners@google.com"},
		Attr: []string{"group:mainline", "informational"},
		SoftwareDeps: []string{"chrome"},
		Params: []testing.Param{{
			Fixture: "chromeLoggedIn",
			Val: browser.TypeAsh,
		}, {
			Name: "lacros",
			Fixture: "lacros",
			ExtraSoftwareDeps: []string{"lacros"},
			Val: browser.TypeLacros,
		}},
	})
}
// Keyboard demonstrates injecting keyboard events into Chrome: it types into
// an autofocused text box, navigates via the omnibox, and presses a top-row
// accelerator key when the layout provides one.
func Keyboard(ctx context.Context, s *testing.State) {
	// Test Values
	const (
		html = "<input id='text' type='text' label='example.Keyboard.TextBox' autofocus>"
		inputText = "Hello, world!"
	)
	// 1. Boilerplate setup + create tab with input form
	cr := s.FixtValue().(chrome.HasChrome).Chrome()
	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		s.Fatal("Failed to create Test API connection: ", err)
	}
	ui := uiauto.New(tconn).WithTimeout(10 * time.Second)
	defer faillog.DumpUITreeOnError(ctx, s.OutDir(), s.HasError, tconn)
	// Setup a browser (Ash or Lacros per the test param) before opening a tab.
	br, closeBrowser, err := browserfixt.SetUp(ctx, cr, s.Param().(browser.Type))
	if err != nil {
		s.Fatal("Failed to open the browser: ", err)
	}
	defer closeBrowser(ctx)
	c, err := a11y.NewTabWithHTML(ctx, br, html)
	if err != nil {
		s.Fatal("Failed to open a new tab with HTML: ", err)
	}
	defer c.Close()
	// 2. Wait for focus on text box, then enter input text value
	s.Log("Waiting for focus")
	textbox := nodewith.NameContaining("label='example.Keyboard.TextBox'").Role(role.StaticText).Onscreen()
	if err := uiauto.Combine("Focus text box",
		ui.WaitUntilExists(textbox),
		// TODO(crbug.com/1291585): ui.FocusAndWait doesn't seem to work on Lacros. Timed out waiting for event.Focus to occur.
		// Since the input element has 'autofocus' attribute commenting the line below won't affect the test results.
		//ui.FocusAndWait(textbox),
	)(ctx); err != nil {
		s.Fatal("Failed to focus the text box: ", err)
	}
	s.Log("Finding and opening keyboard device")
	ew, err := input.Keyboard(ctx)
	if err != nil {
		s.Fatal("Failed to open keyboard device: ", err)
	}
	defer ew.Close()
	s.Logf("Injecting keyboard events for %q", inputText)
	if err = ew.Type(ctx, inputText); err != nil {
		s.Fatal("Failed to write events: ", err)
	}
	// 3. Assert inputted text matches expected value
	textboxWithContent := nodewith.State(state.Editable, true).Role(role.InlineTextBox).Name(inputText)
	if err := ui.WaitUntilExists(textboxWithContent)(ctx); err != nil {
		s.Fatal("Failed to verify text input: ", err)
	}
	// 4. Navigate to a data: URL through the omnibox and check the page body.
	const (
		pageText = "mittens"
		dataURL = "data:text/plain," + pageText
		bodyExpr = "document.body.innerText"
	)
	s.Logf("Navigating to %q via omnibox", dataURL)
	if err := ew.Accel(ctx, "Ctrl+L"); err != nil {
		s.Fatal("Failed to write events: ", err)
	}
	if err := ew.Type(ctx, dataURL+"\n"); err != nil {
		s.Fatal("Failed to write events: ", err)
	}
	mittensOutput := nodewith.Name(pageText).Role(role.InlineTextBox)
	if err := ui.WaitUntilExists(mittensOutput)(ctx); err != nil {
		s.Fatal("Failed to verify page text: ", err)
	}
	// Not all Chromebooks have the same layout for the function keys.
	layout, err := input.KeyboardTopRowLayout(ctx, ew)
	if err != nil {
		s.Fatal("Failed to get keyboard mapping: ", err)
	}
	key := layout.ZoomToggle
	// If the key is empty it means it is not mapped
	if key != "" {
		if err := ew.Accel(ctx, key); err != nil {
			s.Fatal("Failed to write events: ", err)
		}
	}
}
|
package handlers
import (
"github.com/benbarron/golang-auth-server/services"
"github.com/gofiber/fiber/v2"
)
// AuthRoutes bundles the services used by the authentication HTTP handlers.
type AuthRoutes struct {
	AuthService *services.AuthService
	Logger *services.LoggingService
	LocalsService *services.LocalsStorage
	JwtService *services.JwtService
}
// LoginRequest is the expected JSON body of the login endpoint.
type LoginRequest struct {
	Username string `json:"username"`
	Password string `json:"password"`
}
// NewAuthRoutes constructs an AuthRoutes handler wired with fresh service
// instances.
func NewAuthRoutes() *AuthRoutes {
	routes := new(AuthRoutes)
	routes.Logger = services.NewLoggingService("AuthRoutes")
	routes.AuthService = services.NewAuthService()
	routes.LocalsService = services.NewLocalsStorage()
	routes.JwtService = services.NewJwtService()
	return routes
}
// Login authenticates the username/password from the request body and, on
// success, responds 200 with the user plus freshly issued access and refresh
// tokens. A malformed body or bad credentials yield 400; a token-issuing
// failure yields 500.
func (r *AuthRoutes) Login(ctx *fiber.Ctx) error {
	request := new(LoginRequest)
	if err := ctx.BodyParser(request); err != nil {
		// The original ignored parse errors, silently treating a malformed
		// body as empty credentials.
		return ctx.Status(400).JSON(fiber.Map{
			"error": "Invalid request body",
		})
	}
	user, err := r.AuthService.Login(request.Username, request.Password)
	if err != nil {
		return ctx.Status(400).JSON(fiber.Map{
			"error": "Invalid credentials",
		})
	}
	// Token generation errors were previously discarded with _, returning
	// empty tokens to the client.
	accessToken, err := r.JwtService.GenerateAccessToken(user)
	if err != nil {
		return ctx.Status(500).JSON(fiber.Map{
			"error": "Could not issue token",
		})
	}
	refreshToken, err := r.JwtService.GenerateRefreshToken(user)
	if err != nil {
		return ctx.Status(500).JSON(fiber.Map{
			"error": "Could not issue token",
		})
	}
	return ctx.Status(200).JSON(fiber.Map{
		"user": user,
		"access-token": accessToken,
		"refresh-token": refreshToken,
	})
}
// ValidateUser responds 200 with the user stored in the request locals —
// presumably placed there by an earlier auth middleware; verify that the
// route is always registered behind one.
func (r *AuthRoutes) ValidateUser(ctx *fiber.Ctx) error {
	return ctx.Status(200).JSON(fiber.Map{
		"user": r.LocalsService.GetUser(ctx),
	})
}
|
package leetcode
import "sort"
// searchMatrix reports whether target exists in matrix, where each row is
// sorted ascending left-to-right and each column ascending top-to-bottom.
// It walks from the top-right corner, discarding one row or one column per
// step, giving O(m+n) time and O(1) space.
func searchMatrix(matrix [][]int, target int) bool {
	// Guard empty input: the original indexed matrix[0] unconditionally and
	// panicked on an empty matrix.
	if len(matrix) == 0 || len(matrix[0]) == 0 {
		return false
	}
	x, y := 0, len(matrix[0])-1
	for x < len(matrix) && y >= 0 {
		switch {
		case matrix[x][y] == target:
			return true
		case matrix[x][y] > target:
			y-- // everything below in this column is even larger
		default:
			x++ // everything left in this row is even smaller
		}
	}
	return false
}
// searchMatrix1 reports whether target exists in matrix by binary-searching
// each row independently with sort.SearchInts (O(m log n)).
func searchMatrix1(matrix [][]int, target int) bool {
	for rowIdx := 0; rowIdx < len(matrix); rowIdx++ {
		row := matrix[rowIdx]
		pos := sort.SearchInts(row, target)
		if pos != len(row) && row[pos] == target {
			return true
		}
	}
	return false
}
|
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-2020 Datadog, Inc.
package secrets
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"os/exec"
"strings"
"time"
)
// Package-level configuration for the external decryption command; set via
// SetSecretBackendCommand / SetSecretBackendArgs and consumed by
// NewSecretBackend.
var (
	secretBackendCommand = ""
	secretBackendArgs = []string{}
)
const (
	// defaultCmdOutputMaxSize caps how many bytes of the backend command's
	// stdout/stderr are retained (1 MiB).
	defaultCmdOutputMaxSize = 1024 * 1024
	// defaultCmdTimeout bounds how long the backend command may run.
	defaultCmdTimeout = 5 * time.Second
	// PayloadVersion represents the version of the SB API
	PayloadVersion = "1.0"
)
// SetSecretBackendCommand sets the package-level secretBackendCommand used
// by NewSecretBackend.
func SetSecretBackendCommand(command string) {
	secretBackendCommand = command
}
// SetSecretBackendArgs sets the package-level secretBackendArgs used by
// NewSecretBackend.
func SetSecretBackendArgs(args []string) {
	secretBackendArgs = args
}
// NewSecretBackend returns a new SecretBackend instance configured from the
// package-level command/args and the default output-size and timeout limits.
func NewSecretBackend() *SecretBackend {
	return &SecretBackend{
		cmd: secretBackendCommand,
		cmdArgs: secretBackendArgs,
		cmdOutputMaxSize: defaultCmdOutputMaxSize,
		cmdTimeout: defaultCmdTimeout,
	}
}
// Decrypt tries to decrypt a given string slice using the secret backend
// command, failing fast when no backend command has been configured.
func (sb *SecretBackend) Decrypt(encrypted []string) (map[string]string, error) {
	if !sb.isConfigured() {
		return nil, NewDecryptorError(errors.New("secret backend command not configured"), false)
	}
	return sb.fetchSecret(encrypted)
}
// fetchSecret tries to get secrets by executing the secret backend command:
// it sends a JSON payload of handles on stdin, expects a JSON map of
// handle -> Secret on stdout, and validates that every requested handle came
// back with a non-empty value and no per-handle error.
// NOTE(review): the boolean passed to NewDecryptorError presumably marks
// whether the failure is retriable — confirm against its definition.
func (sb *SecretBackend) fetchSecret(encrypted []string) (map[string]string, error) {
	handles, err := extractHandles(encrypted)
	if err != nil {
		return nil, NewDecryptorError(err, false)
	}
	payload := map[string]interface{}{
		"version": PayloadVersion,
		"secrets": handles,
	}
	jsonPayload, err := json.Marshal(payload)
	if err != nil {
		return nil, NewDecryptorError(err, false)
	}
	output, err := sb.execCommand(string(jsonPayload))
	if err != nil {
		return nil, NewDecryptorError(err, true)
	}
	secrets := map[string]Secret{}
	err = json.Unmarshal(output, &secrets)
	if err != nil {
		return nil, NewDecryptorError(err, true)
	}
	// Validate each requested handle in the command's response.
	decrypted := map[string]string{}
	for _, handle := range handles {
		secretHandle, found := secrets[handle]
		if !found {
			return nil, NewDecryptorError(fmt.Errorf("secret handle '%s' was not decrypted by the secret_backend_command", handle), false)
		}
		if secretHandle.ErrorMsg != "" {
			return nil, NewDecryptorError(fmt.Errorf("an error occurred while decrypting '%s': %s", handle, secretHandle.ErrorMsg), false)
		}
		if secretHandle.Value == "" {
			return nil, NewDecryptorError(fmt.Errorf("decrypted secret for '%s' is empty", handle), false)
		}
		decrypted[encFormat(handle)] = secretHandle.Value
	}
	return decrypted, nil
}
// execCommand executes the secret backend command, feeding inputPayload on
// stdin and returning the captured stdout. Both output streams are
// size-limited by cmdOutputMaxSize and the run is bounded by cmdTimeout.
func (sb *SecretBackend) execCommand(inputPayload string) ([]byte, error) {
	ctx, cancel := context.WithTimeout(context.Background(), sb.cmdTimeout)
	defer cancel()
	cmd := exec.CommandContext(ctx, sb.cmd, sb.cmdArgs...)
	cmd.Stdin = strings.NewReader(inputPayload)
	stdout := limitBuffer{
		buf: &bytes.Buffer{},
		max: sb.cmdOutputMaxSize,
	}
	stderr := limitBuffer{
		buf: &bytes.Buffer{},
		max: sb.cmdOutputMaxSize,
	}
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	if err := cmd.Run(); err != nil {
		// Distinguish a timeout from other failures for a clearer message.
		if errors.Is(ctx.Err(), context.DeadlineExceeded) {
			return nil, fmt.Errorf("error while running '%s': command timeout", sb.cmd)
		}
		return nil, fmt.Errorf("error while running '%s': %w", sb.cmd, err)
	}
	return stdout.buf.Bytes(), nil
}
// isConfigured returns true if a secret backend command has been set.
func (sb *SecretBackend) isConfigured() bool {
	return sb.cmd != ""
}
|
package util
import (
"encoding/binary"
"fmt"
"hash/fnv"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
v1alpha1 "github.com/jetstack/navigator/pkg/apis/navigator/v1alpha1"
hashutil "github.com/jetstack/navigator/pkg/util/hash"
)
const (
	// NodePoolNameLabelKey labels resources with the node pool they belong to.
	NodePoolNameLabelKey = "navigator.jetstack.io/elasticsearch-node-pool-name"
	// NodePoolHashAnnotationKey is the annotation under which the computed
	// node pool hash is stored.
	NodePoolHashAnnotationKey = "navigator.jetstack.io/elasticsearch-node-pool-hash"
)
// ComputeNodePoolHash returns a hash value calculated from the cluster-level
// fields that affect a node pool (plugins, images, sysctls) plus the node
// pool spec itself, mixed with collisionCount to allow deliberate re-hashing
// on hash collision. (The comment previously named a different function,
// "ComputeHash".)
func ComputeNodePoolHash(c *v1alpha1.ElasticsearchCluster, np *v1alpha1.ElasticsearchClusterNodePool, collisionCount *int32) string {
	hashVar := struct {
		Plugins []string
		ESImage v1alpha1.ElasticsearchImage
		PilotImage v1alpha1.ElasticsearchPilotImage
		Sysctl []string
		NodePool *v1alpha1.ElasticsearchClusterNodePool
	}{
		Plugins: c.Spec.Plugins,
		ESImage: c.Spec.Image,
		PilotImage: c.Spec.Pilot,
		Sysctl: c.Spec.Sysctl,
		NodePool: np,
	}
	hasher := fnv.New32a()
	hashutil.DeepHashObject(hasher, hashVar)
	// Add collisionCount in the hash if it exists.
	if collisionCount != nil {
		// NOTE(review): an 8-byte buffer is allocated but PutUint32 writes
		// only the first 4 bytes, so 4 trailing zero bytes are hashed too.
		// Shrinking the buffer would change all existing hashes (and thus
		// trigger resource churn), so it is deliberately left as-is.
		collisionCountBytes := make([]byte, 8)
		binary.LittleEndian.PutUint32(collisionCountBytes, uint32(*collisionCount))
		hasher.Write(collisionCountBytes)
	}
	return fmt.Sprintf("%d", hasher.Sum32())
}
// ClusterLabels returns the base label set identifying resources that belong
// to the given Elasticsearch cluster.
func ClusterLabels(c *v1alpha1.ElasticsearchCluster) map[string]string {
	return map[string]string{
		"app": "elasticsearch",
		ClusterNameLabelKey: c.Name,
	}
}
// NodePoolLabels returns the cluster labels extended with the node pool name
// (when non-empty) and a "true" marker label for each supplied role.
func NodePoolLabels(c *v1alpha1.ElasticsearchCluster, poolName string, roles ...v1alpha1.ElasticsearchClusterRole) map[string]string {
	poolLabels := ClusterLabels(c)
	if poolName != "" {
		poolLabels[NodePoolNameLabelKey] = poolName
	}
	for _, r := range roles {
		poolLabels[string(r)] = "true"
	}
	return poolLabels
}
// NodePoolResourceName returns the name used for resources owned by the
// given node pool: "<cluster-base-name>-<pool-name>".
func NodePoolResourceName(c *v1alpha1.ElasticsearchCluster, np *v1alpha1.ElasticsearchClusterNodePool) string {
	return fmt.Sprintf("%s-%s", ResourceBaseName(c), np.Name)
}
// SelectorForNodePool builds a label selector matching resources that belong
// to both the given cluster and the given node pool.
func SelectorForNodePool(c *v1alpha1.ElasticsearchCluster, np *v1alpha1.ElasticsearchClusterNodePool) (labels.Selector, error) {
	poolReq, err := labels.NewRequirement(NodePoolNameLabelKey, selection.Equals, []string{np.Name})
	if err != nil {
		return nil, err
	}
	sel, err := SelectorForCluster(c)
	if err != nil {
		return nil, err
	}
	return sel.Add(*poolReq), nil
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.