text stringlengths 11 4.05M |
|---|
// Sample program to show how only types that can have equality defined on them
// can be a map key.
package main
// user represents someone using the program.
// Both fields are comparable strings, so user values (and arrays of
// user) can serve as map keys; slices of user cannot.
type user struct {
	name    string
	surname string
}
// users defines a set of users.
// As a slice type it has no equality defined, so it is not a valid map key.
type users []user
func main() {
	// Declare and make a map that uses a slice as the key.
	// The line below is intentionally commented out: it does not
	// compile because slices have no equality operator, so they
	// cannot be used as map keys.
	// u := make(map[users]int)
	// ./example.go:17:12: invalid map key type users
}
|
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !codes
// +build !codes
package test_driver
import (
"math"
)
// isSpace reports whether c is a horizontal blank: an ASCII space or tab.
func isSpace(c byte) bool {
	switch c {
	case ' ', '\t':
		return true
	}
	return false
}
// isDigit reports whether c is an ASCII decimal digit ('0' through '9').
func isDigit(c byte) bool {
	return '0' <= c && c <= '9'
}
// myMin returns the smaller of a and b.
func myMin(a, b int) int {
	if b < a {
		return b
	}
	return a
}
// pow10 returns 10^x as an int32.
// It routes through math.Pow10 (float64), so the result is only exact
// while 10^x fits both a float64 mantissa and an int32; for x > 9 the
// int32 conversion overflows and the value is unspecified.
// NOTE(review): callers presumably pass 0 <= x <= 9 — confirm.
func pow10(x int) int32 {
	return int32(math.Pow10(x))
}
// Abs returns the absolute value of n.
// Like the branchless original, Abs(math.MinInt64) wraps around and
// yields math.MinInt64, since +2^63 is not representable in int64.
func Abs(n int64) int64 {
	if n < 0 {
		return -n
	}
	return n
}
// uintSizeTable is used as a table to do comparison to get uint length is faster than doing loop on division with 10
// Entry i holds the largest uint64 that has i decimal digits, so a
// value x has i digits exactly when uintSizeTable[i-1] < x <= uintSizeTable[i].
var uintSizeTable = [21]uint64{
	0, // redundant 0 here, so to make function StrLenOfUint64Fast to count from 1 and return i directly
	9, 99, 999, 9999, 99999,
	999999, 9999999, 99999999, 999999999, 9999999999,
	99999999999, 999999999999, 9999999999999, 99999999999999, 999999999999999,
	9999999999999999, 99999999999999999, 999999999999999999, 9999999999999999999,
	math.MaxUint64,
} // math.MaxUint64 is 18446744073709551615 and it has 20 digits
// StrLenOfUint64Fast returns the number of decimal digits of x.
// It walks uintSizeTable upward until the table entry is at least x,
// avoiding a division-by-10 loop. Index 0 is a filler, so the scan
// starts at 1 and the index itself is the digit count.
func StrLenOfUint64Fast(x uint64) int {
	i := 1
	for x > uintSizeTable[i] {
		i++
	}
	return i
}
// StrLenOfInt64Fast returns the number of characters needed to render
// x in decimal, counting a leading '-' for negative values.
func StrLenOfInt64Fast(x int64) int {
	n := StrLenOfUint64Fast(uint64(Abs(x)))
	if x < 0 {
		n++ // account for the "-" sign
	}
	return n
}
|
package main
import (
"fmt"
"image"
"image/color"
"image/png"
"os"
)
// main decodes the PNG named on the command line, counts its unique
// colors via countColors, and reports how many pixels merely repeat an
// earlier color.
func main() {
	if len(os.Args) < 2 {
		fmt.Println("Usage: colors filename")
		return
	}
	// BUG FIX: both errors below were previously discarded, so a bad
	// path or a non-PNG file crashed with a nil dereference.
	file, err := os.Open(os.Args[1])
	if err != nil {
		fmt.Println(err)
		return
	}
	defer file.Close()
	// Renamed from "image" to avoid shadowing the image package.
	img, err := png.Decode(file)
	if err != nil {
		fmt.Println(err)
		return
	}
	count, out := countColors(img)
	total := img.Bounds().Max.X * img.Bounds().Max.Y
	fmt.Printf("%d unique colors\n%d repeat colors, saved in %s\n", count, total-count, out)
}
func countColors(in image.Image) (int, string) {
var encoder png.Encoder
encoder.CompressionLevel = png.BestCompression
max := in.Bounds().Max
out := image.NewRGBA(image.Rect(0, 0, max.X, max.Y))
count := 0
var colors = make([]color.Color, max.X * max.Y)
for i, _ := range colors {
x := i % max.X
y := i / max.Y
color := in.At(x, y)
c := count+1
for c >= 0 {
if colors[c] == color {
out.Set(x, y, color)
break
}
c--
}
if (c < 0) { // color didn't exist
colors[count] = color
count++
}
}
filename := "repeats.png"
file, _ := os.Create(filename)
defer file.Close()
encoder.Encode(file, out)
return count, filename
}
|
package main
import (
"context"
"flag"
"log"
"net/http"
"os"
"os/signal"
"syscall"
"time"
"github.com/jsteenb2/health/internal/health"
"github.com/jsteenb2/health/internal/httpmw"
"github.com/jsteenb2/health/internal/server"
)
// main boots the health-check HTTP service: parse flags, prepare the
// file-backed endpoint repository, mount the API behind /api with
// recover and content-type middleware, serve until SIGINT/SIGTERM,
// then stop with a 10-second grace period.
func main() {
	var (
		bindAddr      = flag.String("bind", "127.0.0.1:8080", "address http server listens on")
		sslEnabled    = flag.Bool("ssl", false, "enable ssl")
		sslCert       = flag.String("sslcert", "", "ssl certification path")
		sslKey        = flag.String("sslkey", "", "ssl key path")
		filePath      = flag.String("repopath", "endpoints.gob", "file path to the persist the endpoints to disk")
		nukeEndpoints = flag.Bool("nuke", false, "nuke the existing endpoint checks")
	)
	flag.Parse()
	// -nuke wipes the persisted endpoint file; a missing file is fine.
	if *nukeEndpoints {
		if err := os.Remove(*filePath); err != nil && !os.IsNotExist(err) {
			log.Fatal(err)
		}
	}
	healthFileRepo, err := health.NewFileRepository(*filePath)
	if err != nil {
		log.Fatal(err)
	}
	healthSVC := health.NewSVC(healthFileRepo)
	var api http.Handler
	{
		// prefix the health handler with /api and use the behavior of http.StripPrefix
		// to provide a 404 if the route does not have a prefix of /api
		api = http.StripPrefix("/api", health.NewHTTPServer(healthSVC))
		api = httpmw.Recover()(api)
		api = httpmw.ContentType("application/json")(api)
	}
	svr := server.New(*bindAddr, api)
	log.Println("listening at: ", *bindAddr)
	// Serve in the background; main blocks below on the signal context.
	go func(sslEnabled bool, cert, key string) {
		if err := svr.Listen(sslEnabled, cert, key); err != nil {
			log.Println(err)
		}
	}(*sslEnabled, *sslCert, *sslKey)
	// Block until SIGINT/SIGTERM, then attempt a graceful stop.
	<-systemCtx().Done()
	if err := svr.Stop(10 * time.Second); err != nil {
		log.Fatal(err)
	}
	log.Println("server stopped")
}
// systemCtx returns a context that is canceled as soon as the process
// receives SIGINT or SIGTERM, providing a shutdown signal for main.
func systemCtx() context.Context {
	ctx, cancel := context.WithCancel(context.Background())
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM)
	go func() {
		defer cancel()
		<-signals
	}()
	return ctx
}
|
package main
import (
"gobot/modules"
"gobot/common"
)
func main() {
bot := common.BOT("irc.quakenet.org:6667", "gobot", "#Dolfik", []string{ "#dolfik", "#Lover.ee" })
bot.RegisterModule(modules.LOGGER(bot))
bot.RegisterModule(modules.NewWeatherModule(bot))
bot.RegisterModule(modules.NewQuoteModule(bot))
bot.RegisterModule(modules.NewFakeModule(bot))
bot.RegisterModule(modules.NewCryptocurrency(bot))
bot.RegisterModule(modules.NewJsEvalModule(bot))
//bot.RegisterModule(modules.NewRandomQuoteModule(bot))
bot.Loop();
}
|
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package helm_test
import (
"fmt"
"io"
"net/http"
"reflect"
"strings"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/golang/mock/gomock"
"helm.sh/helm/v3/pkg/action"
"helm.sh/helm/v3/pkg/chart"
"helm.sh/helm/v3/pkg/release"
"helm.sh/helm/v3/pkg/repo"
"github.com/kubernetes-sigs/minibroker/pkg/helm"
"github.com/kubernetes-sigs/minibroker/pkg/helm/mocks"
"github.com/kubernetes-sigs/minibroker/pkg/log"
nameutilmocks "github.com/kubernetes-sigs/minibroker/pkg/nameutil/mocks"
)
//go:generate mockgen -destination=./mocks/mock_testutil_chart.go -package=mocks github.com/kubernetes-sigs/minibroker/pkg/helm/testutil ChartInstallRunner,ChartUninstallRunner
//go:generate mockgen -destination=./mocks/mock_chart.go -package=mocks github.com/kubernetes-sigs/minibroker/pkg/helm ChartLoader,ChartHelmClientProvider
//go:generate mockgen -destination=./mocks/mock_http.go -package=mocks github.com/kubernetes-sigs/minibroker/pkg/helm HTTPGetter
//go:generate mockgen -destination=./mocks/mock_io.go -package=mocks io ReadCloser
// Suite covering the helm package's chart plumbing: ChartClient
// (install/uninstall orchestration), ChartManager (chart download and
// loading) and ChartHelm (helm action client provisioning). All
// collaborators are gomock doubles generated by the go:generate
// directives above.
var _ = Describe("Chart", func() {
	// A fresh controller per spec; Finish verifies every EXPECT().
	var ctrl *gomock.Controller
	BeforeEach(func() {
		ctrl = gomock.NewController(GinkgoT())
	})
	AfterEach(func() {
		ctrl.Finish()
	})
	Describe("ChartClient", func() {
		Describe("NewDefaultChartClient", func() {
			It("should return a new ChartClient", func() {
				client := helm.NewDefaultChartClient()
				Expect(client).NotTo(BeNil())
			})
		})
		// Install failure cases walk the pipeline stage by stage:
		// chart URL -> loader -> name generator -> client provider ->
		// install runner; each stub fails at a later stage.
		Describe("Install", func() {
			It("should fail when the chartDef.URLs is empty", func() {
				client := helm.NewChartClient(log.NewNoop(), nil, nil, nil)
				chartDef := &repo.ChartVersion{
					Metadata: &chart.Metadata{Name: "foo"},
					URLs:     make([]string, 0),
				}
				release, err := client.Install(chartDef, "", nil)
				Expect(err).To(Equal(fmt.Errorf("failed to install chart: missing chart URL for \"foo\"")))
				Expect(release).To(BeNil())
			})
			It("should fail when loading the chart from the chart manager fails", func() {
				chartURL := "https://foo/bar.tar.gz"
				chartLoader := mocks.NewMockChartLoader(ctrl)
				chartLoader.EXPECT().
					Load(chartURL).
					Return(nil, fmt.Errorf("error from chart loader")).
					Times(1)
				client := helm.NewChartClient(log.NewNoop(), chartLoader, nil, nil)
				chartDef := &repo.ChartVersion{URLs: []string{chartURL}}
				release, err := client.Install(chartDef, "", nil)
				Expect(err).To(Equal(fmt.Errorf("failed to install chart: error from chart loader")))
				Expect(release).To(BeNil())
			})
			It("should fail when the name generator fails", func() {
				chartRequested := &chart.Chart{Metadata: &chart.Metadata{Deprecated: false}}
				chartLoader := mocks.NewMockChartLoader(ctrl)
				chartLoader.EXPECT().
					Load(gomock.Any()).
					Return(chartRequested, nil).
					Times(1)
				nameGenerator := nameutilmocks.NewMockGenerator(ctrl)
				nameGenerator.EXPECT().
					Generate("foo-").
					Return("", fmt.Errorf("error from name generator")).
					Times(1)
				client := helm.NewChartClient(log.NewNoop(), chartLoader, nameGenerator, nil)
				chartDef := &repo.ChartVersion{
					Metadata: &chart.Metadata{Name: "foo"},
					URLs:     []string{"https://foo/bar.tar.gz"},
				}
				release, err := client.Install(chartDef, "", nil)
				Expect(err).To(Equal(fmt.Errorf("failed to install chart: error from name generator")))
				Expect(release).To(BeNil())
			})
			It("should fail when the generated name length exceeds the maximum value", func() {
				chartRequested := &chart.Chart{Metadata: &chart.Metadata{Deprecated: false}}
				// 54 characters: one past helm's 53-character limit.
				releaseName := strings.Repeat("x", 54)
				chartLoader := mocks.NewMockChartLoader(ctrl)
				chartLoader.EXPECT().
					Load(gomock.Any()).
					Return(chartRequested, nil).
					Times(1)
				nameGenerator := nameutilmocks.NewMockGenerator(ctrl)
				nameGenerator.EXPECT().
					Generate(gomock.Any()).
					Return(releaseName, nil).
					Times(1)
				client := helm.NewChartClient(log.NewNoop(), chartLoader, nameGenerator, nil)
				chartDef := &repo.ChartVersion{
					Metadata: &chart.Metadata{Name: "foo"},
					URLs:     []string{"https://foo/bar.tar.gz"},
				}
				release, err := client.Install(chartDef, "", nil)
				Expect(err).To(Equal(fmt.Errorf("failed to install chart: invalid release name %q: names cannot exceed 53 characters", releaseName)))
				Expect(release).To(BeNil())
			})
			It("should fail when getting the helm installer client fails", func() {
				releaseName := "foo-12345"
				namespace := "foo-namespace"
				chartRequested := &chart.Chart{Metadata: &chart.Metadata{Deprecated: false}}
				chartLoader := mocks.NewMockChartLoader(ctrl)
				chartLoader.EXPECT().
					Load(gomock.Any()).
					Return(chartRequested, nil).
					Times(1)
				nameGenerator := nameutilmocks.NewMockGenerator(ctrl)
				nameGenerator.EXPECT().
					Generate(gomock.Any()).
					Return(releaseName, nil).
					Times(1)
				chartHelmClientProvider := mocks.NewMockChartHelmClientProvider(ctrl)
				chartHelmClientProvider.EXPECT().
					ProvideInstaller(releaseName, namespace).
					Return(nil, fmt.Errorf("error from client provider")).
					Times(1)
				client := helm.NewChartClient(log.NewNoop(), chartLoader, nameGenerator, chartHelmClientProvider)
				chartDef := &repo.ChartVersion{
					Metadata: &chart.Metadata{Name: "foo"},
					URLs:     []string{"https://foo/bar.tar.gz"},
				}
				release, err := client.Install(chartDef, namespace, nil)
				Expect(err).To(Equal(fmt.Errorf("failed to install chart: error from client provider")))
				Expect(release).To(BeNil())
			})
			It("should fail when running the install client fails", func() {
				releaseName := "foo-12345"
				namespace := "foo-namespace"
				chartRequested := &chart.Chart{Metadata: &chart.Metadata{Deprecated: false}}
				values := map[string]interface{}{"bar": "baz"}
				chartLoader := mocks.NewMockChartLoader(ctrl)
				chartLoader.EXPECT().
					Load(gomock.Any()).
					Return(chartRequested, nil).
					Times(1)
				nameGenerator := nameutilmocks.NewMockGenerator(ctrl)
				nameGenerator.EXPECT().
					Generate(gomock.Any()).
					Return(releaseName, nil).
					Times(1)
				installRunner := mocks.NewMockChartInstallRunner(ctrl)
				installRunner.EXPECT().
					ChartInstallRunner(chartRequested, values).
					Return(nil, fmt.Errorf("error from client install runner")).
					Times(1)
				chartHelmClientProvider := mocks.NewMockChartHelmClientProvider(ctrl)
				chartHelmClientProvider.EXPECT().
					ProvideInstaller(releaseName, namespace).
					Return(installRunner.ChartInstallRunner, nil).
					Times(1)
				client := helm.NewChartClient(log.NewNoop(), chartLoader, nameGenerator, chartHelmClientProvider)
				chartDef := &repo.ChartVersion{
					Metadata: &chart.Metadata{Name: "foo"},
					URLs:     []string{"https://foo/bar.tar.gz"},
				}
				release, err := client.Install(chartDef, namespace, values)
				Expect(err).To(Equal(fmt.Errorf("failed to install chart: error from client install runner")))
				Expect(release).To(BeNil())
			})
			// Table-driven happy paths: deprecated and non-deprecated
			// charts must both install successfully.
			Describe("Succeeding", func() {
				tests := []struct {
					title      string
					deprecated bool
				}{
					{
						title:      "should install non-deprecated charts",
						deprecated: false,
					},
					{
						title:      "should install deprecated charts",
						deprecated: true,
					},
				}
				for _, t := range tests {
					// Copy the loop variable so each closure sees its own case.
					tt := t
					It(tt.title, func() {
						releaseName := "foo-12345"
						expectedRelease := &release.Release{Name: releaseName}
						namespace := "foo-namespace"
						chartRequested := &chart.Chart{Metadata: &chart.Metadata{Deprecated: tt.deprecated}}
						values := map[string]interface{}{"bar": "baz"}
						chartLoader := mocks.NewMockChartLoader(ctrl)
						chartLoader.EXPECT().
							Load(gomock.Any()).
							Return(chartRequested, nil).
							Times(1)
						nameGenerator := nameutilmocks.NewMockGenerator(ctrl)
						nameGenerator.EXPECT().
							Generate(gomock.Any()).
							Return(releaseName, nil).
							Times(1)
						installRunner := mocks.NewMockChartInstallRunner(ctrl)
						installRunner.EXPECT().
							ChartInstallRunner(chartRequested, values).
							Return(expectedRelease, nil).
							Times(1)
						chartHelmClientProvider := mocks.NewMockChartHelmClientProvider(ctrl)
						chartHelmClientProvider.EXPECT().
							ProvideInstaller(releaseName, namespace).
							Return(installRunner.ChartInstallRunner, nil).
							Times(1)
						client := helm.NewChartClient(log.NewNoop(), chartLoader, nameGenerator, chartHelmClientProvider)
						chartDef := &repo.ChartVersion{
							Metadata: &chart.Metadata{Name: "foo"},
							URLs:     []string{"https://foo/bar.tar.gz"},
						}
						release, err := client.Install(chartDef, namespace, values)
						Expect(err).NotTo(HaveOccurred())
						Expect(release).To(Equal(expectedRelease))
					})
				}
			})
		})
		Describe("Uninstall", func() {
			It("should fail when getting the helm uninstaller client fails", func() {
				releaseName := "foo-12345"
				namespace := "foo-namespace"
				chartHelmClientProvider := mocks.NewMockChartHelmClientProvider(ctrl)
				chartHelmClientProvider.EXPECT().
					ProvideUninstaller(namespace).
					Return(nil, fmt.Errorf("error from client provider")).
					Times(1)
				client := helm.NewChartClient(log.NewNoop(), nil, nil, chartHelmClientProvider)
				err := client.Uninstall(releaseName, namespace)
				Expect(err).To(Equal(fmt.Errorf("failed to uninstall chart: error from client provider")))
			})
			It("should fail when running the uninstall client fails", func() {
				releaseName := "foo-12345"
				namespace := "foo-namespace"
				uninstallRunner := mocks.NewMockChartUninstallRunner(ctrl)
				uninstallRunner.EXPECT().
					ChartUninstallRunner(releaseName).
					Return(nil, fmt.Errorf("error from client uninstall runner")).
					Times(1)
				chartHelmClientProvider := mocks.NewMockChartHelmClientProvider(ctrl)
				chartHelmClientProvider.EXPECT().
					ProvideUninstaller(namespace).
					Return(uninstallRunner.ChartUninstallRunner, nil).
					Times(1)
				client := helm.NewChartClient(log.NewNoop(), nil, nil, chartHelmClientProvider)
				err := client.Uninstall(releaseName, namespace)
				Expect(err).To(Equal(fmt.Errorf("failed to uninstall chart: error from client uninstall runner")))
			})
			It("should succeed uninstalling", func() {
				releaseName := "foo-12345"
				namespace := "foo-namespace"
				uninstallRunner := mocks.NewMockChartUninstallRunner(ctrl)
				uninstallRunner.EXPECT().
					ChartUninstallRunner(releaseName).
					Return(&release.UninstallReleaseResponse{}, nil).
					Times(1)
				chartHelmClientProvider := mocks.NewMockChartHelmClientProvider(ctrl)
				chartHelmClientProvider.EXPECT().
					ProvideUninstaller(namespace).
					Return(uninstallRunner.ChartUninstallRunner, nil).
					Times(1)
				client := helm.NewChartClient(log.NewNoop(), nil, nil, chartHelmClientProvider)
				err := client.Uninstall(releaseName, namespace)
				Expect(err).NotTo(HaveOccurred())
			})
		})
	})
	// ChartManager: HTTP download plus archive loading.
	Describe("ChartManager", func() {
		Describe("NewDefaultChartManager", func() {
			It("should satisfy the ChartLoader interface", func() {
				var chartManager helm.ChartLoader = helm.NewDefaultChartManager()
				Expect(chartManager).NotTo(BeNil())
			})
		})
		Describe("Load", func() {
			It("should fail when downloading the chart fails", func() {
				chartURL := "https://foo/bar.tar.gz"
				httpGetter := mocks.NewMockHTTPGetter(ctrl)
				httpGetter.EXPECT().
					Get(chartURL).
					Return(nil, fmt.Errorf("http error")).
					Times(1)
				chartManager := helm.NewChartManager(httpGetter, nil)
				chart, err := chartManager.Load(chartURL)
				Expect(err).To(Equal(fmt.Errorf("failed to load chart: http error")))
				Expect(chart).To(BeNil())
			})
			It("should fail when loading the chart fails", func() {
				chartURL := "https://foo/bar.tar.gz"
				// The response body must be closed even on load failure.
				resBody := mocks.NewMockReadCloser(ctrl)
				resBody.EXPECT().
					Close().
					Times(1)
				httpRes := &http.Response{Body: resBody}
				httpGetter := mocks.NewMockHTTPGetter(ctrl)
				httpGetter.EXPECT().
					Get(chartURL).
					Return(httpRes, nil).
					Times(1)
				loadChartArchive := func(body io.Reader) (*chart.Chart, error) {
					Expect(body).To(Equal(resBody))
					return nil, fmt.Errorf("load chart archive error")
				}
				chartManager := helm.NewChartManager(httpGetter, loadChartArchive)
				chart, err := chartManager.Load(chartURL)
				Expect(err).To(Equal(fmt.Errorf("failed to load chart: load chart archive error")))
				Expect(chart).To(BeNil())
			})
			It("should load a chart", func() {
				chartURL := "https://foo/bar.tar.gz"
				resBody := mocks.NewMockReadCloser(ctrl)
				resBody.EXPECT().
					Close().
					Times(1)
				httpRes := &http.Response{Body: resBody}
				httpGetter := mocks.NewMockHTTPGetter(ctrl)
				httpGetter.EXPECT().
					Get(chartURL).
					Return(httpRes, nil).
					Times(1)
				expectedChart := &chart.Chart{
					Metadata: &chart.Metadata{
						Name: "foo",
					},
				}
				loadChartArchive := func(body io.Reader) (*chart.Chart, error) {
					Expect(body).To(Equal(resBody))
					return expectedChart, nil
				}
				chartManager := helm.NewChartManager(httpGetter, loadChartArchive)
				chart, err := chartManager.Load(chartURL)
				Expect(err).NotTo(HaveOccurred())
				Expect(chart).To(Equal(expectedChart))
			})
		})
	})
	// ChartHelm: turns an action.Configuration into install/uninstall
	// runner funcs; identity of the returned func is checked by pointer.
	Describe("ChartHelm", func() {
		Describe("NewDefaultChartHelm", func() {
			It("should satisfy the ChartHelmClientProvider interface", func() {
				var chartHelm helm.ChartHelmClientProvider = helm.NewDefaultChartHelm()
				Expect(chartHelm).NotTo(BeNil())
			})
		})
		Describe("ProvideInstaller", func() {
			It("should fail when config provider fails", func() {
				namespace := "foo-namespace"
				configProvider := mocks.NewMockConfigProvider(ctrl)
				configProvider.EXPECT().
					ConfigProvider(namespace).
					Return(nil, fmt.Errorf("error from config provider")).
					Times(1)
				chartHelm := helm.NewChartHelm(configProvider.ConfigProvider, nil, nil)
				installer, err := chartHelm.ProvideInstaller("", namespace)
				Expect(err).To(Equal(fmt.Errorf("failed to provide chart installer: error from config provider")))
				Expect(installer).To(BeNil())
			})
			It("should provide an install runner client", func() {
				releaseName := "foo-12345"
				namespace := "foo-namespace"
				cfg := &action.Configuration{}
				expectedInstaller := &action.Install{
					ReleaseName: releaseName,
					Namespace:   namespace,
				}
				configProvider := mocks.NewMockConfigProvider(ctrl)
				configProvider.EXPECT().
					ConfigProvider(namespace).
					Return(cfg, nil)
				actionNewInstall := func(arg0 *action.Configuration) *action.Install {
					Expect(arg0).To(Equal(cfg))
					return expectedInstaller
				}
				chartHelm := helm.NewChartHelm(configProvider.ConfigProvider, actionNewInstall, nil)
				installer, err := chartHelm.ProvideInstaller(releaseName, namespace)
				Expect(err).NotTo(HaveOccurred())
				// Compare function pointers: the provided installer must
				// be exactly expectedInstaller.Run.
				Expect(
					reflect.ValueOf(installer).Pointer(),
				).To(Equal(
					reflect.ValueOf(expectedInstaller.Run).Pointer(),
				))
			})
		})
		Describe("ProvideUninstaller", func() {
			It("should fail when config provider fails", func() {
				namespace := "foo-namespace"
				configProvider := mocks.NewMockConfigProvider(ctrl)
				configProvider.EXPECT().
					ConfigProvider(namespace).
					Return(nil, fmt.Errorf("error from config provider"))
				chartHelm := helm.NewChartHelm(configProvider.ConfigProvider, nil, nil)
				uninstaller, err := chartHelm.ProvideUninstaller(namespace)
				Expect(err).To(Equal(fmt.Errorf("failed to provide chart uninstaller: error from config provider")))
				Expect(uninstaller).To(BeNil())
			})
			It("should provide an uninstall runner client", func() {
				namespace := "foo-namespace"
				cfg := &action.Configuration{}
				expectedUninstaller := &action.Uninstall{}
				configProvider := mocks.NewMockConfigProvider(ctrl)
				configProvider.EXPECT().
					ConfigProvider(namespace).
					Return(cfg, nil)
				actionNewUninstall := func(arg0 *action.Configuration) *action.Uninstall {
					Expect(arg0).To(Equal(cfg))
					return expectedUninstaller
				}
				chartHelm := helm.NewChartHelm(configProvider.ConfigProvider, nil, actionNewUninstall)
				uninstaller, err := chartHelm.ProvideUninstaller(namespace)
				Expect(err).NotTo(HaveOccurred())
				Expect(
					reflect.ValueOf(uninstaller).Pointer(),
				).To(Equal(
					reflect.ValueOf(expectedUninstaller.Run).Pointer(),
				))
			})
		})
	})
})
|
package l64
import (
"testing"
"gotest.tools/assert"
)
// Test_minPathSum verifies minPathSum (LeetCode 64) against the
// example grid from the problem statement.
func Test_minPathSum(t *testing.T) {
	tests := []struct {
		name  string
		want  int
		input [][]int
	}{
		{
			// FIX: the name field existed but was never populated or used.
			name: "3x3 grid from the problem statement",
			want: 7,
			input: [][]int{
				{1, 3, 1},
				{1, 5, 1},
				{4, 2, 1},
			},
		},
	}
	for _, tt := range tests {
		tt := tt // capture for the subtest closure
		// FIX: run each case as a named subtest so failures identify the case.
		t.Run(tt.name, func(t *testing.T) {
			assert.Equal(t, minPathSum(tt.input), tt.want)
		})
	}
}
|
package consul
import (
"log"
)
// Watch listening to the service in Consul
//
// Watch returns the channel on which available-server updates are
// delivered, starting one background watch goroutine per discovery
// config the first time it is called. It returns nil when no discovery
// configs are registered.
func (client *Client) Watch() <-chan AvailableServers {
	if len(client.discoveryConfigs) == 0 {
		return nil
	}
	// once guarantees the watch goroutines are started at most once,
	// so repeated Watch calls just return the same channel.
	client.once.Do(func() {
		for _, sdConfig := range client.discoveryConfigs {
			// sdConfig is passed as a parameter so each goroutine gets
			// its own copy instead of sharing the loop variable.
			go func(sdConfig *DiscoveryConfig) {
				if err := sdConfig.plan.Run(client.consulAddr); err != nil {
					log.Printf("Consul Watch Err: %+v\n", err)
				}
			}(sdConfig)
		}
	})
	return client.watchChan
}
|
package main
import (
"bufio"
"fmt"
"os"
"strconv"
)
// main reads the user's birth year from stdin and prints the age they
// will have reached by the end of 2021.
func main() {
	scanner := bufio.NewScanner(os.Stdin)
	fmt.Println("Enter the year of your birth: ")
	scanner.Scan()
	// BUG FIX: the parse error was discarded, silently treating any
	// garbage input as year 0.
	input, err := strconv.ParseInt(scanner.Text(), 0, 64)
	if err != nil {
		fmt.Println("please enter a valid year, e.g. 1990")
		return
	}
	// BUG FIX: the message said "end of 2020" while the arithmetic was
	// 2021-based ("wil" typo fixed too); the text now matches the math.
	fmt.Printf("You will be %d years old at the end of 2021", 2021-input)
}
|
package main
import (
	"bytes"
	"crypto/md5"
	"encoding/json"
	"fmt"
	"html/template"
	"io"
	"log"
	"net/http"
	"os"
	"path/filepath"
	"reflect"
	"strconv"
	"strings"
	"time"
)
// Product is a demo catalog item served by hotProducts.
// The underscore field names are non-idiomatic Go, but they cannot be
// renamed freely: with no json tags, the encoder derives the JSON keys
// ("Pid", "Product_name", ...) from these exact names.
type Product struct {
	Pid          int
	Product_name string
	Image_url    string
	Product_Code string
	Supplier_Id  int
}
// header models a partner-API request header; the json tags give the
// lowercase wire names used on serialization.
type header struct {
	Encryption  string `json:"encryption"`
	Timestamp   int64  `json:"timestamp"`
	Key         string `json:"key"`
	Partnercode int    `json:"partnercode"`
}
// ProductList is a named slice of Product, convenient for
// JSON-encoding a whole catalog at once.
type ProductList []Product
func sayHelloName(w http.ResponseWriter, r *http.Request) {
r.ParseForm()
fmt.Println(r.Form)
fmt.Println("path",r.URL.Path)
fmt.Println("scheme",r.URL.Scheme)
fmt.Println(r.Form["url_long"])
for k,v := range r.Form{
fmt.Println("key",k)
fmt.Println(v)
fmt.Println("val:",strings.Join(v,""))
}
fmt.Fprintf(w,"Hello bruce!")
}
// hotProducts serves a fixed demo product list as JSON.
func hotProducts(w http.ResponseWriter, r *http.Request) {
	plist := ProductList{
		Product{Pid: 123, Product_name: "XieZi", Image_url: "http://image.demo.com"},
		Product{Pid: 124, Product_name: "WaZi", Image_url: "http://image.demo.com"},
		Product{Pid: 125, Product_name: "YiFu", Image_url: "http://image.demo.com"},
	}
	jsonBytes, err := json.Marshal(plist)
	if err != nil {
		// BUG FIX: the error was printed and the (empty) payload written
		// anyway; report a server error and stop instead.
		fmt.Println(err)
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	// BUG FIX: fmt.Fprintf(w, string(jsonBytes)) used the payload as a
	// format string (go vet error; a literal '%' would corrupt output).
	w.Write(jsonBytes)
	fmt.Println(string(jsonBytes))
}
func login(w http.ResponseWriter, r *http.Request) {
fmt.Println("method:",r.Method)
if r.Method == "GET" {
t, _ := template.ParseFiles("login.gtpl")
log.Println(t.Execute(w,nil))
}else{
r.ParseForm()
r.Form.Set("uid","1234")
fmt.Println(r.Form)
fmt.Println("username:",r.Form["username"])
fmt.Println("password:",r.Form["password"])
fmt.Println("uid:",r.Form.Get("uid"))
}
}
func upload(w http.ResponseWriter,r *http.Request) {
fmt.Println("method:",r.Method)
if r.Method == "GET" {
crutime := time.Now().Unix()
h := md5.New()
io.WriteString(h,strconv.FormatInt(crutime,10))
token := fmt.Sprintf("%x",h.Sum(nil))
t,_ := template.ParseFiles("upload.gtpl")
t.Execute(w,token)
}else {
r.ParseMultipartForm(32 << 20)
file, handler, err := r.FormFile("uploadfile")
if err != nil {
fmt.Println(err)
return
}
defer file.Close()
fmt.Fprintf(w,"%v",handler.Header)
f,err := os.OpenFile("./test/"+handler.Filename,os.O_WRONLY|os.O_CREATE,0666)
if err != nil {
fmt.Println(err)
return
}
defer f.Close()
io.Copy(f,file)
}
}
// main runs a handful of reflection/JSON/md5 demos on stdout, then
// registers the HTTP handlers and serves on :9090.
func main() {
	// Demo: type inspection of a buffer pointer, strings and byte slices.
	var bodyBuf *bytes.Buffer = &bytes.Buffer{}
	fmt.Println(reflect.TypeOf(bodyBuf))
	var teststring string = "teststring"
	fmt.Println(teststring)
	fmt.Println(reflect.TypeOf(teststring))
	var bytestring []byte = []byte(teststring)
	fmt.Println(bytestring)
	fmt.Println(reflect.TypeOf(bytestring))
	// Demo: md5.Sum returns a fixed-size [16]byte array.
	var md5bytes [16]byte = md5.Sum(bytestring)
	fmt.Println(md5bytes)
	fmt.Println(reflect.TypeOf(md5bytes))
	var md5string string = fmt.Sprintf("%x", md5bytes)
	fmt.Println(md5string)
	// Demo: shifted constants (64 MiB / 32 MiB).
	fmt.Println(64 << 20)
	fmt.Println(32 << 20)
	fmt.Println("Start webserver...")
	http.HandleFunc("/", sayHelloName)
	http.HandleFunc("/hot/products", hotProducts)
	http.HandleFunc("/login", login)
	http.HandleFunc("/upload", upload)
	port := "9090"
	fmt.Println("Webserver running")
	fmt.Println("Please open http://localhost:" + port)
	// Demo: JSON encoding of a struct without tags (keys = field names).
	ptest := Product{Pid: 123, Product_name: "XieZi", Image_url: "http://image.demo.com"}
	jsonBytes, erro := json.Marshal(ptest)
	if erro != nil {
		fmt.Println(erro)
	}
	fmt.Println(string(jsonBytes))
	// Demo: JSON encoding of a tagged struct (lowercase wire names).
	headerTest := header{
		Encryption:  "sha",
		Timestamp:   1482463793,
		Key:         "2342874840784a81d4d9e335aaf76260",
		Partnercode: 10025,
	}
	jsons, errs := json.Marshal(headerTest) // marshalling to JSON yields a []byte
	if errs != nil {
		fmt.Println(errs.Error())
	}
	fmt.Println(string(jsons)) // convert the []byte to string for printing
	// Blocks serving until the listener fails.
	err := http.ListenAndServe(":"+port, nil)
	if err != nil {
		log.Fatal("ListenAndServe: ", err)
	}
}
package main
import (
"github.com/spf13/cobra"
"github.com/alejandroEsc/maas-cli/pkg/cli"
)
// listCmd builds the parent "list" cobra command. Invoked bare it only
// prints its usage; the real work lives in the machine and node
// subcommands attached below.
func listCmd() *cobra.Command {
	opts := &cli.ListOptions{}
	cmd := &cobra.Command{
		Use:   "list",
		Short: "list MAAS resources.",
		Long:  "",
		Run: func(cmd *cobra.Command, args []string) {
			cmd.Usage()
		},
	}
	// Shared MAAS connection flags are bound to the command's flag set.
	bindCommonMAASFlags(&opts.MAASOptions, cmd.Flags())
	cmd.AddCommand(listMachinesCmd())
	cmd.AddCommand(listNodesCmd())
	return cmd
}
|
package simulation
import (
"bytes"
"fmt"
"github.com/cosmos/cosmos-sdk/codec"
"github.com/cosmos/cosmos-sdk/types/kv"
"github.com/irisnet/irismod/modules/nft/types"
)
// NewDecodeStore returns a decoder that unmarshals a KVPair's Value to
// the corresponding NFT-module type and renders both pairs for
// simulation diffing. It panics on an unknown key prefix.
func NewDecodeStore(cdc codec.Marshaler) func(kvA, kvB kv.Pair) string {
	return func(kvA, kvB kv.Pair) string {
		// Dispatch on the first key byte, which identifies the store bucket.
		switch {
		case bytes.Equal(kvA.Key[:1], types.PrefixNFT):
			var nftA, nftB types.BaseNFT
			cdc.MustUnmarshalBinaryBare(kvA.Value, &nftA)
			cdc.MustUnmarshalBinaryBare(kvB.Value, &nftB)
			return fmt.Sprintf("%v\n%v", nftA, nftB)
		case bytes.Equal(kvA.Key[:1], types.PrefixOwners):
			idA := types.MustUnMarshalTokenID(cdc, kvA.Value)
			idB := types.MustUnMarshalTokenID(cdc, kvB.Value)
			return fmt.Sprintf("%v\n%v", idA, idB)
		case bytes.Equal(kvA.Key[:1], types.PrefixCollection):
			supplyA := types.MustUnMarshalSupply(cdc, kvA.Value)
			supplyB := types.MustUnMarshalSupply(cdc, kvB.Value)
			return fmt.Sprintf("%d\n%d", supplyA, supplyB)
		case bytes.Equal(kvA.Key[:1], types.PrefixDenom):
			var denomA, denomB types.Denom
			cdc.MustUnmarshalBinaryBare(kvA.Value, &denomA)
			cdc.MustUnmarshalBinaryBare(kvB.Value, &denomB)
			return fmt.Sprintf("%v\n%v", denomA, denomB)
		default:
			panic(fmt.Sprintf("invalid %s key prefix %X", types.ModuleName, kvA.Key[:1]))
		}
	}
}
|
package debugger
import "fmt"
// Example looks up the running ccpos process by its path via ps, then
// attaches and enables a breakpoint at a hard-coded address in it.
func Example() {
	pid, err := FindPidByPs("/home/oneu/ccpos/dist/ccpos")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(pid)
	// NOTE(review): 0x014d7da8 is presumably a code address inside the
	// target binary — confirm it matches the deployed build.
	bp := BreakPoint(pid, 0x014d7da8)
	bp.Enable()
}
|
package types
import (
// HOFSTADTER_START import
// HOFSTADTER_END import
)
/*
Name: User
About: A user of the blog site
*/
// HOFSTADTER_START start
// HOFSTADTER_END start
// NewUser returns a pointer to a zero-valued User.
func NewUser() *User {
	return &User{}
}
// NewAuthBasicUserSignupRequest returns a pointer to a zero-valued
// AuthBasicUserSignupRequest.
func NewAuthBasicUserSignupRequest() *AuthBasicUserSignupRequest {
	return &AuthBasicUserSignupRequest{}
}
// NewAuthBasicUserSignupResponse returns a pointer to a zero-valued
// AuthBasicUserSignupResponse.
func NewAuthBasicUserSignupResponse() *AuthBasicUserSignupResponse {
	return &AuthBasicUserSignupResponse{}
}
// NewAuthBasicUserLoginRequest returns a pointer to a zero-valued
// AuthBasicUserLoginRequest.
func NewAuthBasicUserLoginRequest() *AuthBasicUserLoginRequest {
	return &AuthBasicUserLoginRequest{}
}
// NewAuthBasicUserLoginResponse returns a pointer to a zero-valued
// AuthBasicUserLoginResponse.
func NewAuthBasicUserLoginResponse() *AuthBasicUserLoginResponse {
	return &AuthBasicUserLoginResponse{}
}
// HOFSTADTER_BELOW
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//740. Delete and Earn
//Given an array nums of integers, you can perform operations on the array.
//In each operation, you pick any nums[i] and delete it to earn nums[i] points. After, you must delete every element equal to nums[i] - 1 or nums[i] + 1.
//You start with 0 points. Return the maximum number of points you can earn by applying such operations.
//Example 1:
//Input: nums = [3, 4, 2]
//Output: 6
//Explanation:
//Delete 4 to earn 4 points, consequently 3 is also deleted.
//Then, delete 2 to earn 2 points. 6 total points are earned.
//Example 2:
//Input: nums = [2, 2, 3, 3, 3, 4]
//Output: 9
//Explanation:
//Delete 3 to earn 3 points, deleting both 2's and the 4.
//Then, delete 3 again to earn 3 points, and 3 again to earn 3 points.
//9 total points are earned.
//Note:
//The length of nums is at most 20000.
//Each element nums[i] is an integer in the range [1, 10000].
//func deleteAndEarn(nums []int) int {
//}
// Time Is Money |
package helpers
import (
"bufio"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/url"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/client"
"github.com/docker/go-connections/nat"
"github.com/evanxg852000/eserveless/internal/database"
"github.com/mholt/archiver/v3"
// "github.com/docker/docker/pkg/archive"
"github.com/go-git/go-git/v5"
"github.com/phayes/freeport"
"github.com/sirupsen/logrus"
"gopkg.in/yaml.v2"
)
// CloneProjectRepo clones the repository from a git repo url into
// directory (recursing into submodules) and returns the hash of the
// commit that HEAD points at.
func CloneProjectRepo(directory string, url string) (string, error) {
	rep, err := git.PlainClone(directory, false, &git.CloneOptions{
		URL:               url,
		RecurseSubmodules: git.DefaultSubmoduleRecursionDepth,
	})
	if err != nil {
		return "", err
	}
	// retrieve the branch pointed at by HEAD
	ref, err := rep.Head()
	if err != nil {
		return "", err
	}
	return ref.Hash().String(), nil
}
// ReadProjectManifest reads and decodes the project's eserveless
// configuration manifest (.eserveless.yaml) found in directory.
func ReadProjectManifest(directory string) (*database.Manifest, error) {
	manifestFile := path.Join(directory, ".eserveless.yaml")
	content, err := ioutil.ReadFile(manifestFile)
	if err != nil {
		return nil, err
	}
	manifest := database.Manifest{}
	err = yaml.Unmarshal(content, &manifest)
	if err != nil {
		return nil, err
	}
	return &manifest, nil
}
// PrepareDockerImage will prepare a fresh copy of the repository
// and create all necessary files from templates in order to build a
// docker image. On success it returns the populated build directory;
// on failure the partially-populated temp directory is removed.
func PrepareDockerImage(resDir string, repoDir string, f *database.Function) (string, error) {
	// create a temp directory to hold the repo
	buildDir, err := ioutil.TempDir("", "docker-builds-")
	if err != nil {
		return "", err
	}
	// BUG FIX: the temp directory used to leak on every subsequent
	// error path; fail removes it before propagating the error.
	fail := func(err error) (string, error) {
		os.RemoveAll(buildDir)
		return "", err
	}
	// make copy of the repo
	if err = CopyDir(repoDir, buildDir); err != nil {
		return fail(err)
	}
	// create the main file from the language template, substituting the
	// function-name placeholder
	mainCode, err := ioutil.ReadFile(path.Join(resDir, f.GetCodeTemplateFileName()))
	if err != nil {
		return fail(err)
	}
	mainCode = []byte(strings.ReplaceAll(string(mainCode), "{%functionName%}", f.Name))
	err = ioutil.WriteFile(
		path.Join(buildDir, f.GetCodeFileName()),
		mainCode,
		0644,
	)
	if err != nil {
		return fail(err)
	}
	// copy the docker file template alongside the generated code
	err = CopyFile(
		path.Join(resDir, f.GetDockerTemplateFileName()),
		path.Join(buildDir, f.GetDockerFileName()),
	)
	if err != nil {
		return fail(err)
	}
	return buildDir, nil
}
// GetDockerBuildContext archives every regular file under src (skipping
// anything whose path contains ".git") into dest — the archive format
// is chosen from dest's extension — and returns the archive opened for
// reading, suitable as a Docker build context.
func GetDockerBuildContext(src, dest string) (*os.File, error) {
	var files []string
	err := filepath.Walk(src, func(path string, info os.FileInfo, err error) error {
		// BUG FIX: the incoming walk error was ignored, so a failed
		// directory read led to a nil-info dereference below.
		if err != nil {
			return err
		}
		if info.IsDir() || strings.Contains(path, ".git") {
			return nil
		}
		files = append(files, path)
		return nil
	})
	if err != nil {
		return nil, err
	}
	// archive format is determined by file extension
	if err = archiver.Archive(files, dest); err != nil {
		return nil, err
	}
	return os.Open(dest)
}
// BuildDockerImage builds a docker image (tagged f.Image) from the prepared
// source directory srcDir and returns the accumulated build log output.
// The build context tarball is written to srcDir/buildCtx.tar.
// NOTE(review): the tarball is left on disk after the build — confirm
// whether the caller cleans it up.
func BuildDockerImage(srcDir string, f *database.Function) (string, error) {
	buildCtx, err := GetDockerBuildContext(srcDir, path.Join(srcDir, "buildCtx.tar"))
	if err != nil {
		return "", err
	}
	defer buildCtx.Close()
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		return "", err
	}
	// Cap the entire build at 5 minutes.
	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(300)*time.Second)
	defer cancel()
	buildOptions := types.ImageBuildOptions{
		SuppressOutput: false,
		PullParent: true,
		Dockerfile: "Dockerfile",
		Tags: []string{f.Image},
		NoCache: true,
		Remove: true,
	}
	buildResponse, err := cli.ImageBuild(ctx, buildCtx, buildOptions)
	if err != nil {
		return "", err
	}
	defer buildResponse.Body.Close()
	// The daemon streams newline-separated JSON messages; concatenate the
	// human-readable "stream" field of each into the returned log string.
	reader := bufio.NewReader(buildResponse.Body)
	logs := ""
	for {
		line, _, err := reader.ReadLine()
		if err != nil && err == io.EOF {
			break
		} else if err != nil {
			return logs, err
		}
		data := make(map[string]string)
		err = json.Unmarshal(line, &data)
		// Lines that are not flat JSON objects of strings are skipped.
		if err == nil {
			logs = logs + data["stream"]
		}
	}
	return logs, nil
}
// RunDockerImage starts a container for the function image with its port
// 8000 mapped to a free host port, waits for the port to accept TCP
// connections before invoking onRunningCallback with the local URL, and in
// the background dumps the container logs and stops the container after a
// 30 second grace period.
func RunDockerImage(f *database.Function, onRunningCallback func(string)) error {
	ctx := context.Background()
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		return err
	}
	// choose a free host port to bind the container's fixed port 8000 to
	port, err := freeport.GetFreePort()
	if err != nil {
		return err
	}
	hostBinding := nat.PortBinding{
		HostIP: "0.0.0.0",
		HostPort: strconv.Itoa(port),
	}
	containerPort, err := nat.NewPort("tcp", "8000")
	if err != nil {
		return errors.New("Unable to get the port")
	}
	portBinding := nat.PortMap{containerPort: []nat.PortBinding{hostBinding}}
	cont, err := cli.ContainerCreate(ctx, &container.Config{
		Image: f.Image,
		Tty: true,
	}, &container.HostConfig{
		PortBindings: portBinding,
	}, nil, nil, "")
	if err != nil {
		return err
	}
	err = cli.ContainerStart(ctx, cont.ID, types.ContainerStartOptions{})
	if err != nil {
		return err
	}
	fmt.Printf("Container %s is started.", cont.ID)
	if onRunningCallback != nil {
		err = WaitHostPort(
			fmt.Sprintf("0.0.0.0:%s", strconv.Itoa(port)),
			time.Duration(30)*time.Second,
		)
		if err != nil {
			return errors.New("Could not run the cloud function")
		}
		// call callback when container is ready
		onRunningCallback(fmt.Sprintf("http://0.0.0.0:%s", strconv.Itoa(port)))
	}
	go func() {
		// 30 seconds is arbitrary; ideally this would run until the
		// function's own timeout elapses.
		time.Sleep(time.Duration(30) * time.Second)
		out, err := cli.ContainerLogs(ctx, cont.ID, types.ContainerLogsOptions{ShowStdout: true})
		if err != nil {
			// BUG FIX: previously out was dereferenced (deferred Close and
			// io.Copy) even when ContainerLogs failed, which panics on a
			// nil reader.
			logrus.Error(err.Error())
		} else {
			buf := new(strings.Builder)
			_, err = io.Copy(buf, out)
			if err != nil && err != io.EOF {
				logrus.Error(err.Error())
			} else {
				logrus.Info(buf.String())
			}
			out.Close()
		}
		// BUG FIX: the stop error itself is now checked; previously the
		// stale io.Copy error was re-checked after an unchecked stop call.
		if err := cli.ContainerStop(ctx, cont.ID, nil); err != nil {
			logrus.Error(err.Error())
		}
	}()
	return nil
}
// CopyFile copies the contents of src to dst, creating or truncating dst,
// and then applies src's permission bits to dst.
func CopyFile(src, dst string) error {
	in, err := os.Open(src)
	if err != nil {
		return err
	}
	defer in.Close()
	out, err := os.Create(dst)
	if err != nil {
		return err
	}
	defer out.Close()
	if _, err := io.Copy(out, in); err != nil {
		return err
	}
	info, err := os.Stat(src)
	if err != nil {
		return err
	}
	// Mirror the source file's mode onto the destination.
	return os.Chmod(dst, info.Mode())
}
// CopyDir copies a whole directory tree from src to dst recursively.
// Failures while copying individual entries are printed and skipped
// (best-effort); only failures to stat/create the top-level directories are
// returned.
func CopyDir(src string, dst string) error {
	info, err := os.Stat(src)
	if err != nil {
		return err
	}
	if err := os.MkdirAll(dst, info.Mode()); err != nil {
		return err
	}
	entries, err := ioutil.ReadDir(src)
	if err != nil {
		return err
	}
	for _, entry := range entries {
		from := path.Join(src, entry.Name())
		to := path.Join(dst, entry.Name())
		var copyErr error
		if entry.IsDir() {
			copyErr = CopyDir(from, to)
		} else {
			copyErr = CopyFile(from, to)
		}
		if copyErr != nil {
			fmt.Println(copyErr)
		}
	}
	return nil
}
// ValidateGithubRepoURL validates that repoURL is a github.com repository
// URL of the form https://github.com/<owner>/<repo> and returns the
// normalized (trimmed) URL together with a project name "<owner>-<repo>".
func ValidateGithubRepoURL(repoURL string) (string, string, error) {
	repoURL = strings.TrimSpace(repoURL)
	repoURL = strings.Trim(repoURL, "/")
	_, err := url.ParseRequestURI(repoURL)
	if err != nil {
		return "", "", err
	}
	// A valid URL splits into ["https:", "", "github.com", owner, repo].
	parts := strings.Split(repoURL, "/")
	if len(parts) != 5 || parts[2] != "github.com" {
		// BUG FIX: the message used to read "No a valid github url".
		return "", "", errors.New("not a valid github url")
	}
	return repoURL, fmt.Sprintf("%s-%s", parts[3], parts[4]), nil
}
// WaitHostPort polls host ("addr:port") with TCP dials every 500ms until a
// connection succeeds, returning nil. If the accumulated wait reaches
// timeout before a dial succeeds, a timeout error is returned.
func WaitHostPort(host string, timeout time.Duration) error {
	const step = 500 * time.Millisecond
	var waited time.Duration
	for {
		conn, err := net.Dial("tcp", host)
		if err == nil && conn != nil {
			_ = conn.Close()
			return nil
		}
		time.Sleep(step)
		waited += step
		if waited >= timeout {
			return errors.New("Connection timeout")
		}
	}
}
|
package object
import (
"encoding/binary"
"unsafe"
"github.com/tidwall/geojson"
"github.com/tidwall/geojson/geometry"
"github.com/tidwall/tile38/internal/field"
)
// pointObject is the concrete layout behind an Object of kind opoint: the
// Object header followed inline by a simple point geometry.
type pointObject struct {
	base Object
	pt geojson.SimplePoint
}
// geoObject is the concrete layout behind an Object of kind ogeo: the
// Object header followed by an arbitrary geojson geometry.
type geoObject struct {
	base Object
	geo geojson.Object
}
// Object kind tags, stored as the first byte of Object.head.
const opoint = 1
const ogeo = 2
// Object is a compact spatial object header. head packs the tuple
// (kind, expires, id): kind is one byte, expires is a varint (a single
// zero byte when unset), and the remaining bytes are the id. The geometry
// lives in the larger struct (pointObject or geoObject) that embeds this
// Object as its first field.
type Object struct {
	head string // tuple (kind,expires,id)
	fields field.List
}
// geo returns the geometry payload of the object, or nil for a nil object
// or unknown kind. It relies on Object being the first field of both
// pointObject and geoObject, so an *Object can be reinterpreted as the
// enclosing struct via unsafe.Pointer.
func (o *Object) geo() geojson.Object {
	if o != nil {
		switch o.head[0] {
		case opoint:
			return &(*pointObject)(unsafe.Pointer(o)).pt
		case ogeo:
			return (*geoObject)(unsafe.Pointer(o)).geo
		}
	}
	return nil
}
// uvarint decodes an unsigned varint from the front of s, returning the
// value and the number of bytes consumed. It is a slightly modified, faster
// version of binary.Uvarint; it lacks overflow checks, which are not needed
// for our use. A truncated encoding yields (0, 0).
func uvarint(s string) (uint64, int) {
	var x uint64
	var shift uint
	for i := range s {
		b := s[i]
		if b < 0x80 {
			// Final byte: fold in its 7 payload bits and report length.
			return x | uint64(b)<<shift, i + 1
		}
		x |= uint64(b&0x7f) << shift
		shift += 7
	}
	return 0, 0
}
// varint decodes a zig-zag-encoded signed varint from the front of s,
// returning the value and the number of bytes consumed.
func varint(s string) (int64, int) {
	ux, n := uvarint(s)
	if ux&1 == 0 {
		// Even zig-zag values map to non-negative integers.
		return int64(ux >> 1), n
	}
	return ^int64(ux >> 1), n
}
// ID returns the object id encoded in the head tuple. A zero byte at
// position 1 means "no expiry set" (makeHead writes a single zero byte in
// that case), so the id starts at byte 2; otherwise the expiry varint is
// skipped first.
func (o *Object) ID() string {
	if o.head[1] == 0 {
		return o.head[2:]
	}
	_, n := varint(o.head[1:])
	return o.head[1+n:]
}
// Fields returns the object's field list.
func (o *Object) Fields() field.List {
	return o.fields
}
// Expires returns the expiry timestamp encoded in the head tuple, or zero
// when no expiry was set.
func (o *Object) Expires() int64 {
	ex, _ := varint(o.head[1:])
	return ex
}
// Rect returns the bounding rectangle of the object's geometry, or the
// zero rect when there is no geometry.
func (o *Object) Rect() geometry.Rect {
	ogeo := o.geo()
	if ogeo == nil {
		return geometry.Rect{}
	}
	return ogeo.Rect()
}
// Geo returns the object's geometry, or nil when it has none.
func (o *Object) Geo() geojson.Object {
	return o.geo()
}
// String returns the geometry's string form, or "" when the object has no
// geometry.
func (o *Object) String() string {
	ogeo := o.geo()
	if ogeo == nil {
		return ""
	}
	return ogeo.String()
}
// IsSpatial reports whether the object's geometry implements the
// geojson.Spatial interface.
func (o *Object) IsSpatial() bool {
	_, ok := o.geo().(geojson.Spatial)
	return ok
}
// Weight estimates the in-memory cost of the object: the id length, plus
// 16 bytes per geometry point for spatial objects (or the string length
// for non-spatial ones), plus the weight of the field list.
func (o *Object) Weight() int {
	var weight int
	weight += len(o.ID())
	ogeo := o.geo()
	if ogeo != nil {
		if o.IsSpatial() {
			weight += ogeo.NumPoints() * 16
		} else {
			weight += len(ogeo.String())
		}
	}
	weight += o.Fields().Weight()
	return weight
}
// makeHead packs the (kind, expires, id) tuple into a single string: one
// kind byte, then the expiry as a signed varint — or a single zero byte
// when expires is zero, which ID and Expires rely on — then the raw id
// bytes. The final conversion reinterprets the byte slice as a string
// without copying; this is safe only because the slice never escapes and is
// not mutated afterwards.
func makeHead(kind byte, id string, expires int64) string {
	var exb [20]byte
	exn := 1
	if expires != 0 {
		exn = binary.PutVarint(exb[:], expires)
	}
	n := 1 + exn + len(id)
	head := make([]byte, n)
	head[0] = kind
	copy(head[1:], exb[:exn])
	copy(head[1+exn:], id)
	return *(*string)(unsafe.Pointer(&head))
}
// newPoint allocates a pointObject and returns a pointer to its embedded
// Object header; geo() later recovers the point via pointer
// reinterpretation.
func newPoint(id string, pt geometry.Point, expires int64, fields field.List,
) *Object {
	return (*Object)(unsafe.Pointer(&pointObject{
		Object{
			head: makeHead(opoint, id, expires),
			fields: fields,
		},
		geojson.SimplePoint{Point: pt},
	}))
}
// newGeo allocates a geoObject and returns a pointer to its embedded
// Object header.
func newGeo(id string, geo geojson.Object, expires int64, fields field.List,
) *Object {
	return (*Object)(unsafe.Pointer(&geoObject{
		Object{
			head: makeHead(ogeo, id, expires),
			fields: fields,
		},
		geo,
	}))
}
// New creates an Object for the given geometry, using the compact point
// representation whenever the geometry is (or reduces to) a simple point.
func New(id string, geo geojson.Object, expires int64, fields field.List,
) *Object {
	switch p := geo.(type) {
	case *geojson.SimplePoint:
		return newPoint(id, p.Base(), expires, fields)
	case *geojson.Point:
		if p.IsSimple() {
			return newPoint(id, p.Base(), expires, fields)
		}
	}
	return newGeo(id, geo, expires, fields)
}
|
package main
import (
"fmt"
"html"
"log"
"net/http"
"time"
)
// hijackHandler demonstrates taking over the raw TCP connection of an HTTP
// request via the http.Hijacker interface.
type hijackHandler struct{}
// ServeHTTP hijacks the connection, prompts the client for a line of text,
// echoes it back quoted, and closes the connection.
func (hijackHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	hijacker, ok := w.(http.Hijacker)
	if !ok {
		http.Error(w, "webserver doesn't support hijacking", http.StatusInternalServerError)
		return
	}
	time.Sleep(1 * time.Second)
	conn, rw, err := hijacker.Hijack()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	fmt.Println("============")
	// From this point the connection is ours and must be closed by us.
	defer conn.Close()
	rw.WriteString("Now we're speaking raw TCP. Say hi: ")
	rw.Flush()
	line, err := rw.ReadString('\n')
	if err != nil {
		log.Printf("error reading string: %v", err)
		return
	}
	fmt.Fprintf(rw, "You said: %q\nBye.\n", line)
	rw.Flush()
}
// barHandler is a slow demo endpoint used to exercise server write
// timeouts.
type barHandler struct{}
// ServeHTTP waits three seconds and then greets the client with the
// escaped request path.
func (barHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	time.Sleep(3 * time.Second)
	escaped := html.EscapeString(r.URL.Path)
	fmt.Fprintf(w, "Hello, %q", escaped)
}
// main serves the two demo handlers on port 8889 with explicit read/write
// timeouts and a 1 MiB header cap.
func main() {
	mux := http.NewServeMux()
	mux.Handle("/hijack", hijackHandler{})
	mux.Handle("/bar", barHandler{})
	server := &http.Server{
		Addr:           ":8889",
		Handler:        mux,
		ReadTimeout:    10 * time.Second,
		WriteTimeout:   100 * time.Second,
		MaxHeaderBytes: 1 << 20,
	}
	log.Fatal(server.ListenAndServe())
}
|
package entities
// BirdVoteResponse is the API payload describing the vote tally for a
// single bird.
type BirdVoteResponse struct {
	BirdId int `json:"bird_id"` // NOTE(review): Go convention is BirdID; renaming would touch all callers
	Votes int `json:"votes"`
	Description string `json:"description"`
}
package shamir_test
import (
"math/rand"
"testing"
"time"
"github.com/renproject/secp256k1"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/renproject/shamir"
. "github.com/renproject/shamir/shamirutil"
)
// The key properties of verifiable secret sharing is that it is the same as
// normal secret sharing, except that additional auxiliary information is
// included within and without the shares that allows other parties to verify
// that any shares they receive are correct. Thus to reduce overlap with
// testing for standard Shamir shares, we aim to test the properties that are
// unique to verifiable shares. These are as follows.
//
// 1. Correctness: The shares and commitments produced by the relevant
// function calls should be correct. That is, all thusly created shares should
// constitute a consistent sharing of some secret, and should be found to be
// valid when checking their validity using the auxiliary information.
//
// 2. Soundness: Any shares that are altered after they have been correctly
// constructed by the VSS scheme should be detectable. That is, when checking
// the validity of such a share with the produced auxiliary information, the
// check should fail.
//
// 3. Homomorphic under addition: Pedersen VSS is homomorphic under addition,
// which means that if we have two verifiable shares and their respective
// commitments from two different sharings of respective secrets, we can "add"
// the shares and the commitments such that we end up with a new share and
// commitment. Further, this new share and commitment will form part of a
// valid verifiable sharing of the sum of the two original secrets.
//
// 4. Homomorphic under addition of a constant: Pedersen VSS is also
// homomorphic under addition by some public constant value. We require a
// property analogous to point 3 in this case.
//
// 5. Homomorphic under scaling: Pedersen VSS is also homomorphic under
// scaling by some public constant value. We require a property analogous to
// point 3 in this case.
//
// Test suite for Pedersen verifiable secret sharing, covering the five
// properties enumerated in the comment block above (correctness, soundness,
// and the three homomorphic properties), plus miscellaneous checks on
// commitments and (un)marshalling.
var _ = Describe("Verifiable secret sharing", func() {
	rand.Seed(time.Now().UnixNano())
	// Pedersen commitment parameter. This curve point needs to be a generator
	// of the elliptic curve group. Since the group has prime order, any curve
	// point is a generator (except for the identity), and so we may just pick
	// one at random. Note that in practice it is crucial that no one knows
	// log_g(h), that is, no one should know a number x such that h = g^x. For
	// testing this obviously does not matter.
	h := secp256k1.RandomPoint()
	Context("Correctness (1)", func() {
		trials := 20
		n := 20
		var k int
		var secret secp256k1.Fn
		indices := RandomIndices(n)
		vshares := make(VerifiableShares, n)
		c := NewCommitmentWithCapacity(n)
		Specify("all shares constructed from the VSS scheme should be valid", func() {
			for i := 0; i < trials; i++ {
				// Create a random sharing.
				k = RandRange(1, n)
				secret = secp256k1.RandomFn()
				err := VShareSecret(&vshares, &c, indices, h, secret, k)
				Expect(err).ToNot(HaveOccurred())
				// Check that all shares are valid.
				for _, share := range vshares {
					Expect(IsValid(h, &c, &share)).To(BeTrue())
				}
			}
		})
	})
	// Tests for the soundness property (2). We want to check that any shares
	// that get altered are detected by the checker. There are three ways in
	// which a share can be altered:
	//
	// 1. The index of the share could be changed.
	// 2. The value of the share could be changed.
	// 3. The decommitment value of the verifiable share could be changed.
	Context("Soundness (2)", func() {
		trials := 20
		n := 20
		var k, badInd int
		var indices []secp256k1.Fn
		var vshares VerifiableShares
		var c Commitment
		var secret secp256k1.Fn
		BeforeEach(func() {
			indices = RandomIndices(n)
			vshares = make(VerifiableShares, n)
			c = NewCommitmentWithCapacity(n)
		})
		// ShareAndCheckWithPerturbed creates a random sharing, corrupts one
		// share with perturbShare, and asserts that exactly that share fails
		// the validity check.
		ShareAndCheckWithPerturbed := func(kLower int, perturbShare func(vs *VerifiableShare)) {
			for i := 0; i < trials; i++ {
				k = RandRange(kLower, n)
				secret = secp256k1.RandomFn()
				err := VShareSecret(&vshares, &c, indices, h, secret, k)
				Expect(err).ToNot(HaveOccurred())
				// Change one of the shares to be invalid
				badInd = rand.Intn(n)
				perturbShare(&vshares[badInd])
				for i, share := range vshares {
					Expect(IsValid(h, &c, &share)).To(Equal(i != badInd))
				}
			}
		}
		Specify("a share with a modified index should be invalid (1)", func() {
			// We need to ensure that k is at least 2, otherwise every point on
			// the sharing polynomial is the same and changing the index won't
			// actually make the share invalid.
			ShareAndCheckWithPerturbed(2, PerturbIndex)
		})
		Specify("a share with a modified value should be invalid (2)", func() {
			ShareAndCheckWithPerturbed(1, PerturbValue)
		})
		Specify("a share with a modified decommitment should be invalid (3)", func() {
			ShareAndCheckWithPerturbed(1, PerturbDecommitment)
		})
	})
	// Tests for the Homomorphic addition property (3). This property states
	// that if we have two sharings and then add them together (including the
	// auxiliary information), we should get a new sharing that is valid and
	// corresponds to the sum of the original two secrets. Specifically, we
	// want the following to hold after adding two verifiable sharings
	// together:
	//
	// 1. Each summed share should be valid when checked against the new
	// "summed" auxiliary information.
	// 2. If one of the newly created shares is altered in any way, this share
	// should fail the validity check of the new auxiliary information.
	// 3. The summed shares should form a consistent sharing of the secret
	// that is defined as the sum of the two original secrets.
	Context("Homomorphic addition (3)", func() {
		trials := 20
		n := 20
		var k1, k2, kmax int
		var indices []secp256k1.Fn
		var vshares1, vshares2, vsharesSummed VerifiableShares
		var c1, c2, cSummed Commitment
		var secret1, secret2, secretSummed secp256k1.Fn
		BeforeEach(func() {
			indices = RandomIndices(n)
			vshares1 = make(VerifiableShares, n)
			vshares2 = make(VerifiableShares, n)
			vsharesSummed = make(VerifiableShares, n)
			c1 = NewCommitmentWithCapacity(n)
			c2 = NewCommitmentWithCapacity(n)
			cSummed = NewCommitmentWithCapacity(n)
		})
		// CreateShares builds two independent random sharings and their
		// element-wise sum (shares and commitments).
		CreateShares := func(kLower int) {
			k1 = RandRange(kLower, n)
			k2 = RandRange(kLower, n)
			kmax = Max(k1, k2)
			secret1 = secp256k1.RandomFn()
			secret2 = secp256k1.RandomFn()
			secretSummed.Add(&secret1, &secret2)
			_ = VShareSecret(&vshares1, &c1, indices, h, secret1, k1)
			_ = VShareSecret(&vshares2, &c2, indices, h, secret2, k2)
			// Create the shares for the sum
			cSummed.Add(c1, c2)
			for i := range vsharesSummed {
				vsharesSummed[i].Add(&vshares1[i], &vshares2[i])
			}
		}
		// PerturbAndCheck corrupts one summed share and asserts that exactly
		// that share fails the validity check.
		PerturbAndCheck := func(perturb func(vs *VerifiableShare)) {
			badInd := rand.Intn(n)
			perturb(&vsharesSummed[badInd])
			// The shares should be valid
			for i, share := range vsharesSummed {
				Expect(IsValid(h, &cSummed, &share)).To(Equal(i != badInd))
			}
		}
		Specify("the summed shares should be valid (1)", func() {
			for i := 0; i < trials; i++ {
				CreateShares(1)
				// The shares should be valid
				for _, share := range vsharesSummed {
					Expect(IsValid(h, &cSummed, &share)).To(BeTrue())
				}
			}
		})
		// The parts of a share that can be maliciously altered are the:
		// 1. Index
		// 2. Value
		// 3. Decommitment
		Specify("a share with an altered index should be detected (2.1)", func() {
			for i := 0; i < trials; i++ {
				// We need to ensure that k is at least 2, otherwise every
				// point on the sharing polynomial is the same and changing the
				// index won't actually make the share invalid.
				CreateShares(2)
				PerturbAndCheck(PerturbIndex)
			}
		})
		Specify("a share with an altered value should be detected (2.2)", func() {
			for i := 0; i < trials; i++ {
				CreateShares(1)
				PerturbAndCheck(PerturbValue)
			}
		})
		Specify("a share with an altered decommitment should be detected (2.3)", func() {
			for i := 0; i < trials; i++ {
				CreateShares(1)
				PerturbAndCheck(PerturbDecommitment)
			}
		})
		Specify("the resulting secret should be the sum of the original secrets (3)", func() {
			for i := 0; i < trials; i++ {
				CreateShares(1)
				sharesSummed := vsharesSummed.Shares()
				recon := Open(sharesSummed)
				Expect(recon.Eq(&secretSummed)).To(BeTrue())
				Expect(VsharesAreConsistent(vsharesSummed, kmax)).To(BeTrue())
			}
		})
	})
	// Tests for the Homomorphic addition of a constant property (4). This
	// property states that if we have a sharing and then add a public constant
	// (also adding the constant to the auxiliary information), we should get a
	// new sharing that is valid and corresponds to the sum of the original
	// secret and the constant. Specifically, we want the following to hold
	// after adding a constant to a sharing:
	//
	// 1. Each resulting share should be valid when checked against the new
	// resulting auxiliary information.
	// 2. If one of the newly created shares is altered in any way, this share
	// should fail the validity check of the new auxiliary information.
	// 3. The resulting shares should form a consistent sharing of the secret
	// that is defined as the sum of the original secret and the constant.
	Context("Homomorphic addition of constant (4)", func() {
		trials := 20
		n := 20
		var k int
		var indices []secp256k1.Fn
		var vshares, vsharesSummed VerifiableShares
		var c, cSummed Commitment
		var secret, constant, secretSummed secp256k1.Fn
		BeforeEach(func() {
			indices = RandomIndices(n)
			vshares = make(VerifiableShares, n)
			vsharesSummed = make(VerifiableShares, n)
			c = NewCommitmentWithCapacity(n)
			cSummed = NewCommitmentWithCapacity(n)
		})
		// CreateShares builds a random sharing and a second sharing obtained
		// by adding a random public constant to it.
		CreateShares := func(kLower int) {
			k = RandRange(kLower, n)
			secret = secp256k1.RandomFn()
			constant = secp256k1.RandomFn()
			secretSummed.Add(&secret, &constant)
			_ = VShareSecret(&vshares, &c, indices, h, secret, k)
			// Create the shares for the sum
			cSummed.AddConstant(c, &constant)
			for i := range vsharesSummed {
				vsharesSummed[i].AddConstant(&vshares[i], &constant)
			}
		}
		// PerturbAndCheck corrupts one share and asserts that exactly that
		// share fails the validity check.
		PerturbAndCheck := func(perturb func(vs *VerifiableShare)) {
			badInd := rand.Intn(n)
			perturb(&vsharesSummed[badInd])
			// The shares should be valid
			for i, share := range vsharesSummed {
				Expect(IsValid(h, &cSummed, &share)).To(Equal(i != badInd))
			}
		}
		Specify("the summed shares should be valid (1)", func() {
			for i := 0; i < trials; i++ {
				CreateShares(1)
				// The shares should be valid
				for _, share := range vsharesSummed {
					Expect(IsValid(h, &cSummed, &share)).To(BeTrue())
				}
			}
		})
		// The parts of a share that can be maliciously altered are the:
		// 1. Index
		// 2. Value
		// 3. Decommitment
		Specify("a share with an altered index should be detected (2.1)", func() {
			for i := 0; i < trials; i++ {
				// We need to ensure that k is at least 2, otherwise every
				// point on the sharing polynomial is the same and changing the
				// index won't actually make the share invalid.
				CreateShares(2)
				PerturbAndCheck(PerturbIndex)
			}
		})
		Specify("a share with an altered value should be detected (2.2)", func() {
			for i := 0; i < trials; i++ {
				CreateShares(1)
				PerturbAndCheck(PerturbValue)
			}
		})
		Specify("a share with an altered decommitment should be detected (2.3)", func() {
			for i := 0; i < trials; i++ {
				CreateShares(1)
				PerturbAndCheck(PerturbDecommitment)
			}
		})
		Specify("the resulting secret should be the sum of the original secret and the constant (3)", func() {
			for i := 0; i < trials; i++ {
				CreateShares(1)
				sharesSummed := vsharesSummed.Shares()
				recon := Open(sharesSummed)
				Expect(recon.Eq(&secretSummed)).To(BeTrue())
				Expect(VsharesAreConsistent(vsharesSummed, k)).To(BeTrue())
			}
		})
	})
	// Tests for the Homomorphic scaling property (5). This property states
	// that if we have a sharing and then scale it by some scalar (including
	// the auxiliary information), we should get a new sharing that is valid
	// and corresponds to the product of the original two secret and the
	// scalar. Specifically, we want the following to hold after scaling a
	// verifiable sharing:
	//
	// 1. Each scaled share should be valid when checked against the new
	// "scaled" auxiliary information.
	// 2. If one of the newly created shares is altered in any way, this share
	// should fail the validity check of the new auxiliary information.
	// 3. The scaled shares should form a consistent sharing of the secret
	// that is defined as the product of the original secret and the scalar.
	Context("Homomorphic scaling (5)", func() {
		trials := 20
		n := 20
		var k int
		var indices []secp256k1.Fn
		var vshares, vsharesScaled VerifiableShares
		var c, cScaled Commitment
		var secret, scale, secretScaled secp256k1.Fn
		BeforeEach(func() {
			indices = RandomIndices(n)
			vshares = make(VerifiableShares, n)
			vsharesScaled = make(VerifiableShares, n)
			c = NewCommitmentWithCapacity(n)
			cScaled = NewCommitmentWithCapacity(n)
		})
		// CreateShares builds a random sharing and a second sharing obtained
		// by scaling it with a random public scalar.
		CreateShares := func(kLower int) {
			k = RandRange(kLower, n)
			secret = secp256k1.RandomFn()
			scale = secp256k1.RandomFn()
			secretScaled.Mul(&secret, &scale)
			_ = VShareSecret(&vshares, &c, indices, h, secret, k)
			// Create the scaled shares
			cScaled.Scale(c, &scale)
			for i := range vsharesScaled {
				vsharesScaled[i].Scale(&vshares[i], &scale)
			}
		}
		// PerturbAndCheck corrupts one scaled share and asserts that exactly
		// that share fails the validity check.
		PerturbAndCheck := func(perturb func(vs *VerifiableShare)) {
			badInd := rand.Intn(n)
			perturb(&vsharesScaled[badInd])
			// The shares should be valid
			for i, share := range vsharesScaled {
				Expect(IsValid(h, &cScaled, &share)).To(Equal(i != badInd))
			}
		}
		Specify("the scaled shares should be valid (1)", func() {
			for i := 0; i < trials; i++ {
				CreateShares(1)
				// The shares should be valid
				for _, share := range vsharesScaled {
					Expect(IsValid(h, &cScaled, &share)).To(BeTrue())
				}
			}
		})
		// The parts of a share that can be maliciously altered are the:
		// 1. Index
		// 2. Value
		// 3. Decommitment
		Specify("a share with an altered index should be detected (2.1)", func() {
			for i := 0; i < trials; i++ {
				// We need to ensure that k is at least 2, otherwise every
				// point on the sharing polynomial is the same and changing the
				// index won't actually make the share invalid.
				CreateShares(2)
				PerturbAndCheck(PerturbIndex)
			}
		})
		Specify("a share with an altered value should be detected (2.2)", func() {
			for i := 0; i < trials; i++ {
				CreateShares(1)
				PerturbAndCheck(PerturbValue)
			}
		})
		Specify("a share with an altered decommitment should be detected (2.3)", func() {
			for i := 0; i < trials; i++ {
				CreateShares(1)
				PerturbAndCheck(PerturbDecommitment)
			}
		})
		Specify("the resulting secret should be the product of the original secret and the scale (3)", func() {
			for i := 0; i < trials; i++ {
				CreateShares(1)
				sharesScaled := vsharesScaled.Shares()
				recon := Open(sharesScaled)
				Expect(recon.Eq(&secretScaled)).To(BeTrue())
				Expect(VsharesAreConsistent(vsharesScaled, k)).To(BeTrue())
			}
		})
	})
	//
	// Miscellaneous tests
	//
	Specify("trying to share when k is larger than n should fail", func() {
		n := 20
		indices := RandomIndices(n)
		err := VShareSecret(nil, nil, indices, h, secp256k1.Fn{}, n+1)
		Expect(err).To(HaveOccurred())
	})
	Context("Commitments", func() {
		trials := 100
		maxK := 10
		Specify("should be unequal when they have different lengths", func() {
			for i := 0; i < trials; i++ {
				k1 := rand.Intn(maxK) + 1
				k2 := k1
				for k2 == k1 {
					k2 = rand.Intn(maxK) + 1
				}
				com1 := RandomCommitment(k1)
				com2 := RandomCommitment(k2)
				Expect(com1.Eq(com2)).To(BeFalse())
			}
		})
		Specify("should be unequal when they have different curve points", func() {
			for i := 0; i < trials; i++ {
				k := rand.Intn(maxK) + 1
				com1 := RandomCommitment(k)
				com2 := RandomCommitment(k)
				Expect(com1.Eq(com2)).To(BeFalse())
			}
		})
		Specify("setting a commitment should make it equal to the argument", func() {
			var com2 Commitment
			for i := 0; i < trials; i++ {
				k := rand.Intn(maxK) + 1
				com1 := RandomCommitment(k)
				com2.Set(com1)
				Expect(com1.Eq(com2)).To(BeTrue())
			}
		})
		Specify("accessing and appending elements should work correctly", func() {
			points := make([]secp256k1.Point, maxK)
			for i := 0; i < trials; i++ {
				k := rand.Intn(maxK) + 1
				com := NewCommitmentWithCapacity(k)
				for j := 0; j < k; j++ {
					points[j] = secp256k1.RandomPoint()
					com.Append(points[j])
				}
				for j := 0; j < k; j++ {
					p := com[j]
					Expect(p.Eq(&points[j])).To(BeTrue())
				}
			}
		})
		It("should return the correct number of curve points", func() {
			for i := 0; i < trials; i++ {
				k := rand.Intn(maxK) + 1
				com := RandomCommitment(k)
				Expect(com.Len()).To(Equal(k))
			}
		})
	})
	Context("Verifiable shares", func() {
		It("should be able to unmarshal into an empty struct", func() {
			var bs [VShareSize]byte
			share1 := NewVerifiableShare(
				NewShare(secp256k1.RandomFn(), secp256k1.RandomFn()),
				secp256k1.RandomFn(),
			)
			share2 := VerifiableShare{}
			_, _, _ = share1.Marshal(bs[:], share1.SizeHint())
			_, m, err := share2.Unmarshal(bs[:], share1.SizeHint())
			Expect(err).ToNot(HaveOccurred())
			Expect(m).To(Equal(0))
			Expect(share1.Eq(&share2)).To(BeTrue())
		})
	})
	//
	// VerifiableShares tests
	//
	Context("VerifiableShares", func() {
		const maxN = 20
		const maxLen = 4 + maxN*VShareSize
		var bs [maxLen]byte
		shares1 := make(VerifiableShares, maxN)
		shares2 := make(VerifiableShares, maxN)
		// RandomiseVerifiableShares fills shares with fresh random values.
		RandomiseVerifiableShares := func(shares VerifiableShares) {
			for i := range shares {
				shares[i] = NewVerifiableShare(
					NewShare(
						secp256k1.RandomFn(),
						secp256k1.RandomFn(),
					),
					secp256k1.RandomFn(),
				)
			}
		}
		// VerifiableSharesAreEq reports element-wise equality of two share
		// lists.
		VerifiableSharesAreEq := func(shares1, shares2 VerifiableShares) bool {
			if len(shares1) != len(shares2) {
				return false
			}
			for i := range shares1 {
				if !shares1[i].Eq(&shares2[i]) {
					return false
				}
			}
			return true
		}
		It("should be able to unmarshal into an empty struct", func() {
			shares1 = shares1[:maxN]
			RandomiseVerifiableShares(shares1)
			shares2 = VerifiableShares{}
			_, _, _ = shares1.Marshal(bs[:], shares1.SizeHint())
			_, m, err := shares2.Unmarshal(bs[:], shares1.SizeHint())
			Expect(err).ToNot(HaveOccurred())
			Expect(m).To(Equal(0))
			Expect(VerifiableSharesAreEq(shares1, shares2)).To(BeTrue())
		})
	})
	Context("Constants", func() {
		Specify("VShareSize should have the correct value", func() {
			vshare := VerifiableShare{}
			Expect(VShareSize).To(Equal(vshare.SizeHint()))
		})
	})
})
// BenchmarkVSShare measures the cost of producing one verifiable sharing
// for n = 100 players with threshold k = 33.
func BenchmarkVSShare(b *testing.B) {
	n := 100
	k := 33
	h := secp256k1.RandomPoint()
	indices := RandomIndices(n)
	vshares := make(VerifiableShares, n)
	c := NewCommitmentWithCapacity(n)
	secret := secp256k1.RandomFn()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = VShareSecret(&vshares, &c, indices, h, secret, k)
	}
}
// BenchmarkVSSVerify measures the cost of validating a single verifiable
// share against its commitment (n = 100, k = 33).
func BenchmarkVSSVerify(b *testing.B) {
	n := 100
	k := 33
	h := secp256k1.RandomPoint()
	indices := RandomIndices(n)
	vshares := make(VerifiableShares, n)
	c := NewCommitmentWithCapacity(n)
	secret := secp256k1.RandomFn()
	_ = VShareSecret(&vshares, &c, indices, h, secret, k)
	ind := rand.Intn(n)
	share := vshares[ind]
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		IsValid(h, &c, &share)
	}
}
|
package main
import (
"encoding/json"
"log"
"net/http"
"time"
"github.com/gorilla/mux"
)
var bookings []Spot
// main seeds one mock booking, wires up the router, and serves the bookings
// API on port 8000.
func main() {
	r := mux.NewRouter()
	// Create Mock data
	bookings = append(bookings, Spot{ID: "1", BookedDate: time.Date(2021, 9, 4, 0, 0, 0, 0, time.Now().Location()), BookingStatus: "NB", BookedOn: time.Now(), BookedBy: "123123"})
	initializeRoutes(r)
	log.Fatal(http.ListenAndServe(":8000", r))
}
// initializeRoutes registers all HTTP routes on the router.
func initializeRoutes(r *mux.Router) {
	// General / Admin routes
	r.HandleFunc("/api/bookings", createBooking).Methods("POST")
	// Me / Employee routes
	r.HandleFunc("/api/me/bookings", getBookings).Methods("GET")
	r.HandleFunc("/api/me/bookings/{id}", getBooking).Methods("GET")
	// NOTE(review): this POST route duplicates /api/bookings above — both
	// dispatch to createBooking; confirm whether that is intentional.
	r.HandleFunc("/api/me/bookings", createBooking).Methods("POST")
}
// getBookings returns every booking as a JSON array.
func getBookings(w http.ResponseWriter, r *http.Request) {
	respondWithJSON(w, http.StatusOK, bookings)
}
// getBooking returns the booking matching the {id} path variable after an
// authorization check, or 404 when the id is absent or unknown.
func getBooking(w http.ResponseWriter, r *http.Request) {
	if !isAuthorized(*r) {
		respondWithError(w, http.StatusUnauthorized, "You are not authorized to book a spot.")
		return
	}
	// Extract the id from the url
	vars := mux.Vars(r)
	id, ok := vars["id"]
	i := findBooking(bookings, id)
	if !ok || i == -1 {
		respondWithError(w, http.StatusNotFound, "The booking was not found.")
		return
	}
	respondWithJSON(w, http.StatusOK, bookings[i])
}
// createBooking registers a new spot booking from the JSON request body.
// It rejects unauthorized callers, enforces the capacity limit, and echoes
// the created booking back with 201 Created.
func createBooking(w http.ResponseWriter, r *http.Request) {
	if !isAuthorized(*r) {
		respondWithError(w, http.StatusUnauthorized, "You are not authorized to book a spot.")
		return
	}
	// maxBookings caps the number of spots that can be booked at once.
	// (Renamed from MAX_BOOKINGS: Go constants use MixedCaps.)
	const maxBookings = 15
	if len(bookings) >= maxBookings {
		respondWithError(w, http.StatusServiceUnavailable, "There are no spots available.")
		return
	}
	// BUG FIX: register the Close before decoding; previously the decode
	// error path returned before the defer was ever reached, leaking the
	// request body.
	defer r.Body.Close()
	var newSpot Spot
	decoder := json.NewDecoder(r.Body)
	if err := decoder.Decode(&newSpot); err != nil {
		respondWithError(w, http.StatusBadRequest, "Invalid request payload")
		return
	}
	bookings = append(bookings, newSpot)
	respondWithJSON(w, http.StatusCreated, newSpot)
}
|
package service
import (
"context"
"errors"
"git.dustess.com/mk-base/log"
"git.dustess.com/mk-training/mk-blog-svc/pkg/blog/dao"
"git.dustess.com/mk-training/mk-blog-svc/pkg/blog/model"
statDao "git.dustess.com/mk-training/mk-blog-svc/pkg/blogstatistics/dao"
statModel "git.dustess.com/mk-training/mk-blog-svc/pkg/blogstatistics/model"
tagDao "git.dustess.com/mk-training/mk-blog-svc/pkg/tags/dao"
userDao "git.dustess.com/mk-training/mk-blog-svc/pkg/user/dao"
userModel "git.dustess.com/mk-training/mk-blog-svc/pkg/user/model"
"go.mongodb.org/mongo-driver/bson"
)
// BlogService implements blog-post business logic (create, read, update,
// delete, listing) on top of the blog DAO.
type BlogService struct {
	dao *dao.BlogDao // persistence layer for blog documents
	ctx context.Context // request-scoped context handed through to the DAOs
}
// NewBlogService constructs a BlogService bound to the given context.
func NewBlogService(ctx context.Context) *BlogService {
	return &BlogService{
		dao: dao.NewBlogDao(ctx),
		ctx: ctx,
	}
}
// CreateBlog persists a new blog post, records its tags asynchronously, and
// indexes the document in Elasticsearch.
func (b *BlogService) CreateBlog(data model.Blog, tags []string) (err error) {
	if len(tags) > 0 {
		td := tagDao.NewTagDao(b.ctx)
		// Tag insertion is best-effort and must not block blog creation.
		go func() {
			_, _ = td.InsertTags(tags)
		}()
	}
	data.Tags = tags
	var id string
	id, err = b.dao.InsertOne(data)
	// BUG FIX: previously the ES index was created even when the insert
	// failed, indexing a document that does not exist in the database.
	if err != nil {
		return
	}
	es := dao.NewEsDao()
	data.ID = id
	// Index creation is best-effort; DB persistence is the source of truth.
	_ = es.CreateIndex(b.ctx, data)
	return
}
// FindBlog returns the single blog post with the given id.
func (b *BlogService) FindBlog(id string) (model.Blog, error) {
	return b.dao.FindByID(id)
}
// UpdateBlog updates the title, content, and tags of an existing blog post
// owned by the session user, then refreshes the Elasticsearch index.
func (b *BlogService) UpdateBlog(session userModel.UserSession, id string, blog model.Blog) error {
	article, err := b.FindBlog(id)
	if err != nil {
		return err
	}
	// Only the author may edit the article.
	if article.Author != session.ID {
		return errors.New("无权限修改当前用户的文章")
	}
	if len(blog.Tags) > 0 {
		td := tagDao.NewTagDao(b.ctx)
		// Tag insertion is best-effort and must not block the update.
		go func() {
			_, _ = td.InsertTags(blog.Tags)
		}()
	}
	article.Title = blog.Title
	article.Content = blog.Content
	article.Tags = blog.Tags
	// BUG FIX: the DB update error used to be silently overwritten by the
	// ES update error; check it before touching the index.
	if err = b.dao.UpdateBlog(id, article); err != nil {
		return err
	}
	es := dao.NewEsDao()
	return es.UpdateIndex(b.ctx, article)
}
// Delete removes a blog post owned by the session user from the database
// and, best-effort, from the Elasticsearch index.
func (b *BlogService) Delete(session userModel.UserSession, id string) error {
	article, err := b.FindBlog(id)
	if err != nil {
		return err
	}
	// Only the author may delete the article.
	if article.Author != session.ID {
		return errors.New("无权限修改当前用户的文章")
	}
	err = b.dao.DeleteByID(id)
	es := dao.NewEsDao()
	// Index removal is best-effort; the DB deletion result is what the
	// caller sees.
	_ = es.DeleteBlog(b.ctx, id)
	return err
}
// Lists returns a page of the session user's blog posts (decorated with
// statistics and author names) together with the total count. On query
// failure an empty slice and zero count are returned.
func (b *BlogService) Lists(session userModel.UserSession, offset, limit int64) (resp []model.BlogList, count int64) {
	resp = make([]model.BlogList, 0)
	filter := bson.M{"author": session.ID}
	result, err := b.dao.FindAll(offset, limit, filter)
	if err != nil {
		return
	}
	resp = b.HandleLists(result)
	count = b.dao.FindCount(filter)
	return
}
// HandleLists 处理list
// HandleLists converts raw blogs into list rows and enriches each row with its
// PV/UV counters and the author's display name. Missing stats or users leave
// the corresponding fields at their zero/raw values.
func (b *BlogService) HandleLists(result []model.Blog) (resp []model.BlogList) {
	if len(result) < 1 {
		return
	}
	// stat accumulates the per-blog page-view / unique-visitor counters.
	type stat struct {
		Pv int64
		Uv int64
	}
	var (
		bid     = make([]string, len(result))
		uids    = make(map[string]struct{}, len(result))
		users   []string
		stMap   = make(map[string]stat)
		userMap = make(map[string]userModel.User)
	)
	for k, v := range result {
		resp = append(resp, model.BlogList{
			CreatedAt: v.CreatedAt,
			Title:     v.Title,
			ID:        v.ID,
			Author:    v.Author,
			Tags:      v.Tags,
		})
		bid[k] = v.ID
		uids[v.Author] = struct{}{} // dedupe author ids
	}
	stDao := statDao.NewBlogStatDao(b.ctx)
	query := stDao.BlogStatQuery(bid, 0, 0, "")
	for _, v := range stDao.AsynStaticsByBlog(query) {
		// Map zero value is fine for a first-seen blog; this replaces the old
		// double lookup whose variable `_st` shadowed the struct type name.
		temp := stMap[v.BlogID]
		if v.Typ == statModel.UV {
			temp.Uv = v.View
		} else {
			temp.Pv = v.View
		}
		stMap[v.BlogID] = temp
	}
	for k := range uids {
		users = append(users, k)
	}
	ud := userDao.NewUserDao(b.ctx)
	// Best effort: on lookup failure rows keep the raw author id.
	userRe, _ := ud.FindManyByID(users)
	for _, v := range userRe {
		userMap[v.ID] = v
	}
	for k, v := range resp {
		if st, ok := stMap[v.ID]; ok {
			resp[k].Uv = st.Uv
			resp[k].Pv = st.Pv
		}
		if us, ok := userMap[v.Author]; ok {
			resp[k].Author = us.Name
		}
	}
	return
}
// AttrUser 添加用户数据
// AttrUser replaces each row's author ID with the author's display name,
// mutating and returning the given slice. (The old misleading named return
// `resp`, which was never used, is removed; gofmt spacing fixed.)
func (b *BlogService) AttrUser(result []model.BlogList) []model.BlogList {
	var (
		users   = make([]string, len(result))
		userMap = make(map[string]userModel.User)
	)
	for k, v := range result {
		users[k] = v.Author
	}
	ud := userDao.NewUserDao(b.ctx)
	// Best effort: rows whose author cannot be resolved keep the raw id.
	userRe, _ := ud.FindManyByID(users)
	for _, v := range userRe {
		userMap[v.ID] = v
	}
	for k, v := range result {
		if us, ok := userMap[v.Author]; ok {
			result[k].Author = us.Name
		}
	}
	return result
}
// Detail 博客详情
// Detail returns the full post plus the enriched list row (stats and author
// name) produced by HandleLists. The literal BlogList below is only a
// fallback for the theoretically-empty HandleLists result. gofmt spacing
// fixed; explicit zero-value Uv/Pv fields dropped.
func (b *BlogService) Detail(id string) (model.BlogDetail, error) {
	blog, err := b.FindBlog(id)
	if err != nil {
		return model.BlogDetail{}, err
	}
	resp := model.BlogDetail{
		BlogList: model.BlogList{
			CreatedAt: blog.CreatedAt,
			Title:     blog.Title,
			ID:        blog.ID,
			Tags:      blog.Tags,
		},
		Content: blog.Content,
	}
	if lists := b.HandleLists([]model.Blog{blog}); len(lists) > 0 {
		resp.BlogList = lists[0]
	}
	return resp, nil
}
//VisitorLists 访客博客
func (b *BlogService) VisitorLists(search model.ListSearch) ([]model.BlogList, int64 ){
var (
result = make([]model.BlogList, 0)
err error
count int64
)
es := dao.NewEsDao()
result,count, err = es.VisitorBlogSearch(b.ctx, search)
if err != nil {
log.Error("elastic search error: ", err)
return result, 0
}
result = b.AttrUser(result)
return result, count
} |
package gogacap
import (
"testing"
)
import (
"fmt"
)
// sliceEq reports whether a and b have identical length and elements.
func sliceEq(a, b []int) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}
// sliceLt reports whether a is lexicographically less than b
// (a proper prefix is "less"). Idiom fix: no else after a terminating if.
func sliceLt(a, b []int) bool {
	// Skip the common prefix.
	i, j := 0, 0
	for i < len(a) && j < len(b) && a[i] == b[j] {
		i++
		j++
	}
	al := i < len(a) // a has elements remaining
	bl := j < len(b) // b has elements remaining
	if al && bl {
		return a[i] < b[j]
	}
	// Equal up to the shorter length: a < b only if a ran out first.
	return !al && bl
}
// sliceGt reports whether a is lexicographically greater than b,
// defined as the mirror of sliceLt.
func sliceGt(a, b []int) bool {
	return sliceLt(b, a)
}
// sliceCp returns a copy of a backed by freshly allocated storage.
func sliceCp(a []int) []int {
	out := make([]int, len(a))
	for i, v := range a {
		out[i] = v
	}
	return out
}
// TestSliceF table-tests the slice comparison helpers sliceEq and sliceLt.
func TestSliceF(t *testing.T) {
	// test asserts f(a, b) == r and reports a descriptive failure otherwise.
	test := func(f func(a, b []int) bool, a, b []int, r bool) {
		if f(a, b) != r {
			t.Errorf("slice fail: %v(%v, %v) != %v", f, a, b, r)
		}
	}
	test(sliceEq, []int{}, []int{}, true)
	test(sliceEq, []int{}, []int{1}, false)
	test(sliceEq, []int{2}, []int{1}, false)
	test(sliceEq, []int{2}, []int{}, false)
	test(sliceLt, []int{}, []int{}, false)
	test(sliceLt, []int{}, []int{1}, true)
	test(sliceLt, []int{1}, []int{}, false)
	test(sliceLt, []int{1}, []int{1}, false)
	test(sliceLt, []int{2}, []int{1}, false)
	test(sliceLt, []int{1}, []int{2}, true)
}
// TestReverseInts checks in-place reversal, including empty, single-element,
// palindromic and odd/even-length inputs.
func TestReverseInts(t *testing.T) {
	// test reverses a in place and expects it to equal b afterwards.
	test := func(a, b []int) {
		reverseInts(a)
		if !sliceEq(a, b) {
			t.Errorf("result(%v) != %v", a, b)
		}
	}
	test([]int{}, []int{})
	test([]int{0}, []int{0})
	test([]int{0, 1}, []int{1, 0})
	test([]int{1, 1}, []int{1, 1})
	test([]int{1, 0}, []int{0, 1})
	test([]int{1, 0, 2}, []int{2, 0, 1})
	test([]int{1, 0, 2, 0}, []int{0, 2, 0, 1})
	test([]int{1, 0, 2, 0, 4}, []int{4, 0, 2, 0, 1})
}
// TestUpperBoundInts checks that upperBoundInts returns the index of the first
// element strictly greater than x (len(a) when none), over sorted inputs with
// and without duplicates.
func TestUpperBoundInts(t *testing.T) {
	test := func(a []int, x, i int) {
		p := upperBoundInts(a, x)
		if p != i {
			t.Errorf("upperBoundInts(%v, %d) => %d != %d", a, x, p, i)
		}
	}
	test([]int{}, 1, 0)
	test([]int{0}, -1, 0)
	test([]int{0}, 0, 1)
	test([]int{0}, 1, 1)
	test([]int{0, 0}, -1, 0)
	test([]int{0, 0}, 0, 2)
	test([]int{0, 0}, 1, 2)
	test([]int{0, 0, 1}, -1, 0)
	test([]int{0, 0, 1}, 0, 2)
	test([]int{0, 0, 1}, 1, 3)
	test([]int{0, 0, 1}, 2, 3)
	test([]int{0, 1, 1}, -1, 0)
	test([]int{0, 1, 1}, 0, 1)
	test([]int{0, 1, 1}, 1, 3)
	test([]int{0, 1, 1}, 2, 3)
	test([]int{0, 1, 2}, -1, 0)
	test([]int{0, 1, 2}, 0, 1)
	test([]int{0, 1, 2}, 1, 2)
	test([]int{0, 1, 2}, 2, 3)
	test([]int{0, 1, 2}, 3, 3)
}
// TestLowerBoundInts checks that lowerBoundInts returns the index of the first
// element greater than or equal to x (len(a) when none), over sorted inputs
// with and without duplicates.
func TestLowerBoundInts(t *testing.T) {
	test := func(a []int, x, i int) {
		p := lowerBoundInts(a, x)
		if p != i {
			t.Errorf("lowerBoundInts(%v, %d) => %d != %d", a, x, p, i)
		}
	}
	test([]int{}, 1, 0)
	test([]int{0}, -1, 0)
	test([]int{0}, 0, 0)
	test([]int{0}, 1, 1)
	test([]int{0, 0}, -1, 0)
	test([]int{0, 0}, 0, 0)
	test([]int{0, 0}, 1, 2)
	test([]int{0, 0, 1}, -1, 0)
	test([]int{0, 0, 1}, 0, 0)
	test([]int{0, 0, 1}, 1, 2)
	test([]int{0, 0, 1}, 2, 3)
	test([]int{0, 1, 1}, -1, 0)
	test([]int{0, 1, 1}, 0, 0)
	test([]int{0, 1, 1}, 1, 1)
	test([]int{0, 1, 1}, 2, 3)
	test([]int{0, 1, 2}, -1, 0)
	test([]int{0, 1, 2}, 0, 0)
	test([]int{0, 1, 2}, 1, 1)
	test([]int{0, 1, 2}, 2, 2)
	test([]int{0, 1, 2}, 3, 3)
}
// TestInplaceMergeInts checks that inplaceMergeInts merges two sorted slices
// in place so that, afterwards, a holds the smallest len(a) values and b the
// rest, both sorted (expected states c and d).
func TestInplaceMergeInts(t *testing.T) {
	test := func(a, b, c, d []int) {
		input := fmt.Sprintf("%v-%v", a, b)
		inplaceMergeInts(a, b)
		if !sliceEq(a, c) || !sliceEq(b, d) {
			t.Errorf("%s => %v-%v != %v-%v", input, a, b, c, d)
		}
	}
	test([]int{}, []int{}, []int{}, []int{})
	test([]int{0}, []int{1}, []int{0}, []int{1})
	test([]int{1}, []int{0}, []int{0}, []int{1})
	test([]int{1, 2}, []int{0}, []int{0, 1}, []int{2})
	test([]int{2}, []int{0, 1}, []int{0}, []int{1, 2})
	test([]int{0, 1}, []int{1}, []int{0, 1}, []int{1})
	test([]int{0, 2}, []int{1}, []int{0, 1}, []int{2})
	test([]int{0, 2}, []int{1, 2, 2}, []int{0, 1}, []int{2, 2, 2})
	test([]int{0, 2}, []int{1, 2, 3}, []int{0, 1}, []int{2, 2, 3})
	test([]int{0, 4}, []int{1, 2, 3}, []int{0, 1}, []int{2, 3, 4})
	test([]int{1, 4}, []int{0, 2, 3}, []int{0, 1}, []int{2, 3, 4})
}
// TestRotateShiftRightOneInts checks the one-position right rotation: the last
// element moves to the front, everything else shifts right.
func TestRotateShiftRightOneInts(t *testing.T) {
	test := func(a, b []int) {
		input := fmt.Sprintf("%v", a)
		rotateShiftRightOneInts(a)
		if !sliceEq(a, b) {
			t.Errorf("rotateShiftRightOneInts(%s) => %v != %v", input, a, b)
		}
	}
	test([]int{}, []int{})
	test([]int{1}, []int{1})
	test([]int{1, 2}, []int{2, 1})
	test([]int{1, 2, 3}, []int{3, 1, 2})
}
// TestRotateShiftLeftOneInts checks the one-position left rotation: the first
// element moves to the back, everything else shifts left.
func TestRotateShiftLeftOneInts(t *testing.T) {
	test := func(a, b []int) {
		input := fmt.Sprintf("%v", a)
		rotateShiftLeftOneInts(a)
		if !sliceEq(a, b) {
			t.Errorf("rotateShiftLeftOneInts(%s) => %v != %v", input, a, b)
		}
	}
	test([]int{}, []int{})
	test([]int{1}, []int{1})
	test([]int{1, 2}, []int{2, 1})
	test([]int{1, 2, 3}, []int{2, 3, 1})
}
// TestRotateInts checks rotateInts by c positions (left, per the expectations
// below) and that rotateBackInts is its exact inverse, restoring the input.
func TestRotateInts(t *testing.T) {
	test := func(a []int, c int, b []int) {
		// Keep a pristine copy to verify the round trip.
		d := make([]int, len(a))
		copy(d, a)
		rotateInts(a, c)
		if !sliceEq(a, b) {
			t.Errorf("rotateInts(%v/%d) => %v != %v", d, c, a, b)
		}
		rotateBackInts(a, c)
		if !sliceEq(a, d) {
			t.Errorf("rotateBackInts(%v/%d) => %v != %v", b, c, a, d)
		}
	}
	test([]int{}, 0, []int{})
	test([]int{1}, 0, []int{1})
	test([]int{1}, 1, []int{1})
	test([]int{1, 2}, 0, []int{1, 2})
	test([]int{1, 2}, 1, []int{2, 1})
	test([]int{1, 2}, 2, []int{1, 2})
	test([]int{1, 2, 3}, 0, []int{1, 2, 3})
	test([]int{1, 2, 3}, 1, []int{2, 3, 1})
	test([]int{1, 2, 3}, 2, []int{3, 1, 2})
	test([]int{1, 2, 3}, 3, []int{1, 2, 3})
}
|
package client
import (
"gorm.io/driver/sqlite"
"gorm.io/gorm"
)
// DB is the package-wide gorm handle, opened eagerly at import time.
var DB *gorm.DB

// init opens the sqlite-backed gorm connection so DB is ready for package
// consumers; any failure is fatal. Local renamed db_ -> db (Go naming).
// NOTE(review): the DSN is empty — presumably intentional (in-memory/default
// file); confirm against the deployment configuration.
func init() {
	db, err := gorm.Open(sqlite.Open(""), &gorm.Config{})
	if err != nil {
		panic(err)
	}
	DB = db
}
|
// Package forkexec provides an interface for running a subprocess with a seccomp
// filter and rlimits applied, either containerized or ptraced.
//
// unshare cgroup namespace requires kernel >= 4.6
// seccomp, unshare pid / user namespaces requires kernel >= 3.8
// pipe2, dup3 requires kernel >= 2.6.27
package forkexec
|
package wappin
import (
"github.com/jarcoal/httpmock"
"github.com/stretchr/testify/assert"
"testing"
)
// TestSendNotificationHSM covers the happy path: with the token endpoint and
// the HSM send endpoint both mocked, SendMessage surfaces the successful
// delivery payload verbatim.
func TestSendNotificationHSM(t *testing.T) {
	httpmock.ActivateNonDefault(client.GetClient())
	defer httpmock.DeactivateAndReset()
	// Stub the access-token endpoint first — presumably fetched by
	// SendMessage before posting; confirm in the sender implementation.
	mockGetAccessToken()
	fixture := `{ "message_id": "id-123", "status": "200", "message": "Success" }`
	responder := httpmock.NewStringResponder(200, fixture)
	fakeUrl := baseUrl + SEND_HSM_ENDPOINT
	httpmock.RegisterResponder("POST", fakeUrl, responder)
	config := Config{
		ProjectId: "0123",
		SecretKey: "cs-key",
		ClientKey: "ck-key",
	}
	sender := New(config)
	reqMsg := ReqWaMessage{
		ClientId: "123",
		ProjectId: "0123",
		Type: "template_name",
		RecipientNumber: "089891234123",
		Params: map[string]string{
			"1": "John",
			"2": "Depok",
		},
	}
	res, _ := sender.SendMessage(reqMsg)
	assert.Equal(t, "id-123", res.MessageId)
	assert.Equal(t, "200", res.Status)
	assert.Equal(t, "Success", res.Message)
}
// TestFailSendNotificationHSM covers a provider-side delivery failure: the HTTP
// call succeeds (200) but the payload carries status "600"; SendMessage must
// pass that business-level failure through unchanged.
func TestFailSendNotificationHSM(t *testing.T) {
	httpmock.ActivateNonDefault(client.GetClient())
	defer httpmock.DeactivateAndReset()
	mockGetAccessToken()
	fixture := `{ "message_id": "id-124", "status": "600", "message": "Not delivered, Contact validate Failed" }`
	responder := httpmock.NewStringResponder(200, fixture)
	fakeUrl := baseUrl + SEND_HSM_ENDPOINT
	httpmock.RegisterResponder("POST", fakeUrl, responder)
	config := Config{
		ProjectId: "0123",
		SecretKey: "cs-key",
		ClientKey: "ck-key",
	}
	sender := New(config)
	reqMsg := ReqWaMessage{
		ClientId: "123",
		ProjectId: "0123",
		Type: "template_name",
		RecipientNumber: "089891234123",
		Params: map[string]string{
			"1": "John",
			"2": "Depok",
		},
	}
	res, _ := sender.SendMessage(reqMsg)
	assert.Equal(t, "id-124", res.MessageId)
	assert.Equal(t, "600", res.Status)
	assert.Equal(t, "Not delivered, Contact validate Failed", res.Message)
}
// TestInvalidRequestFormat checks that SendMessage rejects a payload that is
// not a ReqWaMessage with a descriptive error (no send endpoint is mocked —
// the call must fail before any HTTP request).
func TestInvalidRequestFormat(t *testing.T) {
	httpmock.ActivateNonDefault(client.GetClient())
	defer httpmock.DeactivateAndReset()
	mockGetAccessToken()
	config := Config{
		ProjectId: "0123",
		SecretKey: "cs-key",
		ClientKey: "ck-key",
	}
	sender := New(config)
	var reqMsg interface{}
	_, err := sender.SendMessage(reqMsg)
	assert.Equal(t, "invalid request message format", err.Error())
}
// mockGetAccessToken registers a canned successful OAuth-style token response
// on the token endpoint so tests can authenticate without the real service.
func mockGetAccessToken() {
	fixture := `{ "status": "200", "message": "Success", "data": { "access_token": "677b800f9b694f98bb9db6edb18336743a3f416cadff1953a59190f309220936", "expired_datetime": "2020-12-28 10:20:23", "token_type": "Bearer" } }`
	responder := httpmock.NewStringResponder(200, fixture)
	fakeUrl := baseUrl + TOKEN_ENDPOINT
	httpmock.RegisterResponder("POST", fakeUrl, responder)
}
|
package main
import (
"fmt"
)
// main runs hardestWorker on a sample log and prints the winning employee id.
func main() {
	logs := [][]int{{36, 3}, {1, 5}, {12, 8}, {25, 9}, {53, 11}, {29, 12}, {52, 14}}
	fmt.Println(hardestWorker(70, logs))
}
// hardestWorker returns the id of the employee whose single task took the
// longest, breaking ties by the smaller id. logs[i] is [id, leaveTime],
// sorted by leaveTime; each task starts when the previous one ends (the first
// at time 0). n is the employee-count bound from the original problem and is
// not needed by this algorithm.
func hardestWorker(n int, logs [][]int) int {
	best := logs[0][1] // first task runs from 0 to logs[0][1]
	id := logs[0][0]
	prev := logs[0][1]
	for _, entry := range logs[1:] {
		d := entry[1] - prev
		prev = entry[1]
		if d > best || (d == best && entry[0] < id) {
			best = d
			id = entry[0]
		}
	}
	return id
}
|
package main
import (
"log"
"os"
"time"
"github.com/faiface/beep"
"github.com/faiface/beep/mp3"
"github.com/faiface/beep/speaker"
)
// main decodes a fixture MP3 and plays it through the default speaker,
// blocking until playback completes.
func main() {
	// Path is relative to the working directory of the example.
	f, err := os.Open("../Lame_Drivers_-_01_-_Frozen_Egg.mp3")
	if err != nil {
		log.Fatal(err)
	}
	streamer, format, err := mp3.Decode(f)
	if err != nil {
		log.Fatal(err)
	}
	defer streamer.Close()
	// Speaker runs at twice the file's sample rate with a 1/10s buffer.
	// NOTE(review): speaker.Init's error return is ignored here — acceptable
	// for a demo, but worth checking in real code.
	sr := format.SampleRate * 2
	speaker.Init(sr, sr.N(time.Second/10))
	// Resample from the file rate to sr so the audio plays at normal speed
	// despite the doubled output rate (quality setting 4).
	resampled := beep.Resample(4, format.SampleRate, sr, streamer)
	// The callback fires after the stream ends, signalling completion.
	done := make(chan bool)
	speaker.Play(beep.Seq(resampled, beep.Callback(func() {
		done <- true
	})))
	<-done
}
|
//go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 -o zz_search_test.go ../../../../../vendor/github.com/go-air/gini/inter S
package solver
import (
"context"
"testing"
"github.com/go-air/gini/inter"
"github.com/go-air/gini/z"
"github.com/stretchr/testify/assert"
)
// TestScopeCounter wraps an inter.S solver and tracks the current test-scope
// nesting depth (incremented on Test, decremented on Untest) so tests can
// assert the scopes are balanced.
type TestScopeCounter struct {
	depth *int // shared counter; pointer so the embedding test can read it
	inter.S
}
// Test delegates to the wrapped solver and records one additional open scope.
func (c *TestScopeCounter) Test(dst []z.Lit) (result int, out []z.Lit) {
	result, out = c.S.Test(dst)
	*c.depth++
	return
}
// Untest delegates to the wrapped solver and records one closed scope.
func (c *TestScopeCounter) Untest() (result int) {
	result = c.S.Untest()
	*c.depth--
	return
}
// TestSearch drives search.Do against a scripted fake solver: TestReturns and
// UntestReturns define the solver's answers call-by-call, and each case pins
// the final SAT result, the surviving assumptions, and that every opened test
// scope was closed (depth back to 0).
func TestSearch(t *testing.T) {
	type tc struct {
		Name string
		Variables []Variable
		TestReturns []int
		UntestReturns []int
		Result int
		Assumptions []Identifier
	}
	for _, tt := range []tc{
		{
			Name: "children popped from back of deque when guess popped",
			Variables: []Variable{
				variable("a", Mandatory(), Dependency("c")),
				variable("b", Mandatory()),
				variable("c"),
			},
			TestReturns: []int{0, -1},
			UntestReturns: []int{-1, -1},
			Result: -1,
			Assumptions: nil,
		},
		{
			Name: "candidates exhausted",
			Variables: []Variable{
				variable("a", Mandatory(), Dependency("x")),
				variable("b", Mandatory(), Dependency("y")),
				variable("x"),
				variable("y"),
			},
			TestReturns: []int{0, 0, -1, 1},
			UntestReturns: []int{0},
			Result: 1,
			Assumptions: []Identifier{"a", "b", "y"},
		},
	} {
		t.Run(tt.Name, func(t *testing.T) {
			assert := assert.New(t)
			var s FakeS
			// Script the fake solver's per-call return values.
			for i, result := range tt.TestReturns {
				s.TestReturnsOnCall(i, result, nil)
			}
			for i, result := range tt.UntestReturns {
				s.UntestReturnsOnCall(i, result)
			}
			var depth int
			counter := &TestScopeCounter{depth: &depth, S: &s}
			lits, err := newLitMapping(tt.Variables)
			assert.NoError(err)
			h := search{
				s: counter,
				lits: lits,
				tracer: DefaultTracer{},
			}
			var anchors []z.Lit
			for _, id := range h.lits.AnchorIdentifiers() {
				anchors = append(anchors, h.lits.LitOf(id))
			}
			result, ms, _ := h.Do(context.Background(), anchors)
			assert.Equal(tt.Result, result)
			var ids []Identifier
			for _, m := range ms {
				ids = append(ids, lits.VariableOf(m).Identifier())
			}
			assert.Equal(tt.Assumptions, ids)
			// All Test scopes must have been matched by Untest calls.
			assert.Equal(0, depth)
		})
	}
}
|
package main
import (
"context"
"flag"
"fmt"
"log"
"net"
pb "github.com/rifannurmuhammad/go-grpc/movie/proto"
"google.golang.org/grpc"
)
var (
	port = flag.Int("port", 6565, "The server port")
)
// movieServer implements pb.MovieServiceServer.
// NOTE(review): embedding pb.MovieMessageList and pb.MovieMessage by value
// looks accidental — only UnimplementedMovieServiceServer is needed for
// forward compatibility; confirm before removing.
type movieServer struct {
	pb.MovieMessageList
	pb.MovieMessage
	pb.UnimplementedMovieServiceServer
}
// FindAll returns a static, hard-coded movie list; the request payload is
// ignored. Redundant element types dropped from the composite literal
// (gofmt -s / vet "composites" style).
func (s *movieServer) FindAll(ctx context.Context, empty *pb.Empty) (*pb.MovieMessageList, error) {
	movieMessages := []*pb.MovieMessage{
		{
			Title:    "Captain america",
			Author:   "Stan Lee",
			Isbn:     "1234567800",
			Category: "comic",
		},
		{
			Title:    "Iron Man",
			Author:   "Stan Lee",
			Isbn:     "1234567800",
			Category: "comic",
		},
	}
	return &pb.MovieMessageList{Movie: movieMessages}, nil
}
// AddMovie echoes the received movie back to the caller; nothing is persisted.
// Parameter renamed from the exported-looking `MovieMessage` to idiomatic
// lower-case `msg`.
func (s *movieServer) AddMovie(ctx context.Context, msg *pb.MovieMessage) (*pb.MovieMessage, error) {
	fmt.Println("receive add movie")
	return msg, nil
}
// main starts the gRPC movie service on localhost:<port>. Serve's error is
// now checked — previously a listener failure after startup was silently
// dropped.
func main() {
	flag.Parse()
	lis, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", *port))
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	log.Println("Starting server localhost:", *port)
	server := &movieServer{}
	grpcServer := grpc.NewServer()
	pb.RegisterMovieServiceServer(grpcServer, server)
	if err := grpcServer.Serve(lis); err != nil {
		log.Fatalf("failed to serve: %v", err)
	}
}
|
package spec
import (
"github.com/agiledragon/trans-dsl"
)
// IsDefExist is a trans-dsl specification predicate; it currently carries no
// state (see Ok below, which is a stub that always passes).
type IsDefExist struct {
}
// Ok always reports true: the "definition exists" check is currently a stub
// that never blocks the transaction flow. Receiver renamed from the
// non-idiomatic `this` to `d`.
func (d *IsDefExist) Ok(transInfo *transdsl.TransInfo) bool {
	return true
}
|
package queueinformer
import (
"context"
"testing"
"time"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/version"
)
// versionFunc adapts a plain function to the ServerVersion interface the
// operator expects from the discovery client.
type versionFunc func() (*version.Info, error)

// ServerVersion invokes f, or returns an empty Info when f is nil so the zero
// adapter is a benign default in tests. The old redundant conversion back to
// the underlying func type is removed — f is directly callable.
func (f versionFunc) ServerVersion() (*version.Info, error) {
	if f == nil {
		return &version.Info{}, nil
	}
	return f()
}
// TestOperatorRunChannelClosure verifies that Run always closes both the
// Ready and Done channels even when startup fails — server-version errors,
// cancellation mid-version-fetch, and cancellation before cache sync.
func TestOperatorRunChannelClosure(t *testing.T) {
	for _, tc := range []struct {
		name string
		// set up the operator under test and return a cleanup func to be invoked when the test completes
		of func(cancel context.CancelFunc, o *operator) func()
	}{
		{
			name: "error getting server version",
			of: func(cancel context.CancelFunc, o *operator) func() {
				o.serverVersion = versionFunc(func() (*version.Info, error) {
					return nil, errors.New("test error")
				})
				return func() {}
			},
		},
		{
			name: "context cancelled while getting server version",
			of: func(cancel context.CancelFunc, o *operator) func() {
				// Block inside the version call until cleanup so the cancel
				// definitely lands while the fetch is in flight.
				done := make(chan struct{})
				o.serverVersion = versionFunc(func() (*version.Info, error) {
					defer func() {
						<-done
					}()
					cancel()
					return nil, errors.New("test error")
				})
				return func() {
					close(done)
				}
			},
		},
		{
			name: "context cancelled before cache sync",
			of: func(cancel context.CancelFunc, o *operator) func() {
				o.hasSynced = func() bool {
					cancel()
					return false
				}
				return func() {}
			},
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()
			o, err := newOperatorFromConfig(defaultOperatorConfig())
			if err != nil {
				t.Fatalf("could not create operator from default config: %s", err)
			}
			// Benign defaults; each case overrides what it needs.
			o.serverVersion = versionFunc(nil)
			o.hasSynced = func() bool { return true }
			done := func() {}
			if tc.of != nil {
				done = tc.of(cancel, o)
			}
			defer done()
			o.Run(ctx)
			// Both channels must close within roughly two retry intervals.
			timeout := time.After(2 * defaultServerVersionInterval)
			for n, ch := range map[string]<-chan struct{}{
				"ready": o.Ready(),
				"done": o.Done(),
			} {
				select {
				case <-ch:
				case <-timeout:
					t.Errorf("timed out before %s channel closed", n)
				}
			}
		})
	}
}
|
// Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package domain
import (
"testing"
"time"
"github.com/pingcap/tidb/parser/terror"
"github.com/stretchr/testify/require"
"github.com/tikv/client-go/v2/txnkv/transaction"
)
// TestSchemaCheckerSimple exercises SchemaChecker.Check over a validator
// seeded with two schema-version transitions: up-to-date versions pass,
// unknown versions fail with ErrInfoSchemaChanged unless the related tables
// are untouched, and an expired validator degrades to ResultUnknown.
func TestSchemaCheckerSimple(t *testing.T) {
	lease := 5 * time.Millisecond
	validator := NewSchemaValidator(lease, nil)
	checker := &SchemaChecker{SchemaValidator: validator, needCheckSchema: true}
	// Add some schema versions and delta table IDs.
	ts := uint64(time.Now().UnixNano())
	validator.Update(ts, 0, 2, &transaction.RelatedSchemaChange{PhyTblIDS: []int64{1}, ActionTypes: []uint64{1}})
	validator.Update(ts, 2, 4, &transaction.RelatedSchemaChange{PhyTblIDS: []int64{2}, ActionTypes: []uint64{2}})
	// checker's schema version is the same as the current schema version.
	checker.schemaVer = 4
	_, err := checker.Check(ts)
	require.NoError(t, err)
	// checker's schema version is less than the current schema version, and it doesn't exist in validator's items.
	// checker's related table ID isn't in validator's changed table IDs.
	checker.schemaVer = 2
	checker.relatedTableIDs = []int64{3}
	_, err = checker.Check(ts)
	require.NoError(t, err)
	// The checker's schema version isn't in validator's items.
	checker.schemaVer = 1
	checker.relatedTableIDs = []int64{3}
	_, err = checker.Check(ts)
	require.True(t, terror.ErrorEqual(err, ErrInfoSchemaChanged))
	// checker's related table ID is in validator's changed table IDs.
	checker.relatedTableIDs = []int64{2}
	_, err = checker.Check(ts)
	require.True(t, terror.ErrorEqual(err, ErrInfoSchemaChanged))
	// validator's latest schema version is expired.
	time.Sleep(lease + time.Microsecond)
	checker.schemaVer = 4
	checker.relatedTableIDs = []int64{3}
	_, err = checker.Check(ts)
	require.NoError(t, err)
	// Use checker.SchemaValidator.Check instead of checker.Check here because backoff make CI slow.
	nowTS := uint64(time.Now().UnixNano())
	_, result := checker.SchemaValidator.Check(nowTS, checker.schemaVer, checker.relatedTableIDs, true)
	require.Equal(t, ResultUnknown, result)
}
|
package dto
import (
"bytes"
"encoding/binary"
"fmt"
)
// Header is the fixed-size message header that must precede every message
// sent to a node. All fields are fixed-width integers so the struct can be
// (de)serialized with encoding/binary (see Encode/Decode).
type Header struct {
	TaskId int64
	Type int32
	DataSize int32 // size of the payload following the header (presumably bytes — confirm with node protocol)
	DeviceId int32
}
// String renders the header as "#<task> Code: <type> [<size>] - dev#<device>"
// for logging.
func (h *Header) String() string {
	const layout = "#%d Code: %d [%d] - dev#%d"
	return fmt.Sprintf(layout, h.TaskId, h.Type, h.DataSize, h.DeviceId)
}
// Encode serializes the header to little-endian bytes via encoding/binary.
func (h *Header) Encode() ([]byte, error) {
	var buf bytes.Buffer
	if err := binary.Write(&buf, binary.LittleEndian, h); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
// Decode fills h from the little-endian bytes in buf (the inverse of Encode).
func (h *Header) Decode(buf []byte) error {
	buffer := bytes.NewBuffer(buf)
	return binary.Read(buffer, binary.LittleEndian, h)
}
// Size returns the encoded byte length of the header as computed by
// binary.Size (all fields are fixed-width, so this is constant).
func (h *Header) Size() int {
	return binary.Size(h)
}
|
/*
当前市场深度
返回当前市场深度(委托挂单),其中 asks 是委卖单, bids 是委买单。
请替换 [CURR_A] and [CURR_B] 为您需要查看的币种.
http://data.gate.io/api2/1/orderBook/[CURR_A]_[CURR_B]
*/
package main
import (
"github.com/buger/jsonparser"
"strconv"
"errors"
"encoding/json"
)
// ApiOrderBook fetches and holds the current market depth (open orders) for
// one currency pair from the gate.io orderBook endpoint.
type ApiOrderBook struct {
	Api
	Pair string // currency pair, e.g. "btc_usdt"
	Result bool // "result" flag reported by the API response
	asks []ApiDepth // sell-side depth rows
	bids []ApiDepth // buy-side depth rows
}
// ApiDepth is a single depth row.
// NOTE(review): Parser assigns values[0] to Amount and values[1] to Price;
// gate.io depth rows are commonly [price, amount] — confirm the ordering.
type ApiDepth struct {
	Amount float64
	Price float64
}
// Init configures the endpoint description and URI for the given currency
// pair, stores the Postgres handle, and returns api to allow chaining.
// (Redundant parentheses around the result type removed per gofmt.)
func (api *ApiOrderBook) Init(pg *Postgres, pair string) *ApiOrderBook {
	api.desc = "当前市场深度" + pair
	api.uri = "orderBook/" + pair
	api.Pair = pair
	api.pg = pg
	return api
}
// Request issues the HTTP GET for this order-book endpoint and returns the
// raw response body.
func (api *ApiOrderBook) Request() ([]byte, error) {
	return api.httpGet(api.uri)
}
// Parser decodes the order-book response body: it validates the "result"
// flag, then extracts the "asks" and "bids" depth arrays. Fixes: malformed
// rows (unmarshal failure or fewer than two numbers) are now skipped instead
// of panicking on an out-of-range index, and gofmt spacing is applied.
func (api *ApiOrderBook) Parser(body []byte) error {
	result, err := jsonparser.GetString(body, "result")
	if err != nil {
		return err
	}
	api.Result, err = strconv.ParseBool(result)
	if err != nil {
		return err
	}
	if !api.Result {
		return errors.New("接口返回失败")
	}
	asks, err := api.parseDepth(body, "asks")
	if err != nil {
		return err
	}
	bids, err := api.parseDepth(body, "bids")
	if err != nil {
		return err
	}
	api.asks = asks
	api.bids = bids
	return nil
}

// parseDepth extracts the depth rows under key ("asks" or "bids"). Each row
// is a two-element JSON number array; shorter or unparsable rows are skipped.
// NOTE(review): values[0] is stored as Amount and values[1] as Price, kept
// from the original code — gate.io depth rows are commonly [price, amount],
// so confirm the ordering.
func (api *ApiOrderBook) parseDepth(body []byte, key string) ([]ApiDepth, error) {
	var rows []ApiDepth
	_, err := jsonparser.ArrayEach(body, func(value []byte, dataType jsonparser.ValueType, offset int, err error) {
		var values []float64
		if json.Unmarshal(value, &values) != nil || len(values) < 2 {
			return // skip malformed row
		}
		rows = append(rows, ApiDepth{Amount: values[0], Price: values[1]})
	}, key)
	if err != nil {
		return nil, err
	}
	return rows, nil
}
// Save is a no-op: order-book snapshots are currently not persisted to
// Postgres (the method exists to satisfy the Api workflow).
func (api *ApiOrderBook) Save() (error) {
	return nil
}
|
package storage
import (
"bytes"
"context"
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
)
// TestLocal round-trips data through the local storage backend: Put then Get
// must return the exact bytes, including URIs with intermediate directories.
func TestLocal(t *testing.T) {
	ts := []struct {
		name string
		uri string
		data []byte
	}{
		{
			name: "normal data",
			uri: "testfile1",
			data: []byte("hello local storage"),
		},
		{
			name: "multiple directory",
			uri: "test_dir/testfile2",
			data: []byte("this is test file2"),
		},
	}
	for _, tc := range ts {
		t.Run(tc.name, func(t *testing.T) {
			// Fresh store rooted in a temp dir, removed after each case.
			dir := filepath.Join(os.TempDir(), "local-test")
			l, err := NewLocal(dir)
			assert.NoError(t, err)
			defer func() {
				os.RemoveAll(dir)
			}()
			buf := bytes.NewBuffer(tc.data)
			err = l.Put(context.Background(), tc.uri, buf)
			assert.NoError(t, err)
			rc, err := l.Get(context.Background(), tc.uri)
			assert.NoError(t, err)
			got, err := ioutil.ReadAll(rc)
			assert.NoError(t, err)
			rc.Close()
			assert.Equal(t, tc.data, got)
		})
	}
}
// TestPutErrorURI verifies Put rejects malformed URIs: leading slash,
// trailing slash (directory-only), and empty.
func TestPutErrorURI(t *testing.T) {
	dir := filepath.Join(os.TempDir(), "local-test")
	l, err := NewLocal(dir)
	assert.NoError(t, err)
	defer func() {
		os.RemoveAll(dir)
	}()
	ts := []struct {
		name string
		uri string
	}{
		{name: "begin_with_backlash", uri: "/test_dir"},
		{name: "end_with_backlash", uri: "test_dir/"},
		{name: "empty_uri", uri: ""},
	}
	for _, tc := range ts {
		t.Run(tc.name, func(t *testing.T) {
			err = l.Put(context.Background(), tc.uri, bytes.NewBuffer([]byte("only dir")))
			assert.Error(t, err)
		})
	}
}
// TestPutDuplicateFile verifies that writing the same URI twice succeeds
// (Put overwrites rather than erroring on an existing file).
func TestPutDuplicateFile(t *testing.T) {
	dir := filepath.Join(os.TempDir(), "local-test")
	l, err := NewLocal(dir)
	assert.NoError(t, err)
	defer func() {
		os.RemoveAll(dir)
	}()
	for i := 0; i < 2; i++ {
		err = l.Put(context.Background(), "test1.txt", bytes.NewBuffer([]byte("file data")))
		assert.NoError(t, err)
	}
}
// TestDeleteNotExistsFile verifies Delete is idempotent: deleting a missing
// file is not an error.
func TestDeleteNotExistsFile(t *testing.T) {
	dir := filepath.Join(os.TempDir(), "local-test")
	l, err := NewLocal(dir)
	assert.NoError(t, err)
	defer func() {
		os.RemoveAll(dir)
	}()
	err = l.Delete(context.Background(), "test-not-exists.txt")
	assert.NoError(t, err)
}
// TestDeleteDir verifies Delete removes a directory together with its
// contents. Fixes: the created file handle is now closed (it was leaked), and
// the final check actually asserts the directory is gone — the old
// `os.IsExist` test on a Stat error could never fire.
func TestDeleteDir(t *testing.T) {
	dir := filepath.Join(os.TempDir(), "local-test")
	l, err := NewLocal(dir)
	assert.NoError(t, err)
	defer func() {
		os.RemoveAll(dir)
	}()
	// add test file
	dirName := "test-dir"
	dirFullPath := filepath.Join(dir, dirName)
	err = os.Mkdir(dirFullPath, os.ModePerm)
	assert.NoError(t, err)
	testFilePath := filepath.Join(dirFullPath, "test1")
	f, err := os.OpenFile(testFilePath, os.O_CREATE|os.O_RDONLY, os.ModePerm)
	assert.NoError(t, err)
	assert.NoError(t, f.Close())
	err = l.Delete(context.Background(), dirName)
	assert.NoError(t, err)
	// Stat must fail with "not exist" once the directory is deleted.
	if _, err = os.Stat(dirFullPath); err == nil || !os.IsNotExist(err) {
		t.Errorf("dir should be delete: %s", dirFullPath)
	}
}
|
package util
import (
"io/ioutil"
"testing"
"github.com/ghodss/yaml"
"github.com/pmezard/go-difflib/difflib"
"github.com/stretchr/testify/require"
yaml2 "gopkg.in/yaml.v2"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
// ReadAsset reads an asset from the filesystem, panicking in case of error.
func ReadAsset(path string) []byte {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		panic(err)
	}
	return data
}
func LoadObject(path string, obj interface{}) error {
b := ReadAsset(path)
err := yaml.Unmarshal(b, &obj)
if err != nil {
return err
}
return nil
}
// LoadUnstructured reads the YAML asset at path into a generic map and wraps
// it as a Kubernetes unstructured object, panicking on any error.
func LoadUnstructured(path string) *unstructured.Unstructured {
	b := ReadAsset(path)
	obj := map[string]interface{}{}
	err := yaml.Unmarshal(b, &obj)
	if err != nil {
		panic(err)
	}
	return &unstructured.Unstructured{Object: obj}
}
// RequireYamlEqual fails the test with a unified diff when the YAML
// serializations of a (actual) and b (expected) differ. Marshal errors are
// now surfaced via require instead of being silently discarded, which could
// previously turn a marshal failure into a false pass.
func RequireYamlEqual(t *testing.T, a interface{}, b interface{}) {
	yamlActual, err := yaml2.Marshal(a)
	require.NoError(t, err)
	yamlExpected, err := yaml2.Marshal(b)
	require.NoError(t, err)
	diff := difflib.UnifiedDiff{
		A: difflib.SplitLines(string(yamlExpected)),
		B: difflib.SplitLines(string(yamlActual)),
		FromFile: "Expected",
		ToFile: "Actual",
		Context: 4,
	}
	m, err := difflib.GetUnifiedDiffString(diff)
	require.NoError(t, err)
	if len(m) > 0 {
		t.Errorf("The following differences have been found:\n%s", m)
	}
}
|
package sdk
const (
	// KafkaPlatformModel is the model name under which the built-in Kafka
	// platform is registered.
	KafkaPlatformModel = "Kafka"
)
var (
	// KafkaPlatform represent a kafka platform
	KafkaPlatform = PlatformModel{
		Name: KafkaPlatformModel,
		Author: "CDS",
		Identifier: "github.com/ovh/cds/platform/builtin/kafka",
		Icon: "",
		// Connection settings every Kafka platform instance must provide.
		DefaultConfig: PlatformConfig{
			"broker url": PlatformConfigValue{
				Type: PlatformConfigTypeString,
			},
			"username": PlatformConfigValue{
				Type: PlatformConfigTypeString,
			},
			"password": PlatformConfigValue{
				Type: PlatformConfigTypePassword,
			},
		},
		Disabled: false,
		Hook: true,
	}
)
// PlatformConfig represents the configuration of a platform, keyed by
// setting name.
type PlatformConfig map[string]PlatformConfigValue
const (
	// PlatformConfigTypeString represents a string configuration value
	PlatformConfigTypeString = "string"
	// PlatformConfigTypePassword represents a password configuration value
	// (stored/displayed as a secret).
	PlatformConfigTypePassword = "password"
)
// PlatformConfigValue represents a single configuration value for a platform.
type PlatformConfigValue struct {
	Value string `json:"value"`
	Type string `json:"type"`
}
// PlatformModel represents a platform model with its default configuration.
// The capability flags (Hook, FileStorage, ...) describe what instances of
// this model can be used for.
type PlatformModel struct {
	ID int64 `json:"id" db:"id"`
	Name string `json:"name" db:"name"`
	Author string `json:"author" db:"author"`
	Identifier string `json:"identifier" db:"identifier"`
	Icon string `json:"icon" db:"icon"`
	DefaultConfig PlatformConfig `json:"default_config" db:"-"`
	Disabled bool `json:"disabled" db:"disabled"`
	Hook bool `json:"hook" db:"hook"`
	FileStorage bool `json:"file_storage" db:"file_storage"`
	BlockStorage bool `json:"block_storage" db:"block_storage"`
	Deployment bool `json:"deployment" db:"deployment"`
	Compute bool `json:"compute" db:"compute"`
}
// ProjectPlatform is an instantiation of a platform model within a project,
// carrying that project's concrete Config values.
type ProjectPlatform struct {
	ID int64 `json:"id" db:"id"`
	ProjectID int64 `json:"project_id" db:"project_id"`
	Name string `json:"name" db:"name"`
	PlatformModelID int64 `json:"platform_model_id" db:"platform_model_id"`
	Model PlatformModel `json:"model" db:"-"`
	Config PlatformConfig `json:"config" db:"-"`
}
|
package cfmysql_test
import (
"code.cloudfoundry.org/cli/cf/errors"
"code.cloudfoundry.org/cli/plugin"
"code.cloudfoundry.org/cli/plugin/models"
"code.cloudfoundry.org/cli/plugin/pluginfakes"
"fmt"
. "github.com/andreasf/cf-mysql-plugin/cfmysql"
"github.com/andreasf/cf-mysql-plugin/cfmysql/cfmysqlfakes"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gbytes"
)
// Plugin behavior suite: exercises the cf-mysql plugin's metadata, the
// "mysql" and "mysqldump" commands, their error paths, and uninstall handling,
// all against the fakes constructed by NewPluginAndMocks.
var _ = Describe("Plugin", func() {
	var appList []plugin_models.GetAppsModel
	// Expected combined usage text for both commands; compared verbatim
	// against STDERR when required arguments are missing.
	usage := "cf mysql - Connect to a MySQL database service\n\nUSAGE:\n Open a mysql client to a database:\n cf mysql <service-name> [mysql args...]\n\n\ncf mysqldump - Dump a MySQL database\n\nUSAGE:\n Dump all tables in a database:\n cf mysqldump <service-name> [mysqldump args...]\n Dump specific tables in a database:\n cf mysqldump <service-name> [tables...] [mysqldump args...]\n"
	BeforeEach(func() {
		// Two started apps; tests that need an SSH tunnel expect the plugin
		// to receive exactly this list.
		appList = []plugin_models.GetAppsModel{
			{
				Name: "app-name-1",
			},
			{
				Name: "app-name-2",
			},
		}
	})
	Context("When calling 'cf plugins'", func() {
		It("Shows the mysql plugin with the current version", func() {
			mysqlPlugin, _ := NewPluginAndMocks()
			Expect(mysqlPlugin.GetMetadata().Name).To(Equal("mysql"))
			Expect(mysqlPlugin.GetMetadata().Version).To(Equal(plugin.VersionType{
				Major: 2,
				Minor: 1,
				Build: 0,
			}))
		})
	})
	Context("When calling 'cf mysql -h'", func() {
		It("Shows instructions for 'cf mysql'", func() {
			mysqlPlugin, _ := NewPluginAndMocks()
			Expect(mysqlPlugin.GetMetadata().Commands).To(HaveLen(2))
			Expect(mysqlPlugin.GetMetadata().Commands[0].Name).To(Equal("mysql"))
		})
	})
	Context("When calling 'cf mysql' without arguments", func() {
		It("Prints usage instructions to STDERR and exits with 1", func() {
			mysqlPlugin, mocks := NewPluginAndMocks()
			mysqlPlugin.Run(mocks.CliConnection, []string{"mysql"})
			Expect(mocks.Out).To(gbytes.Say(""))
			Expect(string(mocks.Err.Contents())).To(Equal(usage))
			Expect(mysqlPlugin.GetExitCode()).To(Equal(1))
		})
	})
	Context("When calling 'cf mysql db-name'", func() {
		var serviceA MysqlService
		BeforeEach(func() {
			serviceA = MysqlService{
				Name:     "database-a",
				Hostname: "database-a.host",
				Port:     "123",
				DbName:   "dbname-a",
				Username: "username",
				Password: "password",
				CaCert:   "ca-cert",
			}
		})
		Context("When the database is available", func() {
			It("Opens an SSH tunnel through a started app", func() {
				mysqlPlugin, mocks := NewPluginAndMocks()
				mocks.CfService.GetServiceReturns(serviceA, nil)
				mocks.CfService.GetStartedAppsReturns(appList, nil)
				mocks.PortFinder.GetPortReturns(2342)
				mysqlPlugin.Run(mocks.CliConnection, []string{"mysql", "database-a"})
				Expect(mocks.CfService.GetServiceCallCount()).To(Equal(1))
				calledCliConnection, calledName := mocks.CfService.GetServiceArgsForCall(0)
				Expect(calledName).To(Equal("database-a"))
				Expect(calledCliConnection).To(Equal(mocks.CliConnection))
				Expect(mocks.CfService.GetStartedAppsCallCount()).To(Equal(1))
				Expect(mocks.PortFinder.GetPortCallCount()).To(Equal(1))
				Expect(mocks.CfService.OpenSshTunnelCallCount()).To(Equal(1))
				calledCliConnection, calledService, calledAppList, localPort := mocks.CfService.OpenSshTunnelArgsForCall(0)
				Expect(calledCliConnection).To(Equal(mocks.CliConnection))
				Expect(calledService).To(Equal(serviceA))
				Expect(calledAppList).To(Equal(appList))
				Expect(localPort).To(Equal(2342))
			})
			It("Opens a MySQL client connecting through the tunnel", func() {
				mysqlPlugin, mocks := NewPluginAndMocks()
				mocks.CfService.GetServiceReturns(serviceA, nil)
				mocks.CfService.GetStartedAppsReturns(appList, nil)
				mocks.PortFinder.GetPortReturns(2342)
				mysqlPlugin.Run(mocks.CliConnection, []string{"mysql", "database-a"})
				Expect(mocks.PortFinder.GetPortCallCount()).To(Equal(1))
				Expect(mocks.MysqlRunner.RunMysqlCallCount()).To(Equal(1))
				hostname, port, dbName, username, password, caCert, _ := mocks.MysqlRunner.RunMysqlArgsForCall(0)
				Expect(hostname).To(Equal("127.0.0.1"))
				Expect(port).To(Equal(2342))
				Expect(dbName).To(Equal(serviceA.DbName))
				Expect(username).To(Equal(serviceA.Username))
				Expect(password).To(Equal(serviceA.Password))
				Expect(caCert).To(Equal(serviceA.CaCert))
			})
			Context("When passing additional arguments", func() {
				It("Passes the arguments to mysql", func() {
					mysqlPlugin, mocks := NewPluginAndMocks()
					mocks.CfService.GetServiceReturns(serviceA, nil)
					mocks.CfService.GetStartedAppsReturns(appList, nil)
					mocks.PortFinder.GetPortReturns(2342)
					mysqlPlugin.Run(mocks.CliConnection, []string{"mysql", "database-a", "--foo", "bar", "--baz"})
					Expect(mocks.PortFinder.GetPortCallCount()).To(Equal(1))
					Expect(mocks.MysqlRunner.RunMysqlCallCount()).To(Equal(1))
					hostname, port, dbName, username, password, _, args := mocks.MysqlRunner.RunMysqlArgsForCall(0)
					Expect(hostname).To(Equal("127.0.0.1"))
					Expect(port).To(Equal(2342))
					Expect(dbName).To(Equal(serviceA.DbName))
					Expect(username).To(Equal(serviceA.Username))
					Expect(password).To(Equal(serviceA.Password))
					Expect(args).To(Equal([]string{"--foo", "bar", "--baz"}))
				})
			})
		})
		Context("When a service key cannot be retrieved", func() {
			It("Shows an error message and exits with 1", func() {
				mysqlPlugin, mocks := NewPluginAndMocks()
				mocks.CfService.GetServiceReturns(MysqlService{}, errors.New("database not found"))
				mysqlPlugin.Run(mocks.CliConnection, []string{"mysql", "db-name"})
				Expect(mocks.CfService.GetServiceCallCount()).To(Equal(1))
				Expect(mocks.Out).To(gbytes.Say(""))
				Expect(mocks.Err).To(gbytes.Say("^FAILED\nUnable to retrieve service credentials: database not found\n$"))
				Expect(mysqlPlugin.GetExitCode()).To(Equal(1))
			})
		})
		Context("When there are no started apps", func() {
			It("Shows an error message and exits with 1", func() {
				mysqlPlugin, mocks := NewPluginAndMocks()
				mocks.CfService.GetServiceReturns(serviceA, nil)
				mocks.CfService.GetStartedAppsReturns([]plugin_models.GetAppsModel{}, nil)
				mysqlPlugin.Run(mocks.CliConnection, []string{"mysql", "database-a"})
				Expect(mocks.CfService.GetServiceCallCount()).To(Equal(1))
				Expect(mocks.CfService.GetStartedAppsCallCount()).To(Equal(1))
				Expect(mocks.Out).To(gbytes.Say("^$"))
				Expect(mocks.Err).To(gbytes.Say("^FAILED\nUnable to connect to 'database-a': no started apps in current space\n$"))
				Expect(mysqlPlugin.GetExitCode()).To(Equal(1))
			})
		})
		Context("When GetStartedApps returns an error", func() {
			It("Shows an error message and exits with 1", func() {
				mysqlPlugin, mocks := NewPluginAndMocks()
				mocks.CfService.GetServiceReturns(serviceA, nil)
				mocks.CfService.GetStartedAppsReturns(nil, fmt.Errorf("PC LOAD LETTER"))
				mysqlPlugin.Run(mocks.CliConnection, []string{"mysql", "database-a"})
				Expect(mocks.CfService.GetServiceCallCount()).To(Equal(1))
				Expect(mocks.CfService.GetStartedAppsCallCount()).To(Equal(1))
				Expect(mocks.Out).To(gbytes.Say(""))
				Expect(mocks.Err).To(gbytes.Say("^FAILED\nUnable to retrieve started apps: PC LOAD LETTER\n$"))
				Expect(mysqlPlugin.GetExitCode()).To(Equal(1))
			})
		})
	})
	Context("When calling 'cf mysqldump -h'", func() {
		It("Shows instructions for 'cf mysqldump'", func() {
			mysqlPlugin, _ := NewPluginAndMocks()
			Expect(mysqlPlugin.GetMetadata().Commands).To(HaveLen(2))
			Expect(mysqlPlugin.GetMetadata().Commands[1].Name).To(Equal("mysqldump"))
		})
	})
	Context("When calling 'cf mysqldump' without arguments", func() {
		It("Prints usage information to STDERR and exits with 1", func() {
			mysqlPlugin, mocks := NewPluginAndMocks()
			mysqlPlugin.Run(mocks.CliConnection, []string{"mysqldump"})
			Expect(mocks.Out).To(gbytes.Say(""))
			Expect(string(mocks.Err.Contents())).To(Equal(usage))
			Expect(mysqlPlugin.GetExitCode()).To(Equal(1))
		})
	})
	Context("When calling 'cf mysqldump db-name'", func() {
		var serviceA MysqlService
		BeforeEach(func() {
			serviceA = MysqlService{
				Name:     "database-a",
				Hostname: "database-a.host",
				Port:     "123",
				DbName:   "dbname-a",
				Username: "username",
				Password: "password",
				CaCert:   "ca-cert",
			}
		})
		Context("When the database is available", func() {
			var app1 plugin_models.GetAppsModel
			var app2 plugin_models.GetAppsModel
			BeforeEach(func() {
				app1 = plugin_models.GetAppsModel{
					Name: "app-name-1",
				}
				app2 = plugin_models.GetAppsModel{
					Name: "app-name-2",
				}
			})
			It("Opens an SSH tunnel through a started app", func() {
				mysqlPlugin, mocks := NewPluginAndMocks()
				mocks.CfService.GetServiceReturns(serviceA, nil)
				// app1/app2 mirror appList, so the tunnel assertion below can
				// compare against appList directly.
				mocks.CfService.GetStartedAppsReturns([]plugin_models.GetAppsModel{app1, app2}, nil)
				mocks.PortFinder.GetPortReturns(2342)
				mysqlPlugin.Run(mocks.CliConnection, []string{"mysqldump", "database-a"})
				Expect(mocks.CfService.GetServiceCallCount()).To(Equal(1))
				Expect(mocks.CfService.GetStartedAppsCallCount()).To(Equal(1))
				Expect(mocks.PortFinder.GetPortCallCount()).To(Equal(1))
				Expect(mocks.CfService.OpenSshTunnelCallCount()).To(Equal(1))
				calledCliConnection, calledService, calledAppList, localPort := mocks.CfService.OpenSshTunnelArgsForCall(0)
				Expect(calledCliConnection).To(Equal(mocks.CliConnection))
				Expect(calledService).To(Equal(serviceA))
				Expect(calledAppList).To(Equal(appList))
				Expect(localPort).To(Equal(2342))
			})
			It("Opens mysqldump connecting through the tunnel", func() {
				mysqlPlugin, mocks := NewPluginAndMocks()
				mocks.CfService.GetServiceReturns(serviceA, nil)
				mocks.CfService.GetStartedAppsReturns([]plugin_models.GetAppsModel{app1}, nil)
				mocks.PortFinder.GetPortReturns(2342)
				mysqlPlugin.Run(mocks.CliConnection, []string{"mysqldump", "database-a"})
				Expect(mocks.PortFinder.GetPortCallCount()).To(Equal(1))
				Expect(mocks.MysqlRunner.RunMysqlDumpCallCount()).To(Equal(1))
				hostname, port, dbName, username, password, caCert, _ := mocks.MysqlRunner.RunMysqlDumpArgsForCall(0)
				Expect(hostname).To(Equal("127.0.0.1"))
				Expect(port).To(Equal(2342))
				Expect(dbName).To(Equal(serviceA.DbName))
				Expect(username).To(Equal(serviceA.Username))
				Expect(password).To(Equal(serviceA.Password))
				Expect(caCert).To(Equal(serviceA.CaCert))
			})
		})
	})
	Context("When uninstalling the plugin", func() {
		It("Does not give any output or call the API", func() {
			mysqlPlugin, mocks := NewPluginAndMocks()
			mysqlPlugin.Run(mocks.CliConnection, []string{"CLI-MESSAGE-UNINSTALL"})
			Expect(mocks.Out).To(gbytes.Say("^$"))
			Expect(mocks.Err).To(gbytes.Say("^$"))
			Expect(mysqlPlugin.GetExitCode()).To(Equal(0))
		})
	})
})
// Mocks bundles the fakes and I/O buffers a Plugin test needs: gbytes buffers
// stand in for STDIN/STDOUT/STDERR, the fakes record calls for assertions.
type Mocks struct {
	In            *gbytes.Buffer // fake STDIN
	Out           *gbytes.Buffer // captures plugin STDOUT
	Err           *gbytes.Buffer // captures plugin STDERR
	CfService     *cfmysqlfakes.FakeCfService
	PortFinder    *cfmysqlfakes.FakePortFinder
	CliConnection *pluginfakes.FakeCliConnection
	MysqlRunner   *cfmysqlfakes.FakeMysqlRunner
}
// NewPluginAndMocks constructs a MysqlPlugin wired to fresh fakes and output
// buffers, returning both so tests can drive the plugin and inspect the calls.
func NewPluginAndMocks() (*MysqlPlugin, Mocks) {
	m := Mocks{
		In:            gbytes.NewBuffer(),
		Out:           gbytes.NewBuffer(),
		Err:           gbytes.NewBuffer(),
		CfService:     new(cfmysqlfakes.FakeCfService),
		CliConnection: new(pluginfakes.FakeCliConnection),
		MysqlRunner:   new(cfmysqlfakes.FakeMysqlRunner),
		PortFinder:    new(cfmysqlfakes.FakePortFinder),
	}
	conf := PluginConf{
		In:          m.In,
		Out:         m.Out,
		Err:         m.Err,
		CfService:   m.CfService,
		MysqlRunner: m.MysqlRunner,
		PortFinder:  m.PortFinder,
	}
	return NewMysqlPlugin(conf), m
}
|
package cli
import (
"errors"
"strings"
authclient "github.com/cosmos/cosmos-sdk/x/auth/client"
"github.com/gookit/gcli/v3"
"github.com/ovrclk/akcmd/client"
"github.com/ovrclk/akcmd/flags"
)
// GetBroadcastCommand returns the tx broadcast command.
func GetBroadcastCommand() *gcli.Command {
	// configure registers the shared tx flags and the required file argument.
	configure := func(cmd *gcli.Command) {
		flags.AddTxFlagsToCmd(cmd)
		cmd.AddArg("file-path", "", true)
	}
	// run reads a signed transaction from args[0] ("-" means stdin),
	// encodes it and broadcasts it through the client context.
	run := func(cmd *gcli.Command, args []string) error {
		clientCtx, err := client.GetClientTxContext()
		if err != nil {
			return err
		}
		if flags.TxFlagsFromCmd().Offline {
			return errors.New("cannot broadcast tx during offline mode")
		}
		stdTx, err := authclient.ReadTxFromFile(clientCtx, args[0])
		if err != nil {
			return err
		}
		txBytes, err := clientCtx.TxConfig.TxEncoder()(stdTx)
		if err != nil {
			return err
		}
		res, err := clientCtx.BroadcastTx(txBytes)
		if err != nil {
			return err
		}
		return clientCtx.PrintProto(res)
	}
	return &gcli.Command{
		Name: "broadcast",
		Desc: "Broadcast transactions generated offline",
		Help: strings.TrimSpace(`Broadcast transactions created with the --generate-only
flag and signed with the sign command. Read a transaction from [file_path] and
broadcast it to a node. If you supply a dash (-) argument in place of an input
filename, the command reads from standard input.
$ <appd> tx broadcast ./mytxn.json
`),
		Config: configure,
		Func:   run,
	}
}
|
package ionic
import (
"bytes"
"encoding/base64"
"encoding/json"
"fmt"
"net/http"
)
const (
	// sessionsLoginEndpoint is the relative API path used by Login.
	sessionsLoginEndpoint = "v1/sessions/login"
)

// Session represents the BearerToken and User for the current session
type Session struct {
	BearerToken string `json:"jwt"`
	User        User   `json:"user"`
}

// LoginRequest is the JSON body posted to the login endpoint.
type LoginRequest struct {
	Username string `json:"username"`
	Password string `json:"password"`
}
// Login performs basic auth requests with a username and returns a Login
// response with bearer token and user for the session. Returns an error for
// HTTP and JSON errors.
func (ic *IonClient) Login(username, password string) (Session, error) {
	// Credentials travel twice: in the Basic auth header and in the JSON body.
	creds := fmt.Sprintf("%v:%v", username, password)
	headers := http.Header{}
	headers.Add("Authorization", fmt.Sprintf("Basic %v", base64.StdEncoding.EncodeToString([]byte(creds))))
	headers.Add("Content-Type", "application/json; charset=UTF-8")
	payload, err := json.Marshal(LoginRequest{username, password})
	if err != nil {
		return Session{}, fmt.Errorf("session: failed to marshal login body: %v", err.Error())
	}
	raw, err := ic.Post(sessionsLoginEndpoint, "", nil, *bytes.NewBuffer(payload), headers)
	if err != nil {
		return Session{}, fmt.Errorf("session: failed login request: %v", err.Error())
	}
	var session Session
	if err := json.Unmarshal(raw, &session); err != nil {
		return Session{}, fmt.Errorf("session: failed to unmarshal response: %v", err.Error())
	}
	return session, nil
}
|
package topsort
import (
"fmt"
"strings"
)
// Graph is a directed graph whose nodes are identified by name.
type Graph struct {
	nodes map[string]node
}

// NewGraph creates an empty Graph.
func NewGraph() *Graph {
	g := Graph{nodes: map[string]node{}}
	return &g
}
// AddNode registers name in the graph; adding an existing node is a no-op.
func (g *Graph) AddNode(name string) {
	if g.ContainsNode(name) {
		return
	}
	g.nodes[name] = make(node)
}
// AddEdge records a directed edge from "from" to "to". Both endpoints must
// already exist in the graph; otherwise an error naming the missing node is
// returned.
func (g *Graph) AddEdge(from string, to string) error {
	source, ok := g.nodes[from]
	if !ok {
		return fmt.Errorf("Node %q not found", from)
	}
	if _, exists := g.nodes[to]; !exists {
		return fmt.Errorf("Node %q not found", to)
	}
	source.addEdge(to)
	return nil
}
// ContainsNode reports whether name has been added to the graph.
func (g *Graph) ContainsNode(name string) bool {
	_, found := g.nodes[name]
	return found
}
// TopSort returns a topological ordering of the nodes reachable from name,
// dependencies first. It fails with a cycle error if the subgraph is cyclic.
func (g *Graph) TopSort(name string) ([]string, error) {
	order := newOrderedSet()
	if err := g.visit(name, order, nil); err != nil {
		return nil, err
	}
	return order.items, nil
}
// visit depth-first-traverses the graph from name, appending nodes to results
// in post-order (dependencies before dependents). visited tracks only the
// current DFS path: each recursive call gets its own copy, so sibling branches
// do not see each other's history. Passing visited == nil starts a fresh path.
func (g *Graph) visit(name string, results *orderedset, visited *orderedset) error {
	if visited == nil {
		visited = newOrderedSet()
	}
	added := visited.add(name)
	if !added {
		// name is already on the current path, so we have walked a cycle.
		// Report the path from its first occurrence back to name.
		index := visited.index(name)
		cycle := append(visited.items[index:], name)
		return fmt.Errorf("Cycle error: %s", strings.Join(cycle, " -> "))
	}
	n := g.nodes[name]
	for _, edge := range n.edges() {
		// Copy the path so each outgoing edge is explored independently.
		// NOTE(review): copying per edge makes this O(n^2) in path length —
		// presumably fine for small graphs; confirm before optimizing.
		err := g.visit(edge, results, visited.copy())
		if err != nil {
			return err
		}
	}
	// Post-order append: results.add ignores duplicates, so shared
	// dependencies are emitted only once.
	results.add(name)
	return nil
}
// node is the adjacency set of a single graph node: the keys are the names of
// the nodes it has edges to.
type node map[string]bool

// addEdge records an outgoing edge to name (idempotent).
func (n node) addEdge(name string) {
	n[name] = true
}

// edges lists the targets of all outgoing edges, in map-iteration (random)
// order. Returns nil for a node with no edges.
func (n node) edges() []string {
	var targets []string
	for target := range n {
		targets = append(targets, target)
	}
	return targets
}
// orderedset is a set of strings that remembers insertion order.
// length always equals len(items) and is the index the next item will get.
type orderedset struct {
	indexes map[string]int
	items   []string
	length  int
}

// newOrderedSet creates an empty set.
func newOrderedSet() *orderedset {
	return &orderedset{
		indexes: make(map[string]int),
		length:  0,
	}
}

// add appends item if absent and reports whether it was newly inserted.
func (s *orderedset) add(item string) bool {
	if _, exists := s.indexes[item]; exists {
		return false
	}
	s.indexes[item] = s.length
	s.items = append(s.items, item)
	s.length++
	return true
}

// copy returns an independent set with the same items in the same order.
func (s *orderedset) copy() *orderedset {
	dup := newOrderedSet()
	for _, item := range s.items {
		dup.add(item)
	}
	return dup
}

// index returns item's insertion position, or -1 if it is not in the set.
func (s *orderedset) index(item string) int {
	if pos, ok := s.indexes[item]; ok {
		return pos
	}
	return -1
}
|
package 链表
// ------------------------- Method 1: brute-force K-way merge -------------------------
// Runtime: 320 ms, faster than 10.75% of Go submissions
// Memory: 5.3 MB, better than 100.00% of Go submissions

// INF is a sentinel larger than any node value in the problem's input range.
const INF = 1000000000

// cantFindOutIndexFlag marks "no candidate found" (every list is exhausted).
const cantFindOutIndexFlag = -1
// mergeKLists merges k sorted linked lists by repeatedly scanning every list
// head for the smallest node and appending it: O(k) per node, O(N*k) overall.
// The input slice is mutated — each entry is advanced as its nodes are taken.
func mergeKLists(lists []*ListNode) *ListNode {
	dummyHead := &ListNode{}
	cur := dummyHead
	for {
		minIndex := getIndexOfMinNode(lists)
		if minIndex == cantFindOutIndexFlag {
			// All lists are exhausted.
			break
		}
		cur.Next = lists[minIndex]
		cur = cur.Next
		lists[minIndex] = lists[minIndex].Next
	}
	return dummyHead.Next
}
// getIndexOfMinNode returns the index of the list whose head node has the
// smallest value, or cantFindOutIndexFlag when every list is nil/exhausted.
func getIndexOfMinNode(lists []*ListNode) int {
	best := cantFindOutIndexFlag
	for i := range lists {
		if lists[i] == nil {
			continue
		}
		if best == cantFindOutIndexFlag || lists[i].Val < lists[best].Val {
			best = i
		}
	}
	return best
}
// ------------------------- Method 2: K-way merge via repeated 2-way merges -------------------------
// Runtime: 4 ms, faster than 99.41% of Go submissions
// Memory: 5.3 MB, better than 77.78% of Go submissions

// mergeKLists splits the slice in half, merges each half recursively, and
// merges the two results: divide and conquer, O(N log k).
// NOTE(review): this redeclares mergeKLists from method 1 above, so the two
// variants cannot compile in one package — this file reads as solution notes.
func mergeKLists(lists []*ListNode) *ListNode {
	if len(lists) == 0 {
		return nil
	}
	if len(lists) == 1 {
		return lists[0]
	}
	mid := len(lists) / 2
	return mergeTwoLists(mergeKLists(lists[:mid]), mergeKLists(lists[mid:]))
}
// mergeTwoLists splices two sorted lists into one sorted list, reusing the
// existing nodes. On equal values the node from listA is taken first.
func mergeTwoLists(listA *ListNode, listB *ListNode) *ListNode {
	dummy := &ListNode{-1, nil}
	tail := dummy
	for listA != nil && listB != nil {
		if listB.Val < listA.Val {
			tail.Next = listB
			listB = listB.Next
		} else {
			tail.Next = listA
			listA = listA.Next
		}
		tail = tail.Next
	}
	// At most one of the lists still has nodes; attach the remainder.
	if listA != nil {
		tail.Next = listA
	} else {
		tail.Next = listB
	}
	return dummy.Next
}
/*
Problem link: https://leetcode-cn.com/problems/merge-k-sorted-lists/
Summary
1. In method 1, finding the minimum head node (the getIndexOfMinNode function)
   can be sped up with a priority queue.
*/
|
/*
Copyright 2022 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package view
import (
"fmt"
"strings"
"github.com/gdamore/tcell/v2"
"github.com/rivo/tview"
"github.com/oam-dev/kubevela/references/cli/top/component"
"github.com/oam-dev/kubevela/references/cli/top/model"
)
// HelpView is the view which display help tips about how to use app
type HelpView struct {
	*tview.TextView
	app     *App
	actions model.KeyActions // key bindings shown in the menu hints
}

// helpViewInstance is the package-level singleton returned by NewHelpView.
var (
	helpViewInstance = new(HelpView)
)
// NewHelpView returns the shared help view, lazily creating its TextView and
// key-action table on first use; later calls reuse the existing singleton.
func NewHelpView(app *App) model.View {
	if helpViewInstance.TextView != nil {
		return helpViewInstance
	}
	helpViewInstance.TextView = tview.NewTextView()
	helpViewInstance.app = app
	helpViewInstance.actions = make(model.KeyActions)
	return helpViewInstance
}
// Init help view init: applies theme colors, border, padding, and installs
// the view's key bindings. Requires v.app to be set by NewHelpView first.
func (v *HelpView) Init() {
	// Title renders as "[ Help ]" in the table-title theme color.
	title := fmt.Sprintf("[ %s ]", v.Name())
	v.SetDynamicColors(true)
	v.SetTitle(title).SetTitleColor(v.app.config.Theme.Table.Title.Color())
	v.SetBorder(true)
	v.SetBorderAttributes(tcell.AttrItalic)
	v.SetBorderPadding(1, 1, 2, 2)
	v.bindKeys()
}
// Start the help view: renders the static usage text, substituting the
// "highlight"/"normal" placeholders with the current theme's color tags.
func (v *HelpView) Start() {
	tips := `
	[highlight:]vela top[normal:] is a UI based CLI tool provided in KubeVela. By using it, you can obtain the overview information of the platform and diagnose the resource status of the application.
	At present, the tool has provided the following feature:
	[highlight:]*[normal:] Platform information overview
	[highlight:]*[normal:] Display of resource status information in Application, Managed Resource, Pod and Container levels
	[highlight:]*[normal:] Application Resource Topology
	[highlight:]*[normal:] Resource YAML text display
	[highlight:]*[normal:] Theme switching
	This information panel component in UI header will display the performance information of the KubeVela system.
	Resource tables are in the UI body, resource of four levels are displayed here. You can use the <enter> key to enter the next resource level or the <q> key to return to the previous level.
	The crumbs component in the footer indicates the current resource level.
	At present, vela top has provided more than ten built-in themes, which you can use the <ctrl+t> key to enter theme switching view and choose according to your own preferences. What's more, vela top also supports custom themes, you can refer to the following link to customize your own theme: https://kubevela.io/docs/next/tutorials/vela-top .
	`
	// The placeholders are rewritten into tview color tags from the theme.
	tips = strings.ReplaceAll(tips, "highlight", v.app.config.Theme.Info.Title.String())
	tips = strings.ReplaceAll(tips, "normal", v.app.config.Theme.Info.Text.String())
	v.SetText(tips)
}
// Stop the help view. The view holds no timers or watchers, so this is a
// no-op kept to satisfy the view contract (presumably model.View — confirm).
func (v *HelpView) Stop() {}
// Name return help view name; it is also used as the view's border title.
func (v *HelpView) Name() string {
	return "Help"
}
// Hint return the menu hints of the help view, derived from its key actions.
func (v *HelpView) Hint() []model.MenuHint {
	return v.actions.Hint()
}
// bindKeys installs the view's key bindings: both <q> and the help key leave
// the help view. NOTE(review): KeyHelp is also described as "Back" — this
// looks intentional (pressing help while in help returns), but confirm.
func (v *HelpView) bindKeys() {
	v.actions.Add(model.KeyActions{
		component.KeyQ:    model.KeyAction{Description: "Back", Action: v.app.Back, Visible: true, Shared: true},
		component.KeyHelp: model.KeyAction{Description: "Back", Action: v.app.Back, Visible: true, Shared: true},
	})
}
|
/*
* Copyright 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package service
import "path/filepath"
const (
	// ExtensionType is the gardener extension type this service registers as.
	ExtensionType        = "shoot-dns-service"
	ServiceName          = ExtensionType
	ExtensionServiceName = "extension-" + ServiceName
	// SeedChartName / ShootChartName name the helm charts deployed into the
	// seed and shoot clusters respectively.
	SeedChartName  = ServiceName + "-seed"
	ShootChartName = ServiceName + "-shoot"
	// ImageName is the name of the dns controller manager.
	ImageName = "dns-controller-manager"
	// UserName is the name of the user used to connect to the target cluster.
	UserName = "dns.gardener.cloud:system:" + ServiceName
	// SecretName is the name of the secret used to store the access data for the shoot cluster.
	SecretName = ExtensionServiceName
)

// ChartsPath is the path to the charts
var ChartsPath = filepath.Join("charts", "internal")
|
package mime
const (
	// ContentTypeHeader is the HTTP Content-Type header name (lowercase is
	// fine: net/http canonicalizes header keys on lookup).
	ContentTypeHeader = "content-type"
	// Common MIME types for request/response bodies.
	ApplicationJSON = "application/json"
	ApplicationForm = "application/x-www-form-urlencoded"
)
|
package server
import (
"coreydaley.com/mailgoon/api"
"database/sql"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"testing"
"time"
"coreydaley.com/mailgoon/database"
)
// TestServerAPIKeyNoDatabase starts the server against a database file that
// was created but never set up (no Setup() call), and verifies that /apikey
// responds with the key-generation error body.
func TestServerAPIKeyNoDatabase(t *testing.T) {
	d := database.Database{}
	if err := d.New("/tmp/test.db"); err != nil {
		t.Errorf("Unable to create new database")
	}
	defer os.Remove("/tmp/test.db")
	s := Server{Database: &d}
	go s.Start()
	defer s.Stop()
	// Poll until the server goroutine is accepting connections.
	if err := hostIsReachable("http://localhost:8080"); err != nil {
		t.Errorf("%v", err)
		return
	}
	resp, err := http.Get("http://localhost:8080/apikey")
	if err != nil {
		t.Errorf("unable to get /apikey endpoint")
		return
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		t.Errorf("unable to read response body")
		return
	}
	if string(body) != "{\"error\":\"Unable to generate API key, please try again\"}" {
		t.Errorf("Should have gotten an error generating API key, but didn't")
	}
}
// TestServerAPIKey verifies that /apikey returns a non-empty key and that the
// key is persisted: looking it up afterwards must not yield sql.ErrNoRows.
func TestServerAPIKey(t *testing.T) {
	d := database.Database{}
	if err := d.New("/tmp/test.db"); err != nil {
		t.Errorf("Unable to create new database")
	}
	defer os.Remove("/tmp/test.db")
	d.Setup()
	s := Server{Database: &d}
	go s.Start()
	defer s.Stop()
	if err := hostIsReachable("http://localhost:8080"); err != nil {
		t.Errorf("%v", err)
		return
	}
	apiKey, err := getAPIKey()
	if err != nil {
		t.Errorf("Unable to obtain an API key: %v", err)
	}
	if len(apiKey) == 0 {
		t.Errorf("No API key was returned from the server")
	}
	// NOTE(review): errors.Is(err, sql.ErrNoRows) would be the modern check,
	// but == matches the file's existing style and works for the sentinel.
	_, err = s.Database.GetAPIKey(apiKey)
	if err == sql.ErrNoRows {
		t.Errorf("API key %s was not found in the database", apiKey)
		return
	}
}
// TestServerSendAuthentication verifies that /send rejects an unauthenticated
// request with the JSON authentication-required error body.
func TestServerSendAuthentication(t *testing.T) {
	d := database.Database{}
	if err := d.New("/tmp/test.db"); err != nil {
		t.Errorf("Unable to create new database")
	}
	defer os.Remove("/tmp/test.db")
	d.Setup()
	s := Server{Database: &d}
	go s.Start()
	defer s.Stop()
	// Poll until the server goroutine is accepting connections.
	if err := hostIsReachable("http://localhost:8080"); err != nil {
		t.Errorf("%v", err)
		return
	}
	resp, err := http.Get("http://localhost:8080/send?to=bob@bob.com")
	if err != nil {
		// Fixed copy-paste diagnostic: this request targets /send, not /apikey.
		t.Errorf("unable to get /send endpoint")
		return
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		t.Errorf("unable to read response body")
		return
	}
	if string(body) != `{"error":"Authentication required"}` {
		t.Errorf("Authentication should be required, but wasn't")
	}
}
// hostIsReachable polls url until an HTTP request succeeds, sleeping i seconds
// after attempt i (0s, 1s, ... up to 11 attempts). It returns nil as soon as
// the endpoint answers, or an error after the final attempt.
func hostIsReachable(url string) error {
	for i := 0; i <= 10; i++ {
		resp, err := http.Get(url)
		if err == nil {
			// Close the body so the underlying connection is released;
			// the previous version leaked it.
			resp.Body.Close()
			return nil
		}
		// Linear backoff. Plain Duration arithmetic replaces the old
		// ParseDuration round-trip, whose error path could never trigger.
		time.Sleep(time.Duration(i) * time.Second)
	}
	return fmt.Errorf("endpoint %s is unreachable", url)
}
// getAPIKey requests a fresh key from the local server's /apikey endpoint and
// returns the key string decoded from the JSON response.
func getAPIKey() (string, error) {
	resp, err := http.Get("http://localhost:8080/apikey")
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	payload, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	key := api.APIKey{}
	if err := json.Unmarshal(payload, &key); err != nil {
		return "", err
	}
	return key.APIKey, nil
}
|
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mock
import (
"testing"
"github.com/stretchr/testify/assert"
)
// contextKeyType is a private key type so values set by this package cannot
// collide with context keys defined elsewhere.
type contextKeyType int

// String identifies the key in debug output.
func (k contextKeyType) String() string {
	return "mock_key"
}

// contextKey is the single key used by the tests below.
const contextKey contextKeyType = 0
// TestContext checks the mock context's value round-trip: SetValue stores a
// value retrievable via Value, and ClearValue removes it.
func TestContext(t *testing.T) {
	ctx := NewContext()
	ctx.SetValue(contextKey, 1)
	v := ctx.Value(contextKey)
	assert.Equal(t, 1, v)
	ctx.ClearValue(contextKey)
	v = ctx.Value(contextKey)
	assert.Nil(t, v)
}
// BenchmarkNewContext measures the cost (time and allocations) of building a
// fresh mock context.
func BenchmarkNewContext(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		NewContext()
	}
}
|
package draw
// init installs a fresh System as the package-level instance.
func init() {
	// NOTE(review): "SystemInstace" looks like a typo of "SystemInstance";
	// the variable is declared elsewhere, so any rename must happen there too.
	SystemInstace = &System{}
}
|
package util
import (
"fmt"
"github.com/spf13/pflag"
"strings"
"github.com/fsnotify/fsnotify"
"github.com/spf13/viper"
)
// InitialiseConfig initialises a generic config.
//
// Any registered flags are available as environment variables under the `[APP]` prefix:
// e.g. db.username => BACKEND_DB.USERNAME=backend
//
// Any registered flags are also available as command line arguments:
// e.g. db.username => ./app --db.username=backend
//
// Any registered flags are also available within toml, yaml, json or xml configuration files:
// e.g. db.username => backend.toml
// [db]
// username = app
//
// The config library looks for config files in the following paths:
// - /etc/[app]/
// - $HOME/.[app]/
// - ./config/
// - . (working directory)
//
// The name of the config file ([app].toml) depends on the argument passed to InitialiseConfig:
// e.g. "backend" => /etc/faceit/backend.toml, backend.yaml, backend.json...
func InitialiseConfig(name string) {
	// look for env variables in the format "[APP]_PORT=1338"
	viper.SetEnvPrefix(strings.ToUpper(name))
	// look for config files with name name.yml, name.toml, name.json...
	viper.SetConfigName(name)
	// ... in these folders
	viper.AddConfigPath(fmt.Sprintf("/etc/%s", name))
	viper.AddConfigPath(fmt.Sprintf("$HOME/.%s", name))
	viper.AddConfigPath("./config")
	viper.AddConfigPath(".") // working directory
	// parse flags from process arg list
	pflag.Parse()
	// bind parsed flags to config library
	if err := viper.BindPFlags(pflag.CommandLine); err != nil {
		panic(err)
	}
	// check for environment variables now; dots in keys map to underscores
	// in env var names (db.username -> [APP]_DB_USERNAME)
	viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
	viper.AutomaticEnv()
	// try to find and read config file now
	if err := viper.ReadInConfig(); err != nil {
		if _, ok := err.(viper.ConfigFileNotFoundError); ok {
			// Config file not found; ignore error if desired — env vars and
			// flags alone are a valid configuration.
		} else {
			// Config file was found but another error was produced
			panic(err)
		}
	}
	// watch config file for changes and log on reload
	// (GetS() presumably returns the package's sugared logger — confirm.)
	viper.WatchConfig()
	viper.OnConfigChange(func(e fsnotify.Event) {
		GetS().Info("Config file changed:", e.Name)
	})
	if used := viper.ConfigFileUsed(); used != "" {
		GetS().Info("Using config file:", used)
	}
}
|
package parser
import (
"github.com/goodwine/yaaji/parser/structures"
"github.com/goodwine/yaaji/tokenizer"
)
// Parse builds a structure tree from the token stream and returns its root.
// Each token is converted with Structurify and handed to the current node's
// Add, whose return value becomes the insertion point for the next token.
func Parse(tokens []tokenizer.Token) structures.Structure {
	var root structures.Structure = &structures.Code{}
	tree := root
	for _, token := range tokens {
		tree = tree.Add(structures.Structurify(token))
	}
	// root is already declared as structures.Structure; the previous explicit
	// conversion was redundant.
	return root
}
|
package model
// PresetConfiguration identifies a named encoding preset by its API value.
type PresetConfiguration string

// List of PresetConfiguration
// NOTE(review): underscore names are non-idiomatic Go but look like
// generated-API style; renaming would break the public API, so they stay.
const (
	PresetConfiguration_LIVE_HIGH_QUALITY   PresetConfiguration = "LIVE_HIGH_QUALITY"
	PresetConfiguration_LIVE_LOW_LATENCY    PresetConfiguration = "LIVE_LOW_LATENCY"
	PresetConfiguration_VOD_HIGH_QUALITY    PresetConfiguration = "VOD_HIGH_QUALITY"
	PresetConfiguration_VOD_HIGH_SPEED      PresetConfiguration = "VOD_HIGH_SPEED"
	PresetConfiguration_VOD_SPEED           PresetConfiguration = "VOD_SPEED"
	PresetConfiguration_VOD_STANDARD        PresetConfiguration = "VOD_STANDARD"
	PresetConfiguration_VOD_EXTRAHIGH_SPEED PresetConfiguration = "VOD_EXTRAHIGH_SPEED"
	PresetConfiguration_VOD_VERYHIGH_SPEED  PresetConfiguration = "VOD_VERYHIGH_SPEED"
	PresetConfiguration_VOD_SUPERHIGH_SPEED PresetConfiguration = "VOD_SUPERHIGH_SPEED"
	PresetConfiguration_VOD_ULTRAHIGH_SPEED PresetConfiguration = "VOD_ULTRAHIGH_SPEED"
)
package main
import (
"fmt"
"net"
"strconv"
"bufio"
)
const (
	// defaultHost/defaultPort locate the key-value server this client probes.
	defaultHost = "localhost"
	defaultPort = 9999
)
// To test your server implementation, you might find it helpful to implement a
// simple 'client runner' program. The program could be very simple, as long as
// it is able to connect with and send messages to your server and is able to
// read and print out the server's echoed response to standard output. Whether or
// not you add any code to this file will not affect your grade.
func main() {
	conn, err := net.Dial("tcp", defaultHost+":"+strconv.Itoa(defaultPort))
	if err != nil {
		fmt.Printf("Dial error\n")
		return
	}
	// One buffered reader shared by every request so a reply is never split
	// between readers.
	reader := bufio.NewReader(conn)
	// Exercise the server: overwrite the same key a few times, then delete it
	// and confirm the final Get (the response format is server-defined).
	testPut(conn, reader, "foo", "bar")
	testGet(conn, reader, "foo")
	testPut(conn, reader, "foo", "sun")
	testGet(conn, reader, "foo")
	testPut(conn, reader, "foo", "sweet")
	testGet(conn, reader, "foo")
	testDelete(conn, reader, "foo")
	testGet(conn, reader, "foo")
	conn.Close()
}
func testPut(conn net.Conn, reader *bufio.Reader, key string, value string) {
fmt.Fprintf(conn, "Put:%s:%s\n", key, value)
}
// testGet sends a Get request for key and prints the first line of the reply.
func testGet(conn net.Conn, reader *bufio.Reader, key string) {
	fmt.Fprintf(conn, "Get:%s\n", key)
	// TODO: this is wrong, it does not read all messages.
	// Only one newline-terminated message is consumed; a server that replies
	// with several lines per Get leaves data buffered. Read error is ignored.
	bytes, _ := reader.ReadBytes('\n')
	fmt.Printf("resp: %s", string(bytes))
}
func testDelete(conn net.Conn, reader *bufio.Reader, key string) {
fmt.Fprintf(conn, "Delete:%s\n", key)
}
|
package main
import (
"fmt"
"math"
)
type Circle struct {
x, y, r float64
}
func (c *Circle) getArea() float64 {
return math.Pi * c.r * c.r
}
// main prints the area of a sample circle.
func main() {
	sample := Circle{x: 10, y: 20, r: 10}
	fmt.Println(sample.getArea())
}
|
package main
import (
"context"
"errors"
"fmt"
"net/http"
"os"
"os/signal"
"syscall"
"time"
"golang.org/x/sync/errgroup"
)
// server runs an HTTP server on addr that answers every path, and shuts it
// down when ctx is cancelled. It returns the error from ListenAndServe
// (http.ErrServerClosed after a clean shutdown).
func server(ctx context.Context, addr string) error {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprintf(w, "Get response from %s server", addr)
	})
	s := http.Server{Addr: addr, Handler: mux}
	go func() {
		// Plain receive replaces the previous single-case select.
		<-ctx.Done()
		// ctx is already cancelled here; passing it to Shutdown would skip
		// the graceful drain, so use a fresh, bounded context instead.
		shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()
		_ = s.Shutdown(shutdownCtx)
		fmt.Printf("server %s is cancelled\n", addr)
	}()
	fmt.Printf("start %s service...\n", addr)
	return s.ListenAndServe()
}
// fakeService:模拟的服务处理函数
func fakeService(ctx context.Context) error {
ch := make(chan struct{})
go func() {
select {
case <-ctx.Done():
close(ch)
fmt.Println("fake service is cancelled")
}
}()
fmt.Println("start fake service...")
select {
case <-time.After(10 * time.Second): //程序不中断的话默认10s后返回一个错误
return errors.New("fake service error")
case <-ch:
return nil
}
}
// listenSignal: 信号监听
func listenSignal(ctx context.Context) error {
fmt.Println("start listen signal service...")
sigCh := make(chan os.Signal)
signal.Notify(sigCh)
go func() {
select {
case <-ctx.Done():
signal.Stop(sigCh)
close(sigCh)
fmt.Println("listen signal service is cancelled")
}
}()
if sig, ok := <-sigCh; ok {
fmt.Printf("Got signal: %s\n", sig)
switch sig {
case syscall.SIGINT, syscall.SIGTERM:
return fmt.Errorf("Got %s signal, exit the program...", sig)
default:
return nil
}
} else {
return nil
}
}
// main runs two HTTP servers, a signal listener and a fake worker inside one
// errgroup: the first member to return an error cancels ctx, which tells all
// of the others to shut down.
func main() {
	g, ctx := errgroup.WithContext(context.Background())
	g.Go(func() error {
		return listenSignal(ctx)
	})
	g.Go(func() error {
		return server(ctx, "127.0.0.1:9000")
	})
	g.Go(func() error {
		return server(ctx, "127.0.0.1:9001")
	})
	g.Go(func() error {
		return fakeService(ctx)
	})
	if err := g.Wait(); err != nil {
		fmt.Printf("[main] Got error: %+v\n", err)
	}
	// Give the cancellation goroutines a moment to print their shutdown logs.
	time.Sleep(2 * time.Second)
	fmt.Printf("[main] All services are cancelled!\n")
}
|
package main
import (
"testing"
)
// TestFindMode is an empty placeholder: it makes no assertions yet.
// TODO: exercise the mode-finding code with representative inputs.
func TestFindMode(t *testing.T) {
}
|
/*
Copyright 2022 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package multicluster
import (
"context"
"errors"
"strconv"
"testing"
"time"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metricsV1beta1api "k8s.io/metrics/pkg/apis/metrics/v1beta1"
clusterv1 "open-cluster-management.io/api/cluster/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
clustercommon "github.com/oam-dev/cluster-gateway/pkg/common"
"github.com/oam-dev/kubevela/pkg/utils/common"
)
const (
NormalClusterName = "normal-cluster"
DisconnectedClusterName = "disconnected-cluster"
NodeName1 = "node-1"
NodeName2 = "node-2"
)
// TestRefresh exercises the cluster-metrics refresh path against two fake
// member clusters: a reachable one backed by two fake nodes with node
// metrics, and one whose client always fails (disconnectedClient). After
// Refresh, per-cluster expectations are checked by assertClusterMetrics.
func TestRefresh(t *testing.T) {
	ClusterGatewaySecretNamespace = "default"
	// Hub client: one managed cluster plus one credential secret per
	// virtual cluster (secrets are what make the clusters discoverable).
	fakeClient := NewFakeClient(fake.NewClientBuilder().
		WithScheme(common.Scheme).
		WithRuntimeObjects(FakeManagedCluster("managed-cluster")).
		WithObjects(FakeSecret(NormalClusterName), FakeSecret(DisconnectedClusterName)).
		Build())
	// Reachable member cluster: two nodes (8+7 CPU, 16Gi+32Gi memory) and
	// their usage metrics (4+1 CPU, 8Gi+3Gi memory).
	normalCluster := fake.NewClientBuilder().
		WithScheme(common.Scheme).
		WithObjects(FakeNode(NodeName1, "8", strconv.FormatInt(16*1024*1024*1024, 10)),
			FakeNode(NodeName2, "7", strconv.FormatInt(32*1024*1024*1024, 10)),
			FakeNodeMetrics(NodeName1, "4", strconv.FormatInt(8*1024*1024*1024, 10)),
			FakeNodeMetrics(NodeName2, "1", strconv.FormatInt(3*1024*1024*1024, 10))).
		Build()
	disconnectedCluster := &disconnectedClient{}
	fakeClient.AddCluster(NormalClusterName, normalCluster)
	fakeClient.AddCluster(DisconnectedClusterName, disconnectedCluster)

	mgr, err := NewClusterMetricsMgr(context.Background(), fakeClient, 15*time.Second)
	assert.NoError(t, err)
	_, err = mgr.Refresh()
	assert.NoError(t, err)

	// Metrics must be attached whether clusters are listed or fetched singly.
	clusters, err := ListVirtualClusters(context.Background(), fakeClient)
	assert.NoError(t, err)
	for _, cluster := range clusters {
		assertClusterMetrics(t, &cluster)
	}
	disCluster, err := GetVirtualCluster(context.Background(), fakeClient, DisconnectedClusterName)
	assert.NoError(t, err)
	assertClusterMetrics(t, disCluster)
	norCluster, err := GetVirtualCluster(context.Background(), fakeClient, NormalClusterName)
	assert.NoError(t, err)
	assertClusterMetrics(t, norCluster)

	// Export the collected metrics for both clusters.
	exportMetrics(disCluster.Metrics, disCluster.Name)
	exportMetrics(norCluster.Metrics, norCluster.Name)
}
// assertClusterMetrics verifies the metrics collected for the two fake
// clusters: the disconnected one must report no data, the normal one must
// report the aggregated capacity/usage of its two fake nodes.
func assertClusterMetrics(t *testing.T, cluster *VirtualCluster) {
	metrics := cluster.Metrics
	switch cluster.Name {
	case DisconnectedClusterName:
		// Use the dedicated helpers instead of assert.Equal with the
		// actual value in the "expected" slot (testify is expected-first).
		assert.False(t, metrics.IsConnected)
		assert.Nil(t, metrics.ClusterInfo)
		assert.Nil(t, metrics.ClusterUsageMetrics)
	case NormalClusterName:
		assert.True(t, metrics.IsConnected)
		// Capacity/allocatable: 8+7 CPU and 16Gi+32Gi memory from the two fake nodes.
		assert.True(t, resource.MustParse("15").Equal(metrics.ClusterInfo.CPUCapacity))
		assert.True(t, resource.MustParse(strconv.FormatInt(48*1024*1024*1024, 10)).Equal(metrics.ClusterInfo.MemoryCapacity))
		assert.True(t, resource.MustParse("15").Equal(metrics.ClusterInfo.CPUAllocatable))
		assert.True(t, resource.MustParse(strconv.FormatInt(48*1024*1024*1024, 10)).Equal(metrics.ClusterInfo.MemoryAllocatable))
		// Usage: 4+1 CPU and 8Gi+3Gi memory from the fake node metrics.
		assert.True(t, resource.MustParse("5").Equal(metrics.ClusterUsageMetrics.CPUUsage))
		assert.True(t, resource.MustParse(strconv.FormatInt(11*1024*1024*1024, 10)).Equal(metrics.ClusterUsageMetrics.MemoryUsage))
	}
}
// FakeNodeMetrics builds a NodeMetrics object with the given name and
// cpu/memory usage quantities, for use in tests.
func FakeNodeMetrics(name string, cpu string, memory string) *metricsV1beta1api.NodeMetrics {
	usage := corev1.ResourceList{
		corev1.ResourceCPU:    resource.MustParse(cpu),
		corev1.ResourceMemory: resource.MustParse(memory),
	}
	m := &metricsV1beta1api.NodeMetrics{}
	m.Name = name
	m.Usage = usage
	return m
}
// FakeNode builds a Node whose capacity and allocatable resources are both
// set to the given cpu/memory quantities, for use in tests.
func FakeNode(name string, cpu string, memory string) *corev1.Node {
	// Build two independent resource maps (capacity and allocatable must
	// not share the same map value).
	newResources := func() map[corev1.ResourceName]resource.Quantity {
		return map[corev1.ResourceName]resource.Quantity{
			corev1.ResourceCPU:    resource.MustParse(cpu),
			corev1.ResourceMemory: resource.MustParse(memory),
		}
	}
	n := &corev1.Node{}
	n.Name = name
	n.Status = corev1.NodeStatus{
		Allocatable: newResources(),
		Capacity:    newResources(),
	}
	return n
}
// FakeSecret builds a cluster-credential Secret in the gateway secret
// namespace, labeled as a ServiceAccountToken credential, for use in tests.
func FakeSecret(name string) *corev1.Secret {
	s := &corev1.Secret{}
	s.Name = name
	s.Namespace = ClusterGatewaySecretNamespace
	s.Labels = map[string]string{
		clustercommon.LabelKeyClusterCredentialType: "ServiceAccountToken",
	}
	return s
}
// FakeManagedCluster builds a ManagedCluster carrying only its name.
func FakeManagedCluster(name string) *clusterv1.ManagedCluster {
	mc := clusterv1.ManagedCluster{}
	mc.Name = name
	return &mc
}
// disconnectedClient embeds client.Client but overrides List to always
// fail, simulating a member cluster that cannot be reached.
type disconnectedClient struct {
	client.Client
}
// List always fails with a DNS-style error, mimicking an unreachable host.
func (cli *disconnectedClient) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
	err := errors.New("no such host")
	return err
}
|
package errors
// ErrNoID is returned when a request does not specify the user's ID.
// NOTE(review): New appears to take (message, HTTP status) — it is defined
// elsewhere in this package; confirm the second argument's semantics there.
var ErrNoID = New("user's ID not specified", 400)

// ErrUserNotFound is returned when the requested user does not exist.
var ErrUserNotFound = New("user not found", 404)
|
package testing
import (
"context"
"github.com/brigadecore/brigade/sdk/v3"
)
// MockSubstrateClient is a test double for the substrate client: each
// method delegates to the corresponding settable function field.
type MockSubstrateClient struct {
	// CountRunningWorkersFn backs CountRunningWorkers.
	CountRunningWorkersFn func(
		context.Context,
		*sdk.RunningWorkerCountOptions,
	) (sdk.SubstrateWorkerCount, error)
	// CountRunningJobsFn backs CountRunningJobs.
	CountRunningJobsFn func(
		context.Context,
		*sdk.RunningJobCountOptions,
	) (sdk.SubstrateJobCount, error)
}
// CountRunningWorkers delegates to the CountRunningWorkersFn field, which
// the test must populate before calling (a nil field panics).
func (m *MockSubstrateClient) CountRunningWorkers(
	ctx context.Context,
	opts *sdk.RunningWorkerCountOptions,
) (sdk.SubstrateWorkerCount, error) {
	count, err := m.CountRunningWorkersFn(ctx, opts)
	return count, err
}
// CountRunningJobs delegates to the CountRunningJobsFn field, which the
// test must populate before calling (a nil field panics).
func (m *MockSubstrateClient) CountRunningJobs(
	ctx context.Context,
	opts *sdk.RunningJobCountOptions,
) (sdk.SubstrateJobCount, error) {
	count, err := m.CountRunningJobsFn(ctx, opts)
	return count, err
}
|
package main
import (
"fmt"
"strings"
)
/**
面试题 16.18. 模式匹配
你有两个字符串,即`pattern`和`value`。
`pattern`字符串由字母`"a"`和`"b"`组成,用于描述字符串中的模式。
例如,字符串`"catcatgocatgo"`匹配模式`"aabab"`(其中`"cat"`是`"a"`,`"go"`是`"b"`),该字符串也匹配像`"a"`、`"ab"`和`"b"`这样的模式。
但需注意`"a"`和`"b"`不能同时表示相同的字符串。编写一个方法判断`value`字符串是否匹配`pattern`字符串。
示例1:
```
输入: pattern = "abba", value = "dogcatcatdog"
输出: true
```
示例2:
```
输入: pattern = "abba", value = "dogcatcatfish"
输出: false
```
示例3:
```
输入: pattern = "aaaa", value = "dogcatcatdog"
输出: false
```
示例4:
```
输入: pattern = "abba", value = "dogdogdogdog"
输出: true
解释: "a"="dogdog",b="",反之也符合规则
```
提示:
- `0 <= len(pattern) <= 1000`
- `0 <= len(value) <= 1000`
- 你可以假设`pattern`只包含字母`"a"`和`"b"`,`value`仅包含小写字母。
*/
// PatternMatching reports whether value matches pattern (LeetCode 面试题
// 16.18). pattern consists of letters 'a' and 'b'; each letter stands for a
// fixed (possibly empty) substring, and the substrings bound to 'a' and 'b'
// must differ. The previous brute-force ReplaceAll version was marked ERROR
// by its author; this replaces it with the standard correct algorithm.
//
// Strategy: count occurrences of each letter, normalize so 'a' is the more
// frequent one, then enumerate every feasible length for the 'a' substring.
// The 'b' length is then fixed by the total length and one linear scan
// checks consistency. Worst case O(len(value)^2).
func PatternMatching(pattern string, value string) bool {
	// Keep the file's fmt import in use now that the old debug prints are
	// gone; remove together with the import in a follow-up.
	_ = fmt.Sprint

	countA := strings.Count(pattern, "a")
	countB := len(pattern) - countA

	// Normalize so countA >= countB, flipping the letters if needed.
	if countA < countB {
		countA, countB = countB, countA
		flipped := make([]byte, len(pattern))
		for i := 0; i < len(pattern); i++ {
			if pattern[i] == 'a' {
				flipped[i] = 'b'
			} else {
				flipped[i] = 'a'
			}
		}
		pattern = string(flipped)
	}

	if len(value) == 0 {
		// Only an all-'a' (or empty) pattern matches "", with 'a' -> "".
		return countB == 0
	}
	if len(pattern) == 0 {
		// A non-empty value cannot match an empty pattern.
		return false
	}

	for lenA := 0; countA*lenA <= len(value); lenA++ {
		rest := len(value) - countA*lenA
		var lenB int
		if countB == 0 {
			if rest != 0 {
				continue // leftover characters with no 'b' to absorb them
			}
		} else {
			if rest%countB != 0 {
				continue // 'b' occurrences cannot evenly cover the rest
			}
			lenB = rest / countB
		}

		// Walk the pattern, carving value into chunks; all 'a' chunks must
		// agree with each other, and likewise all 'b' chunks.
		pos := 0
		subA, subB := "", ""
		seenA, seenB := false, false
		ok := true
		for i := 0; i < len(pattern) && ok; i++ {
			if pattern[i] == 'a' {
				chunk := value[pos : pos+lenA]
				if !seenA {
					subA, seenA = chunk, true
				} else if subA != chunk {
					ok = false
				}
				pos += lenA
			} else {
				chunk := value[pos : pos+lenB]
				if !seenB {
					subB, seenB = chunk, true
				} else if subB != chunk {
					ok = false
				}
				pos += lenB
			}
		}
		// 'a' and 'b' may not map to the same substring.
		if ok && subA != subB {
			return true
		}
	}
	return false
}
|
package viewmodel
// SuccesVM is the success-response envelope: an optional payload plus an
// optional human-readable message.
// NOTE(review): the name is missing an "s" ("SuccessVM"); renaming would
// break existing callers, so it is only flagged here.
type SuccesVM struct {
	Data interface{} `json:"data,omitempty"`
	Message string `json:"message,omitempty"`
}
|
package models
import "github.com/jinzhu/gorm"
// Article is the gorm model for a blog article, persisted in the "article"
// table (see TableName).
type Article struct {
	gorm.Model
	// Category is a numeric category identifier.
	Category int
	Title string
	// Tags holds the article tags; the encoding (e.g. comma-separated) is
	// not visible here — confirm against callers.
	Tags string
	Content string
	Author string
	// Status is a numeric state flag; its value set is defined by callers.
	Status int
	// Intro is presumably a short summary shown in listings — verify.
	Intro string
	// OuterLink is presumably an external URL for the article — verify.
	OuterLink string
}
// TableName tells gorm to map Article onto the "article" table.
func (Article) TableName() string {
	const tableName = "article"
	return tableName
}
|
/*
* Copyright 2018, CS Systemes d'Information, http://www.c-s.fr
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package boh
/*
* Implements a cluster of hosts without cluster management environment
*/
import (
"bytes"
"encoding/gob"
"fmt"
"runtime"
"strconv"
"strings"
txttmpl "text/template"
"time"
rice "github.com/GeertJohan/go.rice"
log "github.com/sirupsen/logrus"
clusterapi "github.com/CS-SI/SafeScale/deploy/cluster/api"
pb "github.com/CS-SI/SafeScale/broker"
brokerclient "github.com/CS-SI/SafeScale/broker/client"
pbutils "github.com/CS-SI/SafeScale/broker/utils"
"github.com/CS-SI/SafeScale/deploy/cluster/enums/ClusterState"
"github.com/CS-SI/SafeScale/deploy/cluster/enums/Complexity"
"github.com/CS-SI/SafeScale/deploy/cluster/enums/Extension"
"github.com/CS-SI/SafeScale/deploy/cluster/enums/Flavor"
"github.com/CS-SI/SafeScale/deploy/cluster/enums/NodeType"
"github.com/CS-SI/SafeScale/deploy/cluster/flavors/boh/enums/ErrorCode"
flavortools "github.com/CS-SI/SafeScale/deploy/cluster/flavors/utils"
"github.com/CS-SI/SafeScale/deploy/cluster/metadata"
"github.com/CS-SI/SafeScale/deploy/install"
"github.com/CS-SI/SafeScale/providers"
providermetadata "github.com/CS-SI/SafeScale/providers/metadata"
"github.com/CS-SI/SafeScale/providers/model"
"github.com/CS-SI/SafeScale/utils"
"github.com/CS-SI/SafeScale/utils/provideruse"
"github.com/CS-SI/SafeScale/utils/retry"
"github.com/CS-SI/SafeScale/utils/template"
)
//go:generate rice embed-go

// Timeouts and paths used by this flavor; the uses are in other files of
// the package, so the notes below are best-effort — verify at call sites.
const (
	// timeoutCtxHost presumably bounds host-related operations.
	timeoutCtxHost = 10 * time.Minute
	// shortTimeoutSSH / longTimeoutSSH presumably bound SSH waits.
	shortTimeoutSSH = time.Minute
	longTimeoutSSH = 5 * time.Minute
	// tempFolder is a scratch directory path on target hosts.
	tempFolder = "/var/tmp/"
)
var (
	// bohTemplateBox is the rice box to use in this package
	bohTemplateBox *rice.Box

	// funcMap defines the custom functions to be used in templates
	funcMap = txttmpl.FuncMap{
		// The name "inc" is what the function will be called in the template text.
		"inc": func(i int) int {
			return i + 1
		},
		// "errcode" maps a known error-message key to its numeric code,
		// falling back to 1023 for unknown messages.
		"errcode": func(msg string) int {
			if code, ok := ErrorCode.StringMap[msg]; ok {
				return int(code)
			}
			return 1023
		},
	}

	// installCommonRequirementsContent caches the rendered requirements
	// script (see getInstallCommonRequirements).
	installCommonRequirementsContent *string
)
// managerData defines the data used by the manager of cluster we want to keep in Object Storage
type managerData struct {
	// MasterIDs contains the ID of the masters
	MasterIDs []string
	// MasterIPs contains the IP of the masters
	MasterIPs []string
	// PublicNodeIPs contains a list of IP of the Public Agent nodes
	PublicNodeIPs []string
	// PrivateNodeIPs contains a list of IP of the Private Agent Nodes
	PrivateNodeIPs []string
	// StateCollectInterval in seconds
	StateCollectInterval time.Duration
	// PrivateLastIndex is the last index used to name a private node (see buildHostname)
	PrivateLastIndex int
	// PublicLastIndex is the last index used to name a public node (see buildHostname)
	PublicLastIndex int
}
// Cluster is the object describing a cluster
type Cluster struct {
	// Core cluster data
	Core *clusterapi.ClusterCore
	// manager contains data specific to the cluster management
	manager *managerData
	// lastStateCollection contains the date of the last state collection
	lastStateCollection time.Time
	// metadata of cluster (its Object Storage representation)
	metadata *metadata.Cluster
	// provider is a pointer to current provider service instance
	provider *providers.Service
}
// GetNetworkID returns the ID of the network used by the cluster,
// delegating to the core cluster data.
func (c *Cluster) GetNetworkID() string {
	id := c.Core.GetNetworkID()
	return id
}
// CountNodes returns the number of public (public == true) or private
// nodes in the cluster, delegating to the core cluster data.
func (c *Cluster) CountNodes(public bool) uint {
	count := c.Core.CountNodes(public)
	return count
}
// GetExtension returns the additional info registered under the given
// extension key. The parameter was renamed from `ctx`: it is an extension
// key, not a context.Context, and the old name was misleading; Go arguments
// are positional, so callers are unaffected.
func (c *Cluster) GetExtension(ext Extension.Enum) interface{} {
	return c.Core.GetExtension(ext)
}
// SetExtension stores additional info under the given extension key. The
// parameter was renamed from `ctx`: it is an extension key, not a
// context.Context; Go arguments are positional, so callers are unaffected.
func (c *Cluster) SetExtension(ext Extension.Enum, info interface{}) {
	c.Core.SetExtension(ext, info)
}
// Load loads the internals of an existing cluster from metadata
//
// It resolves the current provider service, wraps the metadata's core
// record into a Cluster, and rebinds the FlavorV1 extension to the manager
// field (see resetExtensions).
func Load(data *metadata.Cluster) (clusterapi.Cluster, error) {
	svc, err := provideruse.GetProviderService()
	if err != nil {
		return nil, err
	}
	core := data.Get()
	instance := &Cluster{
		Core: core,
		metadata: data,
		provider: svc,
	}
	instance.resetExtensions(core)
	return instance, nil
}
// resetExtensions rebinds c.manager to the FlavorV1 extension deserialized
// into core, then stores a pointer back into the extension so later
// mutations through c.manager stay visible.
func (c *Cluster) resetExtensions(core *clusterapi.ClusterCore) {
	if core == nil {
		return
	}
	anon := core.GetExtension(Extension.FlavorV1)
	if anon != nil {
		// NOTE(review): unchecked type assertion — this panics if the
		// extension already holds *managerData rather than a managerData
		// value; confirm what GetExtension can return after a reload.
		manager := anon.(managerData)
		c.manager = &manager
		// Note: On Load(), need to replace Extensions that are structs to pointers to struct
		core.SetExtension(Extension.FlavorV1, &manager)
	}
}
// Reload reloads the cluster metadata from Object Storage and rebinds the
// flavor extension to the freshly loaded core data.
func (c *Cluster) Reload() error {
	if err := c.metadata.Reload(); err != nil {
		return err
	}
	c.resetExtensions(c.metadata.Get())
	return nil
}
// Create creates the necessary infrastructure of cluster
//
// Order of operations: network (with gateway) -> cladm keypair -> cluster
// metadata -> reverseproxy on gateway (async) -> master -> private nodes.
// Failure handling unwinds through the cleanNodes/cleanNetwork labels;
// most variables are declared up-front because Go forbids a goto that
// jumps over new declarations.
func Create(req clusterapi.Request) (clusterapi.Cluster, error) {
	var (
		instance Cluster
		privateNodeCount int
		gw *model.Host
		m *providermetadata.Gateway
		// masterChannel chan error
		// masterStatus error
		rpChannel chan error
		rpStatus error
		nodesStatus error
		ok bool
		kpName string
		kp *model.KeyPair
		err error
		feature *install.Feature
		target install.Target
		results install.Results
	)

	// Generate needed password for account cladm
	cladmPassword, err := utils.GeneratePassword(16)
	if err != nil {
		return nil, fmt.Errorf("failed to generate password for user cladm: %s", err.Error())
	}

	// Minimum node sizing; the request may only increase these values.
	nodesDef := pb.HostDefinition{
		CPUNumber: 4,
		RAM: 15.0,
		Disk: 100,
		ImageID: "Ubuntu 16.04",
	}
	if req.NodesDef != nil {
		if req.NodesDef.CPUNumber > nodesDef.CPUNumber {
			nodesDef.CPUNumber = req.NodesDef.CPUNumber
		}
		if req.NodesDef.RAM > nodesDef.RAM {
			nodesDef.RAM = req.NodesDef.RAM
		}
		if req.NodesDef.Disk > nodesDef.Disk {
			nodesDef.Disk = req.NodesDef.Disk
		}
		if req.NodesDef.ImageID != "" && req.NodesDef.ImageID != nodesDef.ImageID {
			nodesDef.ImageID = req.NodesDef.ImageID
		}
	}

	// Creates network
	log.Printf("Creating Network 'net-%s'", req.Name)
	req.Name = strings.ToLower(req.Name)
	networkName := "net-" + req.Name
	def := pb.NetworkDefinition{
		Name: networkName,
		CIDR: req.CIDR,
		Gateway: &pb.GatewayDefinition{
			CPU: 2,
			RAM: 15.0,
			Disk: 60,
			ImageID: "Ubuntu 16.04",
		},
	}
	network, err := brokerclient.New().Network.Create(def, brokerclient.DefaultExecutionTimeout)
	if err != nil {
		err = fmt.Errorf("Failed to create Network '%s': %s", networkName, err.Error())
		return nil, err
	}
	req.NetworkID = network.ID
	broker := brokerclient.New()

	// Load the metadata of the freshly created gateway host.
	// NOTE(review): from here until `instance` is populated, any jump to
	// cleanNetwork dereferences instance.Core (nil) and instance.metadata
	// (nil) — verify those paths cannot panic.
	svc, err := provideruse.GetProviderService()
	if err != nil {
		goto cleanNetwork
	}
	m, err = providermetadata.NewGateway(svc, req.NetworkID)
	if err != nil {
		goto cleanNetwork
	}
	ok, err = m.Read()
	if err != nil {
		goto cleanNetwork
	}
	if !ok {
		err = fmt.Errorf("failed to load gateway metadata")
		goto cleanNetwork
	}
	gw = m.Get()

	// Create a KeyPair for the user cladm
	kpName = "cluster_" + req.Name + "_cladm_key"
	kp, err = svc.CreateKeyPair(kpName)
	if err != nil {
		err = fmt.Errorf("failed to create Key Pair: %s", err.Error())
		goto cleanNetwork
	}

	// Saving cluster parameters, with status 'Creating'
	instance = Cluster{
		Core: &clusterapi.ClusterCore{
			Name: req.Name,
			CIDR: req.CIDR,
			Flavor: Flavor.BOH,
			Keypair: kp,
			State: ClusterState.Creating,
			Complexity: req.Complexity,
			Tenant: req.Tenant,
			NetworkID: req.NetworkID,
			GatewayIP: gw.GetPrivateIP(),
			PublicIP: gw.GetAccessIP(),
			AdminPassword: cladmPassword,
			NodesDef: nodesDef,
			DisabledFeatures: req.DisabledDefaultFeatures,
		},
		manager: &managerData{},
		provider: svc,
	}
	instance.SetExtension(Extension.FlavorV1, instance.manager)
	err = instance.updateMetadata(nil)
	if err != nil {
		err = fmt.Errorf("failed to create cluster '%s': %s", req.Name, err.Error())
		goto cleanNetwork
	}

	// Node count scales with the requested complexity.
	switch req.Complexity {
	case Complexity.Small:
		privateNodeCount = 1
	case Complexity.Normal:
		privateNodeCount = 3
	case Complexity.Large:
		privateNodeCount = 7
	}

	runtime.GOMAXPROCS(runtime.NumCPU())

	//VPL: disables unconditionnaly proxycache for now
	instance.Core.DisabledFeatures["proxycache"] = struct{}{}
	// Dead branch while the line above force-disables proxycache.
	if _, ok = instance.Core.DisabledFeatures["proxycache"]; !ok {
		target = install.NewHostTarget(pbutils.ToPBHost(gw))
		feature, err = install.NewFeature("proxycache-server")
		if err != nil {
			goto cleanNetwork
		}
		results, err = feature.Add(target, install.Variables{}, install.Settings{})
		if err != nil {
			goto cleanNetwork
		}
		if !results.Successful() {
			// NOTE(review): non-constant format string passed to
			// fmt.Errorf (flagged by go vet); prefer fmt.Errorf("%s", ...).
			err = fmt.Errorf(results.AllErrorMessages())
			goto cleanNetwork
		}
	}

	// step 1: Launching reverseproxy installation on gateway, in parallel
	rpChannel = make(chan error)
	go instance.asyncInstallReverseProxy(gw, rpChannel)

	// Step 2: starts master creation and nodes creation
	err = instance.createMaster(nodesDef)
	if err != nil {
		goto cleanNetwork
	}

	// // step 2: configure master asynchronously
	// masterChannel = make(chan error)
	// go instance.asyncConfigureMasters(masterChannel)

	// Step 3: starts node creation asynchronously
	_, nodesStatus = instance.AddNodes(privateNodeCount, false, &nodesDef)
	if nodesStatus != nil {
		err = nodesStatus
		goto cleanNodes
	}

	// Waits reverseproxy installation ended
	rpStatus = <-rpChannel
	if rpStatus != nil {
		err = rpStatus
		goto cleanNodes
	}

	// // Waits master configuretion ended
	// masterStatus = <-masterChannel
	// if masterStatus != nil {
	// err = masterStatus
	// goto cleanNodes
	// }

	// Cluster created and configured successfully, saving again to Metadata
	err = instance.updateMetadata(func() error {
		instance.Core.State = ClusterState.Created
		return nil
	})
	if err != nil {
		goto cleanNodes
	}

	// Get the state of the cluster until successful
	err = retry.WhileUnsuccessfulDelay5Seconds(
		func() error {
			status, err := instance.ForceGetState()
			if err != nil {
				return err
			}
			if status != ClusterState.Nominal {
				return fmt.Errorf("cluster is not ready for duty")
			}
			return nil
		},
		5*time.Minute,
	)
	if err != nil {
		log.Println("failed to wait ready state of the cluster")
		goto cleanNodes
	}
	return &instance, nil

cleanNodes:
	// Best-effort teardown of created hosts (errors ignored deliberately).
	if !req.KeepOnFailure {
		broker.Host.Delete(instance.Core.PublicNodeIDs, brokerclient.DefaultExecutionTimeout)
		broker.Host.Delete(instance.Core.PrivateNodeIDs, brokerclient.DefaultExecutionTimeout)
		broker.Host.Delete(instance.manager.MasterIDs, brokerclient.DefaultExecutionTimeout)
	}
cleanNetwork:
	// Best-effort teardown of the network and metadata (errors ignored).
	if !req.KeepOnFailure {
		broker.Network.Delete([]string{instance.Core.NetworkID}, brokerclient.DefaultExecutionTimeout)
		instance.metadata.Delete()
	}
	return nil, err
}
// createMaster creates an host acting as a master in the cluster
//
// It provisions the host via the broker, registers it in the cluster
// metadata (rolling back on failure), optionally installs the
// proxycache-client feature, runs the BOH install script remotely, and
// finally adds the 'docker' feature.
func (c *Cluster) createMaster(req pb.HostDefinition) error {
	log.Println("[Master #1] starting creation...")

	// Create the host
	var err error
	req.Name = c.Core.Name + "-master-1"
	req.Public = false
	req.Network = c.Core.NetworkID
	host, err := brokerclient.New().Host.Create(req, 0)
	if err != nil {
		log.Printf("[Master #1] creation failed: %s\n", err.Error())
		return err
	}

	// Registers the new master in the cluster struct
	err = c.updateMetadata(func() error {
		c.manager.MasterIDs = append(c.manager.MasterIDs, host.ID)
		c.manager.MasterIPs = append(c.manager.MasterIPs, host.PrivateIP)
		return nil
	})
	if err != nil {
		// Rollback: drop the entries just appended and delete the host.
		c.manager.MasterIDs = c.manager.MasterIDs[:len(c.manager.MasterIDs)-1]
		c.manager.MasterIPs = c.manager.MasterIPs[:len(c.manager.MasterIPs)-1]
		brokerclient.New().Host.Delete([]string{host.ID}, brokerclient.DefaultExecutionTimeout)
		log.Printf("[Master #1] creation failed: %s", err.Error())
		return err
	}

	target := install.NewHostTarget(host)

	//VPL: for now, disables unconditionally proxycache
	c.Core.DisabledFeatures["proxycache"] = struct{}{}
	// Dead branch while the line above force-disables proxycache.
	if _, ok := c.Core.DisabledFeatures["proxycache"]; !ok {
		// install proxycache-client feature
		feature, err := install.NewFeature("proxycache-client")
		if err != nil {
			log.Printf("[master #%d (%s)] failed to prepare feature 'proxycache-client': %s", 1, host.ID, err.Error())
			return fmt.Errorf("failed to install feature 'proxycache-client': %s", err.Error())
		}
		results, err := feature.Add(target, install.Variables{}, install.Settings{})
		if err != nil {
			log.Printf("[master #%d (%s)] failed to install feature '%s': %s\n", 1, host.Name, feature.DisplayName(), err.Error())
			return fmt.Errorf("failed to install feature '%s' on host '%s': %s", feature.DisplayName(), host.Name, err.Error())
		}
		if !results.Successful() {
			msg := results.AllErrorMessages()
			log.Printf("[master #%d (%s)] failed to install feature '%s': %s", 1, host.Name, feature.DisplayName(), msg)
			// NOTE(review): non-constant format string (go vet printf
			// check); prefer fmt.Errorf("%s", msg).
			return fmt.Errorf(msg)
		}
	}

	// Installs BOH requirements...
	installCommonRequirements, err := c.getInstallCommonRequirements()
	if err != nil {
		return err
	}
	data := map[string]interface{}{
		"InstallCommonRequirements": *installCommonRequirements,
		"CladmPassword": c.Core.AdminPassword,
	}
	box, err := getBOHTemplateBox()
	if err != nil {
		return err
	}
	retcode, _, _, err := flavortools.ExecuteScript(box, funcMap, "boh_install_master.sh", data, host.ID)
	if err != nil {
		log.Printf("[master #%d (%s)] failed to remotely run installation script: %s\n", 1, host.Name, err.Error())
		return err
	}
	if retcode != 0 {
		// retcode 255 conventionally signals an SSH/connection failure.
		if retcode == 255 {
			log.Printf("[master #%d (%s)] remote connection failed", 1, host.Name)
			return fmt.Errorf("remote connection failed on master '%s'", host.Name)
		}
		// Codes below NextErrorCode map to known script error causes.
		if retcode < int(ErrorCode.NextErrorCode) {
			errcode := ErrorCode.Enum(retcode)
			log.Printf("[master #%d (%s)] installation failed:\nretcode=%d (%s)", 1, host.Name, errcode, errcode.String())
			return fmt.Errorf("scripted installation failed on master '%s' (retcode=%d=%s)", host.Name, errcode, errcode.String())
		}
		log.Printf("[master #%d (%s)] installation failed (retcode=%d)", 1, host.Name, retcode)
		return fmt.Errorf("scripted installation failed on master '%s' (retcode=%d)", host.Name, retcode)
	}

	// install docker feature
	log.Printf("[master #%d (%s)] adding feature 'docker'", 1, host.Name)
	feature, err := install.NewFeature("docker")
	if err != nil {
		log.Printf("[master #%d (%s)] failed to prepare feature 'docker': %s", 1, host.ID, err.Error())
		return fmt.Errorf("failed to add feature 'docker': %s", err.Error())
	}
	results, err := feature.Add(target, install.Variables{}, install.Settings{})
	if err != nil {
		log.Printf("[master #%d (%s)] failed to add feature '%s': %s\n", 1, host.Name, feature.DisplayName(), err.Error())
		return fmt.Errorf("failed to add feature '%s' on host '%s': %s", feature.DisplayName(), host.Name, err.Error())
	}
	if !results.Successful() {
		msg := results.AllErrorMessages()
		log.Printf("[master #%d (%s)] failed to add feature '%s': %s", 1, host.Name, feature.DisplayName(), msg)
		// NOTE(review): non-constant format string (go vet printf check).
		return fmt.Errorf(msg)
	}
	log.Printf("[master #%d (%s)] feature 'docker' added successfully\n", 1, host.Name)

	log.Printf("[master #%d (%s)] creation successful", 1, host.Name)
	return nil
}
// func (c *Cluster) createNodes(count int, public bool, def pb.HostDefinition) error {
// var countS string
// if count > 1 {
// countS = "s"
// }
// var nodeType NodeType.Enum
// var nodeTypeStr string
// if public {
// nodeType = NodeType.PublicNode
// nodeTypeStr = "public"
// } else {
// nodeType = NodeType.PrivateNode
// nodeTypeStr = "private"
// }
// fmt.Printf("Creating %d %s Node%s...\n", count, nodeTypeStr, countS)
// var dones []chan error
// var results []chan string
// for i := 1; i <= count; i++ {
// d := make(chan error)
// dones = append(dones, d)
// r := make(chan string)
// results = append(results, r)
// go c.asyncCreateNode(i, nodeType, def, r, d)
// }
// var state error
// var errors []string
// for i := range dones {
// <-results[i]
// state = <-dones[i]
// if state != nil {
// errors = append(errors, state.Error())
// }
// }
// if len(errors) > 0 {
// return fmt.Errorf(strings.Join(errors, "\n"))
// }
// return nil
// }
// asyncCreateNode creates a Node in the cluster
// This function is intended to be call as a goroutine
//
// Channel protocol: every exit path sends exactly one value on `done`
// (nil on success); most paths also send "" or the host name on `result`.
// NOTE(review): the proxycache/requirements error paths below send on
// `done` WITHOUT sending on `result` — a reader that receives from
// `result` before `done` would block forever on those paths; verify
// against the callers' receive order.
func (c *Cluster) asyncCreateNode(
	index int, nodeType NodeType.Enum, def pb.HostDefinition, timeout time.Duration,
	result chan string, done chan error,
) {
	var publicIP bool
	var nodeTypeStr string
	if nodeType == NodeType.PublicNode {
		nodeTypeStr = "public"
		publicIP = true
	} else {
		nodeTypeStr = "private"
		publicIP = false
	}
	log.Printf("[%s node #%d] starting creation...\n", nodeTypeStr, index)

	// Create the host
	var err error
	def.Name, err = c.buildHostname("node", nodeType)
	if err != nil {
		log.Printf("[%s node #%d] creation failed: %s\n", nodeTypeStr, index, err.Error())
		result <- ""
		done <- err
		return
	}
	def.Public = publicIP
	def.Network = c.Core.NetworkID
	host, err := brokerclient.New().Host.Create(def, timeout)
	if err != nil {
		log.Printf("[%s node #%d] creation failed: %s\n", nodeTypeStr, index, err.Error())
		result <- ""
		done <- err
		return
	}

	// Registers the new Agent in the cluster struct
	err = c.updateMetadata(func() error {
		if nodeType == NodeType.PublicNode {
			c.Core.PublicNodeIDs = append(c.Core.PublicNodeIDs, host.ID)
			c.manager.PublicNodeIPs = append(c.manager.PublicNodeIPs, host.PrivateIP)
		} else {
			c.Core.PrivateNodeIDs = append(c.Core.PrivateNodeIDs, host.ID)
			c.manager.PrivateNodeIPs = append(c.manager.PrivateNodeIPs, host.PrivateIP)
		}
		return nil
	})
	if err != nil {
		// Removes the ID we just added to the cluster struct
		if nodeType == NodeType.PublicNode {
			c.Core.PublicNodeIDs = c.Core.PublicNodeIDs[:len(c.Core.PublicNodeIDs)-1]
			c.manager.PublicNodeIPs = c.manager.PublicNodeIPs[:len(c.manager.PublicNodeIPs)-1]
		} else {
			c.Core.PrivateNodeIDs = c.Core.PrivateNodeIDs[:len(c.Core.PrivateNodeIDs)-1]
			c.manager.PrivateNodeIPs = c.manager.PrivateNodeIPs[:len(c.manager.PrivateNodeIPs)-1]
		}
		brokerclient.New().Host.Delete([]string{host.ID}, brokerclient.DefaultExecutionTimeout)
		log.Printf("[%s node #%d] creation failed: %s", nodeTypeStr, index, err.Error())
		result <- ""
		done <- fmt.Errorf("failed to update Cluster configuration: %s", err.Error())
		return
	}

	target := install.NewHostTarget(host)

	//VPL: for now disables unconditionnaly the proxycache
	c.Core.DisabledFeatures["proxycache"] = struct{}{}
	// Dead branch while the line above force-disables proxycache.
	if _, ok := c.Core.DisabledFeatures["proxycache"]; !ok {
		// install proxycache-client feature
		feature, err := install.NewFeature("proxycache-client")
		if err != nil {
			log.Printf("[%s node #%d (%s)] failed to prepare feature 'proxycache-client': %s", nodeTypeStr, index, host.ID, err.Error())
			done <- fmt.Errorf("failed to add feature 'proxycache-client': %s", err.Error())
			return
		}
		results, err := feature.Add(target, install.Variables{}, install.Settings{})
		if err != nil {
			log.Printf("[%s node #%d (%s)] failed to add feature '%s': %s\n", nodeTypeStr, index, host.Name, feature.DisplayName(), err.Error())
			done <- fmt.Errorf("failed to add feature '%s' on host '%s': %s", feature.DisplayName(), host.Name, err.Error())
			return
		}
		if !results.Successful() {
			msg := results.AllErrorMessages()
			log.Printf("[%s node #%d (%s)] failed to add feature '%s': %s", nodeTypeStr, index, host.Name, feature.DisplayName(), msg)
			// NOTE(review): non-constant format string (go vet printf check).
			done <- fmt.Errorf(msg)
			return
		}
	}

	// Installs BOH requirements
	installCommonRequirements, err := c.getInstallCommonRequirements()
	if err != nil {
		done <- err
		return
	}
	data := map[string]interface{}{
		"InstallCommonRequirements": *installCommonRequirements,
		"CladmPassword": c.Core.AdminPassword,
	}
	box, err := getBOHTemplateBox()
	if err != nil {
		done <- err
		return
	}
	retcode, _, _, err := flavortools.ExecuteScript(box, funcMap, "boh_install_node.sh", data, host.ID)
	if err != nil {
		log.Printf("[%s node #%d (%s)] failed to remotely run installation script: %s\n", nodeTypeStr, index, host.Name, err.Error())
		result <- ""
		done <- err
		return
	}
	if retcode != 0 {
		result <- ""
		// Codes below NextErrorCode map to known script error causes.
		if retcode < int(ErrorCode.NextErrorCode) {
			errcode := ErrorCode.Enum(retcode)
			log.Printf("[%s node #%d (%s)] installation failed: retcode: %d (%s)", nodeTypeStr, index, host.Name, errcode, errcode.String())
			done <- fmt.Errorf("scripted Node configuration failed with error code %d (%s)", errcode, errcode.String())
		} else {
			log.Printf("[%s node #%d (%s)] installation failed: retcode=%d", nodeTypeStr, index, host.Name, retcode)
			done <- fmt.Errorf("scripted Agent configuration failed with error code %d", retcode)
		}
		return
	}

	// add docker feature
	log.Printf("[%s node #%d (%s)] adding feature 'docker'...\n", nodeTypeStr, index, host.Name)
	feature, err := install.NewFeature("docker")
	if err != nil {
		log.Printf("[%s node #%d (%s)] failed to prepare feature 'docker': %s", nodeTypeStr, index, host.Name, err.Error())
		done <- fmt.Errorf("failed to add feature 'docker': %s", err.Error())
		return
	}
	results, err := feature.Add(target, install.Variables{}, install.Settings{})
	if err != nil {
		log.Printf("[%s node #%d (%s)] failed to add feature '%s': %s\n", nodeTypeStr, index, host.Name, feature.DisplayName(), err.Error())
		done <- fmt.Errorf("failed to add feature '%s' on host '%s': %s", feature.DisplayName(), host.Name, err.Error())
		return
	}
	if !results.Successful() {
		msg := results.AllErrorMessages()
		log.Printf("[%s node #%d (%s)] failed to add feature '%s': %s", nodeTypeStr, index, host.Name, feature.DisplayName(), msg)
		done <- fmt.Errorf("failed to add feature '%s' on host '%s': %s", feature.DisplayName(), host.Name, msg)
		return
	}
	log.Printf("[%s node #%d (%s)] feature 'docker' installed successfully.\n", nodeTypeStr, index, host.Name)

	log.Printf("[%s node #%d (%s)] creation successful\n", nodeTypeStr, index, host.Name)
	result <- host.Name
	done <- nil
}
// getBOHTemplateBox lazily locates and caches the rice box containing the
// BOH scripts; subsequent calls return the cached box.
func getBOHTemplateBox() (*rice.Box, error) {
	if bohTemplateBox != nil {
		return bohTemplateBox, nil
	}
	// Note: path MUST be literal for rice to work
	box, err := rice.FindBox("../boh/scripts")
	if err != nil {
		return nil, err
	}
	bohTemplateBox = box
	return bohTemplateBox, nil
}
// getInstallCommonRequirements returns the string corresponding to the script dcos_install_requirements.sh
// which installs common features (docker in particular)
//
// The rendered script is cached in the package-level variable
// installCommonRequirementsContent. NOTE(review): this lazy init is not
// synchronized; concurrent first calls could render the template twice.
func (c *Cluster) getInstallCommonRequirements() (*string, error) {
	if installCommonRequirementsContent == nil {
		// find the rice.Box
		b, err := getBOHTemplateBox()
		if err != nil {
			return nil, err
		}

		// get file contents as string
		tmplString, err := b.String("boh_install_requirements.sh")
		if err != nil {
			return nil, fmt.Errorf("error loading script template: %s", err.Error())
		}

		// parse then execute the template
		tmplPrepared, err := txttmpl.New("install_requirements").Funcs(template.MergeFuncs(funcMap, false)).Parse(tmplString)
		if err != nil {
			return nil, fmt.Errorf("error parsing script template: %s", err.Error())
		}
		dataBuffer := bytes.NewBufferString("")
		// The template receives the cluster CIDR, cladm credentials and the
		// cluster keypair.
		err = tmplPrepared.Execute(dataBuffer, map[string]interface{}{
			"CIDR": c.Core.CIDR,
			"CladmPassword": c.Core.AdminPassword,
			"SSHPublicKey": c.Core.Keypair.PublicKey,
			"SSHPrivateKey": c.Core.Keypair.PrivateKey,
		})
		if err != nil {
			return nil, fmt.Errorf("error realizing script template: %s", err.Error())
		}
		result := dataBuffer.String()
		installCommonRequirementsContent = &result
	}
	return installCommonRequirementsContent, nil
}
// buildHostname builds a unique hostname for a new node by reserving the
// next per-type index, persisted through the cluster metadata.
func (c *Cluster) buildHostname(core string, nodeType NodeType.Enum) (string, error) {
	var coreName string
	switch nodeType {
	case NodeType.PublicNode:
		coreName = "pub" + core
	case NodeType.PrivateNode:
		coreName = core
	default:
		return "", fmt.Errorf("Invalid Node Type '%v'", nodeType)
	}

	// Increment the appropriate counter inside the metadata update so the
	// reserved index is persisted atomically with it.
	index := 0
	err := c.updateMetadata(func() error {
		if nodeType == NodeType.PublicNode {
			c.manager.PublicLastIndex++
			index = c.manager.PublicLastIndex
		} else {
			c.manager.PrivateLastIndex++
			index = c.manager.PrivateLastIndex
		}
		return nil
	})
	if err != nil {
		return "", err
	}
	return c.Core.Name + "-" + coreName + "-" + strconv.Itoa(index), nil
}
// asyncInstallReverseProxy installs the feature reverseproxy on network gateway
//
// Intended to run as a goroutine: exactly one value is sent on `done`
// (nil on success). It waits up to 5 minutes for the gateway's SSH server
// before installing the feature.
func (c *Cluster) asyncInstallReverseProxy(host *model.Host, done chan error) {
	sshCfg, err := brokerclient.New().Host.SSHConfig(host.ID)
	if err != nil {
		done <- err
		return
	}
	err = sshCfg.WaitServerReady(5 * time.Minute)
	if err != nil {
		done <- err
		return
	}
	target := install.NewHostTarget(pbutils.ToPBHost(host))
	feature, err := install.NewFeature("reverseproxy")
	if err != nil {
		done <- err
		return
	}
	results, err := feature.Add(target, install.Variables{}, install.Settings{})
	if err != nil {
		done <- fmt.Errorf("failed to execute installation of feature '%s' on host '%s': %s", feature.DisplayName(), host.Name, err.Error())
		return
	}
	if !results.Successful() {
		done <- fmt.Errorf("failed to install feature '%s' on host '%s': %s", feature.DisplayName(), host.Name, results.AllErrorMessages())
		return
	}
	done <- nil
}
// // asyncConfigureMasters configure masters
// func (c *Cluster) asyncConfigureMasters(done chan error) {
// var (
// dones []chan error
// errors []string
// )
// for i, id := range c.manager.MasterIDs {
// d := make(chan error)
// dones = append(dones, d)
// go c.asyncConfigureMaster(i, id, d)
// }
// for _, d := range dones {
// err := <-d
// if err != nil {
// errors = append(errors, err.Error())
// }
// }
// if len(errors) > 0 {
// msg := strings.Join(errors, "\n")
// done <- fmt.Errorf("failed to configure masters: %s", msg)
// return
// }
// log.Println("Masters configured successfully")
// done <- nil
// }
// // asyncConfigureMaster configure one master
// func (c *Cluster) asyncConfigureMaster(index int, id string, done chan error) {
// log.Printf("[master #%d] starting configuration...\n", index+1)
// // remotedekstop is a feature, can be added after master creation; should be automatically install
// // in perform, not deploy
// // Installs remotedesktop feature on host
// feature, err := install.NewFeature("remotedesktop")
// if err != nil {
// log.Printf("[master #%d] failed to find feature 'remotedesktop': %s\n", index, err.Error())
// done <- fmt.Errorf("[master #%d] %s", index+1, err.Error())
// return
// }
// host, err := brokerclient.New(50051).Host.Inspect(id, brokerclient.DefaultExecutionTimeout)
// if err != nil {
// done <- fmt.Errorf("[master #%d] %s", index+1, err.Error())
// return
// }
// target := install.NewHostTarget(host)
// results, err := feature.Add(target, install.Variables{
// "GatewayIP": c.Core.GatewayIP,
// "Hostname": host.Name,
// "HostIP": host.PRIVATE_IP,
// "Username": "cladm",
// "Password": c.Core.AdminPassword,
// }, install.Settings{})
// if err != nil {
// done <- fmt.Errorf("[master #%d (%s)] failed to install feature '%s': %s", index, host.Name, feature.DisplayName(), err.Error())
// return
// }
// if !results.Successful() {
// msg := results.AllErrorMessages()
// log.Printf("[master #%d (%s)] installation script of feature '%s' failed: %s\n", index, host.Name, feature.DisplayName(), msg)
// done <- fmt.Errorf(msg)
// return
// }
// log.Printf("[master #%d (%s)] configuration successful\n", index, host.Name)
// done <- nil
// }
// GetName returns the name of the cluster as stored in its core metadata.
func (c *Cluster) GetName() string {
	return c.Core.Name
}
// Start starts the cluster named 'name'
// In BOH, cluster state is logical, there is no way to stop a BOH cluster (except by stopping the hosts):
// a Stopped cluster is simply flipped back to Nominal in metadata.
func (c *Cluster) Start() error {
	state, err := c.ForceGetState()
	if err != nil {
		return err
	}
	switch state {
	case ClusterState.Stopped:
		// Record the logical state transition in metadata.
		return c.updateMetadata(func() error {
			c.Core.State = ClusterState.Nominal
			return nil
		})
	case ClusterState.Nominal, ClusterState.Degraded:
		// Already running: nothing to do.
		return nil
	default:
		return fmt.Errorf("failed to start cluster because of it's current state: %s", state.String())
	}
}
// Stop stops the cluster if its current state is compatible.
// Like Start, the state is logical in BOH: stopping only records the
// Stopped state in metadata.
//
// BUG FIX: the previous implementation called c.Stop() again when the state
// was Nominal or Degraded, recursing infinitely until stack overflow. It now
// persists ClusterState.Stopped, mirroring what Start does in reverse. The
// ForceGetState error is also no longer discarded.
func (c *Cluster) Stop() error {
	state, err := c.ForceGetState()
	if err != nil {
		return err
	}
	if state == ClusterState.Nominal || state == ClusterState.Degraded {
		return c.updateMetadata(func() error {
			c.Core.State = ClusterState.Stopped
			return nil
		})
	}
	if state != ClusterState.Stopped {
		return fmt.Errorf("failed to stop cluster because of it's current state: %s", state.String())
	}
	return nil
}
// GetState returns the current state of the cluster, refreshing it from the
// infrastructure once the configured collection interval has elapsed.
func (c *Cluster) GetState() (ClusterState.Enum, error) {
	deadline := c.lastStateCollection.Add(c.manager.StateCollectInterval)
	if time.Now().After(deadline) {
		return c.ForceGetState()
	}
	return c.Core.State, nil
}
// ForceGetState returns the current state of the cluster and refreshes the
// state-collection timestamp.
// Does nothing currently beyond forcing the state to Nominal...
//
// FIX: the error returned by updateMetadata was silently discarded; it is
// now propagated to the caller (alongside the last known state).
func (c *Cluster) ForceGetState() (ClusterState.Enum, error) {
	err := c.updateMetadata(func() error {
		c.Core.State = ClusterState.Nominal
		c.lastStateCollection = time.Now()
		return nil
	})
	return c.Core.State, err
}
// AddNode adds one node of the requested kind (public/private) and returns
// its name. It is a convenience wrapper around AddNodes with count == 1.
func (c *Cluster) AddNode(public bool, req *pb.HostDefinition) (string, error) {
	hosts, err := c.AddNodes(1, public, req)
	if err != nil {
		return "", err
	}
	return hosts[0], nil
}
// AddNodes creates <count> nodes concurrently, optionally overriding the
// cluster's default node sizing with the non-zero fields of req, and returns
// the names of the created hosts. If any creation fails, the hosts that did
// get created are deleted (best effort) and an aggregated error is returned.
//
// FIX: corrected the "occured" typo in the returned error message and renamed
// the local `errors` slice to `errs` to avoid shadowing the standard errors
// package name.
func (c *Cluster) AddNodes(count int, public bool, req *pb.HostDefinition) ([]string, error) {
	hostDef := c.GetConfig().NodesDef
	if req != nil {
		if req.CPUNumber > 0 {
			hostDef.CPUNumber = req.CPUNumber
		}
		if req.RAM > 0.0 {
			hostDef.RAM = req.RAM
		}
		if req.Disk > 0 {
			hostDef.Disk = req.Disk
		}
	}
	nodeType := NodeType.PrivateNode
	if public {
		nodeType = NodeType.PublicNode
	}
	var (
		hosts   []string
		errs    []string
		dones   []chan error
		results []chan string
	)
	// Allow one extra minute per requested node on top of the default timeout.
	timeout := brokerclient.DefaultExecutionTimeout + time.Duration(count)*time.Minute
	for i := 0; i < count; i++ {
		r := make(chan string)
		results = append(results, r)
		d := make(chan error)
		dones = append(dones, d)
		go c.asyncCreateNode(i+1, nodeType, hostDef, timeout, r, d)
	}
	// Each worker first reports its host name (possibly empty), then its error.
	for i := range dones {
		if hostName := <-results[i]; hostName != "" {
			hosts = append(hosts, hostName)
		}
		if err := <-dones[i]; err != nil {
			errs = append(errs, err.Error())
		}
	}
	if len(errs) > 0 {
		// Best-effort cleanup of the nodes that were successfully created.
		if len(hosts) > 0 {
			brokerclient.New().Host.Delete(hosts, brokerclient.DefaultExecutionTimeout)
		}
		return nil, fmt.Errorf("errors occurred on node addition: %s", strings.Join(errs, "\n"))
	}
	return hosts, nil
}
// DeleteLastNode deletes the most recently added node of the requested kind
// (public or private) and removes it from the cluster metadata.
//
// BUG FIXES: the original returned nil when the broker deletion failed,
// silently reporting success; it also panicked (index out of range) when the
// corresponding node list was empty. Both are handled now.
func (c *Cluster) DeleteLastNode(public bool) error {
	var hostID string
	if public {
		if len(c.Core.PublicNodeIDs) == 0 {
			return fmt.Errorf("no public node to delete in cluster '%s'", c.Core.Name)
		}
		hostID = c.Core.PublicNodeIDs[len(c.Core.PublicNodeIDs)-1]
	} else {
		if len(c.Core.PrivateNodeIDs) == 0 {
			return fmt.Errorf("no private node to delete in cluster '%s'", c.Core.Name)
		}
		hostID = c.Core.PrivateNodeIDs[len(c.Core.PrivateNodeIDs)-1]
	}
	err := brokerclient.New().Host.Delete([]string{hostID}, brokerclient.DefaultExecutionTimeout)
	if err != nil {
		return err // was `return nil`, which masked the failure
	}
	// Only unregister the node from metadata once it is actually gone.
	return c.updateMetadata(func() error {
		if public {
			c.Core.PublicNodeIDs = c.Core.PublicNodeIDs[:len(c.Core.PublicNodeIDs)-1]
		} else {
			c.Core.PrivateNodeIDs = c.Core.PrivateNodeIDs[:len(c.Core.PrivateNodeIDs)-1]
		}
		return nil
	})
}
// DeleteSpecificNode deletes the node specified by its ID, searching the
// public pool first, then the private one.
func (c *Cluster) DeleteSpecificNode(hostID string) error {
	inPublic, idx := contains(c.Core.PublicNodeIDs, hostID)
	inPrivate := false
	if !inPublic {
		inPrivate, idx = contains(c.Core.PrivateNodeIDs, hostID)
	}
	if !inPublic && !inPrivate {
		return fmt.Errorf("host '%s' isn't a registered Node of the Cluster '%s'", hostID, c.Core.Name)
	}
	if err := brokerclient.New().Host.Delete([]string{hostID}, brokerclient.DefaultExecutionTimeout); err != nil {
		return err
	}
	// Unregister the deleted node from the list it was found in.
	return c.updateMetadata(func() error {
		if inPublic {
			c.Core.PublicNodeIDs = append(c.Core.PublicNodeIDs[:idx], c.Core.PublicNodeIDs[idx+1:]...)
		} else {
			c.Core.PrivateNodeIDs = append(c.Core.PrivateNodeIDs[:idx], c.Core.PrivateNodeIDs[idx+1:]...)
		}
		return nil
	})
}
// ListMasterIDs lists the master nodes in the cluster
// No masters in BOH... (the slice is expected to stay empty for this flavor)
func (c *Cluster) ListMasterIDs() []string {
	return c.manager.MasterIDs
}
// ListMasterIPs lists the IPs of the master nodes in the cluster
// No masters in BOH...
func (c *Cluster) ListMasterIPs() []string {
	return c.manager.MasterIPs
}
// ListNodeIDs lists the IDs of the nodes in the cluster, selecting the
// public or private pool.
func (c *Cluster) ListNodeIDs(public bool) []string {
	if !public {
		return c.Core.PrivateNodeIDs
	}
	return c.Core.PublicNodeIDs
}
// ListNodeIPs lists the IPs of the nodes in the cluster, selecting the
// public or private pool.
func (c *Cluster) ListNodeIPs(public bool) []string {
	if !public {
		return c.manager.PrivateNodeIPs
	}
	return c.manager.PublicNodeIPs
}
// GetNode returns a node based on its ID, after verifying the ID belongs to
// this cluster (public or private pool).
func (c *Cluster) GetNode(hostID string) (*pb.Host, error) {
	isPublic, _ := contains(c.Core.PublicNodeIDs, hostID)
	isPrivate, _ := contains(c.Core.PrivateNodeIDs, hostID)
	if !isPublic && !isPrivate {
		return nil, fmt.Errorf("failed to find node '%s' in cluster '%s'", hostID, c.Core.Name)
	}
	return brokerclient.New().Host.Inspect(hostID, brokerclient.DefaultExecutionTimeout)
}
// contains reports whether hostID appears in list, together with its index
// (0 when absent).
func contains(list []string, hostID string) (bool, int) {
	for i, v := range list {
		if v == hostID {
			return true, i
		}
	}
	return false, 0
}
// SearchNode tells if an host ID corresponds to a node of the cluster.
// NOTE(review): the 'public' parameter is currently ignored — both pools are
// searched; confirm whether callers rely on that.
func (c *Cluster) SearchNode(hostID string, public bool) bool {
	if found, _ := contains(c.Core.PublicNodeIDs, hostID); found {
		return true
	}
	found, _ := contains(c.Core.PrivateNodeIDs, hostID)
	return found
}
// GetConfig returns a copy of the public properties (core) of the cluster.
func (c *Cluster) GetConfig() clusterapi.ClusterCore {
	return *c.Core
}
// FindAvailableMaster returns the ID of the first master that answers SSH
// within 2 minutes; masters with unreachable SSH config or that time out are
// skipped, any other error aborts the search.
func (c *Cluster) FindAvailableMaster() (string, error) {
	brokerCltHost := brokerclient.New().Host
	for _, masterID := range c.manager.MasterIDs {
		sshCfg, err := brokerCltHost.SSHConfig(masterID)
		if err != nil {
			// No usable SSH config: log and try the next master.
			log.Errorf("failed to get ssh config for master '%s': %s", masterID, err.Error())
			continue
		}
		err = sshCfg.WaitServerReady(2 * time.Minute)
		if err == nil {
			return masterID, nil
		}
		if _, ok := err.(retry.ErrTimeout); !ok {
			return "", err
		}
		// Timeout: try the next master.
	}
	return "", fmt.Errorf("failed to find available master")
}
// FindAvailableNode returns the ID of the first node (public or private,
// per the flag) that answers SSH within 5 minutes. Timeouts move on to the
// next node; any other error aborts.
func (c *Cluster) FindAvailableNode(public bool) (string, error) {
	brokerCltHost := brokerclient.New().Host
	for _, hostID := range c.ListNodeIDs(public) {
		sshCfg, err := brokerCltHost.SSHConfig(hostID)
		if err != nil {
			return "", err
		}
		err = sshCfg.WaitServerReady(5 * time.Minute)
		if err == nil {
			return hostID, nil
		}
		if _, ok := err.(retry.ErrTimeout); !ok {
			return "", err
		}
		// Timeout: try the next node.
	}
	return "", fmt.Errorf("failed to find available node")
}
// updateMetadata writes cluster config in Object Storage.
// It acquires the metadata lock, optionally applies updatefn to the
// in-memory state, persists the result, then releases the lock. When
// updatefn returns an error, nothing is written and that error is returned.
func (c *Cluster) updateMetadata(updatefn func() error) error {
	if c.metadata == nil {
		// First use: create the metadata handle and attach the current core.
		m, err := metadata.NewCluster()
		if err != nil {
			return err
		}
		m.Carry(c.Core)
		c.metadata = m
		c.metadata.Acquire()
	} else {
		// Refresh the in-memory state from storage while holding the lock,
		// so updatefn operates on the latest persisted view.
		c.metadata.Acquire()
		c.Reload()
	}
	if updatefn != nil {
		err := updatefn()
		if err != nil {
			// Abort without writing; release the lock before returning.
			c.metadata.Release()
			return err
		}
	}
	err := c.metadata.Write()
	c.metadata.Release()
	return err
}
// Delete destroys everything related to the infrastructure built for the
// cluster: nodes, masters, network/gateway, and finally the metadata itself.
//
// BUG FIX: a failure of c.metadata.Delete() used to `return nil`, reporting
// success while leaving stale metadata behind; the error is now returned.
func (c *Cluster) Delete() error {
	if c.metadata == nil {
		return fmt.Errorf("no metadata found for this cluster")
	}
	// Mark the cluster as being removed in metadata first.
	err := c.updateMetadata(func() error {
		c.Core.State = ClusterState.Removed
		return nil
	})
	if err != nil {
		return err
	}
	broker := brokerclient.New()
	// Best-effort deletion of all hosts (public nodes, private nodes, masters);
	// errors here are deliberately ignored so teardown continues.
	broker.Host.Delete(c.Core.PublicNodeIDs, brokerclient.DefaultExecutionTimeout)
	broker.Host.Delete(c.Core.PrivateNodeIDs, brokerclient.DefaultExecutionTimeout)
	broker.Host.Delete(c.manager.MasterIDs, brokerclient.DefaultExecutionTimeout)
	// Deletes the network and gateway.
	err = broker.Network.Delete([]string{c.Core.NetworkID}, brokerclient.DefaultExecutionTimeout)
	if err != nil {
		return err
	}
	// Deletes the metadata; this used to silently swallow the error.
	if err = c.metadata.Delete(); err != nil {
		return err
	}
	c.metadata = nil
	c.Core = nil
	c.manager = nil
	return nil
}
// init registers the concrete types with gob so cluster metadata can be
// (de)serialized when stored in Object Storage.
func init() {
	gob.Register(Cluster{})
	gob.Register(managerData{})
}
|
package bardo
import (
"regexp"
)
// dbNameR captures the value of the "dbname=" parameter in a space-separated
// connection string. Note the value must be followed by whitespace to match.
var dbNameR = regexp.MustCompile(`dbname\=([^\s]+?)\s`)

// GetDBNameFromURL returns the database name embedded in the open string,
// or "" when no "dbname=<name> " parameter is present.
//
// BUG FIX: the previous implementation indexed the submatch slice
// unconditionally; FindStringSubmatch returns nil when there is no match,
// so a connection string without dbname caused an index-out-of-range panic.
func GetDBNameFromURL(url string) string {
	m := dbNameR.FindStringSubmatch(url)
	if len(m) < 2 {
		return ""
	}
	return m[1]
}
// ReplaceDBNameInURL replaces the database name in the open string with
// dbname, preserving the trailing space the pattern consumed.
func ReplaceDBNameInURL(url string, dbname string) string {
	replacement := "dbname=" + dbname + " "
	return dbNameR.ReplaceAllString(url, replacement)
}
|
package parcels
import (
"context"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"spWebFront/FrontKeeper/infrastructure/bolt"
"spWebFront/FrontKeeper/infrastructure/core"
"spWebFront/FrontKeeper/infrastructure/memfile"
"spWebFront/FrontKeeper/server/app/domain/service/searcher/document"
"strings"
"testing"
"github.com/adverax/echo/generic"
"github.com/stretchr/testify/require"
)
// getRawFile loads testdata/drugs.json and decodes it into a list of
// loosely-typed records.
func getRawFile() ([]map[string]interface{}, error) {
	data, err := ioutil.ReadFile("testdata/drugs.json")
	if err != nil {
		return nil, err
	}
	var items []map[string]interface{}
	if err := core.JsonUnmarshal(data, &items); err != nil {
		return nil, err
	}
	return items, nil
}
// makeDrugsFile writes the "search_index" field of every item to
// testdata/drugs.csv, one value per line. Items lacking a convertible
// search_index are silently skipped.
func makeDrugsFile(items []map[string]interface{}) error {
	f, err := os.Create("testdata/drugs.csv")
	if err != nil {
		return err
	}
	defer f.Close()
	for _, item := range items {
		val, ok := item["search_index"]
		if !ok {
			continue
		}
		if s, ok := core.ConvertToString(val); ok {
			if _, err := f.WriteString(s + "\n"); err != nil {
				return err
			}
		}
	}
	return nil
}
// readPolling reads every regular file under testdata/poll and maps the
// file name to its lines (split on "\n").
func readPolling() (map[string][]string, error) {
	const dir = "testdata/poll"
	files, err := ioutil.ReadDir(dir)
	if err != nil {
		return nil, err
	}
	res := make(map[string][]string, len(files))
	for _, file := range files {
		if file.IsDir() {
			continue
		}
		data, err := ioutil.ReadFile(filepath.Join(dir, file.Name()))
		if err != nil {
			return nil, err
		}
		res[file.Name()] = strings.Split(string(data), "\n")
	}
	return res, nil
}
// readDrugs returns the lines of testdata/drugs.csv.
func readDrugs() ([]string, error) {
	data, err := ioutil.ReadFile("testdata/drugs.csv")
	if err != nil {
		return nil, err
	}
	return strings.Split(string(data), "\n"), nil
}
// TestSearcher imports the drugs corpus into an in-memory bolt database,
// runs every query from testdata/poll through the searcher, and reports the
// average distance between expected and actual rankings.
func TestSearcher(t *testing.T) {
	/*{
		// Convert drugs.json to drugs.csv
		items, err := getRawFile()
		require.NoError(t, err)
		err = makeDrugsFile(items)
		require.NoError(t, err)
	}*/
	// Create database file in memory
	fd, err := memfile.New("drugs", []byte("Hello word"))
	if err != nil {
		log.Fatalf("memfile: %v", err)
	}
	fp := fmt.Sprintf("/proc/self/fd/%d", fd)
	f := os.NewFile(uintptr(fd), fp)
	defer f.Close()
	// NOTE(review): 666 here is decimal (0o1232), not the intended 0666
	// octal mode — harmless only because the memfile already exists, so the
	// permission argument is ignored; confirm and switch to 0666.
	err = ioutil.WriteFile(fp, []byte{}, 666)
	require.NoError(t, err)
	// Create database, based on virtual file
	db, err := bolt.Open(fp, nil)
	require.NoError(t, err)
	// Create searcher
	documents := document.New()
	searcher, err := newTestSearcher(db, documents)
	require.NoError(t, err)
	// Import raw data
	raw, err := readDrugs()
	require.NoError(t, err)
	// Build one document per CSV line; the line text doubles as every
	// searchable field, and the line number as every identifier.
	var defs []*DocDef
	for i, r := range raw {
		id := fmt.Sprintf("%d", i)
		doc := document.Doc{
			Id:           id,
			Name:         r,
			Drug:         id,
			GroupIndex:   r,
			SearchIndex:  r,
			NameInn:      r,
			InnIndex:     r,
			ParcelCode:   id,
			ParcelCodeEx: id,
			BarCode:      "",
			Weight:       1,
			Analog:       "",
		}
		body, err := json.Marshal(doc)
		require.NoError(t, err)
		defs = append(
			defs,
			&DocDef{
				Head: doc,
				Body: string(body),
			},
		)
	}
	err = searcher.UpdateAll(context.Background(), defs, true)
	require.NoError(t, err)
	// Execute tests
	tests, err := readPolling()
	require.NoError(t, err)
	// An absent expected result is penalized with the corpus size.
	infinite := float32(len(raw))
	var es []float32
	// ExtendsTuner(true, nil)
	details := new(Details)
	searcher.InitDetails(details)
	details.Tuner = ExtendsTuner(true, nil)
	for query, expected := range tests {
		res, err := searcher.Search(
			context.Background(),
			query,
			"name",
			details,
		)
		require.NoError(t, err)
		actual, err := extractResults(res)
		require.NoError(t, err)
		e := estimateResults(expected, actual, infinite)
		log.Printf("Estimation for %q is %g", query, e)
		es = append(es, e)
	}
	e := estimateEstimations(es)
	// NOTE(review): t.Error makes this test always fail — presumably a
	// deliberate trick to surface the score in CI output; confirm intent.
	t.Error("Average poll distance is ", e)
}
// newTestSearcher builds a search Manager over db using the default strategy
// options and the Rus table (presumably the Russian language/alphabet
// setting — confirm against the package's definition).
func newTestSearcher(
	db bolt.DB,
	documents document.Manager,
) (Manager, error) {
	options := DefaultStrategyOptions()
	return NewManager(
		db,
		documents,
		DefaultStrategyFactory(
			options,
		),
		ManagerOptions{
			Band: options.Band,
		},
		Rus,
	)
}
// extractResults pulls the "search_index" value out of each result document.
func extractResults(res Results) ([]string, error) {
	var rs []string
	for _, r := range res {
		var drug map[string]interface{}
		if err := core.JsonUnmarshal(r.Document, &drug); err != nil {
			return nil, err
		}
		val, ok := drug["search_index"]
		if !ok {
			return nil, errors.New("search_index not found")
		}
		// Conversion failures yield an empty string, matching the original.
		name, _ := core.ConvertToString(val)
		rs = append(rs, name)
	}
	return rs, nil
}
// estimateResults scores how well actual matches expected: each expected
// string contributes its position in actual (or 'infinite' when absent),
// and the positions are averaged.
func estimateResults(expected, actual []string, infinite float32) float32 {
	es := make([]float32, 0, len(expected))
	for _, s := range expected {
		pos := indexOf(s, actual)
		if pos < 0 {
			es = append(es, infinite)
		} else {
			es = append(es, float32(pos))
		}
	}
	return estimateEstimations(es)
}
// estimateEstimations reduces a list of per-query distances to a single
// score: their arithmetic mean (0 for an empty list).
//
// FIX: the original began with `return avg(es)`, leaving a variance
// computation below it permanently unreachable; the dead code is removed
// while keeping the effective behavior (plain average).
func estimateEstimations(es []float32) float32 {
	if len(es) == 0 {
		return 0
	}
	var sum float32
	for _, e := range es {
		sum += e
	}
	return sum / float32(len(es))
}
// indexOf returns the position of s in ss, or -1 when absent.
func indexOf(s string, ss []string) int {
	for i := range ss {
		if ss[i] == s {
			return i
		}
	}
	return -1
}
// avg returns the arithmetic mean of es, or 0 for an empty slice.
func avg(es []float32) float32 {
	n := len(es)
	if n == 0 {
		return 0
	}
	var total float32
	for _, v := range es {
		total += v
	}
	return total / float32(n)
}
|
package ibmcloud
// Platform stores all the global configuration that all machinesets use.
// Fields marked +optional may be left empty, in which case the installer
// creates the corresponding resources itself.
type Platform struct {
	// Region specifies the IBM Cloud region where the cluster will be
	// created.
	Region string `json:"region"`
	// ResourceGroupName is the name of an already existing resource group where the
	// cluster should be installed. If empty, a new resource group will be created
	// for the cluster.
	// +optional
	ResourceGroupName string `json:"resourceGroupName,omitempty"`
	// NetworkResourceGroupName is the name of an already existing resource group
	// where an existing VPC and set of Subnets exist, to be used during cluster
	// creation.
	// +optional
	NetworkResourceGroupName string `json:"networkResourceGroupName,omitempty"`
	// VPCName is the name of an already existing VPC to be used during cluster
	// creation.
	// +optional
	VPCName string `json:"vpcName,omitempty"`
	// ControlPlaneSubnets are the names of already existing subnets where the
	// cluster control plane nodes should be created.
	// +optional
	ControlPlaneSubnets []string `json:"controlPlaneSubnets,omitempty"`
	// ComputeSubnets are the names of already existing subnets where the cluster
	// compute nodes should be created.
	// +optional
	ComputeSubnets []string `json:"computeSubnets,omitempty"`
	// DefaultMachinePlatform is the default configuration used when installing
	// on IBM Cloud for machine pools which do not define their own platform
	// configuration.
	// +optional
	DefaultMachinePlatform *MachinePool `json:"defaultMachinePlatform,omitempty"`
}
// ClusterResourceGroupName returns the name of the resource group for the
// cluster: the user-specified group when set, the infra ID otherwise.
func (p *Platform) ClusterResourceGroupName(infraID string) string {
	if p.ResourceGroupName != "" {
		return p.ResourceGroupName
	}
	return infraID
}
// GetVPCName returns the user provided name of the VPC for the cluster.
func (p *Platform) GetVPCName() string {
	// The zero value of VPCName is already "", so the original length guard
	// was equivalent to returning the field directly.
	return p.VPCName
}
|
package conf
import (
"io"
"os"
"sort"
"strings"
"github.com/spf13/viper"
"github.com/bolaxy/common/hexutil"
"github.com/bolaxy/crypto"
"github.com/bolaxy/rlp"
)
// Genesis is the user-facing genesis configuration, decoded from the config
// file via mapstructure tags.
type Genesis struct {
	CoinBase          string   `mapstructure:"coinbase"`
	ChainID           string   `mapstructure:"chain-id"`
	ConsensusAccounts []string `mapstructure:"consensus-accounts"`
	Alloc             []Alloc  `mapstructure:"alloc"`
	Poa               *PoaMap  `mapstructure:"poa"`
	Launcher          *PoaMap  `mapstructure:"launcher"`
}
// Alloc describes one pre-funded (and optionally pre-deployed) account.
type Alloc struct {
	Account     string            `mapstructure:"account"`
	Balance     string            `mapstructure:"balance"`
	Code        string            `mapstructure:"code"`
	Storage     map[string]string `mapstructure:"storage"`
	Authorising bool              `mapstructure:"authorising"`
}
// PoaMap describes a PoA (or launcher) contract deployment in the genesis.
type PoaMap struct {
	Address string            `mapstructure:"address"`
	Balance string            `mapstructure:"balance"`
	Abi     string            `mapstructure:"abi"`
	SubAbi  string            `mapstructure:"subabi"`
	Code    string            `mapstructure:"code"`
	Storage map[string]string `mapstructure:"storage"`
}
// genesis is the internal RLP-friendly mirror of Genesis: maps are flattened
// into sorted key/value pairs so the encoding is deterministic.
type genesis struct {
	ChainID           string
	ExtraData         string
	ConsensusAccounts []string `rlp:"nil"`
	Allocs            []alloc  `rlp:"nil"`
	Poa               *poaMap  `rlp:"nil"`
	Launcher          *poaMap  `rlp:"nil"`
}
// alloc is the RLP-friendly mirror of Alloc.
type alloc struct {
	Account     string
	Balance     string
	Code        string
	Storage     [][2]string `rlp:"nil"`
	Authorising bool
}
// poaMap is the RLP-friendly mirror of PoaMap.
type poaMap struct {
	Address string
	Balance string
	ABI     string
	SubAbi  string
	Code    string
	Storage [][2]string `rlp:"nil"`
}
// Hash returns the Keccak256 hash of g's RLP encoding; a nil Genesis
// yields a nil hash.
func (g *Genesis) Hash() ([]byte, error) {
	if g == nil {
		return nil, nil
	}
	encoded, err := EncodeRLPGenesis(g)
	if err != nil {
		return nil, err
	}
	return crypto.Keccak256(encoded), nil
}
// HexHash returns Hash() as a hex-encoded string; a nil Genesis yields "".
func (g *Genesis) HexHash() (string, error) {
	if g == nil {
		return "", nil
	}
	h, err := g.Hash()
	if err != nil {
		return "", err
	}
	return hexutil.Encode(h), nil
}
// EncodeRLP implements rlp.Encoder: the mapstructure-tagged Genesis is
// converted to its internal mirror (maps flattened to sorted pairs) and
// written to w.
func (g *Genesis) EncodeRLP(w io.Writer) error {
	enc := &genesis{
		ChainID:           g.ChainID,
		ConsensusAccounts: g.ConsensusAccounts,
	}
	if len(g.Alloc) > 0 {
		enc.Allocs = translateFromAlloc(g.Alloc)
	}
	if g.Poa != nil {
		enc.Poa = translateFromPoaMap(g.Poa)
	}
	if g.Launcher != nil {
		enc.Launcher = translateFromPoaMap(g.Launcher)
	}
	return rlp.Encode(w, enc)
}
// translateFromAlloc converts user-facing Allocs to their RLP mirrors.
func translateFromAlloc(original []Alloc) []alloc {
	out := make([]alloc, len(original))
	for i, a := range original {
		out[i] = alloc{
			Account:     a.Account,
			Balance:     a.Balance,
			Code:        a.Code,
			Authorising: a.Authorising,
		}
		if len(a.Storage) > 0 {
			out[i].Storage = translateFromStorage(a.Storage)
		}
	}
	return out
}
// translateFromPoaMap converts a user-facing PoaMap to its RLP mirror.
func translateFromPoaMap(original *PoaMap) *poaMap {
	out := &poaMap{
		Address: original.Address,
		Balance: original.Balance,
		ABI:     original.Abi,
		SubAbi:  original.SubAbi,
		Code:    original.Code,
	}
	if len(original.Storage) > 0 {
		out.Storage = translateFromStorage(original.Storage)
	}
	return out
}
// translateFromStorage flattens a storage map into key/value pairs.
func translateFromStorage(store map[string]string) [][2]string {
	target := make([][2]string, 0, len(store))
	for k, v := range store {
		target = append(target, [2]string{k, v})
	}
	// Map iteration order is random; sort for deterministic RLP output.
	sort.Sort(alphabetic(target))
	return target
}
// DecodeRLP implements rlp.Decoder: the internal mirror is decoded from the
// stream and expanded back into the user-facing Genesis.
func (g *Genesis) DecodeRLP(s *rlp.Stream) error {
	var dec genesis
	if err := s.Decode(&dec); err != nil {
		return err
	}
	g.ChainID = dec.ChainID
	g.ConsensusAccounts = dec.ConsensusAccounts
	if len(dec.Allocs) > 0 {
		g.Alloc = translateToAlloc(dec.Allocs)
	}
	if dec.Poa != nil {
		g.Poa = translateToPoaMap(dec.Poa)
	}
	if dec.Launcher != nil {
		g.Launcher = translateToPoaMap(dec.Launcher)
	}
	return nil
}
// translateToAlloc converts RLP-mirror allocs back to user-facing Allocs.
func translateToAlloc(source []alloc) []Alloc {
	out := make([]Alloc, len(source))
	for i, item := range source {
		out[i] = Alloc{
			Account:     item.Account,
			Balance:     item.Balance,
			Code:        item.Code,
			Authorising: item.Authorising,
		}
		if len(item.Storage) > 0 {
			out[i].Storage = translateToStorage(item.Storage)
		}
	}
	return out
}
// translateToPoaMap converts an RLP-mirror poaMap back to a user-facing PoaMap.
func translateToPoaMap(source *poaMap) *PoaMap {
	out := &PoaMap{
		Address: source.Address,
		Balance: source.Balance,
		Abi:     source.ABI,
		SubAbi:  source.SubAbi,
		Code:    source.Code,
	}
	if len(source.Storage) > 0 {
		out.Storage = translateToStorage(source.Storage)
	}
	return out
}
// translateToStorage rebuilds a storage map from flattened key/value pairs.
func translateToStorage(source [][2]string) map[string]string {
	store := make(map[string]string, len(source))
	for _, kv := range source {
		store[kv[0]] = kv[1]
	}
	return store
}
// EncodeRLPGenesis returns the RLP encoding of g (field mapping is defined
// by Genesis.EncodeRLP).
func EncodeRLPGenesis(g *Genesis) ([]byte, error) {
	return rlp.EncodeToBytes(g)
}
// DecodeRLPGenesis decodes an RLP-encoded genesis blob into a Genesis.
func DecodeRLPGenesis(genesis []byte) (*Genesis, error) {
	g := new(Genesis)
	if err := rlp.DecodeBytes(genesis, g); err != nil {
		return nil, err
	}
	return g, nil
}
// GetGenesisFromFile loads and decodes a genesis configuration file.
// NOTE(review): this mutates the global viper instance, so concurrent
// callers share configuration state.
func GetGenesisFromFile(filePath string) (*Genesis, error) {
	if _, err := os.Stat(filePath); err != nil {
		return nil, err
	}
	viper.SetConfigFile(filePath)
	if err := viper.ReadInConfig(); err != nil {
		return nil, err
	}
	var g Genesis
	if err := viper.Unmarshal(&g); err != nil {
		return nil, err
	}
	return &g, nil
}
// alphabetic sorts key/value pairs case-insensitively by key, breaking ties
// with a case-sensitive comparison so the order is fully deterministic.
type alphabetic [][2]string

func (list alphabetic) Len() int      { return len(list) }
func (list alphabetic) Swap(i, j int) { list[i], list[j] = list[j], list[i] }
func (list alphabetic) Less(i, j int) bool {
	ki, kj := list[i][0], list[j][0]
	li, lj := strings.ToLower(ki), strings.ToLower(kj)
	if li != lj {
		return li < lj
	}
	return ki < kj
}
|
/*
Given an m x n matrix, return all elements of the matrix in spiral order.
Example 1:
Input: matrix = [[1,2,3],[4,5,6],[7,8,9]]
Output: [1,2,3,6,9,8,7,4,5]
Input: matrix = [[1,2,3,4],[5,6,7,8],[9,10,11,12]]
Output: [1,2,3,4,8,12,11,10,9,5,6,7]
Constraints:
m == matrix.length
n == matrix[i].length
1 <= m, n <= 10
-100 <= matrix[i][j] <= 100
*/
package main
import (
"fmt"
"reflect"
)
// main runs the spiral-order examples as self-checking cases.
func main() {
	cases := []struct {
		matrix [][]int
		want   []int
	}{
		{[][]int{{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}, []int{1, 2, 3, 6, 9, 8, 7, 4, 5}},
		{[][]int{{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}}, []int{1, 2, 3, 4, 8, 12, 11, 10, 9, 5, 6, 7}},
		{[][]int{{1, 2, 3, 4}}, []int{1, 2, 3, 4}},
		{[][]int{{1, 2}, {3, 4}}, []int{1, 2, 4, 3}},
		{[][]int{{10}, {11}, {12}, {13}}, []int{10, 11, 12, 13}},
		{[][]int{{1, 2, 3, 4}, {5, 6, 7, 8}}, []int{1, 2, 3, 4, 8, 7, 6, 5}},
		{[][]int{{}}, []int{}},
	}
	for _, c := range cases {
		test(c.matrix, c.want)
	}
}
// assert panics when x is false.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// test prints spiral(m) and panics if it differs from the expected r.
func test(m [][]int, r []int) {
	got := spiral(m)
	fmt.Println(got)
	assert(reflect.DeepEqual(got, r))
}
func spiral(m [][]int) []int {
if len(m) == 0 || len(m[0]) == 0 {
return []int{}
}
h := len(m)
w := len(m[0])
p := []int{}
x0, x1 := 0, w-1
y0, y1 := 0, h-1
for x0 <= x1 && y0 <= y1 {
for x, y := x0, y0; x <= x1; x++ {
p = append(p, m[y][x])
}
for x, y := x1, y0+1; y <= y1; y++ {
p = append(p, m[y][x])
}
for x, y := x1-1, y1; x >= x0 && y0 != y1; x-- {
p = append(p, m[y][x])
}
for x, y := x0, y1-1; y > y0 && x0 != x1; y-- {
p = append(p, m[y][x])
}
x0, y0 = x0+1, y0+1
x1, y1 = x1-1, y1-1
}
return p
}
|
package calc
// TokenType identifies the lexical category of a Token.
type TokenType int
// Token is a single lexeme produced by the calculator's scanner.
type Token struct {
	Type TokenType
	Value string
}
// eof is the sentinel rune returned when the input is exhausted.
var eof = rune(0)
// Token categories recognized by the scanner.
const (
	NUMBER TokenType = iota
	LPAREN
	RPAREN
	CONSTANT
	FUNCTION
	OPERATOR
	WHITESPACE
	ERROR
	EOF
)
|
// mp3rwInfo
package main
import (
"errors"
"flag"
"fmt"
"io"
"os"
"strings"
)
// mp3Tag holds the five text fields of an ID3v1 tag (the fixed 128-byte
// block at the end of an MP3 file).
type mp3Tag struct {
	title string
	artist string
	album string
	year string
	comment string
}
// flags parses the command line and returns path, title, artist, album,
// year and comment values.
// NOTE(review): the path flag is registered as "Path" (capitalized) unlike
// the other, lowercase flag names — confirm before renaming.
func flags() (string, string, string, string, string, string) {
	var (
		sPath    = flag.String("Path", "", "Path")
		sTitle   = flag.String("title", "", "title")
		sArtist  = flag.String("artist", "", "artist")
		sAlbum   = flag.String("album", "", "album")
		sYear    = flag.String("year", "", "year")
		sComment = flag.String("comment", "", "comment")
	)
	flag.Parse()
	return *sPath, *sTitle, *sArtist, *sAlbum, *sYear, *sComment
}
// getTagBytes reads the trailing 128-byte ID3v1 tag block of the file at sPath.
//
// FIXES: Close is now deferred only after a successful Open (it used to be
// deferred before the error check), the deprecated os.SEEK_END constant is
// replaced by io.SeekEnd, and io.ReadFull guarantees all 128 bytes are read
// (a bare Read may legally return fewer bytes without an error).
func getTagBytes(sPath string) ([]byte, error) {
	file, err := os.Open(sPath)
	if err != nil {
		return nil, err
	}
	defer file.Close()
	if _, err = file.Seek(-128, io.SeekEnd); err != nil {
		return nil, err
	}
	tagBytes := make([]byte, 128)
	if _, err = io.ReadFull(file, tagBytes); err != nil {
		return nil, err
	}
	return tagBytes, nil
}
// setTagBytes overwrites the trailing 128 bytes of the file at sPath with
// tagBytes (the ID3v1 tag block).
//
// FIXES: Close is now deferred only after a successful OpenFile, and the
// deprecated os.SEEK_END constant is replaced by io.SeekEnd.
func setTagBytes(sPath string, tagBytes []byte) error {
	file, err := os.OpenFile(sPath, os.O_RDWR, 0655)
	if err != nil {
		return err
	}
	defer file.Close()
	if _, err = file.Seek(-128, io.SeekEnd); err != nil {
		return err
	}
	if _, err = file.Write(tagBytes); err != nil {
		return err
	}
	return nil
}
// SetMp3Info serializes m into the fixed ID3v1 layout ("TAG" magic followed
// by the text fields) and writes it over the last 128 bytes of the file.
func SetMp3Info(m mp3Tag, sPath string) error {
	tagBytes := make([]byte, 128)
	copy(tagBytes[0:3], "TAG")
	copy(tagBytes[3:33], m.title)
	copy(tagBytes[33:63], m.artist)
	copy(tagBytes[63:93], m.album)
	copy(tagBytes[93:97], m.year)
	copy(tagBytes[97:127], m.comment)
	return setTagBytes(sPath, tagBytes)
}
// GetMp3Info reads the ID3v1 tag of the file at sPath; the "TAG" magic must
// be present. Fields are returned with their fixed-width padding intact.
func GetMp3Info(sPath string) (mp3Tag, error) {
	tagBytes, err := getTagBytes(sPath)
	if err != nil {
		return mp3Tag{}, err
	}
	if string(tagBytes[:3]) != "TAG" {
		return mp3Tag{}, errors.New("error")
	}
	return mp3Tag{
		title:   string(tagBytes[3:33]),
		artist:  string(tagBytes[33:63]),
		album:   string(tagBytes[63:93]),
		year:    string(tagBytes[93:97]),
		comment: string(tagBytes[97:127]),
	}, nil
}
// changeStrings replaces every "<br>" marker in sVal with a single space.
func changeStrings(sVal string) string {
	return strings.ReplaceAll(sVal, "<br>", " ")
}
// WriteString appends sText to the file at sPath, creating the file if it
// does not exist. Errors are printed (write) or swallowed (open), matching
// the original best-effort behavior.
func WriteString(sPath, sText string) {
	file, err := os.OpenFile(sPath, os.O_RDWR|os.O_APPEND, 0660)
	if os.IsNotExist(err) {
		file, err = os.Create(sPath)
	}
	defer file.Close()
	if err != nil {
		return
	}
	if n, err := io.WriteString(file, sText); err != nil {
		fmt.Println(n, err)
	}
}
// f1 is a manual smoke test: writes a fixed tag to D:\test.mp3.
func f1() {
	tTag := mp3Tag{
		title:   "test",
		artist:  "test",
		album:   "test",
		year:    "2017",
		comment: "test",
	}
	SetMp3Info(tTag, "D:\\test.mp3")
}
// f2 is a manual smoke test: reads and prints the tag of D:\test.mp3.
func f2() {
	tTag, err := GetMp3Info("D:\\test.mp3")
	if err != nil {
		return
	}
	fmt.Println("title:" + tTag.title)
	fmt.Println("artist:" + tTag.artist)
	fmt.Println("album:" + tTag.album)
	fmt.Println("year:" + tTag.year)
	fmt.Println("comment:" + tTag.comment)
}
// f3 dumps the first six command line arguments to D:\Args.txt, one
// Sprintf-formatted slice per WriteString call (preserving the original
// "[arg]" rendering).
//
// BUG FIX: the old guard only required two arguments but then sliced
// os.Args up to index 6, panicking whenever fewer than six were supplied;
// the guard now matches the number of arguments actually read.
func f3() {
	if len(os.Args) < 6 {
		return
	}
	file := "D:\\Args.txt"
	for i := 0; i < 6; i++ {
		WriteString(file, fmt.Sprintf("%s", os.Args[i:i+1]))
	}
}
// f4 is the flag-driven entry point: it sanitizes the flag values
// (replacing "<br>" markers with spaces), writes the tag, then reads it
// back and prints the result.
func f4() {
	sPath, sTitle, sArtist, sAlbum, sYear, sComment := flags()
	replacer := strings.NewReplacer("<br>", " ")
	sPath = replacer.Replace(sPath)
	sTitle = replacer.Replace(sTitle)
	sArtist = replacer.Replace(sArtist)
	sAlbum = replacer.Replace(sAlbum)
	sYear = replacer.Replace(sYear)
	sComment = replacer.Replace(sComment)
	tTag := mp3Tag{
		title:   sTitle,
		artist:  sArtist,
		album:   sAlbum,
		year:    sYear,
		comment: sComment,
	}
	SetMp3Info(tTag, sPath)
	// Read back for verification.
	if tReadTag, err := GetMp3Info(sPath); err == nil {
		fmt.Println("title:" + tReadTag.title)
		fmt.Println("artist:" + tReadTag.artist)
		fmt.Println("album:" + tReadTag.album)
		fmt.Println("year:" + tReadTag.year)
		fmt.Println("comment:" + tReadTag.comment)
	}
}
// main runs the flag-driven tag writer/reader (f1-f3 are manual helpers).
func main() {
	f4()
}
|
package h2quic
import (
"bytes"
"errors"
"io/ioutil"
"net/http"
"net/textproto"
"strconv"
"strings"
"golang.org/x/net/http2"
)
// copied from net/http2/transport.go
var errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit")
// noBody is a reusable empty, closable body for responses without content.
var noBody = ioutil.NopCloser(bytes.NewReader(nil))
// responseFromHeaders builds an http.Response from a decoded HEADERS frame
// (adapted from net/http2's handleResponse function).
func responseFromHeaders(f *http2.MetaHeadersFrame) (*http.Response, error) {
	if f.Truncated {
		return nil, errResponseHeaderListSize
	}
	status := f.PseudoValue("status")
	if status == "" {
		return nil, errors.New("missing status pseudo header")
	}
	statusCode, err := strconv.Atoi(status)
	if err != nil {
		return nil, errors.New("malformed non-numeric status pseudo header")
	}
	// TODO: handle statusCode == 100
	res := &http.Response{
		Proto:      "HTTP/2.0",
		ProtoMajor: 2,
		Header:     make(http.Header),
		StatusCode: statusCode,
		Status:     status + " " + http.StatusText(statusCode),
	}
	for _, hf := range f.RegularFields() {
		key := http.CanonicalHeaderKey(hf.Name)
		if key != "Trailer" {
			res.Header[key] = append(res.Header[key], hf.Value)
			continue
		}
		// Announced trailer names get nil placeholder entries in res.Trailer.
		if res.Trailer == nil {
			res.Trailer = make(http.Header)
		}
		foreachHeaderElement(hf.Value, func(v string) {
			res.Trailer[http.CanonicalHeaderKey(v)] = nil
		})
	}
	return res, nil
}
// continuation of the handleResponse function
func setLength(res *http.Response, isHead, streamEnded bool) *http.Response {
if !streamEnded || isHead {
res.ContentLength = -1
if clens := res.Header["Content-Length"]; len(clens) == 1 {
if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil {
res.ContentLength = clen64
}
}
}
return res
}
// copied from net/http/server.go
// foreachHeaderElement splits v according to the "#rule" construction
// in RFC 2616 section 2.1 and calls fn for each non-empty element.
func foreachHeaderElement(v string, fn func(string)) {
v = textproto.TrimString(v)
if v == "" {
return
}
if !strings.Contains(v, ",") {
fn(v)
return
}
for _, f := range strings.Split(v, ",") {
if f = textproto.TrimString(f); f != "" {
fn(f)
}
}
}
|
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"github.com/packethost/cluster-api-provider-packet/pkg/cloud/packet"
"github.com/packethost/cluster-api-provider-packet/pkg/cloud/packet/deployer"
"github.com/packethost/cluster-api-provider-packet/pkg/cloud/packet/util"
"k8s.io/klog"
"sigs.k8s.io/cluster-api/cmd/clusterctl/cmd"
"sigs.k8s.io/cluster-api/pkg/apis/cluster/common"
)
// main wires up the Packet cluster-api provider: it builds a Packet API
// client, constructs the machine deployer, registers it as the "packet"
// cluster provisioner, and then hands control to the clusterctl command.
// Any setup failure aborts the process via klog.Fatalf.
func main() {
	var err error
	flag.Parse()
	// get a packet client
	client, err := packet.GetClient()
	if err != nil {
		klog.Fatalf("unable to get Packet client: %v", err)
	}
	// get a deployer, which is needed at various stages
	deployer, err := deployer.New(deployer.Params{
		Port: util.ControlPort,
		Client: client,
	})
	if err != nil {
		klog.Fatalf("unable to get deployer: %v", err)
	}
	// Make the deployer available to clusterctl under the "packet" provider name.
	common.RegisterClusterProvisioner("packet", deployer)
	cmd.Execute()
}
|
//
// Copyright (c) 2015-2017 Snowplow Analytics Ltd. All rights reserved.
//
// This program is licensed to you under the Apache License Version 2.0,
// and you may not use this file except in compliance with the Apache License Version 2.0.
// You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the Apache License Version 2.0 is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
//
package main
import (
"crypto/tls"
"gopkg.in/pg.v5"
"net"
"time"
)
// For Redshift queries
const (
	// dialTimeout bounds how long connection establishment may take.
	dialTimeout = 10 * time.Second
	// readTimeout bounds how long a single query may run.
	readTimeout = 8 * time.Hour // TODO: make this user configurable
)
// PostgresTarget pairs a generic Target configuration with a live go-pg
// database handle so queries can be executed against PostgreSQL/Redshift.
type PostgresTarget struct {
	Target
	Client *pg.DB
}
// NewPostgresTarget connects to the configured target database and returns a
// PostgresTarget wrapping the connection. When target.Ssl is set, TLS is
// enabled WITHOUT certificate verification.
func NewPostgresTarget(target Target) *PostgresTarget {
	var tlsConfig *tls.Config
	if target.Ssl { // fixed: `== true` is redundant on a bool
		tlsConfig = &tls.Config{
			// NOTE(review): skips server certificate validation — confirm
			// this is intentional for the Redshift/SSL use case.
			InsecureSkipVerify: true,
		}
	}
	db := pg.Connect(&pg.Options{
		Addr:        target.Host + ":" + target.Port,
		User:        target.Username,
		Password:    target.Password,
		Database:    target.Database,
		TLSConfig:   tlsConfig,
		DialTimeout: dialTimeout,
		ReadTimeout: readTimeout,
		// Custom dialer enables TCP keep-alives so long-running queries do
		// not have their idle connection dropped by intermediaries.
		Dialer: func(network, addr string) (net.Conn, error) {
			cn, err := net.DialTimeout(network, addr, dialTimeout)
			if err != nil {
				return nil, err
			}
			return cn, cn.(*net.TCPConn).SetKeepAlive(true)
		},
	})
	return &PostgresTarget{target, db}
}
// GetTarget returns the underlying Target configuration.
func (pt PostgresTarget) GetTarget() Target {
	return pt.Target
}
// RunQuery executes query.Script against the connected database and reports
// the outcome. In dryRun mode nothing is executed and a zero-row success
// status is returned. On error, affected stays 0 and the error is carried
// in the returned QueryStatus.
func (pt PostgresTarget) RunQuery(query ReadyQuery, dryRun bool) QueryStatus {
	if dryRun {
		return QueryStatus{query, query.Path, 0, nil}
	}
	res, err := pt.Client.Exec(query.Script)
	affected := 0
	if err == nil {
		affected = res.RowsAffected()
	}
	return QueryStatus{query, query.Path, affected, err}
}
|
package simulation
import (
"encoding/json"
"fmt"
"github.com/cosmos/cosmos-sdk/types/module"
simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
"github.com/irisnet/irismod/modules/nft/types"
)
// Denom IDs used for the two randomly generated NFT collections.
const (
	kitties = "kitties"
	doggos = "doggos"
)
// RandomizedGenState generates a random GenesisState for nft.
// Two empty collections ("doggos" and "kitties") are created up front;
// roughly 10% of the simulation accounts are then given one randomly
// generated NFT, split evenly between the two collections. The resulting
// genesis state is marshaled into simState.GenState under the module name.
func RandomizedGenState(simState *module.SimulationState) {
	collections := types.NewCollections(
		types.NewCollection(
			types.Denom{
				Id: doggos,
				Name: doggos,
				Schema: "",
				Creator: nil,
			},
			types.NFTs{},
		),
		types.NewCollection(
			types.Denom{
				Id: kitties,
				Name: kitties,
				Schema: "",
				Creator: nil,
			},
			types.NFTs{}),
	)
	for _, acc := range simState.Accounts {
		// 10% of accounts own an NFT
		if simState.Rand.Intn(100) < 10 {
			baseNFT := types.NewBaseNFT(
				simtypes.RandStringOfLength(simState.Rand, 20), // id
				simtypes.RandStringOfLength(simState.Rand, 10),
				acc.Address,
				simtypes.RandStringOfLength(simState.Rand, 45), // tokenURI
				simtypes.RandStringOfLength(simState.Rand, 10),
			)
			// 50% doggos and 50% kitties
			if simState.Rand.Intn(100) < 50 {
				collections[0].Denom.Creator = baseNFT.Owner
				collections[0] = collections[0].AddNFT(baseNFT)
			} else {
				collections[1].Denom.Creator = baseNFT.Owner
				collections[1] = collections[1].AddNFT(baseNFT)
			}
		}
	}
	nftGenesis := types.NewGenesisState(collections)
	// Pretty-print the generated state for the simulation log, then store
	// the codec-marshaled form in the genesis map.
	bz, err := json.MarshalIndent(nftGenesis, "", " ")
	if err != nil {
		panic(err)
	}
	fmt.Printf("Selected randomly generated %s parameters:\n%s\n", types.ModuleName, bz)
	simState.GenState[types.ModuleName] = simState.Cdc.MustMarshalJSON(nftGenesis)
}
|
package binance
import (
	"context"
	"encoding/json"
	"net/http"
)
// FiatDepositWithdrawHistoryService retrieve the fiat deposit/withdraw history.
// Optional filters are stored as pointers so that unset values are omitted
// from the request entirely.
type FiatDepositWithdrawHistoryService struct {
	c *Client
	transactionType TransactionType // required
	beginTime *int64
	endTime *int64
	page *int32
	rows *int32
}
// TransactionType sets the required transactionType request parameter.
func (s *FiatDepositWithdrawHistoryService) TransactionType(transactionType TransactionType) *FiatDepositWithdrawHistoryService {
	s.transactionType = transactionType
	return s
}

// BeginTime sets the optional beginTime filter (milliseconds).
func (s *FiatDepositWithdrawHistoryService) BeginTime(beginTime int64) *FiatDepositWithdrawHistoryService {
	s.beginTime = &beginTime
	return s
}

// EndTime sets the optional endTime filter (milliseconds).
func (s *FiatDepositWithdrawHistoryService) EndTime(endTime int64) *FiatDepositWithdrawHistoryService {
	s.endTime = &endTime
	return s
}

// Page sets the optional result page number.
func (s *FiatDepositWithdrawHistoryService) Page(page int32) *FiatDepositWithdrawHistoryService {
	s.page = &page
	return s
}

// Rows sets the optional number of rows per page.
func (s *FiatDepositWithdrawHistoryService) Rows(rows int32) *FiatDepositWithdrawHistoryService {
	s.rows = &rows
	return s
}
// Do sends the signed GET /sapi/v1/fiat/orders request with the configured
// filters and decodes the JSON response. Optional parameters are included
// only when they were explicitly set.
func (s *FiatDepositWithdrawHistoryService) Do(ctx context.Context, opts ...RequestOption) (*FiatDepositWithdrawHistory, error) {
	r := &request{
		method: http.MethodGet,
		endpoint: "/sapi/v1/fiat/orders",
		secType: secTypeSigned,
	}
	r.setParam("transactionType", s.transactionType)
	if s.beginTime != nil {
		r.setParam("beginTime", *s.beginTime)
	}
	if s.endTime != nil {
		r.setParam("endTime", *s.endTime)
	}
	if s.page != nil {
		r.setParam("page", *s.page)
	}
	if s.rows != nil {
		r.setParam("rows", *s.rows)
	}
	data, err := s.c.callAPI(ctx, r, opts...)
	if err != nil {
		return nil, err
	}
	res := FiatDepositWithdrawHistory{}
	if err = json.Unmarshal(data, &res); err != nil {
		return nil, err
	}
	return &res, nil
}
// FiatDepositWithdrawHistory define the fiat deposit/withdraw history
// response envelope returned by /sapi/v1/fiat/orders.
type FiatDepositWithdrawHistory struct {
	Code string `json:"code"`
	Message string `json:"message"`
	Data []FiatDepositWithdrawHistoryItem `json:"data"`
	Total int32 `json:"total"`
	Success bool `json:"success"`
}

// FiatDepositWithdrawHistoryItem define a fiat deposit/withdraw history item.
// Monetary amounts are returned as strings by the API.
type FiatDepositWithdrawHistoryItem struct {
	OrderNo string `json:"orderNo"`
	FiatCurrency string `json:"fiatCurrency"`
	IndicatedAmount string `json:"indicatedAmount"`
	Amount string `json:"amount"`
	TotalFee string `json:"totalFee"`
	Method string `json:"method"`
	Status string `json:"status"`
	CreateTime int64 `json:"createTime"`
	UpdateTime int64 `json:"updateTime"`
}
// FiatPaymentsHistoryService retrieve the fiat payments history.
// Optional filters are stored as pointers so that unset values are omitted
// from the request entirely.
type FiatPaymentsHistoryService struct {
	c *Client
	transactionType TransactionType // required
	beginTime *int64
	endTime *int64
	page *int32
	rows *int32
}
// TransactionType sets the required transactionType request parameter.
func (s *FiatPaymentsHistoryService) TransactionType(transactionType TransactionType) *FiatPaymentsHistoryService {
	s.transactionType = transactionType
	return s
}

// BeginTime sets the optional beginTime filter (milliseconds).
func (s *FiatPaymentsHistoryService) BeginTime(beginTime int64) *FiatPaymentsHistoryService {
	s.beginTime = &beginTime
	return s
}

// EndTime sets the optional endTime filter (milliseconds).
func (s *FiatPaymentsHistoryService) EndTime(endTime int64) *FiatPaymentsHistoryService {
	s.endTime = &endTime
	return s
}

// Page sets the optional result page number.
func (s *FiatPaymentsHistoryService) Page(page int32) *FiatPaymentsHistoryService {
	s.page = &page
	return s
}

// Rows sets the optional number of rows per page.
func (s *FiatPaymentsHistoryService) Rows(rows int32) *FiatPaymentsHistoryService {
	s.rows = &rows
	return s
}
// Do sends the signed GET /sapi/v1/fiat/payments request with the configured
// filters and decodes the JSON response. Optional parameters are included
// only when they were explicitly set.
func (s *FiatPaymentsHistoryService) Do(ctx context.Context, opts ...RequestOption) (*FiatPaymentsHistory, error) {
	r := &request{
		method: http.MethodGet,
		endpoint: "/sapi/v1/fiat/payments",
		secType: secTypeSigned,
	}
	r.setParam("transactionType", s.transactionType)
	if s.beginTime != nil {
		r.setParam("beginTime", *s.beginTime)
	}
	if s.endTime != nil {
		r.setParam("endTime", *s.endTime)
	}
	if s.page != nil {
		r.setParam("page", *s.page)
	}
	if s.rows != nil {
		r.setParam("rows", *s.rows)
	}
	data, err := s.c.callAPI(ctx, r, opts...)
	if err != nil {
		return nil, err
	}
	res := FiatPaymentsHistory{}
	if err = json.Unmarshal(data, &res); err != nil {
		return nil, err
	}
	return &res, nil
}
// FiatPaymentsHistory define the fiat payments history response envelope
// returned by /sapi/v1/fiat/payments.
type FiatPaymentsHistory struct {
	Code string `json:"code"`
	Message string `json:"message"`
	Data []FiatPaymentsHistoryItem `json:"data"`
	Total int32 `json:"total"`
	Success bool `json:"success"`
}

// FiatPaymentsHistoryItem define a fiat payments history item.
// Monetary amounts and prices are returned as strings by the API.
type FiatPaymentsHistoryItem struct {
	OrderNo string `json:"orderNo"`
	SourceAmount string `json:"sourceAmount"`
	FiatCurrency string `json:"fiatCurrency"`
	ObtainAmount string `json:"obtainAmount"`
	CryptoCurrency string `json:"cryptoCurrency"`
	TotalFee string `json:"totalFee"`
	Price string `json:"price"`
	Status string `json:"status"`
	CreateTime int64 `json:"createTime"`
	UpdateTime int64 `json:"updateTime"`
}
|
/* nighthawk.audit.parser.systeminfo.go
* author: roshan maskey <roshanmaskey@gmail.com>
*
* Parser for SystemInformation
*/
package parser
import (
"fmt"
"os"
"encoding/xml"
"strings"
"nighthawk/elastic"
nhconfig "nighthawk/config"
nhs "nighthawk/nhstruct"
nhutil "nighthawk/util"
nhlog "nighthawk/log"
nhc "nighthawk/common"
)
// ParseSystemInfo streams SystemInfoItem elements out of an audit XML file,
// normalizes empty/odd timestamps, and bulk-posts the records to
// Elasticsearch in batches of nhconfig.BulkPostSize(). The process exits if
// the audit file cannot be opened.
func ParseSystemInfo(caseinfo nhs.CaseInformation, auditinfo nhs.AuditType, auditfile string) {
    MAX_RECORD := nhconfig.BulkPostSize()
    xmlFile,err := os.Open(auditfile)
    if err != nil {
        nhlog.LogMessage("ParseSystemInfo", "ERROR", fmt.Sprintf("Failed to read audit file. %s", err.Error()))
        os.Exit(nhc.ERROR_READING_AUDIT_FILE)
    }
    defer xmlFile.Close()
    decoder := xml.NewDecoder(xmlFile)
    count := 0
    total := 0
    var inElement string
    var esrecords []nhs.RlRecord
    for {
        // Flush a full batch before decoding more tokens.
        if count == MAX_RECORD {
            elastic.ProcessOutput(caseinfo, auditinfo, esrecords)
            esrecords = esrecords[:0]
            count = 0
        }
        // NOTE(review): the token error is discarded, so malformed XML is
        // indistinguishable from EOF — both flush the remainder and stop.
        t,_ := decoder.Token()
        if t == nil {
            elastic.ProcessOutput(caseinfo, auditinfo, esrecords)
            esrecords = esrecords[:0]
            count = 0
            break
        }
        switch se := t.(type) {
        case xml.StartElement:
            inElement = se.Name.Local
            if inElement == "SystemInfoItem" {
                var item nhs.SystemInfoItem
                decoder.DecodeElement(&item, &se)
                // Fix empty timestamp
                if item.BiosDate == "" {
                    item.BiosDate = nhutil.FixEmptyTimestamp()
                } else {
                    fixedDate := FixBiosDate(item.BiosDate)
                    item.BiosDate = fixedDate
                }
                if item.Date == "" {item.Date = nhutil.FixEmptyTimestamp()}
                if item.InstallDate == "" {item.InstallDate = nhutil.FixEmptyTimestamp()}
                if item.AppCreated == "" {item.AppCreated = nhutil.FixEmptyTimestamp()}
                // DHCP lease timestamps may be missing per network interface.
                for i := range item.NetworkList {
                    if item.NetworkList[i].DhcpLeaseObtained == "" {
                        item.NetworkList[i].DhcpLeaseObtained = nhutil.FixEmptyTimestamp()
                    }
                    if item.NetworkList[i].DhcpLeaseExpires == "" {
                        item.NetworkList[i].DhcpLeaseExpires = nhutil.FixEmptyTimestamp()
                    }
                }
                // Wrap the item with case/audit metadata for indexing.
                var rlrec nhs.RlRecord
                rlrec.ComputerName = caseinfo.ComputerName
                rlrec.CaseInfo = caseinfo
                rlrec.AuditType = auditinfo
                rlrec.Record = item
                esrecords = append(esrecords, rlrec)
                count++
                total++
            }
        }
    }
    cmsg := fmt.Sprintf("Total SystemInfoItem %d", total)
    nhlog.LogMessage("ParseSystemInfo", "INFO", cmsg)
}
// BiosDate is represented as MM/DD/YYYY
// Changing the date to ISO format
func FixBiosDate(biosdate string) string {
s := strings.SplitN(biosdate, "/", 3)
if len(s) == 3 {
newBiosDate := fmt.Sprintf("%s-%s-%sT00:00:00Z", s[2],s[1],s[0])
return newBiosDate
} else {
return nhutil.FixEmptyTimestamp()
}
} |
// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.
package checksum
import (
"context"
"github.com/gogo/protobuf/proto"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/log"
"github.com/pingcap/tidb/br/pkg/metautil"
"github.com/pingcap/tidb/br/pkg/utils"
"github.com/pingcap/tidb/distsql"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/util/ranger"
"github.com/pingcap/tipb/go-tipb"
"go.uber.org/zap"
)
// ExecutorBuilder is used to build a "kv.Request".
// It accumulates the new table info, snapshot ts, optional backup-side table
// (for checksum rewrite rules), and request tuning knobs before Build().
type ExecutorBuilder struct {
	table *model.TableInfo
	ts uint64

	oldTable *metautil.Table

	concurrency uint
	backoffWeight int

	oldKeyspace []byte
	newKeyspace []byte

	resourceGroupName string
	explicitRequestSourceType string
}
// NewExecutorBuilder returns a new executor builder for the given table at
// snapshot ts, with the default distsql scan concurrency.
func NewExecutorBuilder(table *model.TableInfo, ts uint64) *ExecutorBuilder {
	return &ExecutorBuilder{
		table: table,
		ts: ts,

		concurrency: variable.DefDistSQLScanConcurrency,
	}
}
// SetOldTable sets the backup-side table info; when present, checksum
// requests carry rewrite rules mapping old key prefixes to new ones.
func (builder *ExecutorBuilder) SetOldTable(oldTable *metautil.Table) *ExecutorBuilder {
	builder.oldTable = oldTable
	return builder
}

// SetConcurrency sets the concurrency of the checksum executing.
func (builder *ExecutorBuilder) SetConcurrency(conc uint) *ExecutorBuilder {
	builder.concurrency = conc
	return builder
}

// SetBackoffWeight sets the backoffWeight of the checksum executing.
func (builder *ExecutorBuilder) SetBackoffWeight(backoffWeight int) *ExecutorBuilder {
	builder.backoffWeight = backoffWeight
	return builder
}

// SetOldKeyspace sets the keyspace prefix of the backup cluster's keys.
func (builder *ExecutorBuilder) SetOldKeyspace(keyspace []byte) *ExecutorBuilder {
	builder.oldKeyspace = keyspace
	return builder
}

// SetNewKeyspace sets the keyspace prefix of the restore cluster's keys.
func (builder *ExecutorBuilder) SetNewKeyspace(keyspace []byte) *ExecutorBuilder {
	builder.newKeyspace = keyspace
	return builder
}

// SetResourceGroupName sets the resource group the requests run under.
func (builder *ExecutorBuilder) SetResourceGroupName(name string) *ExecutorBuilder {
	builder.resourceGroupName = name
	return builder
}

// SetExplicitRequestSourceType tags the requests with an explicit source type.
func (builder *ExecutorBuilder) SetExplicitRequestSourceType(name string) *ExecutorBuilder {
	builder.explicitRequestSourceType = name
	return builder
}
// Build builds a checksum executor containing one request per table/partition
// and per public index, using everything configured on the builder.
func (builder *ExecutorBuilder) Build() (*Executor, error) {
	reqs, err := buildChecksumRequest(
		builder.table,
		builder.oldTable,
		builder.ts,
		builder.concurrency,
		builder.oldKeyspace,
		builder.newKeyspace,
		builder.resourceGroupName,
		builder.explicitRequestSourceType,
	)
	if err != nil {
		return nil, errors.Trace(err)
	}
	return &Executor{reqs: reqs, backoffWeight: builder.backoffWeight}, nil
}
// buildChecksumRequest builds the full set of checksum requests for a table:
// one group for the table itself plus one group per partition, each expanded
// into a record request and per-index requests by buildRequest.
func buildChecksumRequest(
	newTable *model.TableInfo,
	oldTable *metautil.Table,
	startTS uint64,
	concurrency uint,
	oldKeyspace []byte,
	newKeyspace []byte,
	resourceGroupName, explicitRequestSourceType string,
) ([]*kv.Request, error) {
	var partDefs []model.PartitionDefinition
	if part := newTable.Partition; part != nil {
		partDefs = part.Definitions
	}

	// One request per (table + each index) for the table and each partition.
	reqs := make([]*kv.Request, 0, (len(newTable.Indices)+1)*(len(partDefs)+1))
	var oldTableID int64
	if oldTable != nil {
		oldTableID = oldTable.Info.ID
	}
	rs, err := buildRequest(newTable, newTable.ID, oldTable, oldTableID, startTS, concurrency,
		oldKeyspace, newKeyspace, resourceGroupName, explicitRequestSourceType)
	if err != nil {
		return nil, errors.Trace(err)
	}
	reqs = append(reqs, rs...)

	for _, partDef := range partDefs {
		var oldPartID int64
		if oldTable != nil {
			// Match old and new partitions by name to find the backup-side ID.
			for _, oldPartDef := range oldTable.Info.Partition.Definitions {
				if oldPartDef.Name == partDef.Name {
					oldPartID = oldPartDef.ID
				}
			}
		}
		rs, err := buildRequest(newTable, partDef.ID, oldTable, oldPartID, startTS, concurrency,
			oldKeyspace, newKeyspace, resourceGroupName, explicitRequestSourceType)
		if err != nil {
			return nil, errors.Trace(err)
		}
		reqs = append(reqs, rs...)
	}

	return reqs, nil
}
// buildRequest builds the checksum requests for one physical table (a plain
// table or a single partition): the record-data request followed by one
// request per public index. Panics if the restore table has an index the
// backup table lacks.
func buildRequest(
	tableInfo *model.TableInfo,
	tableID int64,
	oldTable *metautil.Table,
	oldTableID int64,
	startTS uint64,
	concurrency uint,
	oldKeyspace []byte,
	newKeyspace []byte,
	resourceGroupName, explicitRequestSourceType string,
) ([]*kv.Request, error) {
	reqs := make([]*kv.Request, 0)
	req, err := buildTableRequest(tableInfo, tableID, oldTable, oldTableID, startTS, concurrency,
		oldKeyspace, newKeyspace, resourceGroupName, explicitRequestSourceType)
	if err != nil {
		return nil, errors.Trace(err)
	}
	reqs = append(reqs, req)

	for _, indexInfo := range tableInfo.Indices {
		// Only public (fully built) indexes participate in the checksum.
		if indexInfo.State != model.StatePublic {
			continue
		}
		var oldIndexInfo *model.IndexInfo
		if oldTable != nil {
			// Match indexes between old and new table by name.
			for _, oldIndex := range oldTable.Info.Indices {
				if oldIndex.Name == indexInfo.Name {
					oldIndexInfo = oldIndex
					break
				}
			}
			if oldIndexInfo == nil {
				log.Panic("index not found in origin table, "+
					"please check the restore table has the same index info with origin table",
					zap.Int64("table id", tableID),
					zap.Stringer("table name", tableInfo.Name),
					zap.Int64("origin table id", oldTableID),
					zap.Stringer("origin table name", oldTable.Info.Name),
					zap.Stringer("index name", indexInfo.Name))
			}
		}
		req, err = buildIndexRequest(
			tableID, indexInfo, oldTableID, oldIndexInfo, startTS, concurrency,
			oldKeyspace, newKeyspace, resourceGroupName, explicitRequestSourceType)
		if err != nil {
			return nil, errors.Trace(err)
		}
		reqs = append(reqs, req)
	}

	return reqs, nil
}
// buildTableRequest constructs the CRC64-XOR checksum coprocessor request
// over a table's record data. When oldTable is non-nil a rewrite rule maps
// the backup cluster's record-key prefix onto the restore cluster's.
func buildTableRequest(
	tableInfo *model.TableInfo,
	tableID int64,
	oldTable *metautil.Table,
	oldTableID int64,
	startTS uint64,
	concurrency uint,
	oldKeyspace []byte,
	newKeyspace []byte,
	resourceGroupName, explicitRequestSourceType string,
) (*kv.Request, error) {
	var rule *tipb.ChecksumRewriteRule
	if oldTable != nil {
		rule = &tipb.ChecksumRewriteRule{
			OldPrefix: append(append([]byte{}, oldKeyspace...), tablecodec.GenTableRecordPrefix(oldTableID)...),
			NewPrefix: append(append([]byte{}, newKeyspace...), tablecodec.GenTableRecordPrefix(tableID)...),
		}
	}

	checksum := &tipb.ChecksumRequest{
		ScanOn: tipb.ChecksumScanOn_Table,
		Algorithm: tipb.ChecksumAlgorithm_Crc64_Xor,
		Rule: rule,
	}

	var ranges []*ranger.Range
	if tableInfo.IsCommonHandle {
		ranges = ranger.FullNotNullRange()
	} else {
		ranges = ranger.FullIntRange(false)
	}

	var builder distsql.RequestBuilder
	// Use low priority to reducing impact to other requests.
	builder.Request.Priority = kv.PriorityLow
	return builder.SetHandleRanges(nil, tableID, tableInfo.IsCommonHandle, ranges).
		SetStartTS(startTS).
		SetChecksumRequest(checksum).
		SetConcurrency(int(concurrency)).
		SetResourceGroupName(resourceGroupName).
		SetExplicitRequestSourceType(explicitRequestSourceType).
		Build()
}
// buildIndexRequest constructs the CRC64-XOR checksum coprocessor request
// over a single index. When oldIndexInfo is non-nil a rewrite rule maps the
// backup cluster's index-key prefix onto the restore cluster's.
func buildIndexRequest(
	tableID int64,
	indexInfo *model.IndexInfo,
	oldTableID int64,
	oldIndexInfo *model.IndexInfo,
	startTS uint64,
	concurrency uint,
	oldKeyspace []byte,
	newKeyspace []byte,
	// renamed from ExplicitRequestSourceType: parameters are local names and
	// should be lowerCamelCase, consistent with buildTableRequest.
	resourceGroupName, explicitRequestSourceType string,
) (*kv.Request, error) {
	var rule *tipb.ChecksumRewriteRule
	if oldIndexInfo != nil {
		rule = &tipb.ChecksumRewriteRule{
			OldPrefix: append(append([]byte{}, oldKeyspace...),
				tablecodec.EncodeTableIndexPrefix(oldTableID, oldIndexInfo.ID)...),
			NewPrefix: append(append([]byte{}, newKeyspace...),
				tablecodec.EncodeTableIndexPrefix(tableID, indexInfo.ID)...),
		}
	}

	checksum := &tipb.ChecksumRequest{
		ScanOn:    tipb.ChecksumScanOn_Index,
		Algorithm: tipb.ChecksumAlgorithm_Crc64_Xor,
		Rule:      rule,
	}

	ranges := ranger.FullRange()

	var builder distsql.RequestBuilder
	// Use low priority to reduce impact on other requests.
	builder.Request.Priority = kv.PriorityLow
	return builder.SetIndexRanges(nil, tableID, indexInfo.ID, ranges).
		SetStartTS(startTS).
		SetChecksumRequest(checksum).
		SetConcurrency(int(concurrency)).
		SetResourceGroupName(resourceGroupName).
		SetExplicitRequestSourceType(explicitRequestSourceType).
		Build()
}
// sendChecksumRequest executes one checksum request via distsql and folds
// every partial ChecksumResponse chunk into a single aggregated response.
// The deferred Close error, if any, replaces the function's error via the
// named return.
func sendChecksumRequest(
	ctx context.Context, client kv.Client, req *kv.Request, vars *kv.Variables,
) (resp *tipb.ChecksumResponse, err error) {
	res, err := distsql.Checksum(ctx, client, req, vars)
	if err != nil {
		return nil, errors.Trace(err)
	}
	defer func() {
		// NOTE(review): this overwrites any earlier err with the Close error.
		if err1 := res.Close(); err1 != nil {
			err = err1
		}
	}()

	resp = &tipb.ChecksumResponse{}
	for {
		data, err := res.NextRaw(ctx)
		if err != nil {
			return nil, errors.Trace(err)
		}
		// nil data marks the end of the result stream.
		if data == nil {
			break
		}
		checksum := &tipb.ChecksumResponse{}
		if err = checksum.Unmarshal(data); err != nil {
			return nil, errors.Trace(err)
		}
		updateChecksumResponse(resp, checksum)
	}

	return resp, nil
}
// updateChecksumResponse folds update into resp: checksums combine by XOR,
// key and byte counters by addition.
func updateChecksumResponse(resp, update *tipb.ChecksumResponse) {
	resp.Checksum ^= update.Checksum
	resp.TotalKvs += update.TotalKvs
	resp.TotalBytes += update.TotalBytes
}
// Executor is a checksum executor: a prepared batch of checksum requests
// plus the backoff weight applied when retrying them.
type Executor struct {
	reqs []*kv.Request
	backoffWeight int
}

// Len returns the total number of checksum requests.
func (exec *Executor) Len() int {
	return len(exec.reqs)
}
// Each executes the function on each request in the executor, stopping at
// the first error.
func (exec *Executor) Each(f func(*kv.Request) error) error {
	for _, req := range exec.reqs {
		err := f(req)
		if err != nil {
			return errors.Trace(err)
		}
	}
	return nil
}
// RawRequests extracts the raw requests associated with this executor by
// unmarshaling each request's payload back into a tipb.ChecksumRequest.
// This is mainly used for debugging only.
func (exec *Executor) RawRequests() ([]*tipb.ChecksumRequest, error) {
	res := make([]*tipb.ChecksumRequest, 0, len(exec.reqs))
	for _, req := range exec.reqs {
		rawReq := new(tipb.ChecksumRequest)
		if err := proto.Unmarshal(req.Data, rawReq); err != nil {
			return nil, errors.Trace(err)
		}
		res = append(res, rawReq)
	}
	return res, nil
}
// Execute runs every checksum request with retry/backoff, aggregates the
// responses into one ChecksumResponse, and calls updateFn after each request
// completes (e.g. for progress reporting). The final error also reflects
// context cancellation, since the coprocessor may return partial results
// silently when the context is done.
func (exec *Executor) Execute(
	ctx context.Context,
	client kv.Client,
	updateFn func(),
) (*tipb.ChecksumResponse, error) {
	checksumResp := &tipb.ChecksumResponse{}
	checksumBackoffer := utils.InitialRetryState(utils.ChecksumRetryTime,
		utils.ChecksumWaitInterval, utils.ChecksumMaxWaitInterval)
	for _, req := range exec.reqs {
		// Pointer to SessionVars.Killed
		// Killed is a flag to indicate that this query is killed.
		//
		// It is useful in TiDB, however, it's a place holder in BR.
		killed := uint32(0)
		var (
			resp *tipb.ChecksumResponse
			err error
		)
		err = utils.WithRetry(ctx, func() error {
			vars := kv.NewVariables(&killed)
			if exec.backoffWeight > 0 {
				vars.BackOffWeight = exec.backoffWeight
			}
			resp, err = sendChecksumRequest(ctx, client, req, vars)
			failpoint.Inject("checksumRetryErr", func(val failpoint.Value) {
				// first time reach here. return error
				if val.(bool) {
					err = errors.New("inject checksum error")
				}
			})
			if err != nil {
				return errors.Trace(err)
			}
			return nil
		}, &checksumBackoffer)
		if err != nil {
			return nil, errors.Trace(err)
		}
		updateChecksumResponse(checksumResp, resp)
		updateFn()
	}
	return checksumResp, checkContextDone(ctx)
}
// The coprocessor won't return the error if the context is done,
// so sometimes BR would get the incomplete result.
// checkContextDone makes sure the result is not affected by CONTEXT DONE.
func checkContextDone(ctx context.Context) error {
	ctxErr := ctx.Err()
	if ctxErr != nil {
		return errors.Annotate(ctxErr, "context is cancelled by other error")
	}
	return nil
}
|
package rss
import(
"encoding/xml"
"encoding/json"
)
// RSS is the root element of an RSS 2.0 feed.
type RSS struct {
	Channel Channel `xml:"channel" json:"channel"`
}

// Channel holds the feed-level metadata and the list of items.
type Channel struct {
	Title string `xml:"title" json:"title"`
	Link string `xml:"link" json:"link"`
	Description string `xml:"description" json:"description"`
	Language string `xml:"language" json:"language"`
	Copyright string `xml:"copyright" json:"copyright"`
	LastBuildDate string `xml:"lastBuildDate" json:"lastBuildDate"`
	Item []Item `xml:"item" json:"item"`
}

// ItemEnclosure describes an <enclosure> attachment (URL and MIME type).
type ItemEnclosure struct {
	URL string `xml:"url,attr" json:"url"`
	Type string `xml:"type,attr" json:"type"`
}

// Item is a single feed entry, including Media RSS and content:encoded
// namespace extensions.
type Item struct {
	Title string `xml:"title" json:"title"`
	Link string `xml:"link" json:"link"`
	Description string `xml:"description" json:"description,omitempty"`
	Author string `xml:"author" json:"author,omitempty"`
	Comments string `xml:"comments" json:"comments,omitempty"`
	PubDate string `xml:"pubDate" json:"pubDate"`
	// NOTE(review): JSON tag is "uid", not "guid" — looks like a typo, but
	// changing it would alter the emitted JSON; confirm with consumers first.
	GUID string `xml:"guid" json:"uid"`
	Category []string `xml:"category" json:"category"`
	Enclosure []ItemEnclosure `xml:"enclosure" json:"enclosure,omitempty"`
	Media Media `xml:"http://search.yahoo.com/mrss/ content" json:"media,omitempty"`
	Content string `xml:"http://purl.org/rss/1.0/modules/content encoded" json:"content"`
}

// Media mirrors the Media RSS <media:content> element.
type Media struct {
	Url string `xml:"url,attr" json:"url"`
	Medium string `xml:"medium,attr" json:"medium"`
	Credit string `xml:"credit" json:"credit"`
	Description string `xml:"description" json:"description"`
}
// Unmarshal parses the RSS XML in b and re-encodes it as JSON into v,
// which should be a pointer to the destination value (as with
// json.Unmarshal). The XML is first decoded into the RSS struct, then
// round-tripped through JSON using the struct's json tags.
func Unmarshal(b []byte, v interface{}) error {
	var feed RSS
	if err := xml.Unmarshal(b, &feed); err != nil {
		return err
	}
	// gofmt fix: the original had a stray trailing semicolon here.
	jb, err := json.Marshal(feed)
	if err != nil {
		return err
	}
	// &v preserved from the original: json unwraps the interface and writes
	// through the pointer the caller stored in v.
	return json.Unmarshal(jb, &v)
}
|
package main
import "fmt"
// main demonstrates basic map usage: declaration with make, assignment,
// update, literal initialisation, the comma-ok lookup idiom, and deletion.
func main() {
	// Declare a map with make: map[keyType]valueType.
	nameAgeMap := make(map[string]int)
	// Assign values.
	nameAgeMap["wangwu"] = 11
	nameAgeMap["liliu"] = 22
	fmt.Println("liliu old age: ", nameAgeMap["liliu"])
	// Overwrite an existing key.
	nameAgeMap["liliu"] = 33
	fmt.Println("liliu new age: ", nameAgeMap["liliu"])
	// Initialise a map with a composite literal.
	priceMap := map[string]float32{"java": 4.5, "go": 10, "php": 3}
	// A map lookup yields two values: the value and an "ok" flag that is
	// true when the key exists.
	value1, ok1 := priceMap["go"]
	fmt.Println("val=", value1)
	fmt.Println("ok=", ok1)
	value2, ok2 := priceMap["go2"]
	fmt.Println("val=", value2)
	fmt.Println("ok=", ok2)
	// Remove a key with the delete builtin.
	delete(priceMap, "go")
	value3, ok3 := priceMap["go"]
	fmt.Println("val=", value3)
	fmt.Println("ok=", ok3)
}
|
package main
import (
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"time"
"net"
"crypto/tls"
)
// count tracks the number of requests served by handler.
// NOTE(review): incremented without synchronization; concurrent requests
// race on this counter — consider sync/atomic if accuracy matters.
var count int64
// argsHandler writes the process's command-line arguments, concatenated
// with no separator, to the response.
func argsHandler(w http.ResponseWriter, r *http.Request) {
	var out string
	for _, arg := range os.Args {
		out += arg
	}
	fmt.Fprint(w, out)
}
// reqTestHandler performs an outbound GET to https://ops.city and relays the
// response body (or the error text) to the client, to verify egress works.
func reqTestHandler(w http.ResponseWriter, r *http.Request) {
	resp, err := http.Get("https://ops.city")
	if err != nil {
		fmt.Println(err)
		fmt.Fprint(w, err)
		return
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		fmt.Println(err)
	}
	// On read error body is empty/partial; we still write what we have.
	fmt.Fprint(w, string(body))
}
// envHandler writes every environment variable ("KEY=value" entries),
// concatenated with no separator, to the response.
func envHandler(w http.ResponseWriter, r *http.Request) {
	var out string
	for _, kv := range os.Environ() {
		out += kv
	}
	fmt.Fprint(w, out)
}
// tsHandler writes the current server time to the response.
func tsHandler(w http.ResponseWriter, r *http.Request) {
	fmt.Fprintf(w, "%s\n", time.Now())
}

// handler writes a greeting plus a request counter.
// NOTE(review): count is read and incremented without synchronization —
// concurrent requests race here.
func handler(w http.ResponseWriter, r *http.Request) {
	fmt.Fprintf(w, "unibooty %d", count)
	count++
}
// filePersistenceHandler appends "something" to a.log, syncs it, then reads
// the whole file back to the client — a smoke test that the filesystem
// persists across requests. Any I/O failure panics (crashing the request).
func filePersistenceHandler(w http.ResponseWriter, r *http.Request) {
	f, err := os.OpenFile("a.log", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		panic(err)
	}
	if _, err := f.Write([]byte("something")); err != nil {
		panic(err)
	}
	// Force the write to disk before reading back.
	if err := f.Sync(); err != nil {
		panic(err)
	}
	if err := f.Close(); err != nil {
		panic(err)
	}
	dat, err := ioutil.ReadFile("a.log")
	if err != nil {
		panic(err)
	}
	fmt.Fprint(w, string(dat))
}
// main registers the demo handlers and serves them on port 8080, disabling
// TLS verification for outbound requests (see reqTestHandler). The ready
// channel delays the startup message until the listener is bound.
// NOTE(review): log.Fatal never returns, so `done <- true` is unreachable
// and main blocks on <-done forever — presumably intentional to keep the
// process alive.
func main() {
	port := "8080"
	http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
	http.HandleFunc("/", handler)
	http.HandleFunc("/req", reqTestHandler)
	http.HandleFunc("/args", argsHandler)
	http.HandleFunc("/env", envHandler)
	http.HandleFunc("/ts", tsHandler)
	http.HandleFunc("/file", filePersistenceHandler)
	done := make(chan bool)
	ready := make(chan bool)
	go func() {
		listener, err := net.Listen("tcp", ":" + port)
		if err != nil {
			panic(err)
		}
		ready <- true
		log.Fatal(http.Serve(listener, nil))
		done <- true
	}()
	<-ready
	fmt.Printf("Server started on port %v\n", port)
	<-done
}
|
package v1
import "github.com/gin-gonic/gin"
// Tag groups the HTTP handlers for tag resources.
type Tag struct{}

// NewTag returns a new Tag handler set.
func NewTag() *Tag {
	return &Tag{}
}

// Get handles fetching a single tag. (Empty placeholder.)
func (t *Tag) Get(c *gin.Context) {}

// List handles listing tags. (Empty placeholder.)
func (t *Tag) List(c *gin.Context) {}

// Create handles creating a tag. (Empty placeholder.)
func (t *Tag) Create(c *gin.Context) {}

// Update handles updating a tag. (Empty placeholder.)
func (t *Tag) Update(c *gin.Context) {}

// Delete handles deleting a tag. (Empty placeholder.)
func (t *Tag) Delete(c *gin.Context) {}
|
package main
import "fmt"
import "testing"
// NOTE(review): these tests only print check()'s result and never assert,
// so they can't fail; consider comparing against expected booleans.

// TestCheck: a single matched pair of parentheses.
func TestCheck(t *testing.T) {
	fmt.Println("Test: Parenthesis")
	f := "()"
	fmt.Println(check(f))
	fmt.Println()
}

// TestCheck2: all three bracket kinds, sequentially matched.
func TestCheck2(t *testing.T) {
	fmt.Println("Test: All brackets")
	f := "()[]{}"
	fmt.Println(check(f))
	fmt.Println()
}

// TestCheck3: properly nested brackets.
func TestCheck3(t *testing.T) {
	fmt.Println("Test: 2 Brackets")
	f := "{[]}"
	fmt.Println(check(f))
	fmt.Println()
}

// TestCheck4: mismatched and interleaved brackets (should be rejected).
func TestCheck4(t *testing.T) {
	fmt.Println("Fail brackets")
	f := "(]"
	fmt.Println(check(f))
	fmt.Println()
	f = "([)]"
	fmt.Println(check(f))
	fmt.Println()
}

// TestCheck5: brackets embedded in other characters.
func TestCheck5(t *testing.T) {
	fmt.Println("bracket with words")
	f := "r4*{3d-[f2%]s}1f"
	fmt.Println(check(f))
	fmt.Println()
}

// TestCheck6: labelled "Null string" but actually a single space —
// presumably meant to test input with no brackets at all.
func TestCheck6(t *testing.T) {
	fmt.Println("Null string")
	f := " "
	fmt.Println(check(f))
	fmt.Println()
}
|
package server
import (
"net/http"
"github.com/cinus-ue/securekit/common/errorx"
log "github.com/cinus-ue/securekit/common/log15"
"github.com/cinus-ue/securekit/internal/webapps/fileserver/param"
"github.com/cinus-ue/securekit/internal/webapps/fileserver/tpl"
"github.com/cinus-ue/securekit/internal/webapps/fileserver/user"
"github.com/cinus-ue/securekit/internal/webapps/fileserver/util"
)
// defaultHandler answers 404 for paths no alias claims.
var defaultHandler = http.NotFoundHandler()

// aliasHandler binds one URL alias to the handler that serves it.
type aliasHandler struct {
	alias alias
	handler http.Handler
}

// multiplexer routes requests across an ordered list of alias handlers.
type multiplexer struct {
	aliasHandlers []aliasHandler
}
// ServeHTTP dispatches to the first alias whose URL path matches (or is an
// ancestor of) the cleaned request path, falling back to the 404 handler.
func (mux multiplexer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	rawReqPath := util.CleanUrlPath(r.URL.Path)
	for _, aliasHandler := range mux.aliasHandlers {
		if aliasHandler.alias.isMatch(rawReqPath) || aliasHandler.alias.isPredecessorOf(rawReqPath) {
			aliasHandler.handler.ServeHTTP(w, r)
			return
		}
	}
	defaultHandler.ServeHTTP(w, r)
}
// NewMultiplexer builds the top-level handler from the configured aliases:
// a 404 handler when there are none, the root alias's handler directly when
// "/" is the only alias, otherwise a multiplexer over all alias handlers.
func NewMultiplexer(
	p *param.Param,
	users user.List,
	theme tpl.Theme,
	logger log.Logger,
	errHandler *errorx.ErrHandler,
) http.Handler {
	aliases := newAliases(p.Aliases, p.Binds)

	if len(aliases) == 0 {
		return defaultHandler
	}

	if len(aliases) == 1 {
		alias, hasRootAlias := aliases.byUrlPath("/")
		if hasRootAlias {
			// Single root alias: no routing needed, serve it directly.
			return newHandler(p, alias.fsPath(), alias.urlPath(), aliases, users, theme, logger, errHandler)
		}
	}

	aliasHandlers := make([]aliasHandler, len(aliases))
	for i, alias := range aliases {
		aliasHandlers[i] = aliasHandler{
			alias: alias,
			handler: newHandler(p, alias.fsPath(), alias.urlPath(), aliases, users, theme, logger, errHandler),
		}
	}
	return multiplexer{aliasHandlers}
}
|
package leetcode
import (
"reflect"
"testing"
)
// TestFindOcurrences checks findOcurrences ("occurrences after bigram")
// against the LeetCode 1078 examples, including the repeated-word case
// where first and second are the same token.
func TestFindOcurrences(t *testing.T) {
	ans1 := findOcurrences("alice is a good girl she is a good student", "a", "good")
	if !reflect.DeepEqual(ans1, []string{"girl", "student"}) {
		t.Fatal()
	}

	ans2 := findOcurrences("we will we will rock you", "we", "will")
	if !reflect.DeepEqual(ans2, []string{"we", "rock"}) {
		t.Fatal()
	}

	ans3 := findOcurrences("we we we rock you", "we", "we")
	if !reflect.DeepEqual(ans3, []string{"we", "rock"}) {
		t.Fatal()
	}
}
|
package funciones
import (
"math"
"strconv"
"fmt"
)
const (
	// Approximate radius of the Earth, in metres.
	radio = 6373000
	// Heuristic tuning parameters intended to be adjusted experimentally.
	P = 0.95
	EPSILON = 0.0001
	EPSILONP = 0.001
	L = 1000
	PHI = .90
)
// Completa builds the complete (fully connected) weight matrix over the
// given cities, where entry [i][j] is pesoAumentado(cis[i], cis[j], max) —
// the (possibly penalized) edge weight between cities i and j.
func Completa(cis []Ciudad, max float64) [][]float64 {
	var matriz = [][]float64{}
	for i := 0; i < len(cis); i++ {
		adyacentes := make([]float64, len(cis))
		for j := 0; j < len(cis); j++ {
			adyacentes[j] = pesoAumentado(cis[i], cis[j], max)
		}
		matriz = append(matriz, adyacentes)
	}
	return matriz
}
// radianes converts an angle given in degrees to radians.
func radianes(grados float64) float64 {
	return grados * math.Pi / 180
}
// obtenerA computes the "a" term of the haversine formula for the great-
// circle distance between cities u and v:
//   a = sin²(Δlat/2) + cos(latU)·cos(latV)·sin²(Δlon/2)
func obtenerA (u, v Ciudad) float64 {
	latV := radianes(v.latitude)
	lonV := radianes(v.longitude)
	latU := radianes(u.latitude)
	lonU := radianes(u.longitude)
	sin1 := math.Pow(math.Sin((latV-latU)/2), 2)
	sin2 := math.Pow(math.Sin((lonV-lonU)/2), 2)
	cos1 := math.Cos(latU)
	cos2 := math.Cos(latV)
	return sin1 + cos1 * cos2 * sin2
}
// distanciaNatural returns the great-circle distance in metres between the
// two cities, completing the haversine formula: d = R · 2·atan2(√a, √(1−a)).
func distanciaNatural(u, v Ciudad) float64 {
	a := obtenerA(u, v)
	c := 2 * math.Atan2(math.Sqrt(a),math.Sqrt(1-a))
	return radio * c
}
// copiarCiudades returns a shallow copy of the given slice of cities.
func copiarCiudades(cis []Ciudad) []Ciudad {
	nueva := make([]Ciudad, len(cis))
	copy(nueva, cis)
	return nueva
}
// PrettyPrint prints only the city IDs of the solution, space-separated,
// on a single line.
func PrettyPrint(cis []Ciudad) {
	s := ""
	for i := 0; i < len(cis); i++ {
		s += strconv.Itoa(cis[i].id) + " "
	}
	fmt.Println(s)
}
|
package example
// NOTE: THIS FILE WAS PRODUCED BY THE
// MSGP CODE GENERATION TOOL (github.com/tinylib/msgp)
// DO NOT EDIT
import (
"github.com/tinylib/msgp/msgp"
)
// DecodeMsg implements msgp.Decodable
func (z *MyEvalResult) DecodeMsg(dc *msgp.Reader) (err error) {
{
var ssz uint32
ssz, err = dc.ReadArrayHeader()
if err != nil {
return
}
if ssz != 2 {
err = msgp.ArrayError{Wanted: 2, Got: ssz}
return
}
}
z.S, err = dc.ReadString()
if err != nil {
return
}
z.I, err = dc.ReadInt()
if err != nil {
return
}
return
}
// EncodeMsg implements msgp.Encodable
// z is written as a fixed 2-element msgpack array: [S, I].
func (z MyEvalResult) EncodeMsg(en *msgp.Writer) (err error) {
	// array header, size 2 (0x92 is the msgpack fixarray header for 2 elements)
	err = en.Append(0x92)
	if err != nil {
		return err
	}
	err = en.WriteString(z.S)
	if err != nil {
		return
	}
	err = en.WriteInt(z.I)
	if err != nil {
		return
	}
	return
}
// MarshalMsg implements msgp.Marshaler
// It appends the msgpack encoding of z ([S, I]) to b and returns the result.
func (z MyEvalResult) MarshalMsg(b []byte) (o []byte, err error) {
	// Grow b up-front to the worst-case size so the appends never reallocate.
	o = msgp.Require(b, z.Msgsize())
	// array header, size 2 (0x92 is the msgpack fixarray header for 2 elements)
	o = append(o, 0x92)
	o = msgp.AppendString(o, z.S)
	o = msgp.AppendInt(o, z.I)
	return
}
// UnmarshalMsg implements msgp.Unmarshaler
// It decodes z from bts ([S, I] fixarray) and returns the unread remainder in o.
func (z *MyEvalResult) UnmarshalMsg(bts []byte) (o []byte, err error) {
	{
		// Validate the array header; any size other than 2 is a decode error.
		var ssz uint32
		ssz, bts, err = msgp.ReadArrayHeaderBytes(bts)
		if err != nil {
			return
		}
		if ssz != 2 {
			err = msgp.ArrayError{Wanted: 2, Got: ssz}
			return
		}
	}
	// Fields are decoded positionally: S first, then I.
	z.S, bts, err = msgp.ReadStringBytes(bts)
	if err != nil {
		return
	}
	z.I, bts, err = msgp.ReadIntBytes(bts)
	if err != nil {
		return
	}
	o = bts
	return
}
// Msgsize returns an upper bound on the encoded (msgpack) size of z in bytes:
// 1 byte of array header plus the string and int field estimates.
func (z MyEvalResult) Msgsize() (s int) {
	s = 1 + msgp.StringPrefixSize + len(z.S) + msgp.IntSize
	return
}
|
package window
import (
"testing"
)
// TestInitGlfw is a placeholder; it is skipped until a test is written.
func TestInitGlfw(t *testing.T) {
	t.Skip("Unimplemented")
}
// TestDummyKeyCallback is a placeholder; it is skipped until a test is written.
func TestDummyKeyCallback(t *testing.T) {
	t.Skip("Unimplemented")
}
// TestDummyMouseButtonCallback is a placeholder; it is skipped until a test is written.
func TestDummyMouseButtonCallback(t *testing.T) {
	t.Skip("Unimplemented")
}
|
// Pointers to a struct
// A pointer in the Go programming language (Golang) is a variable
// which is used to store the memory address of another variable.
// Golang program to illustrate
// the pointer to struct
package main
import "fmt"
// Employee holds a person's name parts plus their age and salary.
type Employee struct {
	firstName, lastName string
	age, salary         int
}
// main demonstrates accessing struct fields through a pointer.
func main() {
	// emp8 is a pointer to an Employee struct literal.
	emp8 := &Employee{"Sam", "Anderson", 55, 6000}
	// Go automatically dereferences struct pointers on field access, so
	// emp8.firstName is the idiomatic spelling of (*emp8).firstName —
	// both read the same field.
	fmt.Println("First Name:", emp8.firstName)
	fmt.Println("Age:", emp8.age)
}
|
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package smb
import (
"context"
"fmt"
"os"
"path/filepath"
"reflect"
"runtime"
"testing"
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/kubernetes-csi/csi-driver-smb/test/utils/testutil"
"github.com/stretchr/testify/assert"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// Shared fixtures for the controller tests below.
const (
	testServer    = "test-server/baseDir"          // smb source: server plus base directory
	testCSIVolume = "test-csi"                     // CSI volume name (also the sub-directory)
	testVolumeID  = "test-server/baseDir#test-csi" // volume ID format: <source>#<subDir>
)
// TestControllerGetCapabilities verifies the driver echoes back the
// capabilities it was configured with.
func TestControllerGetCapabilities(t *testing.T) {
	d := NewFakeDriver()
	controlCap := []*csi.ControllerServiceCapability{
		{
			Type: &csi.ControllerServiceCapability_Rpc{},
		},
	}
	d.Cap = controlCap
	req := csi.ControllerGetCapabilitiesRequest{}
	resp, err := d.ControllerGetCapabilities(context.Background(), &req)
	assert.NoError(t, err)
	assert.NotNil(t, resp)
	// assert.Equal takes (t, expected, actual); the original call had the
	// two swapped, which yields misleading failure diffs.
	assert.Equal(t, controlCap, resp.Capabilities)
}
// TestCreateVolume table-tests CreateVolume: a valid request (which must
// create the volume sub-directory on disk), a request with no name, and a
// request with an unknown parameter. Windows runs skip the strict
// response/error assertions because the mount error text is flaky there.
func TestCreateVolume(t *testing.T) {
	d := NewFakeDriver()
	// Setup workingMountDir. Fatalf (not Errorf) — continuing with an
	// empty workingMountDir would make every case below meaningless.
	workingMountDir, err := os.Getwd()
	if err != nil {
		t.Fatalf("failed to get current working directory")
	}
	d.workingMountDir = workingMountDir
	// Setup mounter
	mounter, err := NewFakeMounter()
	if err != nil {
		t.Fatalf("failed to get fake mounter: %v", err)
	}
	d.mounter = mounter
	sourceTest := testutil.GetWorkDirPath("test-csi", t)
	cases := []struct {
		name                     string
		req                      *csi.CreateVolumeRequest
		resp                     *csi.CreateVolumeResponse
		flakyWindowsErrorMessage string
		expectErr                bool
	}{
		{
			name: "valid defaults",
			req: &csi.CreateVolumeRequest{
				Name: testCSIVolume,
				VolumeCapabilities: []*csi.VolumeCapability{
					{
						AccessType: &csi.VolumeCapability_Mount{
							Mount: &csi.VolumeCapability_MountVolume{},
						},
						AccessMode: &csi.VolumeCapability_AccessMode{
							Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
						},
					},
				},
				Parameters: map[string]string{
					paramSource: testServer,
				},
				Secrets: map[string]string{
					usernameField: "test",
					passwordField: "test",
					domainField:   "test_doamin",
				},
			},
			resp: &csi.CreateVolumeResponse{
				Volume: &csi.Volume{
					VolumeId: testVolumeID,
					VolumeContext: map[string]string{
						paramSource: filepath.Join(testServer, testCSIVolume),
					},
				},
			},
			flakyWindowsErrorMessage: fmt.Sprintf("volume(vol_1##) mount \"test-server\" on %#v failed with "+
				"smb mapping failed with error: rpc error: code = Unknown desc = NewSmbGlobalMapping failed.",
				sourceTest),
		},
		{
			name: "name empty",
			req: &csi.CreateVolumeRequest{
				VolumeCapabilities: []*csi.VolumeCapability{
					{
						AccessType: &csi.VolumeCapability_Mount{
							Mount: &csi.VolumeCapability_MountVolume{},
						},
						AccessMode: &csi.VolumeCapability_AccessMode{
							Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
						},
					},
				},
				Parameters: map[string]string{
					paramSource: testServer,
				},
			},
			expectErr: true,
		},
		{
			name: "invalid create context",
			req: &csi.CreateVolumeRequest{
				Name: testCSIVolume,
				VolumeCapabilities: []*csi.VolumeCapability{
					{
						AccessType: &csi.VolumeCapability_Mount{
							Mount: &csi.VolumeCapability_MountVolume{},
						},
						AccessMode: &csi.VolumeCapability_AccessMode{
							Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
						},
					},
				},
				Parameters: map[string]string{
					"unknown-parameter": "foo",
				},
			},
			expectErr: true,
		},
	}
	for _, test := range cases {
		test := test //pin
		t.Run(test.name, func(t *testing.T) {
			// Setup: ensure the staging directory exists.
			_ = os.MkdirAll(filepath.Join(d.workingMountDir, testCSIVolume), os.ModePerm)
			// Run
			resp, err := d.CreateVolume(context.TODO(), test.req)
			// Verify
			if test.expectErr && err == nil {
				t.Errorf("test %q failed; got success", test.name)
			}
			// separate assertion for flaky error messages
			if test.flakyWindowsErrorMessage != "" && runtime.GOOS == "windows" {
				fmt.Println("Skipping checks on Windows ENV")
			} else {
				if !test.expectErr && err != nil {
					t.Errorf("test %q failed: %v", test.name, err)
				}
				if !reflect.DeepEqual(resp, test.resp) {
					t.Errorf("test %q failed: got resp %+v, expected %+v", test.name, resp, test.resp)
				}
				if !test.expectErr {
					// The volume must materialize as a directory under the working mount dir.
					info, err := os.Stat(filepath.Join(d.workingMountDir, test.req.Name, test.req.Name))
					if err != nil {
						t.Errorf("test %q failed: couldn't find volume subdirectory: %v", test.name, err)
					}
					if !info.IsDir() {
						t.Errorf("test %q failed: subfile not a directory", test.name)
					}
				}
			}
		})
	}
}
// TestDeleteVolume table-tests DeleteVolume: a request with no volume ID
// (must fail with InvalidArgument) and a valid request (must succeed and
// remove the on-disk volume sub-directory). All assertions are skipped on
// Windows where the fake mount behavior differs.
func TestDeleteVolume(t *testing.T) {
	d := NewFakeDriver()
	// Setup workingMountDir. Fatalf (not Errorf) — continuing with an
	// empty workingMountDir would make the cases below meaningless.
	workingMountDir, err := os.Getwd()
	if err != nil {
		t.Fatalf("failed to get current working directory")
	}
	d.workingMountDir = workingMountDir
	// Setup mounter
	mounter, err := NewFakeMounter()
	if err != nil {
		t.Fatalf("failed to get fake mounter: %v", err)
	}
	d.mounter = mounter
	cases := []struct {
		desc        string
		req         *csi.DeleteVolumeRequest
		resp        *csi.DeleteVolumeResponse
		expectedErr error
	}{
		{
			desc:        "Volume ID missing",
			req:         &csi.DeleteVolumeRequest{},
			resp:        nil,
			expectedErr: status.Error(codes.InvalidArgument, "Volume ID missing in request"),
		},
		{
			desc: "Valid request",
			req: &csi.DeleteVolumeRequest{
				VolumeId: testVolumeID,
				Secrets: map[string]string{
					usernameField: "test",
					passwordField: "test",
					domainField:   "test_doamin",
				},
			},
			resp:        &csi.DeleteVolumeResponse{},
			expectedErr: nil,
		},
	}
	for _, test := range cases {
		test := test //pin
		t.Run(test.desc, func(t *testing.T) {
			// Setup: recreate the volume directory and a file inside it so a
			// successful delete has something to remove.
			_ = os.MkdirAll(filepath.Join(d.workingMountDir, testCSIVolume), os.ModePerm)
			_, _ = os.Create(filepath.Join(d.workingMountDir, testCSIVolume, testCSIVolume))
			// Run
			resp, err := d.DeleteVolume(context.TODO(), test.req)
			// Verify
			if runtime.GOOS == "windows" {
				// skip checks
				fmt.Println("Skipping checks on Windows ENV")
			} else {
				if test.expectedErr == nil && err != nil {
					t.Errorf("test %q failed: %v", test.desc, err)
				}
				if test.expectedErr != nil && err == nil {
					t.Errorf("test %q failed; expected error %v, got success", test.desc, test.expectedErr)
				}
				if !reflect.DeepEqual(resp, test.resp) {
					t.Errorf("test %q failed: got resp %+v, expected %+v", test.desc, resp, test.resp)
				}
				if _, err := os.Stat(filepath.Join(d.workingMountDir, testCSIVolume, testCSIVolume)); test.expectedErr == nil && !os.IsNotExist(err) {
					t.Errorf("test %q failed: expected volume subdirectory deleted, it still exists", test.desc)
				}
			}
		})
	}
}
// TestValidateVolumeCapabilities covers the input-validation paths of
// ValidateVolumeCapabilities: missing volume ID, missing capabilities,
// and a fully valid request.
func TestValidateVolumeCapabilities(t *testing.T) {
	d := NewFakeDriver()
	stdVolCap := []*csi.VolumeCapability{
		{
			AccessType: &csi.VolumeCapability_Mount{
				Mount: &csi.VolumeCapability_MountVolume{},
			},
			AccessMode: &csi.VolumeCapability_AccessMode{
				Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
			},
		},
	}
	tests := []struct {
		desc        string
		req         csi.ValidateVolumeCapabilitiesRequest
		expectedErr error
	}{
		{
			desc:        "Volume ID missing",
			req:         csi.ValidateVolumeCapabilitiesRequest{},
			expectedErr: status.Error(codes.InvalidArgument, "Volume ID missing in request"),
		},
		{
			desc:        "Volume capabilities missing",
			req:         csi.ValidateVolumeCapabilitiesRequest{VolumeId: "vol_1"},
			expectedErr: status.Error(codes.InvalidArgument, "Volume capabilities missing in request"),
		},
		{
			desc: "Valid request",
			req: csi.ValidateVolumeCapabilitiesRequest{
				VolumeId:           "vol_1#f5713de20cde511e8ba4900#fileshare#diskname#",
				VolumeCapabilities: stdVolCap,
			},
			expectedErr: nil,
		},
	}
	for i := range tests {
		_, err := d.ValidateVolumeCapabilities(context.Background(), &tests[i].req)
		if !reflect.DeepEqual(err, tests[i].expectedErr) {
			t.Errorf("Unexpected error: %v", err)
		}
	}
}
// TestControllerPublishVolume expects the unimplemented RPC to return a
// nil response and codes.Unimplemented.
func TestControllerPublishVolume(t *testing.T) {
	d := NewFakeDriver()
	resp, err := d.ControllerPublishVolume(context.Background(), &csi.ControllerPublishVolumeRequest{})
	assert.Nil(t, resp)
	expectedErr := status.Error(codes.Unimplemented, "")
	if !reflect.DeepEqual(err, expectedErr) {
		t.Errorf("Unexpected error: %v", err)
	}
}
// TestControllerUnpublishVolume expects the unimplemented RPC to return a
// nil response and codes.Unimplemented.
func TestControllerUnpublishVolume(t *testing.T) {
	d := NewFakeDriver()
	resp, err := d.ControllerUnpublishVolume(context.Background(), &csi.ControllerUnpublishVolumeRequest{})
	assert.Nil(t, resp)
	expectedErr := status.Error(codes.Unimplemented, "")
	if !reflect.DeepEqual(err, expectedErr) {
		t.Errorf("Unexpected error: %v", err)
	}
}
// TestGetCapacity expects the unimplemented RPC to return a nil response
// and codes.Unimplemented.
func TestGetCapacity(t *testing.T) {
	d := NewFakeDriver()
	resp, err := d.GetCapacity(context.Background(), &csi.GetCapacityRequest{})
	assert.Nil(t, resp)
	expectedErr := status.Error(codes.Unimplemented, "")
	if !reflect.DeepEqual(err, expectedErr) {
		t.Errorf("Unexpected error: %v", err)
	}
}
// TestListVolumes expects the unimplemented RPC to return a nil response
// and codes.Unimplemented.
func TestListVolumes(t *testing.T) {
	d := NewFakeDriver()
	resp, err := d.ListVolumes(context.Background(), &csi.ListVolumesRequest{})
	assert.Nil(t, resp)
	expectedErr := status.Error(codes.Unimplemented, "")
	if !reflect.DeepEqual(err, expectedErr) {
		t.Errorf("Unexpected error: %v", err)
	}
}
// TestControllerExpandVolume expects the unimplemented RPC to return a
// nil response and codes.Unimplemented.
func TestControllerExpandVolume(t *testing.T) {
	d := NewFakeDriver()
	resp, err := d.ControllerExpandVolume(context.Background(), &csi.ControllerExpandVolumeRequest{})
	assert.Nil(t, resp)
	expectedErr := status.Error(codes.Unimplemented, "")
	if !reflect.DeepEqual(err, expectedErr) {
		t.Errorf("Unexpected error: %v", err)
	}
}
// TestControllerGetVolume expects the unimplemented RPC to return a nil
// response and codes.Unimplemented.
func TestControllerGetVolume(t *testing.T) {
	d := NewFakeDriver()
	resp, err := d.ControllerGetVolume(context.Background(), &csi.ControllerGetVolumeRequest{})
	assert.Nil(t, resp)
	expectedErr := status.Error(codes.Unimplemented, "")
	if !reflect.DeepEqual(err, expectedErr) {
		t.Errorf("Unexpected error: %v", err)
	}
}
// TestCreateSnapshot expects the unimplemented RPC to return a nil
// response and codes.Unimplemented.
func TestCreateSnapshot(t *testing.T) {
	d := NewFakeDriver()
	resp, err := d.CreateSnapshot(context.Background(), &csi.CreateSnapshotRequest{})
	assert.Nil(t, resp)
	expectedErr := status.Error(codes.Unimplemented, "")
	if !reflect.DeepEqual(err, expectedErr) {
		t.Errorf("Unexpected error: %v", err)
	}
}
// TestDeleteSnapshot expects the unimplemented RPC to return a nil
// response and codes.Unimplemented.
func TestDeleteSnapshot(t *testing.T) {
	d := NewFakeDriver()
	resp, err := d.DeleteSnapshot(context.Background(), &csi.DeleteSnapshotRequest{})
	assert.Nil(t, resp)
	expectedErr := status.Error(codes.Unimplemented, "")
	if !reflect.DeepEqual(err, expectedErr) {
		t.Errorf("Unexpected error: %v", err)
	}
}
// TestListSnapshots expects the unimplemented RPC to return a nil
// response and codes.Unimplemented.
func TestListSnapshots(t *testing.T) {
	d := NewFakeDriver()
	resp, err := d.ListSnapshots(context.Background(), &csi.ListSnapshotsRequest{})
	assert.Nil(t, resp)
	expectedErr := status.Error(codes.Unimplemented, "")
	if !reflect.DeepEqual(err, expectedErr) {
		t.Errorf("Unexpected error: %v", err)
	}
}
// TestGetSmbVolFromID checks that getSmbVolFromID splits a volume ID of the
// form <source>#<subDir> (including sources with nested base directories)
// and rejects IDs without the '#' separator.
func TestGetSmbVolFromID(t *testing.T) {
	cases := []struct {
		desc      string
		volumeID  string
		source    string
		subDir    string
		expectErr bool
	}{
		{
			desc:      "correct volume id",
			volumeID:  "smb-server.default.svc.cluster.local/share#pvc-4729891a-f57e-4982-9c60-e9884af1be2f",
			source:    "//smb-server.default.svc.cluster.local/share",
			subDir:    "pvc-4729891a-f57e-4982-9c60-e9884af1be2f",
			expectErr: false,
		},
		{
			desc:      "correct volume id with multiple base directories",
			volumeID:  "smb-server.default.svc.cluster.local/share/dir1/dir2#pvc-4729891a-f57e-4982-9c60-e9884af1be2f",
			source:    "//smb-server.default.svc.cluster.local/share/dir1/dir2",
			subDir:    "pvc-4729891a-f57e-4982-9c60-e9884af1be2f",
			expectErr: false,
		},
		{
			desc:      "incorrect volume id",
			volumeID:  "smb-server.default.svc.cluster.local/share",
			source:    "//smb-server.default.svc.cluster.local/share",
			subDir:    "pvc-4729891a-f57e-4982-9c60-e9884af1be2f",
			expectErr: true,
		},
	}
	for _, test := range cases {
		test := test //pin
		t.Run(test.desc, func(t *testing.T) {
			smbVolume, err := getSmbVolFromID(test.volumeID)
			if !test.expectErr {
				// assert.Equal takes (t, expected, actual); the original
				// calls had the arguments swapped, which yields misleading
				// failure diffs.
				assert.Equal(t, test.source, smbVolume.sourceField)
				assert.Equal(t, test.subDir, smbVolume.subDir)
				assert.Nil(t, err)
			} else {
				assert.NotNil(t, err)
			}
		})
	}
}
|
package main
import "fmt"
// main solves the classic snail-climb puzzle: climbing upSpeed per day and
// sliding back downSpeed per night, print how many days it takes to reach
// desiredHeight.
func main() {
	const (
		upSpeed       = 6
		downSpeed     = 5
		desiredHeight = 10
	)
	// Day 1: the snail climbs before any overnight slide happens.
	total := upSpeed
	count := 1
	// Each further day: slide back overnight, then climb again.
	for total < desiredHeight {
		total += upSpeed - downSpeed
		count++
	}
	fmt.Println(count)
}
|
package async
import "net/http"
// NewClient wraps the given http.Client in a Client whose response and
// error channels are buffered to bufferSize.
func NewClient(client *http.Client, bufferSize int) *Client {
	c := &Client{Client: client}
	c.Resp = make(chan *http.Response, bufferSize)
	c.Err = make(chan error, bufferSize)
	return c
}
// Client embeds an *http.Client and carries two buffered channels on
// which asynchronous responses and errors are delivered.
type Client struct {
	*http.Client
	Resp chan *http.Response
	Err chan error
}
//AsyncGet 은 Get 요청을 수행한 다음
//적절한 채널로 응답 또는 에러를 리턴한다.
func (c *Client) AsyncGet(url string) {
resp, err := c.Get(url)
if err != nil {
c.Err <- err
return
}
c.Resp <- resp
} |
// Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package implementation
import (
plannercore "github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/planner/memo"
)
// baseImpl is the common embedded base for memo implementations: it caches
// a computed cost and holds the physical plan the implementation produces.
type baseImpl struct {
	cost float64
	plan plannercore.PhysicalPlan
}
// CalcCost sums the costs of all child implementations, caches the total
// on impl, and returns it. The first (output-row-count) argument is unused
// by the base implementation.
func (impl *baseImpl) CalcCost(_ float64, children ...memo.Implementation) float64 {
	var total float64
	for _, c := range children {
		total += c.GetCost()
	}
	impl.cost = total
	return total
}
// SetCost overwrites the cached cost of this implementation.
func (impl *baseImpl) SetCost(cost float64) {
	impl.cost = cost
}
// GetCost returns the cached cost of this implementation.
func (impl *baseImpl) GetCost() float64 {
	return impl.cost
}
// GetPlan returns the physical plan held by this implementation.
func (impl *baseImpl) GetPlan() plannercore.PhysicalPlan {
	return impl.plan
}
// AttachChildren wires the children's physical plans into impl's plan and
// returns impl for chaining.
func (impl *baseImpl) AttachChildren(children ...memo.Implementation) memo.Implementation {
	plans := make([]plannercore.PhysicalPlan, len(children))
	for idx := range children {
		plans[idx] = children[idx].GetPlan()
	}
	impl.plan.SetChildren(plans...)
	return impl
}
// ScaleCostLimit returns the cost limit unchanged; the base implementation
// applies no scaling.
func (*baseImpl) ScaleCostLimit(costLimit float64) float64 {
	return costLimit
}
// GetCostLimit returns the remaining cost budget for this implementation:
// the given limit minus the total cost of all children.
func (*baseImpl) GetCostLimit(costLimit float64, children ...memo.Implementation) float64 {
	total := 0.0
	for _, c := range children {
		total += c.GetCost()
	}
	return costLimit - total
}
|
package sms
import (
"encoding/json"
"github.com/gin-gonic/gin"
"github.com/zhuiyi1997/go-gin-api/app/config"
"github.com/zhuiyi1997/go-gin-api/app/model"
"github.com/zhuiyi1997/go-gin-api/app/util/request"
"log"
"math/rand"
"net/url"
"strconv"
"time"
)
// SendSms sends a verification SMS of the given type (tp) to phone.
// It generates a 6-digit code, submits it to the SMS gateway and, when the
// gateway reports success, persists the record to MySQL.
// It returns whether the request/decode flow succeeded, plus any error.
func SendSms(tp, phone string) (bool, error) {
	rand.Seed(time.Now().UnixNano())
	// 6-digit code in [100000, 999999]. (The original used
	// Intn(999999-100000), which could never produce 999999.)
	code := strconv.Itoa(rand.Intn(900000) + 100000)

	mysql := model.NewGorm()
	defer mysql.Db.Close()

	smsData := model.Sms{
		Phone:    phone,
		SmsCode:  code,
		SendTime: time.Now().Format("2006-01-02 15:04:05"),
		Type:     tp,
	}

	// Build the SMS body. The Chinese strings are part of the provider
	// contract and must not be altered.
	var content string
	if tp == "forget_password" {
		content = "【" + config.SmsConf["sms_sign"] + "】您正在进行修改密码操作,您的验证码是:" + code + ",10分钟后失效,请及时验证。"
	} else {
		content = "【" + config.SmsConf["sms_sign"] + "】您的验证码是:" + code + ",10分钟内有效,请勿告诉他人!"
	}
	escapeContent := url.QueryEscape(content)

	gateway := "http://sh2.ipyy.com/smsJson.aspx?action=send&userid=" + config.SmsConf["userid"] + "&account=" + config.SmsConf["account"] + "&password=" + config.SmsConf["password"] + "&mobile=" + phone + "&content=" + escapeContent + "&sendTime="
	log.Println(gateway)

	boolean := true
	resp, err := request.HttpGet(gateway, &gin.Context{})
	if err != nil {
		return false, err
	}

	var arr interface{}
	if err = json.Unmarshal([]byte(resp), &arr); err != nil {
		boolean = false
	}
	// Use a checked type assertion: the original asserted unconditionally
	// and would panic on a non-object JSON response.
	if m, ok := arr.(map[string]interface{}); ok && m["returnstatus"] == "Success" {
		mysql.Db.Create(&smsData)
	}
	return boolean, err
}
|
package chartjs
import (
"github.com/gopherjs/gopherjs/js"
)
// Chart wraps a chart.js Chart JavaScript object.
type Chart struct {
	*js.Object
}
// NewChart constructs a new chart.js Chart bound to the rendering context
// ctx with the given configuration.
func NewChart(ctx *js.Object, config *Config) *Chart {
	return &Chart{Object: js.Global.Get("Chart").New(ctx, config)}
}
// GetChart reads name as a property of the global document object and
// wraps the result in a Chart.
// NOTE(review): unlike NewChart this does not create a chart.js instance;
// document.Get(name) yields a DOM/document property, which is likely not a
// Chart object — verify this helper returns what callers expect.
func GetChart(name string) *Chart {
	return &Chart{Object: js.Global.Get("document").Get(name)}
}
// Destroy invokes the underlying chart's destroy method.
// NOTE(review): chart.js's destroy() takes no arguments; the extra nil is
// passed through to JavaScript as null — confirm it is intentional.
func (c *Chart) Destroy() {
	c.Call("destroy", nil)
}
|
package main
import (
"context"
"encoding/json"
"fmt"
"math"
"math/rand"
"net"
"net/http"
"net/url"
"os"
"os/signal"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"syscall"
"time"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/improbable-eng/thanos/pkg/alert"
"github.com/improbable-eng/thanos/pkg/block"
"github.com/improbable-eng/thanos/pkg/cluster"
"github.com/improbable-eng/thanos/pkg/objstore/client"
"github.com/improbable-eng/thanos/pkg/runutil"
"github.com/improbable-eng/thanos/pkg/shipper"
"github.com/improbable-eng/thanos/pkg/store"
"github.com/improbable-eng/thanos/pkg/store/storepb"
"github.com/improbable-eng/thanos/pkg/tracing"
"github.com/improbable-eng/thanos/pkg/ui"
"github.com/oklog/run"
"github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/model"
"github.com/prometheus/common/route"
promlabels "github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/rules"
"github.com/prometheus/prometheus/storage/tsdb"
"github.com/prometheus/prometheus/util/strutil"
"github.com/prometheus/tsdb/labels"
"google.golang.org/grpc"
"gopkg.in/alecthomas/kingpin.v2"
)
// registerRule registers a rule command.
// It wires the ruler's CLI flags into app and stores a setup function in m
// under name; the setup function parses the flags and delegates to runRule.
func registerRule(m map[string]setupFunc, app *kingpin.Application, name string) {
	cmd := app.Command(name, "ruler evaluating Prometheus rules against given Query nodes, exposing Store API and storing old blocks in bucket")
	// Flags shared by all server commands (gRPC/HTTP binds, TLS, peering).
	grpcBindAddr, httpBindAddr, cert, key, clientCA, newPeerFn := regCommonServerFlags(cmd)
	labelStrs := cmd.Flag("label", "Labels to be applied to all generated metrics (repeated).").
		PlaceHolder("<name>=\"<value>\"").Strings()
	dataDir := cmd.Flag("data-dir", "data directory").Default("data/").String()
	ruleFiles := cmd.Flag("rule-file", "Rule files that should be used by rule manager. Can be in glob format (repeated).").
		Default("rules/").Strings()
	evalInterval := modelDuration(cmd.Flag("eval-interval", "The default evaluation interval to use.").
		Default("30s"))
	tsdbBlockDuration := modelDuration(cmd.Flag("tsdb.block-duration", "Block duration for TSDB block.").
		Default("2h"))
	tsdbRetention := modelDuration(cmd.Flag("tsdb.retention", "Block retention time on local disk.").
		Default("48h"))
	alertmgrs := cmd.Flag("alertmanagers.url", "Alertmanager URLs to push firing alerts to. The scheme may be prefixed with 'dns+' or 'dnssrv+' to detect Alertmanager IPs through respective DNS lookups. The port defaults to 9093 or the SRV record's value. The URL path is used as a prefix for the regular Alertmanager API path.").
		Strings()
	alertQueryURL := cmd.Flag("alert.query-url", "The external Thanos Query URL that would be set in all alerts 'Source' field").String()
	objStoreConfig := regCommonObjStoreFlags(cmd, "")
	// The setup function runs once the command is selected: it validates the
	// flag values and hands everything to runRule.
	m[name] = func(g *run.Group, logger log.Logger, reg *prometheus.Registry, tracer opentracing.Tracer, _ bool) error {
		lset, err := parseFlagLabels(*labelStrs)
		if err != nil {
			return errors.Wrap(err, "parse labels")
		}
		peer, err := newPeerFn(logger, reg, false, "", false)
		if err != nil {
			return errors.Wrap(err, "new cluster peer")
		}
		alertQueryURL, err := url.Parse(*alertQueryURL)
		if err != nil {
			return errors.Wrap(err, "parse alert query url")
		}
		tsdbOpts := &tsdb.Options{
			MinBlockDuration: *tsdbBlockDuration,
			MaxBlockDuration: *tsdbBlockDuration,
			Retention:        *tsdbRetention,
			NoLockfile:       true,
			WALFlushInterval: 30 * time.Second,
		}
		return runRule(g,
			logger,
			reg,
			tracer,
			lset,
			*alertmgrs,
			*grpcBindAddr,
			*cert,
			*key,
			*clientCA,
			*httpBindAddr,
			time.Duration(*evalInterval),
			*dataDir,
			*ruleFiles,
			peer,
			objStoreConfig,
			tsdbOpts,
			name,
			alertQueryURL,
		)
	}
}
// runRule runs a rule evaluation component that continuously evaluates alerting and recording
// rules. It sends alert notifications and writes TSDB data for results like a regular Prometheus server.
// Every long-running concern (TSDB lifecycle, rule manager, gossip membership,
// alert sending, config reloading, gRPC/HTTP servers, block shipping) is added
// to the run.Group g as its own actor with a matching shutdown function.
func runRule(
	g *run.Group,
	logger log.Logger,
	reg *prometheus.Registry,
	tracer opentracing.Tracer,
	lset labels.Labels,
	alertmgrURLs []string,
	grpcBindAddr string,
	cert string,
	key string,
	clientCA string,
	httpBindAddr string,
	evalInterval time.Duration,
	dataDir string,
	ruleFiles []string,
	peer *cluster.Peer,
	objStoreConfig *pathOrContent,
	tsdbOpts *tsdb.Options,
	component string,
	alertQueryURL *url.URL,
) error {
	// Gauges reporting whether/when the last rule-file reload succeeded.
	configSuccess := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "thanos_config_last_reload_successful",
		Help: "Whether the last configuration reload attempt was successful.",
	})
	configSuccessTime := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "thanos_config_last_reload_success_timestamp_seconds",
		Help: "Timestamp of the last successful configuration reload.",
	})
	reg.MustRegister(configSuccess)
	reg.MustRegister(configSuccessTime)
	// Local TSDB that stores the results of rule evaluations.
	db, err := tsdb.Open(dataDir, log.With(logger, "component", "tsdb"), reg, tsdbOpts)
	if err != nil {
		return errors.Wrap(err, "open TSDB")
	}
	{
		// Keep the TSDB open until the group shuts down, then close it.
		done := make(chan struct{})
		g.Add(func() error {
			<-done
			return db.Close()
		}, func(error) {
			close(done)
		})
	}
	// Hit the HTTP query API of query peers in randomized order until we get a result
	// back or the context get canceled.
	// NOTE(review): the loop body returns on the first error instead of
	// continuing to the next peer, so only one randomly chosen peer is ever
	// tried — this contradicts the comment above; confirm intended behavior.
	queryFn := func(ctx context.Context, q string, t time.Time) (promql.Vector, error) {
		peers := peer.PeerStates(cluster.PeerTypeQuery)
		var ids []string
		for id := range peers {
			ids = append(ids, id)
		}
		// Sort for a deterministic base order before the random permutation.
		sort.Slice(ids, func(i int, j int) bool {
			return strings.Compare(ids[i], ids[j]) < 0
		})
		for _, i := range rand.Perm(len(ids)) {
			vec, err := queryPrometheusInstant(ctx, logger, peers[ids[i]].QueryAPIAddr, q, t)
			if err != nil {
				return nil, err
			}
			return vec, nil
		}
		return nil, errors.Errorf("no query peer reachable")
	}
	// Run rule evaluation and alert notifications.
	var (
		alertmgrs = newAlertmanagerSet(alertmgrURLs, nil)
		alertQ    = alert.NewQueue(logger, reg, 10000, 100, labelsTSDBToProm(lset))
		mgr       *rules.Manager
	)
	{
		// Rule manager: evaluates rules via queryFn and pushes resulting
		// alerts onto alertQ.
		ctx, cancel := context.WithCancel(context.Background())
		ctx = tracing.ContextWithTracer(ctx, tracer)
		notify := func(ctx context.Context, expr string, alerts ...*rules.Alert) error {
			res := make([]*alert.Alert, 0, len(alerts))
			for _, alrt := range alerts {
				// Only send actually firing alerts.
				if alrt.State == rules.StatePending {
					continue
				}
				a := &alert.Alert{
					StartsAt:     alrt.FiredAt,
					Labels:       alrt.Labels,
					Annotations:  alrt.Annotations,
					GeneratorURL: alertQueryURL.String() + strutil.TableLinkForExpression(expr),
				}
				if !alrt.ResolvedAt.IsZero() {
					a.EndsAt = alrt.ResolvedAt
				}
				res = append(res, a)
			}
			alertQ.Push(res)
			return nil
		}
		mgr = rules.NewManager(&rules.ManagerOptions{
			Context:     ctx,
			QueryFunc:   queryFn,
			NotifyFunc:  notify,
			Logger:      log.With(logger, "component", "rules"),
			Appendable:  tsdb.Adapter(db, 0),
			Registerer:  reg,
			ExternalURL: nil,
		})
		g.Add(func() error {
			mgr.Run()
			<-ctx.Done()
			mgr.Stop()
			return nil
		}, func(error) {
			cancel()
		})
	}
	{
		// Join the gossip cluster as a source peer advertising our labels.
		var storeLset []storepb.Label
		for _, l := range lset {
			storeLset = append(storeLset, storepb.Label{Name: l.Name, Value: l.Value})
		}
		ctx, cancel := context.WithCancel(context.Background())
		g.Add(func() error {
			// New gossip cluster.
			if err = peer.Join(cluster.PeerTypeSource, cluster.PeerMetadata{
				Labels: storeLset,
				// Start out with the full time range. The shipper will constrain it later.
				// TODO(fabxc): minimum timestamp is never adjusted if shipping is disabled.
				MinTime: 0,
				MaxTime: math.MaxInt64,
			}); err != nil {
				return errors.Wrap(err, "join cluster")
			}
			<-ctx.Done()
			return nil
		}, func(error) {
			cancel()
			peer.Close(5 * time.Second)
		})
	}
	{
		// Drain the alert queue and push firing alerts to the Alertmanagers.
		sdr := alert.NewSender(logger, reg, alertmgrs.get, nil)
		ctx, cancel := context.WithCancel(context.Background())
		g.Add(func() error {
			for {
				// TODO(bplotka): Investigate what errors it can return and if just "sdr.Send" retry is enough.
				if err := sdr.Send(ctx, alertQ.Pop(ctx.Done())); err != nil {
					level.Warn(logger).Log("msg", "sending alerts failed", "err", err)
				}
				select {
				case <-ctx.Done():
					return ctx.Err()
				default:
				}
			}
		}, func(error) {
			cancel()
		})
	}
	{
		// Periodically re-resolve the configured Alertmanager addresses.
		ctx, cancel := context.WithCancel(context.Background())
		g.Add(func() error {
			return runutil.Repeat(30*time.Second, ctx.Done(), func() error {
				if err := alertmgrs.update(ctx); err != nil {
					level.Warn(logger).Log("msg", "refreshing Alertmanagers failed", "err", err)
				}
				return nil
			})
		}, func(error) {
			cancel()
		})
	}
	// Handle reload and termination interrupts.
	reload := make(chan struct{}, 1)
	{
		// Reload loop: re-glob the rule files and update the rule manager on
		// every reload signal (including the initial one sent below).
		cancel := make(chan struct{})
		reload <- struct{}{} // initial reload
		g.Add(func() error {
			for {
				select {
				case <-cancel:
					return errors.New("canceled")
				case <-reload:
				}
				level.Debug(logger).Log("msg", "configured rule files", "files", strings.Join(ruleFiles, ","))
				var files []string
				for _, pat := range ruleFiles {
					fs, err := filepath.Glob(pat)
					if err != nil {
						// The only error can be a bad pattern.
						level.Error(logger).Log("msg", "retrieving rule files failed. Ignoring file.", "pattern", pat, "err", err)
						continue
					}
					files = append(files, fs...)
				}
				level.Info(logger).Log("msg", "reload rule files", "numFiles", len(files))
				if err := mgr.Update(evalInterval, files); err != nil {
					configSuccess.Set(0)
					level.Error(logger).Log("msg", "reloading rules failed", "err", err)
					continue
				}
				configSuccess.Set(1)
				configSuccessTime.Set(float64(time.Now().UnixNano()) / 1e9)
			}
		}, func(error) {
			close(cancel)
		})
	}
	{
		// Translate SIGHUP into a (non-blocking) reload signal.
		cancel := make(chan struct{})
		g.Add(func() error {
			c := make(chan os.Signal, 1)
			for {
				signal.Notify(c, syscall.SIGHUP)
				select {
				case <-c:
					select {
					case reload <- struct{}{}:
					default:
					}
				case <-cancel:
					return errors.New("canceled")
				}
			}
		}, func(error) {
			close(cancel)
		})
	}
	// Start gRPC server.
	{
		l, err := net.Listen("tcp", grpcBindAddr)
		if err != nil {
			return errors.Wrap(err, "listen API address")
		}
		logger := log.With(logger, "component", "store")
		store := store.NewTSDBStore(logger, reg, db, lset)
		opts, err := defaultGRPCServerOpts(logger, reg, tracer, cert, key, clientCA)
		if err != nil {
			return errors.Wrap(err, "setup gRPC options")
		}
		s := grpc.NewServer(opts...)
		storepb.RegisterStoreServer(s, store)
		g.Add(func() error {
			return errors.Wrap(s.Serve(l), "serve gRPC")
		}, func(error) {
			s.Stop()
			runutil.CloseWithLogOnErr(logger, l, "store gRPC listener")
		})
	}
	// Start UI & metrics HTTP server.
	{
		router := route.New()
		router.Post("/-/reload", func(w http.ResponseWriter, r *http.Request) {
			reload <- struct{}{}
		})
		ui.NewRuleUI(logger, mgr, alertQueryURL.String()).Register(router)
		mux := http.NewServeMux()
		registerMetrics(mux, reg)
		registerProfile(mux)
		mux.Handle("/", router)
		l, err := net.Listen("tcp", httpBindAddr)
		if err != nil {
			return errors.Wrapf(err, "listen HTTP on address %s", httpBindAddr)
		}
		g.Add(func() error {
			level.Info(logger).Log("msg", "Listening for ui requests", "address", httpBindAddr)
			return errors.Wrap(http.Serve(l, mux), "serve query")
		}, func(error) {
			runutil.CloseWithLogOnErr(logger, l, "query and metric listener")
		})
	}
	var uploads = true
	bucketConfig, err := objStoreConfig.Content()
	if err != nil {
		return err
	}
	// The background shipper continuously scans the data directory and uploads
	// new blocks to Google Cloud Storage or an S3-compatible storage service.
	bkt, err := client.NewBucket(logger, bucketConfig, reg, component)
	if err != nil && err != client.ErrNotFound {
		return err
	}
	if err == client.ErrNotFound {
		level.Info(logger).Log("msg", "No supported bucket was configured, uploads will be disabled")
		uploads = false
	}
	if uploads {
		// Ensure we close up everything properly.
		defer func() {
			if err != nil {
				runutil.CloseWithLogOnErr(logger, bkt, "bucket client")
			}
		}()
		s := shipper.New(logger, nil, dataDir, bkt, func() labels.Labels { return lset }, block.RulerSource)
		ctx, cancel := context.WithCancel(context.Background())
		g.Add(func() error {
			defer runutil.CloseWithLogOnErr(logger, bkt, "bucket client")
			return runutil.Repeat(30*time.Second, ctx.Done(), func() error {
				s.Sync(ctx)
				// Advertise the oldest locally available data via gossip.
				minTime, _, err := s.Timestamps()
				if err != nil {
					level.Warn(logger).Log("msg", "reading timestamps failed", "err", err)
				} else {
					peer.SetTimestamps(minTime, math.MaxInt64)
				}
				return nil
			})
		}, func(error) {
			cancel()
		})
	}
	level.Info(logger).Log("msg", "starting rule node", "peer", peer.Name())
	return nil
}
// queryPrometheusInstant evaluates the instant query `query` at time t
// against the query-layer HTTP API at addr (with deduplication enabled) and
// returns the resulting vector. The request is traced and evaluated with
// the given context's deadline/cancellation.
func queryPrometheusInstant(ctx context.Context, logger log.Logger, addr, query string, t time.Time) (promql.Vector, error) {
	u, err := url.Parse(fmt.Sprintf("http://%s/api/v1/query", addr))
	if err != nil {
		return nil, err
	}
	params := url.Values{}
	params.Add("query", query)
	params.Add("time", t.Format(time.RFC3339Nano))
	params.Add("dedup", "true")
	u.RawQuery = params.Encode()
	req, err := http.NewRequest("GET", u.String(), nil)
	if err != nil {
		return nil, err
	}
	// Trace the client call; the span covers the full request/decode cycle.
	span, ctx := tracing.StartSpan(ctx, "/rule_instant_query HTTP[client]")
	defer span.Finish()
	req = req.WithContext(ctx)
	client := &http.Client{
		Transport: tracing.HTTPTripperware(logger, http.DefaultTransport),
	}
	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer runutil.CloseWithLogOnErr(logger, resp.Body, "query body")
	// Always try to decode a vector. Scalar rules won't work for now and arguably
	// have no relevant use case.
	var m struct {
		Data struct {
			Result model.Vector `json:"result"`
		} `json:"data"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&m); err != nil {
		return nil, err
	}
	// Convert the API's model.Vector into a promql.Vector with sorted labels.
	vec := make(promql.Vector, 0, len(m.Data.Result))
	for _, e := range m.Data.Result {
		lset := make(promlabels.Labels, 0, len(e.Metric))
		for k, v := range e.Metric {
			lset = append(lset, promlabels.Label{
				Name:  string(k),
				Value: string(v),
			})
		}
		sort.Sort(lset)
		vec = append(vec, promql.Sample{
			Metric: lset,
			Point:  promql.Point{T: int64(e.Timestamp), V: float64(e.Value)},
		})
	}
	return vec, nil
}
// alertmanagerSet holds the configured Alertmanager addresses and the most
// recently resolved set of concrete URLs; current is guarded by mtx.
type alertmanagerSet struct {
	resolver *net.Resolver
	addrs    []string
	mtx      sync.Mutex
	current  []*url.URL
}
// newAlertmanagerSet creates an alertmanagerSet for the given addresses.
// A nil resolver defaults to net.DefaultResolver.
func newAlertmanagerSet(addrs []string, resolver *net.Resolver) *alertmanagerSet {
	r := resolver
	if r == nil {
		r = net.DefaultResolver
	}
	return &alertmanagerSet{
		resolver: r,
		addrs:    addrs,
	}
}
// get returns the most recently resolved Alertmanager URLs (see update).
func (s *alertmanagerSet) get() []*url.URL {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	return s.current
}
// defaultAlertmanagerPort is used when an Alertmanager address omits a port.
const defaultAlertmanagerPort = 9093
// update resolves every configured Alertmanager address and atomically
// replaces the current URL set.
//
// Supported lookup schemes: plain (e.g. "http"), "dns+<proto>" (A/AAAA
// lookup) and "dnssrv+<proto>" (SRV lookup). When the URL carries no
// explicit port, it falls back to defaultAlertmanagerPort, or to each SRV
// record's own port in the dnssrv case.
func (s *alertmanagerSet) update(ctx context.Context) error {
	var res []*url.URL
	for _, addr := range s.addrs {
		u, err := url.Parse(addr)
		if err != nil {
			return errors.Wrapf(err, "parse URL %q", addr)
		}
		host, port, err := net.SplitHostPort(u.Host)
		if err != nil {
			// No explicit port in the address.
			host, port = u.Host, ""
		}
		var (
			hosts  []string
			proto  = u.Scheme
			lookup = "none"
		)
		if ps := strings.SplitN(u.Scheme, "+", 2); len(ps) == 2 {
			lookup, proto = ps[0], ps[1]
		}
		switch lookup {
		case "dns":
			if port == "" {
				port = strconv.Itoa(defaultAlertmanagerPort)
			}
			ips, err := s.resolver.LookupIPAddr(ctx, host)
			if err != nil {
				return errors.Wrapf(err, "lookup IP addresses %q", host)
			}
			for _, ip := range ips {
				hosts = append(hosts, net.JoinHostPort(ip.String(), port))
			}
		case "dnssrv":
			_, recs, err := s.resolver.LookupSRV(ctx, "", proto, host)
			if err != nil {
				return errors.Wrapf(err, "lookup SRV records %q", host)
			}
			for _, rec := range recs {
				// Only use port from SRV record if no explicit port was specified.
				// Fix: the original wrote the first record's port back into the
				// shared `port` variable, so every subsequent SRV record reused
				// the first record's port instead of its own.
				recPort := port
				if recPort == "" {
					recPort = strconv.Itoa(int(rec.Port))
				}
				hosts = append(hosts, net.JoinHostPort(rec.Target, recPort))
			}
		case "none":
			if port == "" {
				port = strconv.Itoa(defaultAlertmanagerPort)
			}
			hosts = append(hosts, net.JoinHostPort(host, port))
		default:
			return errors.Errorf("invalid lookup scheme %q", lookup)
		}
		for _, h := range hosts {
			res = append(res, &url.URL{
				Scheme: proto,
				Host:   h,
				Path:   u.Path,
				User:   u.User,
			})
		}
	}
	s.mtx.Lock()
	s.current = res
	s.mtx.Unlock()
	return nil
}
// parseFlagLabels parses a list of `name="value"` flag strings into a label
// set. Each entry must contain '=' and the value must be a quoted Go string
// literal; otherwise an error is returned.
func parseFlagLabels(s []string) (labels.Labels, error) {
	var lset labels.Labels
	for _, entry := range s {
		kv := strings.SplitN(entry, "=", 2)
		if len(kv) != 2 {
			return nil, errors.Errorf("unrecognized label %q", entry)
		}
		value, err := strconv.Unquote(kv[1])
		if err != nil {
			return nil, errors.Wrap(err, "unquote label value")
		}
		lset = append(lset, labels.Label{Name: kv[0], Value: value})
	}
	return lset, nil
}
// labelsTSDBToProm converts a TSDB label set into the equivalent Prometheus
// label set, copying every name/value pair. A nil/empty input yields a nil
// result (append-to-nil keeps the original's nil-on-empty behavior).
func labelsTSDBToProm(lset labels.Labels) (res promlabels.Labels) {
	for i := range lset {
		res = append(res, promlabels.Label{
			Name:  lset[i].Name,
			Value: lset[i].Value,
		})
	}
	return res
}
|
package main
import "fmt"
// main demonstrates Go's pass-by-value semantics for strings and
// pass-by-pointer semantics for integers.
func main() {
	name := "James"
	num := 45

	foo()       // no arguments at all
	bar(name)   // string copied; caller's value untouched
	add(&num)   // pointer passed; caller's value mutated

	fmt.Println("My name is still", name)
	fmt.Println("Main: The num:", num)
}
// foo prints a fixed greeting; it takes no arguments and cannot observe
// or modify any of the caller's variables.
func foo() {
	fmt.Println("My name is James")
}
// bar receives its string argument by value: the reassignment below changes
// only bar's local copy, never the caller's variable.
func bar(s string) {
	s = "John"
	fmt.Println("My name is", s)
}
// add prints the integer x points at, increments it in place, and prints
// the new value. Because x is a pointer, the increment is visible to the
// caller after add returns.
func add(x *int) {
	fmt.Println("In add func - num:", *x)
	*x = *x + 1
	fmt.Println(*x)
}
|
package main
import "github.com/gofiber/fiber/v2"
// main starts a Fiber HTTP server exposing POST /get_into_db.
func main() {
	app := fiber.New()

	app.Post("/get_into_db", func(c *fiber.Ctx) error {
		return c.SendString("Hello")
	})

	// The original never called Listen, so the program registered the route
	// and exited immediately without ever serving a request. Block here and
	// serve; Listen only returns on failure.
	if err := app.Listen(":3000"); err != nil {
		panic(err)
	}
}
|
/*
* @lc app=leetcode.cn id=83 lang=golang
*
* [83] 删除排序链表中的重复元素
*/
// @lc code=start
/**
* Definition for singly-linked list.
*/
package main
import "fmt"
// ListNode is a node of a singly-linked list.
type ListNode struct {
	Val  int       // node value
	Next *ListNode // next node, nil at the tail
}
// deleteDuplicates removes consecutive duplicate values from a sorted
// linked list so that every value appears exactly once, relinking nodes
// in place and returning the (unchanged) head.
//
// Runs in O(n) time and O(1) extra space. The earlier map-based draft
// (O(n) space) has been removed as dead code.
func deleteDuplicates(head *ListNode) *ListNode {
	cur := head
	for cur != nil && cur.Next != nil {
		if cur.Val == cur.Next.Val {
			// Splice out the duplicate but stay on cur: further copies
			// of the same value may follow immediately.
			cur.Next = cur.Next.Next
		} else {
			cur = cur.Next
		}
	}
	return head
}
// @lc code=end
// print writes the list's values space-separated on a single line to stdout,
// terminated by a newline.
func print(head *ListNode) {
	for cur := head; cur != nil; cur = cur.Next {
		fmt.Printf("%d ", cur.Val)
	}
	fmt.Println()
}
// main builds the sorted list 1->1->2->3->3->3, prints it, removes the
// duplicates, and prints the result (expected output: "1 2 3").
func main() {
	vals := []int{1, 1, 2, 3, 3, 3}
	head := &ListNode{Val: vals[0]}
	tail := head
	for _, v := range vals[1:] {
		tail.Next = &ListNode{Val: v}
		tail = tail.Next
	}
	print(head)
	print(deleteDuplicates(head))
}
|
package relax
import (
"math"
"math/rand"
"strings"
"sync"
"time"
)
var (
	// rnd is the package-level PRNG, seeded once at startup. It is used by
	// VolumeSlider.setTarget, which in this file only runs while the
	// slider's mutex is held (via Val).
	rnd = rand.New(rand.NewSource(time.Now().Unix()))
)
// VolumeSlider models a volume level that drifts in tiny steps toward a
// randomly chosen target in [0.0, 1.0); a new target is picked whenever the
// current one is (approximately) reached.
type VolumeSlider struct {
	stop   chan bool    // NOTE(review): created but never read or closed in this file — confirm it is used elsewhere or remove
	ticker *time.Ticker // NOTE(review): created by NewVolumeSlider but never consumed or stopped — looks like a leak; verify
	mtx    sync.Mutex   // guards target and val
	target float64      // value that val drifts toward
	val    float64      // current slider position
	speed  float64      // step size applied on each Val() call
}
// NewVolumeSlider returns a slider starting at 0 with target 0 and a very
// small per-step speed.
//
// NOTE(review): the 10ms ticker is stored but never consumed or stopped in
// this file — confirm it is needed before relying on it.
func NewVolumeSlider() *VolumeSlider {
	vs := &VolumeSlider{
		stop:   make(chan bool),
		speed:  0.000001,
		val:    0,
		target: 0,
	}
	vs.ticker = time.NewTicker(10 * time.Millisecond)
	return vs
}
// Val advances the slider one step and returns the new position.
// If the current target has been (approximately) reached, a fresh random
// target is chosen before stepping.
func (vs *VolumeSlider) Val() float64 {
	vs.mtx.Lock()
	// Hold the lock through the final read: the original unlocked before
	// `return vs.val`, so the returned value was read outside the critical
	// section — a data race with concurrent callers.
	defer vs.mtx.Unlock()
	if vs.foundTarget() {
		vs.setTarget()
	}
	vs.moveTowardsTarget()
	return vs.val
}
// foundTarget reports whether val is within 0.01 of the current target.
// Caller must hold mtx.
func (vs *VolumeSlider) foundTarget() bool {
	return math.Abs(vs.val-vs.target) < 0.01
}
// setTarget picks a new random target in [0.0, 1.0). Caller must hold mtx.
func (vs *VolumeSlider) setTarget() {
	vs.target = rnd.Float64()
}
// moveTowardsTarget nudges val one speed-sized step toward target.
// Caller must hold mtx.
func (vs *VolumeSlider) moveTowardsTarget() {
	step := vs.speed
	if vs.val > vs.target {
		step = -step
	}
	vs.val += step
}
// String renders the slider as a fixed-width bar: the leading portion
// proportional to val is drawn with '*', the remainder with '-'.
func (vs *VolumeSlider) String() string {
	const bar = "-----------------------------------------------------------------------------------------"
	filled := float64(len(bar)) * vs.val
	return strings.Replace(bar, "-", "*", int(filled))
}
|
package main
import (
"encoding/json"
"fmt"
"html/template"
"math/rand"
"net/http"
"os"
"time"
)
// NumberOfNumbers is how many main numbers each Combination holds.
var NumberOfNumbers int = 5

// NumberOfStars is how many star numbers each Combination holds.
var NumberOfStars int = 2
// Combination represents a EuroMillions key: a set of main numbers and a
// set of stars.
type Combination struct {
	Numbers []int // main numbers; newCombination draws them from 1-50
	Stars   []int // star numbers; newCombination draws them from 1-12
}
// String renders the numbers and the stars on two separate lines.
func (c Combination) String() string {
	return fmt.Sprintf("%v\n%v", c.Numbers, c.Stars)
}
// SaveToFile appends the combination as JSON to temp.json, creating the
// file if it does not exist. Returns the first error encountered.
func (c Combination) SaveToFile() error {
	// Named `data` rather than `json` so the encoding/json package name
	// is not shadowed.
	data, err := c.ToJson()
	if err != nil {
		return err
	}
	f, err := os.OpenFile("temp.json", os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0644)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = f.Write(data)
	return err
}
// ToJson marshals the combination to JSON bytes.
func (c *Combination) ToJson() ([]byte, error) {
	return json.Marshal(c)
}
// newCombination draws a random EuroMillions key: NumberOfNumbers distinct
// main numbers in 1-50 and NumberOfStars distinct stars in 1-12.
//
// Two fixes over the original: a single PRNG is seeded once per call instead
// of re-seeding with time.Now().UnixNano() for every draw (which is wasteful
// and repeats values when draws land in the same nanosecond tick), and
// duplicates within one key are rejected — a real key never repeats a number.
func newCombination() Combination {
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	return Combination{
		Numbers: drawDistinct(r, NumberOfNumbers, 50),
		Stars:   drawDistinct(r, NumberOfStars, 12),
	}
}

// drawDistinct returns count distinct random integers in [1, max].
func drawDistinct(r *rand.Rand, count, max int) []int {
	seen := make(map[int]bool, count)
	nums := make([]int, 0, count)
	for len(nums) < count {
		n := r.Intn(max) + 1 // Intn(max) is [0, max); +1 shifts to [1, max]
		if seen[n] {
			continue
		}
		seen[n] = true
		nums = append(nums, n)
	}
	return nums
}
// handlerHTML renders html/index.html with a freshly drawn combination.
func handlerHTML(w http.ResponseWriter, r *http.Request) {
	tmpl := template.Must(template.ParseFiles("html/index.html"))
	nComb := newCombination()
	// The original discarded Execute's error; the response may already be
	// partially written at that point, so just report the failure instead
	// of silently dropping it.
	if err := tmpl.Execute(w, nComb); err != nil {
		fmt.Fprintln(os.Stderr, "template execute:", err)
	}
}
// main wires up the HTTP routes (page handler plus static css/js file
// servers) and serves on :9090.
func main() {
	http.HandleFunc("/", handlerHTML)
	http.Handle("/css/", http.StripPrefix("/css/", http.FileServer(http.Dir("css"))))
	http.Handle("/js/", http.StripPrefix("/js/", http.FileServer(http.Dir("js"))))
	// ListenAndServe only returns on failure; the original dropped that
	// error, making startup problems (e.g. port in use) invisible.
	if err := http.ListenAndServe(":9090", nil); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.