text stringlengths 11 4.05M |
|---|
package main
import (
"testing"
)
// TestParseLine verifies that parseLine extracts the claim id, x/y offsets
// and width/height from puzzle-input lines of the form "#id @ x,y: WxH".
func TestParseLine(t *testing.T) {
	// Single-digit id with two-digit dimensions.
	if c := parseLine("#1 @ 829,837: 11x22"); c.id != 1 ||
		c.x != 829 ||
		c.y != 837 ||
		c.w != 11 ||
		c.h != 22 {
		t.Fatalf("Failed to parse: %v", c)
	}
	// Multi-digit id to ensure the parser is not fixed-width.
	if c := parseLine("#583 @ 110,564: 10x23"); c.id != 583 ||
		c.x != 110 ||
		c.y != 564 ||
		c.w != 10 ||
		c.h != 23 {
		t.Fatalf("Failed to parse: %v", c)
	}
}
|
package HumorChecker // "cirello.io/HumorChecker"
import (
"bufio"
"regexp"
"strings"
)
// Score describes the sentiment of an analyzed text in a single direction
// (positive or negative), as produced by Positivity or Negativity.
type Score struct {
	// Score is the sum of the sentiment points of the analyzed text.
	// Negativity will render negative points only, and vice-versa.
	Score float64
	// Comparative establishes a ratio of sentiment per word
	// (Score divided by the number of scanned words).
	Comparative float64
	// Words is the list of words that matched the given sentiment.
	Words []string
}
// FullScore aggregates the positive and negative sentiment of a text,
// as produced by Analyze.
type FullScore struct {
	// Score is the difference between positive and negative sentiment
	// scores.
	Score float64
	// Comparative is the difference between positive and negative sentiment
	// comparative scores.
	Comparative float64
	// Positive score object
	Positive Score
	// Negative score object
	Negative Score
}
// lettersAndSpaceOnly matches every run of characters that is not an ASCII
// letter or a space; it is used to strip punctuation and digits before
// word-scanning a phrase.
var lettersAndSpaceOnly = regexp.MustCompile(`[^a-zA-Z ]+`)
// Negativity calculates the negative sentiment of a sentence.
//
// The returned Score holds the magnitude of negative sentiment (a
// non-negative number, since negative AFINN values are subtracted), the
// per-word comparative ratio, and the matched words. For a phrase with no
// scannable words the Comparative is 0; the previous implementation
// divided by a zero word count and produced NaN.
func Negativity(phrase string) Score {
	var hits float64
	var words []string
	scanner := bufio.NewScanner(strings.NewReader(strings.ToLower(lettersAndSpaceOnly.ReplaceAllString(phrase, " "))))
	scanner.Split(bufio.ScanWords)
	var count float64
	for scanner.Scan() {
		count++
		word := scanner.Text()
		if v, ok := afinn[word]; ok && v < 0 {
			// v is negative, so subtracting accumulates a positive magnitude.
			hits -= v
			words = append(words, word)
		}
	}
	comparative := 0.0
	if count > 0 {
		comparative = hits / count
	}
	return Score{
		Score:       hits,
		Comparative: comparative,
		Words:       words,
	}
}
// Positivity calculates the positive sentiment of a sentence.
// (Fixes the "Positiviy" typo in the original doc comment.)
//
// The returned Score holds the sum of positive sentiment points, the
// per-word comparative ratio, and the matched words. For a phrase with no
// scannable words the Comparative is 0; the previous implementation
// divided by a zero word count and produced NaN.
func Positivity(phrase string) Score {
	var hits float64
	var words []string
	scanner := bufio.NewScanner(strings.NewReader(strings.ToLower(lettersAndSpaceOnly.ReplaceAllString(phrase, " "))))
	scanner.Split(bufio.ScanWords)
	var count float64
	for scanner.Scan() {
		count++
		word := scanner.Text()
		if v, ok := afinn[word]; ok && v > 0 {
			hits += v
			words = append(words, word)
		}
	}
	comparative := 0.0
	if count > 0 {
		comparative = hits / count
	}
	return Score{
		Score:       hits,
		Comparative: comparative,
		Words:       words,
	}
}
// Analyze calculates the overall sentiment of a phrase by combining its
// positive and negative scores into a FullScore.
func Analyze(phrase string) FullScore {
	positive, negative := Positivity(phrase), Negativity(phrase)
	return FullScore{
		Score:       positive.Score - negative.Score,
		Comparative: positive.Comparative - negative.Comparative,
		Positive:    positive,
		Negative:    negative,
	}
}
|
// Copyright 2019 Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ghcache
import (
"net/http"
"github.com/go-logr/logr"
"github.com/gregjones/httpcache"
)
// Compile-time check that rateLimitLogger satisfies http.RoundTripper.
var _ http.RoundTripper = &rateLimitLogger{}

// rateLimitLogger decorates an http.RoundTripper and logs the GitHub
// rate-limit headers observed on each response from the wrapped transport.
type rateLimitLogger struct {
	log      logr.Logger       // destination for rate-limit log lines
	delegate http.RoundTripper // transport that actually performs the request
}
// RoundTrip forwards the request to the delegate transport and logs the
// GitHub rate-limit headers (plus cache-hit status) from the response.
// When the remaining quota reaches zero it additionally logs an error.
func (l *rateLimitLogger) RoundTrip(req *http.Request) (*http.Response, error) {
	resp, err := l.delegate.RoundTrip(req)
	if err != nil {
		return nil, err
	}
	var (
		total     = resp.Header.Get("X-RateLimit-Limit")
		remaining = resp.Header.Get("X-RateLimit-Remaining")
		cacheHit  = resp.Header.Get(httpcache.XFromCache)
		url       = req.URL.String()
	)
	l.log.V(5).Info("GitHub rate limit", "hit", cacheHit, "total", total, "remaining", remaining, "url", url)
	if remaining == "0" {
		l.log.Error(nil, "GitHub request limit exceeded", "total", total, "remaining", remaining, "url", url)
	}
	return resp, nil
}
|
package parspack
import (
"testing"
"github.com/DataDrake/cuppa/version"
"github.com/autamus/go-parspack/pkg"
)
// TestEncode round-trips a fully populated pkg.Package through Encode and
// compares the generated Spack recipe against a golden string.
// Whitespace inside the raw-string literals is significant.
func TestEncode(t *testing.T) {
	packg := pkg.Package{
		BlockComment: `# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)`,
		Name:        "Beast2",
		PackageType: "Package",
		Description: `BEAST is a cross-platform program for Bayesian inference using MCMC of molecular sequences. It is entirely orientated towards rooted, time-measured phylogenies inferred using strict or relaxed molecular clock models. It can be used as a method of reconstructing phylogenies but is also a framework for testing evolutionary hypotheses without conditioning on a single tree topology.`,
		Homepage:    "http://beast2.org/",
		URL:         "https://github.com/CompEvol/beast2/releases/download/v2.4.6/BEAST.v2.4.6.Linux.tgz",
		Versions: []pkg.Version{{Value: version.NewVersion("master"), Branch: "main", Submodules: "True"}, {Value: version.NewVersion("2.5.2"), Checksum: "sha256='2feb2281b4f7cf8f7de1a62de50f52a8678ed0767fc72f2322e77dde9b8cd45f'"},
			{Value: version.NewVersion("2.4.6"), Checksum: "sha256='84029c5680cc22f95bef644824130090f5f12d3d7f48d45cb4efc8e1d6b75e93'", URL: "https://github.com/CompEvol/beast2/releases/download/v2.4.6/BEAST.v2.4.6.Linux.tgz"}},
		LatestVersion: pkg.Version{Value: version.NewVersion("2.5.2"), Checksum: "sha256='2feb2281b4f7cf8f7de1a62de50f52a8678ed0767fc72f2322e77dde9b8cd45f'"},
		Dependencies:  []string{"java"},
		BuildInstructions: ` def setup_run_environment(self, env):
env.set('BEAST', self.prefix)
def install(self, spec, prefix):
install_tree('bin', prefix.bin)
install_tree('examples', join_path(self.prefix, 'examples'))
install_tree('images', join_path(self.prefix, 'images'))
install_tree('lib', prefix.lib)
install_tree('templates', join_path(self.prefix, 'templates'))
`,
	}
	// expected is the golden recipe output Encode must reproduce exactly.
	expected := `# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Beast2(Package):
"""BEAST is a cross-platform program for Bayesian inference using MCMC of molecular sequences. It is entirely orientated towards rooted, time-measured phylogenies inferred using strict or relaxed molecular clock models. It can be used as a method of reconstructing phylogenies but is also a framework for testing evolutionary hypotheses without conditioning on a single tree topology."""
homepage = "http://beast2.org/"
url = "https://github.com/CompEvol/beast2/releases/download/v2.4.6/BEAST.v2.4.6.Linux.tgz"
version('master', branch='main', submodules=True)
version('2.5.2', sha256='2feb2281b4f7cf8f7de1a62de50f52a8678ed0767fc72f2322e77dde9b8cd45f')
version('2.4.6', sha256='84029c5680cc22f95bef644824130090f5f12d3d7f48d45cb4efc8e1d6b75e93', url='https://github.com/CompEvol/beast2/releases/download/v2.4.6/BEAST.v2.4.6.Linux.tgz')
depends_on('java')
def setup_run_environment(self, env):
env.set('BEAST', self.prefix)
def install(self, spec, prefix):
install_tree('bin', prefix.bin)
install_tree('examples', join_path(self.prefix, 'examples'))
install_tree('images', join_path(self.prefix, 'images'))
install_tree('lib', prefix.lib)
install_tree('templates', join_path(self.prefix, 'templates'))
`
	result, err := Encode(packg)
	if err != nil {
		t.Error(err)
	}
	if result != expected {
		// Log both strings so a failing diff can be inspected.
		t.Log(result)
		t.Log(expected)
		t.Error("Result and Expected Do Not Match")
	}
}
|
package main
import (
"fmt"
)
// Errno is a Unix-style numeric error code implementing the error interface.
type Errno uint

// errors maps known errno values to their descriptions. Index 0 is
// intentionally unset (errno 0 conventionally means "no error").
var errors = [...]string{
	1: "operation not permitted", // EPERM
	2: "no such file or directory", // ENOENT
	3: "no such process", // ESRCH
}

// Error returns the textual description of the errno, falling back to
// "errno N" for codes without a known message.
func (e Errno) Error() string {
	// Also reject empty table slots: the original returned "" for Errno(0)
	// because index 0 of the table holds the zero-value string.
	if 0 <= int(e) && int(e) < len(errors) && errors[e] != "" {
		return errors[e]
	}
	return fmt.Sprintf("errno %d", e)
}
// main demonstrates that Errno satisfies the error interface by printing
// the message for ENOENT.
func main() {
	var err error = Errno(2)
	fmt.Println(err)
}
|
package main
import (
"fmt"
"time"
pg "github.com/test_go_pg/pg"
"go.uber.org/zap"
)
// main bootstraps the database tooling: it verifies connectivity to the
// write database, applies pending schema migrations, then idles for an
// hour before exiting.
//
// NOTE(review): zap.String/zap.Error return zap.Field structs; passing them
// to fmt.Println prints raw struct values rather than structured log
// output. A zap logger is presumably intended here — confirm before use.
func main() {
	fmt.Println("Starting go-pg-migrations...")
	// Bootstrap check pg
	if err := pg.PGDBWrite.Ping(); err != nil {
		fmt.Println(pg.DBWriteConnectionError, zap.Error(err))
		return
	}
	fmt.Println("PostgreSQL is running",
		zap.String("user", pg.PGDBWrite.Options().User),
		zap.String("addr", pg.PGDBWrite.Options().Addr),
		zap.String("db", pg.PGDBWrite.Options().Database))
	// Migrate to latest pg schema
	if err := pg.PGDBWrite.Migrate(); err != nil {
		fmt.Println(pg.DBMigrationError, zap.Error(err))
		return
	}
	// Keep the process alive (e.g. for sidecar-style deployments) before
	// shutting down.
	time.Sleep(time.Hour)
	fmt.Println("go-pg-migrations is stopping...")
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package example
import (
"context"
"fmt"
"time"
"chromiumos/tast/ctxutil"
"chromiumos/tast/errors"
"chromiumos/tast/local/chrome/ash"
"chromiumos/tast/local/chrome/browser"
"chromiumos/tast/local/chrome/browser/browserfixt"
"chromiumos/tast/local/chrome/lacros"
"chromiumos/tast/local/chrome/lacros/lacrosfixt"
"chromiumos/tast/testing"
)
// init registers the BrowserWithNewChrome test with the tast framework.
func init() {
	testing.AddTest(&testing.Test{
		Func:         BrowserWithNewChrome,
		LacrosStatus: testing.LacrosVariantExists,
		Desc:         "Tests SetUpWithNewChrome in the browserfixt package. See http://go/lacros-tast-porting for the guidelines on how to use",
		Contacts:     []string{"hyungtaekim@chromium.org", "lacros-team@google.com", "chromeos-sw-engprod@google.com"},
		Attr:         []string{"group:mainline", "informational"},
		SoftwareDeps: []string{"chrome", "lacros"},
		Timeout:      4 * time.Minute,
	})
}
// BrowserWithNewChrome exercises browserfixt.SetUpWithNewChrome for both
// ash-chrome and lacros under several lacros configurations, then opens a
// new window and verifies the expected browser window count.
func BrowserWithNewChrome(ctx context.Context, s *testing.State) {
	// Reserve some time for cleanup.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 10*time.Second)
	defer cancel()
	for _, param := range []struct {
		bt  browser.Type
		cfg *lacrosfixt.Config
	}{
		{browser.TypeAsh, nil},
		{browser.TypeAsh, lacrosfixt.NewConfig()},    // LacrosConfig is a no-op for ash-chrome.
		{browser.TypeLacros, lacrosfixt.NewConfig()}, // default config
		{browser.TypeLacros, lacrosfixt.NewConfig(
			lacrosfixt.Selection(lacros.Rootfs), lacrosfixt.Mode(lacros.LacrosSideBySide))}, // custom config
	} {
		bt := param.bt
		cfg := param.cfg
		s.Run(ctx, fmt.Sprintf("BrowserWithNewChrome browser: %v, cfg: %+v", bt, cfg), func(ctx context.Context, s *testing.State) {
			// Connect to a fresh ash-chrome instance (cr) and set a browser instance (br) to use browser functionality.
			cr, br, closeBrowser, err := browserfixt.SetUpWithNewChrome(ctx, bt, cfg)
			if err != nil {
				s.Fatalf("Failed to connect to %v browser: %v", bt, err)
			}
			defer cr.Close(cleanupCtx)
			defer closeBrowser(cleanupCtx)
			numNewWindows := 0
			if bt == browser.TypeLacros {
				numNewWindows = 1 // Lacros opens an extra window in browserfixt.SetUp*.
			}
			// Open a new window.
			const url = "chrome://newtab"
			conn, err := br.NewConn(ctx, url, browser.WithNewWindow())
			if err != nil {
				s.Fatalf("Failed to open new window with url: %v, %v", url, err)
			}
			defer conn.Close()
			numNewWindows++
			// Verify that the expected number of browser windows are open.
			tconn, err := cr.TestAPIConn(ctx)
			if err != nil {
				s.Fatal("Failed to create test API connection: ", err)
			}
			if err := testing.Poll(ctx, func(ctx context.Context) error {
				ws, err := ash.FindAllWindows(ctx, tconn, func(w *ash.Window) bool {
					return (bt == browser.TypeAsh && w.WindowType == ash.WindowTypeBrowser) ||
						(bt == browser.TypeLacros && w.WindowType == ash.WindowTypeLacros)
				})
				if err != nil {
					return errors.Wrap(err, "failed to get all browser windows")
				}
				if len(ws) != numNewWindows {
					// err is nil on this path, so wrapping it was misleading;
					// report the count mismatch as a fresh error instead.
					return errors.Errorf("failed to find open browser windows. expected: %v, got: %v", numNewWindows, len(ws))
				}
				return nil
			}, &testing.PollOptions{Timeout: 10 * time.Second, Interval: time.Second}); err != nil {
				s.Fatalf("Failed to find %v browser windows: %v", bt, err)
			}
		})
	}
}
|
package udwSqlite3Test
import (
"github.com/tachyon-protocol/udw/udwSqlite3"
"github.com/tachyon-protocol/udw/udwSync"
"github.com/tachyon-protocol/udw/udwTest"
)
// TestRangeCallback2 exercises range-callback queries on an in-memory
// database: callbacks must not fire for an empty keyspace, and once rows
// exist, values must be visited in ascending key order.
func TestRangeCallback2() {
	db := udwSqlite3.MustNewMemoryDb()
	defer db.Close()
	counter := udwSync.NewInt(0)
	req := udwSqlite3.GetRangeReq{K1: "test"}
	// No rows yet: neither callback style should be invoked.
	db.MustGetRangeCallback(req, func(key string, value string) {
		counter.Add(1)
	})
	udwTest.Equal(counter.Get(), 0)
	db.MustGetRangeKeyListCallback(req, func(key string) {
		counter.Add(1)
	})
	udwTest.Equal(counter.Get(), 0)
	// Insert out of order; the range scan should still yield key order.
	db.MustSet("test", "3", "v3")
	db.MustSet("test", "1", "v1")
	db.MustSet("test", "2", "v2")
	valueList := []string{}
	db.MustGetRangeCallback(udwSqlite3.GetRangeReq{K1: "test"}, func(key string, value string) {
		valueList = append(valueList, value)
	})
	udwTest.Equal(valueList, []string{"v1", "v2", "v3"})
}
|
// +build all common pkg api proxy
// Package api :: proxy_test.go
package api
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"net/http/httptest"
"testing"
"github.com/stretchr/testify/assert"
)
// MockProxyClient struct
type MockProxyClient struct {
Request *http.Request
ResponseText string
Err error
}
// Do implements ProxyClient interface
func (mpc *MockProxyClient) Do(r *http.Request) (*http.Response, error) {
mpc.Request = r
response := &http.Response{
Body: ioutil.NopCloser(bytes.NewBuffer([]byte(mpc.ResponseText))),
}
return response, mpc.Err
}
// TestProxyFunc tests func Proxy end-to-end against a mock proxy client,
// covering the success path, the bad-gateway path, and a body-read failure.
func TestProxyFunc(t *testing.T) {
	tests := []struct {
		prefix      string
		redirectURL string
		reqURL      string
		restURL     string
		response    string
		errText     string
		errFunc     func(io.Reader) ([]byte, error)
	}{
		{
			"/prefix1", "http://redir1", "/prefix1/test", "http://redir1/test",
			"good", "", nil,
		},
		{
			"/pre2", "http://re2", "/pre2/test/value", "http://re2/test/value",
			"BAD GATEWAY", "bad gateway err test", nil,
		},
		{
			"/pre3", "http://re3", "/pre3/test/3", "http://re3/test/3",
			"read err test", "", func(_ io.Reader) ([]byte, error) {
				return []byte{}, errors.New("read err test")
			},
		},
	}
	savedClient := proxyClient
	savedReadAll := bodyReadAll
	defer func() {
		// restoring proxyClient AND bodyReadAll (see definitions in proxy.go).
		// The original defer restored only proxyClient, which left bodyReadAll
		// pointing at the last test case's failing stub after this test ran.
		proxyClient = savedClient
		bodyReadAll = savedReadAll
	}()
	for idx, test := range tests {
		var err error
		if test.errText != "" {
			err = errors.New(test.errText)
		}
		if test.errFunc == nil {
			bodyReadAll = savedReadAll
		} else {
			bodyReadAll = test.errFunc
		}
		client := &MockProxyClient{
			ResponseText: test.response, Err: err,
		}
		req, _ := http.NewRequest("GET", test.reqURL, nil)
		req.Header.Set("Foobar", "Foobar header test")
		msg := fmt.Sprintf("%s => %s", test.reqURL, test.restURL)
		t.Logf("Test %2d: %s\n", idx+1, msg)
		proxyClient = client
		rwr := httptest.NewRecorder()
		// here start to test the function
		Proxy(test.prefix, test.redirectURL, rwr, req)
		data, err := savedReadAll(rwr.Body)
		proxyHeader := client.Request.Header
		proxyReq := client.Request
		// t.Logf("request: %#v\n", req)
		// t.Logf("proxy: %#v (%s)\n", proxyReq, proxyReq.URL)
		// t.Logf("record: %#v\n", rwr)
		assert.Equal(t, req.URL.Path, test.reqURL)
		assert.Equal(t, req.Method, proxyReq.Method)
		assert.Equal(t, req.Host, proxyHeader.Get("Host"))
		assert.Equal(t, "Foobar header test", proxyHeader.Get("Foobar"))
		assert.Equal(t, "gzip;q=0,deflate;q=0", proxyHeader.Get("Accept-Encoding"))
		assert.Equal(t, req.RemoteAddr, proxyHeader.Get("X-Forwarded-For"))
		assert.Equal(t, test.restURL, proxyReq.URL.String())
		if test.errFunc != nil {
			// Body-read failure surfaces as a 500 with the error text.
			assert.Equal(t, test.response+"\n", string(data))
			assert.Equal(t, http.StatusInternalServerError, rwr.Code)
		} else {
			if test.errText != "" {
				// Upstream failure surfaces as a 502 with the error text.
				assert.Equal(t, test.errText+"\n", string(data))
				assert.Equal(t, http.StatusBadGateway, rwr.Code)
			} else {
				assert.Equal(t, test.response, string(data), err)
				assert.Equal(t, http.StatusOK, rwr.Code)
			}
		}
	}
}
// TestProxyHandler tests Proxy constructor: it checks that ProxyHandler
// returns an http.HandlerFunc and that requests map to the expected codes.
func TestProxyHandler(t *testing.T) {
	proxyRoute := &ProxyRoute{
		Prefix: "/prefix", RedirectURL: "http://redirect",
	}
	handler := ProxyHandler(proxyRoute.Prefix, proxyRoute.RedirectURL)
	switch v := handler.(type) {
	case http.HandlerFunc:
		log.Println("ProxyHandler is an http.HandlerFunc")
	default:
		msg := fmt.Sprintf("%v is not http.HandlerFunc", v)
		assert.Fail(t, msg)
	}
	tests := []struct {
		requestURL   string
		expectedCode int
	}{
		{"", http.StatusBadRequest},
		{"http://test/foo", http.StatusBadGateway},
	}
	for idx, test := range tests {
		req, _ := http.NewRequest("GET", test.requestURL, nil)
		rwr := httptest.NewRecorder()
		log.Printf("Test %2d: %s\n", idx, test.requestURL)
		// NOTE(review): both ServeHTTP calls write into the same recorder;
		// a recorder's Code is fixed by the first WriteHeader call, so the
		// second invocation cannot change it — confirm this is intentional.
		proxyRoute.ServeHTTP(rwr, req)
		handler.ServeHTTP(rwr, req)
		// log.Printf("Test %2d - request: %s - %#v\n", idx, req.URL, req)
		// log.Printf("Test %2d - response: %#v\n", idx, rwr)
		assert.Equal(t, test.expectedCode, rwr.Code)
	}
}
|
package testing
import (
"context"
"time"
"github.com/cloudfoundry/metric-store-release/src/pkg/persistence/transform"
rpc "github.com/cloudfoundry/metric-store-release/src/pkg/rpc/metricstore_v1"
"github.com/prometheus/prometheus/pkg/labels"
"github.com/prometheus/prometheus/storage"
)
// SpyDataReader is a test double for a data reader: it records the time
// bounds of each Read call and replays pre-queued results, errors and
// label responses configured by the test.
type SpyDataReader struct {
	ReadStarts  []int64              // start timestamps observed per Read call
	ReadEnds    []int64              // end timestamps observed per Read call
	ReadResults []*rpc.PromQL_Matrix // queued results, consumed one per Read
	ReadErrs    []error              // queued errors, consumed in lockstep with ReadResults
	LabelsResponse *rpc.PromQL_LabelsQueryResult
	LabelsError    error
	LabelValuesResponse *rpc.PromQL_LabelValuesQueryResult
	LabelValuesError    error
}
// Read records the query's time bounds and replays the next queued
// result/error pair. It panics loudly on test-setup mistakes (queues out
// of sync or exhausted).
func (s *SpyDataReader) Read(ctx context.Context, params *storage.SelectParams, labelMatchers ...*labels.Matcher) (storage.SeriesSet, error) {
	s.ReadStarts = append(s.ReadStarts, params.Start)
	s.ReadEnds = append(s.ReadEnds, params.End)
	switch {
	case len(s.ReadResults) != len(s.ReadErrs):
		panic("readResults and readErrs are out of sync")
	case len(s.ReadResults) == 0:
		panic("there are no more ReadResults to provide, please add in setup")
	}
	// Pop the head of both queues in lockstep.
	result, err := s.ReadResults[0], s.ReadErrs[0]
	s.ReadResults, s.ReadErrs = s.ReadResults[1:], s.ReadErrs[1:]
	builder := transform.NewSeriesBuilder()
	for _, series := range result.GetSeries() {
		builder.AddPromQLSeries(series)
	}
	// Give ourselves some time to capture runtime metrics
	time.Sleep(time.Millisecond)
	return builder.SeriesSet(), err
}
// Labels returns the canned labels response and error configured on the spy.
func (s *SpyDataReader) Labels(ctx context.Context, in *rpc.PromQL_LabelsQueryRequest) (*rpc.PromQL_LabelsQueryResult, error) {
	return s.LabelsResponse, s.LabelsError
}

// LabelValues returns the canned label-values response and error configured
// on the spy.
func (s *SpyDataReader) LabelValues(ctx context.Context, in *rpc.PromQL_LabelValuesQueryRequest) (*rpc.PromQL_LabelValuesQueryResult, error) {
	return s.LabelValuesResponse, s.LabelValuesError
}

// NewSpyDataReader constructs an empty SpyDataReader.
func NewSpyDataReader() *SpyDataReader {
	return &SpyDataReader{}
}
|
package internal
import (
"encoding/json"
"fmt"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
)
// ContainerUserName is the username of the user created in the container
const ContainerUserName = "ahab"

// Container contains all information regarding a container's configuration
type Container struct {
	Fields   *Configuration // parsed contents of the container's config file
	FilePath string         // path to the config file that defined this container
}
// GetContainer retrieves all container info relative to the working directory:
// it locates the config file, parses it as JSON into Configuration, validates
// it, and checks the config's declared tool version.
func GetContainer() (*Container, error) {
	curDir, err := os.Getwd()
	if err != nil {
		return nil, fmt.Errorf("Failed to get working directory: %s", err)
	}
	container := new(Container)
	// Search upward from the working directory for the config file.
	container.FilePath, err = findConfigPath(curDir)
	if err != nil {
		return nil, fmt.Errorf("Failed to find config file: %s", err)
	}
	configFile, err := os.Open(container.FilePath)
	if err != nil {
		return nil, fmt.Errorf("Failed to open config file '%s': %s", container.FilePath, err)
	}
	defer configFile.Close()
	decoder := json.NewDecoder(configFile)
	if err = decoder.Decode(&container.Fields); err != nil {
		return nil, fmt.Errorf("Failed to parse config file '%s': %s", container.FilePath, err)
	}
	if err := container.Fields.validateConfig(); err != nil {
		return nil, fmt.Errorf("Config file '%s' invalid: %s", container.FilePath, err)
	}
	// The container is usable even if the version check errors; the caller
	// receives both the container and any version mismatch error.
	return container, checkConfigVersion(container.Fields.AhabVersion)
}
// Cmd runs a container command in the form docker [command] [container name]
func (container *Container) Cmd(command string) error {
	args := []string{command, container.Name()}
	return DockerCmd(&args)
}
// Create creates & prepares the container, leaving at "created" status if
// start is false. It builds the docker create invocation, optionally starts
// the container to set up user permissions, runs any configured init
// commands, then stops or restarts the container as requested.
func (container *Container) Create(startContainer bool) error {
	launchOpts, err := container.creationOpts()
	if err != nil {
		return err
	}
	launchOpts = append([]string{"create", "-t"}, launchOpts...)
	if container.Fields.Command != "" {
		launchOpts = append(launchOpts, container.Fields.Command)
	} else {
		// No command configured: keep the container alive with a busy loop.
		launchOpts = append(launchOpts, "top", "-b")
	}
	if err = DockerCmd(&launchOpts); err != nil {
		return err
	}
	// set up user permissions and start container
	if !container.Fields.Permissions.Disable {
		if err = container.Cmd("start"); err != nil {
			return err
		}
		if err = container.prep(); err != nil {
			return err
		}
	} else if startContainer || len(container.Fields.Init) > 0 {
		if err := container.Cmd("start"); err != nil {
			return err
		}
	}
	// execute init commands - if there are any, the container will have been started above
	for _, initCmd := range container.Fields.Init {
		initCmdSplit := rootExec(container, strings.Split(initCmd, " ")...)
		if err := DockerCmd(&initCmdSplit); err != nil {
			// Fix: the original returned nil here, silently swallowing
			// failures of init commands.
			return err
		}
	}
	// optionally stop/restart initial process after setup
	if s, _ := container.Status(); !startContainer && s == 3 {
		return container.Cmd("stop")
	} else if container.Fields.RestartAfterSetup {
		return container.Cmd("restart")
	}
	return nil
}
// Down stops and removes the container
func (container *Container) Down() (err error) {
	status, err := container.Status()
	if err != nil {
		return
	}
	switch status {
	case 2, 3, 5: // restarting, running, paused: stop before removing
		if err = container.Cmd("stop"); err == nil {
			err = container.Cmd("rm")
		}
	case 1, 6, 7: // created, exited, dead: safe to remove directly
		err = container.Cmd("rm")
	}
	return
}
// Name fetches the container name: the explicitly configured name if set,
// otherwise a name derived from the config file's directory path.
func (container *Container) Name() string {
	if explicit := container.Fields.Name; explicit != "" {
		return explicit
	}
	dir := strings.TrimPrefix(filepath.Dir(container.FilePath), "/")
	return strings.ReplaceAll(dir, "/", "_")
}
// Prop fetches a container property using Docker (docker inspect -f).
func (container *Container) Prop(fieldID string) (string, error) {
	args := []string{"inspect", "-f", "{{." + fieldID + "}}", container.Name()}
	output, err := DockerOutput(&args)
	// Exit code 1 means the container does not exist: report empty, not error.
	if exitErr, ok := err.(*exec.ExitError); ok && exitErr.ExitCode() == 1 {
		return "", nil
	}
	if err != nil {
		return "", err
	}
	return strings.Trim(string(output), " \n"), nil
}
// Status returns a code corresponding to the status of the container
// 0 - not found
// 1 - created
// 2 - restarting
// 3 - running
// 4 - removing
// 5 - paused
// 6 - exited
// 7 - dead
func (container *Container) Status() (int, error) {
	status, err := container.Prop("State.Status")
	if err != nil {
		return 0, fmt.Errorf("Error checking container status: %s", err)
	}
	// Map docker's textual state to the numeric code documented above.
	codes := map[string]int{
		"":           0,
		"created":    1,
		"restarting": 2,
		"running":    3,
		"removing":   4,
		"paused":     5,
		"exited":     6,
		"dead":       7,
	}
	if code, ok := codes[status]; ok {
		return code, nil
	}
	return 0, fmt.Errorf("Unexpected container status: %s", status)
}
// Up creates and starts the container
func (container *Container) Up() error {
	status, err := container.Status()
	if err != nil {
		return err
	}
	switch status {
	case 0: // not found: create from scratch and start
		return container.Create(true)
	case 1, 6, 7: // created, exited, dead
		return container.Cmd("start")
	case 5: // paused
		return container.Cmd("unpause")
	}
	// Already restarting/running/removing: nothing to do.
	return nil
}
// return a slice of options used when creating the container: docker create
// flags assembled from the container's config, the user's global config,
// environment variables, volumes, entrypoint, workdir, hostname, display
// sharing, and finally the container name and image.
func (container *Container) creationOpts() (opts []string, err error) {
	userConfig, err := UserConfig()
	if err != nil {
		return
	}
	// initial (idle) process runs as root user
	opts = []string{"-u", "root"}
	// user-specified options
	opts = append(opts, expandEnvs(&container.Fields.Options)...)
	opts = append(opts, expandEnvs(&userConfig.Options)...)
	// environment: container-level variables first, then user-level ones
	envStrings := expandEnvs(&container.Fields.Environment)
	envStrings = append(envStrings, expandEnvs(&userConfig.Environment)...)
	for _, envString := range envStrings {
		opts = append(opts, "-e", envString)
	}
	// volumes (container config first, then user config)
	for _, vol := range container.Fields.Volumes {
		volString, err := prepVolumeString(vol, container.FilePath)
		if err != nil {
			return nil, err
		}
		opts = append(opts, "-v", volString)
	}
	for _, vol := range userConfig.Volumes {
		volString, err := prepVolumeString(vol, container.FilePath)
		if err != nil {
			return nil, err
		}
		opts = append(opts, "-v", volString)
	}
	// entrypoint
	if container.Fields.Entrypoint != "" {
		entrypointPath, err := prepVolumeString(container.Fields.Entrypoint, container.FilePath)
		if err != nil {
			return nil, err
		}
		opts = append(opts, []string{"--entrypoint", entrypointPath}...)
	}
	// workdir
	if container.Fields.Workdir != "" {
		opts = append(opts, "-w", os.ExpandEnv(container.Fields.Workdir))
	}
	// hostname
	if container.Fields.Hostname != "" {
		opts = append(opts, "-h", os.ExpandEnv(container.Fields.Hostname))
	} else {
		// hostname = name of parent dir of the container file
		opts = append(opts, "-h", filepath.Base(filepath.Dir(container.FilePath)))
	}
	// display sharing
	if container.Fields.ShareDisplay {
		// get and append options
		opts = append(opts, displayOptions()...)
		// for x11 sessions, run xhost command to set Docker xserver permissions
		if DisplaySessionType() == "x11" {
			if err := DockerXHostAuth(); err != nil {
				return nil, err
			}
		}
	}
	// container name and image (building the image first if necessary)
	imageName, err := container.prepImage()
	if err != nil {
		return nil, err
	}
	return append(opts, "--name", container.Name(), imageName), err
}
// prepImage resolves the image to launch: either the configured image URI
// (with environment expansion), or an image built from the configured
// Dockerfile. Exactly one of `image` or `dockerfile` must be set.
func (container *Container) prepImage() (imageString string, err error) {
	if container.Fields.ImageURI != "" {
		imageString = os.ExpandEnv(container.Fields.ImageURI)
	} else if container.Fields.Dockerfile != "" {
		dockerfilePath, dfileErr := prepVolumeString(container.Fields.Dockerfile, container.FilePath)
		if dfileErr != nil {
			return "", dfileErr
		}
		_, dfileErr = os.Stat(dockerfilePath)
		if dfileErr != nil {
			return "", fmt.Errorf("Dockerfile not found: '%s'", dockerfilePath)
		}
		// Image tag derived from container name + Dockerfile name; docker
		// requires lowercase tags.
		imageString = strings.ToLower(container.Name() + "_" + filepath.Base(dockerfilePath))
		// Build context defaults to the directory holding the config file.
		dockerBuildCtx := container.Fields.BuildContext
		if dockerBuildCtx == "" {
			dockerBuildCtx = filepath.Dir(container.FilePath)
		}
		err = DockerCmd(&[]string{"build", "-t", imageString, "-f", dockerfilePath, dockerBuildCtx})
	} else {
		err = fmt.Errorf("Either `image` or `dockerfile` must be present")
	}
	return
}
// run commands to prepare users/permissions in the container: creates the
// non-root container user (matching the host UID), sets up its home
// directory permissions, and creates/joins the configured groups using the
// command set appropriate for the container's distro ("default" or "busybox").
func (container *Container) prep() error {
	homeDir := "/home/" + ContainerUserName
	// Reuse the host user's UID so bind-mounted files stay accessible.
	uid := strconv.Itoa(os.Getuid())
	// commands which need to be executed after user creation
	// not using defer because it complicates error throwing
	extraCmds := [][]string{
		rootExec(container, "chown", ContainerUserName+":", homeDir),
		rootExec(container, "chmod", "700", homeDir),
	}
	// split out groups marked as "new" by a prefixed !, create them, and add the user (deferred)
	groups, newGroups := splitGroups(&container.Fields.Permissions.Groups)
	// create non-root user
	userAddCmd := rootExec(container)
	switch container.Fields.Permissions.CmdSet {
	case "", "default":
		// Standard shadow-utils tooling (useradd/groupadd/usermod).
		userAddCmd = append(userAddCmd, []string{"useradd", "-o", "-m", "-d", homeDir}...)
		if len(groups) != 0 {
			userAddCmd = append(userAddCmd, []string{"-G", strings.Join(groups, ",")}...)
		}
		for _, group := range newGroups {
			extraCmds = append(extraCmds, rootExec(container, "groupadd", group))
			extraCmds = append(extraCmds, rootExec(container, "usermod", "-G", group, ContainerUserName))
		}
	case "busybox":
		// BusyBox variants (adduser/addgroup) used by minimal images.
		userAddCmd = append(userAddCmd, []string{"adduser", "-D", "-h", homeDir}...)
		for _, group := range newGroups {
			extraCmds = append(extraCmds, rootExec(container, "addgroup", group))
			extraCmds = append(extraCmds, rootExec(container, "addgroup", ContainerUserName, group))
		}
		for _, group := range groups {
			extraCmds = append(extraCmds, rootExec(container, "addgroup", ContainerUserName, group))
		}
	default:
		return fmt.Errorf("Unsupported command set specified in container: %s", container.Fields.Permissions.CmdSet)
	}
	userAddCmd = append(userAddCmd, []string{"-u", uid, ContainerUserName}...)
	if err := DockerCmd(&userAddCmd); err != nil {
		return err
	}
	// run post-user-creation commands
	for _, userCmd := range extraCmds {
		if err := DockerCmd(&userCmd); err != nil {
			return err
		}
	}
	return nil
}
|
package limiter
import (
"context"
"math"
"net/http"
"testing"
"time"
"github.com/m-zajac/goprojectdemo/internal/mock"
)
// TestLimitedHTTPDoerRate checks that the rate-limited doer performs
// roughly maxRate calls per second over a short sampling window (±10%).
func TestLimitedHTTPDoerRate(t *testing.T) {
	const testTime = 200 * time.Millisecond
	maxRate := 500.0
	limitedDoer := NewHTTPDoer(&mock.HTTPDoer{}, maxRate)
	req, _ := http.NewRequest(http.MethodGet, "fakeurl", nil)
	deadline := time.Now().Add(testTime)
	dos := 0
	for time.Now().Before(deadline) {
		if _, err := limitedDoer.Do(req); err != nil {
			t.Fatalf("Do() returned error: %v", err)
		}
		dos++
	}
	expectedDos := maxRate * float64(testTime) / float64(time.Second)
	if diff := math.Abs(float64(dos)-expectedDos) / expectedDos; diff > 0.1 {
		t.Errorf("unexpected number of Dos: %d, want %d", dos, int(expectedDos))
	}
}
// TestLimitedHTTPDoerTimeout verifies that a context deadline shorter than
// the rate-limiter's wait causes the second call to fail.
func TestLimitedHTTPDoerTimeout(t *testing.T) {
	limitedDoer := NewHTTPDoer(&mock.HTTPDoer{}, 1)
	req, _ := http.NewRequest(http.MethodGet, "fakeurl", nil)
	ctx, cancel := context.WithTimeout(req.Context(), 10*time.Millisecond)
	defer cancel()
	req = req.WithContext(ctx)
	if _, err := limitedDoer.Do(req); err != nil {
		t.Fatalf("first Do() returned error: %v", err)
	}
	// Error is expected because of short ctx timeout and low rate limit.
	if _, err := limitedDoer.Do(req); err == nil {
		t.Fatal("second Do() didn't return error")
	}
}
|
package hems
import (
"errors"
"strings"
"github.com/evcc-io/evcc/core/site"
"github.com/evcc-io/evcc/hems/ocpp"
"github.com/evcc-io/evcc/hems/semp"
"github.com/evcc-io/evcc/server"
)
// HEMS describes the HEMS system interface: a home energy management
// backend that runs until its connection or process ends.
type HEMS interface {
	Run()
}
// NewFromConfig creates new HEMS from config. The type string is matched
// case-insensitively; unknown types yield an error.
func NewFromConfig(typ string, other map[string]interface{}, site site.API, httpd *server.HTTPd) (HEMS, error) {
	name := strings.ToLower(typ)
	switch name {
	case "sma", "shm", "semp":
		return semp.New(other, site, httpd)
	case "ocpp":
		return ocpp.New(other, site)
	}
	return nil, errors.New("unknown hems: " + typ)
}
|
package machine
import (
"errors"
"fmt"
"net/http"
"sync"
"time"
"github.com/gorilla/websocket"
//"github.com/aglyzov/log15"
Log "github.com/sirupsen/logrus"
)
//var Log = log15.New("pkg", "machine")
// State enumerates the connection states reported on the Status channel.
type State byte

// Command enumerates the control commands accepted on the Command channel.
type Command byte

type (
	// Machine wraps a websocket connection with channel-based I/O:
	// messages arrive on Input, are sent via Output, state changes are
	// reported on Status, and Command accepts control commands.
	Machine struct {
		URL     string
		Headers http.Header
		Input   <-chan []byte
		Output  chan<- []byte
		Status  <-chan Status
		Command chan<- Command
	}
	// Status pairs a State with the error (if any) that caused it.
	Status struct {
		State State
		Error error
	}
)

const (
	// states
	DISCONNECTED State = iota
	CONNECTING
	CONNECTED
	WAITING
)

const (
	// commands; offset by 16 so command values never overlap state values
	QUIT Command = 16 + iota
	PING
	USE_TEXT
	USE_BINARY
)
// init is retained as the hook where the (currently disabled) log15
// handler configuration used to live; with logrus it is a no-op.
func init() {
	// disable the logger by default
	//Log.SetHandler(log15.DiscardHandler())
}
// String returns a human-readable name for the state.
func (s State) String() string {
	switch s {
	case DISCONNECTED:
		return "DISCONNECTED"
	case CONNECTING:
		return "CONNECTING"
	case CONNECTED:
		return "CONNECTED"
	case WAITING:
		return "WAITING"
	}
	// Format the underlying byte, not s itself: passing s to %v would call
	// this String method again and recurse until stack overflow for any
	// unknown state value.
	return fmt.Sprintf("UNKNOWN STATUS %v", byte(s))
}
// String returns a human-readable name for the command.
func (c Command) String() string {
	switch c {
	case QUIT:
		return "QUIT"
	case PING:
		return "PING"
	case USE_TEXT:
		return "USE_TEXT"
	case USE_BINARY:
		return "USE_BINARY"
	}
	// Format the underlying byte, not c itself: passing c to %v would call
	// this String method again and recurse until stack overflow for any
	// unknown command value.
	return fmt.Sprintf("UNKNOWN COMMAND %v", byte(c))
}
// New creates a websocket state Machine for the given url and headers.
//
// The machine dials the endpoint, transparently reconnects on errors, and
// exposes the connection only through channels:
//
//	Input:   messages received from the peer
//	Output:  messages to send to the peer
//	Status:  state transitions (CONNECTING/CONNECTED/WAITING/DISCONNECTED)
//	Command: control commands (QUIT, PING, USE_TEXT, USE_BINARY)
//
// Sending QUIT (or closing the Command channel) shuts the machine down;
// Input and Status are closed once cleanup finishes.
func New(url string, headers http.Header) *Machine {
	// channels exposed to the caller
	inp_ch := make(chan []byte, 8)
	out_ch := make(chan []byte, 8)
	sts_ch := make(chan Status, 2)
	cmd_ch := make(chan Command, 2)

	// internal plumbing between the connect/read/write/keep_alive goroutines
	con_return_ch := make(chan *websocket.Conn, 1)
	con_cancel_ch := make(chan bool, 1)
	r_error_ch := make(chan error, 1)
	w_error_ch := make(chan error, 1)
	w_control_ch := make(chan Command, 1)
	io_event_ch := make(chan bool, 2)

	var wg sync.WaitGroup

	// connect dials in a loop until success or cancellation, reporting each
	// transition on sts_ch. NOTE: callers must wg.Add(1) before `go connect()`
	// (doing the Add inside the goroutine races with wg.Wait in cleanup).
	connect := func() {
		defer wg.Done()
		Log.Debug("connect has started")
		for {
			sts_ch <- Status{State: CONNECTING}
			dialer := websocket.Dialer{HandshakeTimeout: 5 * time.Second}
			conn, _, err := dialer.Dial(url, headers)
			if err == nil {
				// a pong counts as I/O activity for the keep-alive timer
				conn.SetPongHandler(func(string) error { io_event_ch <- true; return nil })
				con_return_ch <- conn
				sts_ch <- Status{State: CONNECTED}
				return
			}
			Log.Debug("connect error", "err", err)
			sts_ch <- Status{DISCONNECTED, err}
			sts_ch <- Status{State: WAITING}
			select {
			case <-time.After(34 * time.Second):
			case <-con_cancel_ch:
				sts_ch <- Status{DISCONNECTED, errors.New("cancelled")}
				return
			}
		}
	}

	// keep_alive requests a PING whenever no I/O happened for 34 seconds.
	// It exits when io_event_ch is closed.
	keep_alive := func() {
		defer wg.Done()
		Log.Debug("keep_alive has started")
		dur := 34 * time.Second
		timer := time.NewTimer(dur)
		timer.Stop()
	loop:
		for {
			select {
			case _, ok := <-io_event_ch:
				if ok {
					timer.Reset(dur)
				} else {
					timer.Stop()
					break loop
				}
			case <-timer.C:
				timer.Reset(dur)
				// non-blocking PING request
				select {
				case w_control_ch <- PING:
				default:
				}
			}
		}
	}

	// read pumps incoming messages into inp_ch until the connection fails
	// (closing the connection is how the main loop stops this goroutine).
	read := func(conn *websocket.Conn) {
		defer wg.Done()
		Log.Debug("read has started")
		for {
			if _, msg, err := conn.ReadMessage(); err == nil {
				Log.Debug("received message", "msg", string(msg))
				io_event_ch <- true
				inp_ch <- msg
			} else {
				Log.Debug("read error", "err", err)
				r_error_ch <- err
				break
			}
		}
	}

	// write pumps outgoing messages from out_ch to the connection and
	// services control commands (QUIT/PING/USE_TEXT/USE_BINARY).
	write := func(conn *websocket.Conn, msg_type int) {
		defer wg.Done()
		Log.Debug("write has started")
	loop:
		for {
			select {
			case msg, ok := <-out_ch:
				if ok {
					io_event_ch <- true
					if err := conn.SetWriteDeadline(time.Now().Add(3 * time.Second)); err != nil {
						w_error_ch <- err
						break loop
					}
					if err := conn.WriteMessage(msg_type, msg); err != nil {
						w_error_ch <- err
						break loop
					}
					conn.SetWriteDeadline(time.Time{}) // reset write deadline
				} else {
					Log.Debug("write error", "err", "out_ch closed")
					w_error_ch <- errors.New("out_ch closed")
					break loop
				}
			case cmd, ok := <-w_control_ch:
				if !ok {
					w_error_ch <- errors.New("w_control_ch closed")
					break loop
				}
				switch cmd {
				case QUIT:
					Log.Debug("write received QUIT command")
					w_error_ch <- errors.New("cancelled")
					break loop
				case PING:
					if err := conn.WriteControl(websocket.PingMessage, []byte{}, time.Now().Add(3*time.Second)); err != nil {
						Log.Debug("ping error", "err", err)
						w_error_ch <- errors.New("cancelled")
						break loop
					}
				case USE_TEXT:
					msg_type = websocket.TextMessage
				case USE_BINARY:
					msg_type = websocket.BinaryMessage
				}
			}
		}
	}

	go func() {
		// local state
		var conn *websocket.Conn
		reading := false
		writing := false
		msg_type := websocket.BinaryMessage // use Binary messages by default

		defer func() {
			Log.Debug("cleanup has started")
			if conn != nil {
				conn.Close() // this also makes the reader exit
			}
			// close local output channels
			close(con_cancel_ch) // this makes connect exit
			close(w_control_ch)  // this makes write exit
			close(io_event_ch)   // this makes keep_alive exit
			// drain input channels
			<-time.After(50 * time.Millisecond) // small pause to let things react
		drain_loop:
			for {
				select {
				case _, ok := <-out_ch:
					if !ok {
						out_ch = nil
					}
				case _, ok := <-cmd_ch:
					if !ok {
						// BUG FIX: this used to set inp_ch = nil, which (a) left
						// the closed cmd_ch永 selected so the drain loop spun
						// forever, and (b) made close(inp_ch) below panic on a
						// nil channel. Disable the cmd_ch case instead.
						cmd_ch = nil
					}
				case c, ok := <-con_return_ch:
					if c != nil {
						c.Close()
					}
					if !ok {
						con_return_ch = nil
					}
				case _, ok := <-r_error_ch:
					if !ok {
						r_error_ch = nil
					}
				case _, ok := <-w_error_ch:
					if !ok {
						w_error_ch = nil
					}
				default:
					break drain_loop
				}
			}
			// wait for all goroutines to stop
			wg.Wait()
			// close output channels
			close(inp_ch)
			close(sts_ch)
		}()

		Log.Debug("main loop has started")
		// BUG FIX: wg.Add must happen before the goroutine starts, not inside
		// it, otherwise cleanup's wg.Wait can run before the Add executes.
		wg.Add(1)
		go connect()
		wg.Add(1)
		go keep_alive()

	main_loop:
		for {
			select {
			case conn = <-con_return_ch:
				if conn == nil {
					break main_loop
				}
				Log.Debug("connected", "local", conn.LocalAddr(), "remote", conn.RemoteAddr())
				reading = true
				writing = true
				wg.Add(2)
				go read(conn)
				go write(conn, msg_type)
			case err := <-r_error_ch:
				reading = false
				if writing {
					// write goroutine is still active
					Log.Debug("read error -> stopping write")
					w_control_ch <- QUIT // ask write to exit
					sts_ch <- Status{DISCONNECTED, err}
				} else {
					// both read and write goroutines have exited
					Log.Debug("read error -> starting connect()")
					if conn != nil {
						conn.Close()
						conn = nil
					}
					wg.Add(1)
					go connect()
				}
			case err := <-w_error_ch:
				// write goroutine has exited
				writing = false
				if reading {
					// read goroutine is still active
					Log.Debug("write error -> stopping read")
					if conn != nil {
						conn.Close() // this also makes read exit
						conn = nil
					}
					sts_ch <- Status{DISCONNECTED, err}
				} else {
					// both read and write goroutines have exited
					Log.Debug("write error -> starting connect()")
					wg.Add(1)
					go connect()
				}
			case cmd, ok := <-cmd_ch:
				if ok {
					Log.Debug("received command", "cmd", cmd)
				}
				switch {
				case !ok || cmd == QUIT:
					if reading || writing || conn != nil {
						sts_ch <- Status{DISCONNECTED, nil}
					}
					break main_loop // defer should clean everything up
				case cmd == PING:
					if conn != nil && writing {
						w_control_ch <- cmd
					}
				case cmd == USE_TEXT:
					msg_type = websocket.TextMessage
					if writing {
						w_control_ch <- cmd
					}
				case cmd == USE_BINARY:
					msg_type = websocket.BinaryMessage
					if writing {
						w_control_ch <- cmd
					}
				default:
					panic(fmt.Sprintf("unsupported command: %v", cmd))
				}
			}
		}
	}()

	return &Machine{url, headers, inp_ch, out_ch, sts_ch, cmd_ch}
}
|
/*
* OFAC API
*
* OFAC (Office of Foreign Assets Control) API is designed to facilitate the enforcement of US government economic sanctions programs required by federal law. This project implements a modern REST HTTP API for companies and organizations to obey federal law and use OFAC data in their applications.
*
* API version: v1
* Generated by: OpenAPI Generator (https://openapi-generator.tech)
*/
package openapi
// Physical address from OFAC list.
// Generated model; Match is the search-match score for this address.
type Address struct {
	EntityID                    string  `json:"entityID,omitempty"`
	AddressID                   string  `json:"addressID,omitempty"`
	Address                     string  `json:"address,omitempty"`
	CityStateProvincePostalCode string  `json:"cityStateProvincePostalCode,omitempty"`
	Country                     string  `json:"country,omitempty"`
	Match                       float32 `json:"match,omitempty"`
}
|
package main
import "fmt"
import "github.com/kovetskiy/lorg"
import "github.com/kovetskiy/spinner-go"
import "os"
// getLogger builds a lorg logger whose output is prefixed with the
// left-aligned, bracketed log level.
func getLogger() *lorg.Log {
	log := lorg.NewLog()
	log.SetFormat(lorg.NewFormat("${level:[%s]:left:true} %s"))
	return log
}
// fatalf stops the spinner if it is running, writes the formatted message
// (plus newline) to stderr and terminates the process with exit code 1.
func fatalf(format string, values ...interface{}) {
	if spinner.IsActive() {
		spinner.Stop()
	}
	fmt.Fprintf(os.Stderr, format+"\n", values...)
	os.Exit(1)
}
// fatalln is fatalf for a single value formatted with %s.
func fatalln(value interface{}) {
	fatalf("%s", value)
}
// debugf logs a formatted message at debug level on the package logger.
func debugf(format string, values ...interface{}) {
	logger.Debugf(format, values...)
}
// tracef logs a formatted message at trace level on the package logger.
func tracef(format string, values ...interface{}) {
	logger.Tracef(format, values...)
}
// debugln is debugf for a single value formatted with %s.
func debugln(value interface{}) {
	debugf("%s", value)
}
// traceln is tracef for a single value formatted with %s.
func traceln(value interface{}) {
	tracef("%s", value)
}
|
package job
import (
"context"
"time"
"github.com/mylxsw/adanos-alert/internal/repository"
"github.com/mylxsw/adanos-alert/pkg/misc"
"github.com/mylxsw/asteria/log"
"github.com/mylxsw/glacier/infra"
"go.mongodb.org/mongo-driver/bson/primitive"
)
// RecoveryJobName is the name under which this job is registered.
const RecoveryJobName = "recovery"

// RecoveryJob turns recoverable events into recovery events on each run.
type RecoveryJob struct {
	app       infra.Resolver
	executing chan interface{} // indicates whether this job is currently executing
}
// NewRecoveryJob builds a RecoveryJob backed by the given resolver. The
// one-slot executing channel acts as a non-blocking "already running" flag.
func NewRecoveryJob(app infra.Resolver) *RecoveryJob {
	job := &RecoveryJob{
		app:       app,
		executing: make(chan interface{}, 1),
	}
	return job
}
// Handle runs one round of the recovery job: it loads events that have
// become recoverable and publishes a recovery event for each, based on the
// last referenced event. If a previous invocation is still running, this
// round is skipped (the buffered executing channel acts as a try-lock).
func (a *RecoveryJob) Handle() {
	select {
	case a.executing <- struct{}{}:
		defer func() { <-a.executing }()
		a.app.MustResolve(func(recoveryRepo repository.RecoveryRepo, eventRepo repository.EventRepo) {
			events, err := recoveryRepo.RecoverableEvents(context.TODO(), time.Now())
			if err != nil {
				log.Errorf("query recoverable events from mongodb failed: %v", err)
				return
			}
			for _, m := range events {
				(func(m repository.Recovery) {
					defer func() {
						if err := recover(); err != nil {
							log.With(m).Errorf("add recovery event failed: %v", err)
						} else {
							// only drop the recovery record when processing
							// finished without panicking
							if err := recoveryRepo.Delete(context.TODO(), m.RecoveryID); err != nil {
								log.With(m).Errorf("remove recovery event from mongodb failed: %v", err)
							}
						}
					}()
					if len(m.RefIDs) == 0 {
						return
					}
					// the most recent referenced event serves as the template
					msgSample, err := eventRepo.Get(m.RefIDs[len(m.RefIDs)-1])
					if err != nil {
						log.With(m).Errorf("get recovery event sample failed: %v", err)
						// BUG FIX: bail out instead of publishing a recovery
						// event built from an invalid/zero sample
						return
					}
					msgSample.Type = repository.EventTypeRecovery
					msgSample.ID = primitive.NilObjectID
					msgSample.GroupID = nil
					msgSample.CreatedAt = time.Now()
					msgSample.Status = ""
					msgSample.Meta["recovery-refs"] = m.RefIDs
					msgSample.Tags = append(misc.IfElse(
						msgSample.Tags == nil,
						make([]string, 0),
						msgSample.Tags,
					).([]string), "adanos-recovery")
					if _, err := eventRepo.AddWithContext(context.TODO(), msgSample); err != nil {
						log.With(m).Errorf("add recovery event failed: %v", err)
					}
				})(m)
			}
		})
	default:
		log.Warningf("the last recovery job is not finished yet, skip for this time")
	}
}
|
package main
import (
"errors"
"fmt"
"math"
)
// Color identifies a side: RED (1), BLACK (-1) or NONE (0).
type Color int8

// Piece encodes occupancy of a square; the sign carries the color and the
// magnitude distinguishes man (1) from king (2).
type Piece int8

// Direction is the row delta of forward movement for a side.
type Direction int8

const (
	BLACK_KING Piece = -2
	BLACK_MAN  Piece = -1
	EMPTY      Piece = 0
	RED_MAN    Piece = 1
	RED_KING   Piece = 2

	RED   Color = 1
	BLACK Color = -1
	NONE  Color = 0

	// RED moves toward row 0, BLACK toward row SIZE-1.
	RED_FORWARD   Direction = -1
	BLACK_FORWARD Direction = 1
)

// Game holds the board, both players and the move history.
type Game struct {
	board       *Board
	redPlayer   Player
	blackPlayer Player
	moves       []Move
}

// Player wraps the color a participant plays.
type Player struct {
	color Color
}
// Color reports which side the piece belongs to; EMPTY maps to NONE.
func (p Piece) Color() Color {
	switch {
	case p < 0:
		return BLACK
	case p > 0:
		return RED
	default:
		return NONE
	}
}
// PlayDirection returns the forward direction for the player's color. A
// colorless player falls back to -1, matching the original behavior.
func (player *Player) PlayDirection() Direction {
	switch player.color {
	case RED:
		return RED_FORWARD
	case BLACK:
		return BLACK_FORWARD
	}
	// TODO Fail here?
	return -1
}
// PlayDirectionOfPiece returns the forward direction for the given piece,
// or 0 for EMPTY.
func PlayDirectionOfPiece(piece Piece) Direction {
	switch piece {
	case BLACK_MAN, BLACK_KING:
		return BLACK_FORWARD
	case RED_MAN, RED_KING:
		return RED_FORWARD
	default:
		return 0
	}
}
// NewGame resets the board to the standard starting layout: the two middle
// rows and every light square stay empty, all other squares get the
// starting piece for their row.
func (game *Game) NewGame() {
	fmt.Println("Starting new game...")
	for row := int8(0); row < SIZE; row++ {
		for col := int8(0); col < SIZE; col++ {
			piece := EMPTY
			if row != 3 && row != 4 && (row+col)%2 != 0 {
				piece = startPieceForRow(row)
			}
			game.board[row][col] = piece
		}
	}
}
// DoMove applies the move from start to end for player: the piece is moved,
// a jumped piece is captured, and the piece is crowned when the move earns
// a king. Returns an error (and changes nothing) for illegal moves.
func (game *Game) DoMove(start *Square, end *Square, player *Player) error {
	move := &Move{*start, *end, *player}
	moveType, kingMove := game.board.MoveType(move)
	if moveType == ILLEGAL {
		color := "Black"
		if player.color == RED {
			color = "Red"
		}
		return errors.New(fmt.Sprintf("Illegal move attempted by %v player: %v -> %v", color, *start, *end))
	}
	game.board.MovePiece(move)
	if moveType == JUMP {
		game.board.CapturePiece(move)
	}
	if kingMove {
		game.board.MakeKing(&move.finish)
	}
	game.moves = append(game.moves, *move)
	return nil
}
// CapturePiece removes the piece sitting midway between the move's start
// and finish squares, provided it belongs to the opponent. Returns whether
// a piece was actually captured.
func (board *Board) CapturePiece(move *Move) bool {
	row := (move.finish.row + move.start.row) / 2
	col := (move.finish.col + move.start.col) / 2
	victim := board.PieceAtSquare(&Square{row, col})
	if !areOpponents(move.player.color, victim.Color()) {
		return false
	}
	board[row][col] = EMPTY
	return true
}
// MakeKing crowns the piece on square. It reports false when the square is
// empty or the piece is already a king.
func (board *Board) MakeKing(square *Square) bool {
	piece := board.PieceAtSquare(square)
	if piece == EMPTY || isKing(piece) {
		return false
	}
	// Doubling a man's value yields the same-colored king (±1 -> ±2).
	board[square.row][square.col] = piece * 2
	return true
}
// AvailableMoves lists the destination squares reachable from start for
// playerColor: the two forward diagonals, plus the two backward diagonals
// when the piece on start is a king. Order matches the original:
// forward-left, forward-right, then backward-left, backward-right.
func (board *Board) AvailableMoves(start *Square, playerColor Color) []*Square {
	forward := BLACK_FORWARD
	if playerColor == RED {
		forward = RED_FORWARD
	}
	rowDeltas := []int8{int8(forward)}
	if board.KingMove(start, playerColor) {
		rowDeltas = append(rowDeltas, -int8(forward))
	}
	options := make([]*Square, 0)
	for _, dRow := range rowDeltas {
		for _, dCol := range []int8{-1, 1} {
			if dest := board.FindMoveInDirection(dRow, dCol, start, playerColor); dest != nil {
				options = append(options, dest)
			}
		}
	}
	return options
}
// FindMoveInDirection probes one diagonal from start: it returns the
// adjacent square if it is playable and empty, the square beyond it if an
// opponent piece can be jumped, or nil when no move exists that way.
func (board *Board) FindMoveInDirection(dRow int8, dCol int8, start *Square, playerColor Color) *Square {
	adjacent := &Square{start.row + dRow, start.col + dCol}
	if !board.PlayableSquare(adjacent) {
		return nil
	}
	occupant := board.PieceAtSquare(adjacent)
	if occupant == EMPTY {
		return adjacent
	}
	if !areOpponents(playerColor, occupant.Color()) {
		return nil
	}
	jump := &Square{start.row + 2*dRow, start.col + 2*dCol}
	if board.PlayableSquare(jump) && board.PieceAtSquare(jump) == EMPTY {
		return jump
	}
	return nil
}
// MoveType classifies move as SINGLE, JUMP or ILLEGAL, and additionally
// reports whether the move crowns the piece (a non-king reaching the
// opponent's back row).
func (board *Board) MoveType(move *Move) (MoveType, bool) {
	// both endpoints must be playable (dark) squares on the board
	if !board.PlayableSquare(&move.start) || !board.PlayableSquare(&move.finish) {
		return ILLEGAL, false
	}
	startPiece := board.PieceAtSquare(&move.start)
	endPiece := board.PieceAtSquare(&move.finish)
	// the moved piece must belong to the moving player
	if !areTeammates(startPiece.Color(), move.player.color) {
		return ILLEGAL, false
	}
	// the destination must be free
	if endPiece != EMPTY {
		return ILLEGAL, false
	}
	// the move must be diagonal: equal row and column spans
	moveSize := math.Abs(float64(move.start.row - move.finish.row))
	if moveSize != math.Abs(float64(move.start.col-move.finish.col)) {
		return ILLEGAL, false
	}
	var moveType MoveType = ILLEGAL
	if moveSize == 1 {
		moveType = SINGLE
	} else if moveSize == 2 {
		moveType = JUMP
	}
	kingMove := false
	// Check for reaching opponent's back line
	if moveType != ILLEGAL && !isKing(startPiece) {
		playerColor := playerColorOf(startPiece)
		// NOTE(review): converting a Color to a Piece relies on RED==RED_MAN
		// and BLACK==BLACK_MAN sharing values 1/-1 — fragile but correct for
		// the current constants.
		playDirection := PlayDirectionOfPiece(Piece(playerColor))
		if playDirection < 0 && move.finish.row == 0 {
			kingMove = true
		} else if playDirection > 0 && move.finish.row == SIZE-1 {
			kingMove = true
		}
	}
	return moveType, kingMove
}
// ValidMove reports whether move is legal for its player: both squares on
// the board, a piece on start, an empty finish, a forward direction (unless
// the piece is a king), and the finish among the destinations returned by
// AvailableMoves.
func (game *Game) ValidMove(move *Move) bool {
	playerColor := move.player.color
	kingMove := game.board.KingMove(&move.start, playerColor)
	board := game.board
	if !board.ValidSquare(&move.start) || !board.ValidSquare(&move.finish) {
		return false
	}
	if board.PieceAtSquare(&move.start) == EMPTY {
		return false
	} else if board.PieceAtSquare(&move.finish) != EMPTY {
		return false
	}
	if move.Direction() != move.player.PlayDirection() && !kingMove {
		return false
	}
	availableMoves := board.AvailableMoves(&move.start, playerColor)
	for _, option := range availableMoves {
		// BUG FIX: compare square values, not pointers. The original compared
		// &move.finish against freshly allocated option pointers, which can
		// never be equal, so this loop could never return true.
		if move.finish == *option {
			return true
		}
	}
	return false
}
// playerColorOf maps a piece to the side that owns it (NONE for EMPTY).
func playerColorOf(piece Piece) Color {
	switch piece {
	case BLACK_MAN, BLACK_KING:
		return BLACK
	case RED_MAN, RED_KING:
		return RED
	default:
		return NONE
	}
}
// areOpponents reports whether the two colors are strictly opposite sides
// (NONE is nobody's opponent).
func areOpponents(color1, color2 Color) bool {
	return (color1 > NONE && color2 < NONE) || (color1 < NONE && color2 > NONE)
}
// areTeammates reports whether the two colors are the same actual side
// (NONE is nobody's teammate).
func areTeammates(color1, color2 Color) bool {
	return (color1 > NONE && color2 > NONE) || (color1 < NONE && color2 < NONE)
}
// Print renders the current board to stdout.
func (game *Game) Print() {
	game.board.Print()
}
// PrintMoves lists the move history, one numbered line per move with the
// moving side and the start/finish squares.
func (game *Game) PrintMoves() {
	fmt.Println()
	for i, move := range game.moves {
		side := "Red "
		if move.player.color == BLACK {
			side = "Black "
		}
		fmt.Print(i+1, ": ")
		fmt.Print(side)
		fmt.Println(move.start, "->", move.finish)
	}
}
|
package Search_a_2D_Matrix
// searchMatrix reports whether target occurs in a matrix whose rows are
// sorted ascending and where each row's first element is greater than the
// previous row's last. It binary-searches the virtual flattened array.
func searchMatrix(matrix [][]int, target int) bool {
	if len(matrix) == 0 {
		return false
	}
	columns := len(matrix[0])
	lo, hi := 0, len(matrix)*columns-1
	for lo <= hi {
		mid := lo + (hi-lo)/2
		value := matrix[mid/columns][mid%columns]
		switch {
		case value == target:
			return true
		case value > target:
			hi = mid - 1
		default:
			lo = mid + 1
		}
	}
	return false
}
|
// Tags handling
// =================================================
package main
import (
"gopkg.in/yaml.v2"
// "fmt"
"strings"
"log"
"io/ioutil"
"path/filepath"
)
// TagsData mirrors the structure of config.yaml: a map from tag name to
// the list of package names carrying that tag.
type TagsData struct {
	Tags map[string][]string
}

// tagsData holds the tags loaded from config.yaml (see populateTagsMap).
var tagsData TagsData
// populateTagsMap loads tag definitions from <directory>/config.yaml into
// tagsData, keeping only packages that exist in foldersMap's values and
// deleting tags left with no valid packages. All failures are logged and
// the function returns early (best effort, like the original).
func populateTagsMap(foldersMap map[string]string) {
	tagsFile := directory + "/config.yaml"
	if !fileExists(tagsFile) {
		log.Println("config.yaml: file not found. skipping populating tags")
		return
	}
	// BUG FIX: the error from filepath.Abs was silently discarded
	filename, err := filepath.Abs(tagsFile)
	if err != nil {
		log.Println("config.yaml: error resolving path. skipping populating tags:", err)
		return
	}
	yamlFile, err := ioutil.ReadFile(filename)
	if err != nil {
		// include the underlying error so failures are diagnosable
		log.Println("config.yaml: error reading file. skipping populating tags:", err)
		return
	}
	if err = yaml.Unmarshal(yamlFile, &tagsData); err != nil {
		log.Println("config.yaml: error parsing file. skipping populating tags:", err)
		return
	}
	// make empty value map with available packages as keys
	availablePackages := make(map[string]struct{}, len(foldersMap))
	for _, val := range foldersMap {
		availablePackages[val] = struct{}{}
	}
	// show loaded tags
	log.Println("Loading tags from config.yaml file...")
	for tagkey, tagval := range tagsData.Tags {
		log.Println(tagkey + ": " + strings.Join(tagval, ", "))
		// keep only packages that actually exist
		validated := make([]string, 0, len(tagval))
		for _, pack := range tagval {
			if _, ok := availablePackages[pack]; ok {
				validated = append(validated, pack)
			} else {
				log.Println("Warning! Package " + pack + " not found! Skipping.")
			}
		}
		// if no element survived validation delete the tag, otherwise
		// reassign the validated packages
		if len(validated) == 0 {
			delete(tagsData.Tags, tagkey)
		} else {
			tagsData.Tags[tagkey] = validated
		}
	}
}
|
package models
import (
"time"
"github.com/juliotorresmoreno/unravel-server/db"
)
// Profile is the user profile model. Most personal fields come in pairs:
// the value itself and a Permiso* visibility setting restricted to
// private/friends/public by the validator tags.
type Profile struct {
	Id      uint   `xorm:"bigint not null autoincr pk" json:"id"`
	Usuario string `xorm:"varchar(100) not null unique index" valid:"required" json:"usuario"`
	Email   string `xorm:"varchar(200)" valid:"email" json:"email"`
	PermisoEmail string `xorm:"varchar(20)" json:"permiso_email" valid:"matches(^(private|friends|public)$)"`
	NacimientoDia string `xorm:"varchar(2)" json:"nacimiento_dia"`
	NacimientoMes string `xorm:"varchar(2)" json:"nacimiento_mes"`
	PermisoNacimientoDia string `xorm:"varchar(20)" json:"permiso_nacimiento_dia" valid:"matches(^(private|friends|public)$)"`
	NacimientoAno string `xorm:"varchar(4)" json:"nacimiento_ano"`
	PermisoNacimientoAno string `xorm:"varchar(20)" json:"permiso_nacimiento_ano" valid:"matches(^(private|friends|public)$)"`
	Sexo string `xorm:"varchar(4)" json:"sexo"`
	PermisoSexo string `xorm:"varchar(20)" json:"permiso_sexo" valid:"matches(^(private|friends|public)$)"`
	NacimientoPais string `xorm:"varchar(200)" json:"nacimiento_pais"`
	PermisoNacimientoPais string `xorm:"varchar(20)" json:"permiso_nacimiento_pais" valid:"matches(^(private|friends|public)$)"`
	NacimientoCiudad string `xorm:"varchar(200)" json:"nacimiento_ciudad"`
	PermisoNacimientoCiudad string `xorm:"varchar(20)" json:"permiso_nacimiento_ciudad" valid:"matches(^(private|friends|public)$)"`
	ResidenciaPais string `xorm:"varchar(200)" json:"residencia_pais"`
	PermisoResidenciaPais string `xorm:"varchar(20)" json:"permiso_residencia_pais" valid:"matches(^(private|friends|public)$)"`
	ResidenciaCiudad string `xorm:"varchar(200)" json:"residencia_ciudad"`
	PermisoResidenciaCiudad string `xorm:"varchar(20)" json:"permiso_residencia_ciudad" valid:"matches(^(private|friends|public)$)"`
	Direccion string `xorm:"varchar(20)" json:"direccion"`
	PermisoDireccion string `xorm:"varchar(20)" json:"permiso_direccion" valid:"matches(^(private|friends|public)$)"`
	Telefono string `xorm:"varchar(20)" json:"telefono"`
	PermisoTelefono string `xorm:"varchar(20)" json:"permiso_telefono" valid:"matches(^(private|friends|public)$)"`
	Celular string `xorm:"varchar(20)" json:"celular"`
	PermisoCelular string `xorm:"varchar(20)" json:"permiso_celular" valid:"matches(^(private|friends|public)$)"`
	Personalidad string `xorm:"text" json:"personalidad"`
	PermisoPersonalidad string `xorm:"varchar(20)" json:"permiso_personalidad" valid:"matches(^(private|friends|public)$)"`
	Intereses string `xorm:"text" json:"intereses"`
	PermisoIntereses string `xorm:"varchar(20)" json:"permiso_intereses" valid:"matches(^(private|friends|public)$)"`
	Series string `xorm:"text" json:"series"`
	PermisoSeries string `xorm:"varchar(20)" json:"permiso_series" valid:"matches(^(private|friends|public)$)"`
	Musica string `xorm:"text" json:"musica"`
	PermisoMusica string `xorm:"varchar(20)" json:"permiso_musica" valid:"matches(^(private|friends|public)$)"`
	CreenciasReligiosas string `xorm:"text" json:"creencias_religiosas"`
	PermisoCreenciasReligiosas string `xorm:"varchar(20)" json:"permiso_creencias_religiosas" valid:"matches(^(private|friends|public)$)"`
	CreenciasPoliticas string `xorm:"text" json:"creencias_politicas"`
	PermisoCreenciasPoliticas string `xorm:"varchar(20)" json:"permiso_creencias_politicas" valid:"matches(^(private|friends|public)$)"`
	Legenda string `xorm:"varchar(400) not null" json:"legenda"`
	Descripcion string `xorm:"varchar(400) not null" json:"descripcion"`
	// NOTE(review): declared string but mapped to an int column — confirm
	// xorm handles this conversion as intended.
	PrecioHora string `xorm:"int not null" json:"precio_hora" valid:"precio_hora"`
	CreateAt time.Time `xorm:"created" json:"create_at"`
	UpdateAt time.Time `xorm:"updated" json:"update_at"`
}
// TableName sets the database table name the model maps to ("profile").
func (el Profile) TableName() string {
	return "profile"
}
// init synchronizes the Profile schema at package load time.
// NOTE(review): the error returned by Sync2 is ignored, and the engine from
// db.GetXORM is closed right away — if GetXORM returns a shared engine,
// closing it here would break later users. Confirm GetXORM semantics.
func init() {
	var orm = db.GetXORM()
	orm.Sync2(new(Profile))
	orm.Close()
}
|
package controllers
import (
"context"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
marin3rv1alpha1 "github.com/3scale-ops/marin3r/apis/marin3r/v1alpha1"
operatorv1alpha1 "github.com/3scale-ops/marin3r/apis/operator.marin3r/v1alpha1"
)
// Integration tests for the EnvoyBootstrap controller: each spec runs in a
// freshly created namespace and verifies that creating an EnvoyBootstrap
// produces the client certificate and the v2/v3 bootstrap ConfigMaps.
var _ = Describe("EnvoyBootstrap controller", func() {
	var namespace string

	BeforeEach(func() {
		// Create a namespace for each block
		namespace = "test-ns-" + nameGenerator.Generate()
		// Add any setup steps that needs to be executed before each test
		testNamespace := &corev1.Namespace{
			TypeMeta:   metav1.TypeMeta{APIVersion: "v1", Kind: "Namespace"},
			ObjectMeta: metav1.ObjectMeta{Name: namespace},
		}
		err := k8sClient.Create(context.Background(), testNamespace)
		Expect(err).ToNot(HaveOccurred())
		n := &corev1.Namespace{}
		// wait until the namespace is visible through the API server
		Eventually(func() bool {
			err := k8sClient.Get(context.Background(), types.NamespacedName{Name: namespace}, n)
			return err == nil
		}, 60*time.Second, 5*time.Second).Should(BeTrue())
	})

	AfterEach(func() {
		// Delete the namespace
		testNamespace := &corev1.Namespace{
			TypeMeta:   metav1.TypeMeta{APIVersion: "v1", Kind: "Namespace"},
			ObjectMeta: metav1.ObjectMeta{Name: namespace},
		}
		// Add any teardown steps that needs to be executed after each test
		err := k8sClient.Delete(context.Background(), testNamespace, client.PropagationPolicy(metav1.DeletePropagationForeground))
		Expect(err).ToNot(HaveOccurred())
		n := &corev1.Namespace{}
		// NOTE(review): this returns true while the namespace still exists
		// and false once it is NotFound, so Eventually succeeds immediately —
		// looks inverted, though possibly deliberate since envtest never
		// completes namespace deletion. Confirm intent.
		Eventually(func() bool {
			err := k8sClient.Get(context.Background(), types.NamespacedName{Name: namespace}, n)
			if err != nil && errors.IsNotFound(err) {
				return false
			}
			return true
		}, 60*time.Second, 5*time.Second).Should(BeTrue())
	})

	Context("an EnvoyBootstrap is created", func() {
		var eb *marin3rv1alpha1.EnvoyBootstrap
		var ds *operatorv1alpha1.DiscoveryService

		BeforeEach(func() {
			By("Creating a DiscoveryService instance")
			ds = &operatorv1alpha1.DiscoveryService{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "instance",
					Namespace: namespace,
				},
				Spec: operatorv1alpha1.DiscoveryServiceSpec{
					Image: pointer.StringPtr("image"),
				},
			}
			err := k8sClient.Create(context.Background(), ds)
			Expect(err).ToNot(HaveOccurred())

			eb = &marin3rv1alpha1.EnvoyBootstrap{
				ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: namespace},
				Spec: marin3rv1alpha1.EnvoyBootstrapSpec{
					DiscoveryService: "instance",
					ClientCertificate: marin3rv1alpha1.ClientCertificate{
						Directory:  "/tls",
						SecretName: "my-cert",
						Duration: metav1.Duration{
							Duration: func() time.Duration {
								d, _ := time.ParseDuration("5m")
								return d
							}(),
						},
					},
					EnvoyStaticConfig: marin3rv1alpha1.EnvoyStaticConfig{
						ConfigMapNameV2:       "bootstrap-v2",
						ConfigMapNameV3:       "bootstrap-v3",
						ConfigFile:            "/config.json",
						ResourcesDir:          "/resources",
						RtdsLayerResourceName: "runtime",
						AdminBindAddress:      "127.0.0.1:9901",
						AdminAccessLogPath:    "/dev/null",
					},
				},
			}
			err = k8sClient.Create(context.Background(), eb)
			Expect(err).ToNot(HaveOccurred())
			// wait until the EnvoyBootstrap is readable back
			Eventually(func() bool {
				err := k8sClient.Get(context.Background(), types.NamespacedName{Name: "test", Namespace: namespace}, eb)
				return err == nil
			}, 60*time.Second, 5*time.Second).Should(BeTrue())
		})

		It("should create the ConfigMaps and the DiscoveryServiceCertificate for the envoy client", func() {
			By("Checking that the DiscoveryServiceCertificate has been created")
			{
				dsc := &operatorv1alpha1.DiscoveryServiceCertificate{}
				Eventually(func() bool {
					if err := k8sClient.Get(context.Background(), types.NamespacedName{Name: "my-cert", Namespace: namespace}, dsc); err != nil {
						return false
					}
					return true
				}, 60*time.Second, 5*time.Second).Should(BeTrue())
				Expect(dsc.Spec.SecretRef.Name).To(Equal(eb.Spec.ClientCertificate.SecretName))
				Expect(dsc.Spec.ValidFor).To(Equal(int64(eb.Spec.ClientCertificate.Duration.Seconds())))
			}
			By("Checking that the v2 bootstrap ConfigMap has been created")
			{
				cm := &corev1.ConfigMap{}
				Eventually(func() bool {
					if err := k8sClient.Get(context.Background(), types.NamespacedName{Name: eb.Spec.EnvoyStaticConfig.ConfigMapNameV2, Namespace: namespace}, cm); err != nil {
						return false
					}
					return true
				}, 60*time.Second, 5*time.Second).Should(BeTrue())
			}
			By("Checking that the v3 bootstrap ConfigMap has been created")
			{
				cm := &corev1.ConfigMap{}
				Eventually(func() bool {
					if err := k8sClient.Get(context.Background(), types.NamespacedName{Name: eb.Spec.EnvoyStaticConfig.ConfigMapNameV3, Namespace: namespace}, cm); err != nil {
						return false
					}
					return true
				}, 60*time.Second, 5*time.Second).Should(BeTrue())
			}
			By("Checking that the hashes of the configs are set in the status")
			{
				err := k8sClient.Get(context.Background(), types.NamespacedName{Name: eb.GetName(), Namespace: eb.GetNamespace()}, eb)
				Expect(err).ToNot(HaveOccurred())
				Expect(eb.Status.GetConfigHashV2()).ToNot(Equal(""))
				Expect(eb.Status.GetConfigHashV3()).ToNot(Equal(""))
			}
		})
	})
})
|
/*
* Quay Frontend
*
* This API allows you to perform many of the operations required to work with Quay repositories, users, and organizations. You can find out more at <a href=\"https://quay.io\">Quay</a>.
*
* API version: v1
* Contact: support@quay.io
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package quay
// Description of a new repository build.
// Generated model (Swagger Codegen); field comments come from the API spec.
type RepositoryBuildRequest struct {
	// Subdirectory in which the Dockerfile can be found. You can only specify this or dockerfile_path
	Subdirectory string `json:"subdirectory,omitempty"`
	// The URL of the .tar.gz to build. Must start with \"http\" or \"https\".
	ArchiveUrl string `json:"archive_url,omitempty"`
	// The tags to which the built images will be pushed. If none specified, \"latest\" is used.
	DockerTags []string `json:"docker_tags,omitempty"`
	// Username of a Quay robot account to use as pull credentials
	PullRobot string `json:"pull_robot,omitempty"`
	// The file id that was generated when the build spec was uploaded
	FileId string `json:"file_id,omitempty"`
	// Pass in the context for the dockerfile. This is optional.
	Context string `json:"context,omitempty"`
	// Path to a dockerfile. You can only specify this or subdirectory.
	DockerfilePath string `json:"dockerfile_path,omitempty"`
}
|
package main
import (
"flag"
"log"
"github.com/sahlinet/go-tumbo/pkg/app"
"github.com/sahlinet/go-tumbo/pkg/client"
"github.com/sahlinet/go-tumbo/pkg/config"
)
// main parses flags, boots the HTTP application on port 3000 and then runs
// the worker client.
func main() {
	var server = flag.Bool("server", false, "huhu")
	flag.Parse()
	log.Printf("Running as server: %t", *server)
	/*if *server {
		srv.Start()
	}
	*/
	// IDIOM FIX: use local names that do not shadow the imported
	// config and app packages.
	cfg := config.GetConfig()
	application := &app.App{}
	application.Initialize(cfg)
	application.Run("0.0.0.0:3000")
	client.GetWorker()
}
|
package DbService
import (
"fmt"
"ledger/DbDao"
)
//func InsertRegister_PreExe(username string, password string, idNumber string, phoneNumber string) (bool, error) {
// //admin WorkEntry admin
// err := DbDao.InsertToDb_PreExe("INSERT INTO tb_User(username,password,idNumber,phoneNumber) VALUES (?,?,?,?)", username, password, idNumber, phoneNumber)
// // err := DbDao.InsertToDb("INSERT INTO tb_User(username,password,idNumber,phoneNumber) VALUES (?,?,?,?)", username, password, idNumber, phoneNumber)
// fmt.Println("err is :", err)
// if err == nil {
// return true, err
// }
// return false, err
//}
// InsertRegister stores a new user row in tb_User and reports whether the
// insert succeeded, together with the underlying error (nil on success).
func InsertRegister(username string, password string, idNumber string, phoneNumber string) (bool, error) {
	err := DbDao.InsertToDb("INSERT INTO tb_User(username,password,idNumber,phoneNumber) VALUES (?,?,?,?)", username, password, idNumber, phoneNumber)
	fmt.Println("err is :", err)
	return err == nil, err
}
|
package lbricks
// Event is the channel carrying a signal's values.
type Event chan interface{}

// Predicate decides whether a value is kept by Filter.
type Predicate func(interface{}) bool

// Mapper transforms one value into another for Map.
type Mapper func(interface{}) interface{}

// MultiMapper transforms several values into one.
type MultiMapper func(...interface{}) interface{}

// Reducer folds an element into an accumulator for Reduce.
type Reducer func(memo interface{}, element interface{}) interface{}

// Subscriber consumes each value delivered by Subscribe.
type Subscriber func(interface{})

// Signal wraps an Event channel with combinator methods.
type Signal struct {
	event Event
}
// Map returns a Signal whose events are fn applied to each event of s.
// The derived channel is closed once s's channel closes.
func (s Signal) Map(fn Mapper) Signal {
	out := Signal{make(Event)}
	go func() {
		defer close(out.event)
		for value := range s.event {
			out.event <- fn(value)
		}
	}()
	return out
}
// Filter returns a Signal carrying only the events of s for which pred is
// true. The derived channel is closed once s's channel closes.
func (s Signal) Filter(pred Predicate) Signal {
	out := Signal{make(Event)}
	go func() {
		defer close(out.event)
		for value := range s.event {
			if pred(value) {
				out.event <- value
			}
		}
	}()
	return out
}
// Reduce folds every event of s into memo using red and returns the final
// accumulator. It blocks until s's event channel is closed.
func (s Signal) Reduce(red Reducer, memo interface{}) interface{} {
	acc := memo
	for value := range s.event {
		acc = red(acc, value)
	}
	return acc
}
// Subscribe asynchronously invokes fn for every event of s; it returns
// immediately and the consumer goroutine ends when s's channel closes.
func (s Signal) Subscribe(fn Subscriber) {
	go func() {
		for value := range s.event {
			fn(value)
		}
	}()
}
// FromValues builds a Signal that emits the given values in order and then
// closes its event channel.
func FromValues(els ...interface{}) Signal {
	events := make(Event)
	go func() {
		defer close(events)
		for _, el := range els {
			events <- el
		}
	}()
	return Signal{events}
}
|
package access
import (
"context"
"fmt"
"net/http"
"sync"
"time"
log "github.com/cihub/seelog"
"github.com/jinzhu/gorm"
httpr "github.com/julienschmidt/httprouter"
"github.com/ok-borg/api/ctxext"
"github.com/ok-borg/api/domain"
)
// AccessKinds enumerates the rate-limited operation categories.
type AccessKinds int

// UserAccess counts a user's update and create operations in the current
// 24h window.
type UserAccess struct {
	Update int
	Create int
}

// FIXME(jeremy): should be in config
// maximum access for write and updates in 24 hours
const (
	maxCreate = 100
	maxUpdate = 50
)

// access kinds
const (
	Create AccessKinds = iota
	Update
)

var (
	// accessControl maps an access token to its usage counters; guarded by mtx.
	accessControl          map[string]UserAccess
	mtx                    = &sync.Mutex{}
	lastAccessControlReset = time.Now()
)

func init() {
	accessControl = map[string]UserAccess{}
}
// updateTimer resets the per-user access counters once 24 hours have
// elapsed since the last reset. Safe for concurrent use.
func updateTimer() {
	mtx.Lock()
	defer mtx.Unlock()
	if time.Since(lastAccessControlReset) >= 24*time.Hour {
		lastAccessControlReset = time.Now()
		accessControl = map[string]UserAccess{}
	}
}
// Control wraps handler with per-token rate limiting for the given access
// kind: at most maxCreate creates and maxUpdate updates per counter window.
// When the limit is reached it answers 401 and does not call handler.
func Control(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, p httpr.Params), ctrl AccessKinds) func(ctx context.Context, w http.ResponseWriter, r *http.Request, p httpr.Params) {
	return func(ctx context.Context, w http.ResponseWriter, r *http.Request, p httpr.Params) {
		// get the token from the context
		token := ctx.Value("token").(string)
		mtx.Lock()
		ac := accessControl[token] // zero-value counters for first access
		if ctrl == Create {
			if ac.Create >= maxCreate {
				// BUG FIX: the mutex was previously held across this early
				// return, deadlocking every subsequent request
				mtx.Unlock()
				writeResponse(w, http.StatusUnauthorized, "borg-api: api max create reached")
				return
			}
			ac.Create += 1
		}
		if ctrl == Update {
			// BUG FIX: this branch used to check and increment ac.Create
			// instead of ac.Update
			if ac.Update >= maxUpdate {
				mtx.Unlock()
				writeResponse(w, http.StatusUnauthorized, "borg-api: api max update reached")
				return
			}
			ac.Update += 1
		}
		accessControl[token] = ac
		// just log some shit
		log.Infof("[user access control] token: %s -> %#v", token, accessControl[token])
		mtx.Unlock()
		// then call the handler
		handler(ctx, w, r, p)
	}
}
// simple helper to check if the user is auth in the application,
// if logged process the handler, or return directly
func IfAuth(db *gorm.DB, handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, p httpr.Params)) func(w http.ResponseWriter, r *http.Request, p httpr.Params) {
return func(w http.ResponseWriter, r *http.Request, p httpr.Params) {
var token string
if token = r.FormValue("token"); token == "" {
if token = r.Header.Get("Authorization"); token == "" {
if token = r.Header.Get("authorization"); token == "" {
writeResponse(w, http.StatusUnauthorized, "borg-api: Missing access token")
return
}
}
}
accessTokenDao := domain.NewAccessTokenDao(db)
at, err := accessTokenDao.GetByToken(token)
if err != nil {
writeResponse(w, http.StatusUnauthorized, "borg-api: Invalid access token")
return
}
// get or create it in mysql
userDao := domain.NewUserDao(db)
user, err := userDao.GetById(at.UserId)
if err != nil {
writeResponse(w, http.StatusUnauthorized, "borg-api: Invalid access token")
return
}
// no errors, process the handler
ctx := ctxext.WithTokenString(context.Background(), token)
ctx = ctxext.WithUserId(ctx, user.Id)
ctx = ctxext.WithUser(ctx, user)
ctx = ctxext.WithIsAuth(ctx, true)
handler(ctx, w, r, p)
}
}
// MaybeAuth wraps handler with optional authentication: when a token is
// present (form value "token" or Authorization header) it must be valid,
// otherwise the handler runs as an anonymous (IsAuth=false) request.
func MaybeAuth(db *gorm.DB, handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, p httpr.Params)) func(w http.ResponseWriter, r *http.Request, p httpr.Params) {
	return func(w http.ResponseWriter, r *http.Request, p httpr.Params) {
		token := r.FormValue("token")
		if token == "" {
			token = r.Header.Get("Authorization")
		}
		if token == "" {
			token = r.Header.Get("authorization")
		}
		if token == "" {
			// No token: run the handler unauthenticated. The explicit
			// return fixes the original fall-through, which continued
			// past this point after the handler had already run.
			ctx := ctxext.WithIsAuth(context.Background(), false)
			handler(ctx, w, r, p)
			return
		}
		accessTokenDao := domain.NewAccessTokenDao(db)
		at, err := accessTokenDao.GetByToken(token)
		if err != nil {
			writeResponse(w, http.StatusUnauthorized, "borg-api: Invalid access token")
			return
		}
		// load the owning user from mysql
		userDao := domain.NewUserDao(db)
		user, err := userDao.GetById(at.UserId)
		if err != nil {
			writeResponse(w, http.StatusUnauthorized, "borg-api: Invalid access token")
			return
		}
		// no errors, attach identity and process the handler
		ctx := ctxext.WithTokenString(context.Background(), token)
		ctx = ctxext.WithUserId(ctx, user.Id)
		ctx = ctxext.WithUser(ctx, user)
		ctx = ctxext.WithIsAuth(ctx, true)
		handler(ctx, w, r, p)
	}
}
// FIXME: this is duplicated in main.go
// writeResponse writes body with the given HTTP status, a JSON content
// type, and an explicit Content-Length header.
func writeResponse(w http.ResponseWriter, status int, body string) {
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Content-Length", fmt.Sprintf("%v", len(body)))
	w.WriteHeader(status)
	fmt.Fprint(w, body)
}
|
package accounts
import (
"testing"
"github.com/acrossmounation/redpack/services"
"github.com/go-spring/spring-boot"
"github.com/segmentio/ksuid"
"github.com/shopspring/decimal"
. "github.com/smartystreets/goconvey/convey"
)
// TestAccountServiceCreate is a go-spring JUnit-style suite exercising
// account creation; Service is injected by the container via `autowire`.
type TestAccountServiceCreate struct {
	_ SpringBoot.JUnitSuite `export:""`
	Service services.AccountService `autowire:""`
}
// Test creates one account and checks the returned DTO echoes the input.
func (am *TestAccountServiceCreate) Test(t *testing.T) {
	dto := services.AccountCreatedDTO{
		UserId: ksuid.New().Next().String(),
		Username: "测试用户",
		Amount: "100",
		AccountName: "测试资金账户111",
		AccountType: int(services.AccountTypeEnvelope),
		CurrencyCode: "CNY",
	}
	// Convey scope: "create account"
	Convey("创建账户", t, func() {
		ndto, err := am.Service.CreateAccount(dto)
		So(err, ShouldBeNil)
		So(ndto, ShouldNotBeNil)
		So(ndto.Username, ShouldEqual, dto.Username)
		So(ndto.UserId, ShouldEqual, dto.UserId)
	})
}
// TestAccountServiceTransfer is a go-spring JUnit-style suite covering
// transfers and store-value; Service is injected via `autowire`.
type TestAccountServiceTransfer struct {
	_ SpringBoot.JUnitSuite `export:""`
	Service services.AccountService `autowire:""`
}
// Test creates two funded accounts, then exercises a transfer with
// sufficient balance, a transfer with insufficient balance, and a
// store-value (top-up), verifying balances after each operation.
func (am *TestAccountServiceTransfer) Test(t *testing.T) {
	dto1 := services.AccountCreatedDTO{
		UserId: ksuid.New().Next().String(),
		Username: "测试用户1",
		Amount: "100",
		AccountName: "测试资金账户1",
		AccountType: int(services.AccountTypeEnvelope),
		CurrencyCode: "CNY",
	}
	dto2 := services.AccountCreatedDTO{
		UserId: ksuid.New().Next().String(),
		Username: "测试用户2",
		Amount: "100",
		AccountName: "测试资金账户2",
		AccountType: int(services.AccountTypeEnvelope),
		CurrencyCode: "CNY",
	}
	// Convey scope: "create test accounts"
	Convey("创建测试账户", t, func() {
		account1, err := am.Service.CreateAccount(dto1)
		So(err, ShouldBeNil)
		So(account1, ShouldNotBeNil)
		So(account1.Balance.String(), ShouldEqual, dto1.Amount)
		So(account1.Username, ShouldEqual, dto1.Username)
		So(account1.UserId, ShouldEqual, dto1.UserId)
		account2, err := am.Service.CreateAccount(dto2)
		So(err, ShouldBeNil)
		So(account2, ShouldNotBeNil)
		So(account2.Balance.String(), ShouldEqual, dto2.Amount)
		So(account2.Username, ShouldEqual, dto2.Username)
		So(account2.UserId, ShouldEqual, dto2.UserId)
		// transfer with sufficient balance
		Convey("转账(余额充足)", func() {
			amount := decimal.NewFromFloat(1)
			body := services.TradeParticipator{
				AccountNo: account1.AccountNo,
				UserId: account1.UserId,
				Username: account1.Username,
			}
			target := services.TradeParticipator{
				AccountNo: account2.AccountNo,
				UserId: account2.UserId,
				Username: account2.Username,
			}
			dto := services.AccountTransferDTO{
				TradeNo: ksuid.New().Next().String(),
				TradeBody: body,
				TradeTarget: target,
				AmountStr: "1",
				ChangeType: services.ChangeType(-1),
				ChangeFlag: services.ChangeFlagTransferOut,
				Desc: "转账",
			}
			status, err := am.Service.Transfer(dto)
			So(err, ShouldBeNil)
			So(status, ShouldEqual, services.TransferStatusSuccess)
			// verify balances moved by exactly `amount` on both sides
			no1 := am.Service.GetAccountByNo(account1.AccountNo)
			So(no1, ShouldNotBeNil)
			So(no1.Balance.String(), ShouldEqual, account1.Balance.Sub(amount).String())
			no2 := am.Service.GetAccountByNo(account2.AccountNo)
			So(no2, ShouldNotBeNil)
			So(no2.Balance.String(), ShouldEqual, account2.Balance.Add(amount).String())
		})
		// transfer with insufficient balance
		Convey("转账(余额不足)", func() {
			body := services.TradeParticipator{
				AccountNo: account1.AccountNo,
				UserId: account1.UserId,
				Username: account1.Username,
			}
			target := services.TradeParticipator{
				AccountNo: account2.AccountNo,
				UserId: account2.UserId,
				Username: account2.Username,
			}
			dto := services.AccountTransferDTO{
				TradeNo: ksuid.New().Next().String(),
				TradeBody: body,
				TradeTarget: target,
				AmountStr: "110",
				ChangeType: services.ChangeType(-1),
				ChangeFlag: services.ChangeFlagTransferOut,
				Desc: "转账",
			}
			status, err := am.Service.Transfer(dto)
			So(err, ShouldNotBeNil)
			So(status, ShouldEqual, services.TransferStatusSufficientFunds)
			// verify both balances are unchanged after the rejected transfer
			no1 := am.Service.GetAccountByNo(account1.AccountNo)
			So(no1, ShouldNotBeNil)
			So(no1.Balance.String(), ShouldEqual, account1.Balance.String())
			no2 := am.Service.GetAccountByNo(account2.AccountNo)
			So(no2, ShouldNotBeNil)
			So(no2.Balance.String(), ShouldEqual, account2.Balance.String())
		})
		// store value (top-up)
		Convey("储值", func() {
			amount := decimal.NewFromFloat(100)
			body := services.TradeParticipator{
				AccountNo: account1.AccountNo,
				UserId: account1.UserId,
				Username: account1.Username,
			}
			dto := services.AccountTransferDTO{
				TradeNo: ksuid.New().Next().String(),
				TradeBody: body,
				AmountStr: "100",
				ChangeType: services.ChangeTypeAccountStoreValue,
				ChangeFlag: services.ChangeFlagTransferIn,
				Desc: "储值",
			}
			status, err := am.Service.StoreValue(dto)
			So(err, ShouldBeNil)
			So(status, ShouldEqual, services.TransferStatusSuccess)
			// verify the stored amount was credited
			no := am.Service.GetAccountByNo(account1.AccountNo)
			So(no, ShouldNotBeNil)
			So(no.Balance.String(), ShouldEqual, account1.Balance.Add(amount).String())
		})
	})
}
|
package main
import (
"crypto/hmac"
"crypto/md5"
"crypto/sha1"
"fmt"
)
// md5Str returns the hex-encoded MD5 digest of data, computed through a
// streaming hash.Hash (contrast with md5Str2, which uses md5.Sum).
func md5Str(data string) string {
	hasher := md5.New()
	hasher.Write([]byte(data))
	return fmt.Sprintf("%x", hasher.Sum(nil))
}
// md5Str2 returns the hex-encoded MD5 digest of data via the one-shot
// md5.Sum helper.
func md5Str2(data string) string {
	return fmt.Sprintf("%x", md5.Sum([]byte(data)))
}
// sha1Str returns the SHA-1 digest of data formatted as a lowercase
// hexadecimal string (via %x).
func sha1Str(data string) string {
	hasher := sha1.New()
	hasher.Write([]byte(data))
	return fmt.Sprintf("%x", hasher.Sum(nil))
}
// hmacsha1Str returns the HMAC-SHA1 of data under key, formatted as a
// lowercase hexadecimal string.
func hmacsha1Str(key string, data string) string {
	mac := hmac.New(sha1.New, []byte(key))
	mac.Write([]byte(data))
	return fmt.Sprintf("%x", mac.Sum(nil))
}
// main prints the MD5, SHA-1 and HMAC-SHA1 digests of a sample string.
func main() {
	const msg = "hello world"
	for _, digest := range []string{
		md5Str(msg),
		md5Str2(msg),
		sha1Str(msg),
		hmacsha1Str("abc", msg),
	} {
		fmt.Println(digest)
	}
	// Expected output:
	// 5eb63bbbe01eeed093cb22bb8f5acdc3
	// 5eb63bbbe01eeed093cb22bb8f5acdc3
	// 2aae6c35c94fcfb415dbe95f408b9ce91ee846ed
	// faf32544e39b2c626bd8c17cd6c54d79ba86d8a0
}
|
package zy_logs
import (
"bytes"
"fmt"
"runtime"
)
type LogLevel int
/*getLevelText maps a LogLevel to its uppercase display name; unrecognized levels render as "UNKNOWN"*/
func getLevelText(level LogLevel) string {
	text := "UNKNOWN"
	switch level {
	case LogLevelAccess:
		text = "ACCESS"
	case LogLevelDebug:
		text = "DEBUG"
	case LogLevelTrace:
		text = "TRACE"
	case LogLevelInfo:
		text = "INFO"
	case LogLevelWarn:
		text = "WARN"
	case LogLevelError:
		text = "ERROR"
	}
	return text
}
/*GetLogLevel parses a lowercase level name into a LogLevel; any unknown name (including "debug" itself) yields LogLevelDebug*/
func GetLogLevel(level string) LogLevel {
	parsed := LogLevelDebug
	switch level {
	case "trace":
		parsed = LogLevelTrace
	case "info":
		parsed = LogLevelInfo
	case "warn":
		parsed = LogLevelWarn
	case "error":
		parsed = LogLevelError
	}
	return parsed
}
/*GetLineInfo returns the source file name and line number of the logging
call site. The fixed skip depth of 3 assumes exactly two intermediate
logger frames between the caller and this function — confirm if the call
chain changes.*/
func GetLineInfo() (fileName string,lineNo int) {
	_, fileName, lineNo, _ = runtime.Caller(3)
	return
}
/*将字段写入到buffer缓存进行拼接*/
func writeField(buffer *bytes.Buffer,field,sep string) {
buffer.WriteString(field)
buffer.WriteString(sep)
}
/*Bytes serializes a LogData record to a byte slice. Layout:
"time LEVEL service file:line traceId [k=v ...] message\n", where the
k=v fields are emitted only for access-level records that carry fields.*/
func (l *LogData)Bytes() []byte {
	var buffer bytes.Buffer
	levelStr := getLevelText(l.level)
	writeField(&buffer,l.timeStr,SpaceSep)
	writeField(&buffer,levelStr,SpaceSep)
	writeField(&buffer,l.serviceName,SpaceSep)
	// file and line are joined with a colon: "file:line"
	writeField(&buffer,l.fileName,ColonSep)
	writeField(&buffer,fmt.Sprintf("%d",l.lineNo),SpaceSep)
	writeField(&buffer,l.traceId,SpaceSep)
	if l.level == LogLevelAccess && l.fields != nil {
		for _,field := range l.fields.kvs {
			writeField(&buffer, fmt.Sprintf("%v=%v",field.key,field.val),SpaceSep)
		}
	}
	// the message terminates the record with LineSep
	writeField(&buffer,l.message,LineSep)
	return buffer.Bytes()
}
/*getLevelColor returns the terminal color used for a log level; unknown levels fall back to Magenta*/
func getLevelColor(level LogLevel) Color {
	color := Magenta
	switch level {
	case LogLevelAccess:
		color = Blue
	case LogLevelDebug:
		color = White
	case LogLevelTrace:
		color = Cyan
	case LogLevelInfo:
		color = Green
	case LogLevelWarn:
		color = Yellow
	case LogLevelError:
		color = Red
	}
	return color
}
/*LogFileSeg is the time period used for log-file rotation (year/month/week/day/hour)*/
type LogFileSeg int
/*getSegText maps a LogFileSeg to its name; anything unrecognized reports "hour"*/
func getSegText(seg LogFileSeg) string {
	name := "hour"
	switch seg {
	case YearSeg:
		name = "year"
	case MonthSeg:
		name = "month"
	case WeekSeg:
		name = "week"
	case DaySeg:
		name = "day"
	}
	return name
}
/*GetFileSeg parses a rotation-period name into a LogFileSeg; unknown names default to HourSeg*/
func GetFileSeg(seg string) LogFileSeg {
	parsed := HourSeg
	switch seg {
	case "year":
		parsed = YearSeg
	case "month":
		parsed = MonthSeg
	case "week":
		parsed = WeekSeg
	case "day":
		parsed = DaySeg
	}
	return parsed
}
|
package main
import (
	"errors"
	"fmt"
	"os"

	"github.com/spf13/cobra"
	//"github.com/sonataruby/smart-blockchain/cli"
	//"net/http"
)
// main wires up the root "smart" cobra command, attaches the wallet
// subcommand, and exits non-zero when command execution fails.
func main() {
	smartCmd := &cobra.Command{
		Use:   "smart",
		Short: "The SMART Blockchain CLI",
		Run:   func(cmd *cobra.Command, args []string) {},
	}
	// The stray semicolon after this call in the original was not
	// gofmt-clean; dead commented-out cli.CommandLine code removed.
	smartCmd.AddCommand(walletCmd())
	if err := smartCmd.Execute(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
// incorrectUsageErr returns the generic error reported when a command is
// invoked with invalid arguments. errors.New replaces fmt.Errorf, which
// had no format verbs (flagged by go vet/staticcheck).
func incorrectUsageErr() error {
	return errors.New("incorrect usage")
}
package main
// CPUSpec holds CPU resource settings as strings; the field names mirror
// Kubernetes-style request/limit terminology — confirm against callers.
type CPUSpec struct {
	// Request is the desired baseline CPU amount.
	Request string
	// Limit is the maximum CPU amount.
	Limit string
}
|
package requests
import (
	"errors"
	"fmt"
	"net/url"
	"strings"

	"github.com/atomicjolt/canvasapi"
	"github.com/google/go-querystring/query"
)
// RemoveUsageRightsGroups Removes copyright and license information associated with one or more files
// https://canvas.instructure.com/doc/api/files.html
//
// Path Parameters:
// # Path.GroupID (Required) ID
//
// Query Parameters:
// # Query.FileIDs (Required) List of ids of files to remove associated usage rights from.
// # Query.FolderIDs (Optional) List of ids of folders. Usage rights will be removed from all files in these folders.
//
type RemoveUsageRightsGroups struct {
	// Path holds parameters substituted into the URL path.
	Path struct {
		GroupID string `json:"group_id" url:"group_id,omitempty"` // (Required)
	} `json:"path"`
	// Query holds parameters encoded into the query string.
	Query struct {
		FileIDs []string `json:"file_ids" url:"file_ids,omitempty"` // (Required)
		FolderIDs []string `json:"folder_ids" url:"folder_ids,omitempty"` // (Optional)
	} `json:"query"`
}
// GetMethod returns the HTTP method used by this API call.
func (t *RemoveUsageRightsGroups) GetMethod() string {
	return "DELETE"
}
// GetURLPath returns the request path with the group_id placeholder
// substituted from the Path parameters.
func (t *RemoveUsageRightsGroups) GetURLPath() string {
	const template = "groups/{group_id}/usage_rights"
	return strings.ReplaceAll(template, "{group_id}", fmt.Sprintf("%v", t.Path.GroupID))
}
// GetQuery encodes the Query struct into a URL query string using the
// struct's `url` tags.
func (t *RemoveUsageRightsGroups) GetQuery() (string, error) {
	values, err := query.Values(t.Query)
	if err != nil {
		return "", err
	}
	return values.Encode(), nil
}
// GetBody returns nil: this request sends no form-encoded body.
func (t *RemoveUsageRightsGroups) GetBody() (url.Values, error) {
	return nil, nil
}
// GetJSON returns nil: this request sends no JSON body.
func (t *RemoveUsageRightsGroups) GetJSON() ([]byte, error) {
	return nil, nil
}
// HasErrors validates the required parameters, returning a single error
// that joins all missing-field messages, or nil when the request is valid.
func (t *RemoveUsageRightsGroups) HasErrors() error {
	errs := []string{}
	if t.Path.GroupID == "" {
		errs = append(errs, "'Path.GroupID' is required")
	}
	if t.Query.FileIDs == nil {
		errs = append(errs, "'Query.FileIDs' is required")
	}
	if len(errs) > 0 {
		// errors.New replaces fmt.Errorf with a non-constant format
		// string (go vet diagnostic; '%' in the message would have been
		// misinterpreted as a verb).
		return errors.New(strings.Join(errs, ", "))
	}
	return nil
}
// Do submits the request through the given Canvas client, discarding the
// response body and propagating any transport error.
func (t *RemoveUsageRightsGroups) Do(c *canvasapi.Canvas) error {
	_, err := c.SendRequest(t)
	return err
}
|
/*
Copyright 2019 The Skaffold Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubectl
import (
"context"
"testing"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/util"
"github.com/GoogleContainerTools/skaffold/v2/testutil"
)
// TestCLI verifies that the kubectl CLI wrapper inserts the configured
// context, and optional namespace and kubeconfig flags, into the command
// line for Run, RunOut, and CommandWithStrictCancellation.
func TestCLI(t *testing.T) {
	const (
		kubeContext = "some-kubecontext"
		output = "this is the expected output"
	)
	// each case differs only in which optional flags are set
	tests := []struct {
		name string
		kubeconfig string
		namespace string
		expectedCommand string
	}{
		{
			name: "without namespace or kubeconfig",
			expectedCommand: "kubectl --context some-kubecontext exec arg1 arg2",
		},
		{
			name: "only namespace, no kubeconfig",
			namespace: "some-namespace",
			expectedCommand: "kubectl --context some-kubecontext --namespace some-namespace exec arg1 arg2",
		},
		{
			name: "only kubeconfig, no namespace",
			kubeconfig: "some-kubeconfig",
			expectedCommand: "kubectl --context some-kubecontext --kubeconfig some-kubeconfig exec arg1 arg2",
		},
		{
			name: "with namespace and kubeconfig",
			kubeconfig: "some-kubeconfig",
			namespace: "some-namespace",
			expectedCommand: "kubectl --context some-kubecontext --namespace some-namespace --kubeconfig some-kubeconfig exec arg1 arg2",
		},
	}
	// test cli.Run()
	for _, test := range tests {
		testutil.Run(t, test.name, func(t *testutil.T) {
			t.Override(&util.DefaultExecCommand, testutil.CmdRun(
				test.expectedCommand,
			))
			cli := NewCLI(&mockConfig{
				kubeContext: kubeContext,
				kubeConfig: test.kubeconfig,
				namespace: test.namespace,
			}, "")
			err := cli.Run(context.Background(), nil, nil, "exec", "arg1", "arg2")
			t.CheckNoError(err)
		})
	}
	// test cli.RunOut()
	for _, test := range tests {
		testutil.Run(t, test.name, func(t *testutil.T) {
			t.Override(&util.DefaultExecCommand, testutil.CmdRunOut(
				test.expectedCommand,
				output,
			))
			cli := NewCLI(&mockConfig{
				kubeContext: kubeContext,
				kubeConfig: test.kubeconfig,
				namespace: test.namespace,
			}, "")
			out, err := cli.RunOut(context.Background(), "exec", "arg1", "arg2")
			t.CheckNoError(err)
			t.CheckDeepEqual(string(out), output)
		})
	}
	// test cli.CommandWithStrictCancellation()
	for _, test := range tests {
		testutil.Run(t, test.name, func(t *testutil.T) {
			t.Override(&util.DefaultExecCommand, testutil.CmdRunOut(
				test.expectedCommand,
				output,
			))
			cli := NewCLI(&mockConfig{
				kubeContext: kubeContext,
				kubeConfig: test.kubeconfig,
				namespace: test.namespace,
			}, "")
			cmd := cli.CommandWithStrictCancellation(context.Background(), "exec", "arg1", "arg2")
			out, err := util.RunCmdOut(context.Background(), cmd.Cmd)
			t.CheckNoError(err)
			t.CheckDeepEqual(string(out), output)
		})
	}
}
// mockConfig is a minimal test double supplying the kube context,
// kubeconfig path, and namespace that NewCLI reads from its config.
type mockConfig struct {
	kubeContext string
	kubeConfig string
	namespace string
}
// GetKubeContext returns the fixed kube context.
func (c *mockConfig) GetKubeContext() string { return c.kubeContext }
// GetKubeConfig returns the fixed kubeconfig path.
func (c *mockConfig) GetKubeConfig() string { return c.kubeConfig }
// GetKubeNamespace returns the fixed namespace.
func (c *mockConfig) GetKubeNamespace() string { return c.namespace }
|
package main
import (
"fmt"
"sync"
)
//test 123
//test 123
//test 123
//test 123
//test 123
//test 123
//test 123
var ch1 chan int = make(chan int,1) // declare and initialize a buffered channel
var ch2 chan int = make(chan int,1) // declare and initialize a buffered channel
var chs = []chan int{ch1, ch2} // the channels getChan indexes into
var numbers = []int{1, 2, 3, 4, 5} // sample data read by getNumber
// main fills both buffered channels from a goroutine, waits for it to
// finish, then runs a select whose two receive cases are both ready —
// demonstrating that select picks among ready cases pseudo-randomly.
func main() {
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		getChan(0) <- 1
		getChan(1) <- 2
	}()
	wg.Wait()
	select {
	case <-getChan(0):
		fmt.Println("1th case is selected.")
	case <-getChan(1):
		fmt.Println("2th case is selected.")
	default:
		fmt.Println("default!.")
	}
}
// getNumber logs which index is being read, then returns numbers[i].
func getNumber(i int) int {
	fmt.Printf("numbers[%d]\n", i)
	value := numbers[i]
	return value
}
// getChan logs which channel is being selected, then returns chs[i].
func getChan(i int) chan int {
	fmt.Printf("chs[%d]\n", i)
	selected := chs[i]
	return selected
}
|
// SPDX-License-Identifier: Apache-2.0
// Copyright(c) 2018-2019 Saaras Inc.
package webhttp
import (
"bytes"
"github.com/labstack/echo/v4"
"github.com/saarasio/enroute/enroute-dp/saaras"
"net/http"
"github.com/sirupsen/logrus"
)
// Proxy is the request/response body carrying a proxy's name.
type Proxy struct {
	Name string `json:"name" xml:"name" form:"name" query:"name"`
}

// Service binds a service name to the FQDN it serves.
type Service struct {
	Service_name string `json:"service_name" xml:"service_name" form:"service_name" query:"service_name"`
	Fqdn string `json:"fqdn" xml:"fqdn" form:"fqdn" query:"fqdn"`
}

// Route names a route and the URL prefix it matches.
type Route struct {
	Route_name string `json:"route_name" xml:"route_name" form:"route_name" query:"route_name"`
	Route_prefix string `json:"route_prefix" xml:"route_prefix" form:"route_prefix" query:"route_prefix"`
}

// Upstream describes a backend endpoint plus its health-check, load
// balancing, and TLS-validation settings; all values travel as strings.
type Upstream struct {
	Upstream_name string `json:"upstream_name" xml:"upstream_name" form:"upstream_name" query:"upstream_name"`
	Upstream_ip string `json:"upstream_ip" xml:"upstream_ip" form:"upstream_ip" query:"upstream_ip"`
	Upstream_port string `json:"upstream_port" xml:"upstream_port" form:"upstream_port" query:"upstream_port"`
	Upstream_hc_path string `json:"upstream_hc_path" xml:"upstream_hc_path" form:"upstream_hc_path" query:"upstream_hc_path"`
	Upstream_hc_host string `json:"upstream_hc_host" xml:"upstream_hc_host" form:"upstream_hc_host" query:"upstream_hc_host"`
	Upstream_weight string `json:"upstream_weight" xml:"upstream_weight" form:"upstream_weight" query:"upstream_weight"`
	Upstream_hc_intervalseconds string `json:"upstream_hc_intervalseconds" xml:"upstream_hc_intervalseconds" form:"upstream_hc_intervalseconds" query:"upstream_hc_intervalseconds"`
	Upstream_hc_unhealthythresholdcount string `json:"upstream_hc_unhealthythresholdcount" xml:"upstream_hc_unhealthythresholdcount" form:"upstream_hc_unhealthythresholdcount" query:"upstream_hc_unhealthythresholdcount"`
	Upstream_hc_healthythresholdcount string `json:"upstream_hc_healthythresholdcount" xml:"upstream_hc_healthythresholdcount" form:"upstream_hc_healthythresholdcount" query:"upstream_hc_healthythresholdcount"`
	Upstream_strategy string `json:"upstream_strategy" xml:"upstream_strategy" form:"upstream_strategy" query:"upstream_strategy"`
	Upstream_validation_cacertificate string `json:"upstream_validation_cacertificate" xml:"upstream_validation_cacertificate" form:"upstream_validation_cacertificate" query:"upstream_validation_cacertificate"`
	Upstream_validation_subjectname string `json:"upstream_validation_subjectname" xml:"upstream_validation_subjectname" form:"upstream_validation_subjectname" query:"upstream_validation_subjectname"`
	Upstream_protocol string `json:"upstream_protocol" xml:"upstream_protocol" form:"upstream_protocol" query:"upstream_protocol"`
	Upstream_hc_timeoutseconds string `json:"upstream_hc_timeoutseconds" xml:"upstream_hc_timeoutseconds" form:"upstream_hc_timeoutseconds" query:"upstream_hc_timeoutseconds"`
}

// Secret carries TLS key/cert material and the SNI name it belongs to.
type Secret struct {
	Secret_name string `json:"secret_name" xml:"secret_name" form:"secret_name" query:"secret_name"`
	Secret_key string `json:"secret_key" xml:"secret_key" form:"secret_key" query:"secret_key"`
	Secret_cert string `json:"secret_cert" xml:"secret_cert" form:"secret_cert" query:"secret_cert"`
	Secret_sni string `json:"secret_sni" xml:"secret_sni" form:"secret_sni" query:"secret_sni"`
}
// QCreateProxy upserts a proxy row by name (refreshes create_ts on conflict).
var QCreateProxy string = `
mutation create_proxy($proxy_name : String!){
insert_saaras_db_proxy(objects: {proxy_name: $proxy_name},
on_conflict: {constraint: proxy_proxy_name_key, update_columns: create_ts}) {
affected_rows
}
}
`
// QGetProxy lists every proxy with its timestamps.
var QGetProxy string = `
query get_proxies {
saaras_db_proxy {
proxy_id
proxy_name
create_ts
update_ts
}
}
`
// QGetOneProxy fetches a single proxy by name.
var QGetOneProxy string = `
query get_proxies($proxy_name: String!) {
saaras_db_proxy(where: {proxy_name: {_eq: $proxy_name}}) {
proxy_id
proxy_name
create_ts
update_ts
}
}
`
// QDeleteProxy deletes a proxy by name.
var QDeleteProxy string = `
mutation delete_proxy($proxy_name: String!) {
delete_saaras_db_proxy(where: {proxy_name: {_eq: $proxy_name}}) {
affected_rows
}
}
`
// QCreateProxyService upserts a service (name + fqdn) and associates it
// with the named proxy in one mutation.
var QCreateProxyService string = `
mutation create_proxy_service($proxy_name : String!, $fqdn : String!, $service_name : String!) {
# Create service
insert_saaras_db_service
(
objects:
{
fqdn: $fqdn,
service_name: $service_name
} on_conflict: {constraint: service_service_name_key, update_columns:[fqdn, service_name]}
)
{
returning
{
create_ts
}
}
# Associate a service to a proxy
insert_saaras_db_proxy_service(
objects:
{
proxy:
{
data:
{
proxy_name: $proxy_name
}, on_conflict: {constraint: proxy_proxy_name_key, update_columns: update_ts}
},
service:
{
data:
{
service_name: $service_name
}, on_conflict: {constraint: service_service_name_key, update_columns: update_ts}
}
}
)
{
affected_rows
}
}
`
// QGetProxyService lists the services associated with a proxy.
var QGetProxyService string = `
query get_proxy_service($proxy_name: String!) {
saaras_db_service(where: {proxy_services: {proxy: {proxy_name: {_eq: $proxy_name}}}}) {
service_id
service_name
fqdn
create_ts
update_ts
}
}
`
// QGetProxyServiceAssociation fetches one named service if it is
// associated with the given proxy.
var QGetProxyServiceAssociation string = `
query get_proxy_service($proxy_name: String!, $service_name: String!) {
saaras_db_service(where: {_and:
{proxy_services: {proxy: {proxy_name: {_eq: $proxy_name}}}}, service_name: {_eq: $service_name}}) {
service_id
service_name
fqdn
create_ts
update_ts
}
}
`
// QDeleteProxyService removes the proxy/service association AND deletes
// the service row itself.
var QDeleteProxyService = `
mutation delete_proxy_service($service_name: String!, $proxy_name: String!) {
delete_saaras_db_proxy_service(where: {
_and:
{
proxy: {proxy_name: {_eq: $proxy_name}},
service: {service_name: {_eq: $service_name}}
}
}) {
affected_rows
}
delete_saaras_db_service(where: {service_name: {_eq: $service_name}}) {
affected_rows
}
}
`
// QDeleteProxyServiceAssociation removes only the association, leaving
// the service row intact.
var QDeleteProxyServiceAssociation = `
mutation delete_proxy_service($service_name: String!, $proxy_name: String!) {
delete_saaras_db_proxy_service(where: {
_and:
{
proxy: {proxy_name: {_eq: $proxy_name}},
service: {service_name: {_eq: $service_name}}
}
}) {
affected_rows
}
}
`
// QCreateProxyServiceAssociation links an existing (or upserted) service
// to an existing (or upserted) proxy.
var QCreateProxyServiceAssociation = `
mutation create_proxy_service($proxy_name : String!, $service_name : String!) {
# Associate a service to a proxy
insert_saaras_db_proxy_service(
objects:
{
proxy:
{
data:
{
proxy_name: $proxy_name
}, on_conflict: {constraint: proxy_proxy_name_key, update_columns: update_ts}
},
service:
{
data:
{
service_name: $service_name
}, on_conflict: {constraint: service_service_name_key, update_columns: update_ts}
}
}
)
{
affected_rows
}
}
`
// QGetAllProxyDetail dumps every proxy with its nested services, secrets,
// routes, and upstreams.
var QGetAllProxyDetail = `
query get_proxy_detail {
saaras_db_proxy {
proxy_id
proxy_name
create_ts
update_ts
proxy_services {
service {
service_id
service_name
fqdn
create_ts
update_ts
service_secrets {
secret {
secret_id
secret_name
secret_key
secret_cert
create_ts
update_ts
}
}
routes {
route_id
route_name
route_prefix
create_ts
update_ts
route_upstreams {
upstream {
upstream_id
upstream_name
upstream_ip
upstream_port
upstream_hc_healthythresholdcount
upstream_hc_host
upstream_hc_intervalseconds
upstream_hc_path
upstream_hc_timeoutseconds
upstream_hc_unhealthythresholdcount
upstream_strategy
upstream_validation_cacertificate
upstream_validation_subjectname
upstream_weight
}
}
}
}
}
}
}
`
// QGetOneProxyDetail is the single-proxy variant of QGetAllProxyDetail,
// filtered by proxy_name.
var QGetOneProxyDetail = `
query get_one_proxy_detail($proxy_name:String!) {
saaras_db_proxy(where: {proxy_name: {_eq: $proxy_name}}) {
proxy_id
proxy_name
create_ts
update_ts
proxy_services {
service {
service_id
service_name
fqdn
create_ts
update_ts
service_secrets {
secret {
secret_id
secret_name
secret_key
secret_cert
create_ts
update_ts
}
}
routes {
route_id
route_name
route_prefix
create_ts
update_ts
route_upstreams {
upstream {
upstream_id
upstream_name
upstream_ip
upstream_port
upstream_hc_path
upstream_hc_host
upstream_hc_intervalseconds
upstream_hc_timeoutseconds
upstream_hc_unhealthythresholdcount
upstream_hc_healthythresholdcount
upstream_strategy
upstream_validation_cacertificate
upstream_validation_subjectname
upstream_weight
create_ts
update_ts
}
}
}
}
}
}
}
`
// Read from DB_HOST and DB_PORT environment variables
// NOTE(review): the env-var reads happen elsewhere — no assignment to
// these is visible in this file; confirm the initialization site.
var HOST string
var PORT string
var SECRET string
// @Summary Create a proxy
// @Description Create a proxy
// @Tags proxy
// @Accept json
// @Produce json
// @Param Name body webhttp.Proxy true "Name of proxy to create"
// @Success 201 {} integer OK
// @Router /proxy [post]
// @Security ApiKeyAuth
// POST_Proxy binds a Proxy from the request and upserts it in the DB.
func POST_Proxy(c echo.Context) error {
	log := logrus.StandardLogger().WithField("context", "web-http")
	p := new(Proxy)
	if err := c.Bind(p); err != nil {
		return err
	}
	if len(p.Name) == 0 {
		return c.JSON(http.StatusBadRequest, "Please provide name of proxy using Name field")
	}
	args := map[string]string{"proxy_name": p.Name}
	endpoint := "http://" + HOST + ":" + PORT + "/v1/graphql"
	var buf bytes.Buffer
	if err := saaras.RunDBQuery(endpoint, QCreateProxy, &buf, args, log); err != nil {
		log.Errorf("Error when running http request [%v]\n", err)
	}
	return c.JSON(http.StatusCreated, p)
}
// @Summary List proxies
// @Description Get a list of all proxies
// @Tags proxy
// @Accept json
// @Produce json
// @Success 200 {} integer OK
// @Router /proxy [get]
// @Security ApiKeyAuth
// GET_Proxy returns the list of proxies stored in the DB as raw JSON.
func GET_Proxy(c echo.Context) error {
	log := logrus.StandardLogger().WithField("context", "web-http")
	args := make(map[string]string)
	endpoint := "http://" + HOST + ":" + PORT + "/v1/graphql"
	var buf bytes.Buffer
	if err := saaras.RunDBQuery(endpoint, QGetProxy, &buf, args, log); err != nil {
		log.Errorf("Error when running http request [%v]\n", err)
	}
	return c.JSONBlob(http.StatusOK, buf.Bytes())
}
// @Summary Get all proxy details
// @Description Get a detailed version of list of proxies
// @Tags proxy, operational-verbs
// @Accept json
// @Produce json
// @Success 200 {} integer OK
// @Router /proxy/dump [get]
// @Security ApiKeyAuth
// GET_Proxy_Detail dumps every proxy with nested services/routes/upstreams.
func GET_Proxy_Detail(c echo.Context) error {
	log := logrus.StandardLogger().WithField("context", "web-http")
	args := make(map[string]string)
	endpoint := "http://" + HOST + ":" + PORT + "/v1/graphql"
	var buf bytes.Buffer
	if err := saaras.RunDBQuery(endpoint, QGetAllProxyDetail, &buf, args, log); err != nil {
		log.Errorf("Error when running http request [%v]\n", err)
	}
	return c.JSONBlob(http.StatusOK, buf.Bytes())
}
// @Summary Get details of specified proxy
// @Description Get a detailed version of specified proxy
// @Tags proxy, operational-verbs
// @Accept json
// @Produce json
// @Param proxy_name path string true "Name of proxy for which to list services"
// @Success 200 {} integer OK
// @Router /proxy/dump/{proxy_name} [get]
// @Security ApiKeyAuth
// GET_One_Proxy_Detail dumps one named proxy with its nested objects.
func GET_One_Proxy_Detail(c echo.Context) error {
	log := logrus.StandardLogger().WithField("context", "web-http")
	args := map[string]string{"proxy_name": c.Param("proxy_name")}
	endpoint := "http://" + HOST + ":" + PORT + "/v1/graphql"
	var buf bytes.Buffer
	if err := saaras.RunDBQuery(endpoint, QGetOneProxyDetail, &buf, args, log); err != nil {
		log.Errorf("Error when running http request [%v]\n", err)
	}
	return c.JSONBlob(http.StatusOK, buf.Bytes())
}
// @Summary List services associated with proxy
// @Description Get all services associated with a proxy
// @Tags proxy
// @Param proxy_name path string true "Name of proxy for which to list services"
// @Accept json
// @Produce json
// @Success 200 {} integer OK
// @Router /proxy/{proxy_name}/service [get]
// @Security ApiKeyAuth
// GET_Proxy_Service lists the services attached to the named proxy.
func GET_Proxy_Service(c echo.Context) error {
	log := logrus.StandardLogger().WithField("context", "web-http")
	args := map[string]string{"proxy_name": c.Param("proxy_name")}
	endpoint := "http://" + HOST + ":" + PORT + "/v1/graphql"
	var buf bytes.Buffer
	if err := saaras.RunDBQuery(endpoint, QGetProxyService, &buf, args, log); err != nil {
		log.Errorf("Error when running http request [%v]\n", err)
	}
	return c.JSONBlob(http.StatusOK, buf.Bytes())
}
// @Summary Delete a proxy
// @Description Delete a proxy
// @Tags proxy
// @Param proxy_name path string true "Name of proxy to delete"
// @Accept json
// @Produce json
// @Success 200 {} integer OK
// @Router /proxy/{proxy_name} [delete]
// @Security ApiKeyAuth
// DELETE_Proxy removes the named proxy row from the DB.
func DELETE_Proxy(c echo.Context) error {
	log := logrus.StandardLogger().WithField("context", "web-http")
	args := map[string]string{"proxy_name": c.Param("proxy_name")}
	endpoint := "http://" + HOST + ":" + PORT + "/v1/graphql"
	var buf bytes.Buffer
	if err := saaras.RunDBQuery(endpoint, QDeleteProxy, &buf, args, log); err != nil {
		log.Errorf("Error when running http request [%v]\n", err)
	}
	return c.JSONBlob(http.StatusOK, buf.Bytes())
}
// @Summary Get information about a proxy
// @Description Get information about a proxy
// @Tags proxy
// @Param proxy_name path string true "Name of proxy to delete"
// @Accept json
// @Produce json
// @Success 200 {} integer OK
// @Router /proxy/{proxy_name} [get]
// @Security ApiKeyAuth
// GET_One_Proxy returns the named proxy's basic record.
func GET_One_Proxy(c echo.Context) error {
	log := logrus.StandardLogger().WithField("context", "web-http")
	args := map[string]string{"proxy_name": c.Param("proxy_name")}
	endpoint := "http://" + HOST + ":" + PORT + "/v1/graphql"
	var buf bytes.Buffer
	if err := saaras.RunDBQuery(endpoint, QGetOneProxy, &buf, args, log); err != nil {
		log.Errorf("Error when running http request [%v]\n", err)
	}
	return c.JSONBlob(http.StatusOK, buf.Bytes())
}
// @Summary Associate a service with proxy
// @Description Associate a service with proxy
// @Tags proxy
// @Param proxy_name path string true "Name of proxy for which to list service"
// @Param service_name path string true "Name of service to list"
// @Accept json
// @Produce json
// @Success 200 {} integer OK
// @Router /proxy/{proxy_name}/service/{service_name} [post]
// @Security ApiKeyAuth
// POST_Proxy_Service_Association links the named service to the named proxy.
func POST_Proxy_Service_Association(c echo.Context) error {
	log := logrus.StandardLogger().WithField("context", "web-http")
	args := map[string]string{
		"proxy_name":   c.Param("proxy_name"),
		"service_name": c.Param("service_name"),
	}
	endpoint := "http://" + HOST + ":" + PORT + "/v1/graphql"
	var buf bytes.Buffer
	if err := saaras.RunDBQuery(endpoint, QCreateProxyServiceAssociation, &buf, args, log); err != nil {
		log.Errorf("Error when running http request [%v]\n", err)
	}
	return c.JSONBlob(http.StatusCreated, buf.Bytes())
}
// @Summary Disassociate a service from proxy
// @Description Disassociate a service from proxy
// @Tags proxy
// @Param proxy_name path string true "Name of proxy for which to list service"
// @Param service_name path string true "Name of service to list"
// @Accept json
// @Produce json
// @Success 200 {} integer OK
// @Router /proxy/{proxy_name}/service/{service_name} [delete]
// @Security ApiKeyAuth
// DELETE_Proxy_Service_Association unlinks the named service from the
// named proxy without deleting the service row.
func DELETE_Proxy_Service_Association(c echo.Context) error {
	log := logrus.StandardLogger().WithField("context", "web-http")
	args := map[string]string{
		"proxy_name":   c.Param("proxy_name"),
		"service_name": c.Param("service_name"),
	}
	endpoint := "http://" + HOST + ":" + PORT + "/v1/graphql"
	var buf bytes.Buffer
	if err := saaras.RunDBQuery(endpoint, QDeleteProxyServiceAssociation, &buf, args, log); err != nil {
		log.Errorf("Error when running http request [%v]\n", err)
	}
	return c.JSONBlob(http.StatusOK, buf.Bytes())
}
// @Summary Return specified service associated with this proxy
// @Description Return specified service associated with this proxy
// @Tags proxy
// @Param proxy_name path string true "Name of proxy for which to list service"
// @Param service_name path string true "Name of service to list"
// @Accept json
// @Produce json
// @Success 200 {} integer OK
// @Router /proxy/{proxy_name}/service/{service_name} [get]
// @Security ApiKeyAuth
// GET_Proxy_Service_Association fetches the association between the
// named proxy and service via QGetProxyServiceAssociation and replies
// 200 with the raw query response.
func GET_Proxy_Service_Association(c echo.Context) error {
	log := logrus.StandardLogger().WithField("context", "web-http")
	args := map[string]string{
		"proxy_name":   c.Param("proxy_name"),
		"service_name": c.Param("service_name"),
	}
	url := "http://" + HOST + ":" + PORT + "/v1/graphql"
	var buf bytes.Buffer
	if err := saaras.RunDBQuery(url, QGetProxyServiceAssociation, &buf, args, log); err != nil {
		// Best effort: log and still return whatever the buffer holds.
		log.Errorf("Error when running http request [%v]\n", err)
	}
	return c.JSONBlob(http.StatusOK, buf.Bytes())
}
// GET_Health_Check answers liveness probes with a 200 OK and an empty
// JSON body.
func GET_Health_Check(c echo.Context) error {
	var empty bytes.Buffer
	return c.JSONBlob(http.StatusOK, empty.Bytes())
}
// Add_proxy_routes registers all proxy-related HTTP routes (CRUD,
// proxy-to-service association, operational dumps, and the health
// check) on the given echo instance.
func Add_proxy_routes(e *echo.Echo) {
	// Proxy CRUD
	e.GET("/proxy", GET_Proxy)
	e.POST("/proxy", POST_Proxy)
	e.DELETE("/proxy/:proxy_name", DELETE_Proxy)
	e.GET("/proxy/:proxy_name", GET_One_Proxy)
	// Proxy to Service association with implied service CRUD
	// Only the GET makes sense here?
	e.GET("/proxy/:proxy_name/service", GET_Proxy_Service)
	// Proxy to Service association
	e.POST("/proxy/:proxy_name/service/:service_name", POST_Proxy_Service_Association)
	e.GET("/proxy/:proxy_name/service/:service_name", GET_Proxy_Service_Association)
	e.DELETE("/proxy/:proxy_name/service/:service_name", DELETE_Proxy_Service_Association)
	// Support for operational-verbs. Note: /proxy/dump is a static path
	// and is matched ahead of the :proxy_name parameter route by echo.
	e.GET("/proxy/dump", GET_Proxy_Detail)
	e.GET("/proxy/dump/:proxy_name", GET_One_Proxy_Detail)
	e.GET("/health", GET_Health_Check)
}
|
// Copyright 2014 Dirk Jablonowski. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package dualbutton
import (
"fmt"
"github.com/dirkjabl/bricker"
"github.com/dirkjabl/bricker/device"
"github.com/dirkjabl/bricker/net/packet"
)
// GetButtonState creates a subscriber to get the button states.
func GetButtonState(id string, uid uint32, handler func(device.Resulter, error)) *device.Device {
	gen := device.Generator{
		Id:         device.FallbackId(id, "GetButtonState"),
		Fid:        function_get_button_state,
		Uid:        uid,
		Result:     &ButtonState{},
		Handler:    handler,
		WithPacket: true,
	}
	return gen.CreateDevice()
}
// GetButtonStateFuture is a future pattern version for a synchronized call of the subscriber.
// If an error occurs, the result is nil.
func GetButtonStateFuture(brick *bricker.Bricker, connectorname string, uid uint32) *ButtonState {
	future := make(chan *ButtonState)
	defer close(future)
	sub := GetButtonState("getbuttonstatefuture"+device.GenId(), uid,
		func(r device.Resulter, err error) {
			// v stays nil unless the result decodes to a *ButtonState.
			var v *ButtonState
			if err == nil {
				if value, ok := r.(*ButtonState); ok {
					v = value
				}
			}
			future <- v
		})
	err := brick.Subscribe(sub, connectorname)
	if err != nil {
		return nil
	}
	return <-future
}
/*
ButtonState is the type for the state of the buttons.

	0 - button pressed
	1 - button released
*/
type ButtonState struct {
	ButtonLeft  uint8 // button left
	ButtonRight uint8 // button right
}
// FromPacket fills the ButtonState from the payload of the given packet.
func (bs *ButtonState) FromPacket(p *packet.Packet) error {
	err := device.CheckForFromPacket(bs, p)
	if err != nil {
		return err
	}
	return p.Payload.Decode(bs)
}
// String fulfills the stringer interface.
func (bs *ButtonState) String() string {
	if bs == nil {
		return "Button state [nil]"
	}
	return "Button state " + fmt.Sprintf("[Button left: %s (%d), Button right: %s (%d)]",
		ButtonStateName(bs.ButtonLeft), bs.ButtonLeft,
		ButtonStateName(bs.ButtonRight), bs.ButtonRight)
}
// Copy creates a copy of the content.
func (bs *ButtonState) Copy() device.Resulter {
	if bs == nil {
		return nil
	}
	dup := ButtonState{
		ButtonLeft:  bs.ButtonLeft,
		ButtonRight: bs.ButtonRight,
	}
	return &dup
}
|
// Copyright 2014 Dirk Jablonowski. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package analogin
import (
"fmt"
"github.com/dirkjabl/bricker"
"github.com/dirkjabl/bricker/device"
"github.com/dirkjabl/bricker/net/packet"
)
/*
SetRange creates a subscriber to set the measurement range.
The default value is 0.

	0: Automatically switched
	1: 0V - 6.05V, 1.48mV resolution
	2: 0V - 10.32V, 2.52mV resolution
	3: 0V - 36.30V, 8.86mV resolution
	4: 0V - 45.00V, 11.25mV resolution
	5: 0V - 3.3V, 0.81mV resolution
*/
func SetRange(id string, uid uint32, r *Range, handler func(device.Resulter, error)) *device.Device {
	gen := device.Generator{
		Id:         device.FallbackId(id, "SetRange"),
		Fid:        function_set_range,
		Uid:        uid,
		Data:       r,
		Handler:    handler,
		WithPacket: true,
	}
	return gen.CreateDevice()
}
// SetRangeFuture is a future pattern version for a synchronized call of the subscriber.
// If an error occurs, the result is false.
func SetRangeFuture(brick bricker.Bricker, connectorname string, uid uint32, r *Range) bool {
	future := make(chan bool)
	defer close(future)
	sub := SetRange("setrangefuture"+device.GenId(), uid, r,
		func(resulter device.Resulter, err error) {
			future <- device.IsEmptyResultOk(resulter, err)
		})
	if err := brick.Subscribe(sub, connectorname); err != nil {
		return false
	}
	return <-future
}
// GetRange creates a subscriber to get the measurement range value.
func GetRange(id string, uid uint32, handler func(device.Resulter, error)) *device.Device {
	gen := device.Generator{
		Id:         device.FallbackId(id, "GetRange"),
		Fid:        function_get_range,
		Uid:        uid,
		Result:     &Range{},
		Handler:    handler,
		WithPacket: true,
	}
	return gen.CreateDevice()
}
// GetRangeFuture is a future pattern version for a synchronized call of the subscriber.
// If an error occurs, the result is nil.
func GetRangeFuture(brick bricker.Bricker, connectorname string, uid uint32) *Range {
	future := make(chan *Range)
	defer close(future)
	sub := GetRange("getrangefuture"+device.GenId(), uid,
		func(r device.Resulter, err error) {
			// v stays nil unless the result decodes to a *Range.
			var v *Range
			if err == nil {
				if value, ok := r.(*Range); ok {
					v = value
				}
			}
			future <- v
		})
	err := brick.Subscribe(sub, connectorname)
	if err != nil {
		return nil
	}
	return <-future
}
// Constants for the range (see Range.Name for the readable labels).
// NOTE(review): the mV/mv suffix casing is inconsistent across these
// names; they are exported, so renaming would break callers.
const (
	RangeAutomaticallySwitched = 0 // range chosen automatically
	Range0V6_05V1_48mV         = 1 // 0V - 6.05V, 1.48mV resolution
	Range0V10_32V2_52mV        = 2 // 0V - 10.32V, 2.52mV resolution
	Range0V36_30V8_86mv        = 3 // 0V - 36.30V, 8.86mV resolution
	Range0V45V11_25mv          = 4 // 0V - 45.00V, 11.25mV resolution
	Range0V3_3V0_81mV          = 5 // 0V - 3.3V, 0.81mV resolution
)
// Range result type
type Range struct {
	Value uint8 // range identifier (one of the Range* constants)
}
// FromPacket fills the Range from the payload of the given packet.
func (r *Range) FromPacket(p *packet.Packet) error {
	err := device.CheckForFromPacket(r, p)
	if err != nil {
		return err
	}
	return p.Payload.Decode(r)
}
// Name converts the range identifier value to a readable string.
// Unrecognized values yield "Unknown".
func (r *Range) Name() string {
	switch r.Value {
	case RangeAutomaticallySwitched:
		return "Automatically switched"
	case Range0V6_05V1_48mV:
		return "0V - 6.05V, 1.48mV resolution"
	case Range0V10_32V2_52mV:
		return "0V - 10.32V, 2.52mV resolution"
	case Range0V36_30V8_86mv:
		return "0V - 36.30V, 8.86mV resolution"
	case Range0V45V11_25mv:
		return "0V - 45.00V, 11.25mV resolution"
	case Range0V3_3V0_81mV:
		return "0V - 3.3V, 0.81mV resolution"
	default:
		return "Unknown"
	}
}
// String fulfills the stringer interface.
func (r *Range) String() string {
	if r == nil {
		return "Range [nil]"
	}
	return "Range " + fmt.Sprintf("[Value: %s (%d)]", r.Name(), r.Value)
}
// Copy creates a copy of the content.
func (r *Range) Copy() device.Resulter {
	if r == nil {
		return nil
	}
	dup := Range{Value: r.Value}
	return &dup
}
|
// Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package transform
import (
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sessiondata"
)
// IsAggregateVisitor checks if walked expressions contain aggregate functions.
type IsAggregateVisitor struct {
	// Aggregated is set to true by VisitPre when an aggregate function
	// application is found in the walked expression tree.
	Aggregated bool
	// searchPath is used to search for unqualified function names.
	searchPath sessiondata.SearchPath
}

// Compile-time check that *IsAggregateVisitor implements tree.Visitor.
var _ tree.Visitor = &IsAggregateVisitor{}
// VisitPre satisfies the Visitor interface.
func (v *IsAggregateVisitor) VisitPre(expr tree.Expr) (recurse bool, newExpr tree.Expr) {
	switch t := expr.(type) {
	case *tree.Subquery:
		// Do not descend into subqueries.
		return false, expr
	case *tree.FuncExpr:
		if t.IsWindowFunctionApplication() {
			// A window function application of an aggregate builtin is not an
			// aggregate function, but it can contain aggregate functions.
			return true, expr
		}
		fd, err := t.Func.Resolve(v.searchPath)
		if err != nil {
			// Unresolvable function: stop descending on this branch.
			return false, expr
		}
		if fd.Class == tree.AggregateClass {
			v.Aggregated = true
			return false, expr
		}
	}
	return true, expr
}
// VisitPost satisfies the Visitor interface. It is a no-op: the
// expression is returned unchanged.
func (*IsAggregateVisitor) VisitPost(expr tree.Expr) tree.Expr { return expr }
|
package main
import (
"flag"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strings"
)
// tagSymbol is the prefix character that marks a word as a tag.
const tagSymbol string = "+"

// indexFileName is the generated per-directory index file; it is
// skipped when scanning for tagged files.
const indexFileName string = "_filetags.md"

// whiteSpaceDelim lists the characters treated as word separators when
// splitting a file name into candidate tag words.
var whiteSpaceDelim = [...]string{" ", "_", ".", "[", "]"}

// tagFiles maps one tag to the files that carry it.
type tagFiles struct {
	tag  string
	file []string
}

// tags is the collection of all tag-to-files mappings.
type tags struct {
	tf []tagFiles
}
// initialize parses the command-line flags and returns the absolute
// target directory and whether tags from subdirectories should be
// included (flag -s, default true). The former result name "short"
// was misleading: the bool is consumed as includeSubdirTags.
func initialize() (dir string, includeSubdirs bool) {
	flag.BoolVar(&includeSubdirs, "s", true, "Include files from subdirectories")
	flag.StringVar(&dir, "t", ".", "Target directory path")
	flag.Parse()
	dir, err := filepath.Abs(dir)
	check(err)
	return
}
// append records fileName under tag (lowercased). A tag entry is
// created on first use; duplicate file names within a tag are ignored.
func (tt *tags) append(tag, fileName string) {
	tag = strings.ToLower(tag)
	for i := range tt.tf {
		if tt.tf[i].tag != tag {
			continue
		}
		if !contains(tt.tf[i].file, fileName) {
			tt.tf[i].file = append(tt.tf[i].file, fileName)
		}
		return
	}
	tt.tf = append(tt.tf, tagFiles{tag, []string{fileName}})
}
// check panics on any non-nil error; nil is a no-op.
func check(err error) {
	if err == nil {
		return
	}
	panic(err)
}
// contains reports whether slice s holds the string e.
func contains(s []string, e string) bool {
	for i := range s {
		if s[i] == e {
			return true
		}
	}
	return false
}
// formatRes renders the collected tags as markdown/HTML: one
// collapsible <details> section per tag with an alphabetically sorted
// list of file links.
func formatRes(tfs tags) string {
	var str strings.Builder
	for _, ctf := range tfs.tf {
		str.WriteString("<details markdown='1'><summary markdown='1'>" + ctf.tag + "</summary>\n")
		sort.Slice(ctf.file, func(i, j int) bool {
			return ctf.file[i] < ctf.file[j]
		})
		for _, cf := range ctf.file {
			// Link target: spaces URL-escaped. Link label: underscores
			// shown as spaces and the ".md" extension stripped.
			// Use TrimSuffix, not TrimRight: TrimRight treats ".md" as a
			// character set and over-trims names ending in 'm' or 'd'
			// (e.g. "random.md" would become "rando").
			label := strings.TrimSuffix(strings.Replace(cf, "_", " ", -1), ".md")
			str.WriteString("<li><a href=\"" + strings.Replace(cf, " ", "%20", -1) + "\">" + label + "</a>\n")
		}
		str.WriteString("</details>\n\n")
	}
	return str.String()
}
// getTags extracts the unique tags from source: words beginning with
// tagSymbol, lowercased, in order of first appearance (without the
// leading symbol).
func getTags(source string) []string {
	res := make([]string, 0)
	source = " " + strings.ToLower(source)
	flds := strings.FieldsFunc(source, func(r rune) bool {
		return contains(whiteSpaceDelim[:], string(r))
	})
	for _, fld := range flds {
		fld = strings.Trim(fld, " ")
		if fld == "" || fld == tagSymbol {
			continue
		}
		runes := []rune(fld)
		if string(runes[0:1]) != tagSymbol {
			continue
		}
		tag := string(runes[1:])
		if !contains(res, tag) {
			res = append(res, tag)
		}
	}
	return res
}
// appendSubdirTags merges newTags (collected from subdir) into source,
// prefixing each file with the subdirectory path and additionally
// tagging every file with the subdirectory's own name.
func appendSubdirTags(source, newTags tags, subdir string) tags {
	dirTag := filepath.Base(subdir)
	for _, curTag := range newTags.tf {
		for _, curFile := range curTag.file {
			path := subdir + "/" + curFile
			source.append(curTag.tag, path)
			source.append(dirTag, path)
		}
	}
	return source
}
// writeIndexFile writes the rendered tag index to dir/indexFileName.
// When no tags were collected the index file is removed instead. The
// file is rewritten only when its content would actually change.
func writeIndexFile(tfs tags, dir string) {
	dstFName := filepath.Join(dir, indexFileName)
	if len(tfs.tf) == 0 {
		os.Remove(dstFName)
		return
	}
	// Render once and reuse; the original rendered twice (once for the
	// comparison, once for the write).
	newInfo := formatRes(tfs)
	// Cheap size check first, then a full content compare; any stat or
	// read error simply falls through to a rewrite. (The original had a
	// redundant nested err == nil check here.)
	if s, err := os.Stat(dstFName); err == nil && s.Size() == int64(len(newInfo)) {
		if rawExs, err := ioutil.ReadFile(dstFName); err == nil && newInfo == string(rawExs) {
			return
		}
	}
	os.Remove(dstFName)
	f, err := os.Create(dstFName)
	check(err)
	defer f.Close()
	_, err = f.WriteString(newInfo)
	check(err)
}
// processDir scans dir for markdown files, collects their tags
// (recursing into subdirectories), writes the per-directory index
// file, and returns the collected tags sorted by tag name.
func processDir(dir string, includeSubdirTags bool) tags {
	var tfs tags
	lst, err := ioutil.ReadDir(dir)
	check(err)
	for _, file := range lst {
		name := file.Name()
		if file.IsDir() {
			sub := processDir(filepath.Join(dir, name), includeSubdirTags)
			if includeSubdirTags {
				tfs = appendSubdirTags(tfs, sub, name)
			}
			continue
		}
		// Only plain .md files count; the generated index is skipped.
		if filepath.Ext(strings.TrimSpace(name)) != ".md" || name == indexFileName {
			continue
		}
		for _, curTag := range getTags(name) {
			tfs.append(curTag, name)
		}
	}
	sort.Slice(tfs.tf, func(i, j int) bool {
		return tfs.tf[i].tag < tfs.tf[j].tag
	})
	writeIndexFile(tfs, dir)
	return tfs
}
func main() {
processDir(initialize())
}
|
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package stack provides the glue between networking protocols and the
// consumers of the networking stack.
//
// For consumers, the only function of interest is New(), everything else is
// provided by the tcpip/public package.
package stack
import (
"encoding/binary"
"fmt"
"io"
"math/rand"
"sync/atomic"
"time"
"golang.org/x/time/rate"
"gvisor.dev/gvisor/pkg/atomicbitops"
"gvisor.dev/gvisor/pkg/buffer"
"gvisor.dev/gvisor/pkg/log"
cryptorand "gvisor.dev/gvisor/pkg/rand"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/header"
"gvisor.dev/gvisor/pkg/tcpip/ports"
"gvisor.dev/gvisor/pkg/waiter"
)
const (
	// DefaultTOS is the default type of service value for network endpoints.
	// (0 means "unspecified"; presumably applied when an endpoint does not
	// set an explicit TOS — confirm at usage sites, not visible here.)
	DefaultTOS = 0
)
// transportProtocolState couples a registered transport protocol with an
// optional stack-wide default packet handler (installed via
// SetTransportProtocolHandler).
type transportProtocolState struct {
	proto TransportProtocol
	// defaultHandler, if non-nil, is invoked for packets of this protocol
	// and reports whether it handled the packet. NOTE(review): presumably
	// used when no endpoint matches — confirm at call sites (not visible
	// in this chunk).
	defaultHandler func(id TransportEndpointID, pkt PacketBufferPtr) bool
}
// ResumableEndpoint is an endpoint that needs to be resumed after restore.
type ResumableEndpoint interface {
	// Resume resumes an endpoint after restore. This can be used to restart
	// background workers such as protocol goroutines. This must be called after
	// all indirect dependencies of the endpoint have been restored, which
	// generally implies at the end of the restore process.
	Resume(*Stack)
}
// uniqueIDGenerator is a default unique ID generator backed by an atomic
// counter.
type uniqueIDGenerator atomicbitops.Uint64

// UniqueID returns the next identifier by atomically incrementing the
// counter; the first value returned is 1.
func (u *uniqueIDGenerator) UniqueID() uint64 {
	return ((*atomicbitops.Uint64)(u)).Add(1)
}
// netRawMissingLogger is a rate-limited logger emitting at most one
// message per minute. (Its messages are produced at call sites not
// visible in this chunk.)
var netRawMissingLogger = log.BasicRateLimitedLogger(time.Minute)
// Stack is a networking stack, with all supported protocols, NICs, and route
// table.
//
// LOCK ORDERING: mu > routeMu.
type Stack struct {
	// transportProtocols and networkProtocols hold the protocols registered
	// at stack creation (see New), keyed by protocol number.
	transportProtocols map[tcpip.TransportProtocolNumber]*transportProtocolState
	networkProtocols   map[tcpip.NetworkProtocolNumber]NetworkProtocol

	// rawFactory creates raw endpoints. If nil, raw endpoints are
	// disabled. It is set during Stack creation and is immutable.
	rawFactory RawFactory
	// packetEndpointWriteSupported mirrors Options.AllowPacketEndpointWrite.
	packetEndpointWriteSupported bool

	// demux is the global transport demuxer, created in New.
	demux *transportDemuxer

	// stats holds the stack-wide statistics (filled in from Options.Stats).
	stats tcpip.Stats

	// routeMu protects annotated fields below.
	routeMu routeStackRWMutex

	// +checklocks:routeMu
	routeTable []tcpip.Route

	// mu protects annotated fields below.
	mu stackRWMutex
	// +checklocks:mu
	nics map[tcpip.NICID]*nic

	// defaultForwardingEnabled records, per network protocol, whether newly
	// created NICs get forwarding enabled by default
	// (see SetForwardingDefaultAndAllNICs).
	defaultForwardingEnabled map[tcpip.NetworkProtocolNumber]struct{}

	// cleanupEndpointsMu protects cleanupEndpoints.
	cleanupEndpointsMu cleanupEndpointsMutex
	// +checklocks:cleanupEndpointsMu
	cleanupEndpoints map[TransportEndpoint]struct{}

	// PortManager is embedded to provide port reservation directly on the
	// stack.
	*ports.PortManager

	// If not nil, then any new endpoints will have this probe function
	// invoked everytime they receive a TCP segment.
	tcpProbeFunc atomic.Value // TCPProbeFunc

	// clock is used to generate user-visible times.
	clock tcpip.Clock

	// handleLocal allows non-loopback interfaces to loop packets.
	handleLocal bool

	// tables are the iptables packet filtering and manipulation rules.
	// TODO(gvisor.dev/issue/4595): S/R this field.
	tables *IPTables

	// resumableEndpoints is a list of endpoints that need to be resumed if the
	// stack is being restored.
	resumableEndpoints []ResumableEndpoint

	// icmpRateLimiter is a global rate limiter for all ICMP messages generated
	// by the stack.
	icmpRateLimiter *ICMPRateLimiter

	// seed is a one-time random value initialized at stack startup.
	//
	// TODO(gvisor.dev/issue/940): S/R this field.
	seed uint32

	// nudConfigs is the default NUD configurations used by interfaces.
	nudConfigs NUDConfigurations

	// nudDisp is the NUD event dispatcher that is used to send the netstack
	// integrator NUD related events.
	nudDisp NUDDispatcher

	// uniqueIDGenerator is a generator of unique identifiers.
	uniqueIDGenerator UniqueID

	// randomGenerator is an injectable pseudo random generator that can be
	// used when a random number is required.
	randomGenerator *rand.Rand

	// secureRNG is a cryptographically secure random number generator.
	secureRNG io.Reader

	// sendBufferSize holds the min/default/max send buffer sizes for
	// endpoints other than TCP.
	sendBufferSize tcpip.SendBufferSizeOption

	// receiveBufferSize holds the min/default/max receive buffer sizes for
	// endpoints other than TCP.
	receiveBufferSize tcpip.ReceiveBufferSizeOption

	// tcpInvalidRateLimit is the maximal rate for sending duplicate
	// acknowledgements in response to incoming TCP packets that are for an existing
	// connection but that are invalid due to any of the following reasons:
	//
	//   a) out-of-window sequence number.
	//   b) out-of-window acknowledgement number.
	//   c) PAWS check failure (when implemented).
	//
	// This is required to prevent potential ACK loops.
	// Setting this to 0 will disable all rate limiting.
	tcpInvalidRateLimit time.Duration

	// tsOffsetSecret is the secret key for generating timestamp offsets
	// initialized at stack startup.
	tsOffsetSecret uint32
}
// UniqueID is an abstract generator of unique identifiers.
type UniqueID interface {
	// UniqueID returns a new identifier, distinct from those returned by
	// previous calls on the same generator.
	UniqueID() uint64
}

// NetworkProtocolFactory instantiates a network protocol.
//
// NetworkProtocolFactory must not attempt to modify the stack, it may only
// query the stack.
type NetworkProtocolFactory func(*Stack) NetworkProtocol

// TransportProtocolFactory instantiates a transport protocol.
//
// TransportProtocolFactory must not attempt to modify the stack, it may only
// query the stack.
type TransportProtocolFactory func(*Stack) TransportProtocol
// Options contains optional Stack configuration.
type Options struct {
	// NetworkProtocols lists the network protocols to enable.
	NetworkProtocols []NetworkProtocolFactory

	// TransportProtocols lists the transport protocols to enable.
	TransportProtocols []TransportProtocolFactory

	// Clock is an optional clock used for timekeeping.
	//
	// If Clock is nil, tcpip.NewStdClock() will be used.
	Clock tcpip.Clock

	// Stats are optional statistic counters.
	Stats tcpip.Stats

	// HandleLocal indicates whether packets destined to their source
	// should be handled by the stack internally (true) or outside the
	// stack (false).
	HandleLocal bool

	// UniqueID is an optional generator of unique identifiers.
	UniqueID UniqueID

	// NUDConfigs is the default NUD configurations used by interfaces.
	NUDConfigs NUDConfigurations

	// NUDDisp is the NUD event dispatcher that an integrator can provide to
	// receive NUD related events.
	NUDDisp NUDDispatcher

	// RawFactory produces raw endpoints. Raw endpoints are enabled only if
	// this is non-nil.
	RawFactory RawFactory

	// AllowPacketEndpointWrite determines if packet endpoints support write
	// operations.
	AllowPacketEndpointWrite bool

	// RandSource is an optional source to use to generate random
	// numbers. If omitted it defaults to a Source seeded by the data
	// returned by the stack secure RNG.
	//
	// RandSource must be thread-safe.
	RandSource rand.Source

	// IPTables are the initial iptables rules. If nil, DefaultIPTables will be
	// used to construct the initial iptables rules.
	IPTables *IPTables

	// DefaultIPTables is an optional iptables rules constructor that is called
	// if IPTables is nil. If both fields are nil, iptables will allow all
	// traffic.
	DefaultIPTables func(clock tcpip.Clock, rand *rand.Rand) *IPTables

	// SecureRNG is a cryptographically secure random number generator.
	SecureRNG io.Reader
}
// TransportEndpointInfo holds useful information about a transport endpoint
// which can be queried by monitoring tools.
//
// +stateify savable
type TransportEndpointInfo struct {
	// The following fields are initialized at creation time and are
	// immutable.

	NetProto   tcpip.NetworkProtocolNumber
	TransProto tcpip.TransportProtocolNumber

	// The following fields are protected by endpoint mu.

	ID TransportEndpointID
	// BindNICID and bindAddr are set via calls to Bind(). They are used to
	// reject attempts to send data or connect via a different NIC or
	// address.
	BindNICID tcpip.NICID
	BindAddr  tcpip.Address
	// RegisterNICID is the default NICID registered as a side-effect of
	// connect or datagram write.
	RegisterNICID tcpip.NICID
}
// AddrNetProtoLocked unwraps the specified address if it is a V4-mapped V6
// address and returns the network protocol number to be used to communicate
// with the specified address. It returns an error if the passed address is
// incompatible with the receiver.
//
// Precondition: the parent endpoint mu must be held while calling this method.
func (t *TransportEndpointInfo) AddrNetProtoLocked(addr tcpip.FullAddress, v6only bool) (tcpip.FullAddress, tcpip.NetworkProtocolNumber, tcpip.Error) {
	netProto := t.NetProto
	switch addr.Addr.BitLen() {
	case header.IPv4AddressSizeBits:
		netProto = header.IPv4ProtocolNumber
	case header.IPv6AddressSizeBits:
		if header.IsV4MappedAddress(addr.Addr) {
			// Unwrap ::ffff:a.b.c.d to the embedded IPv4 address.
			netProto = header.IPv4ProtocolNumber
			addr.Addr = tcpip.AddrFrom4Slice(addr.Addr.AsSlice()[header.IPv6AddressSize-header.IPv4AddressSize:])
			if addr.Addr == header.IPv4Any {
				// A mapped IPv4 "any" becomes the unspecified address.
				addr.Addr = tcpip.Address{}
			}
		}
	}

	// Reject mixing the endpoint's local address family with the other one.
	switch t.ID.LocalAddress.BitLen() {
	case header.IPv4AddressSizeBits:
		if addr.Addr.BitLen() == header.IPv6AddressSizeBits {
			return tcpip.FullAddress{}, 0, &tcpip.ErrInvalidEndpointState{}
		}
	case header.IPv6AddressSizeBits:
		if addr.Addr.BitLen() == header.IPv4AddressSizeBits {
			return tcpip.FullAddress{}, 0, &tcpip.ErrNetworkUnreachable{}
		}
	}

	switch {
	case netProto == t.NetProto:
	case netProto == header.IPv4ProtocolNumber && t.NetProto == header.IPv6ProtocolNumber:
		// A dual-stack (v6-with-v4-mapped) endpoint may reach IPv4 peers
		// unless it was explicitly restricted to IPv6 only.
		if v6only {
			return tcpip.FullAddress{}, 0, &tcpip.ErrHostUnreachable{}
		}
	default:
		return tcpip.FullAddress{}, 0, &tcpip.ErrInvalidEndpointState{}
	}

	return addr, netProto, nil
}
// IsEndpointInfo is an empty method to implement the tcpip.EndpointInfo
// marker interface.
func (*TransportEndpointInfo) IsEndpointInfo() {}
// New allocates a new networking stack with only the requested networking and
// transport protocols configured with default options.
//
// Note, NDPConfigurations will be fixed before being used by the Stack. That
// is, if an invalid value was provided, it will be reset to the default value.
//
// Protocol options can be changed by calling the
// SetNetworkProtocolOption/SetTransportProtocolOption methods provided by the
// stack. Please refer to individual protocol implementations as to what options
// are supported.
func New(opts Options) *Stack {
	clock := opts.Clock
	if clock == nil {
		clock = tcpip.NewStdClock()
	}

	if opts.UniqueID == nil {
		opts.UniqueID = new(uniqueIDGenerator)
	}

	if opts.SecureRNG == nil {
		opts.SecureRNG = cryptorand.Reader
	}

	// Seed the pseudo random generator from the secure RNG when no source
	// was supplied.
	randSrc := opts.RandSource
	if randSrc == nil {
		var v int64
		if err := binary.Read(opts.SecureRNG, binary.LittleEndian, &v); err != nil {
			panic(err)
		}
		// Source provided by rand.NewSource is not thread-safe so
		// we wrap it in a simple thread-safe version.
		randSrc = &lockedRandomSource{src: rand.NewSource(v)}
	}
	randomGenerator := rand.New(randSrc)

	if opts.IPTables == nil {
		if opts.DefaultIPTables == nil {
			opts.DefaultIPTables = DefaultTables
		}
		opts.IPTables = opts.DefaultIPTables(clock, randomGenerator)
	}

	opts.NUDConfigs.resetInvalidFields()

	s := &Stack{
		transportProtocols:           make(map[tcpip.TransportProtocolNumber]*transportProtocolState),
		networkProtocols:             make(map[tcpip.NetworkProtocolNumber]NetworkProtocol),
		nics:                         make(map[tcpip.NICID]*nic),
		packetEndpointWriteSupported: opts.AllowPacketEndpointWrite,
		defaultForwardingEnabled:     make(map[tcpip.NetworkProtocolNumber]struct{}),
		cleanupEndpoints:             make(map[TransportEndpoint]struct{}),
		PortManager:                  ports.NewPortManager(),
		clock:                        clock,
		stats:                        opts.Stats.FillIn(),
		handleLocal:                  opts.HandleLocal,
		tables:                       opts.IPTables,
		icmpRateLimiter:              NewICMPRateLimiter(clock),
		seed:                         randomGenerator.Uint32(),
		nudConfigs:                   opts.NUDConfigs,
		uniqueIDGenerator:            opts.UniqueID,
		nudDisp:                      opts.NUDDisp,
		randomGenerator:              randomGenerator,
		secureRNG:                    opts.SecureRNG,
		sendBufferSize: tcpip.SendBufferSizeOption{
			Min:     MinBufferSize,
			Default: DefaultBufferSize,
			Max:     DefaultMaxBufferSize,
		},
		receiveBufferSize: tcpip.ReceiveBufferSizeOption{
			Min:     MinBufferSize,
			Default: DefaultBufferSize,
			Max:     DefaultMaxBufferSize,
		},
		tcpInvalidRateLimit: defaultTCPInvalidRateLimit,
		tsOffsetSecret:      randomGenerator.Uint32(),
	}

	// Add specified network protocols.
	for _, netProtoFactory := range opts.NetworkProtocols {
		netProto := netProtoFactory(s)
		s.networkProtocols[netProto.Number()] = netProto
	}

	// Add specified transport protocols.
	for _, transProtoFactory := range opts.TransportProtocols {
		transProto := transProtoFactory(s)
		s.transportProtocols[transProto.Number()] = &transportProtocolState{
			proto: transProto,
		}
	}

	// Add the factory for raw endpoints, if present.
	s.rawFactory = opts.RawFactory

	// Create the global transport demuxer.
	s.demux = newTransportDemuxer(s)

	return s
}
// UniqueID returns a unique identifier from the stack's configured
// generator.
func (s *Stack) UniqueID() uint64 {
	gen := s.uniqueIDGenerator
	return gen.UniqueID()
}
// SetNetworkProtocolOption allows configuring individual protocol level
// options. This method returns an error if the protocol is not supported or
// option is not supported by the protocol implementation or the provided value
// is incorrect.
func (s *Stack) SetNetworkProtocolOption(network tcpip.NetworkProtocolNumber, option tcpip.SettableNetworkProtocolOption) tcpip.Error {
	if netProto, ok := s.networkProtocols[network]; ok {
		return netProto.SetOption(option)
	}
	return &tcpip.ErrUnknownProtocol{}
}
// NetworkProtocolOption allows retrieving individual protocol level option
// values. This method returns an error if the protocol is not supported or
// option is not supported by the protocol implementation. E.g.:
//
//	var v ipv4.MyOption
//	err := s.NetworkProtocolOption(tcpip.IPv4ProtocolNumber, &v)
//	if err != nil {
//		...
//	}
func (s *Stack) NetworkProtocolOption(network tcpip.NetworkProtocolNumber, option tcpip.GettableNetworkProtocolOption) tcpip.Error {
	if netProto, ok := s.networkProtocols[network]; ok {
		return netProto.Option(option)
	}
	return &tcpip.ErrUnknownProtocol{}
}
// SetTransportProtocolOption allows configuring individual protocol level
// options. This method returns an error if the protocol is not supported or
// option is not supported by the protocol implementation or the provided value
// is incorrect.
func (s *Stack) SetTransportProtocolOption(transport tcpip.TransportProtocolNumber, option tcpip.SettableTransportProtocolOption) tcpip.Error {
	if state, ok := s.transportProtocols[transport]; ok {
		return state.proto.SetOption(option)
	}
	return &tcpip.ErrUnknownProtocol{}
}
// TransportProtocolOption allows retrieving individual protocol level option
// values. This method returns an error if the protocol is not supported or
// option is not supported by the protocol implementation.
//
//	var v tcp.SACKEnabled
//	if err := s.TransportProtocolOption(tcpip.TCPProtocolNumber, &v); err != nil {
//		...
//	}
func (s *Stack) TransportProtocolOption(transport tcpip.TransportProtocolNumber, option tcpip.GettableTransportProtocolOption) tcpip.Error {
	if state, ok := s.transportProtocols[transport]; ok {
		return state.proto.Option(option)
	}
	return &tcpip.ErrUnknownProtocol{}
}
// SetTransportProtocolHandler sets the per-stack default handler for the given
// protocol.
//
// It must be called only during initialization of the stack. Changing it as the
// stack is operating is not supported.
func (s *Stack) SetTransportProtocolHandler(p tcpip.TransportProtocolNumber, h func(TransportEndpointID, PacketBufferPtr) bool) {
	// Silently ignore protocols that were never registered.
	if state := s.transportProtocols[p]; state != nil {
		state.defaultHandler = h
	}
}
// Clock returns the Stack's clock for retrieving the current time and
// scheduling work.
func (s *Stack) Clock() tcpip.Clock {
	return s.clock
}
// Stats returns a mutable copy of the current stats.
//
// This is not generally exported via the public interface, but is available
// internally. (The tcpip.Stats value contains counter pointers, so the
// returned copy still refers to the live counters.)
func (s *Stack) Stats() tcpip.Stats {
	return s.stats
}
// SetNICForwarding enables or disables packet forwarding on the specified NIC
// for the passed protocol.
//
// Returns the previous configuration on the NIC.
func (s *Stack) SetNICForwarding(id tcpip.NICID, protocol tcpip.NetworkProtocolNumber, enable bool) (bool, tcpip.Error) {
	s.mu.RLock()
	defer s.mu.RUnlock()

	if nic, ok := s.nics[id]; ok {
		return nic.setForwarding(protocol, enable)
	}
	return false, &tcpip.ErrUnknownNICID{}
}
// NICForwarding returns the forwarding configuration for the specified NIC.
func (s *Stack) NICForwarding(id tcpip.NICID, protocol tcpip.NetworkProtocolNumber) (bool, tcpip.Error) {
	s.mu.RLock()
	defer s.mu.RUnlock()

	if nic, ok := s.nics[id]; ok {
		return nic.forwarding(protocol)
	}
	return false, &tcpip.ErrUnknownNICID{}
}
// SetForwardingDefaultAndAllNICs sets packet forwarding for all NICs for the
// passed protocol and sets the default setting for newly created NICs.
func (s *Stack) SetForwardingDefaultAndAllNICs(protocol tcpip.NetworkProtocolNumber, enable bool) tcpip.Error {
	s.mu.Lock()
	defer s.mu.Unlock()

	doneOnce := false
	for id, nic := range s.nics {
		_, err := nic.setForwarding(protocol, enable)
		if err != nil {
			// Expect forwarding to be settable on all interfaces if it was set on
			// one.
			if doneOnce {
				panic(fmt.Sprintf("nic(id=%d).setForwarding(%d, %t): %s", id, protocol, enable, err))
			}
			return err
		}
		doneOnce = true
	}

	// Record the default for NICs created later.
	if !enable {
		delete(s.defaultForwardingEnabled, protocol)
		return nil
	}
	s.defaultForwardingEnabled[protocol] = struct{}{}
	return nil
}
// AddMulticastRoute adds a multicast route to be used for the specified
// addresses and protocol.
func (s *Stack) AddMulticastRoute(protocol tcpip.NetworkProtocolNumber, addresses UnicastSourceAndMulticastDestination, route MulticastRoute) tcpip.Error {
	netProto, found := s.networkProtocols[protocol]
	if !found {
		return &tcpip.ErrUnknownProtocol{}
	}

	mcastProto, supported := netProto.(MulticastForwardingNetworkProtocol)
	if !supported {
		return &tcpip.ErrNotSupported{}
	}

	return mcastProto.AddMulticastRoute(addresses, route)
}
// RemoveMulticastRoute removes a multicast route that matches the specified
// addresses and protocol.
func (s *Stack) RemoveMulticastRoute(protocol tcpip.NetworkProtocolNumber, addresses UnicastSourceAndMulticastDestination) tcpip.Error {
	netProto, found := s.networkProtocols[protocol]
	if !found {
		return &tcpip.ErrUnknownProtocol{}
	}

	mcastProto, supported := netProto.(MulticastForwardingNetworkProtocol)
	if !supported {
		return &tcpip.ErrNotSupported{}
	}

	return mcastProto.RemoveMulticastRoute(addresses)
}
// MulticastRouteLastUsedTime returns a monotonic timestamp that represents the
// last time that the route that matches the provided addresses and protocol
// was used or updated.
func (s *Stack) MulticastRouteLastUsedTime(protocol tcpip.NetworkProtocolNumber, addresses UnicastSourceAndMulticastDestination) (tcpip.MonotonicTime, tcpip.Error) {
	netProto, found := s.networkProtocols[protocol]
	if !found {
		return tcpip.MonotonicTime{}, &tcpip.ErrUnknownProtocol{}
	}

	mcastProto, supported := netProto.(MulticastForwardingNetworkProtocol)
	if !supported {
		return tcpip.MonotonicTime{}, &tcpip.ErrNotSupported{}
	}

	return mcastProto.MulticastRouteLastUsedTime(addresses)
}
// EnableMulticastForwardingForProtocol enables multicast forwarding for the
// provided protocol.
//
// Returns true if forwarding was already enabled on the protocol.
// Additionally, returns an error if:
//
//   - The protocol is not found.
//   - The protocol doesn't support multicast forwarding.
//   - The multicast forwarding event dispatcher is nil.
//
// If successful, future multicast forwarding events will be sent to the
// provided event dispatcher.
func (s *Stack) EnableMulticastForwardingForProtocol(protocol tcpip.NetworkProtocolNumber, disp MulticastForwardingEventDispatcher) (bool, tcpip.Error) {
	proto, found := s.networkProtocols[protocol]
	if !found {
		return false, &tcpip.ErrUnknownProtocol{}
	}
	mcastProto, supported := proto.(MulticastForwardingNetworkProtocol)
	if !supported {
		return false, &tcpip.ErrNotSupported{}
	}
	return mcastProto.EnableMulticastForwarding(disp)
}
// DisableMulticastForwardingForProtocol disables multicast forwarding for the
// provided protocol.
//
// Returns an error if the provided protocol is not found or if it does not
// support multicast forwarding.
func (s *Stack) DisableMulticastForwardingForProtocol(protocol tcpip.NetworkProtocolNumber) tcpip.Error {
	proto, found := s.networkProtocols[protocol]
	if !found {
		return &tcpip.ErrUnknownProtocol{}
	}
	mcastProto, supported := proto.(MulticastForwardingNetworkProtocol)
	if !supported {
		return &tcpip.ErrNotSupported{}
	}
	mcastProto.DisableMulticastForwarding()
	return nil
}
// SetNICMulticastForwarding enables or disables multicast packet forwarding on
// the specified NIC for the passed protocol.
//
// Returns the previous configuration on the NIC.
//
// TODO(https://gvisor.dev/issue/7338): Implement support for multicast
// forwarding. Currently, setting this value is a no-op and is not ready for
// use.
func (s *Stack) SetNICMulticastForwarding(id tcpip.NICID, protocol tcpip.NetworkProtocolNumber, enable bool) (bool, tcpip.Error) {
	s.mu.RLock()
	defer s.mu.RUnlock()

	if nic, ok := s.nics[id]; ok {
		return nic.setMulticastForwarding(protocol, enable)
	}
	return false, &tcpip.ErrUnknownNICID{}
}
// NICMulticastForwarding returns the multicast forwarding configuration for
// the specified NIC.
func (s *Stack) NICMulticastForwarding(id tcpip.NICID, protocol tcpip.NetworkProtocolNumber) (bool, tcpip.Error) {
	s.mu.RLock()
	defer s.mu.RUnlock()

	if nic, ok := s.nics[id]; ok {
		return nic.multicastForwarding(protocol)
	}
	return false, &tcpip.ErrUnknownNICID{}
}
// PortRange returns the UDP and TCP inclusive range of ephemeral ports used in
// both IPv4 and IPv6.
func (s *Stack) PortRange() (uint16, uint16) {
	first, last := s.PortManager.PortRange()
	return first, last
}
// SetPortRange sets the UDP and TCP IPv4 and IPv6 ephemeral port range
// (inclusive). The port manager validates the range and reports any error.
func (s *Stack) SetPortRange(start uint16, end uint16) tcpip.Error {
	err := s.PortManager.SetPortRange(start, end)
	return err
}
// GROTimeout returns the GRO timeout for the given NIC.
func (s *Stack) GROTimeout(nicID tcpip.NICID) (time.Duration, tcpip.Error) {
	s.mu.RLock()
	defer s.mu.RUnlock()

	if nic, ok := s.nics[nicID]; ok {
		return nic.gro.getInterval(), nil
	}
	return 0, &tcpip.ErrUnknownNICID{}
}
// SetGROTimeout sets the GRO timeout for the given NIC.
func (s *Stack) SetGROTimeout(nicID tcpip.NICID, timeout time.Duration) tcpip.Error {
	s.mu.RLock()
	defer s.mu.RUnlock()

	if nic, ok := s.nics[nicID]; ok {
		nic.gro.setInterval(timeout)
		return nil
	}
	return &tcpip.ErrUnknownNICID{}
}
// SetRouteTable assigns the route table to be used by this stack. It
// specifies which NIC to use for given destination address ranges.
//
// This method takes ownership of the table.
func (s *Stack) SetRouteTable(table []tcpip.Route) {
	s.routeMu.Lock()
	s.routeTable = table
	s.routeMu.Unlock()
}
// GetRouteTable returns a copy of the route table which is currently in use.
func (s *Stack) GetRouteTable() []tcpip.Route {
	s.routeMu.RLock()
	defer s.routeMu.RUnlock()
	// Return a copy so callers cannot mutate the stack's table.
	tableCopy := append([]tcpip.Route(nil), s.routeTable...)
	return tableCopy
}
// AddRoute appends a route to the route table.
func (s *Stack) AddRoute(route tcpip.Route) {
	s.routeMu.Lock()
	s.routeTable = append(s.routeTable, route)
	s.routeMu.Unlock()
}
// RemoveRoutes removes matching routes from the route table, keeping every
// route for which match reports false.
func (s *Stack) RemoveRoutes(match func(tcpip.Route) bool) {
	s.routeMu.Lock()
	defer s.routeMu.Unlock()

	var kept []tcpip.Route
	for _, r := range s.routeTable {
		if match(r) {
			continue
		}
		kept = append(kept, r)
	}
	s.routeTable = kept
}
// NewEndpoint creates a new transport layer endpoint of the given protocol.
func (s *Stack) NewEndpoint(transport tcpip.TransportProtocolNumber, network tcpip.NetworkProtocolNumber, waiterQueue *waiter.Queue) (tcpip.Endpoint, tcpip.Error) {
	state, found := s.transportProtocols[transport]
	if !found {
		return nil, &tcpip.ErrUnknownProtocol{}
	}
	return state.proto.NewEndpoint(network, waiterQueue)
}
// NewRawEndpoint creates a new raw transport layer endpoint of the given
// protocol. Raw endpoints receive all traffic for a given protocol regardless
// of address.
func (s *Stack) NewRawEndpoint(transport tcpip.TransportProtocolNumber, network tcpip.NetworkProtocolNumber, waiterQueue *waiter.Queue, associated bool) (tcpip.Endpoint, tcpip.Error) {
	// Raw sockets are only available when the stack was built with a raw
	// endpoint factory (i.e. --net-raw).
	if s.rawFactory == nil {
		netRawMissingLogger.Infof("A process tried to create a raw socket, but --net-raw was not specified. Should runsc be run with --net-raw?")
		return nil, &tcpip.ErrNotPermitted{}
	}

	// Unassociated endpoints are not tied to a transport protocol
	// implementation.
	if !associated {
		return s.rawFactory.NewUnassociatedEndpoint(s, network, transport, waiterQueue)
	}

	state, found := s.transportProtocols[transport]
	if !found {
		return nil, &tcpip.ErrUnknownProtocol{}
	}
	return state.proto.NewRawEndpoint(network, waiterQueue)
}
// NewPacketEndpoint creates a new packet endpoint listening for the given
// netProto.
func (s *Stack) NewPacketEndpoint(cooked bool, netProto tcpip.NetworkProtocolNumber, waiterQueue *waiter.Queue) (tcpip.Endpoint, tcpip.Error) {
	// Packet endpoints require the raw endpoint factory, like raw sockets.
	if s.rawFactory == nil {
		return nil, &tcpip.ErrNotPermitted{}
	}
	return s.rawFactory.NewPacketEndpoint(s, cooked, netProto, waiterQueue)
}
// NICContext is an opaque handle used to store client-supplied NIC metadata.
// The stack never inspects it; it is carried through and returned verbatim in
// stack.NICInfo.
type NICContext any
// NICOptions specifies the configuration of a NIC as it is being created.
// The zero value creates an enabled, unnamed NIC.
type NICOptions struct {
	// Name specifies the name of the NIC. An empty name leaves the NIC
	// unnamed; non-empty names must be unique across the stack.
	Name string

	// Disabled specifies whether to avoid calling Attach on the passed
	// LinkEndpoint.
	Disabled bool

	// Context specifies user-defined data that will be returned in stack.NICInfo
	// for the NIC. Clients of this library can use it to add metadata that
	// should be tracked alongside a NIC, to avoid having to keep a
	// map[tcpip.NICID]metadata mirroring stack.Stack's nic map.
	Context NICContext

	// QDisc is the queue discipline to use for this NIC.
	QDisc QueueingDiscipline

	// GROTimeout specifies the GRO timeout. Zero bypasses GRO.
	GROTimeout time.Duration
}
// CreateNICWithOptions creates a NIC with the provided id, LinkEndpoint, and
// NICOptions. See the documentation on type NICOptions for details on how
// NICs can be configured.
//
// LinkEndpoint.Attach will be called to bind ep with a NetworkDispatcher.
func (s *Stack) CreateNICWithOptions(id tcpip.NICID, ep LinkEndpoint, opts NICOptions) tcpip.Error {
	s.mu.Lock()
	defer s.mu.Unlock()

	// The id must not already be in use.
	if _, exists := s.nics[id]; exists {
		return &tcpip.ErrDuplicateNICID{}
	}

	// A non-empty name must not already be in use either.
	if opts.Name != "" {
		for _, existing := range s.nics {
			if existing.Name() == opts.Name {
				return &tcpip.ErrDuplicateNICID{}
			}
		}
	}

	newNIC := newNIC(s, id, ep, opts)
	// Apply the stack-wide default forwarding configuration to the new NIC.
	for proto := range s.defaultForwardingEnabled {
		if _, err := newNIC.setForwarding(proto, true); err != nil {
			panic(fmt.Sprintf("newNIC(%d, ...).setForwarding(%d, true): %s", id, proto, err))
		}
	}
	s.nics[id] = newNIC

	if opts.Disabled {
		return nil
	}
	return newNIC.enable()
}
// CreateNIC creates a NIC with the provided id and LinkEndpoint and calls
// LinkEndpoint.Attach to bind ep with a NetworkDispatcher. It is equivalent
// to CreateNICWithOptions with zero-valued options.
func (s *Stack) CreateNIC(id tcpip.NICID, ep LinkEndpoint) tcpip.Error {
	var defaults NICOptions
	return s.CreateNICWithOptions(id, ep, defaults)
}
// GetLinkEndpointByName gets the link endpoint specified by name. Returns nil
// if no NIC has that name.
func (s *Stack) GetLinkEndpointByName(name string) LinkEndpoint {
	s.mu.RLock()
	defer s.mu.RUnlock()

	for _, nic := range s.nics {
		if nic.Name() != name {
			continue
		}
		linkEP, ok := nic.NetworkLinkEndpoint.(LinkEndpoint)
		if !ok {
			panic(fmt.Sprintf("unexpected NetworkLinkEndpoint(%#v) is not a LinkEndpoint", nic.NetworkLinkEndpoint))
		}
		return linkEP
	}
	return nil
}
// EnableNIC enables the given NIC so that the link-layer endpoint can start
// delivering packets to it.
func (s *Stack) EnableNIC(id tcpip.NICID) tcpip.Error {
	s.mu.RLock()
	defer s.mu.RUnlock()

	if nic, ok := s.nics[id]; ok {
		return nic.enable()
	}
	return &tcpip.ErrUnknownNICID{}
}
// DisableNIC disables the given NIC.
func (s *Stack) DisableNIC(id tcpip.NICID) tcpip.Error {
	s.mu.RLock()
	defer s.mu.RUnlock()

	if nic, ok := s.nics[id]; ok {
		nic.disable()
		return nil
	}
	return &tcpip.ErrUnknownNICID{}
}
// CheckNIC checks if a NIC is usable, i.e. it exists and is enabled.
func (s *Stack) CheckNIC(id tcpip.NICID) bool {
	s.mu.RLock()
	defer s.mu.RUnlock()

	if nic, ok := s.nics[id]; ok {
		return nic.Enabled()
	}
	return false
}
// RemoveNIC removes NIC and all related routes from the network stack.
func (s *Stack) RemoveNIC(id tcpip.NICID) tcpip.Error {
	s.mu.Lock()
	err := s.removeNICLocked(id)
	s.mu.Unlock()
	return err
}
// removeNICLocked removes NIC and all related routes from the network stack.
//
// +checklocks:s.mu
func (s *Stack) removeNICLocked(id tcpip.NICID) tcpip.Error {
	nic, ok := s.nics[id]
	if !ok {
		return &tcpip.ErrUnknownNICID{}
	}
	delete(s.nics, id)

	// Remove routes in-place. n tracks the number of routes written.
	s.routeMu.Lock()
	n := 0
	for i, r := range s.routeTable {
		// Zero the slot first so any route that is dropped does not keep
		// its contents reachable through the slice's backing array.
		s.routeTable[i] = tcpip.Route{}
		if r.NIC != id {
			// Keep this route: compact it toward the front of the table.
			s.routeTable[n] = r
			n++
		}
	}
	// Truncate to the kept routes; capacity (and the zeroed tail) is retained.
	s.routeTable = s.routeTable[:n]
	s.routeMu.Unlock()

	return nic.remove()
}
// NICInfo captures the name and addresses assigned to a NIC.
type NICInfo struct {
	// Name is the NIC's name, as supplied in NICOptions (may be empty).
	Name string
	// LinkAddress is the NIC's link (hardware) address.
	LinkAddress tcpip.LinkAddress
	// ProtocolAddresses lists the primary protocol addresses of the NIC.
	ProtocolAddresses []tcpip.ProtocolAddress

	// Flags indicate the state of the NIC.
	Flags NICStateFlags

	// MTU is the maximum transmission unit.
	MTU uint32

	// Stats holds the NIC-level statistics.
	Stats tcpip.NICStats

	// NetworkStats holds the stats of each NetworkEndpoint bound to the NIC.
	NetworkStats map[tcpip.NetworkProtocolNumber]NetworkEndpointStats

	// Context is user-supplied data optionally supplied in CreateNICWithOptions.
	// See type NICOptions for more details.
	Context NICContext

	// ARPHardwareType holds the ARP Hardware type of the NIC. This is the
	// value sent in haType field of an ARP Request sent by this NIC and the
	// value expected in the haType field of an ARP response.
	ARPHardwareType header.ARPHardwareType

	// Forwarding holds the forwarding status for each network endpoint that
	// supports forwarding.
	Forwarding map[tcpip.NetworkProtocolNumber]bool

	// MulticastForwarding holds the forwarding status for each network endpoint
	// that supports multicast forwarding.
	MulticastForwarding map[tcpip.NetworkProtocolNumber]bool
}
// HasNIC returns true if the NICID is defined in the stack.
func (s *Stack) HasNIC(id tcpip.NICID) bool {
	s.mu.RLock()
	defer s.mu.RUnlock()
	_, exists := s.nics[id]
	return exists
}
// NICInfo returns a map of NICIDs to their associated information.
func (s *Stack) NICInfo() map[tcpip.NICID]NICInfo {
	s.mu.RLock()
	defer s.mu.RUnlock()

	// forwardingValue normalizes a per-protocol forwarding query: a nil error
	// yields the value, ErrNotSupported is silently skipped (not every network
	// protocol supports forwarding), and any other error is a programming bug.
	type forwardingFn func(tcpip.NetworkProtocolNumber) (bool, tcpip.Error)
	forwardingValue := func(forwardingFn forwardingFn, proto tcpip.NetworkProtocolNumber, nicID tcpip.NICID, fnName string) (forward bool, ok bool) {
		switch forwarding, err := forwardingFn(proto); err.(type) {
		case nil:
			return forwarding, true
		case *tcpip.ErrUnknownProtocol:
			// The protocol was taken from s.networkProtocols, so the NIC must
			// know about it; anything else is an invariant violation.
			panic(fmt.Sprintf("expected network protocol %d to be available on NIC %d", proto, nicID))
		case *tcpip.ErrNotSupported:
			// Not all network protocols support forwarding.
		default:
			panic(fmt.Sprintf("nic(id=%d).%s(%d): %s", nicID, fnName, proto, err))
		}
		return false, false
	}

	nics := make(map[tcpip.NICID]NICInfo)
	for id, nic := range s.nics {
		flags := NICStateFlags{
			Up:          true, // Netstack interfaces are always up.
			Running:     nic.Enabled(),
			Promiscuous: nic.Promiscuous(),
			Loopback:    nic.IsLoopback(),
		}

		// Collect per-network-endpoint stats for this NIC.
		netStats := make(map[tcpip.NetworkProtocolNumber]NetworkEndpointStats)
		for proto, netEP := range nic.networkEndpoints {
			netStats[proto] = netEP.Stats()
		}

		info := NICInfo{
			Name:                nic.name,
			LinkAddress:         nic.NetworkLinkEndpoint.LinkAddress(),
			ProtocolAddresses:   nic.primaryAddresses(),
			Flags:               flags,
			MTU:                 nic.NetworkLinkEndpoint.MTU(),
			Stats:               nic.stats.local,
			NetworkStats:        netStats,
			Context:             nic.context,
			ARPHardwareType:     nic.NetworkLinkEndpoint.ARPHardwareType(),
			Forwarding:          make(map[tcpip.NetworkProtocolNumber]bool),
			MulticastForwarding: make(map[tcpip.NetworkProtocolNumber]bool),
		}

		// Populate the forwarding maps only for protocols that support it.
		for proto := range s.networkProtocols {
			if forwarding, ok := forwardingValue(nic.forwarding, proto, id, "forwarding"); ok {
				info.Forwarding[proto] = forwarding
			}
			if multicastForwarding, ok := forwardingValue(nic.multicastForwarding, proto, id, "multicastForwarding"); ok {
				info.MulticastForwarding[proto] = multicastForwarding
			}
		}
		nics[id] = info
	}
	return nics
}
// NICStateFlags holds information about the state of an NIC.
type NICStateFlags struct {
	// Up indicates whether the interface is running.
	Up bool

	// Running indicates whether resources are allocated.
	Running bool

	// Promiscuous indicates whether the interface is in promiscuous mode.
	Promiscuous bool

	// Loopback indicates whether the interface is a loopback.
	Loopback bool
}
// AddProtocolAddress adds an address to the specified NIC, possibly with extra
// properties.
func (s *Stack) AddProtocolAddress(id tcpip.NICID, protocolAddress tcpip.ProtocolAddress, properties AddressProperties) tcpip.Error {
	s.mu.RLock()
	defer s.mu.RUnlock()

	if nic, ok := s.nics[id]; ok {
		return nic.addAddress(protocolAddress, properties)
	}
	return &tcpip.ErrUnknownNICID{}
}
// RemoveAddress removes an existing network-layer address from the specified
// NIC.
func (s *Stack) RemoveAddress(id tcpip.NICID, addr tcpip.Address) tcpip.Error {
	s.mu.RLock()
	defer s.mu.RUnlock()

	nic, ok := s.nics[id]
	if !ok {
		return &tcpip.ErrUnknownNICID{}
	}
	return nic.removeAddress(addr)
}
// SetAddressLifetimes sets informational preferred and valid lifetimes, and
// whether the address should be preferred or deprecated.
func (s *Stack) SetAddressLifetimes(id tcpip.NICID, addr tcpip.Address, lifetimes AddressLifetimes) tcpip.Error {
	s.mu.RLock()
	defer s.mu.RUnlock()

	nic, ok := s.nics[id]
	if !ok {
		return &tcpip.ErrUnknownNICID{}
	}
	return nic.setAddressLifetimes(addr, lifetimes)
}
// AllAddresses returns a map of NICIDs to their protocol addresses (primary
// and non-primary).
func (s *Stack) AllAddresses() map[tcpip.NICID][]tcpip.ProtocolAddress {
	s.mu.RLock()
	defer s.mu.RUnlock()

	addrsByNIC := make(map[tcpip.NICID][]tcpip.ProtocolAddress)
	for id, nic := range s.nics {
		addrsByNIC[id] = nic.allPermanentAddresses()
	}
	return addrsByNIC
}
// GetMainNICAddress returns the first non-deprecated primary address and prefix
// for the given NIC and protocol. If no non-deprecated primary addresses exist,
// a deprecated address will be returned. If no deprecated addresses exist, the
// zero value will be returned.
func (s *Stack) GetMainNICAddress(id tcpip.NICID, protocol tcpip.NetworkProtocolNumber) (tcpip.AddressWithPrefix, tcpip.Error) {
	s.mu.RLock()
	defer s.mu.RUnlock()

	if nic, ok := s.nics[id]; ok {
		return nic.PrimaryAddress(protocol)
	}
	return tcpip.AddressWithPrefix{}, &tcpip.ErrUnknownNICID{}
}
// getAddressEP returns an assignable address endpoint on nic for the given
// addresses: the primary endpoint when no local address was specified,
// otherwise the endpoint bound to localAddr.
func (s *Stack) getAddressEP(nic *nic, localAddr, remoteAddr tcpip.Address, netProto tcpip.NetworkProtocolNumber) AssignableAddressEndpoint {
	if localAddr.BitLen() != 0 {
		return nic.findEndpoint(netProto, localAddr, CanBePrimaryEndpoint)
	}
	return nic.primaryEndpoint(netProto, remoteAddr)
}
// NewRouteForMulticast returns a Route that may be used to forward multicast
// packets.
//
// Returns nil if validation fails.
func (s *Stack) NewRouteForMulticast(nicID tcpip.NICID, remoteAddr tcpip.Address, netProto tcpip.NetworkProtocolNumber) *Route {
	s.mu.RLock()
	defer s.mu.RUnlock()

	nic, ok := s.nics[nicID]
	if !ok || !nic.Enabled() {
		return nil
	}

	addressEndpoint := s.getAddressEP(nic, tcpip.Address{} /* localAddr */, remoteAddr, netProto)
	if addressEndpoint == nil {
		return nil
	}
	return constructAndValidateRoute(netProto, addressEndpoint, nic, nic, tcpip.Address{} /* gateway */, tcpip.Address{} /* localAddr */, remoteAddr, s.handleLocal, false /* multicastLoop */)
}
// findLocalRouteFromNICRLocked is like findLocalRouteRLocked but finds a route
// from the specified NIC.
//
// Returns nil if localAddr is not assigned to localAddressNIC, if no NIC owns
// remoteAddr, or if the resulting route would be an outbound broadcast.
//
// +checklocksread:s.mu
func (s *Stack) findLocalRouteFromNICRLocked(localAddressNIC *nic, localAddr, remoteAddr tcpip.Address, netProto tcpip.NetworkProtocolNumber) *Route {
	// Take a reference on the local address endpoint; it must be released on
	// every failure path below (DecRef) or handed to the route (which then
	// owns it).
	localAddressEndpoint := localAddressNIC.getAddressOrCreateTempInner(netProto, localAddr, false /* createTemp */, NeverPrimaryEndpoint)
	if localAddressEndpoint == nil {
		return nil
	}

	var outgoingNIC *nic

	// Prefer a local route to the same interface as the local address.
	if localAddressNIC.hasAddress(netProto, remoteAddr) {
		outgoingNIC = localAddressNIC
	}

	// If the remote address isn't owned by the local address's NIC, check all
	// NICs.
	if outgoingNIC == nil {
		for _, nic := range s.nics {
			if nic.hasAddress(netProto, remoteAddr) {
				outgoingNIC = nic
				break
			}
		}
	}

	// If the remote address is not owned by the stack, we can't return a local
	// route.
	if outgoingNIC == nil {
		// Drop the reference taken above before bailing out.
		localAddressEndpoint.DecRef()
		return nil
	}

	r := makeLocalRoute(
		netProto,
		localAddr,
		remoteAddr,
		outgoingNIC,
		localAddressNIC,
		localAddressEndpoint,
	)

	if r.IsOutboundBroadcast() {
		// Local routes must not be broadcasts; release the route (and with it
		// the endpoint reference).
		r.Release()
		return nil
	}

	return r
}
// findLocalRouteRLocked returns a local route.
//
// A local route is a route to some remote address which the stack owns. That
// is, a local route is a route where packets never have to leave the stack.
//
// +checklocksread:s.mu
func (s *Stack) findLocalRouteRLocked(localAddressNICID tcpip.NICID, localAddr, remoteAddr tcpip.Address, netProto tcpip.NetworkProtocolNumber) *Route {
	// An unspecified local address means "use the remote address locally".
	if localAddr.BitLen() == 0 {
		localAddr = remoteAddr
	}

	// A specific NIC was requested: only consider that NIC.
	if localAddressNICID != 0 {
		if localAddressNIC, ok := s.nics[localAddressNICID]; ok {
			return s.findLocalRouteFromNICRLocked(localAddressNIC, localAddr, remoteAddr, netProto)
		}
		return nil
	}

	// No NIC specified: take the first NIC that yields a local route.
	for _, localAddressNIC := range s.nics {
		if r := s.findLocalRouteFromNICRLocked(localAddressNIC, localAddr, remoteAddr, netProto); r != nil {
			return r
		}
	}
	return nil
}
// HandleLocal returns true if non-loopback interfaces are allowed to loop
// packets back into the stack.
func (s *Stack) HandleLocal() bool {
	return s.handleLocal
}
// isNICForwarding reports whether nic forwards packets for proto. Protocols
// that do not support forwarding report false; any unexpected error is a bug.
func isNICForwarding(nic *nic, proto tcpip.NetworkProtocolNumber) bool {
	forwarding, err := nic.forwarding(proto)
	switch err.(type) {
	case nil:
		return forwarding
	case *tcpip.ErrUnknownProtocol:
		panic(fmt.Sprintf("expected network protocol %d to be available on NIC %d", proto, nic.ID()))
	case *tcpip.ErrNotSupported:
		// Not all network protocols support forwarding.
		return false
	default:
		panic(fmt.Sprintf("nic(id=%d).forwarding(%d): %s", nic.ID(), proto, err))
	}
}
// FindRoute creates a route to the given destination address, leaving through
// the given NIC and local address (if provided).
//
// If a NIC is not specified, the returned route will leave through the same
// NIC as the NIC that has the local address assigned when forwarding is
// disabled. If forwarding is enabled and the NIC is unspecified, the route may
// leave through any interface unless the route is link-local.
//
// If no local address is provided, the stack will select a local address. If no
// remote address is provided, the stack will use a remote address equal to the
// local address.
func (s *Stack) FindRoute(id tcpip.NICID, localAddr, remoteAddr tcpip.Address, netProto tcpip.NetworkProtocolNumber, multicastLoop bool) (*Route, tcpip.Error) {
	s.mu.RLock()
	defer s.mu.RUnlock()

	// Classify the remote address; these classes bypass the route table.
	isLinkLocal := header.IsV6LinkLocalUnicastAddress(remoteAddr) || header.IsV6LinkLocalMulticastAddress(remoteAddr)
	isLocalBroadcast := remoteAddr == header.IPv4Broadcast
	isMulticast := header.IsV4MulticastAddress(remoteAddr) || header.IsV6MulticastAddress(remoteAddr)
	isLoopback := header.IsV4LoopbackAddress(remoteAddr) || header.IsV6LoopbackAddress(remoteAddr)
	needRoute := !(isLocalBroadcast || isMulticast || isLinkLocal || isLoopback)

	// Prefer a purely in-stack (local) route when the stack handles local
	// traffic itself.
	if s.handleLocal && !isMulticast && !isLocalBroadcast {
		if r := s.findLocalRouteRLocked(id, localAddr, remoteAddr, netProto); r != nil {
			return r, nil
		}
	}

	// If the interface is specified and we do not need a route, return a route
	// through the interface if the interface is valid and enabled.
	if id != 0 && !needRoute {
		if nic, ok := s.nics[id]; ok && nic.Enabled() {
			if addressEndpoint := s.getAddressEP(nic, localAddr, remoteAddr, netProto); addressEndpoint != nil {
				return makeRoute(
					netProto,
					tcpip.Address{}, /* gateway */
					localAddr,
					remoteAddr,
					nic, /* outboundNIC */
					nic, /* localAddressNIC*/
					addressEndpoint,
					s.handleLocal,
					multicastLoop,
				), nil
			}
		}

		if isLoopback {
			return nil, &tcpip.ErrBadLocalAddress{}
		}
		return nil, &tcpip.ErrNetworkUnreachable{}
	}

	// Forwarding fallback (chosenRoute below) is only considered for global
	// addresses.
	onlyGlobalAddresses := !header.IsV6LinkLocalUnicastAddress(localAddr) && !isLinkLocal

	// Find a route to the remote with the route table.
	var chosenRoute tcpip.Route
	if r := func() *Route {
		s.routeMu.RLock()
		defer s.routeMu.RUnlock()

		for _, route := range s.routeTable {
			if remoteAddr.BitLen() != 0 && !route.Destination.Contains(remoteAddr) {
				continue
			}

			nic, ok := s.nics[route.NIC]
			if !ok || !nic.Enabled() {
				continue
			}

			if id == 0 || id == route.NIC {
				if addressEndpoint := s.getAddressEP(nic, localAddr, remoteAddr, netProto); addressEndpoint != nil {
					var gateway tcpip.Address
					if needRoute {
						gateway = route.Gateway
					}
					r := constructAndValidateRoute(netProto, addressEndpoint, nic /* outgoingNIC */, nic /* outgoingNIC */, gateway, localAddr, remoteAddr, s.handleLocal, multicastLoop)
					if r == nil {
						panic(fmt.Sprintf("non-forwarding route validation failed with route table entry = %#v, id = %d, localAddr = %s, remoteAddr = %s", route, id, localAddr, remoteAddr))
					}
					return r
				}
			}

			// If the stack has forwarding enabled and we haven't found a valid route
			// to the remote address yet, keep track of the first valid route. We
			// keep iterating because we prefer routes that let us use a local
			// address that is assigned to the outgoing interface. There is no
			// requirement to do this from any RFC but simply a choice made to better
			// follow a strong host model which the netstack follows at the time of
			// writing.
			if onlyGlobalAddresses && chosenRoute.Equal(tcpip.Route{}) && isNICForwarding(nic, netProto) {
				chosenRoute = route
			}
		}

		return nil
	}(); r != nil {
		return r, nil
	}

	if !chosenRoute.Equal(tcpip.Route{}) {
		// At this point we know the stack has forwarding enabled since chosenRoute is
		// only set when forwarding is enabled.
		nic, ok := s.nics[chosenRoute.NIC]
		if !ok {
			// If the route's NIC was invalid, we should not have chosen the route.
			panic(fmt.Sprintf("chosen route must have a valid NIC with ID = %d", chosenRoute.NIC))
		}

		var gateway tcpip.Address
		if needRoute {
			gateway = chosenRoute.Gateway
		}

		// Use the specified NIC to get the local address endpoint.
		if id != 0 {
			if aNIC, ok := s.nics[id]; ok {
				if addressEndpoint := s.getAddressEP(aNIC, localAddr, remoteAddr, netProto); addressEndpoint != nil {
					if r := constructAndValidateRoute(netProto, addressEndpoint, aNIC /* localAddressNIC */, nic /* outgoingNIC */, gateway, localAddr, remoteAddr, s.handleLocal, multicastLoop); r != nil {
						return r, nil
					}
				}
			}

			// TODO(https://gvisor.dev/issues/8105): This should be ErrNetworkUnreachable.
			return nil, &tcpip.ErrHostUnreachable{}
		}

		if id == 0 {
			// If an interface is not specified, try to find a NIC that holds the local
			// address endpoint to construct a route.
			for _, aNIC := range s.nics {
				addressEndpoint := s.getAddressEP(aNIC, localAddr, remoteAddr, netProto)
				if addressEndpoint == nil {
					continue
				}

				if r := constructAndValidateRoute(netProto, addressEndpoint, aNIC /* localAddressNIC */, nic /* outgoingNIC */, gateway, localAddr, remoteAddr, s.handleLocal, multicastLoop); r != nil {
					return r, nil
				}
			}
		}
	}

	if needRoute {
		// TODO(https://gvisor.dev/issues/8105): This should be ErrNetworkUnreachable.
		return nil, &tcpip.ErrHostUnreachable{}
	}
	if header.IsV6LoopbackAddress(remoteAddr) {
		return nil, &tcpip.ErrBadLocalAddress{}
	}
	// TODO(https://gvisor.dev/issues/8105): This should be ErrNetworkUnreachable.
	return nil, &tcpip.ErrNetworkUnreachable{}
}
// CheckNetworkProtocol checks if a given network protocol is enabled in the
// stack.
func (s *Stack) CheckNetworkProtocol(protocol tcpip.NetworkProtocolNumber) bool {
	_, enabled := s.networkProtocols[protocol]
	return enabled
}
// CheckDuplicateAddress performs duplicate address detection for the address on
// the specified interface.
func (s *Stack) CheckDuplicateAddress(nicID tcpip.NICID, protocol tcpip.NetworkProtocolNumber, addr tcpip.Address, h DADCompletionHandler) (DADCheckAddressDisposition, tcpip.Error) {
	// Release the stack lock before delegating to the NIC.
	s.mu.RLock()
	nic, found := s.nics[nicID]
	s.mu.RUnlock()

	if !found {
		return 0, &tcpip.ErrUnknownNICID{}
	}
	return nic.checkDuplicateAddress(protocol, addr, h)
}
// CheckLocalAddress determines if the given local address exists, and if it
// does, returns the id of the NIC it's bound to. Returns 0 if the address
// does not exist.
func (s *Stack) CheckLocalAddress(nicID tcpip.NICID, protocol tcpip.NetworkProtocolNumber, addr tcpip.Address) tcpip.NICID {
	s.mu.RLock()
	defer s.mu.RUnlock()

	// No NIC specified: scan every NIC for the address.
	if nicID == 0 {
		for _, nic := range s.nics {
			if nic.CheckLocalAddress(protocol, addr) {
				return nic.id
			}
		}
		return 0
	}

	// A NIC was specified; restrict the check to it.
	nic, found := s.nics[nicID]
	if !found {
		return 0
	}
	// In IPv4, linux only checks the interface. If it matches, then it does
	// not bother with the address.
	// https://github.com/torvalds/linux/blob/15205c2829ca2cbb5ece5ceaafe1171a8470e62b/net/ipv4/igmp.c#L1829-L1837
	if protocol == header.IPv4ProtocolNumber {
		return nic.id
	}
	if nic.CheckLocalAddress(protocol, addr) {
		return nic.id
	}
	return 0
}
// SetPromiscuousMode enables or disables promiscuous mode in the given NIC.
func (s *Stack) SetPromiscuousMode(nicID tcpip.NICID, enable bool) tcpip.Error {
	s.mu.RLock()
	defer s.mu.RUnlock()

	if nic, ok := s.nics[nicID]; ok {
		nic.setPromiscuousMode(enable)
		return nil
	}
	return &tcpip.ErrUnknownNICID{}
}
// SetSpoofing enables or disables address spoofing in the given NIC, allowing
// endpoints to bind to any address in the NIC.
func (s *Stack) SetSpoofing(nicID tcpip.NICID, enable bool) tcpip.Error {
	s.mu.RLock()
	defer s.mu.RUnlock()

	if nic, ok := s.nics[nicID]; ok {
		nic.setSpoofing(enable)
		return nil
	}
	return &tcpip.ErrUnknownNICID{}
}
// LinkResolutionResult is the result of a link address resolution attempt.
type LinkResolutionResult struct {
	// LinkAddress is the resolved link address; only meaningful when Err is nil.
	LinkAddress tcpip.LinkAddress
	// Err is the error, if any, that occurred during resolution.
	Err tcpip.Error
}
// GetLinkAddress finds the link address corresponding to a network address.
//
// Returns ErrNotSupported if the stack is not configured with a link address
// resolver for the specified network protocol.
//
// Returns ErrWouldBlock if the link address is not readily available, along
// with a notification channel for the caller to block on. Triggers address
// resolution asynchronously.
//
// onResolve will be called either immediately, if resolution is not required,
// or when address resolution is complete, with the resolved link address and
// whether resolution succeeded.
//
// If specified, the local address must be an address local to the interface
// the neighbor cache belongs to. The local address is the source address of
// a packet prompting NUD/link address resolution.
func (s *Stack) GetLinkAddress(nicID tcpip.NICID, addr, localAddr tcpip.Address, protocol tcpip.NetworkProtocolNumber, onResolve func(LinkResolutionResult)) tcpip.Error {
	// Release the stack lock before delegating to the NIC.
	s.mu.RLock()
	nic, found := s.nics[nicID]
	s.mu.RUnlock()

	if !found {
		return &tcpip.ErrUnknownNICID{}
	}
	return nic.getLinkAddress(addr, localAddr, protocol, onResolve)
}
// Neighbors returns all IP to MAC address associations.
func (s *Stack) Neighbors(nicID tcpip.NICID, protocol tcpip.NetworkProtocolNumber) ([]NeighborEntry, tcpip.Error) {
	// Release the stack lock before delegating to the NIC.
	s.mu.RLock()
	nic, found := s.nics[nicID]
	s.mu.RUnlock()

	if !found {
		return nil, &tcpip.ErrUnknownNICID{}
	}
	return nic.neighbors(protocol)
}
// AddStaticNeighbor statically associates an IP address to a MAC address.
func (s *Stack) AddStaticNeighbor(nicID tcpip.NICID, protocol tcpip.NetworkProtocolNumber, addr tcpip.Address, linkAddr tcpip.LinkAddress) tcpip.Error {
	// Release the stack lock before delegating to the NIC.
	s.mu.RLock()
	nic, found := s.nics[nicID]
	s.mu.RUnlock()

	if !found {
		return &tcpip.ErrUnknownNICID{}
	}
	return nic.addStaticNeighbor(addr, protocol, linkAddr)
}
// RemoveNeighbor removes an IP to MAC address association previously created
// either automatically or by AddStaticNeighbor. Returns ErrBadAddress if there
// is no association with the provided address.
func (s *Stack) RemoveNeighbor(nicID tcpip.NICID, protocol tcpip.NetworkProtocolNumber, addr tcpip.Address) tcpip.Error {
	// Release the stack lock before delegating to the NIC.
	s.mu.RLock()
	nic, found := s.nics[nicID]
	s.mu.RUnlock()

	if !found {
		return &tcpip.ErrUnknownNICID{}
	}
	return nic.removeNeighbor(protocol, addr)
}
// ClearNeighbors removes all IP to MAC address associations.
func (s *Stack) ClearNeighbors(nicID tcpip.NICID, protocol tcpip.NetworkProtocolNumber) tcpip.Error {
	// Release the stack lock before delegating to the NIC.
	s.mu.RLock()
	nic, found := s.nics[nicID]
	s.mu.RUnlock()

	if !found {
		return &tcpip.ErrUnknownNICID{}
	}
	return nic.clearNeighbors(protocol)
}
// RegisterTransportEndpoint registers the given endpoint with the stack
// transport dispatcher. Received packets that match the provided id will be
// delivered to the given endpoint; specifying a nic is optional, but
// nic-specific IDs have precedence over global ones.
func (s *Stack) RegisterTransportEndpoint(netProtos []tcpip.NetworkProtocolNumber, protocol tcpip.TransportProtocolNumber, id TransportEndpointID, ep TransportEndpoint, flags ports.Flags, bindToDevice tcpip.NICID) tcpip.Error {
	err := s.demux.registerEndpoint(netProtos, protocol, id, ep, flags, bindToDevice)
	return err
}
// CheckRegisterTransportEndpoint checks if an endpoint can be registered with
// the stack transport dispatcher without actually registering it.
func (s *Stack) CheckRegisterTransportEndpoint(netProtos []tcpip.NetworkProtocolNumber, protocol tcpip.TransportProtocolNumber, id TransportEndpointID, flags ports.Flags, bindToDevice tcpip.NICID) tcpip.Error {
	err := s.demux.checkEndpoint(netProtos, protocol, id, flags, bindToDevice)
	return err
}
// UnregisterTransportEndpoint removes the endpoint with the given id from the
// stack transport dispatcher.
func (s *Stack) UnregisterTransportEndpoint(netProtos []tcpip.NetworkProtocolNumber, protocol tcpip.TransportProtocolNumber, id TransportEndpointID, ep TransportEndpoint, flags ports.Flags, bindToDevice tcpip.NICID) {
	// Delegate directly to the demultiplexer.
	s.demux.unregisterEndpoint(netProtos, protocol, id, ep, flags, bindToDevice)
}
// StartTransportEndpointCleanup removes the endpoint with the given id from
// the stack transport dispatcher. It also transitions it to the cleanup stage.
func (s *Stack) StartTransportEndpointCleanup(netProtos []tcpip.NetworkProtocolNumber, protocol tcpip.TransportProtocolNumber, id TransportEndpointID, ep TransportEndpoint, flags ports.Flags, bindToDevice tcpip.NICID) {
	// Mark the endpoint as pending cleanup before unregistering it.
	s.cleanupEndpointsMu.Lock()
	s.cleanupEndpoints[ep] = struct{}{}
	s.cleanupEndpointsMu.Unlock()

	s.demux.unregisterEndpoint(netProtos, protocol, id, ep, flags, bindToDevice)
}
// CompleteTransportEndpointCleanup removes the endpoint from the cleanup
// stage.
func (s *Stack) CompleteTransportEndpointCleanup(ep TransportEndpoint) {
	s.cleanupEndpointsMu.Lock()
	defer s.cleanupEndpointsMu.Unlock()
	delete(s.cleanupEndpoints, ep)
}
// FindTransportEndpoint finds an endpoint that most closely matches the provided
// id. If no endpoint is found it returns nil.
func (s *Stack) FindTransportEndpoint(netProto tcpip.NetworkProtocolNumber, transProto tcpip.TransportProtocolNumber, id TransportEndpointID, nicID tcpip.NICID) TransportEndpoint {
return s.demux.findTransportEndpoint(netProto, transProto, id, nicID)
}
// RegisterRawTransportEndpoint registers the given endpoint with the stack
// transport dispatcher. Received packets that match the provided transport
// protocol will be delivered to the given endpoint.
func (s *Stack) RegisterRawTransportEndpoint(netProto tcpip.NetworkProtocolNumber, transProto tcpip.TransportProtocolNumber, ep RawTransportEndpoint) tcpip.Error {
return s.demux.registerRawEndpoint(netProto, transProto, ep)
}
// UnregisterRawTransportEndpoint removes the endpoint for the transport
// protocol from the stack transport dispatcher.
func (s *Stack) UnregisterRawTransportEndpoint(netProto tcpip.NetworkProtocolNumber, transProto tcpip.TransportProtocolNumber, ep RawTransportEndpoint) {
s.demux.unregisterRawEndpoint(netProto, transProto, ep)
}
// RegisterRestoredEndpoint records e as an endpoint that has been restored on
// this stack. Recorded endpoints are resumed (and the list cleared) by
// Resume.
func (s *Stack) RegisterRestoredEndpoint(e ResumableEndpoint) {
	s.mu.Lock()
	s.resumableEndpoints = append(s.resumableEndpoints, e)
	s.mu.Unlock()
}

// RegisteredEndpoints returns all endpoints which are currently registered.
func (s *Stack) RegisteredEndpoints() []TransportEndpoint {
	s.mu.Lock()
	defer s.mu.Unlock()
	var es []TransportEndpoint
	// Collect endpoints from every transport protocol known to the demuxer.
	for _, e := range s.demux.protocol {
		es = append(es, e.transportEndpoints()...)
	}
	return es
}
// CleanupEndpoints returns endpoints currently in the cleanup state.
func (s *Stack) CleanupEndpoints() []TransportEndpoint {
	s.cleanupEndpointsMu.Lock()
	defer s.cleanupEndpointsMu.Unlock()
	es := make([]TransportEndpoint, 0, len(s.cleanupEndpoints))
	for ep := range s.cleanupEndpoints {
		es = append(es, ep)
	}
	return es
}

// RestoreCleanupEndpoints adds endpoints to cleanup tracking. This is useful
// for restoring a stack after a save.
func (s *Stack) RestoreCleanupEndpoints(es []TransportEndpoint) {
	s.cleanupEndpointsMu.Lock()
	defer s.cleanupEndpointsMu.Unlock()
	for _, ep := range es {
		s.cleanupEndpoints[ep] = struct{}{}
	}
}
// Close closes all currently registered transport endpoints.
//
// Endpoints created or modified during this call may not get closed.
func (s *Stack) Close() {
	// Abort endpoints first so protocol teardown below sees no live users.
	for _, e := range s.RegisteredEndpoints() {
		e.Abort()
	}
	for _, p := range s.transportProtocols {
		p.proto.Close()
	}
	for _, p := range s.networkProtocols {
		p.Close()
	}
}

// Wait waits for all transport and link endpoints to halt their worker
// goroutines.
//
// Endpoints created or modified during this call may not get waited on.
//
// Note that link endpoints must be stopped via an implementation specific
// mechanism.
func (s *Stack) Wait() {
	for _, e := range s.RegisteredEndpoints() {
		e.Wait()
	}
	for _, e := range s.CleanupEndpoints() {
		e.Wait()
	}
	for _, p := range s.transportProtocols {
		p.proto.Wait()
	}
	for _, p := range s.networkProtocols {
		p.Wait()
	}
	// NICs are removed under s.mu last, after all endpoint/protocol workers
	// have halted.
	s.mu.Lock()
	defer s.mu.Unlock()
	for id, n := range s.nics {
		// Remove NIC to ensure that qDisc goroutines are correctly
		// terminated on stack teardown.
		s.removeNICLocked(id)
		n.NetworkLinkEndpoint.Wait()
	}
}

// Destroy destroys the stack with all endpoints.
func (s *Stack) Destroy() {
	// Close first so Wait observes endpoints already shutting down.
	s.Close()
	s.Wait()
}

// Pause pauses any protocol level background workers.
func (s *Stack) Pause() {
	for _, p := range s.transportProtocols {
		p.proto.Pause()
	}
}

// Resume restarts the stack after a restore. This must be called after the
// entire system has been restored.
func (s *Stack) Resume() {
	// ResumableEndpoint.Resume() may call other methods on s, so we can't hold
	// s.mu while resuming the endpoints.
	s.mu.Lock()
	eps := s.resumableEndpoints
	s.resumableEndpoints = nil
	s.mu.Unlock()
	for _, e := range eps {
		e.Resume(s)
	}
	// Now resume any protocol level background workers.
	for _, p := range s.transportProtocols {
		p.proto.Resume()
	}
}
// RegisterPacketEndpoint registers ep with the stack, causing it to receive
// all traffic of the specified netProto on the given NIC. If nicID is 0, it
// receives traffic from every NIC.
func (s *Stack) RegisterPacketEndpoint(nicID tcpip.NICID, netProto tcpip.NetworkProtocolNumber, ep PacketEndpoint) tcpip.Error {
	s.mu.Lock()
	defer s.mu.Unlock()
	// If no NIC is specified, capture on all devices.
	if nicID == 0 {
		// Register with each NIC.
		for _, nic := range s.nics {
			if err := nic.registerPacketEndpoint(netProto, ep); err != nil {
				// Roll back any NICs registered so far to avoid a
				// partial registration.
				s.unregisterPacketEndpointLocked(0, netProto, ep)
				return err
			}
		}
		return nil
	}
	// Capture on a specific device.
	nic, ok := s.nics[nicID]
	if !ok {
		return &tcpip.ErrUnknownNICID{}
	}
	if err := nic.registerPacketEndpoint(netProto, ep); err != nil {
		return err
	}
	return nil
}

// UnregisterPacketEndpoint unregisters ep for packets of the specified
// netProto from the specified NIC. If nicID is 0, ep is unregistered from all
// NICs.
func (s *Stack) UnregisterPacketEndpoint(nicID tcpip.NICID, netProto tcpip.NetworkProtocolNumber, ep PacketEndpoint) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.unregisterPacketEndpointLocked(nicID, netProto, ep)
}

// unregisterPacketEndpointLocked is the lock-held body of
// UnregisterPacketEndpoint; it is also used for rollback during registration.
//
// +checklocks:s.mu
func (s *Stack) unregisterPacketEndpointLocked(nicID tcpip.NICID, netProto tcpip.NetworkProtocolNumber, ep PacketEndpoint) {
	// If no NIC is specified, unregister on all devices.
	if nicID == 0 {
		// Unregister with each NIC.
		for _, nic := range s.nics {
			nic.unregisterPacketEndpoint(netProto, ep)
		}
		return
	}
	// Unregister in a single device.
	nic, ok := s.nics[nicID]
	if !ok {
		return
	}
	nic.unregisterPacketEndpoint(netProto, ep)
}
// WritePacketToRemote writes a payload on the specified NIC using the provided
// network protocol and remote link address.
func (s *Stack) WritePacketToRemote(nicID tcpip.NICID, remote tcpip.LinkAddress, netProto tcpip.NetworkProtocolNumber, payload buffer.Buffer) tcpip.Error {
	// Only a read lock is needed to look up the NIC; this matches the
	// locking discipline of WriteRawPacket and the other read-only
	// accessors of s.nics (previously this took the full write lock).
	s.mu.RLock()
	nic, ok := s.nics[nicID]
	s.mu.RUnlock()
	if !ok {
		return &tcpip.ErrUnknownDevice{}
	}
	pkt := NewPacketBuffer(PacketBufferOptions{
		ReserveHeaderBytes: int(nic.MaxHeaderLength()),
		Payload:            payload,
	})
	defer pkt.DecRef()
	pkt.NetworkProtocolNumber = netProto
	return nic.WritePacketToRemote(remote, pkt)
}
// WriteRawPacket writes data directly to the specified NIC without adding any
// headers.
func (s *Stack) WriteRawPacket(nicID tcpip.NICID, proto tcpip.NetworkProtocolNumber, payload buffer.Buffer) tcpip.Error {
	// Read lock only: s.nics is merely looked up, not mutated.
	s.mu.RLock()
	nic, ok := s.nics[nicID]
	s.mu.RUnlock()
	if !ok {
		return &tcpip.ErrUnknownNICID{}
	}
	// No header bytes are reserved: the payload is expected to already
	// contain the link header.
	pkt := NewPacketBuffer(PacketBufferOptions{
		Payload: payload,
	})
	defer pkt.DecRef()
	pkt.NetworkProtocolNumber = proto
	return nic.writeRawPacketWithLinkHeaderInPayload(pkt)
}
// NetworkProtocolInstance returns the protocol instance in the stack for the
// specified network protocol. This method is public for protocol implementers
// and tests to use.
func (s *Stack) NetworkProtocolInstance(num tcpip.NetworkProtocolNumber) NetworkProtocol {
	p, ok := s.networkProtocols[num]
	if !ok {
		return nil
	}
	return p
}

// TransportProtocolInstance returns the protocol instance in the stack for the
// specified transport protocol. This method is public for protocol implementers
// and tests to use.
func (s *Stack) TransportProtocolInstance(num tcpip.TransportProtocolNumber) TransportProtocol {
	state, ok := s.transportProtocols[num]
	if !ok {
		return nil
	}
	return state.proto
}
// AddTCPProbe installs a probe function that will be invoked on every segment
// received by a given TCP endpoint. The probe function is passed a copy of the
// TCP endpoint state before and after processing of the segment.
//
// NOTE: TCPProbe is added only to endpoints created after this call. Endpoints
// created prior to this call will not call the probe function.
//
// Further, installing two different probes back to back can result in some
// endpoints calling the first one and some the second one. There is no
// guarantee provided on which probe will be invoked. Ideally this should only
// be called once per stack.
func (s *Stack) AddTCPProbe(probe TCPProbeFunc) {
	s.tcpProbeFunc.Store(probe)
}

// GetTCPProbe returns the TCPProbeFunc if installed with AddTCPProbe, nil
// otherwise.
func (s *Stack) GetTCPProbe() TCPProbeFunc {
	p := s.tcpProbeFunc.Load()
	if p == nil {
		return nil
	}
	// The stored value is always a TCPProbeFunc (see AddTCPProbe and
	// RemoveTCPProbe), so this assertion cannot fail.
	return p.(TCPProbeFunc)
}

// RemoveTCPProbe removes an installed TCP probe.
//
// NOTE: This only ensures that endpoints created after this call do not
// have a probe attached. Endpoints already created will continue to invoke
// TCP probe.
func (s *Stack) RemoveTCPProbe() {
	// This must be TCPProbeFunc(nil) because atomic.Value.Store(nil) panics.
	s.tcpProbeFunc.Store(TCPProbeFunc(nil))
}
// JoinGroup joins the given multicast group on the given NIC.
func (s *Stack) JoinGroup(protocol tcpip.NetworkProtocolNumber, nicID tcpip.NICID, multicastAddr tcpip.Address) tcpip.Error {
	s.mu.RLock()
	defer s.mu.RUnlock()
	nic, ok := s.nics[nicID]
	if !ok {
		return &tcpip.ErrUnknownNICID{}
	}
	return nic.joinGroup(protocol, multicastAddr)
}

// LeaveGroup leaves the given multicast group on the given NIC.
func (s *Stack) LeaveGroup(protocol tcpip.NetworkProtocolNumber, nicID tcpip.NICID, multicastAddr tcpip.Address) tcpip.Error {
	s.mu.RLock()
	defer s.mu.RUnlock()
	nic, ok := s.nics[nicID]
	if !ok {
		return &tcpip.ErrUnknownNICID{}
	}
	return nic.leaveGroup(protocol, multicastAddr)
}

// IsInGroup returns true if the NIC with ID nicID has joined the multicast
// group multicastAddr.
func (s *Stack) IsInGroup(nicID tcpip.NICID, multicastAddr tcpip.Address) (bool, tcpip.Error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	nic, ok := s.nics[nicID]
	if !ok {
		return false, &tcpip.ErrUnknownNICID{}
	}
	return nic.isInGroup(multicastAddr), nil
}
// IPTables returns the stack's iptables.
func (s *Stack) IPTables() *IPTables {
	return s.tables
}

// ICMPLimit returns the maximum number of ICMP messages that can be sent
// in one second.
func (s *Stack) ICMPLimit() rate.Limit {
	return s.icmpRateLimiter.Limit()
}

// SetICMPLimit sets the maximum number of ICMP messages that be sent
// in one second.
func (s *Stack) SetICMPLimit(newLimit rate.Limit) {
	s.icmpRateLimiter.SetLimit(newLimit)
}

// ICMPBurst returns the maximum number of ICMP messages that can be sent
// in a single burst.
func (s *Stack) ICMPBurst() int {
	return s.icmpRateLimiter.Burst()
}

// SetICMPBurst sets the maximum number of ICMP messages that can be sent
// in a single burst.
func (s *Stack) SetICMPBurst(burst int) {
	s.icmpRateLimiter.SetBurst(burst)
}

// AllowICMPMessage returns true if the rate limiter allows at least one
// ICMP message to be sent at this instant.
func (s *Stack) AllowICMPMessage() bool {
	// Consumes a token from the limiter when it returns true.
	return s.icmpRateLimiter.Allow()
}
// GetNetworkEndpoint returns the NetworkEndpoint with the specified protocol
// number installed on the specified NIC.
func (s *Stack) GetNetworkEndpoint(nicID tcpip.NICID, proto tcpip.NetworkProtocolNumber) (NetworkEndpoint, tcpip.Error) {
	// s.nics is only read here, so a read lock suffices — consistent with
	// NUDConfigurations and FindNICNameFromID (previously took the write
	// lock for no reason).
	s.mu.RLock()
	defer s.mu.RUnlock()
	nic, ok := s.nics[nicID]
	if !ok {
		return nil, &tcpip.ErrUnknownNICID{}
	}
	return nic.getNetworkEndpoint(proto), nil
}
// NUDConfigurations gets the per-interface NUD configurations.
func (s *Stack) NUDConfigurations(id tcpip.NICID, proto tcpip.NetworkProtocolNumber) (NUDConfigurations, tcpip.Error) {
	s.mu.RLock()
	nic, ok := s.nics[id]
	s.mu.RUnlock()
	if !ok {
		return NUDConfigurations{}, &tcpip.ErrUnknownNICID{}
	}
	return nic.nudConfigs(proto)
}

// SetNUDConfigurations sets the per-interface NUD configurations.
//
// Note, if c contains invalid NUD configuration values, it will be fixed to
// use default values for the erroneous values.
func (s *Stack) SetNUDConfigurations(id tcpip.NICID, proto tcpip.NetworkProtocolNumber, c NUDConfigurations) tcpip.Error {
	s.mu.RLock()
	nic, ok := s.nics[id]
	s.mu.RUnlock()
	if !ok {
		return &tcpip.ErrUnknownNICID{}
	}
	return nic.setNUDConfigs(proto, c)
}

// Seed returns a 32 bit value that can be used as a seed value.
//
// NOTE: The seed is generated once during stack initialization only.
func (s *Stack) Seed() uint32 {
	return s.seed
}

// Rand returns a reference to a pseudo random generator that can be used
// to generate random numbers as required.
func (s *Stack) Rand() *rand.Rand {
	return s.randomGenerator
}

// SecureRNG returns the stack's cryptographically secure random number
// generator.
func (s *Stack) SecureRNG() io.Reader {
	return s.secureRNG
}

// FindNICNameFromID returns the name of the NIC for the given NICID.
// It returns the empty string when no NIC with that ID exists.
func (s *Stack) FindNICNameFromID(id tcpip.NICID) string {
	s.mu.RLock()
	defer s.mu.RUnlock()
	nic, ok := s.nics[id]
	if !ok {
		return ""
	}
	return nic.Name()
}

// ParseResult indicates the result of a parsing attempt.
type ParseResult int

const (
	// ParsedOK indicates that a packet was successfully parsed.
	ParsedOK ParseResult = iota

	// UnknownTransportProtocol indicates that the transport protocol is unknown.
	UnknownTransportProtocol

	// TransportLayerParseError indicates that the transport packet was not
	// successfully parsed.
	TransportLayerParseError
)
// ParsePacketBufferTransport parses the provided packet buffer's transport
// header.
func (s *Stack) ParsePacketBufferTransport(protocol tcpip.TransportProtocolNumber, pkt PacketBufferPtr) ParseResult {
	// Tag the packet with its transport protocol even if parsing fails
	// below.
	pkt.TransportProtocolNumber = protocol
	// Parse the transport header if present.
	state, ok := s.transportProtocols[protocol]
	if !ok {
		return UnknownTransportProtocol
	}
	if !state.proto.Parse(pkt) {
		return TransportLayerParseError
	}
	return ParsedOK
}

// networkProtocolNumbers returns the network protocol numbers the stack is
// configured with.
func (s *Stack) networkProtocolNumbers() []tcpip.NetworkProtocolNumber {
	protos := make([]tcpip.NetworkProtocolNumber, 0, len(s.networkProtocols))
	for p := range s.networkProtocols {
		protos = append(protos, p)
	}
	return protos
}

// isSubnetBroadcastOnNIC returns whether addr is the subnet-local broadcast
// address of one of nic's assigned subnets for the given protocol.
func isSubnetBroadcastOnNIC(nic *nic, protocol tcpip.NetworkProtocolNumber, addr tcpip.Address) bool {
	// Look up an existing address endpoint only; createTemp is false so no
	// temporary endpoint is created as a side effect.
	addressEndpoint := nic.getAddressOrCreateTempInner(protocol, addr, false /* createTemp */, NeverPrimaryEndpoint)
	if addressEndpoint == nil {
		return false
	}
	subnet := addressEndpoint.Subnet()
	addressEndpoint.DecRef()
	return subnet.IsBroadcast(addr)
}
// IsSubnetBroadcast returns true if the provided address is a subnet-local
// broadcast address on the specified NIC and protocol.
//
// Returns false if the NIC is unknown or if the protocol is unknown or does
// not support addressing.
//
// If the NIC is not specified, the stack will check all NICs.
func (s *Stack) IsSubnetBroadcast(nicID tcpip.NICID, protocol tcpip.NetworkProtocolNumber, addr tcpip.Address) bool {
	s.mu.RLock()
	defer s.mu.RUnlock()
	if nicID == 0 {
		// No NIC given: the address matches if any NIC considers it a
		// subnet broadcast.
		for _, nic := range s.nics {
			if isSubnetBroadcastOnNIC(nic, protocol, addr) {
				return true
			}
		}
		return false
	}
	nic, ok := s.nics[nicID]
	return ok && isSubnetBroadcastOnNIC(nic, protocol, addr)
}
// PacketEndpointWriteSupported returns true iff packet endpoints support write
// operations. The value is fixed at stack construction time.
func (s *Stack) PacketEndpointWriteSupported() bool {
	return s.packetEndpointWriteSupported
}
|
package dynamodb
import (
"bytes"
"context"
"encoding/binary"
"fmt"
"log"
"math"
"net/http"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
godynamodb "github.com/aws/aws-sdk-go/service/dynamodb"
godynamodbiface "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface"
"github.com/pkg/errors"
"github.com/yuuki/diamondb/pkg/config"
"github.com/yuuki/diamondb/pkg/model"
"github.com/yuuki/diamondb/pkg/storage/util"
"github.com/yuuki/diamondb/pkg/timeparser"
)
//go:generate mockgen -source ../../../vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface/interface.go -destination dynamodb_mock.go -package dynamodb

// ReadWriter defines the interface for DynamoDB reader and writer.
type ReadWriter interface {
	Ping() error
	Client() godynamodbiface.DynamoDBAPI
	CreateTable(*CreateTableParam) error
	Fetch(string, time.Time, time.Time) (model.SeriesMap, error)
	batchGet(q *query) (model.SeriesMap, error)
	Put(string, string, string, int64, map[int64]float64) error
}

// DynamoDB provides a dynamodb client.
type DynamoDB struct {
	svc godynamodbiface.DynamoDBAPI
}

// timeSlot identifies one stored item: the epoch at which the item's time
// window starts and the datapoint resolution (in seconds) within it.
type timeSlot struct {
	itemEpoch int64
	step      int
}

// query describes one batchGet call: a group of series names fetched from a
// single time slot, later trimmed to [start, end].
type query struct {
	names []string
	start time.Time
	end   time.Time
	slot  *timeSlot
	// context
}

const (
	// HTTPTimeout bounds every HTTP call made by the DynamoDB client.
	HTTPTimeout     = time.Duration(10) * time.Second
	pingTimeout     = time.Duration(5) * time.Second
	batchGetTimeout = time.Duration(5) * time.Second
	updateTimeout   = time.Duration(5) * time.Second
	// NOTE(review): oneYear is 360 days, not 365 — presumably a deliberate
	// round figure for slot sizing; confirm before "fixing".
	oneYear time.Duration = time.Duration(24*360) * time.Hour
	oneWeek time.Duration = time.Duration(24*7) * time.Hour
	oneDay  time.Duration = time.Duration(24*1) * time.Hour
)

var (
	// dynamodbBatchLimit is the maximum number of keys per BatchGetItem
	// request (DynamoDB caps this at 100).
	dynamodbBatchLimit = 100

	oneYearSeconds = int(oneYear.Seconds())
	oneWeekSeconds = int(oneWeek.Seconds())
	oneDaySeconds  = int(oneDay.Seconds())
)

// Compile-time check that DynamoDB satisfies ReadWriter.
var _ ReadWriter = &DynamoDB{}
// New creates a new DynamoDB client configured from the global config
// (region, optional local endpoint, table settings).
func New() (*DynamoDB, error) {
	awsConf := aws.NewConfig().WithRegion(config.Config.DynamoDBRegion)
	if config.Config.DynamoDBEndpoint != "" {
		// For dynamodb-local configuration: point at the local endpoint
		// and use placeholder credentials.
		awsConf.WithEndpoint(config.Config.DynamoDBEndpoint)
		awsConf.WithCredentials(credentials.NewStaticCredentials("dummy", "dummy", "dummy"))
	}
	awsConf.WithHTTPClient(&http.Client{
		Timeout:   HTTPTimeout,
		Transport: http.DefaultTransport,
	})
	sess, err := session.NewSession(awsConf)
	if err != nil {
		return nil, errors.Wrapf(err,
			"failed to create session for dynamodb (%s,%s)",
			config.Config.DynamoDBRegion,
			config.Config.DynamoDBEndpoint,
		)
	}
	return &DynamoDB{
		svc: godynamodb.New(sess),
	}, nil
}

// Client returns the DynamoDB client.
func (d *DynamoDB) Client() godynamodbiface.DynamoDBAPI {
	return d.svc
}
// Ping pings DynamoDB endpoint by describing the configured table, bounded
// by pingTimeout.
func (d *DynamoDB) Ping() error {
	ctx, cancel := context.WithTimeout(context.TODO(), pingTimeout)
	defer cancel()
	// A no-op request option; *WithContext variants require at least the
	// variadic slot.
	var opt request.Option = func(r *request.Request) {}
	_, err := d.svc.DescribeTableWithContext(ctx, &godynamodb.DescribeTableInput{
		TableName: aws.String(config.Config.DynamoDBTableName),
	}, opt)
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == request.CanceledErrorCode {
			// If the SDK can determine the request or retry delay was canceled
			// by a context the CanceledErrorCode error code will be returned.
			return errors.Wrap(err, "failed to ping dynamodb due to timeout")
		}
		return errors.Wrapf(err, "failed to ping dynamodb")
	}
	return nil
}
// CreateTableParam is parameter set of CreateTable.
type CreateTableParam struct {
	Name string
	RCU  int64 // ReadCapacityUnits
	WCU  int64 // WriteCapacityUnits
}

// CreateTable creates a dynamodb table to store time series data, waits for
// it to become active, and enables TTL when configured.
// Skip creating table if the table already exists.
func (d *DynamoDB) CreateTable(param *CreateTableParam) error {
	// Composite key: series name (hash) + "itemEpoch:step" string (range).
	_, err := d.svc.CreateTable(&godynamodb.CreateTableInput{
		TableName: aws.String(param.Name),
		AttributeDefinitions: []*godynamodb.AttributeDefinition{
			{
				AttributeName: aws.String("Name"),
				AttributeType: aws.String(godynamodb.ScalarAttributeTypeS),
			},
			{
				AttributeName: aws.String("Timestamp"),
				AttributeType: aws.String(godynamodb.ScalarAttributeTypeS),
			},
		},
		KeySchema: []*godynamodb.KeySchemaElement{
			{
				AttributeName: aws.String("Name"),
				KeyType:       aws.String("HASH"),
			},
			{
				AttributeName: aws.String("Timestamp"),
				KeyType:       aws.String("RANGE"),
			},
		},
		ProvisionedThroughput: &godynamodb.ProvisionedThroughput{
			ReadCapacityUnits:  aws.Int64(param.RCU),
			WriteCapacityUnits: aws.Int64(param.WCU),
		},
		// TODO StreamSpecification to export to s3
	})
	if err != nil {
		if awsErr, ok := err.(awserr.Error); ok {
			if awsErr.Code() == "ResourceInUseException" {
				// Skip if the table already exists
				log.Printf("Skip creating DynamoDB table because %s already exists\n", param.Name)
				return nil
			}
		}
		return errors.Wrapf(err, "failed to create dynamodb table (%s,%d,%d)",
			param.Name, param.RCU, param.WCU)
	}
	log.Printf("Creating DynamoDB table (name:%s, rcu:%d, wcu:%d) ...\n",
		param.Name, param.RCU, param.WCU)
	// Block until the table transitions to ACTIVE before touching TTL.
	err = d.svc.WaitUntilTableExists(&godynamodb.DescribeTableInput{
		TableName: aws.String(param.Name),
	})
	if err != nil {
		return errors.Wrapf(err, "failed to wait until table exists (%s,%d,%d)",
			param.Name, param.RCU, param.WCU)
	}
	if config.Config.DynamoDBTTL {
		_, err = d.svc.UpdateTimeToLive(&godynamodb.UpdateTimeToLiveInput{
			TableName: aws.String(param.Name),
			TimeToLiveSpecification: &godynamodb.TimeToLiveSpecification{
				AttributeName: aws.String("TTL"),
				Enabled:       aws.Bool(true),
			},
		})
		if err != nil {
			return errors.Wrapf(err, "failed to set TTL to (%s,%d,%d)",
				param.Name, param.RCU, param.WCU)
		}
	}
	return nil
}
// Fetch fetches datapoints by name from start until end. It fans out one
// batchGet goroutine per (time slot, name group) pair and merges the results.
func (d *DynamoDB) Fetch(name string, start, end time.Time) (model.SeriesMap, error) {
	slots := selectTimeSlots(start, end)
	// Split the (possibly brace-expanded) name into groups small enough
	// for one BatchGetItem call each.
	nameGroups := util.GroupNames(util.SplitName(name), dynamodbBatchLimit)
	numQueries := len(slots) * len(nameGroups)
	type result struct {
		value model.SeriesMap
		err   error
	}
	// Buffered to numQueries so late goroutines never block even if we
	// return early on the first error.
	c := make(chan *result, numQueries)
	for _, slot := range slots {
		for _, names := range nameGroups {
			q := &query{
				names: names,
				start: start,
				end:   end,
				slot:  slot,
			}
			go func(q *query) {
				sm, err := d.batchGet(q)
				c <- &result{value: sm, err: err}
			}(q)
		}
	}
	sm := make(model.SeriesMap, len(nameGroups))
	for i := 0; i < numQueries; i++ {
		ret := <-c
		if ret.err != nil {
			return nil, ret.err
		}
		sm.MergePointsToMap(ret.value)
	}
	return sm, nil
}

// batchGetResultToMap converts a BatchGetItem response into a SeriesMap,
// decoding each 16-byte value as (timestamp uint64, float64 bits) and
// dropping datapoints outside [q.start, q.end].
func batchGetResultToMap(resp *godynamodb.BatchGetItemOutput, q *query) model.SeriesMap {
	sm := make(model.SeriesMap, len(resp.Responses))
	for _, xs := range resp.Responses {
		for _, x := range xs {
			name := (*x["Name"].S)
			points := make(model.DataPoints, 0, len(x["Values"].BS))
			for _, y := range x["Values"].BS {
				t := int64(binary.BigEndian.Uint64(y[0:8]))
				v := math.Float64frombits(binary.BigEndian.Uint64(y[8:]))
				// Trim datapoints out of [start, end]
				if t < q.start.Unix() || q.end.Unix() < t {
					continue
				}
				points = append(points, model.NewDataPoint(t, v))
			}
			sm[name] = model.NewSeriesPoint(name, points, q.slot.step)
		}
	}
	return sm
}
// batchGet issues one BatchGetItem for all names in q against q's time slot
// and returns the decoded series. A missing table is treated as an empty
// result, not an error.
func (d *DynamoDB) batchGet(q *query) (model.SeriesMap, error) {
	var keys []map[string]*godynamodb.AttributeValue
	for _, name := range q.names {
		keys = append(keys, map[string]*godynamodb.AttributeValue{
			// Range key format must match the one written by Put.
			"Name":      {S: aws.String(name)},
			"Timestamp": {S: aws.String(fmt.Sprintf("%d:%d", q.slot.itemEpoch, q.slot.step))},
		})
	}
	items := make(map[string]*godynamodb.KeysAndAttributes)
	items[config.Config.DynamoDBTableName] = &godynamodb.KeysAndAttributes{Keys: keys}
	params := &godynamodb.BatchGetItemInput{
		RequestItems:           items,
		ReturnConsumedCapacity: aws.String("NONE"),
	}
	ctx, cancel := context.WithTimeout(context.TODO(), batchGetTimeout)
	defer cancel()
	var opt request.Option = func(r *request.Request) {}
	resp, err := d.svc.BatchGetItemWithContext(ctx, params, opt)
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == request.CanceledErrorCode {
			// If the SDK can determine the request or retry delay was canceled
			// by a context the CanceledErrorCode error code will be returned.
			return nil, errors.Wrap(err, "failed to batchGet dynamodb due to timeout")
		}
		if awsErr, ok := err.(awserr.Error); ok {
			if awsErr.Code() == "ResourceNotFoundException" {
				// Don't handle ResourceNotFoundException as error
				// bacause diamondb web return length 0 series as 200.
				return model.SeriesMap{}, nil
			}
		}
		return nil, errors.Wrapf(err,
			"failed to call dynamodb API batchGetItem (%s,%d,%d)",
			config.Config.DynamoDBTableName, q.slot.itemEpoch, q.slot.step,
		)
	}
	return batchGetResultToMap(resp, q), nil
}
// Put writes the datapoints into DynamoDB. It creates item
// if item doesn't exist and updates item if it exists.
//
// slot and history are time-offset strings (e.g. "1m", "1d"); slot fixes the
// item's step and history fixes its TTL relative to itemEpoch. tv maps unix
// timestamps to values.
func (d *DynamoDB) Put(name, slot, history string, itemEpoch int64, tv map[int64]float64) error {
	stepDuration, err := timeparser.ParseTimeOffset(slot)
	if err != nil {
		return err
	}
	historyDuration, err := timeparser.ParseTimeOffset(history)
	if err != nil {
		return err
	}
	ttl := itemEpoch + int64(historyDuration.Seconds())
	// Encode each datapoint as 16 bytes: big-endian timestamp followed by
	// the IEEE-754 bits of the value (the format batchGetResultToMap reads).
	// binary.Write into a bytes.Buffer cannot fail for fixed-size values,
	// so its error is deliberately ignored.
	vals := make([][]byte, 0, len(tv))
	for timestamp, value := range tv {
		buf := new(bytes.Buffer)
		binary.Write(buf, binary.BigEndian, timestamp)
		binary.Write(buf, binary.BigEndian, math.Float64bits(value))
		vals = append(vals, buf.Bytes())
	}
	params := &godynamodb.UpdateItemInput{
		TableName: aws.String(config.Config.DynamoDBTableName),
		Key: map[string]*godynamodb.AttributeValue{
			"Name":      {S: aws.String(name)},
			"Timestamp": {S: aws.String(fmt.Sprintf("%d:%d", itemEpoch, int64(stepDuration.Seconds())))},
		},
		// ADD on a binary-set attribute unions the new values in, which
		// makes the write an idempotent upsert.
		UpdateExpression: aws.String(`
		SET #ttl = :new_ttl
		ADD #values_set :new_values
		`),
		ExpressionAttributeNames: map[string]*string{
			"#ttl":        aws.String("TTL"),
			"#values_set": aws.String("Values"),
		},
		ExpressionAttributeValues: map[string]*godynamodb.AttributeValue{
			":new_ttl":    {N: aws.String(fmt.Sprintf("%d", ttl))},
			":new_values": {BS: vals},
		},
		ReturnValues: aws.String("NONE"),
	}
	ctx, cancel := context.WithTimeout(context.TODO(), updateTimeout)
	defer cancel()
	var opt request.Option = func(r *request.Request) {}
	if _, err := d.svc.UpdateItemWithContext(ctx, params, opt); err != nil {
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == request.CanceledErrorCode {
			// If the SDK can determine the request or retry delay was canceled
			// by a context the CanceledErrorCode error code will be returned.
			return errors.Wrap(err, "failed to updateItem dynamodb due to timeout")
		}
		return errors.Wrapf(err, "failed to call dynamodb API putItem (%s,%s,%d)",
			config.Config.DynamoDBTableName, name, itemEpoch)
	}
	return nil
}
// selectTimeSlots partitions [startTime, endTime) into storage time slots.
// The slot width (itemEpochStep) and datapoint resolution (step, seconds)
// grow with the queried span: minute resolution for spans under a day, up to
// daily resolution for spans of a year or more.
func selectTimeSlots(startTime, endTime time.Time) []*timeSlot {
	diff := endTime.Sub(startTime)
	// Default: hourly slots at one-minute resolution (span < one day).
	itemEpochStep, step := 60*60, 60
	if diff >= oneYear {
		itemEpochStep, step = oneYearSeconds, 60*60*24
	} else if diff >= oneWeek {
		itemEpochStep, step = oneWeekSeconds, 60*60
	} else if diff >= oneDay {
		itemEpochStep, step = oneDaySeconds, 5*60
	}
	var slots []*timeSlot
	// Align the first slot down to an itemEpochStep boundary.
	epoch := startTime.Unix() - startTime.Unix()%int64(itemEpochStep)
	for ; epoch < endTime.Unix(); epoch += int64(itemEpochStep) {
		slots = append(slots, &timeSlot{itemEpoch: epoch, step: step})
	}
	return slots
}
|
package leetcode
/*
605. 种花问题
假设你有一个很长的花坛,一部分地块种植了花,另一部分却没有。可是,花卉不能种植在相邻的地块上,它们会争夺水源,两者都会死去。
给定一个花坛(表示为一个数组包含0和1,其中0表示没种植花,1表示种植了花),和一个数 n 。能否在不打破种植规则的情况下种入 n 朵花?能则返回True,不能则返回False。
示例 1:
输入: flowerbed = [1,0,0,0,1], n = 1
输出: True
示例 2:
输入: flowerbed = [1,0,0,0,1], n = 2
输出: False
注意:
数组内已种好的花不会违反种植规则。
输入的数组长度范围为 [1, 20000]。
n 是非负整数,且不会超过输入数组的大小。
*/
// canPlaceFlowers reports whether n additional flowers can be planted in
// flowerbed (0 = empty plot, 1 = planted) without any two flowers ending up
// adjacent. It plants greedily and mutates flowerbed in place, exactly like
// the original implementation.
func canPlaceFlowers(flowerbed []int, n int) bool {
	size := len(flowerbed)
	if n > size {
		return false
	}
	for i := range flowerbed {
		if n == 0 {
			return true
		}
		leftFree := i == 0 || flowerbed[i-1] == 0
		rightFree := i == size-1 || flowerbed[i+1] == 0
		if flowerbed[i] == 0 && leftFree && rightFree {
			flowerbed[i] = 1
			n--
		}
	}
	return n == 0
}
|
package main
import (
"fmt"
"reflect"
)
// main demonstrates Go's type inference: `var` with an initializer and the
// short declaration `:=` both infer the static type, which reflect.TypeOf
// reports at run time.
func main() {
	var nome = "Vitor"
	var idade = 25
	// := needs no var keyword
	versao := 1.2
	fmt.Println("String:", nome, " Idade :", idade, " Versão: ", versao)
	for _, valor := range []interface{}{nome, idade, versao} {
		fmt.Println(reflect.TypeOf(valor))
	}
}
|
package main
/*
Fetches several web pages simultaneously using the net/http package, and prints
the URL of the biggest home page (defined as the most bytes in the response)
*/
import (
"fmt"
"io/ioutil"
"net/http"
)
// HomePageSize pairs a URL with the byte size of its home page body.
type HomePageSize struct {
	URL  string
	Size int
}

func main() {
	urls := []string{
		"http://www.apple.com",
		"http://www.amazon.com",
		"http://www.google.com",
		"http://www.microsoft.com",
	}

	// Each fetch goroutine reports its result on this channel.
	results := make(chan HomePageSize)
	for _, url := range urls {
		// url is passed as an argument so each goroutine captures its
		// own copy rather than the shared loop variable.
		go func(url string) {
			res, err := http.Get(url)
			if err != nil {
				panic(err)
			}
			defer res.Body.Close()
			bs, err := ioutil.ReadAll(res.Body)
			if err != nil {
				panic(err)
			}
			results <- HomePageSize{
				URL:  url,
				Size: len(bs),
			}
		}(url)
	}

	// Receive exactly one result per URL; order of arrival doesn't matter.
	var biggest HomePageSize
	for range urls {
		result := <-results
		if result.Size > biggest.Size {
			biggest = result
		}
	}

	fmt.Println("The biggest home page:", biggest.URL)
}
|
package opsgenie
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"time"
log "github.com/Sirupsen/logrus"
)
// timeout bounds HTTP calls; presumably consumed by getHTTPClient (defined
// below this chunk) — confirm.
var timeout = time.Second * 30

// apiURL is the OpsGenie API base URL.
var apiURL = "https://api.opsgenie.com"

// startHeartbeatAndSend ensures the heartbeat exists and is enabled, then
// sends a single heartbeat pulse.
func startHeartbeatAndSend(args OpsArgs) {
	startHeartbeat(args)
	sendHeartbeat(args)
}

// startHeartbeat creates the heartbeat when it does not exist yet, otherwise
// re-enables and updates the existing one. Errors are logged, not returned.
func startHeartbeat(args OpsArgs) {
	heartbeat, err := getHeartbeat(args)
	if err != nil {
		log.Error(err)
	} else {
		if heartbeat == nil {
			addHeartbeat(args)
		} else {
			updateHeartbeatWithEnabledTrue(args, *heartbeat)
		}
	}
}

// StartHeartbeatLoop can be used from other code as a library call: it starts
// the heartbeat and then keeps sending it at args.LoopInterval forever.
func StartHeartbeatLoop(args OpsArgs) {
	startHeartbeat(args)
	sendHeartbeatLoop(args)
}

// getHeartbeat fetches the heartbeat named in args. It returns (nil, nil)
// when the heartbeat does not exist (see checkHeartbeatError).
func getHeartbeat(args OpsArgs) (*Heartbeat, error) {
	code, body, err := doHTTPRequest("GET", "/v1/json/heartbeat/", mandatoryRequestParams(args), nil)
	if err != nil {
		return nil, err
	}
	if code != 200 {
		return checkHeartbeatError(code, body, args.Name)
	}
	return createHeartbeat(body, args.Name)
}
// checkHeartbeatError interprets a non-200 heartbeat GET response. The
// API's 400/code-17 combination means "heartbeat does not exist" and is
// mapped to (nil, nil); everything else becomes an error.
func checkHeartbeatError(code int, body []byte, name string) (*Heartbeat, error) {
	errorResponse, err := createErrorResponse(body)
	if err != nil {
		return nil, err
	}
	if code == 400 && errorResponse.Code == 17 {
		log.Infof("Heartbeat [%s] doesn't exist", name)
		return nil, nil
	}
	return nil, fmt.Errorf("%#v", errorResponse)
}

// createHeartbeat decodes a heartbeat API response body into a Heartbeat.
func createHeartbeat(body []byte, name string) (*Heartbeat, error) {
	heartbeat := &Heartbeat{}
	err := json.Unmarshal(body, &heartbeat)
	if err != nil {
		return nil, err
	}
	log.Info("Successfully retrieved heartbeat [" + name + "]")
	return heartbeat, nil
}
// addHeartbeat creates the heartbeat with all configured fields.
func addHeartbeat(args OpsArgs) {
	doOpsGenieHTTPRequestHandled("POST", "/v1/json/heartbeat/", nil, allContentParams(args), "Successfully added heartbeat ["+args.Name+"]")
}

// updateHeartbeatWithEnabledTrue updates an existing heartbeat by id and
// forces it enabled.
// NOTE(review): this path omits the trailing slash used by the other
// heartbeat URLs — presumably equivalent on the API side; confirm.
func updateHeartbeatWithEnabledTrue(args OpsArgs, heartbeat Heartbeat) {
	var contentParams = allContentParams(args)
	contentParams["id"] = heartbeat.ID
	contentParams["name"] = args.Name
	contentParams["enabled"] = true
	doOpsGenieHTTPRequestHandled("POST", "/v1/json/heartbeat", nil, contentParams, "Successfully enabled and updated heartbeat ["+args.Name+"]")
}

// sendHeartbeat sends one heartbeat pulse.
func sendHeartbeat(args OpsArgs) {
	doOpsGenieHTTPRequestHandled("POST", "/v1/json/heartbeat/send", nil, mandatoryContentParams(args), "Successfully sent heartbeat ["+args.Name+"]")
}
// sendHeartbeatLoop sends a heartbeat pulse every args.LoopInterval, forever.
// time.Tick's unstoppable ticker is acceptable here because the loop never
// exits.
func sendHeartbeatLoop(args OpsArgs) {
	// `for range` is the idiomatic form; `for _ = range` is flagged by
	// gofmt -s and vet.
	for range time.Tick(args.LoopInterval) {
		sendHeartbeat(args)
	}
}
// stopHeartbeat either deletes or merely disables the heartbeat, depending on
// args.Delete.
func stopHeartbeat(args OpsArgs) {
	if args.Delete {
		deleteHeartbeat(args)
	} else {
		disableHeartbeat(args)
	}
}

// deleteHeartbeat removes the heartbeat entirely.
func deleteHeartbeat(args OpsArgs) {
	doOpsGenieHTTPRequestHandled("DELETE", "/v1/json/heartbeat", mandatoryRequestParams(args), nil, "Successfully deleted heartbeat ["+args.Name+"]")
}

// disableHeartbeat turns the heartbeat off but keeps it configured.
func disableHeartbeat(args OpsArgs) {
	doOpsGenieHTTPRequestHandled("POST", "/v1/json/heartbeat/disable", nil, mandatoryContentParams(args), "Successfully disabled heartbeat ["+args.Name+"]")
}
// mandatoryContentParams returns the JSON body fields required by every
// heartbeat API call: the API key and the heartbeat name.
func mandatoryContentParams(args OpsArgs) map[string]interface{} {
	return map[string]interface{}{
		"apiKey": args.ApiKey,
		"name":   args.Name,
	}
}

// allContentParams extends the mandatory body fields with the optional
// description/interval settings, adding each only when it is set.
func allContentParams(args OpsArgs) map[string]interface{} {
	contentParams := mandatoryContentParams(args)
	if args.Description != "" {
		contentParams["description"] = args.Description
	}
	if args.Interval != 0 {
		contentParams["interval"] = args.Interval
	}
	if args.IntervalUnit != "" {
		contentParams["intervalUnit"] = args.IntervalUnit
	}
	return contentParams
}

// mandatoryRequestParams returns the query-string parameters required by
// every heartbeat API call.
func mandatoryRequestParams(args OpsArgs) map[string]string {
	return map[string]string{
		"apiKey": args.ApiKey,
		"name":   args.Name,
	}
}
func createErrorResponse(responseBody []byte) (ErrorResponse, error) {
errResponse := &ErrorResponse{}
err := json.Unmarshal(responseBody, &errResponse)
if err != nil {
return *errResponse, err
}
return *errResponse, nil
}
// doOpsGenieHTTPRequestHandled runs an OpsGenie API call and reduces its
// outcome to a log line: the error on failure, msg on success. The response
// body, if any, is discarded.
func doOpsGenieHTTPRequestHandled(method string, urlSuffix string, requestParameters map[string]string, contentParameters map[string]interface{}, msg string) {
	_, err := doOpsGenieHTTPRequest(method, urlSuffix, requestParameters, contentParameters)
	if err != nil {
		log.Error(err)
	} else {
		log.Info(msg)
	}
}
// doOpsGenieHTTPRequest performs the HTTP request and returns the response
// body. Any non-200 reply is converted into an error built from the decoded
// OpsGenie error payload.
func doOpsGenieHTTPRequest(method string, urlSuffix string, requestParameters map[string]string, contentParameters map[string]interface{}) ([]byte, error) {
	code, body, err := doHTTPRequest(method, urlSuffix, requestParameters, contentParameters)
	if err != nil {
		return nil, err
	}
	// Idiom fix: name the status constant instead of the bare literal 200.
	if code != http.StatusOK {
		e, err := createErrorResponse(body)
		if err != nil {
			return nil, err
		}
		return nil, fmt.Errorf("%#v", e)
	}
	return body, nil
}
// doHTTPRequest builds and executes the request, returning the status code
// and the full response body.
//
// Fix: the original registered `defer resp.Body.Close()` only after
// ioutil.ReadAll, so a read error returned without closing the body and
// leaked the connection. The defer now sits immediately after the Do error
// check.
func doHTTPRequest(method string, urlSuffix string, requestParameters map[string]string, contentParameters map[string]interface{}) (int, []byte, error) {
	request, err := createRequest(method, urlSuffix, requestParameters, contentParameters)
	if err != nil {
		return 0, nil, err
	}
	resp, err := getHTTPClient().Do(request)
	if err != nil {
		return 0, nil, err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return 0, nil, err
	}
	return resp.StatusCode, body, nil
}
// createRequest assembles an *http.Request: contentParameters become the
// JSON body and requestParameters become the URL query string.
func createRequest(method string, urlSuffix string, requestParameters map[string]string, contentParameters map[string]interface{}) (*http.Request, error) {
	payload, err := json.Marshal(contentParameters)
	if err != nil {
		return nil, err
	}
	target, err := createURL(urlSuffix, requestParameters)
	if err != nil {
		return nil, err
	}
	return http.NewRequest(method, target, bytes.NewReader(payload))
}
// createURL joins the API base URL with urlSuffix and encodes the given
// parameters into the query string.
func createURL(urlSuffix string, requestParameters map[string]string) (string, error) {
	u, err := url.Parse(apiURL + urlSuffix)
	if err != nil {
		return "", err
	}
	query := url.Values{}
	for key, value := range requestParameters {
		query.Add(key, value)
	}
	u.RawQuery = query.Encode()
	return u.String(), nil
}
// getHTTPClient returns a fresh HTTP client whose connections honor the
// package-level timeout for both dialing and I/O.
func getHTTPClient() *http.Client {
	client := &http.Client{
		Transport: &http.Transport{
			// NOTE(review): InsecureSkipVerify disables TLS certificate
			// validation, exposing requests to man-in-the-middle attacks —
			// confirm this is intentional (e.g. self-signed endpoints).
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
			Proxy: http.ProxyFromEnvironment,
			// NOTE(review): Transport.Dial is deprecated in favor of
			// DialContext in modern net/http; left as-is here.
			Dial: func(netw, addr string) (net.Conn, error) {
				conn, err := net.DialTimeout(netw, addr, timeout)
				if err != nil {
					return nil, err
				}
				// Absolute read/write deadline for the connection's lifetime;
				// the SetDeadline error is ignored.
				conn.SetDeadline(time.Now().Add(timeout))
				return conn, nil
			},
		},
	}
	return client
}
// Heartbeat represents the OpsGenie heartbeat data structure.
type Heartbeat struct {
	// ID is the heartbeat's OpsGenie identifier.
	ID string `json:"id"`
}

// ErrorResponse represents the OpsGenie error response data structure.
type ErrorResponse struct {
	// Code is the numeric OpsGenie error code.
	Code int `json:"code"`
	// Message carries the error text (JSON field "error").
	Message string `json:"error"`
}
|
package clicksend
import (
"bytes"
"encoding/base64"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
)
var (
	// clicksendURL is the production Clicksend REST API root (v3).
	clicksendURL = `https://rest.clicksend.com/v3`
)

// HttpClientAPI is the subset of *http.Client this package needs; it exists
// so tests can substitute a fake transport.
type HttpClientAPI interface {
	Do(req *http.Request) (*http.Response, error)
}

// ClientAPI is the public surface of the Clicksend client.
type ClientAPI interface {
	SendSMS(s *SMS) (*SMSResponse, error)
}

// Client provides a connection to the Clicksend API
type Client struct {
	// HTTPClient performs the underlying HTTP round trips.
	HTTPClient HttpClientAPI
	// Username for HTTP basic auth.
	Username string
	// APIKey is used as the basic-auth password alongside Username.
	APIKey string
	// BaseURL is the root API endpoint
	BaseURL string
}

// parameters holds the variable parts of a single API request.
type parameters struct {
	// Method is HTTP method type.
	Method string
	// Path is the URI suffix appended to BaseURL.
	Path string
	// Payload, when non-nil, is JSON-encoded as the request body.
	Payload interface{}
}
// NewClient builds a Client for the given credentials, bound to the default
// Clicksend API base URL.
func NewClient(httpClient HttpClientAPI, username string, apiKey string) *Client {
	c := &Client{
		HTTPClient: httpClient,
		Username:   username,
		APIKey:     apiKey,
	}
	c.BaseURL = clicksendURL
	return c
}
// doRequest performs an authenticated JSON request against the Clicksend API
// and decodes the response body into dst.
//
// Fix: the payload is now marshalled first and handed to http.NewRequest, so
// the request carries a correct Content-Length and GetBody (the original
// assigned req.Body after construction, leaving ContentLength at 0).
// NOTE(review): non-2xx statuses are still decoded into dst without error —
// confirm callers depend on that before adding a status check.
func (client *Client) doRequest(opts parameters, dst interface{}) error {
	url := fmt.Sprintf("%s/%s", client.BaseURL, opts.Path)
	var req *http.Request
	var err error
	if opts.Payload != nil {
		payloadData, merr := json.Marshal(opts.Payload)
		if merr != nil {
			return merr
		}
		req, err = http.NewRequest(opts.Method, url, bytes.NewReader(payloadData))
	} else {
		req, err = http.NewRequest(opts.Method, url, nil)
	}
	if err != nil {
		return err
	}
	req.Header.Add("Accept", "application/json")
	req.Header.Add("Content-Type", "application/json")
	req.Header.Add("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(client.Username+":"+client.APIKey)))
	res, err := client.HTTPClient.Do(req)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return err
	}
	return json.Unmarshal(body, dst)
}
|
// date: 2019-03-14
package balance
// Node is one entry on the hash ring: a key plus the ring position ("spot")
// derived from it.
type Node struct {
	// nodeKey identifies the node.
	nodeKey string
	// spotValue is the node's position on the 32-bit ring.
	spotValue uint32
}

// nodesArray is a sortable collection of ring nodes.
type nodesArray []Node

// Len reports the number of nodes (part of sort.Interface).
func (p nodesArray) Len() int {
	return len(p)
}

// Less is meant to be part of sort.Interface, but this stub takes no
// (i, j int) arguments and returns nothing, so nodesArray does NOT satisfy
// sort.Interface yet. NOTE(review): presumably it should compare spotValue —
// complete it before calling sort.Sort on nodesArray.
func (p nodesArray) Less() {
}
|
package main
import (
"flag"
"fmt"
"io"
"log"
"net"
"sync/atomic"
"syscall"
"github.com/514366607/reload"
)
var (
	// port is the TCP port to listen on (flag -p, default 8888).
	port int
	// isAccept is an atomic flag: 1 while the accept loop should keep
	// accepting connections, 0 once a reload has been triggered.
	isAccept int32 = 1
)
// main runs a TCP echo server that supports zero-downtime reloads: on
// SIGUSR1 the reload library is asked to re-exec, and this process stops
// accepting new connections while existing ones drain via the WaitGroup
// embedded in the reload.Service.
func main() {
	flag.IntVar(&port, "p", 8888, `端口`)
	flag.Parse()
	log.Printf("Actual pid is %d\n", syscall.Getpid())
	listener, err := reload.GetListener(fmt.Sprintf(":%d", port))
	if err != nil {
		// NOTE(review): execution continues with an unusable listener after
		// this failure — confirm whether this should be log.Fatal instead.
		log.Println(err)
	}
	var s = reload.NewServiceWith(listener, reload.WithDefaultHandle(), reload.WithHandleFunc(syscall.SIGUSR1, func(s reload.Service) {
		if err := s.Reload(); err != nil {
			s.Logger().Error(err)
		}
		log.Print("INlasdkjflaksjdflkasjdlkfajsldkfjaslkdfjaskldfjaslkdf\n\n\n\n\n\n\n\n\n\n")
		// Stop accepting new connections in this (old) process.
		atomic.StoreInt32(&isAccept, 0)
	}))
	log.Printf("isChild : %v ,listener: %v\n", s.IsChild(), listener)
	go func() {
		defer listener.Close()
		for atomic.LoadInt32(&isAccept) == 1 {
			conn, err := listener.Accept()
			if err != nil {
				log.Println(err)
				continue
			}
			// Track the connection so reload can wait for it to finish.
			s.Add(1)
			log.Println("Accept ", conn.RemoteAddr())
			go recvConnMsg(conn, s)
		}
	}()
	s.Start()
}
// recvConnMsg echoes every payload back to the client, prefixed with this
// process's pid, until the peer closes the connection or an I/O error
// occurs. It always closes the connection and signals the reload service
// that this session is done.
func recvConnMsg(conn net.Conn, s reload.Service) {
	buf := make([]byte, 4096)
	defer conn.Close()
	defer s.Done()
	for {
		n, err := conn.Read(buf)
		if err == io.EOF {
			// Peer closed the connection.
			return
		} else if err != nil {
			log.Println(err)
			return
		}
		var recv = string(buf[0:n])
		recv = fmt.Sprintf(" pid %d Return: %s ", syscall.Getpid(), recv)
		log.Printf("Rev Data : %v", recv)
		// Fix: the original discarded the write error; a failed write now
		// ends the session instead of looping on a broken connection.
		if _, err := conn.Write([]byte(recv)); err != nil {
			log.Println(err)
			return
		}
	}
}
|
package main
import "net/http"
// Route describes one HTTP route: a display name, method, URL pattern, and
// the handler serving it.
type Route struct {
	Name        string
	Method      string
	Pattern     string
	HandlerFunc http.HandlerFunc
}

// Routes is the route-table type.
type Routes []Route

// routes is the static route table: Index serves GET and HEAD on "/",
// Healthz serves GET and HEAD on "/healthz", Auth serves POST on "/auth";
// the Authz route is currently disabled.
var routes = Routes{
	Route{
		"Index",
		"GET",
		"/",
		Index,
	},
	Route{
		"Index",
		"HEAD",
		"/",
		Index,
	},
	Route{
		"Auth",
		"POST",
		"/auth",
		Auth,
	},
	/*Route{
		"Authz",
		"POST",
		"/authz",
		Authz,
	},*/
	Route{
		"Healthz",
		"GET",
		"/healthz",
		Healthz,
	},
	Route{
		"Healthz",
		"HEAD",
		"/healthz",
		Healthz,
	},
}
|
package api
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"strings"
"errors"
"github.com/pborman/uuid"
"log"
)
// ContentDeliveryNetwork is the CDN resource exchanged over the v1 API.
type ContentDeliveryNetwork struct {
	Id        string   `json:"id"`
	Label     string   `json:"label"`
	Ips       []string `json:"ips"`
	Hostnames []string `json:"hostnames"`
}

// db is the in-memory CDN store, keyed by id.
// NOTE(review): access is not synchronized; concurrent requests race on this
// map — confirm single-threaded use or guard it with a mutex.
var db = map[string]*ContentDeliveryNetwork{}
// ContentDeliveryNetworkCreateV1 handles CDN creation: it decodes the
// request body, assigns a fresh id, stores the resource, and echoes it back
// with 201 Created.
func ContentDeliveryNetworkCreateV1(w http.ResponseWriter, r *http.Request) {
	if authenticateRequest(r, w) != nil {
		return
	}
	log.Printf("Header [X-Request-ID]: %s", r.Header.Get("X-Request-ID"))
	newCdn, err := readRequest(r)
	if err != nil {
		sendErrorResponse(http.StatusBadRequest, err.Error(), w)
		return
	}
	newCdn.Id = uuid.New()
	db[newCdn.Id] = newCdn
	sendResponse(http.StatusCreated, w, newCdn)
}
// ContentDeliveryNetworkGetV1 returns the CDN addressed by the request path,
// or 404 if it does not exist.
func ContentDeliveryNetworkGetV1(w http.ResponseWriter, r *http.Request) {
	if authenticateRequest(r, w) != nil {
		return
	}
	cdn, lookupErr := retrieveCdn(r)
	if lookupErr != nil {
		sendErrorResponse(http.StatusNotFound, lookupErr.Error(), w)
		return
	}
	sendResponse(http.StatusOK, w, cdn)
}
// ContentDeliveryNetworkUpdateV1 replaces an existing CDN with the payload
// from the request body, preserving the id taken from the URL path.
func ContentDeliveryNetworkUpdateV1(w http.ResponseWriter, r *http.Request) {
	if authenticateRequest(r, w) != nil {
		return
	}
	existing, err := retrieveCdn(r)
	if err != nil {
		sendErrorResponse(http.StatusNotFound, err.Error(), w)
		return
	}
	updated, err := readRequest(r)
	if err != nil {
		sendErrorResponse(http.StatusBadRequest, err.Error(), w)
		return
	}
	// Keep the id from the path, not from the payload.
	updated.Id = existing.Id
	db[updated.Id] = updated
	sendResponse(http.StatusOK, w, updated)
}
// ContentDeliveryNetworkDeleteV1 deletes the CDN addressed by the request
// path, replying 204 No Content on success and 404 if it does not exist.
func ContentDeliveryNetworkDeleteV1(w http.ResponseWriter, r *http.Request) {
	if authenticateRequest(r, w) != nil {
		return
	}
	target, err := retrieveCdn(r)
	if err != nil {
		sendErrorResponse(http.StatusNotFound, err.Error(), w)
		return
	}
	delete(db, target.Id)
	updateResponseHeaders(http.StatusNoContent, w)
}
// retrieveCdn resolves the CDN addressed by the /v1/cdns/{id} path segment.
func retrieveCdn(r *http.Request) (*ContentDeliveryNetwork, error) {
	id := strings.TrimPrefix(r.URL.Path, "/v1/cdns/")
	if len(id) == 0 {
		return nil, fmt.Errorf("cdn id path param not provided")
	}
	if cdn, ok := db[id]; ok && cdn != nil {
		return cdn, nil
	}
	return nil, fmt.Errorf("cdn id '%s' not found", id)
}
// readRequest decodes the request body into a ContentDeliveryNetwork.
func readRequest(r *http.Request) (*ContentDeliveryNetwork, error) {
	raw, err := ioutil.ReadAll(r.Body)
	if err != nil {
		return nil, fmt.Errorf("failed to read request body - %s", err)
	}
	var cdn ContentDeliveryNetwork
	if err := json.Unmarshal(raw, &cdn); err != nil {
		return nil, fmt.Errorf("payload does not match cdn spec - %s", err)
	}
	return &cdn, nil
}
func authenticateRequest(r *http.Request, w http.ResponseWriter) error {
apiKey := r.Header.Get("Authorization")
if apiKey == "" || apiKey != "apiKeyValue" {
msg := fmt.Sprintf("unauthorized user")
sendErrorResponse(http.StatusUnauthorized, msg, w)
return errors.New(msg)
}
return nil
}
// sendResponse marshals cdn and writes it with the given status code.
//
// Fixes: the original fell through after a marshal error, calling
// WriteHeader a second time ("superfluous response.WriteHeader") and
// appending a nil body to the already-written error response — it now
// returns. The success path also goes through updateResponseHeaders so the
// JSON Content-Type is set, matching sendErrorResponse.
func sendResponse(httpResponseStatusCode int, w http.ResponseWriter, cdn *ContentDeliveryNetwork) {
	resBody, err := json.Marshal(cdn)
	if err != nil {
		msg := fmt.Sprintf("internal server error - %s", err)
		sendErrorResponse(http.StatusInternalServerError, msg, w)
		return
	}
	updateResponseHeaders(httpResponseStatusCode, w)
	w.Write(resBody)
}
// sendErrorResponse writes a JSON error envelope of the form
// {"code":"<status>", "message": "<message>"} with the given HTTP status.
func sendErrorResponse(httpStatusCode int, message string, w http.ResponseWriter) {
	updateResponseHeaders(httpStatusCode, w)
	w.Write([]byte(fmt.Sprintf(`{"code":"%d", "message": "%s"}`, httpStatusCode, message)))
}
func updateResponseHeaders(httpStatusCode int, w http.ResponseWriter) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
w.WriteHeader(httpStatusCode)
}
|
// SPDX-License-Identifier: MIT
package core
import (
"bytes"
"fmt"
"testing"
"time"
"github.com/issue9/assert/v3"
"github.com/caixw/apidoc/v7/internal/locale"
)
// Compile-time check that the message type implements fmt.Stringer.
var _ fmt.Stringer = Erro

// TestType_String verifies the string form of each message type, including
// the fallback for unknown values.
func TestType_String(t *testing.T) {
	a := assert.New(t, false)
	a.Equal("ERRO", Erro.String())
	a.Equal("SUCC", Succ.String())
	a.Equal("INFO", Info.String())
	a.Equal("WARN", Warn.String())
	a.Equal("<unknown>", MessageType(-22).String())
}

// TestHandler routes one message of each type through a MessageHandler,
// checks each one landed in the matching buffer, and verifies that sending
// after Stop panics.
func TestHandler(t *testing.T) {
	a := assert.New(t, false)
	erro := new(bytes.Buffer)
	warn := new(bytes.Buffer)
	info := new(bytes.Buffer)
	succ := new(bytes.Buffer)
	h := NewMessageHandler(func(msg *Message) {
		switch msg.Type {
		case Erro:
			erro.WriteString("erro")
		case Warn:
			warn.WriteString("warn")
		case Info:
			info.WriteString("info")
		case Succ:
			succ.WriteString("succ")
		default:
			panic("panic")
		}
	})
	a.NotNil(h)
	h.Error((Location{URI: "erro.go"}).NewError(locale.ErrInvalidUTF8Character))
	h.Warning((Location{URI: "warn.go"}).NewError(locale.ErrInvalidUTF8Character))
	h.Info((Location{URI: "info.go"}).NewError(locale.ErrInvalidUTF8Character))
	h.Success((Location{URI: "succ.go"}).NewError(locale.ErrInvalidUTF8Character))
	// Wait for the handler's channel to drain.
	// NOTE(review): a fixed sleep makes this test timing-sensitive.
	time.Sleep(1 * time.Second)
	a.Equal(erro.String(), "erro")
	a.Equal(warn.String(), "warn")
	a.Equal(info.String(), "info")
	a.Equal(succ.String(), "succ")
	h.Stop()
	a.Panic(func() { // the messages channel is already closed
		h.Error((Location{URI: "erro"}).NewError(locale.ErrInvalidUTF8Character))
	})
}

// TestHandler_Stop verifies that Stop blocks until the in-flight message has
// been fully processed.
func TestHandler_Stop(t *testing.T) {
	a := assert.New(t, false)
	var exit bool
	h := NewMessageHandler(func(msg *Message) {
		time.Sleep(time.Second)
		exit = true
	})
	a.NotNil(h)
	h.Locale(Erro, locale.ErrInvalidUTF8Character)
	h.Stop() // blocks here, waiting for completion
	a.True(exit)
}
|
//go:generate go get github.com/jteeuwen/go-bindata/go-bindata
//go:generate go-bindata -o templates.go -pkg assets templates/...
package assets
|
//go:build gofuzzbeta
// +build gofuzzbeta
package network
import (
"context"
"math/big"
"testing"
"time"
"github.com/ethereum/go-ethereum/ethclient"
abci "github.com/tendermint/tendermint/abci/types"
tmjson "github.com/tendermint/tendermint/libs/json"
"github.com/cosmos/cosmos-sdk/simapp"
authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
"github.com/cosmos/cosmos-sdk/crypto/keyring"
stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
"github.com/ethereum/go-ethereum/common"
ethtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/stretchr/testify/require"
"github.com/cosmos/cosmos-sdk/baseapp"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/tharsis/ethermint/app"
"github.com/tharsis/ethermint/crypto/ethsecp256k1"
"github.com/tharsis/ethermint/tests"
ethermint "github.com/tharsis/ethermint/types"
"github.com/tharsis/ethermint/x/evm"
"github.com/tharsis/ethermint/x/evm/types"
"github.com/tendermint/tendermint/crypto/tmhash"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
tmversion "github.com/tendermint/tendermint/proto/tendermint/version"
"github.com/tendermint/tendermint/version"
)
// FuzzABCI drives a raw fuzz payload through a full ABCI block lifecycle
// (BeginBlock, DeliverTx with the fuzz bytes, EndBlocker, Commit) on a
// freshly set-up app instance.
func FuzzABCI(f *testing.F) {
	f.Fuzz(func(t *testing.T, msg []byte) {
		eapp, ctx, _, _ := setupApp(t)
		tmheader := ctx.BlockHeader()
		eapp.BeginBlock(abci.RequestBeginBlock{
			Header: tmheader,
		})
		eapp.DeliverTx(abci.RequestDeliverTx{
			Tx: msg,
		})
		endBR := abci.RequestEndBlock{Height: tmheader.Height}
		eapp.EndBlocker(ctx, endBR)
		eapp.Commit()
	})
}

// FuzzNetworkRPC submits fuzz bytes that decode as a valid Ethereum
// transaction to a running test network over JSON-RPC, then checks the chain
// keeps producing blocks.
func FuzzNetworkRPC(f *testing.F) {
	f.Fuzz(func(t *testing.T, msg []byte) {
		ethjson := new(ethtypes.Transaction)
		binerr := ethjson.UnmarshalBinary(msg)
		if binerr == nil {
			testnetwork := New(t, DefaultConfig())
			_, err := testnetwork.WaitForHeight(1)
			if err != nil {
				t.Log("failed to start up the network")
				testnetwork.Cleanup()
			} else {
				client, err := ethclient.Dial(testnetwork.Validators[0].JSONRPCAddress)
				if err != nil {
					t.Log("failed to create a client")
				} else {
					// NOTE(review): SendTransaction's error is discarded —
					// presumably intentional for fuzzing (most inputs are
					// rejectable); confirm.
					client.SendTransaction(context.Background(), ethjson)
					h, err := testnetwork.WaitForHeightWithTimeout(10, time.Minute)
					if err != nil {
						testnetwork.Cleanup()
						t.Fatalf("expected to reach 10 blocks; got %d", h)
					}
					latestHeight, err := testnetwork.LatestHeight()
					if err != nil {
						testnetwork.Cleanup()
						t.Fatalf("latest height failed")
					}
					if latestHeight < h {
						testnetwork.Cleanup()
						t.Errorf("latestHeight should be greater or equal to")
					}
				}
				testnetwork.Cleanup()
			}
		}
	})
}
// setupApp builds a fresh EthermintApp with one funded account and one
// validator, returning the app, a context at height 1, the account's signer,
// and its EVM address.
//
// Fixes: SetValidatorByConsAddr was called twice back-to-back (copy-paste
// duplication) — once is sufficient; the mojibake "ð" in the EthAccount
// literal is restored to "&ethermint.".
func setupApp(t *testing.T) (*app.EthermintApp, sdk.Context, keyring.Signer, common.Address) {
	checkTx := false
	// account key
	priv, err := ethsecp256k1.GenerateKey()
	require.NoError(t, err)
	address := common.BytesToAddress(priv.PubKey().Address().Bytes())
	signer := tests.NewSigner(priv)
	from := address
	// consensus key
	priv, err = ethsecp256k1.GenerateKey()
	require.NoError(t, err)
	consAddress := sdk.ConsAddress(priv.PubKey().Address())
	eapp := app.Setup(checkTx, nil)
	coins := sdk.NewCoins(sdk.NewCoin(types.DefaultEVMDenom, sdk.NewInt(100000000000000)))
	genesisState := app.ModuleBasics.DefaultGenesis(eapp.AppCodec())
	b32address := sdk.MustBech32ifyAddressBytes(sdk.GetConfig().GetBech32AccountAddrPrefix(), priv.PubKey().Address().Bytes())
	// Fund both the fuzz account and the fee collector module account.
	balances := []banktypes.Balance{
		{
			Address: b32address,
			Coins:   coins,
		},
		{
			Address: eapp.AccountKeeper.GetModuleAddress(authtypes.FeeCollectorName).String(),
			Coins:   coins,
		},
	}
	// update total supply
	bankGenesis := banktypes.NewGenesisState(banktypes.DefaultGenesisState().Params, balances, sdk.NewCoins(sdk.NewCoin(types.DefaultEVMDenom, sdk.NewInt(200000000000000))), []banktypes.Metadata{})
	genesisState[banktypes.ModuleName] = eapp.AppCodec().MustMarshalJSON(bankGenesis)
	stateBytes, err := tmjson.MarshalIndent(genesisState, "", " ")
	require.NoError(t, err)
	// Initialize the chain
	eapp.InitChain(
		abci.RequestInitChain{
			ChainId:         "ethermint_9000-1",
			Validators:      []abci.ValidatorUpdate{},
			ConsensusParams: simapp.DefaultConsensusParams,
			AppStateBytes:   stateBytes,
		},
	)
	ctx := eapp.BaseApp.NewContext(checkTx, tmproto.Header{
		Height:          1,
		ChainID:         "ethermint_9000-1",
		Time:            time.Now().UTC(),
		ProposerAddress: consAddress.Bytes(),
		Version: tmversion.Consensus{
			Block: version.BlockProtocol,
		},
		LastBlockId: tmproto.BlockID{
			Hash: tmhash.Sum([]byte("block_id")),
			PartSetHeader: tmproto.PartSetHeader{
				Total: 11,
				Hash:  tmhash.Sum([]byte("partset_header")),
			},
		},
		AppHash:            tmhash.Sum([]byte("app")),
		DataHash:           tmhash.Sum([]byte("data")),
		EvidenceHash:       tmhash.Sum([]byte("evidence")),
		ValidatorsHash:     tmhash.Sum([]byte("validators")),
		NextValidatorsHash: tmhash.Sum([]byte("next_validators")),
		ConsensusHash:      tmhash.Sum([]byte("consensus")),
		LastResultsHash:    tmhash.Sum([]byte("last_result")),
	})
	eapp.EvmKeeper.WithContext(ctx)
	queryHelper := baseapp.NewQueryServerTestHelper(ctx, eapp.InterfaceRegistry())
	types.RegisterQueryServer(queryHelper, eapp.EvmKeeper)
	acc := &ethermint.EthAccount{
		BaseAccount: authtypes.NewBaseAccount(sdk.AccAddress(address.Bytes()), nil, 0, 0),
		CodeHash:    common.BytesToHash(crypto.Keccak256(nil)).String(),
	}
	eapp.AccountKeeper.SetAccount(ctx, acc)
	valAddr := sdk.ValAddress(address.Bytes())
	validator, err := stakingtypes.NewValidator(valAddr, priv.PubKey(), stakingtypes.Description{})
	require.NoError(t, err)
	err = eapp.StakingKeeper.SetValidatorByConsAddr(ctx, validator)
	require.NoError(t, err)
	eapp.StakingKeeper.SetValidator(ctx, validator)
	return eapp, ctx, signer, from
}
// FuzzEVMHandler builds three EVM messages from fuzzed amounts, gas values
// and input bytes — one contract creation and two calls to the derived
// address — and runs each through the evm handler.
func FuzzEVMHandler(f *testing.F) {
	f.Fuzz(func(t *testing.T, amount1 int64, gasLimit1 uint64, gasPrice1 int64, input1 []byte,
		amount2 int64, nonce2 uint64, gasLimit2 uint64, gasPrice2 int64, input2 []byte,
		amount3 int64, gasLimit3 uint64, gasPrice3 int64, input3 []byte) {
		eapp, ctx, signer, from := setupApp(t)
		ethSigner := ethtypes.LatestSignerForChainID(eapp.EvmKeeper.ChainID())
		handler := evm.NewHandler(eapp.EvmKeeper)
		to := crypto.CreateAddress(from, 1)
		chainID := big.NewInt(1)
		tx1 := types.NewTxContract(chainID, 0, big.NewInt(amount1), gasLimit1, big.NewInt(gasPrice1), nil, nil, input1, nil)
		tx1.From = from.String()
		// NOTE(review): Sign errors are discarded throughout — presumably
		// acceptable in a fuzz harness; confirm.
		tx1.Sign(ethSigner, signer)
		tx2 := types.NewTx(chainID, nonce2, &to, big.NewInt(amount2), gasLimit2, big.NewInt(gasPrice2), nil, nil, input2, nil)
		tx2.From = from.String()
		tx2.Sign(ethSigner, signer)
		tx3 := types.NewTx(chainID, 1, &to, big.NewInt(amount3), gasLimit3, big.NewInt(gasPrice3), nil, nil, input3, nil)
		tx3.From = from.String()
		tx3.Sign(ethSigner, signer)
		handler(ctx, tx1)
		handler(ctx, tx2)
		handler(ctx, tx3)
	})
}
|
package db
import (
"context"
"time"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
// DisconnectFunc tears down the client's MongoDB connection.
type DisconnectFunc func()

// GetClient connects to MongoDB at uri and returns the client together with
// a disconnect function. The username/password parameters are currently
// unused (see the commented-out credential block).
//
// Fixes: the mongo.Connect error was silently ignored (now panics, matching
// the existing panic-on-Disconnect convention), and the disconnect closure
// reused the connect context, which is already cancelled by the time the
// closure runs — it now gets its own timeout context.
func GetClient(uri string, username string, password string) (*mongo.Client, DisconnectFunc) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// credentials := options.Credential{
	// 	Username: username,
	// 	Password: password,
	// }
	client, err := mongo.Connect(ctx, options.Client().ApplyURI(uri)) //.SetAuth(credentials))
	if err != nil {
		panic(err)
	}
	return client, func() {
		dctx, dcancel := context.WithTimeout(context.Background(), 10*time.Second)
		defer dcancel()
		if err := client.Disconnect(dctx); err != nil {
			panic(err)
		}
	}
}
|
package main
import (
"bytes"
"encoding/binary"
"fmt"
)
// every binary struct pack that google does seems to be in big endian
/*
SUBPROTOCOL_TAG_CONNECT_SUCCESS_SID = 0x0001
SUBPROTOCOL_TAG_RECONNECT_SUCCESS_ACK = 0x0002
SUBPROTOCOL_TAG_DATA = 0x0004
SUBPROTOCOL_TAG_ACK = 0x0007
return (struct.unpack(str('>H'), binary_data[:2])[0],
binary_data[2:])
*/
//tag, bytes_left = utils.ExtractSubprotocolTag(binary_data)
// extractSubProtocolTag reads the big-endian uint16 subprotocol tag from the
// head of data and returns it together with the remaining bytes.
func extractSubProtocolTag(data []byte) (uint16, []byte, error) {
	const tagLen = 2
	if len(data) < tagLen {
		return 0, nil, fmt.Errorf("incomplete data")
	}
	tag := binary.BigEndian.Uint16(data)
	return tag, data[tagLen:], nil
}
// handleSubprotocolConnectSuccessSid delegates to the extractor for the
// CONNECT_SUCCESS_SID (0x0001) frame body.
func handleSubprotocolConnectSuccessSid(data []byte) ([]byte, []byte, error) {
	return extractSubprotocolConnectSuccessSid(data)
}
// extractSubprotocolConnectSuccessSid parses a length-prefixed byte array: a
// big-endian uint32 length followed by that many bytes (the session id),
// returning the sid and any trailing bytes.
func extractSubprotocolConnectSuccessSid(data []byte) ([]byte, []byte, error) {
	length, rest, err := extractUnsignedInt32(data)
	if err != nil {
		return nil, nil, err
	}
	return extractBinaryArray(rest, int(length))
}
// extractUnsignedInt32 reads a big-endian uint32 from the head of data and
// returns it with the remaining bytes.
func extractUnsignedInt32(data []byte) (uint32, []byte, error) {
	const wordLen = 4
	if len(data) < wordLen {
		return 0, nil, fmt.Errorf("incomplete data")
	}
	value := binary.BigEndian.Uint32(data)
	return value, data[wordLen:], nil
}
// extractBinaryArray splits data into its first dataLen bytes and the rest,
// erroring if fewer than dataLen bytes are available.
func extractBinaryArray(data []byte, dataLen int) ([]byte, []byte, error) {
	if dataLen > len(data) {
		return nil, nil, fmt.Errorf("incomplete data")
	}
	head, tail := data[:dataLen], data[dataLen:]
	return head, tail, nil
}
// handleSubprotocolData parses a DATA (0x0004) frame body: a big-endian
// uint32 length followed by the payload, returning the payload and any
// trailing bytes.
func handleSubprotocolData(data []byte) ([]byte, []byte, error) {
	payloadLen, rest, err := extractUnsignedInt32(data)
	if err != nil {
		return nil, nil, err
	}
	return extractBinaryArray(rest, int(payloadLen))
}
// AckFrame is the wire layout of an ACK (tag 0x0007) frame; both fields are
// packed big-endian by binary.Write.
type AckFrame struct {
	Tag      uint16
	Received uint64
}

// Python struct codes, for reference against the original implementation:
// Q is uint64
// H is uint16
// sendAck encodes an ACK frame acknowledging bytesReceived bytes.
func sendAck(bytesReceived int) ([]byte, error) {
	var out bytes.Buffer
	frame := AckFrame{Tag: 7, Received: uint64(bytesReceived)}
	if err := binary.Write(&out, binary.BigEndian, frame); err != nil {
		return nil, err
	}
	return out.Bytes(), nil
}
// CreateSubprotocolDataFrame helper type.
// Python struct code, for reference: I is uint32.

// DataFrame is the wire header of a DATA (tag 0x0004) frame: the tag and the
// payload length, both packed big-endian by binary.Write.
type DataFrame struct {
	Tag uint16
	Len uint32
}
// createSubprotocolDataFrame prepends a DATA header (tag 4, payload length)
// to data and returns the complete frame. Encoding failures panic, matching
// the original behavior.
func createSubprotocolDataFrame(data []byte) []byte {
	var frame bytes.Buffer
	header := DataFrame{Tag: 4, Len: uint32(len(data))}
	if err := binary.Write(&frame, binary.BigEndian, header); err != nil {
		panic(err)
	}
	if _, err := frame.Write(data); err != nil {
		panic(err)
	}
	return frame.Bytes()
}
|
/**
* Copyright (c) 2016 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"sync"
commonLogger "github.com/trustedanalytics-ng/tap-go-common/logger"
"github.com/trustedanalytics-ng/tap-go-common/util"
"github.com/trustedanalytics-ng/tap-monitor/app"
)
// logger is the package logger; the init error is deliberately discarded.
var logger, _ = commonLogger.InitLogger("main")

// waitGroup tracks outstanding monitor work so TerminationObserver can wait
// for it when a termination signal arrives.
var waitGroup = &sync.WaitGroup{}

// main wires up connections and runs the monitor until terminated.
func main() {
	go util.TerminationObserver(waitGroup, "Monitor")
	if err := app.InitConnections(); err != nil {
		logger.Fatal("ERROR initConnections: ", err.Error())
	}
	app.StartMonitor(waitGroup)
}
|
package client
import (
"context"
"net/url"
"time"
"github.com/go-kit/kit/endpoint"
v1 "github.com/turao/go-worker/api/v1"
)
// client wraps an http client and add a bunch of stuff to it
type client struct {
// dependencies
// auth
// server
// logger (?)
dispatch endpoint.Endpoint
stop endpoint.Endpoint
query endpoint.Endpoint
}
// New builds a client whose endpoints all target the given base URL.
func New(url *url.URL) *client {
	c := &client{}
	c.dispatch = makeDispatchEndpoint(url)
	c.stop = makeStopEndpoint(url)
	c.query = makeQueryEndpoint(url)
	return c
}
// Dispatch submits a new job with the given name and arguments, bounded by
// a 15-second timeout.
func (c *client) Dispatch(name string, args ...string) (interface{}, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()
	req := v1.DispatchRequest{Name: name, Args: args}
	result, callErr := c.dispatch(ctx, req)
	if callErr != nil {
		return nil, callErr
	}
	return result, nil
}
// Stop requests termination of the job identified by jobID, bounded by a
// 15-second timeout.
func (c *client) Stop(jobID string) (interface{}, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()
	req := v1.StopRequest{ID: v1.JobID(jobID)}
	result, callErr := c.stop(ctx, req)
	if callErr != nil {
		return nil, callErr
	}
	return result, nil
}
// Query fetches the status of the job identified by jobID, bounded by a
// 15-second timeout.
func (c *client) Query(jobID string) (interface{}, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()
	req := v1.QueryInfoRequest{ID: v1.JobID(jobID)}
	result, callErr := c.query(ctx, req)
	if callErr != nil {
		return nil, callErr
	}
	return result, nil
}
|
package errors
import (
"encoding/json"
"errors"
"fmt"
"testing"
"github.com/stretchr/testify/suite"
)
// Errors is the testify suite exercising this package's error helpers.
type Errors struct {
	suite.Suite
}

// TestErrors runs the suite.
func TestErrors(t *testing.T) {
	suite.Run(t, new(Errors))
}

// TestStack checks getPCs honors its skip argument and that callstack
// resolves one frame per program counter.
func (s *Errors) TestStack() {
	// getPCs
	pcs := getPCs(0)
	if !s.True(len(pcs) > 0, "wrong pcs length") {
		return
	}
	// Skip one frame
	pcsSkip1 := getPCs(1)
	s.Equal(len(pcs)-1, len(pcsSkip1), "wrong pcs length with skip 1")
	// callstack
	stack := callstack(pcs)
	s.Equal(len(pcs), len(stack), "wrong stack length")
}

// TestFromError checks fromError fully initializes a wrapper for a plain
// error and is a no-op on an already-wrapped *errorExt.
func (s *Errors) TestFromError() {
	err := errors.New("error message")
	newErr := fromError(err)
	s.Equal(err, newErr.cause, "cause should be original error")
	s.NotNil(newErr.context, "context should be initialized")
	s.NotNil(newErr.data, "data should be initialized")
	s.True(len(newErr.pcs) > 0, "callstack should be generated")
	newErr2 := fromError(newErr)
	s.Equal(newErr, newErr2, "should not change an errorExt")
}

// TestNew checks New produces a fully initialized *errorExt with the given
// message as cause.
func (s *Errors) TestNew() {
	msg := "error message"
	err := New(msg)
	eExt, ok := err.(*errorExt)
	if !s.True(ok, "wrong error return type") {
		return
	}
	if s.NotNil(eExt.cause, "cause should be created") {
		s.Equal(msg, eExt.cause.Error(), "cause should use supplied message")
	}
	s.NotNil(eExt.context, "context should be initialized")
	s.NotNil(eExt.data, "data should be initialized")
	s.True(len(eExt.pcs) > 0, "callstack should be generated")
}

// TestNewf checks Newf formats the cause message like fmt.Sprintf.
func (s *Errors) TestNewf() {
	format := "%s:%d"
	args := []interface{}{"foo", uint64(10)}
	msg := fmt.Sprintf(format, args...)
	err := Newf(format, args...)
	eExt, ok := err.(*errorExt)
	if !s.True(ok, "wrong error return type") {
		return
	}
	if s.NotNil(eExt.cause, "cause should be created") {
		s.Equal(msg, eExt.cause.Error(), "cause should use correct message")
	}
	s.NotNil(eExt.context, "context should be initialized")
	s.NotNil(eExt.data, "data should be initialized")
	s.True(len(eExt.pcs) > 0, "callstack should be generated")
}
// TestWrap covers wrapping of plain errors (with and without context) and
// verifies that wrapping an existing *errorExt only appends context.
func (s *Errors) TestWrap() {
	// Wrap a regular error
	msg := "error message"
	err := errors.New(msg)
	wrappedErr := Wrap(err)
	eExt, ok := wrappedErr.(*errorExt)
	if !s.True(ok, "wrong error return type") {
		return
	}
	s.Equal(err, eExt.cause, "cause should be original error")
	s.NotNil(eExt.context, "context should be initialized")
	s.NotNil(eExt.data, "data should be initialized")
	s.True(len(eExt.pcs) > 0, "callstack should be generated")
	// Wrap a regular error with context
	ctx := "some context"
	wrappedErrWithContext := Wrap(err, ctx)
	eExt, ok = wrappedErrWithContext.(*errorExt)
	if !s.True(ok, "wrong error return type") {
		return
	}
	s.Equal(err, eExt.cause, "cause should be original error")
	if s.Len(eExt.context, 1, "should have added context") {
		s.Equal(ctx, eExt.context[0], "should be correct context")
	}
	// Wrap an errorExt
	rewrappedErr := Wrap(wrappedErrWithContext)
	s.Equal(wrappedErrWithContext, rewrappedErr, "should not change an errorExt")
	// Wrap an errorExt with context
	ctx2 := "more context"
	rewrappedErr = Wrap(wrappedErrWithContext, ctx2)
	eExt, ok = wrappedErrWithContext.(*errorExt)
	if !s.True(ok, "wrong error return type") {
		return
	}
	s.Equal(err, eExt.cause, "cause should be original error")
	if s.Len(eExt.context, 2, "should have added context") {
		s.Equal(ctx, eExt.context[0], "should be correct context")
		s.Equal(ctx2, eExt.context[1], "should be correct context")
	}
}

// TestWrapf checks Wrapf formats its context like fmt.Sprintf, then verifies
// context accumulates on re-wrap.
func (s *Errors) TestWrapf() {
	// Wrap a regular error
	format := "%s:%d"
	args := []interface{}{"foo", uint64(10)}
	ctx := fmt.Sprintf(format, args...)
	msg := "error message"
	err := errors.New(msg)
	wrappedErr := Wrapf(err, format, args...)
	eExt, ok := wrappedErr.(*errorExt)
	if !s.True(ok, "wrong error return type") {
		return
	}
	s.Equal(err, eExt.cause, "cause should be original error")
	if s.Len(eExt.context, 1, "should have added context") {
		s.Equal(ctx, eExt.context[0], "should be correct context")
	}
	s.NotNil(eExt.data, "data should be initialized")
	s.True(len(eExt.pcs) > 0, "callstack should be generated")
	// Wrap an errorExt
	// NOTE(review): this re-wrap uses Wrap with a pre-formatted string rather
	// than Wrapf — presumably intentional, but it leaves Wrapf-on-errorExt
	// untested; confirm.
	format = "%s:%d"
	args = []interface{}{"bar", uint64(20)}
	ctx2 := fmt.Sprintf(format, args...)
	rewrappedErr := Wrap(wrappedErr, ctx2)
	eExt, ok = rewrappedErr.(*errorExt)
	if !s.True(ok, "wrong error return type") {
		return
	}
	s.Equal(err, eExt.cause, "cause should be original error")
	if s.Len(eExt.context, 2, "should have added context") {
		s.Equal(ctx, eExt.context[0], "should be correct context")
		s.Equal(ctx2, eExt.context[1], "should be correct context")
	}
}

// TestWrapv checks Wrapv attaches key/value data, merges values on re-wrap,
// and tolerates nil value maps.
func (s *Errors) TestWrapv() {
	// Wrap a regular error with nil values
	msg := "error message"
	err := errors.New(msg)
	wrappedErr := Wrapv(err, nil)
	eExt, ok := wrappedErr.(*errorExt)
	if !s.True(ok, "wrong error return type") {
		return
	}
	if s.NotNil(eExt.cause, "cause should be created") {
		s.Equal(err, eExt.cause, "cause should be original error")
	}
	s.NotNil(eExt.data, "data should be initialized")
	// Wrap a regular error with values
	values := map[string]interface{}{"foo": "bar"}
	wrappedErr = Wrapv(err, values)
	eExt, ok = wrappedErr.(*errorExt)
	if !s.True(ok, "wrong error return type") {
		return
	}
	if s.NotNil(eExt.cause, "cause should be created") {
		s.Equal(err, eExt.cause, "cause should be original error")
	}
	s.Equal(values, eExt.data, "data should be values")
	// Wrap an errorExt with nil values
	rewrappedErr := Wrapv(wrappedErr, nil)
	s.Equal(wrappedErr, rewrappedErr, "should not reset data or change error")
	// Wrap an errorExt with values
	values2 := map[string]interface{}{"baz": "bang"}
	rewrappedErr = Wrapv(wrappedErr, values2)
	eExt, ok = rewrappedErr.(*errorExt)
	if !s.True(ok, "wrong error return type") {
		return
	}
	combinedValues := make(map[string]interface{})
	for k, v := range values {
		combinedValues[k] = v
	}
	for k, v := range values2 {
		combinedValues[k] = v
	}
	s.Equal(combinedValues, eExt.data, "data should be combined values")
}
// testErr is a fixture error carrying an exported JSON field so marshalling
// can surface it.
type testErr struct {
	SomeValue int `json:"someValue"`
	msg       string
}

// Error implements the error interface.
func (t *testErr) Error() string {
	return t.msg
}

// TestMarshalJSON checks the JSON form of a wrapped error: cause string with
// accumulated context, resolved stack frames, flattened data/cause fields,
// and omission of self-referential values.
func (s *Errors) TestMarshalJSON() {
	msg := "error message"
	ctx1 := "some context"
	ctx2 := "more context"
	values := map[string]interface{}{"foo": "bar"}
	origErr := &testErr{SomeValue: 5, msg: msg}
	err := Wrap(origErr, ctx1)
	err = Wrap(err, ctx2)
	err = Wrapv(err, values)
	err = Wrapv(err, map[string]interface{}{"self": err})
	j, jmErr := json.Marshal(err)
	if !s.NoError(jmErr, "failed to marshal error") {
		return
	}
	output := make(map[string]interface{})
	if !s.NoError(json.Unmarshal(j, &output), "failed to unmarshal output") {
		return
	}
	eExt := err.(*errorExt)
	// error message should be present
	causeI, ok := output["cause"]
	if s.True(ok, "output missing cause") {
		var cause string
		cause, ok = causeI.(string)
		if s.True(ok, "cause should be a string") {
			s.Equal(ctx2+": "+ctx1+": "+msg, cause, "unexpected cause string")
		}
	}
	// stack with strings should be present
	stackI, ok := output["stack"]
	if s.True(ok, "output missing stack") {
		var stackAI []interface{}
		stackAI, ok = stackI.([]interface{})
		if s.True(ok, "stack should be an array of interface{}") {
			stack := make([]string, len(stackAI))
			for i, v := range stackAI {
				stack[i], ok = v.(string)
				s.True(ok, "stack array value should be a string")
			}
			s.Equal(callstack(eExt.pcs), stack, "wrong stack")
		}
	}
	// data and cause fields should be top level
	values["someValue"] = origErr.SomeValue
	for k, v := range values {
		valueI, ok := output[k]
		if s.True(ok, "missing value:"+k) {
			s.EqualValues(v, valueI, "wrong value:"+k)
		}
	}
	// self error should be omitted
	s.Nil(output["self"])
}

// TestCause checks Cause unwraps to the original error, passes plain errors
// through, and returns nil for nil.
func (s *Errors) TestCause() {
	s.Nil(Cause(nil))
	err := errors.New("an error")
	if !s.NotNil(Cause(err)) {
		return
	}
	s.Equal(err, Cause(err))
	wrapped := Wrap(err, "context")
	s.Equal(err, Cause(wrapped))
	s.NotEqual(wrapped, Cause(wrapped))
}

// resetFirst creates an error whose stack starts in this helper.
func resetFirst() *errorExt {
	return New("first").(*errorExt)
}

// resetSecond rebuilds the error's stack so it starts in this helper.
func resetSecond(err error) *errorExt {
	return ResetStack(err).(*errorExt)
}

// TestResetStack verifies ResetStack replaces the recorded call stack with
// one rooted at the caller of ResetStack.
func (s *Errors) TestResetStack() {
	err := resetFirst()
	stack := callstack(err.pcs)
	s.Contains(stack[0], "resetFirst")
	err = resetSecond(err)
	stack = callstack(err.pcs)
	s.Contains(stack[0], "resetSecond")
}
|
// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package kvserver_test
import (
"context"
"fmt"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/errors"
"go.etcd.io/etcd/raft/v3"
)
// unreliableRaftHandlerFuncs bundles the optional predicates used by
// unreliableRaftHandler to decide which Raft traffic to drop.
type unreliableRaftHandlerFuncs struct {
	// If non-nil, can return false to avoid dropping the msg to
	// unreliableRaftHandler.rangeID. If nil, all messages pertaining to the
	// respective range are dropped.
	dropReq  func(*kvserver.RaftMessageRequest) bool
	dropHB   func(*kvserver.RaftHeartbeat) bool
	dropResp func(*kvserver.RaftMessageResponse) bool
	// snapErr defaults to returning nil.
	snapErr func(*kvserver.SnapshotRequest_Header) error
}
// noopRaftHandlerFuncs returns a set of predicates that drop nothing: every
// request, heartbeat, and response is let through.
func noopRaftHandlerFuncs() unreliableRaftHandlerFuncs {
	var passAll unreliableRaftHandlerFuncs
	passAll.dropReq = func(*kvserver.RaftMessageRequest) bool { return false }
	passAll.dropHB = func(*kvserver.RaftHeartbeat) bool { return false }
	passAll.dropResp = func(*kvserver.RaftMessageResponse) bool { return false }
	return passAll
}
// unreliableRaftHandler drops all Raft messages that are addressed to the
// specified rangeID, but lets all other messages through.
type unreliableRaftHandler struct {
	// name, when non-empty, is used as a prefix in drop log messages.
	name    string
	rangeID roachpb.RangeID
	// The wrapped handler receives everything that is not filtered out.
	kvserver.RaftMessageHandler
	unreliableRaftHandlerFuncs
}
// HandleRaftRequest implements kvserver.RaftMessageHandler. Requests that
// carry heartbeats get their heartbeats filtered through filterHeartbeats
// (on a copy, so the caller's request is untouched); other requests for
// h.rangeID are dropped (subject to h.dropReq) with a log message.
// Everything else is forwarded to the wrapped handler.
func (h *unreliableRaftHandler) HandleRaftRequest(
	ctx context.Context,
	req *kvserver.RaftMessageRequest,
	respStream kvserver.RaftMessageResponseStream,
) *roachpb.Error {
	if len(req.Heartbeats)+len(req.HeartbeatResps) > 0 {
		// Work on a shallow copy so the caller's request is not mutated.
		reqCpy := *req
		req = &reqCpy
		req.Heartbeats = h.filterHeartbeats(req.Heartbeats)
		req.HeartbeatResps = h.filterHeartbeats(req.HeartbeatResps)
		if len(req.Heartbeats)+len(req.HeartbeatResps) == 0 {
			// Entirely filtered.
			return nil
		}
	} else if req.RangeID == h.rangeID {
		// A nil dropReq means "drop everything for this range".
		if h.dropReq == nil || h.dropReq(req) {
			var prefix string
			if h.name != "" {
				prefix = fmt.Sprintf("[%s] ", h.name)
			}
			log.Infof(
				ctx,
				"%sdropping r%d Raft message %s",
				prefix,
				req.RangeID,
				raft.DescribeMessage(req.Message, func([]byte) string {
					return "<omitted>"
				}),
			)
			return nil
		}
	}
	return h.RaftMessageHandler.HandleRaftRequest(ctx, req, respStream)
}
// filterHeartbeats returns the subset of hbs to let through: heartbeats for
// other ranges always pass, and heartbeats for h.rangeID pass only when a
// dropHB predicate is installed and declines to drop them.
func (h *unreliableRaftHandler) filterHeartbeats(
	hbs []kvserver.RaftHeartbeat,
) []kvserver.RaftHeartbeat {
	if len(hbs) == 0 {
		return hbs
	}
	var kept []kvserver.RaftHeartbeat
	for i := range hbs {
		hb := &hbs[i]
		// Drop iff the heartbeat targets our range and either no predicate
		// is installed (drop everything) or the predicate says to drop.
		drop := hb.RangeID == h.rangeID && (h.dropHB == nil || h.dropHB(hb))
		if !drop {
			kept = append(kept, *hb)
		}
	}
	return kept
}
// HandleRaftResponse implements kvserver.RaftMessageHandler, dropping
// responses for h.rangeID unless dropResp is set and returns false.
func (h *unreliableRaftHandler) HandleRaftResponse(
	ctx context.Context, resp *kvserver.RaftMessageResponse,
) error {
	if resp.RangeID == h.rangeID && (h.dropResp == nil || h.dropResp(resp)) {
		return nil
	}
	return h.RaftMessageHandler.HandleRaftResponse(ctx, resp)
}
// HandleSnapshot implements kvserver.RaftMessageHandler. For snapshots
// targeting h.rangeID, an installed snapErr may veto the snapshot by
// returning a non-nil error; otherwise the snapshot is forwarded unchanged.
func (h *unreliableRaftHandler) HandleSnapshot(
	header *kvserver.SnapshotRequest_Header, respStream kvserver.SnapshotResponseStream,
) error {
	if h.snapErr != nil && header.RaftMessageRequest.RangeID == h.rangeID {
		if err := h.snapErr(header); err != nil {
			return err
		}
	}
	return h.RaftMessageHandler.HandleSnapshot(header, respStream)
}
// testClusterStoreRaftMessageHandler exists to allow a store to be stopped and
// restarted while maintaining a partition using an unreliableRaftHandler:
// it re-resolves the store from the TestCluster on every call instead of
// holding a *Store.
type testClusterStoreRaftMessageHandler struct {
	tc       *testcluster.TestCluster
	storeIdx int
}
// getStore resolves the first store of the server at h.storeIdx.
func (h *testClusterStoreRaftMessageHandler) getStore() (*kvserver.Store, error) {
	server := h.tc.Servers[h.storeIdx]
	return server.Stores().GetStore(server.GetFirstStoreID())
}
// HandleRaftRequest forwards the request to the configured server's current
// store, re-resolving the store so restarts are tolerated.
func (h *testClusterStoreRaftMessageHandler) HandleRaftRequest(
	ctx context.Context,
	req *kvserver.RaftMessageRequest,
	respStream kvserver.RaftMessageResponseStream,
) *roachpb.Error {
	store, err := h.getStore()
	if err != nil {
		return roachpb.NewError(err)
	}
	return store.HandleRaftRequest(ctx, req, respStream)
}
// HandleRaftResponse forwards the response to the configured server's
// current store.
func (h *testClusterStoreRaftMessageHandler) HandleRaftResponse(
	ctx context.Context, resp *kvserver.RaftMessageResponse,
) error {
	store, err := h.getStore()
	if err != nil {
		return err
	}
	return store.HandleRaftResponse(ctx, resp)
}
// HandleSnapshot forwards the snapshot to the configured server's current
// store.
func (h *testClusterStoreRaftMessageHandler) HandleSnapshot(
	header *kvserver.SnapshotRequest_Header, respStream kvserver.SnapshotResponseStream,
) error {
	store, err := h.getStore()
	if err != nil {
		return err
	}
	return store.HandleSnapshot(header, respStream)
}
// testClusterPartitionedRange is a convenient abstraction to create a range
// on a node in a TestCluster which can be partitioned and unpartitioned.
type testClusterPartitionedRange struct {
	rangeID roachpb.RangeID
	mu      struct {
		syncutil.RWMutex
		// partitionedNode is the index of the node that is cut off while
		// the partition is active.
		partitionedNode int
		// partitioned toggles the partition on and off; see set().
		partitioned bool
		// partitionedReplicas holds the replica IDs whose heartbeats and
		// inbound snapshots are filtered while partitioned.
		partitionedReplicas map[roachpb.ReplicaID]bool
	}
	// handlers are the installed unreliableRaftHandlers, reused by extend().
	handlers []kvserver.RaftMessageHandler
}
// setupPartitionedRange sets up a testClusterPartitionedRange for the provided
// TestCluster, rangeID, and node index in the TestCluster. The range is
// initially not partitioned.
//
// We're going to set up the cluster with partitioning so that we can
// partition node p from the others. We do this by installing
// unreliableRaftHandler listeners on all three Stores which we can enable
// and disable with an atomic. The handler on the partitioned store filters
// out all messages while the handler on the other two stores only filters
// out messages from the partitioned store. When activated the configuration
// looks like:
//
// [p]
// x x
// / \
// x x
// [*]<---->[*]
//
// The activated argument controls whether the partition is activated when this
// function returns.
//
// If replicaID is zero then it is resolved by looking up the replica for the
// partitionedNode from the current range descriptor of rangeID.
func setupPartitionedRange(
	tc *testcluster.TestCluster,
	rangeID roachpb.RangeID,
	replicaID roachpb.ReplicaID,
	partitionedNode int,
	activated bool,
	funcs unreliableRaftHandlerFuncs,
) (*testClusterPartitionedRange, error) {
	// One store-backed handler per server; these tolerate store restarts
	// because they re-resolve the store on every call.
	handlers := make([]kvserver.RaftMessageHandler, len(tc.Servers))
	for i := range tc.Servers {
		handlers[i] = &testClusterStoreRaftMessageHandler{tc: tc, storeIdx: i}
	}
	return setupPartitionedRangeWithHandlers(tc, rangeID, replicaID, partitionedNode, activated, handlers, funcs)
}
// setupPartitionedRangeWithHandlers is the handler-injecting variant of
// setupPartitionedRange: it wraps each provided handler in an
// unreliableRaftHandler for rangeID and installs the wrappers on every
// server's RaftTransport. If replicaID is zero it is resolved from the
// partitioned node's current replica of rangeID. The partition starts out
// active iff activated is true.
func setupPartitionedRangeWithHandlers(
	tc *testcluster.TestCluster,
	rangeID roachpb.RangeID,
	replicaID roachpb.ReplicaID,
	partitionedNode int,
	activated bool,
	handlers []kvserver.RaftMessageHandler,
	funcs unreliableRaftHandlerFuncs,
) (*testClusterPartitionedRange, error) {
	pr := &testClusterPartitionedRange{
		rangeID:  rangeID,
		handlers: make([]kvserver.RaftMessageHandler, 0, len(handlers)),
	}
	pr.mu.partitioned = activated
	pr.mu.partitionedNode = partitionedNode
	if replicaID == 0 {
		// Resolve the replica ID from the partitioned node's own replica.
		ts := tc.Servers[partitionedNode]
		store, err := ts.Stores().GetStore(ts.GetFirstStoreID())
		if err != nil {
			return nil, err
		}
		partRepl, err := store.GetReplica(rangeID)
		if err != nil {
			return nil, err
		}
		partReplDesc, err := partRepl.GetReplicaDescriptor()
		if err != nil {
			return nil, err
		}
		replicaID = partReplDesc.ReplicaID
	}
	pr.mu.partitionedReplicas = map[roachpb.ReplicaID]bool{
		replicaID: true,
	}
	for i := range tc.Servers {
		// s is captured by the closures below; each server gets its own copy.
		s := i
		h := &unreliableRaftHandler{
			rangeID:                    rangeID,
			RaftMessageHandler:         handlers[s],
			unreliableRaftHandlerFuncs: funcs,
		}
		// Only filter messages from the partitioned store on the other
		// two stores.
		if h.dropReq == nil {
			h.dropReq = func(req *kvserver.RaftMessageRequest) bool {
				pr.mu.RLock()
				defer pr.mu.RUnlock()
				// Drop requests arriving at the partitioned node, and
				// requests arriving elsewhere from the partitioned store.
				// NOTE(review): this maps node index to StoreID by adding
				// 1 — holds only while node i runs store i+1; confirm.
				return pr.mu.partitioned &&
					(s == pr.mu.partitionedNode ||
						req.FromReplica.StoreID == roachpb.StoreID(pr.mu.partitionedNode)+1)
			}
		}
		if h.dropHB == nil {
			h.dropHB = func(hb *kvserver.RaftHeartbeat) bool {
				pr.mu.RLock()
				defer pr.mu.RUnlock()
				if !pr.mu.partitioned {
					return false
				}
				// NOTE(review): this reads the captured partitionedNode
				// argument while dropReq reads pr.mu.partitionedNode; they
				// agree only because the field is never reassigned.
				if s == partitionedNode {
					return true
				}
				return pr.mu.partitionedReplicas[hb.FromReplicaID]
			}
		}
		if h.snapErr == nil {
			h.snapErr = func(header *kvserver.SnapshotRequest_Header) error {
				pr.mu.RLock()
				defer pr.mu.RUnlock()
				if !pr.mu.partitioned {
					return nil
				}
				// Snapshots destined for a partitioned replica fail.
				if pr.mu.partitionedReplicas[header.RaftMessageRequest.ToReplica.ReplicaID] {
					return errors.New("partitioned")
				}
				return nil
			}
		}
		pr.handlers = append(pr.handlers, h)
		tc.Servers[s].RaftTransport().Listen(tc.Target(s).StoreID, h)
	}
	return pr, nil
}
// deactivate lifts the partition.
func (pr *testClusterPartitionedRange) deactivate() { pr.set(false) }

// activate enables the partition.
func (pr *testClusterPartitionedRange) activate() { pr.set(true) }

// set toggles whether the partition is in effect, under the write lock.
func (pr *testClusterPartitionedRange) set(active bool) {
	pr.mu.Lock()
	defer pr.mu.Unlock()
	pr.mu.partitioned = active
}
// addReplica marks an additional replica ID whose heartbeats and inbound
// snapshots are filtered while the partition is active.
func (pr *testClusterPartitionedRange) addReplica(replicaID roachpb.ReplicaID) {
	pr.mu.Lock()
	defer pr.mu.Unlock()
	pr.mu.partitionedReplicas[replicaID] = true
}
// extend installs the partition for an additional range, reusing pr's
// existing handler chain so multiple ranges share one partition setup.
func (pr *testClusterPartitionedRange) extend(
	tc *testcluster.TestCluster,
	rangeID roachpb.RangeID,
	replicaID roachpb.ReplicaID,
	partitionedNode int,
	activated bool,
	funcs unreliableRaftHandlerFuncs,
) (*testClusterPartitionedRange, error) {
	return setupPartitionedRangeWithHandlers(tc, rangeID, replicaID, partitionedNode, activated, pr.handlers, funcs)
}
|
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
//
package api
import (
"net/url"
"strconv"
"github.com/mattermost/mattermost-cloud/model"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// logSecurityLockConflict emits a warning that an API request conflicted
// with a security lock on the given resource type.
func logSecurityLockConflict(resourceType string, logger logrus.FieldLogger) {
	logger.WithField("api-security-lock-conflict", resourceType).Warn("API security lock conflict detected")
}
func parseString(u *url.URL, name string, defaultValue string) string {
valueStr := u.Query().Get(name)
if valueStr == "" {
return defaultValue
}
return valueStr
}
// parseInt reads the named query parameter from u as an integer, falling
// back to defaultValue when the parameter is absent or empty. A value that
// does not parse yields a wrapped error and 0.
func parseInt(u *url.URL, name string, defaultValue int) (int, error) {
	raw := u.Query().Get(name)
	if raw == "" {
		return defaultValue, nil
	}
	parsed, err := strconv.Atoi(raw)
	if err != nil {
		return 0, errors.Wrapf(err, "failed to parse %s as integer", name)
	}
	return parsed, nil
}
// parseBool reads the named query parameter from u as a boolean, falling
// back to defaultValue when the parameter is absent or empty. A value that
// does not parse yields a wrapped error and false.
func parseBool(u *url.URL, name string, defaultValue bool) (bool, error) {
	raw := u.Query().Get(name)
	if raw == "" {
		return defaultValue, nil
	}
	parsed, err := strconv.ParseBool(raw)
	if err != nil {
		return false, errors.Wrapf(err, "failed to parse %s as boolean", name)
	}
	return parsed, nil
}
// parsePaging extracts the standard paging parameters (page, per_page,
// include_deleted) from the request URL, applying defaults of 0, 100, and
// false respectively.
func parsePaging(u *url.URL) (model.Paging, error) {
	var paging model.Paging
	var err error
	if paging.Page, err = parseInt(u, "page", 0); err != nil {
		return model.Paging{}, err
	}
	if paging.PerPage, err = parseInt(u, "per_page", 100); err != nil {
		return model.Paging{}, err
	}
	if paging.IncludeDeleted, err = parseBool(u, "include_deleted", false); err != nil {
		return model.Paging{}, err
	}
	return paging, nil
}
// parseGroupConfig extracts the include_group_config and
// include_group_config_overrides flags from the URL; both default to true.
func parseGroupConfig(u *url.URL) (bool, bool, error) {
	cfg, err := parseBool(u, "include_group_config", true)
	if err != nil {
		return false, false, err
	}
	overrides, err := parseBool(u, "include_group_config_overrides", true)
	if err != nil {
		return false, false, err
	}
	return cfg, overrides, nil
}
|
// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package kvcoord
import "github.com/cockroachdb/cockroach/pkg/base"
// ClientTestingKnobs contains testing options that dictate the behavior
// of the key-value client.
type ClientTestingKnobs struct {
	// The RPC dispatcher. Defaults to grpc but can be changed here for
	// testing purposes.
	TransportFactory TransportFactory

	// The maximum number of times a txn will attempt to refresh its
	// spans for a single transactional batch.
	// 0 means use a default. -1 means disable refresh.
	MaxTxnRefreshAttempts int

	// CondenseRefreshSpansFilter, if set, is called when the span refresher is
	// considering condensing the refresh spans. If it returns false, condensing
	// will not be attempted and the span refresher will behave as if condensing
	// failed to save enough memory.
	CondenseRefreshSpansFilter func() bool

	// LatencyFunc, if set, overrides RPCContext.RemoteClocks.Latency as the
	// function used by the DistSender to order replicas for follower reads.
	LatencyFunc LatencyFunc

	// DontReorderReplicas, if set, makes the DistSender try the replicas in
	// the order they appear in the descriptor, instead of trying to reorder
	// them by latency.
	DontReorderReplicas bool
}
// Compile-time assertion that ClientTestingKnobs implements
// base.ModuleTestingKnobs.
var _ base.ModuleTestingKnobs = &ClientTestingKnobs{}

// ModuleTestingKnobs is part of the base.ModuleTestingKnobs interface.
func (*ClientTestingKnobs) ModuleTestingKnobs() {}
|
package models
import (
"errors"
"net/http"
"strconv"
"github.com/labstack/echo"
)
// Product represents a purchasable item that belongs to a category.
type Product struct {
	ID         string   `json:"id"`
	Name       string   `json:"name"`
	ListOrder  int64    `json:"list_order"`
	OptionIDs  []string `json:"option_ids"`
	CategoryID string   `json:"category_id"`
	Price      float64  `json:"price"`
	// CreatedAt time.Time `json:"-"`
	// UpdatedAt time.Time `json:"-"`
	// DeletedAt time.Time `json:"-"`
}

// Products is a collection of Product values.
type Products []Product
// ListProducts returns the products belonging to the given category. The
// error result is always nil today and exists for interface symmetry.
func ListProducts(categoryID string) (*Products, error) {
	var matches Products
	for i := range ProductSlice {
		if ProductSlice[i].CategoryID == categoryID {
			matches = append(matches, ProductSlice[i])
		}
	}
	return &matches, nil
}
// GetProduct looks up a product by ID and returns a pointer to a copy of it.
// NOTE(review): the categoryID argument is currently not consulted during
// the lookup.
func GetProduct(productID, categoryID string) (*Product, error) {
	for i := range ProductSlice {
		if ProductSlice[i].ID == productID {
			found := ProductSlice[i]
			return &found, nil
		}
	}
	return nil, errors.New("not found")
}
// Create appends the product to the in-memory store, assigning it a
// sequential ID. NOTE(review): IDs derived from len(ProductSlice) can
// repeat after a Delete — confirm whether collisions matter for callers.
func (product *Product) Create() (*Product, error) {
	product.ID = strconv.Itoa(len(ProductSlice) + 1)
	ProductSlice = append(ProductSlice, *product)
	return product, nil
}
// Delete removes the product (matched by ID) from the in-memory store.
// The error message is kept verbatim for compatibility with callers.
func (product *Product) Delete() error {
	for idx := range ProductSlice {
		if ProductSlice[idx].ID == product.ID {
			ProductSlice = append(ProductSlice[:idx], ProductSlice[idx+1:]...)
			return nil
		}
	}
	return errors.New("Silinemedi")
}
// Update replaces the stored product that shares this product's ID and
// returns the receiver; an unknown ID yields a 400 HTTP error.
func (product *Product) Update() (*Product, error) {
	for idx := range ProductSlice {
		if ProductSlice[idx].ID == product.ID {
			// Overwrite the element in place; equivalent to the previous
			// splice-and-reassemble append dance.
			ProductSlice[idx] = *product
			return product, nil
		}
	}
	return nil, echo.NewHTTPError(http.StatusBadRequest)
}
|
package main
import (
"math"
)
// pp appears to search, for a given n, the perfect power (num^i with i >= 2)
// closest to n from below, returning that power and how many exponent
// choices attain the same distance. NOTE(review): the control flow below
// mutates both n and the loop index (i = 20 to force termination, i-- with
// n-- to retry), so this summary is a best-effort reading — confirm against
// the caller's expectations before relying on it.
func pp(n int) (a int, b int) {
	var fixn int = n
	var num, currentNear, currentNearTemp float64
	var maxNo int
	var count int
	var flag bool = false
	for i := 2; i <= int(math.Floor(math.Log2(float64(n)))); i++ {
		// Largest integer base whose i-th power does not exceed n.
		num = math.Floor(math.Pow(float64(n), float64(1.0/float64(i))))
		currentNearTemp = float64(fixn) - math.Pow(num, float64(i))
		k := fixn / 2
		j := math.Pow(num, float64(i))
		if j <= float64(k) {
			// When num^i is at most fixn/2, also consider i^(i+1).
			m := math.Pow(float64(i), float64(i+1))
			if m < float64(fixn) {
				currentNearTemp = float64(fixn) - m
			}
		}
		if num == 1 {
			// Base collapsed to 1; force the loop to terminate.
			i = 20
		} else {
			if currentNearTemp != 0 {
				if flag == false {
					// First candidate seeds the best-so-far distance.
					flag = true
					currentNear = currentNearTemp
				}
				if currentNearTemp < currentNear {
					// Strictly closer power: new best, reset the count.
					currentNear = currentNearTemp
					maxNo = int(fixn) - int(currentNear)
					count = 1
				} else if currentNearTemp == currentNear {
					// Tie: same distance reached by another exponent.
					currentNear = currentNearTemp
					maxNo = int(fixn) - int(currentNear)
					count++
				}
			} else {
				// Exact power hit: retry the same exponent with n-1.
				n--
				i--
			}
		}
	}
	return maxNo, count
}
|
package diff
import (
"testing"
"github.com/containerum/kube-client/pkg/model"
)
// TestDiff exercises Diff with a deployment that replaces, removes, and adds
// containers relative to another, and logs the rendered diff.
func TestDiff(t *testing.T) {
	previous := model.Deployment{
		Containers: []model.Container{
			{Name: "gateway", Image: "nginx"},
			{Name: "feed", Image: "wordpress"},
		},
	}
	current := model.Deployment{
		Containers: []model.Container{
			{Name: "gateway", Image: "caddy"},
			{Name: "storage", Image: "mongo"},
			{Name: "ai", Image: "pytnon"},
			{Name: "blog", Image: "box"},
		},
	}
	t.Log("\n", Diff(current, previous))
}
|
package lc
// Time: O(n)
// Benchmark: 4ms 3.1mb | 89% 13%

// minTimeToVisitAllPoints returns the minimum number of unit steps needed to
// visit every point in order, where one step moves one unit horizontally,
// vertically, or diagonally. The cost between consecutive points is the
// Chebyshev distance max(|dx|, |dy|).
func minTimeToVisitAllPoints(points [][]int) int {
	total := 0
	for i := 1; i < len(points); i++ {
		dx := points[i][0] - points[i-1][0]
		if dx < 0 {
			dx = -dx
		}
		dy := points[i][1] - points[i-1][1]
		if dy < 0 {
			dy = -dy
		}
		if dx > dy {
			total += dx
		} else {
			total += dy
		}
	}
	return total
}
|
package clop
import (
"bytes"
"fmt"
"go/format"
"strings"
)
// genStructName derives the generated struct's type name from the parsed
// function name k by appending the "AutoGen" suffix.
func genStructName(k string) string {
	return fmt.Sprintf("%sAutoGen", k)
}
// genVarName derives the generated variable's name by appending "Var".
func genVarName(varName string) string {
	return fmt.Sprintf("%sVar", varName)
}
// genStructBytes generates, for every parsed function that has a parse
// function, a Go struct whose fields carry clop tags (option name, default
// value, usage), plus an optional package header and main function. Each
// per-function chunk is gofmt-formatted and appended to the returned bytes.
func genStructBytes(p *ParseFlag) ([]byte, error) {
	var code bytes.Buffer
	var allCode bytes.Buffer
	for k, funcAndArgs := range p.funcAndArgs {
		v := funcAndArgs
		if !v.haveParseFunc {
			continue
		}
		if p.haveImportPath {
			// NOTE(review): this header is emitted once per generated chunk;
			// presumably each chunk becomes its own file — confirm.
			code.WriteString(`
package main

import (
	"github.com/guonaihong/clop"
)
`)
		}
		if !p.haveStruct {
			continue
		}
		code.WriteString(fmt.Sprintf("type %s struct{", genStructName(k)))
		for _, arg := range v.args {
			// The option name is essential; skip fields without one.
			if len(arg.optName) == 0 || len(arg.varName) == 0 {
				continue
			}
			// Write the field name (uppercased so it is exported) and type.
			varName := arg.varName
			if varName[0] >= 'a' && varName[0] <= 'z' {
				varName = string(varName[0]-'a'+'A') + varName[1:]
			}
			code.WriteString(fmt.Sprintf("%s %s", varName, arg.typeName))
			var clopTag bytes.Buffer
			// Open the struct tag and write the option name.
			clopTag.WriteString("`clop:\"")
			numMinuses := "-"
			if len(arg.optName) > 1 {
				numMinuses = "--"
			}
			clopTag.WriteString(fmt.Sprintf("%s%s\" ", numMinuses, arg.optName))
			// Write the default value, if any.
			if len(arg.defVal) > 0 {
				clopTag.WriteString(fmt.Sprintf("default:\"%s\" ", arg.defVal))
			}
			// Write the usage string, if any.
			if len(arg.usage) > 0 {
				clopTag.WriteString(fmt.Sprintf("usage:\"%s\" ", arg.usage))
			}
			// Always close the tag and terminate the field line. The
			// previous version closed the backtick and emitted the newline
			// only when a usage string was present, which produced an
			// unterminated tag (and fused field lines) that made
			// format.Source fail for fields without usage text.
			clopTag.WriteString("`\n")
			code.WriteString(clopTag.String())
		}
		code.WriteString("}")
		if p.haveMain {
			varName := strings.ToLower(k)
			code.WriteString(fmt.Sprintf(`
func main() {
	var %s %s
	clop.Bind(&%s)
}`, genVarName(varName), genStructName(k), genVarName(varName)))
		}
		fmtCode, err := format.Source(code.Bytes())
		if err != nil {
			return nil, err
		}
		allCode.Write(fmtCode)
		code.Reset()
	}
	return allCode.Bytes(), nil
}
|
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package policy
import (
"context"
"strings"
"time"
"chromiumos/tast/common/pci"
"chromiumos/tast/common/policy"
"chromiumos/tast/common/policy/fakedms"
"chromiumos/tast/local/arc"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/policyutil/fixtures"
"chromiumos/tast/testing"
)
// init registers the ArcBackupRestoreServiceEnabled policy test with the
// tast framework, parameterized for the container (android_p) and VM
// (android_vm) ARC variants.
func init() {
	testing.AddTest(&testing.Test{
		Func:         ArcBackupRestoreServiceEnabled,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Test the behavior of ArcBackupRestoreServiceEnabled policy: check the Backup Manager state after setting the policy",
		Contacts: []string{
			"gabormagda@google.com", // Test author
		},
		SoftwareDeps: []string{"chrome"},
		// TODO(http://b/172073846): Test is disabled until it can be fixed
		// Attr: []string{"group:mainline", "informational"},
		Params: []testing.Param{{
			ExtraSoftwareDeps: []string{"android_p"},
		}, {
			Name:              "vm",
			ExtraSoftwareDeps: []string{"android_vm"},
		}},
		Timeout: 8 * time.Minute, // There is a need to start Chrome 4 times.
		SearchFlags: []*testing.StringPair{
			pci.SearchFlag(&policy.ArcBackupRestoreServiceEnabled{}, pci.VerifiedFunctionalityOS),
		},
	})
}
// ArcBackupRestoreServiceEnabled tests the ArcBackupRestoreServiceEnabled
// policy: for each policy value it restarts Chrome with ARC against a
// FakeDMS serving that value, then checks the Android Backup Manager state
// reported by "bmgr enabled" over adb.
func ArcBackupRestoreServiceEnabled(ctx context.Context, s *testing.State) {
	// Start FakeDMS.
	fdms, err := fakedms.New(ctx, s.OutDir())
	if err != nil {
		s.Fatal("Failed to start FakeDMS: ", err)
	}
	defer fdms.Stop(ctx)
	for _, param := range []struct {
		name        string                                 // subtest name
		wantEnabled bool                                   // expected Backup Manager state
		value       *policy.ArcBackupRestoreServiceEnabled // policy value under test
	}{
		{
			name:        "disabled",
			wantEnabled: false,
			value:       &policy.ArcBackupRestoreServiceEnabled{Val: 0},
		},
		{
			name:        "user_decides",
			wantEnabled: false,
			value:       &policy.ArcBackupRestoreServiceEnabled{Val: 1},
		},
		{
			name:        "enabled",
			wantEnabled: true,
			value:       &policy.ArcBackupRestoreServiceEnabled{Val: 2},
		},
		{
			name:        "unset",
			wantEnabled: false,
			value:       &policy.ArcBackupRestoreServiceEnabled{Stat: policy.StatusUnset},
		},
	} {
		s.Run(ctx, param.name, func(ctx context.Context, s *testing.State) {
			// Update the policy blob.
			pb := policy.NewBlob()
			pb.AddPolicies([]policy.Policy{param.value})
			if err := fdms.WritePolicyBlob(pb); err != nil {
				s.Fatal("Failed to write policies to FakeDMS: ", err)
			}
			// Start a Chrome instance that will fetch policies from the FakeDMS.
			// This policy must be updated before starting Chrome.
			cr, err := chrome.New(ctx,
				chrome.FakeLogin(chrome.Creds{User: fixtures.Username, Pass: fixtures.Password}),
				chrome.DMSPolicy(fdms.URL),
				chrome.ARCEnabled())
			if err != nil {
				s.Fatal("Chrome login failed: ", err)
			}
			defer cr.Close(ctx)
			a, err := arc.New(ctx, s.OutDir())
			if err != nil {
				s.Fatal("Failed to start ARC: ", err)
			}
			defer a.Close(ctx)
			// Get ARC Backup Manager state by parsing the adb output.
			var enabled bool
			if output, err := a.Command(ctx, "bmgr", "enabled").Output(); err != nil {
				s.Fatal("Failed to run adb command: ", err)
			} else if strings.Contains(string(output), "enabled") {
				enabled = true
			} else if strings.Contains(string(output), "disabled") {
				enabled = false
			} else {
				s.Fatalf("Invalid adb response: %q", string(output))
			}
			if enabled != param.wantEnabled {
				s.Errorf("Unexpected ARC backup restore service state: got %t; want %t", enabled, param.wantEnabled)
			}
		})
	}
}
|
package commands
import (
"github.com/brooklyncentral/brooklyn-cli/net"
)
// CatalogEntity holds the network connection used by catalog commands.
type CatalogEntity struct {
	network *net.Network
}
// NewCatalogEntity constructs a CatalogEntity bound to the given network.
func NewCatalogEntity(network *net.Network) (cmd *CatalogEntity) {
	return &CatalogEntity{network: network}
}
|
package weather
import (
"encoding/json"
"errors"
"testing"
"github.com/stretchr/testify/assert"
)
// weatherProvider is a canned test double: per GetForecastData below,
// "chile" yields malformed fixture data and "uruguay" yields an error.
type weatherProvider struct{}
// GetForecastData returns canned provider data: well-formed by default,
// malformed for "chile", and a request error for "uruguay". The remaining
// arguments are ignored.
func (w weatherProvider) GetForecastData(country, state, city string, forecastDays uint, client httpClient) (map[string]interface{}, error) {
	switch country {
	case "uruguay":
		return nil, errors.New("failed request")
	case "chile":
		return getProviderTestDataJSON(false)
	default:
		return getProviderTestDataJSON(true)
	}
}
// GetAdapter reports that this provider uses the weatherBit adapter.
func (w weatherProvider) GetAdapter() adapter {
	return weatherBit
}
func getProviderTestDataJSON(proper bool) (map[string]interface{}, error) {
d := wbTestData
if !proper {
d = wbWrongTestData
}
var res map[string]interface{}
if err := json.Unmarshal([]byte(d), &res); err != nil {
return nil, err
}
return res, nil
}
// TestWService_GetForecast verifies that a well-formed provider payload is
// mapped into the expected per-day forecast.
func TestWService_GetForecast(t *testing.T) {
	service, err := NewWeatherService(&weatherProvider{})
	// Previously this error was assigned and immediately overwritten without
	// being checked; a construction failure would have nil-panicked below.
	if err != nil {
		t.Fatalf("unexpected error creating service: %v", err)
	}
	forecast, err := service.GetForecast("argentina", "cordoba", "cordoba", 2)
	if err != nil {
		t.Errorf("unexpected error, got: %v", err)
	}
	expected := &Forecast{
		DateTempMap: map[int64]*DailyForecast{
			1491004800: {
				MaxTemp: 30,
				MinTemp: 26,
			},
			1491091200: {
				MaxTemp: 32,
				MinTemp: 21,
			},
		},
	}
	assert.Equal(t, expected, forecast)
}
// TestWService_GetForecast_MissingTempFields verifies that malformed
// provider data (the "chile" fixture) surfaces as an error.
func TestWService_GetForecast_MissingTempFields(t *testing.T) {
	service, err := NewWeatherService(&weatherProvider{})
	// Previously this error was silently overwritten without being checked.
	if err != nil {
		t.Fatalf("unexpected error creating service: %v", err)
	}
	forecast, err := service.GetForecast("chile", "some", "place", 2)
	if err == nil {
		t.Errorf("expected error, got: %v", forecast)
	}
}
// TestWService_GetForecast_InvalidStatusCode verifies that a failed provider
// request (the "uruguay" fixture) surfaces as an error.
func TestWService_GetForecast_InvalidStatusCode(t *testing.T) {
	service, err := NewWeatherService(&weatherProvider{})
	// Previously this error was silently overwritten without being checked.
	if err != nil {
		t.Fatalf("unexpected error creating service: %v", err)
	}
	forecast, err := service.GetForecast("uruguay", "some", "place", 2)
	if err == nil {
		t.Errorf("expected error, got: %v", forecast)
	}
}
|
package mondohttp
import (
"net/http"
"net/url"
"strings"
)
// NewAccountsRequest creates a request for a listing of the user's accounts.
// https://getmondo.co.uk/docs/#list-accounts.
func NewAccountsRequest(accessToken string) *http.Request {
	r, _ := http.NewRequest(http.MethodGet, ProductionAPI+"accounts", nil)
	r.Header.Set(auth(accessToken))
	return r
}
// NewBalanceRequest creates a request for an account's current balance.
// https://getmondo.co.uk/docs/#read-balance.
func NewBalanceRequest(accessToken, accountID string) *http.Request {
	endpoint := ProductionAPI + "balance?account_id=" + url.QueryEscape(accountID)
	r, _ := http.NewRequest(http.MethodGet, endpoint, nil)
	r.Header.Set(auth(accessToken))
	return r
}
// NewTransactionRequest creates a request for a single transaction,
// optionally expanding the merchant object.
// https://getmondo.co.uk/docs/#retrieve-transaction
func NewTransactionRequest(accessToken, transactionID string, expandMerchants bool) *http.Request {
	endpoint := ProductionAPI + "transactions/" + transactionID
	if expandMerchants {
		endpoint += "?expand%5B%5D=merchant"
	}
	r, _ := http.NewRequest(http.MethodGet, endpoint, nil)
	r.Header.Set(auth(accessToken))
	return r
}
// NewTransactionsRequest creates a request for a series of account
// transactions, with pagination and optional merchant expansion.
// https://getmondo.co.uk/docs/#list-transactions
func NewTransactionsRequest(accessToken, accountID string, expandMerchants bool, since, before string, limit int) *http.Request {
	params := &url.Values{"account_id": {accountID}}
	appendPaginationParams(params, since, before, limit)
	if expandMerchants {
		params.Add("expand[]", "merchant")
	}
	r, _ := http.NewRequest(http.MethodGet, ProductionAPI+"transactions?"+params.Encode(), nil)
	r.Header.Set(auth(accessToken))
	return r
}
// NewAnnotateTransactionRequest creates a request for updating annotations
// on a transaction, sent as a form-encoded PATCH body.
// https://getmondo.co.uk/docs/#annotate-transaction
func NewAnnotateTransactionRequest(accessToken, transactionID string, metadata map[string]string) *http.Request {
	form := &url.Values{}
	appendQueryMap(form, "metadata[", "]", metadata)
	r, _ := http.NewRequest("PATCH", ProductionAPI+"transactions/"+transactionID, strings.NewReader(form.Encode()))
	r.Header.Set(formContentType())
	r.Header.Set(auth(accessToken))
	return r
}
// NewCreateFeedItemRequest creates a request for adding a feed item to an
// account. The optional item URL is sent only when non-empty.
// https://getmondo.co.uk/docs/#create-feed-item
func NewCreateFeedItemRequest(accessToken, accountID, itemType, itemURL string, params map[string]string) *http.Request {
	form := &url.Values{
		"account_id": {accountID},
		"type":       {itemType},
	}
	if itemURL != "" {
		form.Set("url", itemURL)
	}
	appendQueryMap(form, "params[", "]", params)
	r, _ := http.NewRequest(http.MethodPost, ProductionAPI+"feed", strings.NewReader(form.Encode()))
	r.Header.Set(formContentType())
	r.Header.Set(auth(accessToken))
	return r
}
// NewCreateURLFeedItemRequest is shorthand for creating a request to add a
// basic account feed item with only a URL, title, and image.
// https://getmondo.co.uk/docs/#create-feed-item
func NewCreateURLFeedItemRequest(accessToken, accountID, url, title, imageURL string) *http.Request {
	params := map[string]string{"image_url": imageURL, "title": title}
	return NewCreateFeedItemRequest(accessToken, accountID, "basic", url, params)
}
// NewCreateBasicFeedItemRequest is shorthand for creating a request to add a
// basic account feed item; body text and colors are sent only when non-empty.
// https://getmondo.co.uk/docs/#create-feed-item
func NewCreateBasicFeedItemRequest(accessToken, accountID, url, title, imageURL, body, backgroundColor, titleColor, bodyColor string) *http.Request {
	params := map[string]string{
		"title":     title,
		"image_url": imageURL,
	}
	// Optional presentation fields are only included when set.
	optional := map[string]string{
		"body":             body,
		"background_color": backgroundColor,
		"title_color":      titleColor,
		"body_color":       bodyColor,
	}
	for key, val := range optional {
		if val != "" {
			params[key] = val
		}
	}
	return NewCreateFeedItemRequest(accessToken, accountID, "basic", url, params)
}
// TODO: https://getmondo.co.uk/docs/#webhooks
// TODO: https://getmondo.co.uk/docs/#attachments
|
package main
import "fmt"
// main is intentionally empty; this file only hosts isPalindrome.
func main() {
}
// isPalindrome reports whether the decimal representation of x reads the
// same forwards and backwards. Negative numbers are never palindromes
// because of the leading '-'.
func isPalindrome(x int) bool {
	digits := fmt.Sprintf("%d", x)
	for lo, hi := 0, len(digits)-1; lo < hi; lo, hi = lo+1, hi-1 {
		if digits[lo] != digits[hi] {
			return false
		}
	}
	return true
}
|
package server
import (
"errors"
"github.com/asaskevich/govalidator"
"github.com/sergeychur/avito_auto/internal/models"
"net/http"
"time"
)
// Validator checks links for well-formedness and reachability.
type Validator struct {
	// TimeOut is the HEAD-request timeout in seconds (scaled by
	// time.Second in ValidateURLExists).
	TimeOut int
}

// NewValidator constructs a Validator with the given timeout in seconds.
func NewValidator(timeOut int) *Validator {
	return &Validator{TimeOut: timeOut}
}
// ValidateLink checks that link.RealURL is a syntactically valid URL and
// that it responds to a HEAD request within the validator's timeout.
func (v *Validator) ValidateLink(link models.Link) error {
	if err := ValidateFormat(link.RealURL); err != nil {
		return err
	}
	return ValidateURLExists(time.Duration(v.TimeOut), link.RealURL)
}
// ValidateURLExists issues a HEAD request to url and reports any transport
// error. The timeOut argument is a bare count of seconds despite its
// time.Duration type; it is scaled by time.Second here, matching the
// previous behavior.
func ValidateURLExists(timeOut time.Duration, url string) error {
	client := http.Client{
		Timeout: timeOut * time.Second,
	}
	resp, err := client.Head(url)
	if err != nil {
		return err
	}
	// Close the body so the underlying connection can be reused; the
	// previous version discarded the response and leaked it.
	resp.Body.Close()
	return nil
}
func ValidateFormat(link string) error {
if govalidator.IsURL(link) {
return nil
}
return errors.New("not url")
} |
package authlete
import (
"fmt"
"net/http"
"time"
"github.com/dodosuke/authlete-go/pkg/util"
)
// AuthorizationRequest is a request to Authlete's /auth/authorization API.
//
// It carries the OAuth 2.0 authorization request parameters which are the
// request parameters that the OAuth 2.0 authorization endpoint
// of the service implementation received from the client application.
type AuthorizationRequest struct {
	// The value of Parameters is (1) the entire query string when
	// the HTTP method of the request from the client application is GET,
	// or (2) the entire entity body (which is formatted in
	// application/x-www-form-urlencoded) when the HTTP method of the
	// request from the client application is POST.
	Parameters string `json:"parameters"`
}
// SetParameters sets the value of Parameters in the AuthorizationRequest.
func (req *AuthorizationRequest) SetParameters(parameters string) {
	req.Parameters = parameters
}
// String returns a Go-syntax representation of the AuthorizationRequest.
func (req *AuthorizationRequest) String() string {
	return fmt.Sprintf("%#v", req)
}
// Process sends the request to Authlete's /auth/authorization API and maps
// the returned action onto an HTTP response: server errors, bad requests,
// redirects (302), and self-submitting forms (200) are rendered directly,
// while interactive and non-interactive flows are delegated to the
// corresponding handlers. An unrecognized action is reported as an error.
func (req *AuthorizationRequest) Process(api *API, r *http.Request, h AuthorizationRequestHandler) (*util.Response, error) {
	// Get a response from Authlete API server
	res, err := api.Authorization(req)
	if err != nil {
		return nil, err
	}
	// Follow the action in the response
	action, content := res.Action, res.ResponseContent
	switch action {
	case actionInternalServerError:
		// 500 Internal Server Error
		return util.InternalServerError(content), nil
	case actionBadRequest:
		// 400 Bad Request
		return util.BadRequest(content), nil
	case actionLocation:
		// 302 Found
		return util.Location(content), nil
	case actionForm:
		// 200 OK
		return util.Form(content), nil
	case actionInteraction:
		// Process the authorization request with user interaction.
		return handleInteraction(res, h)
	case actionNoInteraction:
		// Process the authorization request without user interaction.
		// The flow reaches here only when the authorization request
		// contains 'prompt=none'.
		return handleNoInteraction(api, res, h)
	default:
		return nil, fmt.Errorf("unexpected action %s at /api/auth/authorization", action)
	}
}
// AuthorizationResponse is a response from Authlete's /auth/authorization API.
// You can use the information to generate an authorization page.
type AuthorizationResponse struct {
	//ResultCode    string `json:"resultCode"`
	//ResultMessage string `json:"resultMessage"`
	Action               string   `json:"action"`
	Service              Service  `json:"service"`
	Client               Client   `json:"client"`
	Display              string   `json:"display"`
	MaxAge               int64    `json:"maxAge"`
	Scopes               []Scope  `json:"scopes"`
	UILocales            []string `json:"uiLocales"`
	ClaimsLocales        []string `json:"claimsLocales"`
	Claims               []string `json:"claims"`
	ACREssential         bool     `json:"acrEssential"`
	ClientIDAliasUsed    bool     `json:"clientIdAliasUsed"`
	ACRs                 []string `json:"acrs"`
	Subject              string   `json:"subject"`
	LoginHint            string   `json:"loginHint"`
	Prompts              []string `json:"prompts"`
	// NOTE(review): the "lowestPromt" tag (sic) is what this client decodes
	// from the wire today; confirm against the Authlete API before "fixing"
	// the spelling, since changing it alters decoding.
	LowestPrompt         string   `json:"lowestPromt"`
	RequestObjectPayload string   `json:"requestObjectPayload"`
	IDTokenClaims        string   `json:"idTokenClaims"`
	UserInfoClaims       string   `json:"userinfoClaims"`
	ResponseContent      string   `json:"responseContent"`
	Ticket               string   `json:"ticket"`
}
// String returns a Go-syntax representation of the AuthorizationResponse.
func (res *AuthorizationResponse) String() string {
	return fmt.Sprintf("%#v", res)
}
// AuthorizationRequestHandler supplies the application-specific pieces
// needed to process an authorization request: rendering the authorization
// page and describing the authenticated end-user (subject, auth time, ACR,
// properties, and granted scopes).
type AuthorizationRequestHandler interface {
	GenerateAuthorizationPage(res *AuthorizationResponse) (*util.Response, error)
	IsUserAuthenticated() bool
	GetUserAuthenticatedAt() int64
	GetUserSubject() string
	GetACR() string
	GetProperties() []Property
	GetScopes() []string
}
// handleInteraction delegates to the application handler to render an
// authorization page for the interactive flow.
func handleInteraction(res *AuthorizationResponse, h AuthorizationRequestHandler) (*util.Response, error) {
	return h.GenerateAuthorizationPage(res)
}
// handleNoInteraction processes an authorization request that must complete
// without user interaction (prompt=none). Each failed check short-circuits
// into an AuthorizationFailRequest with the corresponding reason code;
// if every check passes, an AuthorizationIssueRequest is processed.
func handleNoInteraction(api *API, res *AuthorizationResponse, h AuthorizationRequestHandler) (*util.Response, error) {
	// Check 1: End-user Authentication
	if !h.IsUserAuthenticated() {
		// A user must have logged in.
		req := &AuthorizationFailRequest{Ticket: res.Ticket, Reason: "NOT_LOGGED_IN"}
		return req.Process(api)
	}
	// Check 2: Max Age (0 means no maximum age is imposed)
	authTime := h.GetUserAuthenticatedAt()
	if res.MaxAge != 0 && time.Now().Unix() > res.MaxAge+authTime {
		// The maximum authentication age has elapsed.
		req := &AuthorizationFailRequest{Ticket: res.Ticket, Reason: "EXCEEDS_MAX_AGE"}
		return req.Process(api)
	}
	// Check 3: Subject
	subject := h.GetUserSubject()
	if res.Subject != "" && res.Subject != subject {
		// The current user differs from the subject required by the request.
		req := &AuthorizationFailRequest{Ticket: res.Ticket, Reason: "DIFFERENT_SUBJECT"}
		return req.Process(api)
	}
	// Check 4: ACR
	acr := h.GetACR()
	if !checkACR(res, acr) {
		// None of the requested ACRs is satisfied.
		req := &AuthorizationFailRequest{Ticket: res.Ticket, Reason: "ACR_NOT_SATISFIED"}
		return req.Process(api)
	}
	// All checks passed: issue the authorization.
	req := &AuthorizationIssueRequest{
		Ticket:     res.Ticket,
		Subject:    subject,
		AuthTime:   authTime,
		ACR:        acr,
		Properties: h.GetProperties(),
		Scopes:     h.GetScopes(),
	}
	return req.Process(api)
}
// checkACR reports whether the ACR (Authentication Context Class Reference)
// satisfied at user authentication fulfils the authorization request.
//
// It returns true when the request names no ACRs, when the requested ACRs
// are not essential, or when acr matches one of the requested values.
func checkACR(res *AuthorizationResponse, acr string) bool {
	// No ACR requested, or the requested ACRs are merely preferred (not
	// essential): anything is acceptable. len() on a nil slice is 0, so the
	// previous explicit nil check was redundant (staticcheck S1009).
	if len(res.ACRs) == 0 || !res.ACREssential {
		return true
	}
	for _, requested := range res.ACRs {
		if acr == requested {
			// OK. The ACR satisfied when the current user was
			// authenticated matches one of the requested ACRs.
			return true
		}
	}
	return false
}
|
package aggregatedprocessor
import (
"context"
"fmt"
"strings"
"sync"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/config/configmodels"
"go.opentelemetry.io/collector/consumer"
"go.opentelemetry.io/collector/processor/processorhelper"
"go.uber.org/zap"
)
// processorSettings captures everything needed to rebuild one aggregated
// processor when its configuration is reloaded (see ReloadProcessors).
type processorSettings struct {
	*processor
	nextConsumer consumer.TracesConsumer // consumer downstream of the whole subprocessor chain
	params       component.ProcessorCreateParams
	pipeline     []string // processor names making up the pipeline
	factories    map[configmodels.Type]component.ProcessorFactory
	config       *config
}

// Aggregator is in charge of the lifecycle of the aggregated processors
type Aggregator struct {
	logger               *zap.Logger // set lazily from the first CreateTraceProcessor call
	aggregatedprocessors []processorSettings
	processorFactories   map[configmodels.Type]component.ProcessorFactory
	configUpdate         chan configmodels.Processors // feed of configuration reloads; see ConfigUpdate
}
// New creates a new aggregator and starts the background goroutine that
// applies configuration updates sent on the ConfigUpdate channel.
func New(fs map[configmodels.Type]component.ProcessorFactory) *Aggregator {
	a := &Aggregator{
		processorFactories: fs,
		configUpdate:       make(chan configmodels.Processors), // unbuffered: senders block until consumed
	}
	go a.listenToConfigUpdate()
	return a
}
// listenToConfigUpdate consumes configuration updates from the configUpdate
// channel and applies them until the channel is closed.
func (p *Aggregator) listenToConfigUpdate() {
	// Ranging over the channel exits only when the channel is closed.
	// The previous implementation returned on any empty (len == 0) update,
	// so a legitimate-but-empty configuration silently killed this listener.
	for c := range p.configUpdate {
		// p.logger is only assigned in CreateTraceProcessor; guard against an
		// update arriving before any processor has been created.
		if p.logger != nil {
			p.logger.Debug("Received a configuration update")
		}
		if err := p.ReloadProcessors(context.Background(), c); err != nil && p.logger != nil {
			p.logger.Sugar().Errorf("Failed to update configuration for processors: %v", err)
		}
	}
}
// ConfigUpdate returns the channel to send the config updates.
// The channel is unbuffered, so sends block until the listener picks them up.
func (p *Aggregator) ConfigUpdate() chan configmodels.Processors {
	return p.configUpdate
}
// CreateTraceProcessor creates a new aggregated processor as part of the factory.
// It builds the chain of subprocessors described by cfg, wraps them in a single
// processor, and records the settings so the chain can be rebuilt on reload.
func (p *Aggregator) CreateTraceProcessor(
	ctx context.Context,
	params component.ProcessorCreateParams,
	cfg configmodels.Processor,
	nextConsumer consumer.TracesConsumer,
) (component.TracesProcessor, error) {
	// NOTE(review): this assertion panics if cfg is not a *config — presumably
	// guaranteed by the factory wiring in Factory(); confirm.
	aggregatedCfg := cfg.(*config)
	subprocessors, err := p.buildSubprocessors(ctx, params, aggregatedCfg.subprocessorsConfigs, nextConsumer)
	if err != nil {
		return nil, err
	}
	// NOTE(review): subprocessors[0] panics when the pipeline has no
	// subprocessors; confirm config validation rules that out.
	ap := &processor{
		subprocessors:     subprocessors,
		firstSubprocessor: subprocessors[0],
		logger:            params.Logger,
		mx:                &sync.Mutex{},
	}
	// Lazily adopt the first processor's logger for aggregator-level logging.
	if p.logger == nil {
		p.logger = params.Logger
	}
	// Remember everything needed to rebuild this chain on a config reload.
	p.aggregatedprocessors = append(p.aggregatedprocessors, processorSettings{
		processor:    ap,
		nextConsumer: nextConsumer,
		params:       params,
		pipeline:     aggregatedCfg.pipeline,
		config:       aggregatedCfg,
	})
	return ap, nil
}
// buildSubprocessors instantiates the configured subprocessors, chaining each
// one's output into the next. The chain is built back-to-front so that every
// processor can be handed its downstream consumer at construction time;
// subprocessors[0] is the entry point of the chain.
func (p *Aggregator) buildSubprocessors(
	ctx context.Context,
	params component.ProcessorCreateParams,
	cfgs []subprocessorConfig,
	nextConsumer consumer.TracesConsumer,
) ([]component.TracesProcessor, error) {
	nc := nextConsumer
	subprocessors := make([]component.TracesProcessor, len(cfgs))
	for i := len(cfgs) - 1; i >= 0; i-- {
		subCfg := cfgs[i]
		// Guard the map lookup: a missing factory would otherwise cause a
		// nil-pointer dereference on the CreateTracesProcessor call below.
		factory, ok := p.processorFactories[configmodels.Type(subCfg.factoryName)]
		if !ok {
			return nil, fmt.Errorf("no processor factory registered for %q", subCfg.factoryName)
		}
		next, err := factory.CreateTracesProcessor(ctx, params, subCfg.config, nc)
		if err != nil {
			// Wrap with %w so callers can unwrap the underlying cause.
			return nil, fmt.Errorf("failed to create subprocessor %q: %w", subCfg.name, err)
		}
		subprocessors[i] = next.(component.TracesProcessor)
		nc = next
	}
	return subprocessors, nil
}
// Factory creates a processor factory for an aggregated processor,
// wired to this Aggregator's CreateTraceProcessor.
func (p *Aggregator) Factory() component.ProcessorFactory {
	return processorhelper.NewFactory(
		Type,
		CreateDefaultConfig,
		processorhelper.WithTraces(p.CreateTraceProcessor),
	)
}
// Type returns the configuration type for an aggregated processor.
func (p *Aggregator) Type() configmodels.Type {
	return Type
}
// AggregateConfig aggregates the config for all the processors in a pipeline
// to be passed to the aggregated processor factory. Each pipeline entry of the
// form "factory/name" contributes one subprocessorConfig whose factoryName is
// the part before the first "/".
func (p *Aggregator) AggregateConfig(
	pipelineProcessors []string,
	subprocessorsConfig configmodels.Processors,
) configmodels.Processor {
	cfg := &config{
		ProcessorSettings: configmodels.ProcessorSettings{
			TypeVal: Type,
			NameVal: Type,
		},
		pipeline:             pipelineProcessors,
		subprocessorsConfigs: make([]subprocessorConfig, len(pipelineProcessors)),
	}
	// The loop variable is named "procName" rather than "p" so it does not
	// shadow the method receiver.
	for i, procName := range pipelineProcessors {
		cfg.subprocessorsConfigs[i] = subprocessorConfig{
			name:        procName,
			factoryName: strings.SplitN(procName, "/", 2)[0],
			config:      subprocessorsConfig[procName],
		}
	}
	return cfg
}
// ReloadProcessors reloads the aggregated processors: it merges the new
// subprocessor configurations into each pipeline's stored config, rebuilds
// each subprocessor chain, and swaps it into the running processor.
func (p *Aggregator) ReloadProcessors(ctx context.Context, newCfgs configmodels.Processors) error {
	for _, aps := range p.aggregatedprocessors {
		// Merge updated configs into the matching subprocessor slots by name.
		for name, newCfg := range newCfgs {
			for i := 0; i < len(aps.config.subprocessorsConfigs); i++ {
				if aps.config.subprocessorsConfigs[i].name == name {
					aps.config.subprocessorsConfigs[i].config = newCfg
				}
			}
		}
		// Rebuild the whole chain with the merged configuration.
		subprocessors, err := p.buildSubprocessors(ctx, aps.params, aps.config.subprocessorsConfigs, aps.nextConsumer)
		if err != nil {
			return err
		}
		// Swap the new chain into the live processor.
		err = aps.replaceSubprocessors(ctx, subprocessors)
		if err != nil {
			return err
		}
	}
	return nil
}
// GetProcessorName returns the aggregated processor name based on the pipeline
// name: the package's Type joined to the pipeline name with an underscore.
func GetProcessorName(pipelineName string) string {
	return Type + "_" + pipelineName
}
|
package main
import(
"database/sql"
)
// db is the shared database handle used by the route handlers.
var db *sql.DB

// main opens the database connection and registers the HTTP routes.
func main() {
	// getDB arguments are: user, port, database name.
	// NOTE(review): credentials and port are hard-coded; consider moving them
	// to configuration.
	db = getDB("root", "26257", "recipes")
	defer db.Close()
	initializeRoutes()
}
|
package mypkg
import "fmt"
// PrintMe writes s to standard output followed by a newline.
func PrintMe(s string) {
	fmt.Printf("%s\n", s)
}
|
package main
import "fmt"
/* average prints the arithmetic mean of the given values.

A variadic function accepts a variable number of parameters, declared with
three dots before the type; inside the function they arrive as a slice. */
func average(sliceFloat ...float64) {
	// Guard the empty case: 0/0 would print NaN. Report 0 instead.
	if len(sliceFloat) == 0 {
		fmt.Println(0.0)
		return
	}
	sum := 0.0
	/* range walks the slice yielding (index, value); the index is not needed
	here, so it goes to the blank identifier while each value is accumulated. */
	for _, v := range sliceFloat {
		sum += v
	}
	/* sum is float64, so len (an int) is converted before dividing. */
	average := sum / float64(len(sliceFloat))
	fmt.Println(average)
}
// main demonstrates the two ways to call a variadic function.
func main() {
	// Individual arguments: any count works.
	average(10, 20)
	numeritos := []float64{30, 40, 50, 60, 70}
	/* A []float64 cannot be passed directly where float64 values are
	expected; the trailing ... expands the slice into individual
	arguments. */
	average(numeritos...)
}
|
// package main defines the executable for the bcc (bit code compiler) compiler.
package main
import (
"fmt"
"os"
"github.com/mkenney/8bit-cpu/cmp2/pkg/bcc"
"github.com/bdlm/log/v2"
)
// init configures the logger before main runs.
func init() {
	//log.SetFormatter(&log.TextFormatter{DisableTTY: true})
	log.SetLevel(log.DebugLevel)
}
// main drives the compiler: parse the source file, compile the ROM images,
// and dump the parsed instruction listing for debugging.
//
// Usage: bcc <source-file> <dest-file>
func main() {
	// Validate CLI arguments up front; indexing os.Args blindly panics when
	// an argument is missing.
	if len(os.Args) < 3 {
		fmt.Fprintf(os.Stderr, "usage: %s <source-file> <dest-file>\n", os.Args[0])
		os.Exit(1)
	}
	sourceFile := os.Args[1]
	destFile := os.Args[2]
	logger := log.WithFields(log.Fields{"src": sourceFile, "dest": destFile})

	logger.Debug("initializing compiler")
	prg, err := bcc.New(sourceFile, destFile)
	if err != nil {
		logger.WithError(err).Fatal("failed to initialize bit code compiler")
	}

	logger.Debug("parsing src file")
	if err = prg.Parse(); err != nil {
		logger.WithError(err).Fatal("failed to parse source file")
	}

	logger.Debug("writing dest image")
	if err = prg.Compile(); err != nil {
		logger.WithError(err).Fatal("failed to compile ROM images")
	}
	logger.Info("success")

	// DEBUG: dump the parsed instruction listing.
	code := ""
	for _, inst := range prg.Instructions() {
		code = code + "\n" + inst.Line()
	}
	fmt.Printf("\n%s\n\n", code)
}
|
package main
import (
//该包是用来使用框架接口的
"crypto/ecdsa"
"crypto/sha256"
"crypto/x509"
"encoding/asn1"
"encoding/base64"
"encoding/json"
"encoding/pem"
"fmt"
"math/big"
shim "github.com/tjfoc/tjfoc/core/chaincode/shim" //该包是用来使用通信消息结构的
pb "github.com/tjfoc/tjfoc/protos/chaincode"
)
const (
	// use_ecdsa selects ECDSA signature verification (see verifySign).
	use_ecdsa = true
)

// handler is the signature shared by every contract method so they can be
// dispatched from a lookup table.
type handler func(stub shim.ChaincodeStubInterface, args []string) pb.Response

// MyChaincode represents this contract. It must implement the two methods
// Init and Invoke required by the chaincode framework.
type MyChaincode struct {
	handlerMap map[string]handler // method name -> contract method
}
// newChaincodes builds a MyChaincode with its method-dispatch table populated.
// NOTE(review): main constructs the chaincode with new(MyChaincode), so this
// map is never initialized at runtime and Invoke dispatches via an if/else
// chain instead; confirm which entry point is intended.
func newChaincodes() *MyChaincode {
	cc := &MyChaincode{}
	cc.handlerMap = map[string]handler{
		"addCar":             cc.addCar,
		"carQuery":           cc.carQuery,
		"carUpdate":          cc.carUpdate,
		"addChargingPile":    cc.addChargingPile,
		"chargingPileUpdate": cc.chargingPileUpdate,
		"addAccount":         cc.addAccount,
		"updateAccount":      cc.updateAccount,
		"queryAccount":       cc.queryAccount,
		"orderStart":         cc.orderStart,
		"del":                cc.del,
		"orderEnd":           cc.orderEnd,
	}
	return cc
}
// CarData is the on-chain record for one vehicle (ledger key "car_"+CarNum).
type CarData struct {
	CompNum             string
	CompName            string
	CarNum              string //unique vehicle identifier
	PlateNum            string //license plate number
	CarModel            string //vehicle model
	Seating             string //number of seats
	Capacity            string //battery capacity
	Quantity            string //available charge
	ExpectedMileage     string //expected remaining driving range
	BillingRulesDesc    string //billing rule description: rental cost per minute
	ParkingchargingPile string //charging pile where the car is currently parked
	StateMark           string //state flag: 0 unused, 1 available, 2 in use, 3 under maintenance
	RecordTime          string //time the record was put on chain
}

// ChargingPileData is the on-chain record for one charging pile
// (ledger key "cp_"+PileNum).
type ChargingPileData struct {
	CompName string
	CompNum  string
	PilePlace string //location of the charging pile
	PileNum   string //charging pile number
	PileDesc  string //charging pile description
	Position  string //location coordinates
	//billingRulesDesc string //billing rule description
	ParkingNum string //plate number of the car currently parked here
	StateMark  string //state flag: 0 idle, 1 in use, 2 under maintenance
	RecordTime string //time the record was put on chain
}

// AccountData is the on-chain record for one account
// (ledger key "ac_"+sha256(Idcard)).
type AccountData struct {
	Password    string //password
	CompName    string //platform name
	CompNum     string //platform ID
	Phone       string //phone number
	Idcard      string //ID-card number
	Role        string //1 = company, 2 = user
	FreezeMoney int    //frozen funds
	Balance     int    //platform funds
	RecordTime  string //registration time
}

// OrderData is the payload used when an order is opened (see orderStart).
type OrderData struct {
	OrderId       string //order ID
	CarNum        string //vehicle ID
	Phone         string //user account
	CompNum       string //platform the car was rented through
	StartTime     string //order start time
	EndTime       string //order end time
	StartCharging string //charging pile the car departed from
	EndCharging   string //charging pile at order completion
	Money         int    //order amount
}

// OrderEndData extends OrderData with the settlement breakdown computed
// when an order is closed (see orderEnd).
type OrderEndData struct {
	OrderId       string //order ID
	CarNum        string //vehicle ID
	Phone         string //user account
	CompNum       string //platform the car was rented through
	StartTime     string //order start time
	EndTime       string //order end time
	StartCharging string //charging pile the car departed from
	EndCharging   string //charging pile at order completion
	Money         int    //order amount
	CarCompNum    string
	PileCompNum   string
	CarCompMoney  int //share paid to the vehicle's platform
	PileCompMoney int //share paid to the charging pile's platform
	CompMoney     int //share paid to the rental platform
}
func main() {
mycc := new(MyChaincode)
err := shim.Start(mycc)
if err != nil {
fmt.Printf("Error starting my chaincode : %s", err)
}
}
// Init is called on chaincode instantiation; no setup is required.
func (cc *MyChaincode) Init(stub shim.ChaincodeStubInterface) pb.Response {
	return shim.Success(nil)
}
// Invoke dispatches a chaincode invocation to the matching contract method.
// Unknown function names fall through to an empty success response, matching
// the previous if/else chain's behavior.
func (cc *MyChaincode) Invoke(stub shim.ChaincodeStubInterface) pb.Response {
	function, args := stub.GetFunctionAndParameters()
	switch function {
	case "addCar": // store a new vehicle
		return cc.addCar(stub, args)
	case "carQuery": // query by ledger key
		return cc.carQuery(stub, args)
	case "carUpdate": // update a vehicle
		return cc.carUpdate(stub, args)
	case "addChargingPile": // add a charging pile
		return cc.addChargingPile(stub, args)
	case "addAccount": // add an account
		return cc.addAccount(stub, args)
	case "chargingPileUpdate": // update a charging pile
		return cc.chargingPileUpdate(stub, args)
	case "orderStart": // open an order
		return cc.orderStart(stub, args)
	case "orderEnd": // close and settle an order
		return cc.orderEnd(stub, args)
	case "updateAccount": // update account information
		return cc.updateAccount(stub, args)
	case "queryAccount": // registered in handlerMap but previously unreachable here
		return cc.queryAccount(stub, args)
	case "del": // registered in handlerMap but previously unreachable here
		return cc.del(stub, args)
	}
	return shim.Success(nil)
}
// addCar stores a new vehicle record on the ledger.
// args[0] is the CarData JSON; args[1] is the caller's signature, currently
// unused because verification is disabled below.
func (cc *MyChaincode) addCar(stub shim.ChaincodeStubInterface, args []string) pb.Response {
	if len(args) != 2 {
		return shim.Error("Invalid parameter")
	}
	// Signature verification (disabled).
	// if !cc.verifySign([]byte(args[0]), []byte(args[1])) {
	// 	return shim.Error("Invalid signature")
	// }
	var car CarData
	if err := json.Unmarshal([]byte(args[0]), &car); err != nil {
		return shim.Error(err.Error())
	}
	// if car.CompNum == "" {
	// 	return shim.Error("wrong record")
	// }
	// Vehicle keys are namespaced with the "car_" prefix.
	Key := "car_" + car.CarNum
	value, _ := stub.GetState(Key)
	if value != nil {
		// Refuse to overwrite an existing vehicle.
		return shim.Error("has been existed!")
	}
	carResult, _ := json.Marshal(car)
	err := stub.PutState(Key, []byte(carResult))
	if err != nil {
		return shim.Error(err.Error())
	}
	return shim.Success([]byte(Key))
}
// carQuery returns the raw ledger value stored under the key in args[0].
// Despite its name it is a generic key lookup; args[1] is unused.
func (cc *MyChaincode) carQuery(stub shim.ChaincodeStubInterface, args []string) pb.Response {
	if len(args) != 2 {
		return shim.Error("Invalid parameter")
	}
	value, err := stub.GetState(args[0])
	if err != nil {
		return shim.Error(err.Error())
	}
	// A missing key yields a nil value inside a successful response.
	return shim.Success(value)
}
// carUpdate overwrites the vehicle record keyed by the CarNum in the
// CarData JSON in args[0]. No existence check is performed, so this also
// creates the record if it was absent.
func (cc *MyChaincode) carUpdate(stub shim.ChaincodeStubInterface, args []string) pb.Response {
	if len(args) != 2 {
		return shim.Error("Invalid parameter")
	}
	// Signature verification (disabled).
	// if !cc.verifySign([]byte(args[0]), []byte(args[1])) {
	// 	return shim.Error("Invalid signature")
	// }
	var car CarData
	if err := json.Unmarshal([]byte(args[0]), &car); err != nil {
		return shim.Error(err.Error())
	}
	Key := "car_" + car.CarNum
	carResult, _ := json.Marshal(car)
	err := stub.PutState(Key, []byte(carResult))
	if err != nil {
		return shim.Error(err.Error())
	}
	return shim.Success([]byte("Update Success!!"))
}
// verifySign base64-decodes sign and verifies it against data with the
// package-level ECDSA public key. It returns false on any decode failure.
func (cc *MyChaincode) verifySign(data, sign []byte) bool {
	sign, err := base64.StdEncoding.DecodeString(string(sign))
	if err != nil {
		return false
	}
	return cc.verifySignEcdsa(data, sign)
}
// verifySignEcdsa checks an ASN.1-encoded ECDSA signature (r, s) over data
// against the PEM-encoded Pub_Key constant.
// NOTE(review): data is handed to ecdsa.Verify directly, so callers are
// expected to pass a digest, not the raw message — confirm with the signer.
func (cc *MyChaincode) verifySignEcdsa(data, sign []byte) bool {
	var ecdsasign struct {
		R, S *big.Int
	}
	_, err := asn1.Unmarshal(sign, &ecdsasign)
	if err != nil {
		return false
	}
	pub := []byte(Pub_Key)
	// NOTE(review): pem.Decode returns nil for malformed input, which would
	// panic on block.Bytes below; Pub_Key is a trusted in-package constant.
	block, _ := pem.Decode(pub)
	pubkey, err := x509.ParsePKIXPublicKey(block.Bytes)
	if err != nil {
		return false
	}
	ecdsaPub := pubkey.(*ecdsa.PublicKey)
	return ecdsa.Verify(ecdsaPub, data, ecdsasign.R, ecdsasign.S)
}
// addChargingPile stores a new charging-pile record on the ledger.
// args[0] is the ChargingPileData JSON; args[1] (signature) is unused while
// verification is disabled.
func (cc *MyChaincode) addChargingPile(stub shim.ChaincodeStubInterface, args []string) pb.Response {
	if len(args) != 2 {
		return shim.Error("Invalid parameter")
	}
	var chargingPile ChargingPileData
	if err := json.Unmarshal([]byte(args[0]), &chargingPile); err != nil {
		return shim.Error(err.Error())
	}
	// Signature verification (disabled).
	// if !cc.verifySign([]byte(args[0]), []byte(args[1])) {
	// 	return shim.Error("Invalid signature")
	// }
	// Charging-pile keys are namespaced with the "cp_" prefix.
	Key := "cp_" + chargingPile.PileNum
	value, _ := stub.GetState(Key)
	if value != nil {
		// NOTE(review): the duplicate-key error returns the key itself rather
		// than a message like addCar's "has been existed!" — confirm intent.
		return shim.Error(Key)
	}
	chargingPileResult, _ := json.Marshal(chargingPile)
	err := stub.PutState(Key, []byte(chargingPileResult))
	if err != nil {
		return shim.Error(err.Error())
	}
	return shim.Success([]byte("Add Success!!!"))
}
// chargingPileUpdate overwrites the charging-pile record keyed by the PileNum
// in the ChargingPileData JSON in args[0]. No existence check is performed.
func (cc *MyChaincode) chargingPileUpdate(stub shim.ChaincodeStubInterface, args []string) pb.Response {
	if len(args) != 2 {
		return shim.Error("Invalid parameter")
	}
	// Signature verification (disabled).
	// if !cc.verifySign([]byte(args[0]), []byte(args[1])) {
	// 	return shim.Error("Invalid signature")
	// }
	var chargingPile ChargingPileData
	if err := json.Unmarshal([]byte(args[0]), &chargingPile); err != nil {
		return shim.Error(err.Error())
	}
	Key := "cp_" + chargingPile.PileNum
	chargingPileResult, _ := json.Marshal(chargingPile)
	err := stub.PutState(Key, []byte(chargingPileResult))
	if err != nil {
		return shim.Error(err.Error())
	}
	return shim.Success([]byte("Update Success!!"))
}
// addAccount stores a new account on the ledger, keyed by the SHA-256 hash of
// the ID-card number. args[0] is the AccountData JSON; args[1] is unused.
func (cc *MyChaincode) addAccount(stub shim.ChaincodeStubInterface, args []string) pb.Response {
	if len(args) != 2 {
		return shim.Error("Invalid parameter")
	}
	// Signature verification (disabled).
	// if !cc.verifySign([]byte(args[0]), []byte(args[1])) {
	// 	return shim.Error("Invalid signature")
	// }
	var account AccountData
	if err := json.Unmarshal([]byte(args[0]), &account); err != nil {
		return shim.Error(err.Error())
	}
	// Hash the ID card so the raw number is not used as a ledger key.
	Key := "ac_" + getSha256Code(account.Idcard)
	value, _ := stub.GetState(Key)
	if value != nil {
		return shim.Error("has been existed!")
	}
	accountResult, _ := json.Marshal(account)
	err := stub.PutState(Key, []byte(accountResult))
	if err != nil {
		return shim.Error(err.Error())
	}
	return shim.Success([]byte("Add Success!!"))
}
// updateAccount looks up the account keyed by the Idcard in args[0] and adds
// a fixed 5000 to its FreezeMoney.
// NOTE(review): despite the generic name, the only mutation is the fixed
// +5000 freeze — confirm this matches the intended business rule.
// NOTE(review): a missing account leaves value nil, so json.Unmarshal fails
// with "unexpected end of JSON input" rather than a clear not-found error.
func (cc *MyChaincode) updateAccount(stub shim.ChaincodeStubInterface, args []string) pb.Response {
	if len(args) != 2 {
		return shim.Error("Invalid parameter")
	}
	// Signature verification (disabled).
	// if !cc.verifySign([]byte(args[0]), []byte(args[1])) {
	// 	return shim.Error("Invalid signature")
	// }
	var account AccountData
	if err := json.Unmarshal([]byte(args[0]), &account); err != nil {
		return shim.Error(err.Error())
	}
	Key := "ac_" + getSha256Code(account.Idcard)
	value, _ := stub.GetState(Key)
	var accountData AccountData
	if err := json.Unmarshal([]byte(value), &accountData); err != nil {
		return shim.Error(err.Error())
	}
	accountData.FreezeMoney = accountData.FreezeMoney + 5000
	accountResult, _ := json.Marshal(accountData)
	err := stub.PutState(Key, []byte(accountResult))
	if err != nil {
		return shim.Error(err.Error())
	}
	return shim.Success([]byte("Update Success!!"))
}
// queryAccount returns the raw ledger value stored under the key in args[0]
// (callers are expected to pass the full "ac_"-prefixed key). args[1] is unused.
func (cc *MyChaincode) queryAccount(stub shim.ChaincodeStubInterface, args []string) pb.Response {
	if len(args) != 2 {
		return shim.Error("Invalid parameter")
	}
	value, err := stub.GetState(args[0])
	if err != nil {
		return shim.Error(err.Error())
	}
	return shim.Success(value)
}
// orderStart opens a rental order: it validates that the car, the departure
// charging pile and an unused order ID exist, writes the order, then marks
// the car "in use" and the pile "in use", clearing their parking references.
// args[0] is the OrderData JSON; args[1] (signature) is unused.
func (cc *MyChaincode) orderStart(stub shim.ChaincodeStubInterface, args []string) pb.Response {
	if len(args) != 2 {
		return shim.Error("Invalid parameter")
	}
	// Signature verification (disabled).
	// if !cc.verifySign([]byte(args[0]), []byte(args[1])) {
	// 	return shim.Error("Invalid signature")
	// }
	var order OrderData
	if err := json.Unmarshal([]byte(args[0]), &order); err != nil {
		return shim.Error(err.Error())
	}
	// if car.CompNum == "" {
	// 	return shim.Error("wrong record")
	// }
	carKey := "car_" + order.CarNum
	carDatas, _ := stub.GetState(carKey)
	if carDatas == nil {
		return shim.Error("no this car!")
	}
	pileKey := "cp_" + order.StartCharging
	pileDatas, _ := stub.GetState(pileKey)
	if pileDatas == nil {
		return shim.Error("no this chargingpile!")
	}
	orderResult, _ := json.Marshal(order)
	// Order keys are namespaced with the "or_" prefix.
	Key := "or_" + order.OrderId
	value, _ := stub.GetState(Key)
	if value != nil {
		return shim.Error("has been existed!")
	}
	err := stub.PutState(Key, []byte(orderResult))
	if err != nil {
		return shim.Error(err.Error())
	}
	// The car leaves its pile and becomes "in use" (StateMark 2).
	var car CarData
	if err := json.Unmarshal(carDatas, &car); err != nil {
		return shim.Error(err.Error())
	}
	car.ParkingchargingPile = ""
	car.StateMark = "2"
	carStr, _ := json.Marshal(car)
	err = stub.PutState(carKey, []byte(carStr))
	if err != nil {
		return shim.Error(err.Error())
	}
	// The departure pile is freed of the car and marked "in use" (StateMark 1).
	var pile ChargingPileData
	if err := json.Unmarshal(pileDatas, &pile); err != nil {
		return shim.Error(err.Error())
	}
	pile.ParkingNum = ""
	pile.StateMark = "1"
	pileStr, _ := json.Marshal(pile)
	err = stub.PutState(pileKey, []byte(pileStr))
	if err != nil {
		return shim.Error(err.Error())
	}
	return shim.Success([]byte("add seccess"))
}
// orderEnd closes a rental order and settles the payment three ways: 5% to
// the rental platform, 80% to the vehicle's platform, and the remainder to
// the charging-pile's platform. It then writes the enriched order record,
// parks the car at the end pile (car StateMark 1 = available) and marks the
// pile occupied (pile StateMark 2).
//
// NOTE(review): the 80% "car company" share is credited to the SAME account
// key derived from order.CompNum — adminKey is not recomputed before the
// second credit, and order.CarCompNum is only assigned afterwards. Confirm
// whether the vehicle platform should have its own key here.
// NOTE(review): the PutState errors for the three account writes are not
// checked until the later order write, so an early failure can be masked.
func (cc *MyChaincode) orderEnd(stub shim.ChaincodeStubInterface, args []string) pb.Response {
	if len(args) != 2 {
		return shim.Error("Invalid parameter")
	}
	// Signature verification (disabled).
	// if !cc.verifySign([]byte(args[0]), []byte(args[1])) {
	// 	return shim.Error("Invalid signature")
	// }
	var order OrderEndData
	if err := json.Unmarshal([]byte(args[0]), &order); err != nil {
		return shim.Error(err.Error())
	}
	carKey := "car_" + order.CarNum //fetch the vehicle record
	carDatas, _ := stub.GetState(carKey)
	if carDatas == nil {
		return shim.Error("no this car!")
	}
	pileKey := "cp_" + order.EndCharging
	pileDatas, _ := stub.GetState(pileKey)
	if pileDatas == nil {
		return shim.Error("no this chargingpile!")
	}
	adminKey := "ac_" + getSha256Code(cc.checkAccount(order.CompNum))
	adminData, _ := stub.GetState(adminKey)
	var admin AccountData
	if err := json.Unmarshal([]byte(adminData), &admin); err != nil {
		return shim.Error(adminKey)
	}
	var compMoney = (int)(order.Money * 5 / 100) //share for the platform the user rented through
	admin.Balance = admin.Balance + compMoney
	adminResult, _ := json.Marshal(admin)
	err := stub.PutState(adminKey, []byte(adminResult))
	// Re-read the account so the next credit includes the 5% just written.
	adminData, _ = stub.GetState(adminKey)
	var admin1 AccountData
	if err := json.Unmarshal([]byte(adminData), &admin1); err != nil {
		return shim.Error(err.Error())
	}
	var carCompMoney = (int)(order.Money * 80 / 100) //share for the vehicle's platform
	admin1.Balance = admin1.Balance + carCompMoney
	adminResult1, _ := json.Marshal(admin1)
	err = stub.PutState(adminKey, []byte(adminResult1))
	var admin2 AccountData
	adminKey = "ac_" + getSha256Code(cc.checkAccount(order.PileCompNum))
	adminData, _ = stub.GetState(adminKey)
	if err := json.Unmarshal([]byte(adminData), &admin2); err != nil {
		return shim.Error(err.Error())
	}
	var pileCompMoney = (int)(order.Money - compMoney - carCompMoney)
	admin2.Balance = admin2.Balance + pileCompMoney //share for the charging-pile's platform
	adminResult2, _ := json.Marshal(admin2)
	err = stub.PutState(adminKey, []byte(adminResult2))
	// Persist the settled order with the computed breakdown.
	Key := "or_" + order.OrderId
	order.CompMoney = compMoney
	order.CarCompMoney = carCompMoney
	order.PileCompMoney = pileCompMoney
	order.CarCompNum = admin1.CompNum
	order.PileCompNum = admin2.CompNum
	orderResult, _ := json.Marshal(order)
	err = stub.PutState(Key, []byte(orderResult))
	if err != nil {
		return shim.Error(err.Error())
	}
	// Park the car at the end pile and mark it available again.
	var car CarData
	if err := json.Unmarshal(carDatas, &car); err != nil {
		return shim.Error(err.Error())
	}
	car.ParkingchargingPile = order.EndCharging
	car.StateMark = "1"
	carStr, _ := json.Marshal(car)
	err = stub.PutState(carKey, []byte(carStr))
	if err != nil {
		return shim.Error(err.Error())
	}
	// Record the parked car on the pile and mark the pile occupied.
	var pile ChargingPileData
	if err := json.Unmarshal(pileDatas, &pile); err != nil {
		return shim.Error(err.Error())
	}
	pile.ParkingNum = order.CarNum
	pile.StateMark = "2"
	pileStr, _ := json.Marshal(pile)
	err = stub.PutState(pileKey, []byte(pileStr))
	if err != nil {
		return shim.Error(err.Error())
	}
	return shim.Success([]byte("add seccess"))
}
// del removes the ledger entry stored under the key in args[0].
// args[1] (signature) is unused.
func (cc *MyChaincode) del(stub shim.ChaincodeStubInterface, args []string) pb.Response {
	if len(args) != 2 {
		return shim.Error("Invalid parameter")
	}
	err := stub.DelState(args[0])
	if err != nil {
		return shim.Error(err.Error())
	}
	return shim.Success([]byte("del success!!!"))
}
// checkAccount maps a platform number to its admin account name.
// "2" resolves to adminB; every other value (including "1") resolves to adminA.
func (cc *MyChaincode) checkAccount(data string) string {
	switch data {
	case "2":
		return "adminB"
	default:
		return "adminA"
	}
}
// getSha256Code returns the lowercase hex encoding of the SHA-256 digest of s.
func getSha256Code(s string) string {
	sum := sha256.Sum256([]byte(s))
	return fmt.Sprintf("%x", sum)
}
const (
	// Pub_Key is the PEM-encoded ECDSA public key used by verifySignEcdsa
	// to check request signatures.
	Pub_Key = `
-----BEGIN PUBLIC KEY-----
MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE1D9XwBQpXmyIvCD6pXddIVJERSRg
EpTW7heKZH0FRGpsi/SUs7mGA5f1AKIC9eEuHHHZPYRYgBVxmddwKhv23Q==
-----END PUBLIC KEY-----
`
)
|
package main
import (
"github.com/go-playground/validator"
"go.uber.org/zap"
"github.com/imouto1994/yume/internal/infra/config"
httpProtocol "github.com/imouto1994/yume/internal/infra/http"
"github.com/imouto1994/yume/internal/infra/migration"
"github.com/imouto1994/yume/internal/infra/sqlite"
"github.com/imouto1994/yume/internal/route"
)
// main wires up the application: global logger, validator, configuration,
// database migrations, database connection, router, and HTTP server.
func main() {
	// Initialize global logger
	logger, _ := zap.NewDevelopment()
	defer logger.Sync() // flush buffered entries on exit; error deliberately ignored
	undo := zap.ReplaceGlobals(logger)
	defer undo()

	// Initialize validator
	v := validator.New()

	// Initialize configuration
	cfg, err := config.Initialize(v)
	if err != nil {
		zap.L().Fatal("failed to load configurations", zap.Error(err))
	}

	// Initialize migration
	// NOTE(review): UpLatest's outcome is not checked here — confirm it
	// terminates the process itself on failure.
	migration.UpLatest()

	// Intitialize database client
	db, err := sqlite.Connect()
	if err != nil {
		zap.L().Fatal("failed to establish connection to database", zap.Error(err))
	}
	defer db.Close()

	router := route.CreateRouter(cfg, db, v)
	httpProtocol.RunServer(router, cfg)
}
|
// package config holds the const of the configuration values
// Right now it is just a thin wrapper around viper. If growing in
// use or complexity it should probably have its own structs and
// hide viper
// also depending on the use, maybe more sources of config
// TODO add tests
package config
import "github.com/spf13/viper"
const (
	// MongoHost holds the config key for the mongo server host name
	MongoHost = "MongoHost"
	// MongoPort holds the config key for the mongo server port
	MongoPort = "MongoPort"
	// MongoUser holds the config key for the mongo user name
	MongoUser = "MongoUser"
	// MongoPassword holds the config key for the mongo user's password
	MongoPassword = "MongoPassword"
)
// Load loads the config from the env vars (prefixed with APIPAY_). It could
// be extended to load from a different source. It also sets defaults where a
// sensible one exists: host and port. User and password have no default and
// stay unset unless provided via environment.
func Load() error {
	viper.SetEnvPrefix("APIPAY")
	viper.SetDefault(MongoHost, "localhost")
	viper.SetDefault(MongoPort, 27017)
	// Bind every key in one pass instead of repeating the bind/err block
	// four times; behavior is unchanged.
	for _, key := range []string{MongoHost, MongoPort, MongoUser, MongoPassword} {
		if err := viper.BindEnv(key); err != nil {
			return err
		}
	}
	return nil
}
|
package customRoundrobin
import (
"context"
"google.golang.org/grpc/balancer/apis"
"google.golang.org/grpc/metadata"
"strings"
"sync"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/balancer/base"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal/grpcrand"
)
// Name is the balancer name this package registers with gRPC.
const Name = "customRoundrobin"

// OverWriteKeyName is the outgoing-metadata key whose value, when present,
// pins a request to the subchannel with that address (see rrPicker.Pick).
const OverWriteKeyName = "lb-addr"

var logger = grpclog.Component("customRoundrobin")

// newBuilder creates a new roundrobin balancer builder.
func newBuilder() balancer.Builder {
	return base.NewBalancerBuilder(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true})
}

// init registers the balancer with gRPC at package load time.
func init() {
	balancer.Register(newBuilder())
}
type rrPickerBuilder struct{}

// Build snapshots the currently-ready subchannels into a new picker. When no
// subchannel is ready it returns an error picker so RPCs wait for one.
func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
	logger.Infof("customRoundrobin: newPicker called with info: %v", info)
	if len(info.ReadySCs) == 0 {
		return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
	}
	var scs []apis.SubConn
	for sc := range info.ReadySCs {
		scs = append(scs, sc)
	}
	return &rrPicker{
		subConns: scs,
		// Start at a random index, as the same RR balancer rebuilds a new
		// picker when SubConn states change, and we don't want to apply excess
		// load to the first server in the list.
		next: grpcrand.Intn(len(scs)),
	}
}
type rrPicker struct {
	// subConns is the snapshot of the customRoundrobin balancer when this picker was
	// created. The slice is immutable. Each Get() will do a round robin
	// selection from it and return the selected SubConn.
	subConns []apis.SubConn

	mu   sync.Mutex // guards next, since Pick may be called concurrently
	next int        // index of the subchannel the next round-robin pick uses
}
/*
Pick is the core logic of the custom round robin picker.

For stateful load balancing we look for the "lb-addr" key in the outgoing
metadata: when present and matching a ready subchannel, the request is routed
to that subchannel. When the key is absent — or names an address with no
ready subchannel — we fall back to the regular round robin rotation.
*/
func (p *rrPicker) Pick(pi balancer.PickInfo) (balancer.PickResult, error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	// subConn pick on user request
	if overwriteAddr, ok := stickyKeyFromContext(pi.Ctx, OverWriteKeyName); ok {
		for _, sc := range p.subConns {
			curAddr := sc.GetAddrConnection()
			if strings.Compare(curAddr.Addr, overwriteAddr) == 0 {
				// Address match: route to this subconnection without
				// advancing the round-robin cursor.
				return balancer.PickResult{SubConn: sc}, nil
			}
		}
		// No ready subchannel has the requested address. Previously this
		// returned a nil SubConn; fall through to round robin instead.
	}
	// subConn pick on lb
	chosenSc := p.subConns[p.next]
	p.next = (p.next + 1) % len(p.subConns)
	return balancer.PickResult{SubConn: chosenSc}, nil
}
// Get one value from metadata in ctx with key stickinessMDKey.
//
// It returns "", false if stickinessMDKey is an empty string, if the context
// carries no outgoing metadata, or if the key is absent. Both the attached
// metadata map and the raw "added" key/value pairs are searched.
func stickyKeyFromContext(ctx context.Context, stickinessMDKey string) (string, bool) {
	if stickinessMDKey == "" {
		return "", false
	}
	md, added, ok := metadata.FromOutgoingContextRaw(ctx)
	if !ok {
		return "", false
	}
	if vv, ok := md[stickinessMDKey]; ok {
		if len(vv) > 0 {
			// Use the first value when the key was set multiple times.
			return vv[0], true
		}
	}
	// "added" holds flat [key, value, key, value, ...] slices.
	for _, ss := range added {
		for i := 0; i < len(ss)-1; i += 2 {
			if ss[i] == stickinessMDKey {
				return ss[i+1], true
			}
		}
	}
	return "", false
}
|
package chart
import (
"math"
"sort"
"github.com/wcharczuk/go-chart/drawing"
)
// XAxis represents the horizontal axis.
type XAxis struct {
	Name           string
	Style          Style
	ValueFormatter ValueFormatter
	Range          Range
	Ticks          []Tick // user-supplied ticks; when non-empty they override generated ones (see GetTicks)
}
// GetName returns the name.
func (xa XAxis) GetName() string {
	return xa.Name
}

// GetStyle returns the style.
func (xa XAxis) GetStyle() Style {
	return xa.Style
}
// GetTicks returns the ticks for a series. It coalesces between user provided ticks and
// generated ticks: explicitly supplied ticks always win.
func (xa XAxis) GetTicks(r Renderer, ra Range, vf ValueFormatter) []Tick {
	if len(xa.Ticks) > 0 {
		return xa.Ticks
	}
	return xa.generateTicks(r, ra, vf)
}
// generateTicks derives ticks for the range: it sizes a step from the
// rendered label width, then walks the range at that step.
func (xa XAxis) generateTicks(r Renderer, ra Range, vf ValueFormatter) []Tick {
	step := xa.getTickStep(r, ra, vf)
	return xa.generateTicksWithStep(ra, step, vf)
}
// getTickCount estimates how many tick labels fit across the range's domain,
// based on the rendered width of the widest endpoint label plus a minimum
// horizontal spacing.
func (xa XAxis) getTickCount(r Renderer, ra Range, vf ValueFormatter) int {
	fontSize := xa.Style.GetFontSize(DefaultFontSize)
	r.SetFontSize(fontSize)
	// take a cut at determining the 'widest' value.
	l0 := vf(ra.Min)
	ln := vf(ra.Max)
	ll := l0
	if len(ln) > len(l0) {
		ll = ln
	}
	llw, _ := r.MeasureText(ll)
	textWidth := drawing.PointsToPixels(r.GetDPI(), float64(llw))
	width := textWidth + DefaultMinimumTickHorizontalSpacing
	count := int(math.Ceil(float64(ra.Domain) / float64(width)))
	return count
}
// getTickStep converts the estimated tick count into a value-space interval
// between consecutive ticks.
func (xa XAxis) getTickStep(r Renderer, ra Range, vf ValueFormatter) float64 {
	tickCount := xa.getTickCount(r, ra, vf)
	step := ra.Delta() / float64(tickCount)
	return step
}
// generateTicksWithStep produces ticks from ra.Min up to (but excluding)
// ra.Max at the given value-space interval.
func (xa XAxis) generateTicksWithStep(ra Range, step float64, vf ValueFormatter) []Tick {
	// A zero or negative step would make the loop below spin forever (the
	// cursor never reaches ra.Max); return no ticks instead.
	if step <= 0 {
		return nil
	}
	var ticks []Tick
	for cursor := ra.Min; cursor < ra.Max; cursor += step {
		ticks = append(ticks, Tick{
			Value: cursor,
			Label: vf(cursor),
		})
	}
	return ticks
}
// Render renders the axis: a baseline along the bottom of the canvas box and
// a label under each tick.
func (xa XAxis) Render(r Renderer, canvasBox Box, ra Range, ticks []Tick) {
	tickFontSize := xa.Style.GetFontSize(DefaultFontSize)
	tickHeight := drawing.PointsToPixels(r.GetDPI(), tickFontSize)
	// Labels sit one margin plus one text height below the canvas bottom.
	ty := canvasBox.Bottom + DefaultXAxisMargin + int(tickHeight)

	r.SetStrokeColor(xa.Style.GetStrokeColor(DefaultAxisColor))
	r.SetStrokeWidth(xa.Style.GetStrokeWidth(DefaultAxisLineWidth))
	r.MoveTo(canvasBox.Left, canvasBox.Bottom)
	r.LineTo(canvasBox.Right, canvasBox.Bottom)
	r.Stroke()

	r.SetFontColor(xa.Style.GetFontColor(DefaultAxisColor))
	r.SetFontSize(tickFontSize)

	sort.Sort(Ticks(ticks))
	for _, t := range ticks {
		v := t.Value
		x := ra.Translate(v)
		// NOTE(review): label x is measured from the RIGHT edge — confirm
		// Range.Translate yields right-relative offsets, otherwise the axis
		// renders mirrored.
		tx := canvasBox.Right - x
		r.Text(t.Label, tx, ty)
	}
}
|
package createsubcommands
import (
"fmt"
snmpsimclient "github.com/inexio/snmpsim-restapi-go-client"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"os"
)
// CreateTagCmd represents the createTag command. It reads client settings
// from the config, creates a tag via the snmpsim management API, and prints
// the new tag's id. On any failure it logs the error and exits non-zero.
var CreateTagCmd = &cobra.Command{
	Use:   "tag",
	Args:  cobra.ExactArgs(0),
	Short: "Creates a new tag",
	Long:  `Creates a new tag and returns its id`,
	Run: func(cmd *cobra.Command, args []string) {
		//Load the client data from the config
		baseUrl := viper.GetString("mgmt.http.baseUrl")
		username := viper.GetString("mgmt.http.authUsername")
		password := viper.GetString("mgmt.http.authPassword")

		//Create a new client
		client, err := snmpsimclient.NewManagementClient(baseUrl)
		if err != nil {
			// Attach the underlying error so failures are diagnosable;
			// previously only a generic message was logged.
			log.Error().
				Err(err).
				Msg("Error while creating management client")
			os.Exit(1)
		}
		err = client.SetUsernameAndPassword(username, password)
		if err != nil {
			log.Error().
				Err(err).
				Msg("Error while setting username and password")
			os.Exit(1)
		}

		//Read in the tags name and description
		name := cmd.Flag("name").Value.String()
		description := cmd.Flag("description").Value.String()

		//Create the tag
		var tag snmpsimclient.Tag
		tag, err = client.CreateTag(name, description)
		if err != nil {
			log.Error().
				Err(err).
				Msg("Error during creation of the tag")
			os.Exit(1)
		}
		fmt.Println("Tag has been created successfully.")
		fmt.Println("Id:", tag.Id)
	},
}
// init registers the command's flags. "description" is mandatory; the
// tag name is expected to come from a parent command's flags.
func init() {
	CreateTagCmd.Flags().String("description", "", "Description of the tag")
	err := CreateTagCmd.MarkFlagRequired("description")
	if err != nil {
		// Include the underlying error; previously it was discarded.
		log.Error().
			Err(err).
			Msg("Could not mark 'description' flag required")
		os.Exit(1)
	}
}
|
package main
import (
"fmt"
"os"
"runtime"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
)
const (
	// defaultConfig is the configuration file loaded by default.
	defaultConfig = "config.yaml"
	// exampleConfig is presumably a sample configuration shipped with
	// the project — confirm against how it is referenced elsewhere.
	exampleConfig = "config-example.yaml"
	// myName is the ASCII-art startup banner printed by main.
	myName = `
✄╔════╗
✄╚══╗═║
✄──╔╝╔╝╔══╗╔╗╔╗╔══╗╔═╗╔══╗
✄─╔╝╔╝─║║═╣║║║║║══╣║╔╝║╔╗║
✄╔╝═╚═╗║║═╣║╚╝║╠══║║║─║╚╝║
✄╚════╝╚══╝╚══╝╚══╝╚╝─╚══╝
`
	// LINE is a horizontal separator used to frame console output.
	LINE = "----------------------------------------"
)
// init configures zerolog before main runs: global level Info, with a
// human-readable console writer targeting stderr.
func init() {
	zerolog.SetGlobalLevel(zerolog.InfoLevel)
	log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr})
}
// main prints the startup banner and then configures GOMAXPROCS.
func main() {
	fmt.Println(LINE)
	// Fixed user-facing typo: was "Power by".
	fmt.Print("Powered by")
	log.Info().Msgf("%s", myName)
	// fmt.Println(myName)
	fmt.Println(LINE)
	setMaxProcs()
}
// setMaxProcs pins GOMAXPROCS to the number of CPU cores and logs if
// the runtime reports a different effective value afterwards.
// NOTE(review): the inline comment mentions a user-specified value, but
// no override is checked here — calling GOMAXPROCS(NumCPU())
// unconditionally also clobbers any GOMAXPROCS environment setting,
// which the Go runtime would otherwise honor.
func setMaxProcs() {
	// Allow as many threads as we have cores unless the user specified a value.
	numProcs := runtime.NumCPU()
	runtime.GOMAXPROCS(numProcs)
	// Check if the setting was successful.
	actualNumProcs := runtime.GOMAXPROCS(0)
	if actualNumProcs != numProcs {
		log.Info().Msgf("Specified max procs of %d but using %d", numProcs, actualNumProcs)
	}
}
|
package gob
import (
"reflect"
"testing"
)
// TestCodec round-trips a struct through the codec's marshaler and
// unmarshaler and verifies the decoded value is structurally identical
// to the original.
func TestCodec(t *testing.T) {
	type Example struct {
		Field1 string
		Field2 int
	}
	want := &Example{Field1: "field1", Field2: 128}
	codec := Codec()
	encode, decode := codec.Marshaler(), codec.Unmarshaler()
	raw, err := encode(want)
	if err != nil {
		t.Fatal(err)
	}
	got := &Example{}
	if err := decode(raw, got); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(want, got) {
		t.Fatalf("expected unmarshaled object to be:\n%#+v\n\ngot:\n%#+v", want, got)
	}
}
|
package main
import (
"crypto/md5"
_ "expvar"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
_ "net/http/pprof"
"os"
"path/filepath"
"sync"
"time"
humanize "github.com/dustin/go-humanize"
"github.robot.car/cruise/swift-profiler/copier"
)
const defaultGoroutineCount = 16
const defaultNumFiles = 120
const defaultInputDirectory = "/tmp/benchmark-test"
const defaultDestinationContainer = "benchmark-test"
const defaultVerifyChecksum = true
const defaultPrecomputeChecksum = true
// main drives a concurrent upload benchmark against Swift: it reads
// files from an input directory, optionally pre-computes MD5 checksums,
// fans paths out to a pool of worker goroutines, and reports total
// bytes written and throughput.
func main() {
	var goroutineCount, numFiles int
	var inputDirectory, destinationContainer string
	var precomputeChecksum, verifyChecksum bool
	flag.IntVar(&goroutineCount, "concurrency", defaultGoroutineCount, "Number of goroutines")
	flag.IntVar(&numFiles, "num-files", defaultNumFiles, "Number of files")
	flag.StringVar(&inputDirectory, "input-dir", defaultInputDirectory, "Input directory")
	flag.StringVar(&destinationContainer, "dest-prefix", defaultDestinationContainer, "Destination Swift container name")
	flag.BoolVar(&verifyChecksum, "verify-checksum", defaultVerifyChecksum, "Whether Swift should verify checksum")
	flag.BoolVar(&precomputeChecksum, "precompute-checksum", defaultPrecomputeChecksum, "Pre-compute checksum beforehand")
	flag.Parse()
	// Serve the expvar/pprof endpoints (blank imports above) for live profiling.
	go func() {
		hostPort := "0.0.0.0:6060"
		log.Printf("Listening on %s\n", hostPort)
		log.Println(http.ListenAndServe(hostPort, nil))
	}()
	files, err := ioutil.ReadDir(inputDirectory)
	if err != nil {
		log.Fatal(err)
	}
	swiftClient := copier.NewSwiftCopier()
	if err := swiftClient.Setup(); err != nil {
		log.Fatal(err)
	}
	// Pre-compute md5 checksums and pass them to the copier.
	checksums := make(map[string]string, len(files))
	if verifyChecksum {
		if precomputeChecksum {
			log.Printf("Precomputing checksum...\n")
			for _, file := range files {
				path := filepath.Join(inputDirectory, file.Name())
				// fileMD5 closes each file before returning; the
				// previous inline version deferred Close inside this
				// loop, holding every file open until main returned.
				sum, err := fileMD5(path)
				if err != nil {
					log.Fatal(err)
				}
				checksums[path] = sum
			}
		} else {
			log.Printf("Letting Swift compute checksum...\n")
		}
	} else {
		log.Printf("Ignore checksums completely...\n")
	}
	var wg sync.WaitGroup
	fileChannel := make(chan string)
	log.Printf("Spawning %d goroutines to run\n", goroutineCount)
	for i := 0; i < goroutineCount; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for path := range fileChannel {
				// Renamed from "md5" to stop shadowing the crypto/md5
				// package inside this goroutine.
				var checksum *string
				if precomputeChecksum {
					sum := checksums[path]
					checksum = &sum
				}
				if err := swiftClient.Copy(path, destinationContainer, verifyChecksum, checksum); err != nil {
					log.Printf("Swift copy error: %s\n", err)
				}
			}
		}()
	}
	log.Printf("Copying files concurrently\n")
	byteCount, fileCount := int64(0), 0
	start := time.Now()
	for _, file := range files {
		path := filepath.Join(inputDirectory, file.Name())
		fileChannel <- path
		byteCount += file.Size()
		fileCount++
		if fileCount >= numFiles {
			break
		}
	}
	close(fileChannel)
	wg.Wait()
	duration := time.Since(start)
	log.Printf(" %d files %s written in %s\n", fileCount, humanize.Bytes(uint64(byteCount)), duration)
	log.Printf(" Copy throughput per second: %s\n", humanize.Bytes(uint64(float64(byteCount)/duration.Seconds())))
}

// fileMD5 returns the lowercase hex MD5 digest of the file at path.
// The file is closed before returning, so it is safe to call in a loop.
func fileMD5(path string) (string, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()
	h := md5.New()
	if _, err := io.Copy(h, f); err != nil {
		return "", err
	}
	return fmt.Sprintf("%x", h.Sum(nil)), nil
}
|
package controllers
import (
"github.com/astaxie/beego"
)
// Result is the JSON payload returned by MainController.Get.
type Result struct {
	Response string // status string, e.g. "OK"
}
// MainController handles requests to the root route by embedding the
// standard beego controller.
type MainController struct {
	beego.Controller
}
// Get responds to GET requests with the JSON body {"Response":"OK"}.
func (c *MainController) Get() {
	c.Data["json"] = &Result{Response: "OK"}
	c.ServeJson()
}
|
package main
import (
"log"
"github.com/cloudevents/sdk-go/pkg/cloudevents"
keptn "github.com/keptn/go-utils/pkg/lib"
)
/**
* Here are all the handler functions for the individual event
See https://github.com/keptn/spec/blob/0.1.3/cloudevents.md for details on the payload
-> "sh.keptn.event.configuration.change"
-> "sh.keptn.events.deployment-finished"
-> "sh.keptn.events.tests-finished"
-> "sh.keptn.event.start-evaluation"
-> "sh.keptn.events.evaluation-done"
-> "sh.keptn.event.problem.open"
-> "sh.keptn.events.problem"
-> "sh.keptn.event.action.triggered"
*/
//
// Handles ConfigurationChangeEventType = "sh.keptn.event.configuration.change"
// Currently only logs the event id; TODO: add in your handler code
//
func HandleConfigurationChangeEvent(myKeptn *keptn.Keptn, incomingEvent cloudevents.Event, data *keptn.ConfigurationChangeEventData) error {
	log.Printf("Handling Configuration Changed Event: %s", incomingEvent.Context.GetID())
	return nil
}
//
// Handles DeploymentFinishedEventType = "sh.keptn.events.deployment-finished"
// Currently only logs the event id; TODO: add in your handler code
//
func HandleDeploymentFinishedEvent(myKeptn *keptn.Keptn, incomingEvent cloudevents.Event, data *keptn.DeploymentFinishedEventData) error {
	log.Printf("Handling Deployment Finished Event: %s", incomingEvent.Context.GetID())
	// capture start time for tests
	// startTime := time.Now()
	// run tests
	// ToDo: Implement your tests here
	// Send Test Finished Event
	// return myKeptn.SendTestsFinishedEvent(&incomingEvent, "", "", startTime, "pass", nil, "keptn-service-template-go")
	return nil
}
//
// Handles TestsFinishedEventType = "sh.keptn.events.tests-finished"
// Currently only logs the event id; TODO: add in your handler code
//
func HandleTestsFinishedEvent(myKeptn *keptn.Keptn, incomingEvent cloudevents.Event, data *keptn.TestsFinishedEventData) error {
	log.Printf("Handling Tests Finished Event: %s", incomingEvent.Context.GetID())
	return nil
}
//
// Handles StartEvaluationEventType = "sh.keptn.event.start-evaluation"
// (the previous comment wrongly said EvaluationDoneEventType)
// TODO: add in your handler code
//
func HandleStartEvaluationEvent(myKeptn *keptn.Keptn, incomingEvent cloudevents.Event, data *keptn.StartEvaluationEventData) error {
	log.Printf("Handling Start Evaluation Event: %s", incomingEvent.Context.GetID())
	return nil
}
//
// Handles EvaluationDoneEventType = "sh.keptn.events.evaluation-done"
// (the previous comment wrongly said DeploymentFinishedEventType)
// TODO: add in your handler code
//
func HandleEvaluationDoneEvent(myKeptn *keptn.Keptn, incomingEvent cloudevents.Event, data *keptn.EvaluationDoneEventData) error {
	log.Printf("Handling Evaluation Done Event: %s", incomingEvent.Context.GetID())
	return nil
}
//
// Handles ProblemOpenEventType = "sh.keptn.event.problem.open"
// Handles ProblemEventType = "sh.keptn.events.problem"
// Currently only logs the event id; TODO: add in your handler code
//
func HandleProblemEvent(myKeptn *keptn.Keptn, incomingEvent cloudevents.Event, data *keptn.ProblemEventData) error {
	log.Printf("Handling Problem Event: %s", incomingEvent.Context.GetID())
	// Deprecated since Keptn 0.7.0 - use the HandleActionTriggeredEvent instead
	return nil
}
//
// Handles ActionTriggeredEventType = "sh.keptn.event.action.triggered"
// Dispatches on data.Action.Action; only the placeholder "action-xyz"
// is recognized so far. TODO: add in your handler code
//
func HandleActionTriggeredEvent(myKeptn *keptn.Keptn, incomingEvent cloudevents.Event, data *keptn.ActionTriggeredEventData) error {
	log.Printf("Handling Action Triggered Event: %s", incomingEvent.Context.GetID())
	// check if action is supported
	if data.Action.Action == "action-xyz" {
		//myKeptn.SendActionStartedEvent() TODO: implement the SendActionStartedEvent in keptn/go-utils/pkg/lib/events.go
		// Implement your remediation action here
		//myKeptn.SendActionFinishedEvent() TODO: implement the SendActionFinishedEvent in keptn/go-utils/pkg/lib/events.go
	}
	return nil
}
|
package golem
import "crypto/rc4"
// rc4ModeEcnryption implements the Cipher interface using the RC4
// stream cipher. NOTE(review): the name misspells "Encryption"; it is
// unexported and referenced by all sibling methods, so renaming needs a
// coordinated change across the file.
type rc4ModeEcnryption struct {
	key []byte // raw key bytes, validated by SetKey (1-256 bytes)
	// cipher is never assigned by the methods visible here — each call
	// builds a fresh rc4.Cipher instead. Possibly dead state.
	cipher *rc4.Cipher
}
// NewRc4Cipher returns a new rc4 cipher with no key set. Callers must
// invoke SetKey before Encrypt/Decrypt; with an empty key,
// rc4.NewCipher inside those methods fails.
func NewRc4Cipher() Cipher {
	return &rc4ModeEcnryption{}
}
// SetKey validates and stores the RC4 key. RC4 requires a key of 1 to
// 256 bytes; anything else yields an rc4.KeySizeError.
func (r *rc4ModeEcnryption) SetKey(key string) error {
	raw := []byte(key)
	if n := len(raw); n < 1 || n > 256 {
		return rc4.KeySizeError(n)
	}
	r.key = raw
	return nil
}
// Encrypt RC4-encrypts data with the key configured via SetKey and
// returns the ciphertext. A fresh cipher is built per call because RC4
// keystream state must not be reused across messages.
func (r *rc4ModeEcnryption) Encrypt(data []byte) []byte {
	encrypted := make([]byte, len(data))
	// SetKey already validated the key length, so NewCipher can only
	// fail when SetKey was never called; the signature offers no error
	// channel, and that misuse would panic below either way.
	c, _ := rc4.NewCipher(r.key)
	c.XORKeyStream(encrypted, data)
	// Dropped the deprecated c.Reset() call: since Go 1.12 it is
	// documented as unable to guarantee key removal, and the cipher is
	// discarded here anyway.
	return encrypted
}
// Decrypt RC4-decrypts data (RC4 is symmetric, so this mirrors
// Encrypt). Unlike before, a key-setup failure — e.g. SetKey never
// called — is returned instead of being ignored and panicking on a nil
// cipher. The signature is unchanged; previously the error was always
// nil, so existing callers are unaffected.
func (r *rc4ModeEcnryption) Decrypt(data []byte) ([]byte, error) {
	c, err := rc4.NewCipher(r.key)
	if err != nil {
		return nil, err
	}
	decrypted := make([]byte, len(data))
	c.XORKeyStream(decrypted, data)
	// Deprecated c.Reset() removed; see Encrypt.
	return decrypted, nil
}
|
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
// +build crdb_test
package memo
import "github.com/cockroachdb/errors"
// FiltersExprMutateChecker is used to check if a FiltersExpr has been
// erroneously mutated. This code is called in crdb_test builds so that the
// check is run for tests, but the overhead is not incurred for non-test builds.
type FiltersExprMutateChecker struct {
	hasher hasher     // reusable hasher; re-initialized on each Init/Check
	hash   internHash // hash of the filters captured at Init time
}
// Init initializes a FiltersExprMutateChecker with the original filters,
// capturing their hash so CheckForMutation can later detect changes.
func (fmc *FiltersExprMutateChecker) Init(filters FiltersExpr) {
	// This initialization pattern ensures that fields are not unwittingly
	// reused. Field reuse must be explicit.
	*fmc = FiltersExprMutateChecker{}
	fmc.hasher.Init()
	fmc.hasher.HashFiltersExpr(filters)
	fmc.hash = fmc.hasher.hash
}
// CheckForMutation panics if the given filters are not equal to the filters
// passed for the previous Init function call. Equality is judged by
// re-hashing and comparing against the hash captured in Init.
func (fmc *FiltersExprMutateChecker) CheckForMutation(filters FiltersExpr) {
	fmc.hasher.Init()
	fmc.hasher.HashFiltersExpr(filters)
	if fmc.hash != fmc.hasher.hash {
		panic(errors.AssertionFailedf("filters should not be mutated"))
	}
}
|
package main
import (
"bufio"
"container/heap"
"fmt"
"log"
"os"
"sort"
"strings"
)
// ascend implements sort.Interface to order visits by ascending room
// number; durations do not participate in the ordering.
type ascend []visit

func (s ascend) Len() int { return len(s) }
func (s ascend) Less(i, j int) bool {
	return s[i].room < s[j].room
}
func (s ascend) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}
// d2ts converts a clock string "H:M:S" into seconds since midnight.
func d2ts(d string) int {
	var h, m, s int
	fmt.Sscanf(d, "%d:%d:%d", &h, &m, &s)
	return s + 60*(m+60*h)
}
// ts2d renders a seconds-since-midnight timestamp as "HH:MM:SS".
func ts2d(ts int) string {
	h, m, s := ts/3600, ts/60%60, ts%60
	return fmt.Sprintf("%02d:%02d:%02d", h, m, s)
}
// hallway maps a room number to its floor's hallway number by zeroing
// the two low decimal digits (e.g. 342 -> 300).
func hallway(room int) int {
	return room - room%100
}
// visit is one scheduled stop on an agent's path.
type visit struct {
	room int // room number; the hundreds digit selects the floor (see hallway)
	dura int // time to spend in the room, in seconds
}

// agent models one person moving through the building in the event
// simulation driven by main.
type agent struct {
	id    byte    // single-character identifier from the input line
	enter int     // entry timestamp in seconds (parsed by d2ts)
	time  int     // time of this agent's next event; priority-queue key
	path  []visit // remaining rooms to visit, kept sorted ascending by room
	curr  int     // current hallway number (100 = 1st floor), or -1 once outside
	index int     // heap index maintained by prioEventQueue.Swap
}
// prioEventQueue implements heap.Interface over agents, ordered by next
// event time, with the agent id breaking ties deterministically.
type prioEventQueue []*agent

func (pq prioEventQueue) Len() int { return len(pq) }
func (pq prioEventQueue) Less(i, j int) bool {
	if pq[i].time == pq[j].time {
		return pq[i].id < pq[j].id
	}
	return pq[i].time < pq[j].time
}
func (pq prioEventQueue) Swap(i, j int) {
	pq[i], pq[j] = pq[j], pq[i]
	// Keep each agent's heap index current so update/Fix can find it.
	pq[i].index, pq[j].index = i, j
}

// Push appends an agent; called by container/heap, not directly.
func (pq *prioEventQueue) Push(x interface{}) {
	n := len(*pq)
	a := x.(*agent)
	a.index = n
	*pq = append(*pq, a)
}

// Pop removes and returns the last element; called by container/heap.
func (pq *prioEventQueue) Pop() interface{} {
	old := *pq
	n := len(old)
	a := old[n-1]
	a.index = -1 // mark as no longer in the heap
	*pq = old[0 : n-1]
	return a
}

// update rewrites an agent's scheduling fields in place and restores
// the heap invariant at its position.
func (pq *prioEventQueue) update(a *agent, time, curr int, path []visit) {
	a.time = time
	a.path = path
	a.curr = curr
	heap.Fix(pq, a.index)
}
// main reads an itinerary file (one agent per line: id, entry time,
// then room/duration pairs), simulates all agents moving through the
// building with a time-ordered event queue, and prints each agent's
// entry and exit times.
func main() {
	var room, dura int
	data, err := os.Open(os.Args[1])
	if err != nil {
		log.Fatal(err)
	}
	defer data.Close()
	// room -1: outside building
	// room xx00: hallway xth floor
	var (
		staff []agent
		n     int
	)
	hq := make(map[int]int) // map occupied until
	scanner := bufio.NewScanner(data)
	for scanner.Scan() {
		var path []visit
		s := strings.Fields(scanner.Text())
		// Field 0 is the one-character agent id, field 1 the entry time.
		id, time := s[0][0], d2ts(s[1])
		// Remaining fields alternate: room number, stay duration.
		for i := 2; i < len(s); i += 2 {
			fmt.Sscanf(s[i], "%d", &room)
			hq[room] = 0
			fmt.Sscan(s[i+1], &dura)
			path = append(path, visit{room, dura})
		}
		// Visits are processed in ascending room order.
		sort.Sort(ascend(path))
		// Agents start in the 1st-floor hallway (100); the first event
		// fires 10 seconds after entry — presumably the walk in.
		staff = append(staff, agent{id, time, time + 10, path, 100, n})
		n++
	}
	q := make(prioEventQueue, len(staff))
	for i := range staff {
		q[i] = &staff[i]
	}
	heap.Init(&q)
	// Event loop: pop the agent with the earliest next event, advance
	// its state, and re-push unless it has left the building.
	for q.Len() > 0 {
		p := heap.Pop(&q).(*agent)
		time := p.time
		if len(p.path) == 0 { // no more rooms to visit
			if p.curr == 100 { // on hallway 1st floor
				p.time += 10 // 10s to exit from the ground floor
				p.curr = -1  // has left the building
			} else { // in some hallway >1st floor
				// NOTE(review): curr/10 as descent-plus-exit time is a
				// domain rule taken on faith (hallway 300 -> 30s).
				p.time += p.curr / 10 // and we're out
				p.curr = -1
			}
		} else {
			if p.curr == hallway(p.path[0].room) {
				if hq[p.path[0].room] > time { // is room occupied?
					// Wait: retry when the current occupant leaves.
					p.time = hq[p.path[0].room]
				} else {
					// Occupy the room for its duration, then spend 10s
					// back in the hallway; drop the visit from path.
					hq[p.path[0].room] = time + p.path[0].dura
					p.time = time + p.path[0].dura + 10
					copy(p.path, p.path[1:])
					p.path = p.path[:len(p.path)-1]
				}
			} else {
				// Move to the target room's hallway; travel time scales
				// with the destination floor plus a 10s transfer.
				p.time += (p.path[0].room/100)*10 - p.curr/10 + 10
				p.curr = hallway(p.path[0].room)
			}
			heap.Push(&q, p)
		}
	}
	// Report entry and final (exit) time per agent, in input order.
	for _, i := range staff {
		fmt.Println(ts2d(i.enter), ts2d(i.time))
	}
}
|
package codequalitybinding
import (
"alauda.io/devops-apiserver/pkg/apis/devops/v1alpha1"
devopsclient "alauda.io/devops-apiserver/pkg/client/clientset/versioned"
"alauda.io/diablo/src/backend/api"
"alauda.io/diablo/src/backend/errors"
"alauda.io/diablo/src/backend/resource/common"
"alauda.io/diablo/src/backend/resource/dataselect"
"github.com/golang/glog"
)
// CodeQualityBindingList contains a list of CodeQualityBinding in the cluster
type CodeQualityBindingList struct {
	ListMeta api.ListMeta `json:"listMeta"`
	// Unordered list of CodeQualityBinding.
	Items []CodeQualityBinding `json:"codequalitybindings"`
	// List of non-critical errors that occurred during resource retrieval
	Errors []error `json:"errors"`
}
// CodeQualityBinding is a presentation layer view of Kubernetes namespaces. This means it is namespace plus
// additional augmented data we can get from other sources.
type CodeQualityBinding struct {
	ObjectMeta api.ObjectMeta `json:"objectMeta"` // standard object metadata
	TypeMeta   api.TypeMeta   `json:"typeMeta"`   // kind/apiVersion info
	// Spec mirrors the underlying v1alpha1 resource spec.
	Spec v1alpha1.CodeQualityBindingSpec `json:"spec"`
	// Status reports the binding's service status.
	Status v1alpha1.ServiceStatus `json:"status"`
}
// GetCodeQualityBindingList returns the CodeQualityBinding resources in
// the given namespace, filtered and paginated according to dsQuery.
// Non-critical list errors are embedded in the returned list; critical
// errors abort with a nil list.
func GetCodeQualityBindingList(client devopsclient.Interface, namespace *common.NamespaceQuery, dsQuery *dataselect.DataSelectQuery) (*CodeQualityBindingList, error) {
	glog.Info("Getting codequalitybinding list")
	crsList, err := client.DevopsV1alpha1().CodeQualityBindings(namespace.ToRequestParam()).List(api.ListEverything)
	if err != nil {
		glog.Infof("error while listing codequalitybinding: %v", err)
	}
	nonCriticalErrors, criticalError := errors.HandleError(err)
	if criticalError != nil {
		return nil, criticalError
	}
	// Guard against a nil response when List failed with only a
	// non-critical error; the previous code dereferenced crsList.Items
	// unconditionally and could panic here.
	var items []v1alpha1.CodeQualityBinding
	if crsList != nil {
		items = crsList.Items
	}
	return toList(items, nonCriticalErrors, dsQuery), nil
}
// toList applies the data-select filter/pagination to the raw bindings
// and wraps the surviving items, total count, and non-critical errors
// into a CodeQualityBindingList.
func toList(codeQualityBindings []v1alpha1.CodeQualityBinding, nonCriticalErrors []error, dsQuery *dataselect.DataSelectQuery) *CodeQualityBindingList {
	cells, filteredTotal := dataselect.GenericDataSelectWithFilter(toCells(codeQualityBindings), dsQuery)
	filtered := fromCells(cells)
	list := &CodeQualityBindingList{
		Items:    make([]CodeQualityBinding, 0, len(filtered)),
		ListMeta: api.ListMeta{TotalItems: filteredTotal},
		Errors:   nonCriticalErrors,
	}
	for _, binding := range filtered {
		list.Items = append(list.Items, toDetailsInList(&binding))
	}
	return list
}
// toDetailsInList converts a CodeQualityBinding resource into its API
// representation and stamps the tool-related annotations consumed by
// list views.
func toDetailsInList(codeQualityBinding *v1alpha1.CodeQualityBinding) CodeQualityBinding {
	details := toDetails(codeQualityBinding)
	if details.ObjectMeta.Annotations == nil {
		details.ObjectMeta.Annotations = map[string]string{}
	}
	annotations := details.ObjectMeta.Annotations
	annotations[common.AnnotationsKeyToolType] = v1alpha1.ToolChainCodeQualityToolName
	annotations[common.AnnotationsKeyToolItemKind] = v1alpha1.ResourceKindCodeQualityTool
	annotations[common.AnnotationsKeyToolItemType] = getValueFromLabels(codeQualityBinding.GetLabels(), v1alpha1.LabelCodeQualityToolType)
	return *details
}
// getValueFromLabels returns labels[key], or "" when the key is absent
// or labels is nil. Indexing a nil map is safe in Go and yields the
// zero value, so the previous explicit nil check and two-step lookup
// collapse into a single expression with identical behavior.
func getValueFromLabels(labels map[string]string, key string) string {
	return labels[key]
}
|
package main
import (
"fmt"
"github.com/jwhett/gogo"
)
// main demonstrates basic gogo board usage: it prints the stone-color
// constants, plays a short scripted sequence of moves for both colors,
// then renders the board and the move history.
func main() {
	var board gogo.Board
	fmt.Printf("Black: %d\nWhite: %d\n", gogo.BLACK, gogo.WHITE)
	// Scripted moves in letter+number coordinates; a19 and s1 exercise
	// opposite board corners. NOTE(review): any error/return value from
	// PlayMove is not visible from here and is not checked.
	board.PlayMove(gogo.BLACK, "d4")
	board.PlayMove(gogo.WHITE, "f3")
	board.PlayMove(gogo.BLACK, "c6")
	board.PlayMove(gogo.WHITE, "a19")
	board.PlayMove(gogo.BLACK, "s1")
	board.Show()
	board.ShowHistory()
}
|
package db
import (
"context"
"database/sql"
"database/sql/driver"
"errors"
"fmt"
"strings"
"testing"
"github.com/chadweimer/gomp/models"
gomock "github.com/golang/mock/gomock"
"github.com/samber/lo"
)
// Test_postgres_GetSearchFields verifies that the postgres driver
// builds one OR-joined statement segment and one query argument per
// supported search field, silently dropping unsupported fields.
func Test_postgres_GetSearchFields(t *testing.T) {
	type testArgs struct {
		fields []models.SearchField
		query  string
	}
	// Arrange: mixes of valid, all-supported, and invalid fields.
	tests := []testArgs{
		{[]models.SearchField{models.SearchFieldName}, "query"},
		{[]models.SearchField{models.SearchFieldName, models.SearchFieldDirections}, "query"},
		{supportedSearchFields[:], "query"},
		{[]models.SearchField{models.SearchFieldName, "invalid"}, "query"},
		{[]models.SearchField{"invalid"}, "query"},
	}
	for i, test := range tests {
		t.Run(fmt.Sprint(i), func(t *testing.T) {
			sut := postgresRecipeDriverAdapter{}
			// Act
			stmt, args := sut.GetSearchFields(test.fields, test.query)
			// Assert: only the supported subset should contribute.
			expectedFields := lo.Intersect(test.fields, supportedSearchFields[:])
			if len(args) != len(expectedFields) {
				t.Errorf("expected %d args, received %d", len(expectedFields), len(args))
			}
			// Every argument must be the original query string.
			for index, arg := range args {
				strArg, ok := arg.(string)
				if !ok {
					t.Errorf("invalid argument type: %v", arg)
				}
				if strArg != test.query {
					t.Errorf("arg at index %d, expected %v, received %v", index, test.query, arg)
				}
			}
			if stmt == "" {
				// An empty statement is only acceptable when no field survived.
				if len(expectedFields) > 0 {
					t.Error("filter should not be empty")
				}
			} else {
				// One " OR "-joined segment per surviving field.
				segments := strings.Split(stmt, " OR ")
				if len(segments) != len(expectedFields) {
					t.Errorf("expected %d segments, received %d", len(expectedFields), len(segments))
				}
			}
		})
	}
}
// Test_lockPostgres verifies that lockPostgres/unlockPostgres issue the
// matching pg_advisory_(un)lock statement and propagate whatever error
// the database returns.
func Test_lockPostgres(t *testing.T) {
	type testArgs struct {
		lock          bool  // true exercises lockPostgres, false unlockPostgres
		expectedError error // error the mocked Exec should surface, if any
	}
	// Arrange
	tests := []testArgs{
		{true, nil},
		{true, sql.ErrNoRows},
		{true, sql.ErrConnDone},
		{false, nil},
		{false, sql.ErrNoRows},
		{false, sql.ErrConnDone},
	}
	for i, test := range tests {
		t.Run(fmt.Sprint(i), func(t *testing.T) {
			ctrl := gomock.NewController(t)
			defer ctrl.Finish()
			sut, dbmock := getMockDb(t)
			defer sut.Close()
			// The lock functions operate on a dedicated connection.
			conn, err := sut.Db.Conn(context.Background())
			if err != nil {
				t.Fatalf("failed to open connection, error: %v", err)
			}
			defer conn.Close()
			action := "lock"
			if !test.lock {
				action = "unlock"
			}
			// Expect exactly one advisory lock/unlock statement.
			exec := dbmock.ExpectExec(fmt.Sprintf("SELECT pg_advisory_%s\\(1\\)", action))
			if test.expectedError == nil {
				exec.WillReturnResult(driver.ResultNoRows)
			} else {
				exec.WillReturnError(test.expectedError)
			}
			// Act
			if test.lock {
				err = lockPostgres(conn)
			} else {
				err = unlockPostgres(conn)
			}
			// Assert: the database error must pass through unchanged.
			if !errors.Is(err, test.expectedError) {
				t.Errorf("expected error: %v, received error: %v", test.expectedError, err)
			}
			if err := dbmock.ExpectationsWereMet(); err != nil {
				t.Errorf("there were unfulfilled expectations: %s", err)
			}
		})
	}
}
|
package gsftp
import (
"context"
"fmt"
"cloud.google.com/go/storage"
"github.com/pkg/sftp"
"google.golang.org/api/option"
)
// GoogleCloudStorageHandler builds an sftp.Handlers set backed by a
// single GCS bucket: the same gcsHandler serves reads, writes, file
// commands, and listings.
func GoogleCloudStorageHandler(ctx context.Context, bucketName string, opts ...option.ClientOption) (*sftp.Handlers, error) {
	client, err := storage.NewClient(ctx, opts...)
	if err != nil {
		// Lowercase error text per Go convention (was "Storage Client Error: ...").
		return nil, fmt.Errorf("creating storage client: %v", err)
	}
	handler := &gcsHandler{
		client: client,
		bucket: client.Bucket(bucketName),
	}
	// Keyed fields satisfy go vet's composite-literal check for
	// external structs and guard against field reordering in pkg/sftp.
	return &sftp.Handlers{
		FileGet:  handler,
		FilePut:  handler,
		FileCmd:  handler,
		FileList: handler,
	}, nil
}
|
package pkg
// PkgTemplate is the common template to generate encoded spack
// package specs.
var PkgTemplate = "" +
`{{.BlockComment}}
from spack import *
class {{.Name}}({{.PackageType}}):
{{if .Description}}"""{{.Description}}"""{{end}}
{{if .Homepage}}homepage = "{{.Homepage}}"{{end}}
{{if gt (len .URL) 0}}url = "{{.URL}}"{{end}}
` + VersionTemplate + `
{{range $_, $entry := .Dependencies}} depends_on('{{$entry}}')
{{end}}
{{.BuildInstructions}}
`
// VersionTemplate is the defining template for how versions are
// written to generate an encoded spack package. Each version line adds
// only the optional keyword arguments (checksum, branch, commit,
// submodules, expand, url, extension, tag) that are actually set on
// the entry.
var VersionTemplate = "" +
	`{{range $_, $entry := .Versions}} version('{{printVersion $entry}}'{{if $entry.Checksum}}, {{$entry.Checksum}}{{end}}{{if $entry.Branch}}, branch='{{$entry.Branch}}'{{end}}{{if $entry.Commit}}, commit='{{$entry.Commit}}'{{end}}{{if $entry.Submodules}}, submodules={{$entry.Submodules}}{{end}}{{if $entry.Expand}}, expand={{$entry.Expand}}{{end}}{{if $entry.URL}}, url='{{$entry.URL}}'{{end}}{{if $entry.Extension}}, extension='{{$entry.Extension}}'{{end}}{{if $entry.Tag}}, tag='{{$entry.Tag}}'{{end}})
{{end}}`
|
package pipelinetemplate
import (
"log"
"alauda.io/devops-apiserver/pkg/apis/devops/v1alpha1"
devopsclient "alauda.io/devops-apiserver/pkg/client/clientset/versioned"
"alauda.io/diablo/src/backend/api"
)
// PreviewOptions carries the inputs used to render a Jenkinsfile preview.
type PreviewOptions struct {
	// Source identifies where the pipeline definition comes from.
	Source *v1alpha1.PipelineSource `json:"source"`
	// Values holds template parameter values keyed by parameter name.
	Values map[string]string `json:"values"`
}
// RenderJenkinsfile renders the Jenkinsfile preview for the named
// PipelineTemplate. A nil options is tolerated: previously
// options.Values was read unconditionally, panicking when options was
// nil despite the nil check performed for Source. The API error is now
// also checked before dereferencing the (possibly nil) result.
func RenderJenkinsfile(client devopsclient.Interface, namespace string, name string, options *PreviewOptions) (jenkinsfile string, err error) {
	var source *v1alpha1.PipelineSource
	var values map[string]string
	if options != nil {
		source = options.Source
		values = options.Values
	}
	opts := &v1alpha1.JenkinsfilePreviewOptions{
		Source: source,
		Values: values,
	}
	log.Printf("Render jenkinsfile from PipelineTemplate namespace[%s], name[%s]", namespace, name)
	result, err := client.DevopsV1alpha1().PipelineTemplates(namespace).Preview(name, opts)
	if err != nil {
		return "", err
	}
	return result.Jenkinsfile, nil
}
// GetCategories collect all categories from PipelineTemplate and ClusterPipelineTemplate.
// Lookups are best-effort: if either List call fails, the function
// returns whatever categories were gathered so far and discards the
// error. NOTE(review): callers cannot distinguish "no categories" from
// "list failed" — confirm this is intentional.
func GetCategories(client devopsclient.Interface, namespace string) (categories map[string]PipelineTemplateCategory) {
	categories = make(map[string]PipelineTemplateCategory)
	// Cluster-scoped templates first.
	clusterTemplates, err := client.DevopsV1alpha1().ClusterPipelineTemplates().List(api.ListEverything)
	if err != nil {
		return
	}
	for _, template := range clusterTemplates.Items {
		putCategory(template.Labels, categories)
	}
	// Then namespace-scoped templates.
	templates, err := client.DevopsV1alpha1().PipelineTemplates(namespace).List(api.ListEverything)
	if err != nil {
		return
	}
	for _, template := range templates.Items {
		putCategory(template.Labels, categories)
	}
	return
}
// putCategory records the template's "category" label, when present,
// as a PipelineTemplateCategory entry keyed by the category name.
func putCategory(labels map[string]string, categories map[string]PipelineTemplateCategory) {
	category, ok := labels["category"]
	if !ok {
		return
	}
	categories[category] = PipelineTemplateCategory{Name: category}
}
|
package media
import (
"fmt"
)
// Multimedia is implemented by every media type that can render itself
// as a human-readable, newline-separated description.
type Multimedia interface {
	Mostrar() string
}
// ContenidoWeb aggregates a collection of Multimedia items.
type ContenidoWeb struct {
	Multimedias []Multimedia
}

// Mostrar prints the formatted description of every contained item.
// Fix: previously this printed the interface value itself
// (fmt.Println(i)), emitting Go's struct/pointer representation rather
// than invoking the item's Mostrar method.
func (cw ContenidoWeb) Mostrar() {
	for _, m := range cw.Multimedias {
		fmt.Println(m.Mostrar())
	}
}
// Imagen describes an image asset: title, file format, and channel info.
type Imagen struct {
	Titulo  string
	Formato string
	Canales string
}

// Mostrar returns the image's fields, one per line, each line
// newline-terminated.
func (i *Imagen) Mostrar() string {
	const nl = "\n"
	return i.Titulo + nl + i.Formato + nl + i.Canales + nl
}
// Audio describes an audio asset: title, file format, and duration.
type Audio struct {
	Titulo   string
	Formato  string
	Duracion string
}

// Mostrar returns the audio's fields, one per line, each line
// newline-terminated.
func (a *Audio) Mostrar() string {
	out := ""
	for _, campo := range [3]string{a.Titulo, a.Formato, a.Duracion} {
		out += campo + "\n"
	}
	return out
}
// Video describes a video asset: title, file format, and frame info.
type Video struct {
	Titulo  string
	Formato string
	Frames  string
}

// Mostrar returns the video's fields, one per line, each line
// newline-terminated. Note the value receiver, unlike Imagen/Audio.
func (v Video) Mostrar() string {
	return fmt.Sprintf("%s\n%s\n%s\n", v.Titulo, v.Formato, v.Frames)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.