text stringlengths 11 4.05M |
|---|
package main
import "C"
// main is required so this package can be built as a C shared library or
// archive (c-shared/c-archive build modes); it is never called by C callers
// and intentionally does nothing.
func main() {}
// Add returns the sum of x and y. It is exported to C callers via cgo.
//export Add
func Add(x int, y int) int {
	sum := x + y
	return sum
}
// Subtract returns x minus y. It is exported to C callers via cgo.
//export Subtract
func Subtract(x int, y int) int {
	diff := x - y
	return diff
}
|
/*
* Npcf_SMPolicyControl API
*
* Session Management Policy Control Service © 2019, 3GPP Organizational Partners (ARIB, ATIS, CCSA, ETSI, TSDSI, TTA, TTC). All rights reserved.
*
* API version: 1.0.4
* Generated by: OpenAPI Generator (https://openapi-generator.tech)
*/
package openapi
// RedirectAddressType - Possible values are - IPV4_ADDR: Indicates that the address type is in the form of \"dotted-decimal\" IPv4 address. - IPV6_ADDR: Indicates that the address type is in the form of IPv6 address. - URL: Indicates that the address type is in the form of Uniform Resource Locator. - SIP_URI: Indicates that the address type is in the form of SIP Uniform Resource Identifier.
type RedirectAddressType struct {
	// NOTE(review): intentionally empty — this is an OpenAPI-generated
	// placeholder for an enumerated string type; the allowed values are
	// documented in the comment above and presumably validated elsewhere.
}
|
package http
import (
"encoding/json"
"net/http"
"net/http/httptest"
"testing"
"strconv"
)
// TestGetPopularBoardList verifies that GET /v1/popular-boards returns 200
// and that the returned boards are ordered by number_of_user, descending.
func TestGetPopularBoardList(t *testing.T) {
	usecase := NewMockUsecase()
	delivery := NewHTTPDelivery(usecase)
	req, err := http.NewRequest("GET", "/v1/popular-boards", nil)
	if err != nil {
		t.Fatal(err)
	}
	rr := httptest.NewRecorder()
	r := http.NewServeMux()
	r.HandleFunc("/v1/popular-boards", delivery.routePopularBoards)
	r.ServeHTTP(rr, req)
	if status := rr.Code; status != http.StatusOK {
		t.Errorf("handler returned wrong status code: got %v want %v",
			status, http.StatusOK)
	}
	responseMap := map[string]interface{}{}
	// Fix: the unmarshal error was previously ignored; a malformed body would
	// have surfaced as a confusing type-assertion panic below.
	if err := json.Unmarshal(rr.Body.Bytes(), &responseMap); err != nil {
		t.Fatalf("handler returned invalid JSON: %v, body: %s", err, rr.Body.String())
	}
	t.Logf("got response %v", rr.Body.String())
	// Fix: use comma-ok assertions so a schema mismatch fails the test with a
	// clear message instead of panicking.
	data, ok := responseMap["data"].(map[string]interface{})
	if !ok {
		t.Fatalf("handler returned unexpected body, missing data object: %v", responseMap)
	}
	popularBoards, ok := data["items"].([]interface{})
	if !ok {
		t.Fatalf("handler returned unexpected body, missing items array: %v", data)
	}
	var prevNum int
	for i := range popularBoards {
		board, ok := popularBoards[i].(map[string]interface{})
		if !ok {
			t.Fatalf("handler returned unexpected item, not an object: %v", popularBoards[i])
		}
		currStr, ok := board["number_of_user"].(string)
		if !ok {
			t.Fatalf("handler returned unexpected body, number_of_user is not a string: %v",
				board["number_of_user"])
		}
		currNum, err := strconv.Atoi(currStr)
		if err != nil {
			// Fix: report the raw value; previously this printed currNum,
			// which is always zero when Atoi fails.
			t.Fatalf("handler returned unexpected body, invalid number_of_user: got %v", currStr)
		}
		if i > 0 && prevNum < currNum {
			t.Fatalf("handler returned unexpected body, invalid order: got %v before %v",
				prevNum, currNum)
		}
		prevNum = currNum
	}
}
|
// Copyright 2014-2016 The Zurichess Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package engine
import . "bitbucket.org/zurichess/board"
// distance stores the number of king steps required
// to reach from one square to another on an empty board.
// It is filled in by init below (Chebyshev distance between squares).
var distance [SquareArraySize][SquareArraySize]int32

// murmurSeed holds per-color seed values fed into murmurMix when hashing.
// NOTE(review): three entries, so ColorArraySize is presumably 3 — confirm
// against the board package.
var murmurSeed = [ColorArraySize]uint64{
	0x77a166129ab66e91,
	0x4f4863d5038ea3a3,
	0xe14ec7e648a4068b,
}
// max returns the maximum of a and b.
func max(a, b int32) int32 {
	if b > a {
		return b
	}
	return a
}
// min returns the minimum of a and b.
func min(a, b int32) int32 {
	if b < a {
		return b
	}
	return a
}
// murmuxMix function mixes two integers k&h.
//
// murmurMix is based on MurmurHash2 https://sites.google.com/site/murmurhash/ which is on public domain.
func murmurMix(k, h uint64) uint64 {
h ^= k
h *= uint64(0xc6a4a7935bd1e995)
return h ^ (h >> uint(51))
}
// init precomputes the distance table: distance[i][j] is the Chebyshev
// distance between squares i and j — the maximum of the absolute file and
// rank differences — i.e. the number of king moves on an empty board.
func init() {
	for i := SquareMinValue; i <= SquareMaxValue; i++ {
		for j := SquareMinValue; j <= SquareMaxValue; j++ {
			f, r := int32(i.File()-j.File()), int32(i.Rank()-j.Rank())
			f, r = max(f, -f), max(r, -r) // absolute value
			distance[i][j] = max(f, r)
		}
	}
}
|
/**
* @Author: DollarKiller
* @Description: 抽象工厂模式
* @Github: https://github.com/dollarkillerx
* @Date: Create in 20:59 2019-09-16
*/
package main
// token is an empty marker interface used by this abstract-factory example;
// with no methods declared, every type satisfies it.
type token interface {
} |
package main
import (
"fmt"
"reflect"
"testing"
)
// Test_run drives run with table-driven cases and compares the whole result
// slice against the expected output.
func Test_run(t *testing.T) {
	type args struct {
		n, q int
		a, k []int
	}
	cases := []struct {
		args args
		want []int
	}{
		{
			args: args{n: 4, q: 3, a: []int{3, 5, 6, 7}, k: []int{2, 5, 3}},
			want: []int{2, 9, 4},
		},
		{
			args: args{n: 5, q: 2, a: []int{1, 2, 3, 4, 5}, k: []int{1, 10}},
			want: []int{6, 15},
		},
	}
	for i, tc := range cases {
		tc := tc
		t.Run(fmt.Sprintf("Test %d", i), func(t *testing.T) {
			got := run(tc.args.n, tc.args.q, tc.args.a, tc.args.k)
			if !reflect.DeepEqual(got, tc.want) {
				t.Errorf("run() = %v, want %v", got, tc.want)
			}
		})
	}
}
|
// SPDX-License-Identifier: MIT
package core
import (
"errors"
"os"
"testing"
"github.com/issue9/assert/v3"
"github.com/caixw/apidoc/v7/internal/locale"
)
var _ error = &Error{}
// TestError checks that WithField contributes to the rendered message, so two
// errors with the same base message but different fields render differently.
func TestError(t *testing.T) {
	a := assert.New(t, false)
	err1 := NewError("msg")
	err2 := NewError("msg").WithField("field")
	a.NotEqual(err1.Error(), err2.Error())
}
// TestWithError checks that WithError preserves the wrapped error, including
// when an already-wrapped *Error is wrapped a second time.
func TestWithError(t *testing.T) {
	a := assert.New(t, false)
	err := errors.New("test")
	serr := WithError(err).WithField("field")
	a.Equal(serr.Err, err)
	// NOTE(review): serr2.Err equals the innermost error, implying WithError
	// flattens nested *Error values — confirm in WithError's implementation.
	serr2 := WithError(serr).WithLocation(Location{URI: "uri"})
	a.Equal(serr2.Err, err)
}
// TestError_AddTypes checks that AddTypes appends new error types while
// deduplicating repeated ones.
func TestError_AddTypes(t *testing.T) {
	a := assert.New(t, false)
	loc := Location{}
	err := loc.WithError(errors.New("err1"))
	err.AddTypes(ErrorTypeDeprecated)
	a.Equal(err.Types, []ErrorType{ErrorTypeDeprecated})
	// Adding the same type again must not create a duplicate entry.
	err.AddTypes(ErrorTypeDeprecated)
	a.Equal(err.Types, []ErrorType{ErrorTypeDeprecated})
	err.AddTypes(ErrorTypeUnused)
	a.Equal(err.Types, []ErrorType{ErrorTypeDeprecated, ErrorTypeUnused})
}
// TestError_Is_Unwrap checks that *Error integrates with the standard errors
// package: errors.Is sees through it and errors.Unwrap returns the cause.
func TestError_Is_Unwrap(t *testing.T) {
	a := assert.New(t, false)
	err := WithError(os.ErrExist).WithField("field")
	a.True(errors.Is(err, os.ErrExist))
	a.Equal(errors.Unwrap(err), os.ErrExist)
}
// TestError_Relate checks that Relate appends related locations/messages in
// call order.
func TestError_Relate(t *testing.T) {
	a := assert.New(t, false)
	err := NewError(locale.ErrInvalidUTF8Character)
	a.Empty(err.Related)
	err.Relate(Location{}, "msg")
	a.Equal(1, len(err.Related)).Equal(err.Related[0].Message, "msg")
	err.Relate(Location{}, "msg2")
	a.Equal(2, len(err.Related)).Equal(err.Related[1].Message, "msg2")
}
|
package app
import (
"context"
"image"
"net/http"
"github.com/go-chi/chi"
"github.com/go-http-utils/etag"
"github.com/pkg/errors"
"go.uber.org/zap"
"github.com/ivanovaleksey/resizer/internal/pkg/cache"
"github.com/ivanovaleksey/resizer/internal/pkg/imagestore"
"github.com/ivanovaleksey/resizer/internal/pkg/resizer"
"github.com/ivanovaleksey/resizer/internal/pkg/singleflight"
)
// Application owns the HTTP stack of the resize server.
// handler and resizeService are populated by Init.
type Application struct {
	// NOTE(review): storing a context in a struct is discouraged; kept here
	// because chi.ServerBaseContext consumes it at Init time.
	ctx           context.Context
	logger        *zap.Logger
	handler       http.Handler
	resizeService Resizer
}
// Resizer resizes the image identified by target according to params.
type Resizer interface {
	Resize(ctx context.Context, target string, params resizer.Params) (image.Image, error)
}
// NewApp returns an Application bound to ctx and logger.
// Init must be called before Handler is served; until then handler and
// resizeService are nil.
func NewApp(ctx context.Context, logger *zap.Logger) *Application {
	return &Application{
		ctx:    ctx,
		logger: logger,
	}
}
// Handler returns the HTTP handler built by Init (nil before Init).
func (a *Application) Handler() http.Handler {
	return a.handler
}
// Init builds the router and the resize service from cfg.
// If it returns an error the Application is only partially initialized
// (handler set, resizeService nil) and must not be served.
func (a *Application) Init(cfg Config) error {
	a.handler = chi.ServerBaseContext(a.ctx, a.initRouter())
	imageProvider, err := a.initImageProvider(cfg)
	if err != nil {
		return errors.Wrap(err, "can't create image provider")
	}
	// Assemble the resize service from the logger, the configured image
	// provider and the concrete resizer implementation.
	opts := []resizer.ServiceOption{
		resizer.WithLogger(a.logger),
		resizer.WithImageProvider(imageProvider),
		resizer.WithImageResizer(resizer.NewResizer()),
	}
	service, err := resizer.NewService(opts...)
	if err != nil {
		return errors.Wrap(err, "can't create service")
	}
	a.resizeService = service
	return nil
}
// initRouter exposes GET /image/resize, wrapped with ETag handling so
// unchanged images can be answered with 304.
func (a *Application) initRouter() http.Handler {
	r := chi.NewRouter()
	r.Route("/image", func(r chi.Router) {
		r.Get("/resize", etag.Handler(http.HandlerFunc(a.ResizeImage), false).ServeHTTP)
	})
	return r
}
// initImageProvider builds the provider selected by cfg.ImageProvider (HTTP
// or local file) and wraps it with a cache plus single-flight deduplication,
// so concurrent requests for the same image hit the backend only once.
func (a *Application) initImageProvider(cfg Config) (resizer.ImageProvider, error) {
	imageCache, err := cache.NewCache()
	if err != nil {
		return nil, errors.Wrap(err, "can't create cache")
	}
	var imageProvider resizer.ImageProvider
	switch cfg.ImageProvider {
	case ImageProviderHTTP:
		imageProvider = imagestore.NewHTTPStore()
	case ImageProviderFile:
		imageProvider = imagestore.NewFileStore()
	default:
		return nil, errors.New("unknown image provider")
	}
	opts := []singleflight.Option{
		singleflight.WithLogger(a.logger),
		singleflight.WithCacheProvider(imageCache),
		singleflight.WithImageProvider(imageProvider),
	}
	return singleflight.NewSingleFlight(opts...), nil
}
|
// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package faketime_test
import (
"testing"
"time"
"gvisor.dev/gvisor/pkg/tcpip/faketime"
)
// TestManualClockAdvance verifies that Advance moves the manual clock's
// monotonic time forward by exactly the requested duration.
func TestManualClockAdvance(t *testing.T) {
	const timeout = time.Millisecond
	clock := faketime.NewManualClock()
	start := clock.NowMonotonic()
	clock.Advance(timeout)
	if got, want := clock.NowMonotonic().Sub(start), timeout; got != want {
		t.Errorf("got = %d, want = %d", got, want)
	}
}
// TestManualClockAfterFunc verifies that AfterFunc callbacks fire exactly
// when the manual clock is advanced to (or past) their deadlines: advancing
// short of a timeout must not fire it, and firing happens at most once.
func TestManualClockAfterFunc(t *testing.T) {
	const (
		timeout1 = time.Millisecond     // timeout for counter1
		timeout2 = 2 * time.Millisecond // timeout for counter2
	)
	tests := []struct {
		name         string
		advance      time.Duration
		wantCounter1 int
		wantCounter2 int
	}{
		{
			name:         "before timeout1",
			advance:      timeout1 - 1,
			wantCounter1: 0,
			wantCounter2: 0,
		},
		{
			name:         "timeout1",
			advance:      timeout1,
			wantCounter1: 1,
			wantCounter2: 0,
		},
		{
			name:         "timeout2",
			advance:      timeout2,
			wantCounter1: 1,
			wantCounter2: 1,
		},
		{
			name:         "after timeout2",
			advance:      timeout2 + 1,
			wantCounter1: 1,
			wantCounter2: 1,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			// A fresh clock per subtest so timers don't leak between cases.
			clock := faketime.NewManualClock()
			counter1 := 0
			counter2 := 0
			clock.AfterFunc(timeout1, func() {
				counter1++
			})
			clock.AfterFunc(timeout2, func() {
				counter2++
			})
			start := clock.NowMonotonic()
			clock.Advance(test.advance)
			if got, want := counter1, test.wantCounter1; got != want {
				t.Errorf("got counter1 = %d, want = %d", got, want)
			}
			if got, want := counter2, test.wantCounter2; got != want {
				t.Errorf("got counter2 = %d, want = %d", got, want)
			}
			if got, want := clock.NowMonotonic().Sub(start), test.advance; got != want {
				t.Errorf("got elapsed = %d, want = %d", got, want)
			}
		})
	}
}
|
package command
import (
"strings"
"github.com/mitchellh/cli"
"github.com/pragkent/aliyun-disk/volume"
)
// GetVolumeNameCommand implements the flexvolume "getvolumename" call.
type GetVolumeNameCommand struct {
	Meta
}
// Run parses the JSON options argument and prints the driver's
// getvolumename result as JSON to the UI.
func (c *GetVolumeNameCommand) Run(args []string) int {
	fs := c.Meta.FlagSet("get_volume_name")
	if err := fs.Parse(args); err != nil {
		return cli.RunResultHelp
	}
	if fs.NArg() < 1 {
		return cli.RunResultHelp
	}
	options, err := volume.ParseOptions(fs.Arg(0))
	if err != nil {
		c.Meta.Ui.Output(jsonify(volume.NewDriverError(err)))
		return 1
	}
	status := c.Meta.Driver.GetVolumeName(options)
	c.Meta.Ui.Output(jsonify(status))
	// NOTE(review): the success path also returns 1; flexvolume callers
	// conventionally expect exit code 0 on success — confirm whether this is
	// intentional (e.g. getvolumename deliberately reported as unsupported).
	return 1
}
// Synopsis returns the one-line description shown in the CLI command list.
func (c *GetVolumeNameCommand) Synopsis() string {
	return "Get volume name"
}
// Help returns the detailed usage text for the getvolumename subcommand.
func (c *GetVolumeNameCommand) Help() string {
	helpText := `
Usage: aliyun-disk getvolumename <json options>
Get volume name.
`
	return strings.TrimSpace(helpText)
}
|
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package arc
import (
"context"
"fmt"
"os"
"path/filepath"
"strconv"
"time"
"github.com/golang/protobuf/ptypes/empty"
"chromiumos/tast/common/perf"
"chromiumos/tast/common/perf/perfpb"
"chromiumos/tast/rpc"
"chromiumos/tast/services/cros/arc"
"chromiumos/tast/testing"
)
var (
	// defaultIterations is the number of boot iterations. Can be overridden
	// by var "arc.PerfBoot.iterations".
	defaultIterations = 5
)
// init registers the PerfBoot test and its ARC container/VM parameterized
// variants with the Tast framework.
func init() {
	testing.AddTest(&testing.Test{
		Func:         PerfBoot,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Signs in to DUT and measures Android boot performance metrics",
		Contacts: []string{
			"cywang@chromium.org", // Original author.
			"niwa@chromium.org",   // Tast port author.
			"arc-performance@google.com",
		},
		Attr:         []string{"group:crosbolt", "crosbolt_perbuild", "crosbolt_arc_perf_qual"},
		SoftwareDeps: []string{"chrome"},
		ServiceDeps:  []string{"tast.cros.arc.PerfBootService"},
		Vars:         []string{"arc.PerfBoot.iterations"},
		Timeout:      25 * time.Minute,
		Params: []testing.Param{{
			ExtraSoftwareDeps: []string{"android_p"},
		}, {
			Name:              "vm",
			ExtraSoftwareDeps: []string{"android_vm"},
		}},
	})
}
// perfBootOnce performs one reboot cycle on the DUT and returns the perf
// values reported by PerfBootService. Raw values are also written to saveDir,
// which must not already exist.
func perfBootOnce(ctx context.Context, s *testing.State, saveDir string) *perfpb.Values {
	d := s.DUT()
	// Connect to the gRPC server on the DUT.
	cl, err := rpc.Dial(ctx, d, s.RPCHint())
	if err != nil {
		s.Fatal("Failed to connect to the RPC service on the DUT: ", err)
	}
	defer cl.Close(ctx)
	service := arc.NewPerfBootServiceClient(cl.Conn)
	// Let the CPU cool down first so thermal throttling doesn't skew the
	// boot-time metrics.
	if _, err := service.WaitUntilCPUCoolDown(ctx, &empty.Empty{}); err != nil {
		s.Fatal("PerfBootService.WaitUntilCPUCoolDown returned an error: ", err)
	}
	s.Log("Rebooting DUT")
	if err := d.Reboot(ctx); err != nil {
		s.Fatal("Failed to reboot DUT: ", err)
	}
	// Need to reconnect to the gRPC server after rebooting DUT.
	// (The earlier defer captured the pre-reboot client, so both get closed.)
	cl, err = rpc.Dial(ctx, d, s.RPCHint())
	if err != nil {
		s.Fatal("Failed to connect to the RPC service on the DUT: ", err)
	}
	defer cl.Close(ctx)
	service = arc.NewPerfBootServiceClient(cl.Conn)
	res, err := service.GetPerfValues(ctx, &empty.Empty{})
	if err != nil {
		s.Fatal("PerfBootService.GetPerfValues returned an error: ", err)
	}
	// Save raw data for this iteration.
	if err = os.Mkdir(saveDir, 0755); err != nil {
		// Fix: include the underlying error, which was previously dropped.
		s.Fatalf("Failed to create path %s: %v", saveDir, err)
	}
	p := perf.NewValuesFromProto(res)
	if err = p.Save(saveDir); err != nil {
		s.Fatal("Failed to save perf raw data: ", err)
	}
	return res
}
// PerfBoot runs the boot measurement `iterations` times and reports the
// results: multi-valued metrics are appended as-is, single-valued metrics
// are averaged across iterations.
func PerfBoot(ctx context.Context, s *testing.State) {
	iterations := defaultIterations
	if iter, ok := s.Var("arc.PerfBoot.iterations"); ok {
		if i, err := strconv.Atoi(iter); err == nil {
			iterations = i
		} else {
			// User might want to override the default value of iterations but passed a malformed value. Fail the test to inform the user.
			s.Fatal("Invalid arc.PerfBoot.iterations value: ", iter)
		}
	}
	pv := perf.NewValues()
	// Collect single metric values from the same metric to be aggregated into the final result.
	singleMetrics := make(map[perf.Metric][]float64)
	for i := 0; i < iterations; i++ {
		// Run the boot test once; each iteration's raw data goes to its own
		// raw.NNN subdirectory of the output dir.
		saveDir := filepath.Join(s.OutDir(), fmt.Sprintf("raw.%03d", i+1))
		res := perfBootOnce(ctx, s, saveDir)
		for _, m := range res.Values {
			metric := perf.Metric{
				Name:      m.Name,
				Unit:      m.Unit,
				Direction: perf.Direction(m.Direction),
				Multiple:  m.Multiple,
			}
			if m.Multiple {
				pv.Append(metric, m.Value...)
				s.Logf("Logcat event entry: tag=%s unit=%s values=%v", m.Name, m.Unit, m.Value)
			} else {
				singleMetrics[metric] = append(singleMetrics[metric], m.Value...)
				s.Logf("Logcat event entry: tag=%s unit=%s value=%f", m.Name, m.Unit, m.Value[0])
			}
		}
	}
	// Average each single-valued metric over all iterations.
	for k, values := range singleMetrics {
		sum := 0.0
		for _, v := range values {
			sum += v
		}
		pv.Set(k, sum/float64(len(values)))
	}
	if err := pv.Save(s.OutDir()); err != nil {
		s.Error("Failed saving perf data: ", err)
	}
}
|
package hud
import (
"context"
"fmt"
"os"
"runtime"
"runtime/pprof"
"sync"
"time"
"github.com/gdamore/tcell"
"github.com/pkg/errors"
"github.com/tilt-dev/tilt/internal/analytics"
"github.com/tilt-dev/tilt/internal/hud/view"
"github.com/tilt-dev/tilt/internal/openurl"
"github.com/tilt-dev/tilt/internal/output"
"github.com/tilt-dev/tilt/internal/store"
"github.com/tilt-dev/tilt/pkg/logger"
"github.com/tilt-dev/tilt/pkg/model"
)
// DefaultRefreshInterval is the minimum redraw cadence: the main loop ensures
// the HUD updates at least this often.
const DefaultRefreshInterval = 100 * time.Millisecond

// pgUpDownCount is the number of arrow steps a PgUp/PgDn is equivalent to.
// (We don't currently try to know how big a page is; PgUp/PgDn are just
// "faster arrows".)
const pgUpDownCount = 20
// HeadsUpDisplay is a store subscriber that renders a terminal UI until its
// context is cancelled.
type HeadsUpDisplay interface {
	store.Subscriber
	Run(ctx context.Context, dispatch func(action store.Action), refreshRate time.Duration) error
}
// Hud is the terminal heads-up display. mu guards the mutable view/state
// fields below; r performs the actual terminal rendering.
type Hud struct {
	r       *Renderer
	webURL  model.WebURL
	openurl openurl.OpenURL

	currentView      view.View
	currentViewState view.ViewState
	mu               sync.RWMutex
	isStarted        bool // the Run goroutine has been launched
	isRunning        bool // the Run loop is currently active
	a                *analytics.TiltAnalytics
}
var _ HeadsUpDisplay = (*Hud)(nil)
// NewHud returns a Hud that renders with renderer, links to the web UI at
// webURL, reports interactions to analytics, and opens links via openurl.
func NewHud(renderer *Renderer, webURL model.WebURL, analytics *analytics.TiltAnalytics, openurl openurl.OpenURL) HeadsUpDisplay {
	return &Hud{
		r:       renderer,
		webURL:  webURL,
		a:       analytics,
		openurl: openurl,
	}
}
// SetNarrationMessage shows msg in the narration bar and re-renders.
func (h *Hud) SetNarrationMessage(ctx context.Context, msg string) {
	h.mu.Lock()
	defer h.mu.Unlock()
	currentViewState := h.currentViewState
	currentViewState.ShowNarration = true
	currentViewState.NarrationMessage = msg
	h.setViewState(ctx, currentViewState)
}
// Run drives the HUD event loop: it captures stdout/stderr into the logger,
// sets up the screen, then redraws every refreshRate (defaulting to
// DefaultRefreshInterval) and reacts to screen events until the context is
// cancelled or the user quits.
func (h *Hud) Run(ctx context.Context, dispatch func(action store.Action), refreshRate time.Duration) error {
	// Redirect stdout and stderr into our logger
	err := output.CaptureAllOutput(logger.Get(ctx).Writer(logger.InfoLvl))
	if err != nil {
		logger.Get(ctx).Infof("Error capturing stdout and stderr: %v", err)
	}
	h.mu.Lock()
	h.isRunning = true
	h.mu.Unlock()
	defer func() {
		h.mu.Lock()
		h.isRunning = false
		h.mu.Unlock()
	}()
	screenEvents, err := h.r.SetUp()
	if err != nil {
		return errors.Wrap(err, "setting up screen")
	}
	defer h.Close()
	if refreshRate == 0 {
		refreshRate = DefaultRefreshInterval
	}
	ticker := time.NewTicker(refreshRate)
	// Fix: release the ticker when the loop exits; previously it was never
	// stopped and leaked.
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			err := ctx.Err()
			if err != context.Canceled {
				return err
			}
			return nil
		case e := <-screenEvents:
			done := h.handleScreenEvent(ctx, dispatch, e)
			if done {
				return nil
			}
		case <-ticker.C:
			h.Refresh(ctx)
		}
	}
}
// Close tears down the terminal screen.
func (h *Hud) Close() {
	h.r.Reset()
}
// recordInteraction reports a UI interaction metric named after the action.
func (h *Hud) recordInteraction(name string) {
	h.a.Incr(fmt.Sprintf("ui.interactions.%s", name), map[string]string{})
}
// handleScreenEvent processes a single tcell event (keyboard or resize),
// mutates the view state accordingly, re-renders, and reports whether the
// HUD should exit (true only on Ctrl-C).
func (h *Hud) handleScreenEvent(ctx context.Context, dispatch func(action store.Action), ev tcell.Event) (done bool) {
	h.mu.Lock()
	defer h.mu.Unlock()
	// escape closes the active modal, if any.
	escape := func() {
		am := h.activeModal()
		if am != nil {
			am.Close(&h.currentViewState)
		}
	}
	switch ev := ev.(type) {
	case *tcell.EventKey:
		switch ev.Key() {
		case tcell.KeyEscape:
			escape()
		case tcell.KeyRune:
			// Single-letter shortcuts.
			switch r := ev.Rune(); {
			case r == 'b': // [B]rowser
				// If we have an endpoint(s), open the first one
				// TODO(nick): We might need some hints on what load balancer to
				// open if we have multiple, or what path to default to on the opened manifest.
				_, selected := h.selectedResource()
				if len(selected.Endpoints) > 0 {
					h.recordInteraction("open_preview")
					err := h.openurl(selected.Endpoints[0], logger.Get(ctx).Writer(logger.InfoLvl))
					if err != nil {
						h.currentViewState.AlertMessage = fmt.Sprintf("error opening url '%s' for resource '%s': %v",
							selected.Endpoints[0], selected.Name, err)
					}
				} else {
					h.currentViewState.AlertMessage = fmt.Sprintf("no urls for resource '%s' ¯\\_(ツ)_/¯", selected.Name)
				}
			case r == 'l': // Tilt [L]og
				if h.webURL.Empty() {
					break
				}
				url := h.webURL
				url.Path = "/"
				_ = h.openurl(url.String(), logger.Get(ctx).Writer(logger.InfoLvl))
			case r == 'k':
				h.activeScroller().Up()
				h.refreshSelectedIndex()
			case r == 'j':
				h.activeScroller().Down()
				h.refreshSelectedIndex()
			case r == 'q': // [Q]uit
				escape()
			case r == 'R': // hidden key for recovering from printf junk during demos
				h.r.screen.Sync()
			case r == 't': // [T]rigger resource update
				_, selected := h.selectedResource()
				h.recordInteraction("trigger_resource")
				dispatch(store.AppendToTriggerQueueAction{Name: selected.Name, Reason: model.BuildReasonFlagTriggerHUD})
			case r == 'x':
				h.recordInteraction("cycle_view_log_state")
				h.currentViewState.CycleViewLogState()
			case r == '1':
				h.recordInteraction("tab_all_log")
				h.currentViewState.TabState = view.TabAllLog
			case r == '2':
				h.recordInteraction("tab_build_log")
				h.currentViewState.TabState = view.TabBuildLog
			case r == '3':
				h.recordInteraction("tab_pod_log")
				h.currentViewState.TabState = view.TabRuntimeLog
			}
		case tcell.KeyUp:
			h.activeScroller().Up()
			h.refreshSelectedIndex()
		case tcell.KeyDown:
			h.activeScroller().Down()
			h.refreshSelectedIndex()
		case tcell.KeyPgUp:
			// PgUp/PgDn are just pgUpDownCount arrow presses.
			for i := 0; i < pgUpDownCount; i++ {
				h.activeScroller().Up()
			}
			h.refreshSelectedIndex()
		case tcell.KeyPgDn:
			for i := 0; i < pgUpDownCount; i++ {
				h.activeScroller().Down()
			}
			h.refreshSelectedIndex()
		case tcell.KeyEnter:
			// Open the selected resource's log page in the web UI.
			if len(h.currentView.Resources) == 0 {
				break
			}
			_, r := h.selectedResource()
			if h.webURL.Empty() {
				break
			}
			url := h.webURL
			// If the cursor is in the default position (Tiltfile), open the All log.
			if r.Name != MainTiltfileManifestName {
				url.Path = fmt.Sprintf("/r/%s/", r.Name)
			}
			h.a.Incr("ui.interactions.open_log", nil)
			_ = h.openurl(url.String(), logger.Get(ctx).Writer(logger.InfoLvl))
		case tcell.KeyRight:
			i, _ := h.selectedResource()
			h.currentViewState.Resources[i].CollapseState = view.CollapseNo
		case tcell.KeyLeft:
			i, _ := h.selectedResource()
			h.currentViewState.Resources[i].CollapseState = view.CollapseYes
		case tcell.KeyHome:
			h.activeScroller().Top()
		case tcell.KeyEnd:
			h.activeScroller().Bottom()
		case tcell.KeyCtrlC:
			h.Close()
			dispatch(NewExitAction(nil))
			return true
		case tcell.KeyCtrlD:
			dispatch(DumpEngineStateAction{})
		case tcell.KeyCtrlO:
			go writeHeapProfile(ctx)
		}
	case *tcell.EventResize:
		// since we already refresh after the switch, don't need to do anything here
		// just marking this as where sigwinch gets handled
	}
	h.refresh(ctx)
	return false
}
// isEnabled reports whether the terminal is currently in HUD mode.
func (h *Hud) isEnabled(st store.RStore) bool {
	state := st.RLockState()
	defer st.RUnlockState()
	return state.TerminalMode == store.TerminalModeHUD
}
// OnChange is the store subscription callback. On the first call it launches
// the Run loop; on every call it syncs the HUD's snapshot of store state.
// While the HUD loop isn't running, new log output is echoed to stdout so it
// is not lost.
func (h *Hud) OnChange(ctx context.Context, st store.RStore, _ store.ChangeSummary) error {
	if !h.isEnabled(st) {
		return nil
	}
	if !h.isStarted {
		h.isStarted = true
		go func() {
			err := h.Run(ctx, st.Dispatch, DefaultRefreshInterval)
			if err != nil && err != context.Canceled {
				st.Dispatch(store.PanicAction{Err: err})
			}
		}()
	}
	h.mu.Lock()
	defer h.mu.Unlock()
	toPrint := ""
	state := st.RLockState()
	view := StateToTerminalView(state, st.StateMutex())
	// if the hud isn't running, make sure new logs are visible on stdout
	if !h.isRunning {
		toPrint = state.LogStore.ContinuingString(h.currentViewState.ProcessedLogs)
	}
	h.currentViewState.ProcessedLogs = state.LogStore.Checkpoint()
	st.RUnlockState()
	fmt.Print(toPrint)
	// if we're going from 1 resource (i.e., the Tiltfile) to more than 1, reset
	// the resource selection, so that we're not scrolled to the bottom with the Tiltfile selected
	if len(h.currentView.Resources) == 1 && len(view.Resources) > 1 {
		h.resetResourceSelection()
	}
	h.currentView = view
	h.refreshSelectedIndex()
	return nil
}
// Refresh re-renders the HUD, taking the lock itself.
func (h *Hud) Refresh(ctx context.Context) {
	h.mu.Lock()
	defer h.mu.Unlock()
	h.refresh(ctx)
}
// setViewState replaces the view state and re-renders. Must hold the lock.
func (h *Hud) setViewState(ctx context.Context, currentViewState view.ViewState) {
	h.currentViewState = currentViewState
	h.refresh(ctx)
}
// refresh pads the per-resource view state to match the current resource
// count and re-renders. Must hold the lock.
func (h *Hud) refresh(ctx context.Context) {
	// TODO: We don't handle the order of resources changing
	for len(h.currentViewState.Resources) < len(h.currentView.Resources) {
		h.currentViewState.Resources = append(h.currentViewState.Resources, view.ResourceViewState{})
	}
	// Fix: removed dead code that appended a copy of the resource list onto a
	// local ViewState copy which was never read afterwards.
	h.r.Render(h.currentView, h.currentViewState)
}
// resetResourceSelection clears the scroll/selection state of the resource
// list; it will be rebuilt on the next render.
func (h *Hud) resetResourceSelection() {
	rty := h.r.RTY()
	if rty == nil {
		return
	}
	// wipe out any scroll/selection state for resources
	// it will get re-set in the next call to render
	rty.RegisterElementScroll("resources", []string{})
}
// refreshSelectedIndex copies the renderer's current resource selection into
// the view state, if the renderer and its scroller exist.
func (h *Hud) refreshSelectedIndex() {
	rty := h.r.RTY()
	if rty == nil {
		return
	}
	scroller := rty.ElementScroller("resources")
	if scroller == nil {
		return
	}
	i := scroller.GetSelectedIndex()
	h.currentViewState.SelectedIndex = i
}
// selectedResource returns the index and value of the currently selected
// resource in the HUD's current view.
func (h *Hud) selectedResource() (i int, resource view.Resource) {
	return selectedResource(h.currentView, h.currentViewState)
}
// selectedResource returns the resource at state.SelectedIndex, or the zero
// Resource (with the index unchanged) when the index is out of range.
func selectedResource(view view.View, state view.ViewState) (i int, resource view.Resource) {
	i = state.SelectedIndex
	if i >= 0 && i < len(view.Resources) {
		resource = view.Resources[i]
	}
	return i, resource
}
var _ store.Subscriber = &Hud{}
// writeHeapProfile forces a GC and dumps a heap profile to
// "tilt.heap_profile" in the current directory, logging progress and errors.
func writeHeapProfile(ctx context.Context) {
	f, err := os.Create("tilt.heap_profile")
	if err != nil {
		logger.Get(ctx).Infof("error creating file for heap profile: %v", err)
		return
	}
	// GC first so the profile reflects live objects only.
	runtime.GC()
	logger.Get(ctx).Infof("writing heap profile to %s", f.Name())
	err = pprof.WriteHeapProfile(f)
	if err != nil {
		logger.Get(ctx).Infof("error writing heap profile: %v", err)
		// Fix: previously the file handle leaked on this error path.
		_ = f.Close()
		return
	}
	err = f.Close()
	if err != nil {
		logger.Get(ctx).Infof("error closing file for heap profile: %v", err)
		return
	}
	logger.Get(ctx).Infof("wrote heap profile to %s", f.Name())
}
|
package templates
import(
"fmt"
"bytes"
"text/template"
"io/ioutil"
"encoding/json"
"gopkg.in/yaml.v2"
"github.com/devandrewgeorge/config-generator/internal/pkg/errors"
)
// New builds a Template named name from raw config data:
//   - nil: an empty template (renders to "")
//   - string: inline template text
//   - map with "file": template text read from that file
//   - map with "keys": a nested template with one child per key
//
// Any other shape yields a TemplateError.
func New(name string, data interface{}) (*Template, error) {
	template := &Template{name: name}
	// Idiom fix: bind the switched value in the type switch instead of
	// re-asserting data inside each case.
	switch d := data.(type) {
	case nil:
		// Zero-value template: Render returns the empty string.
	case string:
		template.text = &d
	case map[string]interface{}:
		if filename, found := d["file"]; found {
			content, err := ioutil.ReadFile(filename.(string))
			if err != nil {
				return nil, err
			}
			temp := string(content)
			template.text = &temp
			break
		}
		if keys, found := d["keys"]; found {
			template.templates = map[string]*Template{}
			for k, v := range keys.(map[string]interface{}) {
				child, err := New(k, v)
				if err != nil {
					return nil, err
				}
				template.templates[k] = child
			}
		} else {
			return nil, &errors.TemplateError{}
		}
	default:
		return nil, &errors.TemplateError{}
	}
	return template, nil
}
// parse_func converts a rendered string into a structured value
// (e.g. JSON- or YAML-decoded).
type parse_func func(string) (interface{}, error)

// Template is either a leaf (text set) or a nested collection of child
// templates (templates set); a zero-value leaf renders to "".
type Template struct {
	name      string
	text      *string
	templates map[string]*Template
}
// IsNested reports whether t holds child templates rather than leaf text.
func (t *Template) IsNested() bool {
	return t.templates != nil
}
// Render executes the leaf template with variables and returns the result.
// Calling it on a nested template is an error; a template with no text
// renders to "". Missing variables fail the render (missingkey=error).
func (t *Template) Render(variables map[string]string) (string, error) {
	if t.IsNested() {
		return "", &errors.TemplateError{}
	}
	if t.text == nil {
		return "", nil
	}
	renderer := template.New(t.name)
	renderer.Option("missingkey=error")
	if _, err := renderer.Parse(*t.text); err != nil {
		return "", err
	}
	result := &bytes.Buffer{}
	// Execute with an empty map rather than nil so missingkey=error applies
	// uniformly.
	if variables == nil {
		variables = map[string]string{}
	}
	if err := renderer.Execute(result, variables); err != nil {
		return "", err
	}
	return result.String(), nil
}
// parse_json decodes raw as JSON; if raw is not valid JSON it is treated as
// a plain string value.
func (t *Template) parse_json(raw string) (interface{}, error) {
	var i interface{}
	if err := json.Unmarshal([]byte(raw), &i); err == nil {
		return i, nil
	}
	// Not valid JSON: fall back to treating raw as a string literal.
	// Fix: quote via json.Marshal instead of fmt.Sprintf("\"%s\"", raw),
	// which produced invalid JSON whenever raw contained quotes, backslashes
	// or control characters.
	quoted, err := json.Marshal(raw)
	if err != nil {
		return nil, err
	}
	str := new(string)
	if err := json.Unmarshal(quoted, str); err != nil {
		return nil, fmt.Errorf("parsing %q as string: %v", raw, err)
	}
	return str, nil
}
// parse_yaml decodes raw as YAML. Unlike parse_json it needs no string
// fallback: any plain string is already valid YAML.
func (t *Template) parse_yaml(raw string) (interface{}, error) {
	var i interface{}
	err := yaml.Unmarshal([]byte(raw), &i)
	return i, err
}
// parse applies parser to every leaf string of rendered, recursing into
// nested maps, and returns the structured result.
func (t *Template) parse(rendered map[string]interface{}, parser parse_func) (map[string]interface{}, error) {
	parsed := map[string]interface{}{}
	for key, child := range rendered {
		switch v := child.(type) {
		case string:
			p, err := parser(v)
			if err != nil {
				return nil, err
			}
			parsed[key] = p
		case map[string]interface{}:
			p, err := t.parse(v, parser)
			if err != nil {
				return nil, err
			}
			parsed[key] = p
		default:
			// Fix: any non-string, non-map value previously panicked via an
			// unchecked type assertion; report it as an error instead.
			// (RenderMap only produces strings and maps, so this is a guard.)
			return nil, fmt.Errorf("unexpected value of type %T for key %q", child, key)
		}
	}
	return parsed, nil
}
// RenderYaml renders a nested template and re-encodes the parsed result as a
// YAML document. Calling it on a leaf template is an error.
func (t *Template) RenderYaml(variables map[string]string) (string, error) {
	if !t.IsNested() { return "", &errors.TemplateError{} }
	rendered, render_error := t.RenderMap(variables)
	if render_error != nil { return "", render_error }
	parsed, parse_error := t.parse(rendered, t.parse_yaml)
	if parse_error != nil { return "", parse_error }
	encoded, encoding_error := yaml.Marshal(parsed)
	if encoding_error != nil { return "", encoding_error }
	return string(encoded), nil
}
// RenderJson renders a nested template and re-encodes the parsed result as
// indented JSON. Calling it on a leaf template is an error.
func (t *Template) RenderJson(variables map[string]string) (string, error) {
	if !t.IsNested() { return "", &errors.TemplateError{} }
	rendered, render_error := t.RenderMap(variables)
	if render_error != nil { return "", render_error }
	parsed, parse_error := t.parse(rendered, t.parse_json)
	if parse_error != nil { return "", parse_error }
	encoded, encoding_error := json.MarshalIndent(parsed, "", " ")
	if encoding_error != nil { return "", encoding_error }
	return string(encoded), nil
}
// RenderMap renders every child of a nested template: leaf children become
// strings, nested children become maps. Calling it on a leaf is an error.
func (t *Template) RenderMap(variables map[string]string) (map[string]interface{}, error) {
	if !t.IsNested() { return nil, &errors.TemplateError{} }
	if len(t.templates) == 0 { return map[string]interface{}{}, nil }
	rendered := map[string]interface{}{}
	for key, child := range t.templates {
		var err error
		if child.IsNested() {
			rendered[key], err = child.RenderMap(variables)
			if err != nil { return nil, err }
		} else {
			rendered[key], err = child.Render(variables)
			if err != nil { return nil, err }
		}
	}
	return rendered, nil
}
// Equal reports whether t and o have the same name, the same leaf text
// (both nil, or both set and equal), and recursively equal children.
func (t *Template) Equal(o *Template) bool {
	if t.name != o.name {
		return false
	}
	switch {
	case t.text == nil && o.text == nil:
		// no leaf text on either side; compare children below
	case t.text == nil || o.text == nil:
		return false
	case *t.text != *o.text:
		return false
	}
	if len(t.templates) != len(o.templates) {
		return false
	}
	for key, mine := range t.templates {
		theirs, found := o.templates[key]
		if !found || !mine.Equal(theirs) {
			return false
		}
	}
	return true
}
|
package sizeof
import (
"testing"
)
type (
	// testGood: fields ordered largest-to-smallest alignment, so padding is
	// minimal.
	testGood struct {
		ID    string
		Val   int32
		Val1  int32
		Val2  int16
		test1 uint16
		test2 uint16
		test  bool
	}
	// testBad: same fields in a padding-wasteful order.
	testBad struct {
		test  bool
		test1 uint16
		test2 uint16
		Val1  int32
		Val2  int16
		ID    string
		Val   int32
	}
	// test1 embeds a struct by value alongside pointer and bool fields.
	test1 struct {
		testGood
		d *int
		f bool
	}
	// test2 embeds a struct by pointer alongside int and interface fields.
	test2 struct {
		*testGood
		f int
		a interface{}
	}
	// myStruct: poorly packed example used in the visualization tests.
	myStruct struct {
		myBool  bool    // 1 byte
		myFloat float64 // 8 bytes
		myInt   int32   // 4 bytes
		Int     int16   // 2 bytes
	}
	// myStructOptimized1: same fields, descending size order.
	myStructOptimized1 struct {
		myFloat float64 // 8 bytes
		myInt   int32   // 4 bytes
		Int     int16   // 2 bytes
		myBool  bool    // 1 byte
	}
	// myStructOptimized2: another ordering variant for comparison.
	myStructOptimized2 struct {
		myFloat float64 // 8 bytes
		myInt   int32   // 4 bytes
		myBool  bool    // 1 byte
		Int     int16   // 2 bytes
	}
)
// result is a package-level sink that keeps the compiler from eliminating
// the benchmarked call as dead code.
var result interface{}

// BenchmarkVisualizeStruct measures VisualizeStruct on a well-packed struct.
func BenchmarkVisualizeStruct(b *testing.B) {
	var s string
	for i := 0; i < b.N; i++ {
		s = VisualizeStruct(testGood{})
	}
	result = s
}
|
package bitcoin
import (
. "ftnox.com/common"
"ftnox.com/db"
"time"
"strings"
"errors"
"database/sql"
)
//////////// MPK
//////////// MPK

// MPK is a master-public-key row; Chain is the HD chain code used together
// with PubKey to derive child addresses.
type MPK struct {
	Id     int64  `json:"id" db:"id,autoinc"`
	PubKey string `json:"pubkey" db:"pubkey"`
	Chain  string `json:"chain" db:"chain"`
}

// MPKModel caches column/placeholder metadata for the mpk table.
var MPKModel = db.GetModelInfo(new(MPK))
// SaveMPK inserts mpk and fills in its autoincrement Id.
// Panics on database error.
func SaveMPK(mpk *MPK) (*MPK) {
	err := db.QueryRow(
		`INSERT INTO mpk (`+MPKModel.FieldsInsert+`)
VALUES (`+MPKModel.Placeholders+`)
RETURNING id`,
		mpk,
	).Scan(&mpk.Id)
	if err != nil { panic(err) }
	return mpk
}
// SaveMPKIfNotExists returns the stored MPK with the same pubkey if one
// exists, otherwise inserts mpk. Panics if a stored MPK has the same pubkey
// but a different chain code.
func SaveMPKIfNotExists(mpk *MPK) (*MPK) {
	// Insert MPK if doesn't exist.
	mpk_ := LoadMPKByPubKey(mpk.PubKey)
	if mpk_ != nil {
		if mpk_.Chain != mpk.Chain {
			panic(errors.New("Loaded account MPK but chain did not match"))
		} else {
			return mpk_
		}
	}
	return SaveMPK(mpk)
}
// LoadMPK returns the MPK with the given id, or nil if absent.
// Panics on any other database error.
func LoadMPK(mpkId int64) (*MPK) {
	var mpk MPK
	err := db.QueryRow(
		`SELECT `+MPKModel.FieldsSimple+`
FROM mpk WHERE id=?`,
		mpkId,
	).Scan(&mpk)
	switch db.GetErrorType(err) {
	case sql.ErrNoRows:
		return nil
	case nil:
		return &mpk
	default:
		panic(err)
	}
}
// LoadMPKByPubKey returns the MPK with the given public key, or nil if
// absent. Panics on any other database error.
func LoadMPKByPubKey(pubKey string) (*MPK) {
	var mpk MPK
	err := db.QueryRow(
		`SELECT `+MPKModel.FieldsSimple+`
FROM mpk WHERE pubkey=?`,
		pubKey,
	).Scan(&mpk)
	switch db.GetErrorType(err) {
	case sql.ErrNoRows:
		return nil
	case nil:
		return &mpk
	default:
		panic(err)
	}
}
//////////// ADDRESS
//////////// ADDRESS

// Address is a derived deposit address belonging to a user's wallet.
// ChainPath starting with 1/... is a change address.
type Address struct {
	Address   string `json:"address" db:"address"`
	Coin      string `json:"coin" db:"coin"`
	UserId    int64  `json:"userId" db:"user_id"`
	Wallet    string `json:"wallet" db:"wallet"`
	MPKId     int64  `json:"mpkId" db:"mpk_id"`
	ChainPath string `json:"chainPath" db:"chain_path"`
	ChainIdx  int32  `json:"chainIdx" db:"chain_idx"`
	Time      int64  `json:"time" db:"time"` // creation time, unix seconds
}

// AddressModel caches column/placeholder metadata for the address table.
var AddressModel = db.GetModelInfo(new(Address))
// SaveAddress inserts addr and returns it along with any database error
// (e.g. a duplicate-key error if the address already exists).
func SaveAddress(addr *Address) (*Address, error) {
	_, err := db.Exec(
		`INSERT INTO address (`+AddressModel.FieldsInsert+`)
VALUES (`+AddressModel.Placeholders+`)`,
		addr,
	)
	return addr, err
}
// LoadAddress returns the Address row for the given address string, or nil
// if absent. Panics on any other database error.
func LoadAddress(address string) (*Address) {
	var addr Address
	err := db.QueryRow(
		`SELECT `+AddressModel.FieldsSimple+`
FROM address
WHERE address=?`,
		address,
	).Scan(&addr)
	switch db.GetErrorType(err) {
	case sql.ErrNoRows:
		return nil
	case nil:
		return &addr
	default:
		panic(err)
	}
}
// LoadAddressesByWallet returns all addresses for the user's wallet.
// Panics on database error.
func LoadAddressesByWallet(userId int64, wallet string) ([]*Address) {
	rows, err := db.QueryAll(Address{},
		`SELECT `+AddressModel.FieldsSimple+`
FROM address
WHERE user_id=? AND wallet=?`,
		userId, wallet,
	)
	if err != nil { panic(err) }
	return rows.([]*Address)
}
// LoadLastAddressByWallet returns the most recently derived address (highest
// chain_idx) for the user's wallet and coin, or nil if none exist.
// Panics on any other database error.
func LoadLastAddressByWallet(userId int64, wallet string, coin string) (*Address) {
	var addr Address
	err := db.QueryRow(
		`SELECT `+AddressModel.FieldsSimple+`
FROM address
WHERE user_id=? AND wallet=? AND coin=?
ORDER BY chain_idx DESC LIMIT 1`,
		userId, wallet, coin,
	).Scan(&addr)
	switch db.GetErrorType(err) {
	case sql.ErrNoRows:
		return nil
	case nil:
		return &addr
	default:
		panic(err)
	}
}
// LoadKnownAddresses returns the address rows matching any of addrStrs.
// Returns nil for an empty input; panics on DB errors.
func LoadKnownAddresses(addrStrs []string) []*Address {
	if len(addrStrs) == 0 {
		return nil
	}
	args := make([]interface{}, 0, len(addrStrs))
	for _, s := range addrStrs {
		args = append(args, s)
	}
	// TODO: consider limitations on placeholder count. 65536?
	placeholders := "?" + strings.Repeat(",?", len(addrStrs)-1)
	result, err := db.QueryAll(Address{},
		`SELECT `+AddressModel.FieldsSimple+`
		FROM address WHERE address in (`+placeholders+`)`,
		args...,
	)
	if err != nil {
		panic(err)
	}
	return result.([]*Address)
}
// LoadAddressesByMPK returns all addresses derived from the given MPK,
// ordered by chain path then chain index. Panics on DB errors.
func LoadAddressesByMPK(mpkId int64) []*Address {
	query := `SELECT ` + AddressModel.FieldsSimple + `
		FROM address WHERE mpk_id=?
		ORDER BY (chain_path, chain_idx) ASC`
	result, err := db.QueryAll(Address{}, query, mpkId)
	if err != nil {
		panic(err)
	}
	return result.([]*Address)
}
// GetMaxAddressIndex returns the highest chain_idx recorded for the given
// coin/mpk/chainPath, or 0 when no address exists yet (SQL max() is NULL,
// and NullInt64.Int64 defaults to 0). Panics on DB errors.
func GetMaxAddressIndex(coin string, mpkId int64, chainPath string) int32 {
	var maxIdx sql.NullInt64
	err := db.QueryRow(
		`SELECT max(chain_idx)
		FROM address
		WHERE coin=? AND mpk_id=? AND chain_path=?`,
		coin, mpkId, chainPath,
	).Scan(&maxIdx)
	if err != nil {
		panic(err)
	}
	return int32(maxIdx.Int64)
}
// CreateNewAddress derives and persists the next unused address for the
// given coin/user/wallet under mpk at chainPath.
// It keeps incrementing the chain index until an insert succeeds, so the
// resulting address's path is 'chainPath/x' for the smallest unused
// nonnegative index x. Panics on unexpected DB errors.
func CreateNewAddress(coin string, userId int64, wallet string, mpk *MPK, chainPath string) *Address {
	now := time.Now().Unix()
	index := GetMaxAddressIndex(coin, mpk.Id, chainPath)
	for {
		index += 1
		address := ComputeAddress(coin, mpk.PubKey, mpk.Chain, chainPath, index)
		addr := &Address{address, coin, userId, wallet, mpk.Id, chainPath, index, now}
		addr, err := SaveAddress(addr)
		switch db.GetErrorType(err) {
		case db.ERR_DUPLICATE_ENTRY:
			// Another caller claimed this index concurrently; try the next one.
			continue
		case nil:
			// Fixed: log only after the insert succeeded. The original logged
			// before inspecting err, so duplicate/failed attempts were also
			// reported as "Created new address".
			Info("[%v] Created new address: wallet:%v/%v mpkId:%v chainPath:%v/%v",
				coin, userId, wallet, mpk.Id, chainPath, index)
			return addr
		default:
			panic(err)
		}
	}
}
//////////// PAYMENT
//////////// PAYMENT
// Payment is a received transaction output (UTXO) credited to one of our
// addresses. Spent/Orphaned use the PAYMENT_*_STATUS_* constants below.
type Payment struct {
	Id int64 `json:"-" db:"id,autoinc"`
	Coin string `json:"coin" db:"coin"`
	TxId string `json:"txid" db:"tx_id"`             // funding transaction id
	Vout uint32 `json:"vout" db:"vout"`              // output index within the funding tx
	Blockhash string `json:"blockhash" db:"blockhash"`
	Blockheight uint32 `json:"blockheight" db:"blockheight"` // 0 while unconfirmed (spendability queries require blockheight>0)
	Address string `json:"address" db:"address"`
	Amount uint64 `json:"amount" db:"amount"`        // amount in base units
	ScriptPK string `json:"scriptPk" db:"script_pk"`
	MPKId int64 `json:"mpkId" db:"mpk_id"`
	Spent uint32 `json:"-" db:"spent"`               // PAYMENT_SPENT_STATUS_*
	WTxId int64 `json:"-" db:"wtx_id"`               // presumably the withdrawal tx that spends this output — confirm
	Orphaned uint32 `json:"orphaned" db:"orphaned"`  // PAYMENT_ORPHANED_STATUS_*
	Confirms uint32 `json:"confirms"`                // no db tag: not persisted, presumably filled at read time — confirm
	Time int64 `json:"time" db:"time"`
	Updated int64 `json:"updated" db:"updated"`
}
var PaymentModel = db.GetModelInfo(new(Payment))
// Status values for Payment.Orphaned and Payment.Spent.
const (
	PAYMENT_ORPHANED_STATUS_GOOD = 0     // output's block is on the main chain
	PAYMENT_ORPHANED_STATUS_ORPHANED = 1 // output's block was orphaned
	PAYMENT_SPENT_STATUS_AVAILABLE = 0   // spendable
	PAYMENT_SPENT_STATUS_CHECKEDOUT = 1  // reserved for an in-flight withdrawal
	PAYMENT_SPENT_STATUS_SPENT = 2       // consumed by a withdrawal
)
// SavePayment inserts p on the given connection, defaulting Time to now when
// unset, and fills p.Id from the RETURNING clause.
func SavePayment(c db.MConn, p *Payment) (*Payment, error) {
	if p.Time == 0 {
		p.Time = time.Now().Unix()
	}
	query := `INSERT INTO payment (` + PaymentModel.FieldsInsert + `)
		VALUES (` + PaymentModel.Placeholders + `)
		RETURNING id`
	err := c.QueryRow(query, p).Scan(&p.Id)
	return p, err
}
// UpdatePayment refreshes the block-related fields of the payment row keyed
// by (tx_id, vout) and bumps its updated timestamp. Panics on DB errors.
func UpdatePayment(c db.MConn, p *Payment) {
	p.Updated = time.Now().Unix()
	if _, err := c.Exec(
		`UPDATE payment SET blockhash=?, blockheight=?, orphaned=?, time=?, updated=? WHERE tx_id=? AND vout=?`,
		p.Blockhash, p.Blockheight, p.Orphaned, p.Time, p.Updated, p.TxId, p.Vout,
	); err != nil {
		panic(err)
	}
}
// UpdatePaymentsSpent transitions all given payments from oldStatus to
// newStatus (spent flag) and stamps them with the withdrawal tx id, inside
// the caller's transaction. Panics unless exactly len(paymentIds) rows
// changed — fewer means some payment was not in oldStatus.
func UpdatePaymentsSpent(tx *db.ModelTx, paymentIds []interface{}, oldStatus, newStatus int, wtxId int64) {
	if len(paymentIds) == 0 { return }
	now := time.Now().Unix()
	res, err := tx.Exec(
		`UPDATE payment
		SET spent=?, wtx_id=?, updated=?
		WHERE spent=? AND id IN (`+Placeholders(len(paymentIds))+`)`,
		append([]interface{}{newStatus, wtxId, now, oldStatus}, paymentIds...)...,
	)
	if err != nil { panic(err) }
	count, err := res.RowsAffected()
	// Fixed: check the RowsAffected error before trusting count (the original
	// compared count first and only then inspected err).
	if err != nil { panic(err) }
	if int(count) != len(paymentIds) {
		panic(NewError("Unexpected affected rows count: %v Expected %v", count, len(paymentIds)))
	}
}
// LoadPaymentByTxId fetches the payment row keyed by (tx_id, vout).
// Returns nil when no such row exists; panics on any other DB error.
func LoadPaymentByTxId(txId string, vout uint32) *Payment {
	var payment Payment
	err := db.QueryRow(
		`SELECT `+PaymentModel.FieldsSimple+` FROM payment
		WHERE tx_id=? AND vout=?`,
		txId, vout,
	).Scan(&payment)
	// Fixed: classify via db.GetErrorType like every other loader in this
	// file; switching on the raw err can miss driver-wrapped no-rows errors.
	switch db.GetErrorType(err) {
	case sql.ErrNoRows: return nil
	case nil: return &payment
	default: panic(err)
	}
}
// LoadPayments returns the most recent payments, newest first, up to limit.
// Panics on DB errors.
func LoadPayments(limit uint) []*Payment {
	query := `SELECT ` + PaymentModel.FieldsSimple + ` FROM payment
		ORDER BY id DESC LIMIT ?`
	result, err := db.QueryAll(Payment{}, query, limit)
	if err != nil {
		panic(err)
	}
	return result.([]*Payment)
}
// LoadPaymentsByBlockhash loads payments associated with a given block(hash),
// regardless of orphaned/spent status. Panics on DB errors.
func LoadPaymentsByBlockhash(blockhash string) []*Payment {
	query := `SELECT ` + PaymentModel.FieldsSimple + ` FROM payment
		WHERE blockhash=?`
	result, err := db.QueryAll(Payment{}, query, blockhash)
	if err != nil {
		panic(err)
	}
	return result.([]*Payment)
}
// LoadSpendablePaymentsByAmount returns up to limit unspent, non-orphaned
// payments with min<=amount<=max, confirmed at or before reqHeight, smallest
// amounts first. Panics on DB errors.
func LoadSpendablePaymentsByAmount(mpkId int64, coin string, min, max uint64, reqHeight uint32, limit uint) []*Payment {
	rows, err := db.QueryAll(Payment{},
		// Fixed: the column is mpk_id (see the db tag on Payment.MPKId and
		// every sibling query); the original said "mpkId=?".
		`SELECT `+PaymentModel.FieldsSimple+` FROM payment
		WHERE mpk_id=? AND coin=? AND spent=0 AND orphaned=0 AND ?<=amount AND amount<=? AND blockheight>0 AND blockheight<=?
		ORDER BY amount ASC LIMIT ?`,
		mpkId, coin, min, max, reqHeight, limit,
	)
	if err != nil { panic(err) }
	return rows.([]*Payment)
}
// LoadLargestSpendablePaymentLessThan returns the spendable payment with the
// greatest amount <= amount (confirmed at or before reqHeight, excluding the
// given payments), or nil when none qualifies. Panics on other DB errors.
func LoadLargestSpendablePaymentLessThan(mpkId int64, coin string, amount uint64, reqHeight uint32, exclude []*Payment) *Payment {
	excludeIds := Map(exclude, "Id")
	if len(excludeIds) == 0 { excludeIds = []interface{}{-1} } // hack: keep the NOT IN clause valid
	var payment Payment
	err := db.QueryRow(
		// Fixed: without ORDER BY/LIMIT the row picked was arbitrary, not the
		// largest as the function name promises.
		`SELECT `+PaymentModel.FieldsSimple+`
		FROM payment WHERE
		mpk_id=? AND coin=? AND spent=0 AND orphaned=0 AND amount<=? AND blockheight>0 AND blockheight<=?
		AND id NOT IN (`+Placeholders(len(excludeIds))+`)
		ORDER BY amount DESC LIMIT 1`,
		append([]interface{}{mpkId, coin, amount, reqHeight}, excludeIds...)...).Scan(&payment)
	switch db.GetErrorType(err) {
	case sql.ErrNoRows: return nil
	case nil: return &payment
	default: panic(err)
	}
}
// LoadSmallestSpendablePaymentGreaterThan returns the spendable payment with
// the least amount >= amount (confirmed at or before reqHeight, excluding the
// given payments), or nil when none qualifies. Panics on other DB errors.
func LoadSmallestSpendablePaymentGreaterThan(mpkId int64, coin string, amount uint64, reqHeight uint32, exclude []*Payment) *Payment {
	excludeIds := Map(exclude, "Id")
	if len(excludeIds) == 0 { excludeIds = []interface{}{-1} } // hack: keep the NOT IN clause valid
	var payment Payment
	err := db.QueryRow(
		// Fixed: without ORDER BY/LIMIT the row picked was arbitrary, not the
		// smallest as the function name promises.
		`SELECT `+PaymentModel.FieldsSimple+`
		FROM payment WHERE
		mpk_id=? AND coin=? AND spent=0 AND orphaned=0 AND amount>=? AND blockheight>0 AND blockheight<=?
		AND id NOT IN (`+Placeholders(len(excludeIds))+`)
		ORDER BY amount ASC LIMIT 1`,
		append([]interface{}{mpkId, coin, amount, reqHeight}, excludeIds...)...).Scan(&payment)
	// The unreachable trailing "return &payment" from the original is gone:
	// every switch branch below terminates.
	switch db.GetErrorType(err) {
	case sql.ErrNoRows: return nil
	case nil: return &payment
	default: panic(err)
	}
}
// LoadOldestSpendablePaymentsBetween returns up to limit unspent,
// non-orphaned payments with min<=amount<=max confirmed at or before
// reqHeight, oldest (lowest id) first. Panics on DB errors.
func LoadOldestSpendablePaymentsBetween(mpkId int64, coin string, min, max uint64, limit int, reqHeight uint32) []*Payment {
	query := `SELECT ` + PaymentModel.FieldsSimple + `
		FROM payment
		WHERE mpk_id=? AND coin=? AND spent=0 AND orphaned=0 AND ?<=amount AND amount<=? AND blockheight>0 AND blockheight<=?
		ORDER BY id ASC LIMIT ?`
	result, err := db.QueryAll(Payment{}, query, mpkId, coin, min, max, reqHeight, limit)
	if err != nil {
		panic(err)
	}
	return result.([]*Payment)
}
// NOTE: only use this for gathering statistical data. Try really hard not to introduce randomness into the system, e.g. sweep transactions.
// LoadRandomSpendablePaymentsBetween returns up to limit unspent,
// non-orphaned payments with min<=amount<=max confirmed at or before
// reqHeight, in random order. Panics on DB errors.
func LoadRandomSpendablePaymentsBetween(mpkId int64, coin string, min, max uint64, limit int, reqHeight uint32) []*Payment {
	query := `SELECT ` + PaymentModel.FieldsSimple + `
		FROM payment
		WHERE mpk_id=? AND coin=? AND spent=0 AND orphaned=0 AND ?<=amount AND amount<=? AND blockheight>0 AND blockheight<=?
		ORDER BY random() LIMIT ?`
	result, err := db.QueryAll(Payment{}, query, mpkId, coin, min, max, reqHeight, limit)
	if err != nil {
		panic(err)
	}
	return result.([]*Payment)
}
//////////// BLOCK
//////////// BLOCK
// Block tracks one blockchain block we have processed for a coin.
// Status takes the BLOCK_STATUS_* values below.
type Block struct {
	Coin string `db:"coin"`
	Height uint32 `db:"height"`
	Hash string `db:"hash"`
	Status uint32 `db:"status"` // BLOCK_STATUS_*
	Time int64 `db:"time"`      // creation time (unix seconds); defaulted by SaveBlock
	Updated int64 `db:"updated"`
}
var BlockModel = db.GetModelInfo(new(Block))
// Status values for Block.Status.
const (
	BLOCK_STATUS_GOOD = 0 // all payments are good.
	BLOCK_STATUS_PROCESSING = 1 // was transitioning from GOOD -> ORPHANED or ORPHANED -> GOOD.
	BLOCK_STATUS_ORPHANED = 2 // all payments are orphaned.
	BLOCK_STATUS_GOOD_CREDITED = 10 // block is good and deposits were credited.
)
// SaveBlock inserts b, defaulting Time to now when unset.
// Panics on DB errors and returns b for chaining.
func SaveBlock(b *Block) *Block {
	if b.Time == 0 {
		b.Time = time.Now().Unix()
	}
	query := `INSERT INTO block (` + BlockModel.FieldsInsert + `)
		VALUES (` + BlockModel.Placeholders + `)`
	if _, err := db.Exec(query, b); err != nil {
		panic(err)
	}
	return b
}
// LoadBlock fetches the block row with the given hash.
// Returns nil when no such row exists; panics on any other DB error.
func LoadBlock(hash string) *Block {
	var block Block
	err := db.QueryRow(
		`SELECT `+BlockModel.FieldsSimple+`
		FROM block WHERE hash=?`,
		hash,
	).Scan(&block)
	errType := db.GetErrorType(err)
	if errType == sql.ErrNoRows {
		return nil
	}
	if errType != nil {
		panic(err)
	}
	return &block
}
// LoadBlockAtHeight fetches the non-orphaned block (status GOOD, PROCESSING
// or GOOD_CREDITED) at the given height for a coin.
// Returns nil when no such row exists; panics on any other DB error.
func LoadBlockAtHeight(coin string, height uint32) *Block {
	var block Block
	err := db.QueryRow(
		`SELECT `+BlockModel.FieldsSimple+`
		FROM block WHERE coin=? AND height=? AND (status=0 OR status=1 OR status=10)`,
		coin, height,
	).Scan(&block)
	errType := db.GetErrorType(err)
	if errType == sql.ErrNoRows {
		return nil
	}
	if errType != nil {
		panic(err)
	}
	return &block
}
// LoadLastBlocks returns the n most recent non-orphaned blocks for a coin,
// highest first. Panics on DB errors.
func LoadLastBlocks(coin string, n uint32) []*Block {
	query := `SELECT ` + BlockModel.FieldsSimple + ` FROM block
		WHERE coin=? AND (status=0 OR status=1 OR status=10)
		ORDER BY height DESC LIMIT ?`
	result, err := db.QueryAll(Block{}, query, coin, n)
	if err != nil {
		panic(err)
	}
	return result.([]*Block)
}
// UpdateBlockStatus transitions the block with the given hash from oldStatus
// to newStatus. Panics unless exactly one row changed — zero means the block
// was not in oldStatus (or does not exist).
func UpdateBlockStatus(hash string, oldStatus, newStatus uint32) {
	now := time.Now().Unix()
	res, err := db.Exec(
		`UPDATE block
		SET status=?, updated=?
		WHERE status=? AND hash=?`,
		newStatus, now, oldStatus, hash,
	)
	if err != nil { panic(err) }
	count, err := res.RowsAffected()
	// Fixed: check the RowsAffected error before trusting count (the
	// original compared count first), and report the actual count — the old
	// message claimed "none changed" even when count was greater than one.
	if err != nil { panic(err) }
	if int(count) != 1 {
		panic(NewError("Expected to update 1 block's status, but %v changed", count))
	}
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Package vendorutils contains utils to fetch OEM info.
package vendorutils
import (
"context"
"os"
"strings"
"chromiumos/tast/errors"
"chromiumos/tast/local/crosconfig"
)
// FetchVendor returns vendor name using new CrOSConfig based approach and deprecated sysfs approach as a backup.
func FetchVendor(ctx context.Context) (string, error) {
if got, err := crosconfig.Get(ctx, "/branding", "oem-name"); err != nil && !crosconfig.IsNotFound(err) {
return "", errors.Wrap(err, "failed to get OEM name from CrOSConfig")
} else if err == nil {
return got, nil
}
if got, err := os.ReadFile("/sys/firmware/vpd/ro/oem_name"); err != nil && !os.IsNotExist(err) {
return "", errors.Wrap(err, "failed to get OEM name from VPD field")
} else if err == nil {
return string(got), nil
}
vendorBytes, err := os.ReadFile("/sys/devices/virtual/dmi/id/sys_vendor")
if err != nil {
return "", errors.Wrap(err, "failed to read vendor name")
}
vendor := strings.TrimSpace(string(vendorBytes))
return vendor, nil
}
|
package main
import (
"context"
"log"
"time"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
pb "github.com/willdot/grpccontext/server/proto"
)
const (
address = "localhost:50051"
)
// main connects to the gRPC server and exercises both endpoints; the
// registered interceptor attaches metadata and a timeout to every call.
func main() {
	conn, err := grpc.Dial(address, grpc.WithInsecure(), grpc.WithBlock(), grpc.WithUnaryInterceptor(clientInterceptor))
	if err != nil {
		log.Fatalf("failed to connect: %v", err)
	}
	defer conn.Close()
	client := pb.NewTestClient(conn)
	// make gRPC request to server to DoSomething
	resp, err := client.DoSomething(context.Background(), &pb.Input{Id: 1})
	if err != nil {
		log.Fatalf("failed to DoSomething: %v", err)
	}
	log.Printf("response: %s", resp.GetResult())
	// make gRPC request to server to RunLongTask
	if _, err = client.RunLongTask(context.Background(), &pb.Input{Id: 2}); err != nil {
		log.Fatalf("failed to RunLongTask: %v", err)
	}
}
// clientInterceptor is a unary client interceptor that bounds every call with
// a 3s timeout and attaches identifying metadata before invoking the RPC.
func clientInterceptor(ctx context.Context, method string, req interface{}, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	// Fixed: derive the timeout from the caller's ctx rather than
	// context.Background() — the original silently dropped any deadline,
	// cancellation or values the caller had already attached.
	ctx, cancel := context.WithTimeout(ctx, time.Second*3)
	defer cancel()
	// create the headers
	headers := metadata.Pairs(
		"source", "client service")
	// add the headers to the context
	ctx = metadata.NewOutgoingContext(ctx, headers)
	// make request
	return invoker(ctx, method, req, reply, cc, opts...)
}
|
package services
import (
"database/sql"
"io/ioutil"
"os"
"strings"
"github.com/gocarina/gocsv"
)
const (
allEntries = "SELCET entryid, firstname, lastname, email, phone FROM entries;"
entryByID = "SELECT entryid, firstname, lastname, email, phone FROM entries WHERE entryid=%1;"
createEntry = "INSERT INTO entries (firstname, lastname, email, phone) VALUES (%2, %3, %4, %5) RETURNING entryid;"
deleteEntry = "DELETE FROM entries WHERE entryid = %1"
updateEntry = "UPDATE entries SET firstname=%2, lastname=%3, email=%4, phone=%5 WHERE entryid=%1"
checkIfEmailExists = "SELECT entryid, email FROM entries WHERE email=$1"
)
// InitDB takes in a SQL object for the package to use and wraps it in a
// PSQLService.
func InitDB(database *sql.DB) *PSQLService {
	return &PSQLService{DB: database}
}
// AllEntries returns a list of all contact entries.
func (p *PSQLService) AllEntries() ([]*Entry, error) {
	all := []*Entry{}
	rows, err := p.DB.Query(allEntries)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	for rows.Next() {
		newEntry := &Entry{}
		if err := rows.Scan(&newEntry.ID, &newEntry.FirstName, &newEntry.LastName, &newEntry.Email, &newEntry.Phone); err != nil {
			return nil, err
		}
		all = append(all, newEntry)
	}
	// Fixed: surface iteration errors — the original ignored rows.Err(),
	// silently returning a truncated list on mid-scan failures.
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return all, nil
}
// EntryByID returns a single entry by id.
func (p *PSQLService) EntryByID(id string) (*Entry, error) {
	if id == "" {
		return nil, errNoID
	}
	entry := &Entry{}
	err := p.DB.QueryRow(entryByID, id).Scan(&entry.ID, &entry.FirstName, &entry.LastName, &entry.Email, &entry.Phone)
	if err != nil {
		return nil, err
	}
	return entry, nil
}
// checkIfEmailExists reports errEmailExists when a different entry already
// uses entry's email address; otherwise it returns nil.
// NOTE(review): scan errors (including sql.ErrNoRows) are deliberately
// treated as "email not taken", preserving the original best-effort behavior.
func (p *PSQLService) checkIfEmailExists(entry *Entry) error {
	found := &Entry{}
	err := p.DB.QueryRow(checkIfEmailExists, entry.Email).Scan(&found.ID, &found.Email)
	if err != nil {
		return nil
	}
	if found.ID == entry.ID {
		return nil
	}
	return errEmailExists
}
// AddEntry takes an entry and inserts it into the DB generating an ID.
// The email is lowercased and checked for uniqueness first.
func (p *PSQLService) AddEntry(entry *Entry) error {
	entry.Email = strings.ToLower(entry.Email)
	if err := p.checkIfEmailExists(entry); err != nil {
		return err
	}
	// NOTE(review): entry.ID is passed even though the row id is generated by
	// the DB — confirm this matches createEntry's placeholder numbering.
	_, err := p.DB.Exec(createEntry, entry.ID, entry.FirstName, entry.LastName, entry.Email, entry.Phone)
	return err
}
// UpdateEntry will replace an existing entry with new values.
// The email is lowercased and checked for uniqueness first.
func (p *PSQLService) UpdateEntry(entry *Entry) error {
	entry.Email = strings.ToLower(entry.Email)
	if err := p.checkIfEmailExists(entry); err != nil {
		return err
	}
	_, err := p.DB.Exec(updateEntry, entry.ID, entry.FirstName, entry.LastName, entry.Email, entry.Phone)
	return err
}
// DeleteEntry will delete a row with given id.
func (p *PSQLService) DeleteEntry(id string) error {
	if _, err := p.DB.Exec(deleteEntry, id); err != nil {
		return err
	}
	return nil
}
// EntriesToCSV will get all entries and write them to a CSV file.
// Returns the open temporary file containing the CSV data; the caller is
// responsible for closing (and eventually removing) it.
func (p *PSQLService) EntriesToCSV() (*os.File, error) {
	// Reuse AllEntries rather than duplicating its query/scan loop.
	entries, err := p.AllEntries()
	if err != nil {
		return nil, err
	}
	entriesFile, err := ioutil.TempFile(os.TempDir(), "tmp.*.csv")
	if err != nil {
		return nil, err
	}
	if err := gocsv.MarshalFile(&entries, entriesFile); err != nil {
		// Fixed: don't leak the temp file on marshal failure.
		entriesFile.Close()
		os.Remove(entriesFile.Name())
		return nil, err
	}
	return entriesFile, nil
}
|
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cpuid
import (
"io/ioutil"
"os"
"regexp"
"strings"
"testing"
"gvisor.dev/gvisor/pkg/hostos"
)
// TestHostFeatureFlags tests that all features detected by HostFeatureSet are
// on the host.
//
// It does *not* verify that all features reported by the host are detected by
// HostFeatureSet. Linux has synthetic Linux-specific features that have no
// analog in the actual CPUID feature set.
// TestHostFeatureFlags tests that all features detected by HostFeatureSet are
// on the host (see the package-level comment above for what it does NOT
// verify).
func TestHostFeatureFlags(t *testing.T) {
	// Extract the kernel version.
	version, err := hostos.KernelVersion()
	if err != nil {
		t.Fatalf("Unable to parse kernel version: %v", err)
	}
	// Extract all cpuinfo flags.
	// Fixed: the read error was previously discarded with _, which would have
	// produced a confusing "Unable to extract flags from \"\"" failure.
	cpuinfoBytes, err := ioutil.ReadFile("/proc/cpuinfo")
	if err != nil {
		t.Fatalf("Unable to read /proc/cpuinfo: %v", err)
	}
	cpuinfo := string(cpuinfoBytes)
	re := regexp.MustCompile(`(?m)^flags\s+: (.*)$`)
	m := re.FindStringSubmatch(cpuinfo)
	if len(m) != 2 {
		t.Fatalf("Unable to extract flags from %q", cpuinfo)
	}
	cpuinfoFlags := make(map[string]struct{})
	for _, f := range strings.Split(m[1], " ") {
		cpuinfoFlags[f] = struct{}{}
	}
	// Check against host flags.
	fs := HostFeatureSet()
	for feature, info := range allFeatures {
		// Special cases not consistently visible. We don't mind if
		// they are exposed in earlier versions.
		if archSkipFeature(feature, version) {
			continue
		}
		// Check against the flags.
		_, ok := cpuinfoFlags[feature.String()]
		if !info.shouldAppear && ok {
			t.Errorf("Unexpected flag: %v", feature)
		} else if info.shouldAppear && fs.HasFeature(feature) && !ok {
			t.Errorf("Missing flag: %v", feature)
		}
	}
}
// TestMain initializes package state (Initialize) before running the suite.
func TestMain(m *testing.M) {
	Initialize()
	os.Exit(m.Run())
}
|
package products
import (
"distribution-bridge/env"
"distribution-bridge/http"
"distribution-bridge/logger"
"encoding/json"
"errors"
"fmt"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"time"
)
// Sync products from seller account to buyer account.
func SyncProducts() {
page := 0
allProductsFound := false
productCount := 0
// Fetch all products from seller accounts
for !allProductsFound {
products, err := getProductsFromAPI(page, env.GetBuyerAPIKey())
if err != nil {
logger.Error(fmt.Sprintf("failed to get products on page %d", page), err)
return
}
productCount = productCount + len(products)
if len(products) == 0 {
logger.Info(fmt.Sprintf("All products have been found [%d]", productCount))
allProductsFound = true
continue
}
// For each product, it's consider to be new or exist on the buyer account
for _, product := range products {
sellerProduct, exists, err := getProductFromAPIUsingCode(product.Code, env.GetSellerAPIKey())
if err != nil {
logger.Error(fmt.Sprintf("failed to get product [%s]", product.ID), err)
return
}
// Apply PIM updates (This would be any configured overwrites that have been setup)
// Not built :: Ex. Loops through Google sheet and when product code = product.Code then updates with columns for corresponding data. Or PIM provider, we make an outbound call to them and they return the updated product
if exists {
logger.Info(fmt.Sprintf("Product [%s] exists and checking for updates.", product.Code))
// Check changes, then update
err := productsMatch(product, sellerProduct)
if err == nil {
logger.Info(fmt.Sprintf("Products match between %s and %s", product.ID, sellerProduct.ID))
continue
} else {
logger.Info(fmt.Sprintf("Products did not match between %s and %s b/c %+v", product.ID, sellerProduct.ID, err))
// Temp save seller product ID
sellerProductID := sellerProduct.ID
sellerProduct = product
sellerProduct.ID = sellerProductID
// Mark updated product as inactive
if env.ProductUpdatesToInActive() {
sellerProduct.Active = false
}
err = updateProductOnAPI(env.GetSellerAPIKey(), sellerProduct)
if err != nil {
logger.Error(fmt.Sprintf("failed to update the product on seller account (Existing) :: %s", sellerProduct.ID), err)
}
}
} else {
logger.Info(fmt.Sprintf("Product [%s] does not exist and creating new instance.", product.Code))
// Create new product on buyer account
productID, err := createProductOnAPI(product, env.GetSellerAPIKey())
if err != nil {
logger.Error(fmt.Sprintf("failed to create new product on seller account :: Seller Product ID [%s]", product.ID), err)
// Not supported but push error to the seller product
return
}
logger.Info(fmt.Sprintf("New product created on buyer account :: %s --> %s", product.ID, productID))
// Mark new product as inactive
if env.NewProductToInActive() {
product.Active = false
sellerProduct, _, err = getProductFromAPIUsingCode(product.Code, env.GetSellerAPIKey())
if err != nil {
logger.Error(fmt.Sprintf("failed to get product for seller [%s]", product.ID), err)
return
}
err := updateProductOnAPI(env.GetSellerAPIKey(), sellerProduct)
logger.Error(fmt.Sprintf("failed to mark product as inactive on seller account (New) :: %s", sellerProduct.ID), err)
// Not supported but push error to the buyer and seller product
return
}
}
time.Sleep(time.Second * 1) // Max of 4 API calls in this block, so this is bad rate limiting
}
page++
}
}
// productsMatch custom method for comparing two products. IDs will be completely different in both.
// Returns nil when the products are equivalent, otherwise an error naming the
// first difference found.
func productsMatch(product Product, productTwo Product) error {
	// Images: every image in product must appear (by Src) in productTwo with
	// the same position.
	if len(product.Images) != len(productTwo.Images) {
		return errors.New("unequal number of images between both products")
	}
	foundSrcs := 0
	for _, imageFromOne := range product.Images {
		for _, imageFromTwo := range productTwo.Images {
			if imageFromOne.Src == imageFromTwo.Src {
				foundSrcs++
				if imageFromOne.Position != imageFromTwo.Position {
					return fmt.Errorf("image positions do not match for %s", imageFromOne.Src)
				}
				break
			}
		}
	}
	if foundSrcs != len(product.Images) {
		return fmt.Errorf("did not find all matches for all images (Found %d of %d)", foundSrcs, len(product.Images))
	}
	// Variants
	if len(product.Variants) != len(productTwo.Variants) {
		return errors.New("unequal number of variants")
	}
	foundVariants := 0
	for _, variantOne := range product.Variants {
		// Fixed: the inner loop iterated product.Variants (the same product),
		// so each variant matched itself and differences were never detected.
		for _, variantTwo := range productTwo.Variants {
			if variantOne.VariantID == variantTwo.VariantID {
				foundVariants++
				if !cmp.Equal(variantOne, variantTwo, cmpopts.IgnoreFields(Variants{}, "ID")) {
					return fmt.Errorf("variants do not match for %s and %s", variantOne.ID, variantTwo.ID)
				}
				break
			}
		}
	}
	if foundVariants != len(product.Variants) {
		return fmt.Errorf("did not find all variants (Found %d of %d)", foundVariants, len(product.Variants))
	}
	// Options
	if len(product.Options) != len(productTwo.Options) {
		return errors.New("unequal number of options")
	}
	foundOptions := 0
	for _, optionOne := range product.Options {
		// Fixed: same self-comparison bug as the variants loop above.
		for _, optionTwo := range productTwo.Options {
			if optionOne.Name == optionTwo.Name {
				foundOptions++
				if !cmp.Equal(optionOne, optionTwo, cmpopts.IgnoreFields(Options{}, "ID")) {
					return fmt.Errorf("did not find matching options between %s and %s", optionOne.Name, optionTwo.Name)
				}
				break
			}
		}
	}
	if foundOptions != len(product.Options) {
		return fmt.Errorf("did not find all options (Found %d of %d)", foundOptions, len(product.Options))
	}
	// All other fields that should match
	if !cmp.Equal(product, productTwo, cmpopts.IgnoreFields(Product{}, "ID", "Active", "Images", "Variants", "Options", "Created", "Updated", "CompanyObjectID", "CompanyID")) {
		return errors.New("products do not match")
	}
	return nil
}
// getProductsFromAPI calls the get products endpoint for one page and
// decodes the JSON response.
func getProductsFromAPI(page int, apiKey string) ([]Product, error) {
	body, err := http.GetRequest("/products", page, apiKey)
	if err != nil {
		return []Product{}, err
	}
	var products []Product
	if err := json.Unmarshal(body, &products); err != nil {
		return []Product{}, err
	}
	return products, nil
}
// getProductFromAPIUsingCode looks a product up by its code. The bool result
// reports whether a matching product was found.
func getProductFromAPIUsingCode(code string, apiKey string) (Product, bool, error) {
	resp, err := http.GetRequest(fmt.Sprintf("/products?productCode=%s", code), 0, apiKey)
	if err != nil {
		return Product{}, false, err
	}
	// Fixed: removed the leftover debug Printf of err here — err is always
	// nil at this point, so it only printed noise on every lookup.
	var response []Product
	err = json.Unmarshal(resp, &response)
	if err != nil {
		return Product{}, false, err
	}
	// An empty list means the code is unknown (the API reports "not found"
	// as zero results rather than an error).
	if len(response) == 0 {
		return Product{}, false, nil
	}
	return response[0], true, nil
}
// createProductOnAPI POSTs the product and returns the ID assigned by the
// remote API.
func createProductOnAPI(product Product, apiKey string) (string, error) {
	jsonPayload, err := json.Marshal(product)
	if err != nil {
		return "", err
	}
	resp, err := http.PostRequest("/products", apiKey, jsonPayload)
	if err != nil {
		return "", err
	}
	var response Product
	err = json.Unmarshal(resp, &response)
	if err != nil {
		return "", err
	}
	// Fixed: return the ID of the product the API created, not the input's
	// ID — the caller logs "product.ID --> productID", expecting the new ID.
	// Also removed the leftover debug Printf of the whole product.
	return response.ID, nil
}
// updateProductOnAPI PUTs the product to /products/{id}.
func updateProductOnAPI(apiKey string, product Product) error {
	payload, err := json.Marshal(product)
	if err != nil {
		return err
	}
	body, err := http.PutRequest(fmt.Sprintf("/products/%s", product.ID), apiKey, payload)
	if err != nil {
		return err
	}
	// The decoded response is unused; unmarshal only to validate the body.
	var updated Product
	return json.Unmarshal(body, &updated)
}
// Buyer account calling a seller endpoint (This should be fixed)
func GetIDOfVariantBySellerVariantCode(apiKey string, sellerVariantCode string) (string, error) {
page := 0
found := false
for !found {
resp, err := http.GetRequest("/products", page, apiKey)
if err != nil {
return "", err
}
var products []Product
err = json.Unmarshal(resp, &products)
if err != nil {
return "", err
}
if len(products) == 0 {
found = true
}
for _, product := range products {
for _, variant := range product.Variants {
if variant.Code == sellerVariantCode {
return variant.ID, nil
}
}
}
page++
time.Sleep(time.Second * 1)
}
return "", errors.New(fmt.Sprintf("ID of variant not found using variantID/Code (%s)", sellerVariantCode))
} |
package main
import (
"fmt"
)
// tabla is a package-level array; its elements start at zero.
var tabla [10]int

// main demonstrates array declaration, literal initialization and iteration.
func main() {
	tabla[1] = 1
	tabla[5] = 15
	var tabla2 = [10]int{1, 2, 3, 4, 6, 7, 8, 9, 10, 11}
	tabla3 := [10]int{1, 2, 3, 4, 6, 7, 8, 9, 10, 11}
	fmt.Println(tabla2)
	fmt.Println(tabla3)
	// Walk tabla3 one element per line ('len' reports the array length).
	for _, v := range tabla3 {
		fmt.Println(v)
	}
	slices()
	variante4()
}
// matriz demonstrates a two-dimensional array (currently not called).
func matriz() {
	var matriz [5][7]int // zero-initialized: every element starts at 0, not 1
	matriz[4][3] = 4 //indices start at 0
	fmt.Println(matriz[4][3])
}
// slices demonstrates a slice literal.
func slices() {
	matriz := []int{2, 3, 4, 4}
	fmt.Println(matriz)
}
// variante2 demonstrates slicing (currently not called).
func variante2() {
	elementos := [5]int{1, 2, 3, 4, 5}
	porcion := elementos[2:4] // slicing carves a smaller, bounded view out of a larger array
	fmt.Println(porcion)
}
// variante3 demonstrates make with separate length and capacity
// (currently not called).
func variante3() {
	elementos := make([]int, 5, 20)
	fmt.Printf("largo es %d, capacidad es %d", len(elementos), cap(elementos))
}
// variante4 shows how append grows a slice's length and capacity.
func variante4() {
	nums := make([]int, 0, 0)
	i := 0
	for i < 10 {
		nums = append(nums, i)
		fmt.Println(len(nums), cap(nums))
		i++
	}
}
|
package join
import (
"crypto/rand"
"encoding/binary"
"encoding/json"
"fmt"
"time"
"github.com/jmoiron/sqlx"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/brocaar/loraserver/api/gw"
"github.com/brocaar/loraserver/internal/common"
"github.com/brocaar/loraserver/internal/node"
"github.com/brocaar/loraserver/internal/storage"
"github.com/brocaar/lorawan"
)
// tasks is the ordered pipeline a downlink join-response runs through.
var tasks = []func(*joinContext) error{
	setToken,               // draw a random token for the gateway packet
	getJoinAcceptTXInfo,    // compute TX timing/data-rate/frequency for RX1 or RX2
	logJoinAcceptFrame,     // optionally persist the frame for debugging
	sendJoinAcceptResponse, // hand the packet to the gateway backend
}
// joinContext carries the state shared by the tasks for one join-accept.
type joinContext struct {
	Token uint16                         // random correlation token sent with the TX packet
	DeviceSession storage.DeviceSession  // session of the joining device
	TXInfo gw.TXInfo                     // computed downlink transmission parameters
	PHYPayload lorawan.PHYPayload        // join-accept payload to transmit
}
// Handle handles a downlink join-response by running the task pipeline,
// stopping at the first task that fails.
func Handle(ds storage.DeviceSession, phy lorawan.PHYPayload) error {
	ctx := joinContext{
		DeviceSession: ds,
		PHYPayload:    phy,
	}
	for _, task := range tasks {
		err := task(&ctx)
		if err != nil {
			return err
		}
	}
	return nil
}
// setToken draws two cryptographically random bytes and stores them as the
// big-endian downlink token.
func setToken(ctx *joinContext) error {
	var buf [2]byte
	if _, err := rand.Read(buf[:]); err != nil {
		return errors.Wrap(err, "read random error")
	}
	ctx.Token = binary.BigEndian.Uint16(buf[:])
	return nil
}
func getJoinAcceptTXInfo(ctx *joinContext) error {
if len(ctx.DeviceSession.LastRXInfoSet) == 0 {
return errors.New("empty LastRXInfoSet")
}
rxInfo := ctx.DeviceSession.LastRXInfoSet[0]
ctx.TXInfo = gw.TXInfo{
MAC: rxInfo.MAC,
CodeRate: rxInfo.CodeRate,
Power: common.Band.DefaultTXPower,
}
var timestamp uint32
if ctx.DeviceSession.RXWindow == storage.RX1 {
timestamp = rxInfo.Timestamp + uint32(common.Band.JoinAcceptDelay1/time.Microsecond)
// get uplink dr
uplinkDR, err := common.Band.GetDataRate(rxInfo.DataRate)
if err != nil {
return errors.Wrap(err, "get data-rate error")
}
// get RX1 DR
rx1DR, err := common.Band.GetRX1DataRate(uplinkDR, 0)
if err != nil {
return errors.Wrap(err, "get rx1 data-rate error")
}
ctx.TXInfo.DataRate = common.Band.DataRates[rx1DR]
// get RX1 frequency
ctx.TXInfo.Frequency, err = common.Band.GetRX1Frequency(rxInfo.Frequency)
if err != nil {
return errors.Wrap(err, "get rx1 frequency error")
}
} else if ctx.DeviceSession.RXWindow == storage.RX2 {
timestamp = rxInfo.Timestamp + uint32(common.Band.JoinAcceptDelay2/time.Microsecond)
ctx.TXInfo.DataRate = common.Band.DataRates[common.Band.RX2DataRate]
ctx.TXInfo.Frequency = common.Band.RX2Frequency
} else {
return fmt.Errorf("unknown RXWindow defined %d", ctx.DeviceSession.RXWindow)
}
ctx.TXInfo.Timestamp = ×tamp
return nil
}
// logJoinAcceptFrame persists the outgoing frame for debugging.
// logDownlink is best-effort and never returns an error, so this task
// always succeeds.
func logJoinAcceptFrame(ctx *joinContext) error {
	logDownlink(common.DB, ctx.DeviceSession.DevEUI, ctx.PHYPayload, ctx.TXInfo)
	return nil
}
// sendJoinAcceptResponse hands the assembled join-accept packet to the
// gateway backend for transmission.
func sendJoinAcceptResponse(ctx *joinContext) error {
	pkt := gw.TXPacket{
		Token:      ctx.Token,
		TXInfo:     ctx.TXInfo,
		PHYPayload: ctx.PHYPayload,
	}
	if err := common.Gateway.SendTXPacket(pkt); err != nil {
		return errors.Wrap(err, "send tx-packet error")
	}
	return nil
}
// logDownlink best-effort persists a downlink frame for debugging when
// node-frame logging is enabled. Failures are logged, never propagated.
func logDownlink(db sqlx.Execer, devEUI lorawan.EUI64, phy lorawan.PHYPayload, txInfo gw.TXInfo) {
	if !common.LogNodeFrames {
		return
	}
	phyB, err := phy.MarshalBinary()
	if err != nil {
		log.Errorf("marshal phypayload to binary error: %s", err)
		return
	}
	txB, err := json.Marshal(txInfo)
	if err != nil {
		log.Errorf("marshal tx-info to json error: %s", err)
		// Fixed: bail out like the phypayload branch above — the original
		// fell through and stored a frame-log pointing at nil JSON.
		return
	}
	fl := node.FrameLog{
		DevEUI:     devEUI,
		TXInfo:     &txB,
		PHYPayload: phyB,
	}
	err = node.CreateFrameLog(db, &fl)
	if err != nil {
		log.Errorf("create frame-log error: %s", err)
	}
}
|
package data
import (
"github.com/kiali/kiali/kubernetes"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// CreateExternalServiceEntry builds a fixed MESH_EXTERNAL ServiceEntry test
// fixture for wikipedia.org with a single HTTP port, returned as a deep copy.
func CreateExternalServiceEntry() kubernetes.IstioObject {
	return (&kubernetes.GenericIstioObject{
		ObjectMeta: meta_v1.ObjectMeta{
			Name: "external-svc-wikipedia",
			Namespace: "wikipedia",
		},
		Spec: map[string]interface{}{
			"hosts": []interface{}{
				"wikipedia.org",
			},
			"location": "MESH_EXTERNAL",
			"ports": []interface{}{
				map[string]interface{}{
					// uint64 mirrors how the k8s API deserializes numbers
					"number": uint64(80),
					"name": "http-example",
					"protocol": "HTTP",
				},
			},
			"resolution": "DNS",
		},
	}).DeepCopyIstioObject()
}
// CreateEmptyMeshExternalServiceEntry builds a MESH_EXTERNAL ServiceEntry
// fixture with the given hosts and no ports, returned as a deep copy.
func CreateEmptyMeshExternalServiceEntry(name, namespace string, hosts []string) kubernetes.IstioObject {
	hostsI := make([]interface{}, 0, len(hosts))
	for _, h := range hosts {
		hostsI = append(hostsI, h)
	}
	obj := &kubernetes.GenericIstioObject{
		ObjectMeta: meta_v1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
		Spec: map[string]interface{}{
			"hosts":      hostsI,
			"location":   "MESH_EXTERNAL",
			"resolution": "DNS",
		},
	}
	return obj.DeepCopyIstioObject()
}
// AddPortDefinitionToServiceEntry appends portDef to the ServiceEntry's
// "ports" spec list, creating the list when absent. The (mutated) object is
// returned for chaining. A non-slice "ports" value is left untouched.
func AddPortDefinitionToServiceEntry(portDef map[string]interface{}, se kubernetes.IstioObject) kubernetes.IstioObject {
	spec := se.GetSpec()
	existing, found := spec["ports"]
	if !found {
		spec["ports"] = []interface{}{portDef}
		return se
	}
	if ports, ok := existing.([]interface{}); ok {
		spec["ports"] = append(ports, portDef)
	}
	return se
}
// CreateEmptyPortDefinition builds a ServiceEntry port block from a port
// number, name and protocol.
func CreateEmptyPortDefinition(port uint32, portName, protocolName string) map[string]interface{} {
	def := make(map[string]interface{}, 3)
	def["number"] = port
	def["name"] = portName
	def["protocol"] = protocolName
	return def
}
|
package main
import (
"flag"
"log"
artiefact "github.com/hirokazu/artiefact-backend"
)
// main starts the artiefact server with the configuration file given as the
// single positional argument; any other argument count is fatal.
func main() {
	flag.Parse()
	args := flag.Args()
	if len(args) != 1 {
		log.Fatal("configuration file is not specified")
	}
	artiefact.Serve(args[0])
}
|
// date: 2019-03-15
package cache
import (
"github.com/Jarvens/Exchange-Agent/itf"
"log"
)
// New creates a cache instance for the given type identifier. Currently only
// "inMemory" is supported; any other value panics.
func New(typ string) itf.Cache {
	var c itf.Cache
	if typ == "inMemory" {
		c = newInMemoryCache()
	}
	if c == nil {
		// Unknown cache type: fail fast.
		panic("未知缓存类型:" + typ)
	}
	// BUG FIX: this line previously logged "缓存对象实例化失败" (instantiation
	// FAILED) even though it is only reached on success.
	log.Println(typ, "缓存对象实例化成功")
	return c
}
|
package gormzap
import (
"go.uber.org/zap"
)
// New creates a gorm-compatible Logger that forwards all entries to the
// given *zap.Logger.
func New(logger *zap.Logger) *Logger {
	// The parameter was previously named "zap", shadowing the imported zap
	// package inside this function.
	return &Logger{
		zap: logger,
	}
}
// Logger is an alternative implementation of *gorm.Logger
type Logger struct {
	zap *zap.Logger // structured logger that receives every gorm entry
}
// Print passes arguments to Println
func (l *Logger) Print(values ...interface{}) {
	// gorm invokes Print variadically; forward the arguments as one slice.
	l.Println(values)
}
// Println format & print log
func (l *Logger) Println(values []interface{}) {
	// createLog interprets gorm's value layout; toZapFields turns it into
	// structured fields. Everything is logged at Info level under "gorm".
	l.zap.Info("gorm", createLog(values).toZapFields()...)
}
|
package deoxysii
// sliceForAppend extends the capacity of `in` by `n` octets, returning the
// potentially new slice and the appended portion.
//
// This routine is cribbed from the Go standard library and `x/crypto`.
func sliceForAppend(in []byte, n int) (head, tail []byte) {
	total := len(in) + n
	if cap(in) < total {
		// Not enough room: allocate a fresh slice and carry the data over.
		grown := make([]byte, total)
		copy(grown, in)
		head = grown
	} else {
		head = in[:total]
	}
	tail = head[len(in):]
	return head, tail
}
|
package testsupport
import (
"strings"
"github.com/bytesparadise/libasciidoc/pkg/configuration"
"github.com/bytesparadise/libasciidoc/pkg/parser"
"github.com/bytesparadise/libasciidoc/pkg/types"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
)
// ParseDocument parses the actual value into a Document
//
// Options may mix configuration.Setting and parser.Option values; anything
// else is rejected with an error.
func ParseDocument(actual string, options ...interface{}) (*types.Document, error) {
	settings := []configuration.Setting{configuration.WithFilename("test.adoc")}
	parserOpts := make([]parser.Option, 0, len(options))
	for _, opt := range options {
		switch opt := opt.(type) {
		case configuration.Setting:
			settings = append(settings, opt)
		case parser.Option:
			parserOpts = append(parserOpts, opt)
		default:
			return nil, errors.Errorf("unexpected type of option: '%T'", opt)
		}
	}
	config := configuration.NewConfiguration(settings...)
	preprocessed, err := parser.Preprocess(strings.NewReader(actual), config, parserOpts...)
	if err != nil {
		return nil, err
	}
	if log.IsLevelEnabled(log.DebugLevel) {
		log.Debugf("preparsed document:\n%s", preprocessed)
	}
	return parser.ParseDocument(strings.NewReader(preprocessed), config, parserOpts...)
}
|
package fakes
import (
"time"
"github.com/go-redis/redis"
)
// FakeRClient is an in-memory stand-in for a redis client used in tests.
type FakeRClient struct {
	getCalledWithKey   string                 // last key passed to Get
	setCalledWithKey   string                 // last key passed to Set
	setCalledWithValue interface{}            // last value passed to Set
	users              map[string]interface{} // backing store for Set/Get/Keys
	err                error                  // error injected into every returned result
}
// NewFakeRClient returns a FakeRClient with an empty store and no injected
// error.
func NewFakeRClient() *FakeRClient {
	frc := &FakeRClient{users: map[string]interface{}{}}
	return frc
}
// SetError injects err so subsequent Set/Get/Keys results carry it.
func (frc *FakeRClient) SetError(err error) {
	frc.err = err
}
// Set records the call, stores value under key and returns a status result
// carrying any injected error. The expiration is ignored by this fake.
func (frc *FakeRClient) Set(key string, value interface{}, expiration time.Duration) *redis.StatusCmd {
	frc.setCalledWithKey, frc.setCalledWithValue = key, value
	frc.users[key] = value
	return redis.NewStatusResult("", frc.err)
}
// Get records the requested key and returns the stored string value, or an
// empty result when the key is absent or holds a non-string value.
func (frc *FakeRClient) Get(key string) *redis.StringCmd {
	frc.getCalledWithKey = key
	if s, ok := frc.users[key].(string); ok {
		return redis.NewStringResult(s, frc.err)
	}
	return redis.NewStringResult("", frc.err)
}
// GetCalledWith returns the key passed to the most recent Get and resets it.
func (frc *FakeRClient) GetCalledWith() string {
	defer func() { frc.getCalledWithKey = "" }()
	return frc.getCalledWithKey
}
// SetCalledWith returns the key and value passed to the most recent Set and
// resets both recorders.
func (frc *FakeRClient) SetCalledWith() (key string, value interface{}) {
	key, value = frc.setCalledWithKey, frc.setCalledWithValue
	frc.setCalledWithKey, frc.setCalledWithValue = "", nil
	return key, value
}
// Keys returns every stored key. NOTE(review): the pattern argument is
// ignored — all keys are returned regardless of the pattern.
func (frc *FakeRClient) Keys(pattern string) *redis.StringSliceCmd {
	keys := make([]string, 0, len(frc.users))
	for key := range frc.users {
		keys = append(keys, key)
	}
	return redis.NewStringSliceResult(keys, frc.err)
}
|
package msgpack
import (
"reflect"
"testing"
impl "gopkg.in/vmihailenco/msgpack.v2"
)
// TestMarshaler verifies the codec's marshaler produces exactly the bytes of
// the underlying msgpack implementation.
func TestMarshaler(t *testing.T) {
	type Example struct {
		Field1 string `json:"field_1"`
		Field2 int    `json:"field_2"`
	}
	example := &Example{Field1: "field1", Field2: 128}
	data, err := Codec().Marshaler()(example)
	if err != nil {
		t.Fatal(err)
	}
	expData, _ := impl.Marshal(example)
	if string(data) != string(expData) {
		t.Fatalf("expected marshaled data to be %q; got %q", expData, string(data))
	}
}
func TestUnmarshaler(t *testing.T) {
type Example struct {
Field1 string `json:"field_1"`
Field2 int `json:"field_2"`
}
expValue := &Example{
Field1: "field1",
Field2: 128,
}
example := &Example{}
codec := Codec()
unmarshal := codec.Unmarshaler()
data, _ := impl.Marshal(expValue)
err := unmarshal(data, example)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(example, expValue) {
t.Fatalf("expected unmarshaled object to be:\n%#+v\n\ngot:\n%#+v", expValue, example)
}
}
|
package anchorpath_test
import (
"testing"
"github.com/stretchr/testify/assert"
anchorpath "github.com/wetware/ww/pkg/util/anchor/path"
)
// TestParts checks that Parts splits anchor paths into their components,
// collapsing repeated separators and ignoring leading/trailing slashes.
func TestParts(t *testing.T) {
	t.Parallel()
	for _, tC := range []struct {
		desc, path string
		expected   []string
	}{
		{desc: "empty", path: "", expected: []string{}},
		{desc: "root", path: "/", expected: []string{}},
		{desc: "multipart", path: "/foo/bar/baz/qux", expected: []string{"foo", "bar", "baz", "qux"}},
		{desc: "complex", path: "////foo/bar//baz/qux///////", expected: []string{"foo", "bar", "baz", "qux"}},
	} {
		tC := tC
		t.Run(tC.desc, func(t *testing.T) {
			assert.Equal(t, tC.expected, anchorpath.Parts(tC.path))
		})
	}
}
// TestJoin checks that Join produces a normalized absolute anchor path from
// the given components.
func TestJoin(t *testing.T) {
	t.Parallel()
	for _, tC := range []struct {
		desc, expected string
		parts          []string
	}{
		{desc: "empty", parts: []string{}, expected: "/"},
		{desc: "root", parts: []string{"/"}, expected: "/"},
		{desc: "complex", parts: []string{"foo/", "//bar//"}, expected: "/foo/bar"},
	} {
		tC := tC
		t.Run(tC.desc, func(t *testing.T) {
			assert.Equal(t, tC.expected, anchorpath.Join(tC.parts))
		})
	}
}
func TestRoot(t *testing.T) {
t.Parallel()
testCases := []struct {
desc string
parts []string
expected bool
}{{
desc: "empty",
parts: []string{""},
expected: true,
}, {
desc: "clean",
parts: []string{"/"},
expected: true,
}, {
desc: "dirty",
parts: []string{"//////"},
expected: true,
}, {
desc: "nonroot",
parts: []string{"/foo"},
expected: false,
}}
for _, tC := range testCases {
t.Run(tC.desc, func(t *testing.T) {
})
}
}
|
package enums
import (
"bytes"
"encoding"
"errors"
github_com_eden_framework_enumeration "github.com/eden-framework/enumeration"
)
// InvalidRobotType is returned when a string or label does not correspond to
// any RobotType value.
var InvalidRobotType = errors.New("invalid RobotType")
// init registers the RobotType enumeration (value -> label map) with the
// shared enumeration registry.
func init() {
	github_com_eden_framework_enumeration.RegisterEnums("RobotType", map[string]string{
		"SINGLECOPTER":  "SINGLECOPTER",
		"DUALCOPTER":    "DUALCOPTER",
		"HEX6H":         "HEX6H",
		"VTAIL4":        "VTAIL4",
		"HELI_90_DEG":   "HELI_90_DEG",
		"HELI_120_CCPM": "HELI_120_CCPM",
		"AIRPLANE":      "AIRPLANE",
		"OCTOFLATX":     "OCTOFLATX",
		"OCTOFLATP":     "OCTOFLATP",
		"OCTOX8":        "OCTOX8",
		"HEX6X":         "HEX6X",
		"Y4":            "Y4",
		"FLYING_WING":   "FLYING_WING",
		"HEX6":          "HEX6",
		"Y6":            "Y6",
		"GIMBAL":        "GIMBAL",
		"BI":            "BI",
		"QUADX":         "QUADX",
		"QUADP":         "QUADP",
		"TRI":           "TRI",
	})
}
// ParseRobotTypeFromString resolves a RobotType from its wire string. The
// empty string maps to ROBOT_TYPE_UNKNOWN without error; any unrecognized
// value yields InvalidRobotType.
func ParseRobotTypeFromString(s string) (RobotType, error) {
	if s == "" {
		return ROBOT_TYPE_UNKNOWN, nil
	}
	// Dispatch through the enumeration table instead of a literal switch;
	// Enums maps int(value) -> {string, label}.
	for value, names := range RobotType(0).Enums() {
		if names[0] == s {
			return RobotType(value), nil
		}
	}
	return ROBOT_TYPE_UNKNOWN, InvalidRobotType
}
// ParseRobotTypeFromLabelString resolves a RobotType from its label. The
// empty string maps to ROBOT_TYPE_UNKNOWN without error; any unrecognized
// label yields InvalidRobotType.
func ParseRobotTypeFromLabelString(s string) (RobotType, error) {
	if s == "" {
		return ROBOT_TYPE_UNKNOWN, nil
	}
	// Dispatch through the enumeration table; Enums maps
	// int(value) -> {string, label}.
	for value, names := range RobotType(0).Enums() {
		if names[1] == s {
			return RobotType(value), nil
		}
	}
	return ROBOT_TYPE_UNKNOWN, InvalidRobotType
}
// EnumType returns the registry name of this enumeration.
func (RobotType) EnumType() string {
	return "RobotType"
}
// Enums maps each RobotType value to its {string, label} pair.
func (RobotType) Enums() map[int][]string {
	return map[int][]string{
		int(ROBOT_TYPE__SINGLECOPTER):  {"SINGLECOPTER", "SINGLECOPTER"},
		int(ROBOT_TYPE__DUALCOPTER):    {"DUALCOPTER", "DUALCOPTER"},
		int(ROBOT_TYPE__HEX6H):         {"HEX6H", "HEX6H"},
		int(ROBOT_TYPE__VTAIL4):        {"VTAIL4", "VTAIL4"},
		int(ROBOT_TYPE__HELI_90_DEG):   {"HELI_90_DEG", "HELI_90_DEG"},
		int(ROBOT_TYPE__HELI_120_CCPM): {"HELI_120_CCPM", "HELI_120_CCPM"},
		int(ROBOT_TYPE__AIRPLANE):      {"AIRPLANE", "AIRPLANE"},
		int(ROBOT_TYPE__OCTOFLATX):     {"OCTOFLATX", "OCTOFLATX"},
		int(ROBOT_TYPE__OCTOFLATP):     {"OCTOFLATP", "OCTOFLATP"},
		int(ROBOT_TYPE__OCTOX8):        {"OCTOX8", "OCTOX8"},
		int(ROBOT_TYPE__HEX6X):         {"HEX6X", "HEX6X"},
		int(ROBOT_TYPE__Y4):            {"Y4", "Y4"},
		int(ROBOT_TYPE__FLYING_WING):   {"FLYING_WING", "FLYING_WING"},
		int(ROBOT_TYPE__HEX6):          {"HEX6", "HEX6"},
		int(ROBOT_TYPE__Y6):            {"Y6", "Y6"},
		int(ROBOT_TYPE__GIMBAL):        {"GIMBAL", "GIMBAL"},
		int(ROBOT_TYPE__BI):            {"BI", "BI"},
		int(ROBOT_TYPE__QUADX):         {"QUADX", "QUADX"},
		int(ROBOT_TYPE__QUADP):         {"QUADP", "QUADP"},
		int(ROBOT_TYPE__TRI):           {"TRI", "TRI"},
	}
}
// String returns the wire representation of v: the empty string for
// ROBOT_TYPE_UNKNOWN, and "UNKNOWN" for values outside the enumeration.
func (v RobotType) String() string {
	if v == ROBOT_TYPE_UNKNOWN {
		return ""
	}
	// Look the value up in the enumeration table rather than a literal
	// switch; Enums maps int(value) -> {string, label}.
	if names, ok := v.Enums()[int(v)]; ok {
		return names[0]
	}
	return "UNKNOWN"
}
// Label returns the human-readable label of v: the empty string for
// ROBOT_TYPE_UNKNOWN, and "UNKNOWN" for values outside the enumeration.
func (v RobotType) Label() string {
	if v == ROBOT_TYPE_UNKNOWN {
		return ""
	}
	// Look the value up in the enumeration table; Enums maps
	// int(value) -> {string, label}.
	if names, ok := v.Enums()[int(v)]; ok {
		return names[1]
	}
	return "UNKNOWN"
}
// Compile-time assertion that *RobotType implements both text-marshaling
// interfaces.
var _ interface {
	encoding.TextMarshaler
	encoding.TextUnmarshaler
} = (*RobotType)(nil)
// MarshalText implements encoding.TextMarshaler. Values outside the
// enumeration (which stringify as "UNKNOWN") produce InvalidRobotType.
func (v RobotType) MarshalText() ([]byte, error) {
	switch str := v.String(); str {
	case "UNKNOWN":
		return nil, InvalidRobotType
	default:
		return []byte(str), nil
	}
}
// UnmarshalText implements encoding.TextUnmarshaler; input is upper-cased
// before parsing, so matching is case-insensitive.
func (v *RobotType) UnmarshalText(data []byte) error {
	parsed, err := ParseRobotTypeFromString(string(bytes.ToUpper(data)))
	*v = parsed
	return err
}
|
package sensorsanalytics
import "errors"
// Sentinel errors exposed by the SDK; callers should catch and handle them.
var (
	// ErrIllegalDataException: raised when the data to send is malformed.
	ErrIllegalDataException = errors.New("在发送的数据格式有误时,SDK会抛出此异常,用户应当捕获并处理。")
	// ErrNetworkException: raised when data cannot be sent due to network or
	// other unforeseeable problems.
	ErrNetworkException = errors.New("在因为网络或者不可预知的问题导致数据无法发送时,SDK会抛出此异常,用户应当捕获并处理。")
	// ErrDebugException: reserved for Debug mode.
	ErrDebugException = errors.New("Debug模式专用的异常")
)
|
package cli
import (
"context"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
)
// newClient builds a controller-runtime client from the wired kubeconfig
// getter, using the tilt v1alpha1 scheme.
func newClient(ctx context.Context) (client.Client, error) {
	getter, err := wireClientGetter(ctx)
	if err != nil {
		return nil, err
	}
	cfg, err := getter.ToRESTConfig()
	if err != nil {
		return nil, err
	}
	ctrlclient, err := client.New(cfg, client.Options{Scheme: v1alpha1.NewScheme()})
	if err != nil {
		return nil, err
	}
	// Return a literal nil error: err is provably nil here, and returning it
	// obscured the success path.
	return ctrlclient, nil
}
|
package configuration
// Package-level configuration shared by the chat-with-me integration; all
// values start at their zero value and are populated elsewhere.
var (
	ChatWithMeToken                 string
	ChatWithMeExtensionUrl          string
	MatterMostHost                  string
	MatterMostAdminUsername         string
	MatterMostAdminPassword         string
	ChatWithMeTriggerWords          []string
	ChatWithMeTriggerWordsEphemeral []string
)
|
//TODO: the port column (还需要添加端口) still needs to be populated
package main1
import (
"flag"
"fmt"
"github.com/PuerkitoBio/goquery"
"github.com/exc/excelize"
"io/ioutil"
"regexp"
"strings"
"os"
)
// Command-line configuration for the report scanner.
var (
	dir    string // report directory (-p)
	output string // destination xlsx workbook (-o)
)
// main parses the -p (report directory) and -o (output workbook) flags,
// prepares the workbook, then walks the report directory.
func main() {
	flag.StringVar(&dir, "p", ".", "报告路径")
	flag.StringVar(&output, "o", "output.xlsx", "结果保存文件")
	flag.Parse()
	// Create the workbook first so wirteok can open it for each table.
	wirte(output)
	readdirs(dir)
}
// readdirs scans dir for "*main.html" report files and processes each one;
// an unreadable directory aborts the program.
func readdirs(dir string) {
	entries, direrr := ioutil.ReadDir(dir)
	if direrr != nil {
		fmt.Println(direrr, "路径错误")
		os.Exit(1)
	}
	for _, entry := range entries {
		if !strings.Contains(entry.Name(), "main.html") {
			continue
		}
		fmt.Println(strings.Replace(entry.Name(), "_main.html", "", -1))
		readfile(entry.Name())
	}
}
// readfile opens a single report HTML file and feeds the text of every
// vulnerability table under "#section_2_content" to splitstr.
func readfile(filename string) {
	// 打开html文件 (open the html report)
	file, openerr := os.Open(dir + "/" + filename)
	if openerr != nil {
		fmt.Println("打开文件失败" + filename)
		os.Exit(0)
	}
	// BUG FIX: the file handle was previously never closed.
	defer file.Close()
	doc, docerr := goquery.NewDocumentFromReader(file)
	if docerr != nil {
		fmt.Println(docerr)
		// BUG FIX: continuing with a nil document would panic below.
		return
	}
	ipfile := strings.Replace(filename, "_main.html", "", -1)
	// Each matched table holds one vulnerability entry.
	doc.Find("#section_2_content").Find("table").Each(func(i int, selection *goquery.Selection) {
		var text []string
		text = append(text, selection.Text())
		splitstr(text, ipfile)
	})
}
// splitstr parses one vulnerability table's text into the column values
// (IP, title, severity, description, advice, reference URL) and appends the
// assembled row to the workbook via wirteok.
//
// NOTE(review): the offsets (i+4, str[9]) are tied to the exact line layout
// of the report tables — confirm against a sample report before changing.
func splitstr(s []string, IP string) {
	var vuls []string
	var dengji, miaoshu, jianyi, url int
	for _, arrvul := range s {
		a := strings.Replace(arrvul, "\t", "", -1)
		str := strings.Split(a, "\n")
		for i, vul := range str {
			// Each header's value sits four lines below the header itself.
			if vul == "危险级别" { // severity level
				dengji = i + 4
			}
			if vul == "详细描述" { // detailed description
				miaoshu = i + 4
			}
			if vul == "修补建议" { // remediation advice
				jianyi = i + 4
			}
			if vul == "参考网址" { // reference URL
				url = i + 4
			}
		}
		// Strip the leading "【n】" numbering from the title line.
		reg := regexp.MustCompile(`【\d+】`)
		biaoti := reg.ReplaceAllString(str[9], "")
		vuls = append(vuls, IP)
		vuls = append(vuls, biaoti)
		vuls = append(vuls, str[dengji])
		vuls = append(vuls, str[miaoshu])
		vuls = append(vuls, str[jianyi])
		vuls = append(vuls, str[url])
	}
	wirteok(vuls)
}
// wirte ensures the output workbook exists, creating it with a "漏洞列表"
// sheet when os.Stat fails (i.e. the file does not exist yet). The name is
// kept as-is ("wirte") because it is called elsewhere in this file.
func wirte(file string) {
	if _, err := os.Stat(file); err != nil {
		// BUG FIX: the previous empty `if os.IsExist(err)` branch was dead
		// code and has been removed; behavior is unchanged.
		f := excelize.NewFile()
		index := f.NewSheet("漏洞列表")
		f.SetActiveSheet(index)
		if err := f.SaveAs(file); err != nil {
			fmt.Println(err)
		}
	}
}
// wirteok writes one result row into the "漏洞列表" sheet at A2, pushes
// existing rows down by inserting a blank row, rewrites the header row and
// saves the workbook.
func wirteok(results []string) {
	// 打开文件 (open the workbook created by wirte)
	f, err := excelize.OpenFile(output)
	if err != nil {
		fmt.Println(err)
	}
	f.SetSheetRow("漏洞列表", "A2", &results)
	// 插入空白行 (insert a blank row so the next call writes above this one)
	err1 := f.InsertRow("漏洞列表", 2)
	if err1 != nil {
		fmt.Println(err1)
	}
	f.SetCellValue("漏洞列表", "A1", "IP")
	f.SetCellValue("漏洞列表", "B1", "漏洞名称")
	f.SetCellValue("漏洞列表", "C1", "漏洞等级")
	f.SetCellValue("漏洞列表", "D1", "漏洞描述")
	f.SetCellValue("漏洞列表", "E1", "修复建议")
	f.SetCellValue("漏洞列表", "F1", "参考网址")
	f.SetCellValue("漏洞列表", "G1", "端口")
	if err1 := f.Save(); err1 != nil {
		fmt.Println(err1)
	}
}
/*
type vulbox struct {
vulname string //漏洞名称 9
vulid string //漏洞编号 19
vultype string //漏洞类型 29
vulgrade string //危害等级 39
vulos string //影响平台 49
cvss float32 //cvss分值 59
bugtraq string //bugtraq编号 69
cve string //cve编号 79
cncve string //cncve编号 87
chinavul string //国家漏洞库 97
cnvd string //cnvd编号 107
sketch string //简述 115
describe string //详细描述 125
proposal string //修补建议 135
vulurl string //参考网址 145
}
*/
|
/**
* Copyright (c) 2018-present, MultiVAC Foundation.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
// Copyright (c) 2013-2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package heartbeat
import (
"github.com/multivactech/MultiVAC/logger"
"github.com/multivactech/MultiVAC/logger/btclog"
)
// logBackend holds the process-wide btclog backend used by this package.
var logBackend *btclog.Backend

// init wires the package to the shared logger backend.
func init() {
	logBackend = logger.BackendLogger()
}
|
package auth
import (
"context"
fbauth "firebase.google.com/go/auth"
"log"
"server/domain/service"
)
// AuthService verifies an ID token and yields the authenticated user's UID.
type AuthService interface {
	Verify(string) (uid string, err error)
}
// authService implements the domain AuthService on top of a Firebase auth
// client.
type authService struct {
	Client fbauth.Client
}
// NewAuthService wraps a Firebase auth client in the domain AuthService.
func NewAuthService(client fbauth.Client) service.AuthService {
	return &authService{Client: client}
}
// Verify validates token against Firebase and returns the token's UID; a
// failed verification is logged and the error returned.
func (a *authService) Verify(token string) (string, error) {
	verifiedToken, err := a.Client.VerifyIDToken(context.Background(), token)
	if err != nil {
		log.Printf("error verifying ID token: %v\n", err)
		return "", err
	}
	return verifiedToken.UID, nil
}
|
package main
import (
"testing"
)
// BenchmarkFibV1 measures Fib(5) with a memo map pre-seeded with the two
// base cases; the map is shared across iterations.
func BenchmarkFibV1(b *testing.B) {
	memo := map[int]int{0: 0, 1: 1}
	for n := 0; n < b.N; n++ {
		Fib(5, memo)
	}
}
|
// Copyright 2020 The VectorSQL Authors.
//
// Code is licensed under Apache License, Version 2.0.
package transforms
import (
"time"
"datablocks"
"planners"
"processors"
"sessions"
)
// OrderByTransform sorts the blocks flowing through the pipeline according
// to an ORDER BY plan, accumulating progress statistics along the way.
type OrderByTransform struct {
	ctx            *TransformContext
	plan           *planners.OrderByPlan
	progressValues sessions.ProgressValues
	processors.BaseProcessor
}
// NewOrderByTransform constructs the "transform_orderby" processor for plan.
func NewOrderByTransform(ctx *TransformContext, plan *planners.OrderByPlan) processors.IProcessor {
	transform := &OrderByTransform{
		BaseProcessor: processors.NewBaseProcessor("transform_orderby"),
		ctx:           ctx,
		plan:          plan,
	}
	return transform
}
// Execute accumulates every incoming DataBlock into a single block, then on
// stream completion sorts it per the ORDER BY plan and emits the sorted
// block (or any error) downstream. Sorting requires the full input, so
// nothing is emitted until upstream finishes.
func (t *OrderByTransform) Execute() {
	var block *datablocks.DataBlock
	plan := t.plan
	out := t.Out()
	defer out.Close()

	// Get all base fields by the expression.
	fields, err := planners.BuildVariableValues(plan)
	if err != nil {
		out.Send(err)
		return
	}
	// Merge every received block into one accumulator.
	onNext := func(x interface{}) {
		switch y := x.(type) {
		case *datablocks.DataBlock:
			if block == nil {
				block = y
			} else {
				if err := block.Append(y); err != nil {
					out.Send(err)
				}
			}
		case error:
			out.Send(y)
		}
	}
	// On completion: sort the merged block, record timing/row/byte progress,
	// and forward the result.
	onDone := func() {
		if block != nil {
			start := time.Now()
			if err := block.OrderByPlan(fields, t.plan); err != nil {
				out.Send(err)
			} else {
				cost := time.Since(start)
				t.progressValues.Cost.Add(cost)
				t.progressValues.ReadBytes.Add(int64(block.TotalBytes()))
				t.progressValues.ReadRows.Add(int64(block.NumRows()))
				t.progressValues.TotalRowsToRead.Add(int64(block.NumRows()))
				out.Send(block)
			}
		}
	}
	t.Subscribe(onNext, onDone)
}
// Stats returns the progress counters accumulated during Execute.
func (t *OrderByTransform) Stats() sessions.ProgressValues {
	return t.progressValues
}
|
package models
import "errors"
// Sentinel errors shared by the data layer.
var (
	ErrNotFound    = errors.New("Requested item is not found!")
	ErrMarshalling = errors.New("Request body could not be marshalled")
	ErrInsert      = errors.New("Insert operation failed")
	ErrQuery       = errors.New("Error during querying")
)
|
package blob
// STARTIMPORT, OMIT
import (
"context" // OMIT
"io" // OMIT
"log" // OMIT
//import other packages
"cloud.google.com/go/storage"
)
// STOPIMPORT, OMIT
// STARTGCS, OMIT
// gcs bundles a GCS client with the bucket and object it writes to.
type gcs struct {
	client *storage.Client
	bucket string
	object string
}
// Writer opens a write stream to the configured bucket/object.
func (g gcs) Writer() io.WriteCloser {
	// gcs sdk luckily return instance of io.Writer and io.Closer
	return g.client.Bucket(g.bucket).Object(g.object).NewWriter(context.Background())
}
// STOPGCS, OMIT
// NewGCS builds a gcs blob writer for bucket/file. It terminates the
// process via log.Fatalf when the storage client cannot be created.
func NewGCS(bucket, file string) *gcs {
	client, err := storage.NewClient(context.Background())
	if err != nil {
		log.Fatalf("Failed to create client: %v", err)
	}
	return &gcs{
		client: client,
		bucket: bucket,
		object: file,
	}
}
|
package main
func main() {
}

// find returns the representative of n's set, compressing the path so every
// node visited ends up pointing directly at the root.
func (n *node) find() *node {
	root := n
	for root.parent != root {
		root = root.parent
	}
	// Path compression: repoint every node on the walk at the root.
	for cur := n; cur != root; {
		next := cur.parent
		cur.parent = root
		cur = next
	}
	return root
}

// union merges the sets containing n and m using union by rank and returns
// the root of the merged set.
func (n *node) union(m *node) *node {
	nRoot, mRoot := n.find(), m.find()
	switch {
	case nRoot.rank < mRoot.rank:
		nRoot.parent = mRoot
		return mRoot
	case mRoot.rank < nRoot.rank:
		mRoot.parent = nRoot
		return nRoot
	default:
		// Equal ranks: attach m's root under n's root and bump the rank.
		mRoot.parent = nRoot
		nRoot.rank++
		return nRoot
	}
}

// node is an element of a disjoint-set forest (union-find with rank).
type node struct {
	label  string
	rank   int
	parent *node
}
|
package main
import (
"fmt"
)
// main demonstrates superPow on the sample input 2^11 mod 1337.
func main() {
	fmt.Println(superPow(2, []int{1, 1}))
}
const MOD = 1337

// superPow computes a^b mod 1337 where the exponent b is given as a slice of
// decimal digits, using the identity a^123 = ((a^1)^10 * a^2)^10 * a^3.
func superPow(a int, b []int) int {
	a %= MOD
	if a == 0 {
		return 0
	}
	result := powMod(a, b[0])
	// Each further digit raises the running result to the 10th power and
	// multiplies in a^digit.
	for _, digit := range b[1:] {
		result = powMod(result, 10) * powMod(a, digit) % MOD
	}
	return result
}

// powMod computes a^b mod 1337 by repeated multiplication; b is a single
// decimal digit (or 10) here, so the linear loop is fine. Note that b == 1
// returns a unreduced, matching how callers pass already-reduced bases.
func powMod(a int, b int) int {
	if b == 0 {
		return 1
	}
	result := a
	for i := 1; i < b; i++ {
		result = result * a % MOD
	}
	return result
}
|
package utils
import (
"fmt"
"os"
toml "github.com/pelletier/go-toml"
)
var (
// for app config
APP_Address string
// for db config
DB_Driver, DB_Connect string
// for auth config
Auth_Username, Auth_Password, Auth_Secret string
)
// InitConf loads app/db/auth settings from the TOML file at confPath (when
// it exists) and then lets environment variables override the DB and auth
// values. The resulting configuration is printed to stdout.
//
// NOTE(review): the secret override reads env var "Auth_SECRET" while every
// other override uses an ALL_CAPS name — confirm whether "AUTH_SECRET" was
// intended. APP_Address has no env override. The final Println also echoes
// the password and secret to stdout — consider redacting.
func InitConf(confPath string) {
	//Check config file
	if _, err := os.Stat(confPath); !os.IsNotExist(err) {
		if config, err := toml.LoadFile(confPath); err == nil {
			// These type assertions panic if a key is missing or non-string.
			APP_Address = config.Get("app.address").(string)
			DB_Driver = config.Get("db.driver").(string)
			DB_Connect = config.Get("db.connect").(string)
			Auth_Username = config.Get("auth.username").(string)
			Auth_Password = config.Get("auth.password").(string)
			Auth_Secret = config.Get("auth.secret").(string)
		}
	}
	// Environment variables take precedence over file values.
	if driver := os.Getenv("DB_DRIVER"); driver != "" {
		DB_Driver = driver
	}
	if connect := os.Getenv("DB_CONNECT"); connect != "" {
		DB_Connect = connect
	}
	if username := os.Getenv("AUTH_USERNAME"); username != "" {
		Auth_Username = username
	}
	if password := os.Getenv("AUTH_PASSWORD"); password != "" {
		Auth_Password = password
	}
	if secret := os.Getenv("Auth_SECRET"); secret != "" {
		Auth_Secret = secret
	}
	fmt.Println("config: ", map[string]interface{}{
		"address":       APP_Address,
		"db_driver":     DB_Driver,
		"db_connect":    DB_Connect,
		"auth_username": Auth_Username,
		"auth_password": Auth_Password,
		"auth_secret":   Auth_Secret,
	})
}
|
package utils
import (
"fmt"
"net"
)
// FindIPAddress - this will find the address associated with an adapter
//
// With an empty addrName the first non-loopback IPv4 interface wins;
// otherwise only the named adapter matches. Returns (ifaceName, address).
func FindIPAddress(addrName string) (string, string, error) {
	ifaces, err := net.Interfaces()
	if err != nil {
		return "", "", err
	}
	for _, iface := range ifaces {
		addrs, err := iface.Addrs()
		if err != nil {
			return "", "", err
		}
		for _, addr := range addrs {
			ipnet, ok := addr.(*net.IPNet)
			if !ok || ipnet.IP.IsLoopback() || ipnet.IP.To4() == nil {
				continue
			}
			// Either no specific adapter was requested, or this is it.
			if addrName == "" || iface.Name == addrName {
				return iface.Name, ipnet.IP.String(), nil
			}
		}
	}
	return "", "", fmt.Errorf("Unknown interface [%s]", addrName)
}
// FindAllIPAddresses - Will return all IP addresses for a server
//
// Both IPNet and IPAddr entries are collected; other address kinds are
// skipped.
func FindAllIPAddresses() ([]net.IP, error) {
	ifaces, err := net.Interfaces()
	if err != nil {
		return nil, err
	}
	var ips []net.IP
	for _, iface := range ifaces {
		addrs, err := iface.Addrs()
		if err != nil {
			return nil, err
		}
		for _, addr := range addrs {
			var ip net.IP
			switch v := addr.(type) {
			case *net.IPNet:
				ip = v.IP
			case *net.IPAddr:
				ip = v.IP
			}
			if ip != nil {
				ips = append(ips, ip)
			}
		}
	}
	return ips, nil
}
//ConvertIP -
//
// ConvertIP parses a textual IP address, returning the trailing 4 octets
// for IPv4 (net.ParseIP returns 16-byte slices even for IPv4 addresses).
func ConvertIP(ipAddress string) ([]byte, error) {
	parsed := net.ParseIP(ipAddress)
	if parsed == nil {
		return nil, fmt.Errorf("Couldn't parse the IP address: %s", ipAddress)
	}
	if len(parsed) <= 4 {
		return parsed, nil
	}
	// Keep only the IPv4 tail of the 16-byte representation.
	return parsed[len(parsed)-4:], nil
}
|
// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package crash
import (
"context"
"io/ioutil"
"strings"
"time"
"chromiumos/tast/common/testexec"
"chromiumos/tast/ctxutil"
"chromiumos/tast/local/crash"
"chromiumos/tast/testing"
)
// init registers the SuspendFailure test with the tast framework.
func init() {
	testing.AddTest(&testing.Test{
		Func: SuspendFailure,
		Desc: "Verify suspend failures are logged as expected",
		Contacts: []string{
			"dbasehore@google.com",
			"mutexlox@google.com",
			"cros-telemetry@google.com",
		},
		Attr: []string{"group:mainline", "informational"},
	})
}
// SuspendFailure induces an artificial suspend failure (by removing all
// permissions from /sys/power/state), then verifies the anomaly detector
// writes the expected suspend_failure crash log/meta files with weight=50.
// The filesystem permissions and powerd are restored before checking.
func SuspendFailure(ctx context.Context, s *testing.State) {
	const suspendFailureName = "suspend-failure"
	// Reserve time for cleanup by shortening the main context.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 5*time.Second)
	defer cancel()
	// Set up the crash test, ignoring non-suspend-failure crashes.
	if err := crash.SetUpCrashTest(ctx, crash.WithMockConsent(), crash.FilterCrashes("suspend_failure")); err != nil {
		s.Fatal("SetUpCrashTest failed: ", err)
	}
	defer crash.TearDownCrashTest(cleanupCtx)
	// Restart anomaly detector to clear its cache of recently seen service
	// failures and ensure this one is logged.
	if err := crash.RestartAnomalyDetectorWithSendAll(ctx, true); err != nil {
		s.Fatal("Failed to restart anomaly detector: ", err)
	}
	// Restart anomaly detector to clear its --testonly-send-all flag at the end of execution.
	defer crash.RestartAnomalyDetector(cleanupCtx)
	s.Log("Inducing artificial suspend permission failure")
	// Remember the original mode bits so they can be restored afterwards.
	perm, err := testexec.CommandContext(ctx, "stat", "--format=%a", "/sys/power/state").Output(testexec.DumpLogOnError)
	if err != nil {
		s.Fatal("Failed to run 'stat -c='%a' /sys/power/state': ", err)
	}
	// Remove all permissions on /sys/power/state to induce the failure on write
	err = testexec.CommandContext(ctx, "chmod", "000", "/sys/power/state").Run()
	if err != nil {
		s.Fatal("Failed to set permissions on /sys/power/state: ", err)
	}
	// Error is expected here. Set a 60 second wakeup just in case suspend
	// somehow works here.
	err = testexec.CommandContext(ctx, "powerd_dbus_suspend", "--timeout=30", "--wakeup_timeout=60").Run()
	if err == nil {
		s.Error("powerd_dbus_suspend didn't fail when we expect it to")
	}
	// Restart powerd since it's still trying to suspend (which we don't want to
	// happen right now).
	err = testexec.CommandContext(ctx, "restart", "powerd").Run()
	if err != nil {
		s.Error("Failed to restart powerd: ", err)
		// If we fail to restart powerd, we'll shut down in ~100 seconds, so
		// just reboot.
		testexec.CommandContext(ctx, "reboot").Run()
	}
	// Restore the saved permissions (stat output has a trailing newline).
	err = testexec.CommandContext(ctx, "chmod", strings.TrimRight(string(perm), "\r\n"), "/sys/power/state").Run()
	if err != nil {
		s.Errorf("Failed to reset permissions (%v) on /sys/power/state: %v", perm, err)
		// We're messed up enough that rebooting the machine to reset the file
		// permissions on /sys/power/state is best here.
		testexec.CommandContext(ctx, "reboot").Run()
	}
	const (
		logFileRegex  = `suspend_failure\.\d{8}\.\d{6}\.\d+\.0\.log`
		metaFileRegex = `suspend_failure\.\d{8}\.\d{6}\.\d+\.0\.meta`
	)
	expectedRegexes := []string{logFileRegex, metaFileRegex}
	crashDirs, err := crash.GetDaemonStoreCrashDirs(ctx)
	if err != nil {
		s.Fatal("Couldn't get daemon store dirs: ", err)
	}
	// We might not be logged in, so also allow system crash dir.
	crashDirs = append(crashDirs, crash.SystemCrashDir)
	files, err := crash.WaitForCrashFiles(ctx, crashDirs, expectedRegexes)
	if err != nil {
		s.Fatal("Couldn't find expected files: ", err)
	}
	defer func() {
		if err := crash.RemoveAllFiles(cleanupCtx, files); err != nil {
			s.Log("Couldn't clean up files: ", err)
		}
	}()
	// Every generated meta file must carry the expected sampling weight.
	for _, meta := range files[metaFileRegex] {
		contents, err := ioutil.ReadFile(meta)
		if err != nil {
			s.Errorf("Couldn't read log file %s: %v", meta, err)
		}
		if !strings.Contains(string(contents), "upload_var_weight=50\n") {
			s.Error("Meta file didn't contain weight=50. Saving file")
			if err := crash.MoveFilesToOut(ctx, s.OutDir(), meta); err != nil {
				s.Error("Could not move meta file to out dir: ", err)
			}
		}
	}
}
|
/**
固定长度的有序数组,动态增删改
(Fixed-length ordered array supporting dynamic insert/delete/update.)
*/
package array
// FixedOrderArray is an ordered int array with a fixed capacity, intended to
// support dynamic insert/delete/update.
type FixedOrderArray struct {
	elements []int
	length   int
	capacity int
}

// NewFixedOrderArray allocates a FixedOrderArray holding up to cnt elements.
func NewFixedOrderArray(cnt int) *FixedOrderArray {
	foa := &FixedOrderArray{}
	foa.elements = make([]int, cnt)
	foa.capacity = cnt
	return foa
}

// add is a no-op stub — the insert operation is not implemented yet.
func (foa *FixedOrderArray) add(x int) {
}

// del is a no-op stub — the delete operation is not implemented yet.
func (foa *FixedOrderArray) del() {
}

// update is a no-op stub — the update operation is not implemented yet.
func (foa *FixedOrderArray) update() {
}
|
package common
import (
"shared/utility/coordinate"
"testing"
)
// TestArea_MergePiece appends overlapping pieces on one column and logs the
// merged layout for manual inspection (no assertions are made).
func TestArea_MergePiece(t *testing.T) {
	area := NewArea()
	area.AppendPiece(1, Piece{StartY: 1, EndY: 3})
	area.AppendPiece(1, Piece{StartY: 4, EndY: 8})
	area.AppendPiece(1, Piece{StartY: -1, EndY: 11})
	area.AppendPiece(1, Piece{StartY: 2, EndY: 123})
	for i, pieces := range area.Pieces {
		t.Logf("x :%+v", i)
		t.Logf("pieces :%+v", pieces)
	}
}
// TestArea_Cut cuts a 2x2 rectangle at (1,1) out of a single long piece and
// logs the remaining layout for manual inspection (no assertions are made).
func TestArea_Cut(t *testing.T) {
	area := NewArea()
	area.AppendPiece(1, Piece{StartY: -1, EndY: 123})
	cut := area.CutRectangle(*coordinate.NewPosition(1, 1), 2, 2)
	for i, pieces := range cut.Pieces {
		t.Logf("x :%+v", i)
		t.Logf("pieces :%+v", pieces)
	}
}
// TestArea_Contains logs whether position (1,124) — just past the piece's
// EndY of 123 — is contained (no assertion is made on the result).
func TestArea_Contains(t *testing.T) {
	area := NewArea()
	area.AppendPiece(1, Piece{StartY: -1, EndY: 123})
	contains := area.Contains(*coordinate.NewPosition(1, 124))
	t.Logf("contains :%+v", contains)
}
|
package abi
import (
"errors"
"fmt"
"github.com/hyperledger/burrow/execution/evm/abi"
)
// ABI wraps a burrow abi.Spec and encodes method calls against it.
type ABI struct {
	spec *abi.Spec
}
// LoadFile reads an ABI spec from the file (or directory) at fpath.
func LoadFile(fpath string) (*ABI, error) {
	spec, err := abi.LoadPath(fpath)
	if err != nil {
		return nil, err
	}
	return newABI(spec), nil
}
// New parses an ABI spec from its JSON bytes.
func New(buf []byte) (*ABI, error) {
	spec, err := abi.ReadSpec(buf)
	if err != nil {
		return nil, err
	}
	return newABI(spec), nil
}
// newABI wraps an already-parsed spec.
func newABI(spec *abi.Spec) *ABI {
	return &ABI{
		spec: spec,
	}
}
// Encode ABI-encodes a call to methodName with the named args. An empty
// methodName refers to the constructor, when the spec declares one.
func (a *ABI) Encode(methodName string, args map[string]interface{}) ([]byte, error) {
	if methodName == "" {
		if a.spec.Constructor != nil {
			return a.encodeMethod(a.spec.Constructor, args)
		}
		// BUG FIX: the error previously read "missing method mane".
		return nil, errors.New("missing method name")
	}
	method, ok := a.spec.Functions[methodName]
	if !ok {
		return nil, fmt.Errorf("method %s not found", methodName)
	}
	return a.encodeMethod(method, args)
}
// encodeMethod packs args (looked up by declared input name) into the ABI
// encoding of method; every declared input must be present in args.
func (a *ABI) encodeMethod(method *abi.FunctionSpec, args map[string]interface{}) ([]byte, error) {
	var inputs []interface{}
	for _, input := range method.Inputs {
		v, ok := args[input.Name]
		if !ok {
			return nil, fmt.Errorf("arg name %s not found", input.Name)
		}
		// v, err := encode(input.Type, v)
		// if err != nil {
		// 	return nil, err
		// }
		// fmt.Printf("encode %s => %v\n", input.Name, v)
		inputs = append(inputs, v)
	}
	out, _, err := a.spec.Pack(method.Name, inputs...)
	return out, err
}
//func decodeHex(str string) []byte {
// var buf []byte
// n, err := fmt.Sscanf(str, "0x%x", &buf)
// if err != nil {
// panic(err)
// }
// if n != 1 {
// panic("bad address")
// }
// return buf
//}
// func encodeInt(x interface{}, size int) interface{} {
// if size <= 64 {
// panic(fmt.Sprintf("unsupported int size %d", size))
// }
// return new(big.Int).SetBytes(decodeHex(x.(string)))
// }
// func encodeAddress(x interface{}) interface{} {
// var addr common.Address
// buf := decodeHex(x.(string))
// copy(addr[:], buf)
// return addr
// }
// func encode(t abi.Type, x interface{}) (interface{}, error) {
// switch t.T {
// case abi.IntTy, abi.UintTy:
// return encodeInt(x, t.Size), nil
// case abi.BoolTy, abi.StringTy, abi.SliceTy, abi.ArrayTy:
// return x, nil
// case abi.AddressTy:
// return encodeAddress(x), nil
// default:
// panic("Invalid type")
// }
// }
|
// Copyright 2020 The Moov Authors
// Use of this source code is governed by an Apache License
// license that can be found in the LICENSE file.
package wire
// RemittanceData carries the remittance beneficiary/originator details of a
// wire message: identity fields (Name, DateBirthPlace), a structured postal
// address (Department through Country), up to seven free-form address lines,
// and the country of residence. All fields are optional strings and are
// omitted from JSON when empty.
// NOTE(review): exact field semantics/tag mapping come from the FedWire
// message format — confirm against the wire format specification.
type RemittanceData struct {
// Name
Name string `json:"name,omitempty"`
// DateBirthPlace
DateBirthPlace string `json:"dateBirthPlace,omitempty"`
// AddressType
AddressType string `json:"addressType,omitempty"`
// Department
Department string `json:"department,omitempty"`
// SubDepartment
SubDepartment string `json:"subDepartment,omitempty"`
// StreetName
StreetName string `json:"streetName,omitempty"`
// BuildingNumber
BuildingNumber string `json:"buildingNumber,omitempty"`
// PostCode
PostCode string `json:"postCode,omitempty"`
// TownName
TownName string `json:"townName,omitempty"`
// CountrySubDivisionState
CountrySubDivisionState string `json:"countrySubDivisionState,omitempty"`
// Country
Country string `json:"country,omitempty"`
// AddressLineOne
AddressLineOne string `json:"addressLineOne,omitempty"`
// AddressLineTwo
AddressLineTwo string `json:"addressLineTwo,omitempty"`
// AddressLineThree
AddressLineThree string `json:"addressLineThree,omitempty"`
// AddressLineFour
AddressLineFour string `json:"addressLineFour,omitempty"`
// AddressLineFive
AddressLineFive string `json:"addressLineFive,omitempty"`
// AddressLineSix
AddressLineSix string `json:"addressLineSix,omitempty"`
// AddressLineSeven
AddressLineSeven string `json:"addressLineSeven,omitempty"`
// CountryOfResidence
CountryOfResidence string `json:"countryOfResidence,omitempty"`
}
|
// Copyright 2020 Insolar Network Ltd.
// All rights reserved.
// This material is licensed under the Insolar License version 1.0,
// available at https://github.com/insolar/block-explorer/blob/master/LICENSE.md.
package extractor
import (
"context"
"fmt"
"io"
"math/rand"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/insolar/insolar/insolar"
"github.com/insolar/insolar/insolar/pulse"
"github.com/insolar/insolar/ledger/heavy/exporter"
"google.golang.org/grpc/metadata"
"github.com/insolar/block-explorer/etl/interfaces"
"github.com/insolar/block-explorer/etl/types"
"github.com/insolar/block-explorer/instrumentation/belogger"
)
// PlatformAPIVersion is the heavy-exporter API version this client advertises
// in outgoing gRPC metadata (see appendPlatformVersionToCtx).
const PlatformAPIVersion = "2"
// PlatformExtractor pulls finalized pulses and their records from a Heavy
// node via the record-exporter gRPC client and publishes them on
// mainPulseDataChan for consumers (see GetJetDrops).
type PlatformExtractor struct {
// hasStarted guards Start/Stop idempotence; protected by startStopMutex.
hasStarted bool
startStopMutex *sync.Mutex
// workers is mutated atomically (takeWorker/freeWorker); maxWorkers caps
// the number of concurrent non-main retrieval threads.
workers int32
maxWorkers int32
pulseExtractor interfaces.PulseExtractor
client exporter.RecordExporterClient
request *exporter.GetRecords
// mainPulseDataChan is where retrieved pulse data is published.
mainPulseDataChan chan *types.PlatformPulseData
// cancel stops the main retrieval loop started by Start.
cancel context.CancelFunc
batchSize uint32
continuousPulseRetrievingHalfPulseSeconds uint32
// shutdownBE shuts the whole block explorer down on fatal version errors.
shutdownBE func()
}
// NewPlatformExtractor wires up a PlatformExtractor with the given tuning
// parameters, exporter client, and shutdown hook.
func NewPlatformExtractor(
	batchSize uint32,
	continuousPulseRetrievingHalfPulseSeconds uint32,
	maxWorkers int32,
	queueLen uint32,
	pulseExtractor interfaces.PulseExtractor,
	exporterClient exporter.RecordExporterClient,
	shutdownBE func(),
) *PlatformExtractor {
	extractor := &PlatformExtractor{
		startStopMutex:    &sync.Mutex{},
		client:            exporterClient,
		request:           &exporter.GetRecords{Count: batchSize},
		mainPulseDataChan: make(chan *types.PlatformPulseData, queueLen),
		pulseExtractor:    pulseExtractor,
		batchSize:         batchSize,
		continuousPulseRetrievingHalfPulseSeconds: continuousPulseRetrievingHalfPulseSeconds,
		maxWorkers: maxWorkers,
		shutdownBE: shutdownBE,
	}
	return extractor
}
// GetJetDrops returns the channel on which retrieved pulse data is published.
// The ctx parameter is currently unused.
func (e *PlatformExtractor) GetJetDrops(ctx context.Context) <-chan *types.PlatformPulseData {
return e.mainPulseDataChan
}
// LoadJetDrops asynchronously retrieves pulses after fromPulseNumber up to
// toPulseNumber by spawning retrievePulses; it always returns nil and does
// not wait for completion.
func (e *PlatformExtractor) LoadJetDrops(ctx context.Context, fromPulseNumber int64, toPulseNumber int64) error {
go e.retrievePulses(ctx, fromPulseNumber, toPulseNumber)
return nil
}
// Stop cancels the main retrieval loop if it is running. Calling Stop on an
// already-stopped extractor is a no-op; the method always returns nil.
func (e *PlatformExtractor) Stop(ctx context.Context) error {
	e.startStopMutex.Lock()
	defer e.startStopMutex.Unlock()
	if !e.hasStarted {
		return nil
	}
	e.cancel()
	belogger.FromContext(ctx).Info("Stopping platform extractor...")
	e.hasStarted = false
	return nil
}
// Start launches the main retrieval loop (from the latest pulse, never
// stopping) unless it is already running. It always returns nil.
func (e *PlatformExtractor) Start(ctx context.Context) error {
	e.startStopMutex.Lock()
	defer e.startStopMutex.Unlock()
	if e.hasStarted {
		return nil
	}
	belogger.FromContext(ctx).Info("Starting platform extractor main thread...")
	e.hasStarted = true
	ctx, e.cancel = context.WithCancel(ctx)
	go e.retrievePulses(ctx, 0, 0)
	return nil
}
// closeStream closes the send side of a record-export stream, logging (but
// not propagating) any error. A nil stream is ignored.
func closeStream(ctx context.Context, stream exporter.RecordExporter_ExportClient) {
	if stream == nil {
		return
	}
	if err := stream.CloseSend(); err != nil {
		belogger.FromContext(ctx).Warn("Error closing stream: ", err)
	}
}
// retrievePulses loops fetching finalized pulses after `from` and dispatching
// record retrieval for each new one. A `from` of zero starts at the latest
// pulse; `until` <= 0 marks this as the never-ending main thread, otherwise
// the loop returns once a pulse number >= until has been processed.
// Non-main threads rate-limit themselves through takeWorker/freeWorker.
func (e *PlatformExtractor) retrievePulses(ctx context.Context, from, until int64) {
RetrievePulsesCount.Inc()
defer RetrievePulsesCount.Dec()
mainThread := until <= 0
pu := &exporter.FullPulse{PulseNumber: insolar.PulseNumber(from)}
var err error
logger := belogger.FromContext(ctx)
if mainThread {
logger = logger.WithField("main", mainThread)
} else {
logger = logger.WithField("from", from).WithField("until", until)
}
ctx = appendPlatformVersionToCtx(ctx)
halfPulse := time.Duration(e.continuousPulseRetrievingHalfPulseSeconds) * time.Second
var nextNotEmptyPulseNumber *insolar.PulseNumber
for {
log := logger.WithField("pulse_number", pu.PulseNumber)
log.Debug("retrievePulses(): Start")
select {
case <-ctx.Done(): // we need context with cancel
log.Debug("retrievePulses(): terminating")
return
default:
}
// check free workers if not main thread; spin with randomized backoff
// (0.5s-2s) until a worker slot frees up
if !mainThread {
for !e.takeWorker() {
sleepMs := rand.Intn(1500) + 500
time.Sleep(time.Millisecond * time.Duration(sleepMs))
}
}
ExtractProcessCount.Set(float64(atomic.LoadInt32(&e.workers)))
before := *pu
pu, err = e.pulseExtractor.GetNextFinalizedPulse(ctx, int64(before.PulseNumber))
if err != nil { // network error ?
// roll back to the previous pulse so the next iteration retries it
pu = &before
if isVersionError(err) {
// incompatible client/server versions are fatal: shut down BE
log.Errorf("retrievePulses(): version error occurred, debug: %s", debugVersionError(ctx))
e.shutdownBE()
break
}
if isRateLimitError(err) {
log.Error("retrievePulses(): on rpc call: ", err.Error())
Errors.With(ErrorTypeRateLimitExceeded).Inc()
if !mainThread {
e.freeWorker()
}
time.Sleep(halfPulse)
continue
}
if strings.Contains(err.Error(), pulse.ErrNotFound.Error()) { // seems this pulse already last
log.Debugf("retrievePulses(): sleep on not found pulse, before=%d err=%s", before.PulseNumber, err)
Errors.With(ErrorTypeNotFound).Inc()
if !mainThread {
e.freeWorker()
}
time.Sleep(halfPulse * 3)
continue
}
// any other error: log, back off one second, retry same pulse
log.Errorf("retrievePulses(): before=%d err=%s", before.PulseNumber, err)
if !mainThread {
e.freeWorker()
}
time.Sleep(time.Second)
continue
}
if pu.PulseNumber == before.PulseNumber { // no new pulse happens
time.Sleep(halfPulse)
if !mainThread {
e.freeWorker()
}
continue
}
log.Debugf("retrievePulses(): Done, jets %d, new pulse: %d", len(pu.Jets), pu.PulseNumber)
ReceivedPulses.Inc()
LastPulseFetched.Set(float64(pu.PulseNumber))
if !mainThread && e.maxWorkers <= 3 {
// This hack made for 1 platform only
// If you set maxWorkers in config <=3, then we start receive data in serial 1 by 1.
// 3 threads made for we can get 3 potential skipped spaces between pulses at the same time.
// If some day heavy node will be able to process many parallel getRecords requests - delete this hack
if nextNotEmptyPulseNumber == nil || pu.PulseNumber >= *nextNotEmptyPulseNumber {
sif := "nil"
if nextNotEmptyPulseNumber != nil {
sif = nextNotEmptyPulseNumber.String()
}
log.Debugf("SIF is %s, get records", sif)
// synchronous fetch; returns the next pulse known to hold records
nextNotEmptyPulseNumber = e.retrieveRecords(ctx, *pu, mainThread, false)
} else {
// pulse known to be empty: publish pulse info only, skip records
log.Debugf("SIF is %d, skipping records", *nextNotEmptyPulseNumber)
e.retrieveRecords(ctx, *pu, mainThread, true)
}
} else {
// enough workers allowed: fetch records concurrently
go e.retrieveRecords(ctx, *pu, mainThread, false)
}
if mainThread { // we are going on the edge of history
time.Sleep(halfPulse * 2)
} else if pu.PulseNumber >= insolar.PulseNumber(until) { // we are at the end
return
}
}
}
// retrieveRecords streams all records of pulse pu from the exporter in
// batches of e.batchSize and publishes the assembled PlatformPulseData on
// mainPulseDataChan. When skipRequest is true only the pulse info is
// published (no records are fetched). On success it returns the pulse number
// to iterate from next (from resp.ShouldIterateFrom, or the pulse of the
// first record past pu); it returns nil on cancellation, fatal errors, or
// when skipping. Non-main callers' worker slot is released on return.
func (e *PlatformExtractor) retrieveRecords(ctx context.Context, pu exporter.FullPulse, mainThread, skipRequest bool) *insolar.PulseNumber {
startedAt := time.Now()
RetrieveRecordsCount.Inc()
cancelCtx, cancelFunc := context.WithCancel(ctx)
defer func() {
if !mainThread {
e.freeWorker()
}
RetrieveRecordsCount.Dec()
cancelFunc()
}()
// fast return when we don't need to get records
if skipRequest {
e.mainPulseDataChan <- &types.PlatformPulseData{Pulse: &pu}
return nil
}
logger := belogger.FromContext(cancelCtx)
log := logger.WithField("pulse_number", pu.PulseNumber).WithField("main", mainThread)
log.Debug("retrieveRecords(): Start")
pulseData := &types.PlatformPulseData{Pulse: &pu} // save pulse info
halfPulse := time.Duration(e.continuousPulseRetrievingHalfPulseSeconds) * time.Second
for { // each portion
select {
case <-cancelCtx.Done():
return nil
default:
}
// resume from the number of records already accumulated, so a reopened
// stream continues where the previous portion stopped
stream, err := e.client.Export(cancelCtx, &exporter.GetRecords{PulseNumber: pu.PulseNumber,
RecordNumber: uint32(len(pulseData.Records)),
Count: e.batchSize},
)
if err != nil {
log.Error("retrieveRecords() on rpc call: ", err.Error())
if isVersionError(err) {
// incompatible versions are fatal: shut the block explorer down
e.shutdownBE()
return nil
}
if isRateLimitError(err) {
Errors.With(ErrorTypeRateLimitExceeded).Inc()
time.Sleep(halfPulse)
continue
}
Errors.With(ErrorTypeOnRecordExport).Inc()
time.Sleep(time.Second)
continue
}
for { // per record in request
select {
case <-cancelCtx.Done():
closeStream(cancelCtx, stream)
return nil
default:
}
resp, err := stream.Recv()
if err == io.EOF { // stream ended, we have our portion
break
}
if err != nil && isRateLimitError(err) {
log.Error("retrieveRecords() on rpc call: ", err.Error())
Errors.With(ErrorTypeRateLimitExceeded).Inc()
closeStream(cancelCtx, stream)
time.Sleep(halfPulse)
// we should break inner for loop and reopen a stream because the clientStream finished and can't retry
break
}
if resp == nil { // error, assume the data is broken
if strings.Contains(err.Error(), exporter.ErrNotFinalPulseData.Error()) ||
strings.Contains(err.Error(), pulse.ErrNotFound.Error()) {
// pulse data not final / not found yet: wait a full pulse and re-request
Errors.With(ErrorTypeNotFound).Inc()
log.Debugf("retrieveRecords(): GBR Rerequest cur pulse=%d err=%s", pu.PulseNumber, err)
time.Sleep(halfPulse * 2)
closeStream(cancelCtx, stream)
break
}
log.Errorf("retrieveRecords(): empty response: err=%s", err)
closeStream(cancelCtx, stream)
return nil
}
if resp.ShouldIterateFrom != nil || resp.Record.ID.Pulse() != pu.PulseNumber { // next pulse packet
closeStream(cancelCtx, stream)
e.mainPulseDataChan <- pulseData
FromExtractorDataQueue.Set(float64(len(e.mainPulseDataChan)))
log.Debugf("retrieveRecords(): Done in %s, recs: %d", time.Since(startedAt).String(), len(pulseData.Records))
iterateFrom := resp.ShouldIterateFrom
if iterateFrom == nil {
itf := resp.Record.ID.Pulse()
iterateFrom = &itf
}
return iterateFrom // we have whole pulse
}
pulseData.Records = append(pulseData.Records, resp)
ReceivedRecords.Inc()
}
}
}
// takeWorker attempts to reserve a worker slot. It returns false (and leaves
// the count unchanged) when maxWorkers slots are already taken; a successful
// reservation must be released with freeWorker.
func (e *PlatformExtractor) takeWorker() bool {
	limit := e.maxWorkers
	if atomic.AddInt32(&e.workers, 1) <= limit {
		return true
	}
	atomic.AddInt32(&e.workers, -1)
	return false
}
// freeWorker releases a worker slot previously reserved via takeWorker.
func (e *PlatformExtractor) freeWorker() {
atomic.AddInt32(&e.workers, -1)
}
// debugVersionError renders the client type/version headers from the outgoing
// gRPC metadata for diagnostic logging of version errors.
func debugVersionError(ctx context.Context) string {
	md, ok := metadata.FromOutgoingContext(ctx)
	if !ok {
		return "metadata not found"
	}
	return fmt.Sprintf("Client Type: %s, Client version: %s",
		md.Get(exporter.KeyClientType), md.Get(exporter.KeyClientVersionHeavy))
}
// appendPlatformVersionToCtx attaches the client-type and heavy-version
// headers to the outgoing gRPC metadata so the exporter can validate us.
// NOTE(review): KeyClientType is set to ValidateHeavyVersion.String() —
// looks intentional for version validation; confirm against the exporter
// metadata contract.
func appendPlatformVersionToCtx(ctx context.Context) context.Context {
ctx = metadata.AppendToOutgoingContext(ctx, exporter.KeyClientType, exporter.ValidateHeavyVersion.String())
return metadata.AppendToOutgoingContext(ctx, exporter.KeyClientVersionHeavy, PlatformAPIVersion)
}
// isVersionError reports whether err looks like a client/server version
// mismatch reported by the heavy exporter.
func isVersionError(err error) bool {
	msg := err.Error()
	markers := []string{
		exporter.ErrDeprecatedClientVersion.Error(),
		"unknown heavy-version",
		"unknown type client",
		"incorrect format of the heavy-version",
	}
	for _, marker := range markers {
		if strings.Contains(msg, marker) {
			return true
		}
	}
	return false
}
// isRateLimitError reports whether err carries the exporter's
// rate-limit-exceeded message.
func isRateLimitError(err error) bool {
return strings.Contains(err.Error(), exporter.RateLimitExceededMsg)
}
|
package main
import (
"fmt"
"sort"
"sync"
)
// Number constrains swap's element type to int or float64.
type Number interface {
	int | float64
}

// swap exchanges each adjacent pair of elements in place: (a[0],a[1]),
// (a[2],a[3]), ... A trailing element of an odd-length slice stays put.
func swap[T Number](a []T) {
	for left := 0; left+1 < len(a); left += 2 {
		a[left], a[left+1] = a[left+1], a[left]
	}
}
// fact returns v! computed iteratively; for v < 2 (including negatives,
// as in the original) the result is 1.
func fact(v int) int {
	result := 1
	for i := 2; i <= v; i++ {
		result *= i
	}
	return result
}
// sortMap prints the map's entries to stdout in ascending key order.
// (Maps iterate in random order, so keys are collected and sorted first.)
func sortMap(v map[int]string) {
	// Collect keys with a pre-sized append instead of index bookkeeping.
	keys := make([]int, 0, len(v))
	for k := range v {
		keys = append(keys, k)
	}
	// sort.Ints is the idiomatic (and faster) form of sort.Slice for ints.
	sort.Ints(keys)
	fmt.Println("map sorted")
	for _, k := range keys {
		fmt.Println("->", k, ":", v[k])
	}
}
// fib returns the n-th Fibonacci number via naive recursion.
// n  = 0 1 2 3 4 5 6  7  8  9 10 ...
// xn = 0 1 1 2 3 5 8 13 21 34 55 ...
// xn = xn-1 + xn-2
func fib(n int) int {
	switch {
	case n == 0:
		return 0
	case n == 1:
		return 1
	default:
		return fib(n-1) + fib(n-2)
	}
}
// fib2 returns the n-th Fibonacci number iteratively, memoizing every
// intermediate value in a slice of length n+1.
func fib2(n int) int {
	if n == 0 {
		return 0
	}
	if n == 1 {
		return 1
	}
	seq := make([]int, n+1)
	seq[1] = 1 // seq[0] is already the zero value
	for i := 2; i <= n; i++ {
		seq[i] = seq[i-1] + seq[i-2]
	}
	return seq[n]
}
// fib3 returns the n-th Fibonacci number iteratively in O(1) space,
// carrying only the previous two values.
func fib3(n int) int {
	switch n {
	case 0:
		return 0
	case 1:
		return 1
	}
	prev, curr := 0, 1
	for i := 2; i <= n; i++ {
		prev, curr = curr, prev+curr
	}
	return curr
}
// MyStruct is a minimal struct used below to demonstrate type assertions.
type MyStruct struct {
F int
}
// Set is a mutex-guarded set of ints, safe for concurrent use.
type Set struct {
	val map[int]struct{}
	m   sync.Mutex
}

// NewSet returns an empty, ready-to-use Set.
func NewSet() *Set {
	return &Set{
		val: make(map[int]struct{}),
	}
}

// Add inserts v into the set; adding an existing value is a no-op.
func (s *Set) Add(v int) {
	// Idiomatic ordering: Lock first, then defer Unlock (the original
	// deferred the Unlock before taking the Lock, which works but reads
	// backwards and breaks if Lock ever panics).
	s.m.Lock()
	defer s.m.Unlock()
	s.val[v] = struct{}{}
}

// Dump returns the set's values as a slice in unspecified (map) order.
func (s *Set) Dump() []int {
	s.m.Lock()
	defer s.m.Unlock()
	r := make([]int, 0, len(s.val)) // pre-sized; stays non-nil when empty
	for k := range s.val {
		r = append(r, k)
	}
	return r
}
// main drives the demos above: generic swap, factorial, sorted map printing,
// map-membership and type-assertion idioms, the mutex-guarded Set, and the
// three Fibonacci implementations side by side.
func main() {
// swap
s := []int{1, 2, 3, 4, 5}
fmt.Println("origin:", s)
swap(s)
fmt.Println("swap:", s)
// fact
v := fact(5)
// sort map key
fmt.Println("v:", v)
m := map[int]string{
1: "Ricardo",
2: "Miguel",
4: "Leal",
3: "Ferraz",
}
sortMap(m)
// test if key exists in map
if val, ok := m[1]; ok {
fmt.Println("Exists", val)
}
// type discoverer
var x interface{}
x = MyStruct{F: 123}
if xx, ok := x.(MyStruct); ok {
fmt.Println(xx.F)
}
// Set
set := NewSet()
set.Add(1)
set.Add(2)
set.Add(2) // duplicate: set semantics keep a single 2
fmt.Println("set:", set.Dump())
// fib: all three implementations must agree for n = 0..10
for i := 0; i < 11; i++ {
fmt.Printf("fib (%d)= %d\n", i, fib(i))
fmt.Printf("fib2(%d)= %d\n", i, fib2(i))
fmt.Printf("fib3(%d)= %d\n", i, fib3(i))
}
}
|
// Copyright (C) 2016-Present Pivotal Software, Inc. All rights reserved.
// This program and the accompanying materials are made available under the terms of the under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
package delete_all_service_instances_test
import (
"fmt"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gbytes"
"github.com/onsi/gomega/gexec"
"github.com/pivotal-cf/on-demand-service-broker/config"
"github.com/pivotal-cf/on-demand-service-broker/deleter"
"github.com/pivotal-cf/on-demand-service-broker/integration_tests/helpers"
"github.com/pivotal-cf/on-demand-service-broker/mockhttp"
"github.com/pivotal-cf/on-demand-service-broker/mockhttp/mockcfapi"
"github.com/pivotal-cf/on-demand-service-broker/mockuaa"
"gopkg.in/yaml.v2"
)
// Integration tests for the delete-all-service-instances tool: each Context
// wires a mock CF API and a mock UAA, launches the real binary with a config
// file pointing at them, and asserts on exit code and log output.
var _ = Describe("delete all service instances tool", func() {
// Fixed identifiers shared by every mock interaction below.
const (
serviceID = "service-id"
planID = "plan-id"
cfAccessToken = "cf-oauth-token"
cfUaaClientID = "cf-uaa-client-id"
cfUaaClientSecret = "cf-uaa-client-secret"
instanceGUID = "some-instance-guid"
boundAppGUID = "some-bound-app-guid"
serviceBindingGUID = "some-binding-guid"
serviceKeyGUID = "some-key-guid"
)
// Per-test state: the mock servers, the process under test, and its config.
var (
cfAPI *mockhttp.Server
cfUAA *mockuaa.ClientCredentialsServer
deleterSession *gexec.Session
configuration deleter.Config
configFilePath string
)
// Fresh mocks and a default config file before every test.
BeforeEach(func() {
cfAPI = mockcfapi.New()
cfUAA = mockuaa.NewClientCredentialsServer(cfUaaClientID, cfUaaClientSecret, cfAccessToken)
configuration = deleter.Config{
ServiceCatalog: deleter.ServiceCatalog{
ID: serviceID,
},
DisableSSLCertVerification: true,
CF: config.CF{
URL: cfAPI.URL,
UAA: config.UAAConfig{
URL: cfUAA.URL,
Authentication: config.UAACredentials{
ClientCredentials: config.ClientCredentials{
ID: cfUaaClientID,
Secret: cfUaaClientSecret,
},
},
},
},
PollingInitialOffset: 0,
PollingInterval: 0,
}
configYAML, err := yaml.Marshal(configuration)
Expect(err).ToNot(HaveOccurred())
configFilePath = helpers.WriteConfig(configYAML, tempDir)
})
// Verify every expected mock interaction happened, then tear down.
AfterEach(func() {
cfAPI.VerifyMocks()
cfAPI.Close()
cfUAA.Close()
})
// Service offering absent from CF: the tool should exit cleanly, doing nothing.
Context("when the service is not registered with CF", func() {
BeforeEach(func() {
cfAPI.VerifyAndMock(
mockcfapi.ListServiceOfferings().
RespondsOKWith(`{
"total_results": 0,
"total_pages": 0,
"prev_url": null,
"next_url": null,
"resources": []
}`),
)
params := []string{"-configFilePath", configFilePath}
deleterSession = helpers.StartBinaryWithParams(binaryPath, params)
Eventually(deleterSession, 10*time.Second).Should(gexec.Exit())
})
It("does nothing", func() {
Expect(deleterSession.ExitCode()).To(Equal(0))
Expect(deleterSession).To(gbytes.Say("No service instances found."))
})
})
// Offering exists but has no instances; also checks polling config logging.
Context("when there are no service instances", func() {
BeforeEach(func() {
cfAPI.VerifyAndMock(
mockcfapi.ListServiceOfferings().RespondsWithServiceOffering(serviceID, "some-cc-service-offering-guid"),
mockcfapi.ListServicePlans("some-cc-service-offering-guid").RespondsWithServicePlan(planID, "some-cc-plan-guid"),
mockcfapi.ListServiceInstances("some-cc-plan-guid").RespondsWithNoServiceInstances(),
)
configuration.PollingInitialOffset = 1
configuration.PollingInterval = 1
configYAML, err := yaml.Marshal(configuration)
Expect(err).ToNot(HaveOccurred())
configFilePath = helpers.WriteConfig(configYAML, tempDir)
params := []string{"-configFilePath", configFilePath}
deleterSession = helpers.StartBinaryWithParams(binaryPath, params)
Eventually(deleterSession, 10*time.Second).Should(gexec.Exit())
})
It("does nothing", func() {
Expect(deleterSession.ExitCode()).To(Equal(0))
Expect(deleterSession).To(gbytes.Say("No service instances found."))
})
It("logs that the polling interval values are as configured", func() {
// NOTE(review): "polling_intial_offset" matches the (misspelled) log
// message the tool actually emits — fix in tandem with the tool, not here.
Expect(deleterSession).To(gbytes.Say("Deleter Configuration: polling_intial_offset: 1, polling_interval: 1."))
})
})
// Happy path: bindings and keys deleted first, then the instance, polled to
// completion, then a final re-list confirms nothing is left.
Context("when there is one service instance", func() {
BeforeEach(func() {
cfAPI.VerifyAndMock(
mockcfapi.ListServiceOfferings().RespondsWithServiceOffering(serviceID, "some-cc-service-offering-guid"),
mockcfapi.ListServicePlans("some-cc-service-offering-guid").RespondsWithServicePlan(planID, "some-cc-plan-guid"),
mockcfapi.ListServiceInstances("some-cc-plan-guid").RespondsWithServiceInstances(instanceGUID),
mockcfapi.ListServiceBindings(instanceGUID).RespondsWithServiceBinding(serviceBindingGUID, instanceGUID, boundAppGUID),
mockcfapi.DeleteServiceBinding(boundAppGUID, serviceBindingGUID).RespondsNoContent(),
mockcfapi.ListServiceKeys(instanceGUID).RespondsWithServiceKey(serviceKeyGUID, instanceGUID),
mockcfapi.DeleteServiceKey(serviceKeyGUID).RespondsNoContent(),
mockcfapi.GetServiceInstance(instanceGUID).RespondsWithSucceeded(),
mockcfapi.DeleteServiceInstance(instanceGUID).RespondsAcceptedWith(""),
mockcfapi.GetServiceInstance(instanceGUID).RespondsWithInProgress(mockcfapi.Delete),
mockcfapi.GetServiceInstance(instanceGUID).RespondsNotFoundWith(""),
mockcfapi.ListServiceOfferings().RespondsWithServiceOffering(serviceID, "some-cc-service-offering-guid"),
mockcfapi.ListServicePlans("some-cc-service-offering-guid").RespondsWithServicePlan(planID, "some-cc-plan-guid"),
mockcfapi.ListServiceInstances("some-cc-plan-guid").RespondsWithNoServiceInstances(),
)
params := []string{"-configFilePath", configFilePath}
deleterSession = helpers.StartBinaryWithParams(binaryPath, params)
Eventually(deleterSession, 10*time.Second).Should(gexec.Exit())
})
It("deletes the service instance", func() {
Expect(deleterSession.ExitCode()).To(BeZero())
Expect(deleterSession).To(gbytes.Say("Deleting binding some-binding-guid of service instance some-instance-guid to app some-bound-app-guid"))
Expect(deleterSession).To(gbytes.Say(fmt.Sprintf("Deleting service key %s of service instance %s", serviceKeyGUID, instanceGUID)))
Expect(deleterSession).To(gbytes.Say(fmt.Sprintf("Deleting service instance %s", instanceGUID)))
Expect(deleterSession).To(gbytes.Say(fmt.Sprintf("Waiting for service instance %s to be deleted", instanceGUID)))
Expect(deleterSession).To(gbytes.Say("FINISHED DELETES"))
})
})
// Config-file error handling: missing file...
Context("when the configuration file cannot be read", func() {
BeforeEach(func() {
configFilePath := "no/file/here"
params := []string{"-configFilePath", configFilePath}
deleterSession = helpers.StartBinaryWithParams(binaryPath, params)
})
It("fails with error", func() {
Eventually(deleterSession).Should(gexec.Exit(1))
Eventually(deleterSession).Should(gbytes.Say("Error reading config file"))
})
})
// ...and unparseable YAML.
Context("when the configuration file is invalid yaml", func() {
BeforeEach(func() {
configFilePath := helpers.WriteConfig([]byte("not:valid:yaml"), tempDir)
params := []string{"-configFilePath", configFilePath}
deleterSession = helpers.StartBinaryWithParams(binaryPath, params)
})
It("fails with error", func() {
Eventually(deleterSession).Should(gexec.Exit(1))
Eventually(deleterSession).Should(gbytes.Say("Invalid config file"))
})
})
// CF API unreachable (server closed before the tool starts).
Context("when CF API cannot be reached", func() {
BeforeEach(func() {
cfAPI.Close()
params := []string{"-configFilePath", configFilePath}
deleterSession = helpers.StartBinaryWithParams(binaryPath, params)
Eventually(deleterSession, 10*time.Second).Should(gexec.Exit())
})
It("fails to connect", func() {
Expect(deleterSession.ExitCode()).To(Equal(1))
Expect(deleterSession).To(gbytes.Say("connection refused"))
})
})
// A 500 from a GET should abort before any deletion is attempted.
Context("when a CF API GET request fails", func() {
BeforeEach(func() {
cfAPI.VerifyAndMock(
mockcfapi.ListServiceOfferings().RespondsInternalServerErrorWith("no services for you"),
)
params := []string{"-configFilePath", configFilePath}
deleterSession = helpers.StartBinaryWithParams(binaryPath, params)
Eventually(deleterSession, 10*time.Second).Should(gexec.Exit())
})
It("fails and does nothing", func() {
// NOTE(review): "reponse" matches the (misspelled) message the tool
// emits — fix in tandem with the tool, not here.
Expect(deleterSession.ExitCode()).To(Equal(1))
Expect(deleterSession).To(gbytes.Say("Unexpected reponse status 500"))
Expect(deleterSession).To(gbytes.Say("no services for you"))
})
})
// 404 on DELETE binding is tolerated: the resource is already gone.
Context("when CF API DELETE request responds with 404 Not Found", func() {
BeforeEach(func() {
cfAPI.VerifyAndMock(
mockcfapi.ListServiceOfferings().RespondsWithServiceOffering(serviceID, "some-cc-service-offering-guid"),
mockcfapi.ListServicePlans("some-cc-service-offering-guid").RespondsWithServicePlan(planID, "some-cc-plan-guid"),
mockcfapi.ListServiceInstances("some-cc-plan-guid").RespondsWithServiceInstances(instanceGUID),
mockcfapi.ListServiceBindings(instanceGUID).RespondsWithServiceBinding(serviceBindingGUID, instanceGUID, boundAppGUID),
mockcfapi.DeleteServiceBinding(boundAppGUID, serviceBindingGUID).RespondsNotFoundWith(`{
"code": 111111,
"description": "The app could not be found: some-bound-app-guid",
"error_code": "CF-AppNotFound"
}`),
mockcfapi.ListServiceKeys(instanceGUID).RespondsOKWith(`{
"total_results": 0,
"total_pages": 0,
"prev_url": null,
"next_url": null,
"resources": []
}`),
mockcfapi.GetServiceInstance(instanceGUID).RespondsWithSucceeded(),
mockcfapi.DeleteServiceInstance(instanceGUID).RespondsAcceptedWith(""),
mockcfapi.GetServiceInstance(instanceGUID).RespondsNotFoundWith(""),
mockcfapi.ListServiceOfferings().RespondsWithServiceOffering(serviceID, "some-cc-service-offering-guid"),
mockcfapi.ListServicePlans("some-cc-service-offering-guid").RespondsWithServicePlan(planID, "some-cc-plan-guid"),
mockcfapi.ListServiceInstances("some-cc-plan-guid").RespondsWithNoServiceInstances(),
)
params := []string{"-configFilePath", configFilePath}
deleterSession = helpers.StartBinaryWithParams(binaryPath, params)
Eventually(deleterSession, 10*time.Second).Should(gexec.Exit())
})
It("exits with success", func() {
Expect(deleterSession.ExitCode()).To(BeZero())
})
})
// 404 on listing bindings is likewise tolerated.
Context("when CF API GET service bindings responds with 404 Not Found", func() {
BeforeEach(func() {
cfAPI.VerifyAndMock(
mockcfapi.ListServiceOfferings().RespondsWithServiceOffering(serviceID, "some-cc-service-offering-guid"),
mockcfapi.ListServicePlans("some-cc-service-offering-guid").RespondsWithServicePlan(planID, "some-cc-plan-guid"),
mockcfapi.ListServiceInstances("some-cc-plan-guid").RespondsWithServiceInstances(instanceGUID),
mockcfapi.ListServiceBindings(instanceGUID).RespondsNotFoundWith(`{
"code": 111111,
"description": "The app could not be found: some-bound-app-guid",
"error_code": "CF-AppNotFound"
}`),
mockcfapi.ListServiceKeys(instanceGUID).RespondsOKWith(`{
"total_results": 0,
"total_pages": 0,
"prev_url": null,
"next_url": null,
"resources": []
}`),
mockcfapi.GetServiceInstance(instanceGUID).RespondsWithSucceeded(),
mockcfapi.DeleteServiceInstance(instanceGUID).RespondsAcceptedWith(""),
mockcfapi.GetServiceInstance(instanceGUID).RespondsNotFoundWith(""),
mockcfapi.ListServiceOfferings().RespondsWithServiceOffering(serviceID, "some-cc-service-offering-guid"),
mockcfapi.ListServicePlans("some-cc-service-offering-guid").RespondsWithServicePlan(planID, "some-cc-plan-guid"),
mockcfapi.ListServiceInstances("some-cc-plan-guid").RespondsWithNoServiceInstances(),
)
params := []string{"-configFilePath", configFilePath}
deleterSession = helpers.StartBinaryWithParams(binaryPath, params)
Eventually(deleterSession, 10*time.Second).Should(gexec.Exit())
})
It("exits with success", func() {
Expect(deleterSession.ExitCode()).To(BeZero())
})
})
// 404 on listing service keys is tolerated too.
Context("when CF API GET service keys responds with 404 Not Found", func() {
BeforeEach(func() {
cfAPI.VerifyAndMock(
mockcfapi.ListServiceOfferings().RespondsWithServiceOffering(serviceID, "some-cc-service-offering-guid"),
mockcfapi.ListServicePlans("some-cc-service-offering-guid").RespondsWithServicePlan(planID, "some-cc-plan-guid"),
mockcfapi.ListServiceInstances("some-cc-plan-guid").RespondsWithServiceInstances(instanceGUID),
mockcfapi.ListServiceBindings(instanceGUID).RespondsWithServiceBinding(serviceBindingGUID, instanceGUID, boundAppGUID),
mockcfapi.DeleteServiceBinding(boundAppGUID, serviceBindingGUID).RespondsNoContent(),
mockcfapi.ListServiceKeys(instanceGUID).RespondsNotFoundWith(`{
"code": 111111,
"description": "The app could not be found: some-bound-app-guid",
"error_code": "CF-AppNotFound"
}`),
mockcfapi.GetServiceInstance(instanceGUID).RespondsWithSucceeded(),
mockcfapi.DeleteServiceInstance(instanceGUID).RespondsAcceptedWith(""),
mockcfapi.GetServiceInstance(instanceGUID).RespondsNotFoundWith(""),
mockcfapi.ListServiceOfferings().RespondsWithServiceOffering(serviceID, "some-cc-service-offering-guid"),
mockcfapi.ListServicePlans("some-cc-service-offering-guid").RespondsWithServicePlan(planID, "some-cc-plan-guid"),
mockcfapi.ListServiceInstances("some-cc-plan-guid").RespondsWithNoServiceInstances(),
)
params := []string{"-configFilePath", configFilePath}
deleterSession = helpers.StartBinaryWithParams(binaryPath, params)
Eventually(deleterSession, 10*time.Second).Should(gexec.Exit())
})
It("exits with success", func() {
Expect(deleterSession.ExitCode()).To(BeZero())
})
})
// Any other non-2xx/404 DELETE status (here 403) is a hard failure.
Context("when a CF API DELETE response is unexpected", func() {
BeforeEach(func() {
cfAPI.VerifyAndMock(
mockcfapi.ListServiceOfferings().RespondsWithServiceOffering(serviceID, "some-cc-service-offering-guid"),
mockcfapi.ListServicePlans("some-cc-service-offering-guid").RespondsWithServicePlan(planID, "some-cc-plan-guid"),
mockcfapi.ListServiceInstances("some-cc-plan-guid").RespondsWithServiceInstances(instanceGUID),
mockcfapi.ListServiceBindings(instanceGUID).RespondsWithServiceBinding(serviceBindingGUID, instanceGUID, boundAppGUID),
mockcfapi.DeleteServiceBinding(boundAppGUID, serviceBindingGUID).RespondsForbiddenWith(`{
"code": 10003,
"description": "You are not authorized to perform the requested action",
"error_code": "CF-NotAuthorized"
}`),
)
params := []string{"-configFilePath", configFilePath}
deleterSession = helpers.StartBinaryWithParams(binaryPath, params)
Eventually(deleterSession, 10*time.Second).Should(gexec.Exit())
})
It("fails to authorize", func() {
Eventually(deleterSession).Should(gexec.Exit(1))
Expect(deleterSession).To(gbytes.Say("Unexpected reponse status 403"))
Expect(deleterSession).To(gbytes.Say("You are not authorized to perform the requested action"))
})
})
// The instance's last_operation reports the delete failed.
Context("when CF API GET instance response is delete failed", func() {
BeforeEach(func() {
cfAPI.VerifyAndMock(
mockcfapi.ListServiceOfferings().RespondsWithServiceOffering(serviceID, "some-cc-service-offering-guid"),
mockcfapi.ListServicePlans("some-cc-service-offering-guid").RespondsWithServicePlan(planID, "some-cc-plan-guid"),
mockcfapi.ListServiceInstances("some-cc-plan-guid").RespondsWithServiceInstances(instanceGUID),
mockcfapi.ListServiceBindings(instanceGUID).RespondsWithServiceBinding(serviceBindingGUID, instanceGUID, boundAppGUID),
mockcfapi.DeleteServiceBinding(boundAppGUID, serviceBindingGUID).RespondsNoContent(),
mockcfapi.ListServiceKeys(instanceGUID).RespondsWithServiceKey(serviceKeyGUID, instanceGUID),
mockcfapi.DeleteServiceKey(serviceKeyGUID).RespondsNoContent(),
mockcfapi.GetServiceInstance(instanceGUID).RespondsWithSucceeded(),
mockcfapi.DeleteServiceInstance(instanceGUID).RespondsAcceptedWith(""),
mockcfapi.GetServiceInstance(instanceGUID).RespondsWithFailed(mockcfapi.Delete),
)
params := []string{"-configFilePath", configFilePath}
deleterSession = helpers.StartBinaryWithParams(binaryPath, params)
Eventually(deleterSession, 1*time.Second).Should(gexec.Exit())
})
It("reports the failure", func() {
Eventually(deleterSession).Should(gexec.Exit(1))
Expect(deleterSession).To(gbytes.Say("Result: failed to delete service instance %s. Delete operation failed.", instanceGUID))
})
})
// Malformed JSON in the polled instance response is a hard failure.
Context("when CF API GET instance response is invalid json", func() {
BeforeEach(func() {
cfAPI.VerifyAndMock(
mockcfapi.ListServiceOfferings().RespondsWithServiceOffering(serviceID, "some-cc-service-offering-guid"),
mockcfapi.ListServicePlans("some-cc-service-offering-guid").RespondsWithServicePlan(planID, "some-cc-plan-guid"),
mockcfapi.ListServiceInstances("some-cc-plan-guid").RespondsWithServiceInstances(instanceGUID),
mockcfapi.ListServiceBindings(instanceGUID).RespondsWithServiceBinding(serviceBindingGUID, instanceGUID, boundAppGUID),
mockcfapi.DeleteServiceBinding(boundAppGUID, serviceBindingGUID).RespondsNoContent(),
mockcfapi.ListServiceKeys(instanceGUID).RespondsWithServiceKey(serviceKeyGUID, instanceGUID),
mockcfapi.DeleteServiceKey(serviceKeyGUID).RespondsNoContent(),
mockcfapi.GetServiceInstance(instanceGUID).RespondsWithSucceeded(),
mockcfapi.DeleteServiceInstance(instanceGUID).RespondsAcceptedWith(""),
mockcfapi.GetServiceInstance(instanceGUID).RespondsOKWith("not valid json"),
)
params := []string{"-configFilePath", configFilePath}
deleterSession = helpers.StartBinaryWithParams(binaryPath, params)
Eventually(deleterSession, 1*time.Second).Should(gexec.Exit())
})
It("fails to delete", func() {
Eventually(deleterSession).Should(gexec.Exit(1))
Expect(deleterSession).To(gbytes.Say("Result: failed to delete service instance %s. Error: Invalid response body", instanceGUID))
})
})
})
|
package server
import (
"context"
"crypto/tls"
"fmt"
"net"
"net/http"
"path/filepath"
"github.com/emicklei/go-restful"
"github.com/rancher/dynamiclistener"
"github.com/rancher/dynamiclistener/factory"
"github.com/rancher/dynamiclistener/storage/file"
"github.com/rancher/dynamiclistener/storage/memory"
"k8s.io/klog"
auth "github.com/baidu/ote-stack/pkg/server/apisauthentication"
authr "github.com/baidu/ote-stack/pkg/server/apisauthorization"
acd "github.com/baidu/ote-stack/pkg/server/apiscoordination"
anw "github.com/baidu/ote-stack/pkg/server/apisnetworking"
an "github.com/baidu/ote-stack/pkg/server/apisnode"
as "github.com/baidu/ote-stack/pkg/server/apisstorage"
"github.com/baidu/ote-stack/pkg/server/apiv1"
crt "github.com/baidu/ote-stack/pkg/server/certificate"
"github.com/baidu/ote-stack/pkg/server/handler"
)
// edgeK3sServer is an EdgeServer implementation that serves the edge REST
// APIs over a dynamically-managed TLS listener (rancher/dynamiclistener).
type edgeK3sServer struct {
	// ctx carries the server configuration; note that StartServer receives
	// its own *ServerContext argument and this field is not read there.
	ctx *ServerContext
}
// NewEdgeK3sServer constructs an EdgeServer backed by edgeK3sServer,
// retaining the supplied server context.
func NewEdgeK3sServer(ctx *ServerContext) EdgeServer {
	srv := edgeK3sServer{ctx: ctx}
	return &srv
}
// newListener opens a TCP listener on the configured bind address/port and
// wraps it in a dynamiclistener TLS listener seeded with the configured
// cert/key pair. It returns the listener plus the handler dynamiclistener
// provides for serving its own endpoints.
func newListener(serverCtx *ServerContext) (net.Listener, http.Handler, error) {
	tcp, err := dynamiclistener.NewTCPListener(serverCtx.BindAddr, serverCtx.BindPort)
	if err != nil {
		return nil, nil, err
	}
	// Seed certificate/key loaded from disk; dynamiclistener persists any
	// generated material via the storage below.
	cert, key, err := factory.LoadCerts(serverCtx.CertFile, serverCtx.KeyFile)
	if err != nil {
		return nil, nil, err
	}
	storage := tlsStorage(serverCtx.CertCtx.DataPath)
	// RequestClientCert: client certificates are requested but not required
	// at the TLS layer; authentication is handled by the API handlers.
	return dynamiclistener.NewListener(tcp, storage, cert, key, dynamiclistener.Config{
		CN: "edgehub",
		Organization: []string{"edgehub"},
		TLSConfig: tls.Config{
			ClientAuth: tls.RequestClientCert,
		},
	})
}
// tlsStorage returns TLS certificate storage for dynamiclistener: an
// in-memory cache backed by a JSON file under dataDir.
func tlsStorage(dataDir string) dynamiclistener.TLSStorage {
	backing := file.New(filepath.Join(dataDir, "dynamic-cert.json"))
	return memory.NewBacked(backing)
}
// startClusterAndHTTPS builds the TLS listener and the composed HTTP
// handler, then serves until ctx is cancelled. When ctx is done the server
// is shut down, which makes Serve return http.ErrServerClosed; that error
// is propagated to the caller (StartServer treats it as a clean stop).
func startClusterAndHTTPS(ctx context.Context, serverCtx *ServerContext) error {
	l, handler, err := newListener(serverCtx)
	if err != nil {
		klog.Errorf("new listener failed: %v", err)
		return err
	}
	handler, err = getHandler(handler, serverCtx.HandlerCtx, serverCtx.CertCtx)
	if err != nil {
		klog.Errorf("get handler failed: %v", err)
		return err
	}
	// NOTE(review): no Read/Write timeouts are set on the server — confirm
	// whether slow-client protection is handled elsewhere.
	server := http.Server{
		Handler: handler,
	}
	// Shut the server down when the context is cancelled.
	go func() {
		<-ctx.Done()
		server.Shutdown(context.Background())
	}()
	if err := server.Serve(l); err != nil {
		return err
	}
	return nil
}
// getHandler combines the dynamiclistener-provided handler with the edge
// REST API container and returns the composite handler.
//
// The first parameter is named h (not handler) so it does not shadow the
// imported handler package inside the function body.
//
// NOTE(review): for every request both h.ServeHTTP and
// wsContainer.ServeHTTP write to the same ResponseWriter in sequence;
// whichever writes first wins the status code. This mirrors the original
// behavior and is preserved as-is — confirm it is intentional.
func getHandler(h http.Handler, ctx *handler.HandlerContext, certCtx *crt.CertContext) (http.Handler, error) {
	// TODO add need http handler here.
	wsContainer := restful.NewContainer()
	// Register certificate/auth endpoints plus all edge API groups.
	wsContainer.Add(crt.NewAuthWebService(certCtx))
	wsContainer.Add(apiv1.NewWebsService(ctx))
	wsContainer.Add(anw.NewWebsService(ctx))
	wsContainer.Add(as.NewWebsService(ctx))
	wsContainer.Add(acd.NewWebsService(ctx))
	wsContainer.Add(an.NewWebsService(ctx))
	wsContainer.Add(auth.NewWebsService(ctx))
	wsContainer.Add(authr.NewWebsService(ctx))
	return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		h.ServeHTTP(rw, req)
		wsContainer.ServeHTTP(rw, req)
	}), nil
}
// StartServer validates ctx, then runs the HTTPS server until ctx.StopChan
// fires. A shutdown triggered via StopChan surfaces as http.ErrServerClosed
// and is reported as a clean stop; any other serve error is returned.
// NOTE(review): the method uses the ctx argument, not e.ctx — confirm the
// receiver field is intentionally unused here.
func (e *edgeK3sServer) StartServer(ctx *ServerContext) error {
	if err := e.CheckValid(ctx); err != nil {
		return err
	}
	// Bridge the StopChan signal into context cancellation for the server.
	newCtx, cancel := context.WithCancel(context.Background())
	go func() {
		<-ctx.StopChan
		cancel()
	}()
	klog.Info("EdgeServer starting...")
	if err := startClusterAndHTTPS(newCtx, ctx); err != nil {
		if err == http.ErrServerClosed {
			klog.Info("EdgeServer stopped.")
		} else {
			return err
		}
	}
	return nil
}
// CheckValid verifies that ctx carries everything StartServer needs: a
// parseable bind IP, an in-range port, TLS cert/key paths, a stop channel,
// and valid handler/cert contexts. It returns a descriptive error for the
// first failed check, nil when all pass.
//
// Error strings are lowercase without trailing punctuation per Go
// convention (staticcheck ST1005); the previous capitalized/ungrammatical
// messages were normalized.
func (e *edgeK3sServer) CheckValid(ctx *ServerContext) error {
	if ctx == nil {
		return fmt.Errorf("server context is nil")
	}
	if net.ParseIP(ctx.BindAddr) == nil {
		return fmt.Errorf("server bind address %s is an invalid IP", ctx.BindAddr)
	}
	if ctx.BindPort < 0 || ctx.BindPort > 65535 {
		return fmt.Errorf("server bind port %d is out of range 0-65535", ctx.BindPort)
	}
	if ctx.CertFile == "" {
		return fmt.Errorf("server tls cert file not specified")
	}
	if ctx.KeyFile == "" {
		return fmt.Errorf("server tls key file not specified")
	}
	if ctx.StopChan == nil {
		return fmt.Errorf("stop channel for server is nil")
	}
	if ctx.HandlerCtx == nil || !ctx.HandlerCtx.IsValid() {
		return fmt.Errorf("server handleCtx is invalid")
	}
	if ctx.CertCtx == nil || !ctx.CertCtx.IsValid() {
		return fmt.Errorf("server certCtx is invalid")
	}
	return nil
}
|
package main
import (
"encoding/json"
"log"
db "github.com/jerrydevin96/lifo-queue/database"
)
// pushHandler pushes a value onto the database-backed LIFO queue.
// data is a JSON object expected to carry a "value" key. The reply is a
// JSON object with a single "response" key describing the outcome.
func pushHandler(data string) string {
	log.Println("Pushing item into queue")
	var JSONValue map[string]string
	responseJSON := make(map[string]string)
	// Reject malformed input up front; the previous version ignored this
	// error and would push an empty value for unparseable payloads.
	if err := json.Unmarshal([]byte(data), &JSONValue); err != nil {
		log.Println("[ERROR] " + err.Error())
		responseJSON["response"] = err.Error()
		responseData, _ := json.Marshal(responseJSON)
		return string(responseData)
	}
	log.Println("fetching current last index")
	index, _, err := db.GetLastRecord()
	if err != nil {
		log.Println("[ERROR] " + err.Error())
		responseJSON["response"] = err.Error()
		responseData, _ := json.Marshal(responseJSON)
		return string(responseData)
	}
	// Append at the next index; presumably GetLastRecord returns index 0
	// for an empty queue — TODO confirm against the db package.
	err = db.InsertNewRecord(index+1, JSONValue["value"])
	if err != nil {
		log.Println("[ERROR] " + err.Error())
		responseJSON["response"] = err.Error()
		responseData, _ := json.Marshal(responseJSON)
		return string(responseData)
	}
	responseJSON["response"] = "successfully pushed " + JSONValue["value"] + " into the queue"
	responseData, _ := json.Marshal(responseJSON)
	return string(responseData)
}
// popHandler removes and returns the most recently pushed value (LIFO).
// The reply is a JSON object with "message" and "value" keys.
func popHandler() string {
	// Fixed copy-paste bug: this previously logged "Pushing item into queue".
	log.Println("Popping item from queue")
	responseJSON := make(map[string]string)
	log.Println("fetching current last record")
	index, value, err := db.GetLastRecord()
	if err != nil {
		log.Println("[ERROR] " + err.Error())
		responseJSON["message"] = err.Error()
		responseJSON["value"] = "failed to fetch"
		responseData, _ := json.Marshal(responseJSON)
		return string(responseData)
	}
	// Index 0 is treated as "queue empty" — presumably GetLastRecord
	// returns 0 when no records exist; TODO confirm against the db package.
	if index != 0 {
		log.Println("Deleting current last record")
		if err = db.DeleteLastRecord(index); err != nil {
			log.Println("[ERROR] " + err.Error())
			responseJSON["message"] = err.Error()
			responseJSON["value"] = "failed to fetch"
			responseData, _ := json.Marshal(responseJSON)
			return string(responseData)
		}
		responseJSON["message"] = "successfully performed POP"
		responseJSON["value"] = value
	} else {
		log.Println("no elements are present in queue")
		responseJSON["message"] = "No elements available in queue"
		responseJSON["value"] = ""
	}
	responseData, _ := json.Marshal(responseJSON)
	return string(responseData)
}
|
package rfc7807
import (
"bytes"
"encoding/json"
"fmt"
"net/url"
"reflect"
"strings"
)
// DTO error data transfer object is made in an effort to standardize REST API error handling,
// the IETF devised RFC 7807, which creates a generalized error-handling schema.
// https://www.rfc-editor.org/rfc/rfc7807
//
// Example:
//
//	{
//	   "type": "/errors/incorrect-user-pass",
//	   "title": "Incorrect username or password.",
//	   "status": 401,
//	   "detail": "Authentication failed due to incorrect username or password.",
//	   "instance": "/login/log/abc123"
//	}
type DTO struct {
	// Type is a URI reference that identifies the problem type.
	// Ideally, the URI should resolve to human-readable information describing the type, but that’s not necessary.
	// The problem type provides more specific information than the HTTP status code itself.
	//
	// Type URI reference [RFC3986] identifies the problem type.
	// This specification encourages that, when dereferenced,
	// it provides human-readable documentation for the problem type (e.g., using HTML [W3C.REC-html5-20141028]).
	// When this member is absent, its value is assumed to be "about:blank".
	//
	// Consumers MUST use the "type" string as the primary identifier for
	// the problem type; the "title" string is advisory and included only
	// for users who are not aware of the semantics of the URI and do not
	// have the ability to discover them (e.g., offline log analysis).
	// Consumers SHOULD NOT automatically dereference the type URI.
	//
	// Example: "/errors/incorrect-user-pass"
	Type Type
	// Title is a human-readable description of the problem type,
	// meaning that it should always be the same for the same type.
	//
	// Example: "Incorrect username or password."
	Title string
	// Status reflects the HTTP status code and is a convenient way to make problem details self-contained.
	// That way, error replies can be interpreted outside the context of the HTTP interaction.
	// Status is an optional field.
	//
	// Example: 401
	Status int
	// Detail is a human-readable description of the problem instance,
	// explaining why the problem occurred in this specific case.
	//
	// Example: "Authentication failed due to incorrect username or password."
	Detail string
	// Instance is a URI that identifies the specific occurrence of the error.
	// Instance is optional.
	//
	// Example: "/login/log/abc123"
	Instance string
	// Extensions is a user-defined optional generic type that holds additional details in your error reply.
	// For example, suppose your company already has its error reply convention.
	// In that case, you can use the extension as a backward compatibility layer
	// to roll out the RFC 7807 standard in your project
	// without breaking any API contract between your server and its clients.
	//
	// Example: {...,"error":{"code":"foo-bar-baz","message":"foo bar baz"}}
	Extensions any
}
type Type struct {
ID string
BaseURL string
}
func (typ *Type) String() string {
return strings.TrimSuffix(typ.BaseURL, "/") + "/" + url.PathEscape(typ.ID)
}
func (typ *Type) Parse(raw string) error {
u, err := url.Parse(raw)
if err != nil {
return err
}
ps := strings.Split(u.Path, "/")
if 0 == len(ps) {
return fmt.Errorf("missing ID from type URI")
}
id, err := url.PathUnescape(ps[len(ps)-1])
if err != nil {
return err
}
typ.ID = id
typ.BaseURL = strings.TrimSuffix(strings.TrimSuffix(raw, typ.ID), "/")
return nil
}
// baseDTO is the wire form of the five standard RFC 7807 members; DTO's
// Marshal/Unmarshal methods encode through it, then splice Extensions in
// or out separately.
type baseDTO struct {
	Type string `json:"type" xml:"type"`
	Title string `json:"title" xml:"title"`
	Status int `json:"status,omitempty" xml:"status,omitempty"`
	Detail string `json:"detail,omitempty" xml:"detail,omitempty"`
	Instance string `json:"instance,omitempty" xml:"instance,omitempty"`
}
// MarshalJSON encodes the DTO as a single flat RFC 7807 JSON object,
// splicing the members of Extensions into the top level alongside the
// standard fields.
func (v DTO) MarshalJSON() ([]byte, error) {
	base, err := json.Marshal(baseDTO{
		Type: v.Type.String(),
		Title: v.Title,
		Status: v.Status,
		Detail: v.Detail,
		Instance: v.Instance,
	})
	if err != nil {
		return nil, err
	}
	if !v.hasExtensions() {
		return base, nil
	}
	extra, err := json.Marshal(v.Extensions)
	if err != nil {
		return nil, err
	}
	// A typed-nil map or an all-omitempty struct passes hasExtensions but
	// marshals to "null" or "{}"; splicing either would emit invalid JSON
	// (a trailing comma / stray "null"), so fall back to the base object.
	if !bytes.HasPrefix(extra, []byte("{")) || bytes.Equal(extra, []byte("{}")) {
		return base, nil
	}
	// Stitch: drop base's closing brace, add a comma, drop extra's opening
	// brace — producing one merged object.
	var out []byte
	out = append(out, bytes.TrimSuffix(base, []byte("}"))...)
	out = append(out, ',')
	out = append(out, bytes.TrimPrefix(extra, []byte("{"))...)
	return out, nil
}
// UnmarshalJSON decodes a flat RFC 7807 JSON object: the five standard
// members populate the struct fields, and any remaining top-level members
// are collected into Extensions (as map[string]any), or left nil if none.
//
// The parameter is named data (not bytes) so it does not shadow the bytes
// package imported by this file.
func (v *DTO) UnmarshalJSON(data []byte) error {
	var base baseDTO
	if err := json.Unmarshal(data, &base); err != nil {
		return err
	}
	var typ Type
	if err := typ.Parse(base.Type); err != nil {
		return err
	}
	v.Type = typ
	v.Title = base.Title
	v.Status = base.Status
	v.Detail = base.Detail
	v.Instance = base.Instance
	// Second decode into a generic map to pick up the extension members,
	// then strip the standard ones.
	var ext map[string]any
	if err := json.Unmarshal(data, &ext); err != nil {
		return err
	}
	delete(ext, "type")
	delete(ext, "title")
	delete(ext, "status")
	delete(ext, "detail")
	delete(ext, "instance")
	if len(ext) != 0 {
		v.Extensions = ext
	}
	return nil
}
// hasExtensions reports whether Extensions would contribute members to the
// encoded object: true for any map, or for a struct with at least one
// field; false for nil and every other kind.
func (v DTO) hasExtensions() bool {
	switch rt := reflect.TypeOf(v.Extensions); {
	case rt == nil:
		return false
	case rt.Kind() == reflect.Map:
		return true
	case rt.Kind() == reflect.Struct:
		return rt.NumField() != 0
	default:
		return false
	}
}
|
/*
Copyright 2020 Docker Compose CLI authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package compose
import (
"context"
"encoding/json"
"fmt"
"io"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/docker/cli/cli/command"
"github.com/docker/docker/api/types/registry"
"github.com/compose-spec/compose-go/types"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command/image/build"
dockertypes "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/builder/remotecontext/urlutil"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/progress"
"github.com/docker/docker/pkg/streamformatter"
"github.com/pkg/errors"
"github.com/docker/compose/v2/pkg/api"
)
//nolint:gocyclo
// doBuildClassic builds a service image with the legacy (non-BuildKit)
// builder: it resolves the build context (local dir, git URL, or plain
// URL), tars and compresses it, streams it to the daemon's /build
// endpoint, and returns the resulting image ID. Features the classic
// builder cannot honor (multi-arch, privileged, additional contexts, SSH,
// secrets) are rejected up front with a pointer to DOCKER_BUILDKIT=1.
func (s *composeService) doBuildClassic(ctx context.Context, project *types.Project, service types.ServiceConfig, options api.BuildOptions) (string, error) {
	var (
		buildCtx io.ReadCloser
		dockerfileCtx io.ReadCloser
		contextDir string
		tempDir string
		relDockerfile string
		err error
	)
	dockerfileName := dockerFilePath(service.Build.Context, service.Build.Dockerfile)
	specifiedContext := service.Build.Context
	// Progress and build output both go to stdout here.
	progBuff := s.stdout()
	buildBuff := s.stdout()
	// Reject options the classic builder does not support.
	if len(service.Build.Platforms) > 1 {
		return "", errors.Errorf("the classic builder doesn't support multi-arch build, set DOCKER_BUILDKIT=1 to use BuildKit")
	}
	if service.Build.Privileged {
		return "", errors.Errorf("the classic builder doesn't support privileged mode, set DOCKER_BUILDKIT=1 to use BuildKit")
	}
	if len(service.Build.AdditionalContexts) > 0 {
		return "", errors.Errorf("the classic builder doesn't support additional contexts, set DOCKER_BUILDKIT=1 to use BuildKit")
	}
	if len(service.Build.SSH) > 0 {
		return "", errors.Errorf("the classic builder doesn't support SSH keys, set DOCKER_BUILDKIT=1 to use BuildKit")
	}
	if len(service.Build.Secrets) > 0 {
		return "", errors.Errorf("the classic builder doesn't support secrets, set DOCKER_BUILDKIT=1 to use BuildKit")
	}
	if service.Build.Labels == nil {
		service.Build.Labels = make(map[string]string)
	}
	service.Build.Labels[api.ImageBuilderLabel] = "classic"
	// Resolve the build context source.
	switch {
	case isLocalDir(specifiedContext):
		contextDir, relDockerfile, err = build.GetContextFromLocalDir(specifiedContext, dockerfileName)
		if err == nil && strings.HasPrefix(relDockerfile, ".."+string(filepath.Separator)) {
			// Dockerfile is outside of build-context; read the Dockerfile and pass it as dockerfileCtx
			dockerfileCtx, err = os.Open(dockerfileName)
			if err != nil {
				return "", errors.Errorf("unable to open Dockerfile: %v", err)
			}
			defer dockerfileCtx.Close() //nolint:errcheck
		}
	case urlutil.IsGitURL(specifiedContext):
		tempDir, relDockerfile, err = build.GetContextFromGitURL(specifiedContext, dockerfileName)
	case urlutil.IsURL(specifiedContext):
		buildCtx, relDockerfile, err = build.GetContextFromURL(progBuff, specifiedContext, dockerfileName)
	default:
		return "", errors.Errorf("unable to prepare context: path %q not found", specifiedContext)
	}
	if err != nil {
		return "", errors.Errorf("unable to prepare context: %s", err)
	}
	// Git URLs are cloned into a temp dir that must be cleaned up.
	if tempDir != "" {
		defer os.RemoveAll(tempDir) //nolint:errcheck
		contextDir = tempDir
	}
	// read from a directory into tar archive
	if buildCtx == nil {
		excludes, err := build.ReadDockerignore(contextDir)
		if err != nil {
			return "", err
		}
		if err := build.ValidateContextDirectory(contextDir, excludes); err != nil {
			return "", errors.Wrap(err, "checking context")
		}
		// And canonicalize dockerfile name to a platform-independent one
		relDockerfile = archive.CanonicalTarNameForPath(relDockerfile)
		excludes = build.TrimBuildFilesFromExcludes(excludes, relDockerfile, false)
		buildCtx, err = archive.TarWithOptions(contextDir, &archive.TarOptions{
			ExcludePatterns: excludes,
			ChownOpts: &idtools.Identity{},
		})
		if err != nil {
			return "", err
		}
	}
	// replace Dockerfile if it was added from stdin or a file outside the build-context, and there is archive context
	if dockerfileCtx != nil && buildCtx != nil {
		buildCtx, relDockerfile, err = build.AddDockerfileToBuildContext(dockerfileCtx, buildCtx)
		if err != nil {
			return "", err
		}
	}
	buildCtx, err = build.Compress(buildCtx)
	if err != nil {
		return "", err
	}
	// Wrap the context stream so upload progress is reported.
	progressOutput := streamformatter.NewProgressOutput(progBuff)
	body := progress.NewProgressReader(buildCtx, progressOutput, 0, "", "Sending build context to Docker daemon")
	// Forward all registry credentials so the daemon can pull base images.
	configFile := s.configFile()
	creds, err := configFile.GetAllCredentials()
	if err != nil {
		return "", err
	}
	authConfigs := make(map[string]registry.AuthConfig, len(creds))
	for k, auth := range creds {
		authConfigs[k] = registry.AuthConfig(auth)
	}
	buildOptions := imageBuildOptions(s.dockerCli, project, service, options)
	imageName := api.GetImageNameOrDefault(service, project.Name)
	buildOptions.Tags = append(buildOptions.Tags, imageName)
	buildOptions.Dockerfile = relDockerfile
	buildOptions.AuthConfigs = authConfigs
	buildOptions.Memory = options.Memory
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	response, err := s.apiClient().ImageBuild(ctx, body, buildOptions)
	if err != nil {
		return "", err
	}
	defer response.Body.Close() //nolint:errcheck
	// The daemon streams JSON messages; the image ID arrives in an aux
	// message once the build succeeds.
	imageID := ""
	aux := func(msg jsonmessage.JSONMessage) {
		var result dockertypes.BuildResult
		if err := json.Unmarshal(*msg.Aux, &result); err != nil {
			fmt.Fprintf(s.stderr(), "Failed to parse aux message: %s", err)
		} else {
			imageID = result.ID
		}
	}
	err = jsonmessage.DisplayJSONMessagesStream(response.Body, buildBuff, progBuff.FD(), true, aux)
	if err != nil {
		if jerr, ok := err.(*jsonmessage.JSONError); ok {
			// If no error code is set, default to 1
			if jerr.Code == 0 {
				jerr.Code = 1
			}
			return "", cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code}
		}
		return "", err
	}
	// Windows: show error message about modified file permissions if the
	// daemon isn't running Windows.
	if response.OSType != "windows" && runtime.GOOS == "windows" {
		// if response.OSType != "windows" && runtime.GOOS == "windows" && !options.quiet {
		fmt.Fprintln(s.stdout(), "SECURITY WARNING: You are building a Docker "+
			"image from Windows against a non-Windows Docker host. All files and "+
			"directories added to build context will have '-rwxr-xr-x' permissions. "+
			"It is recommended to double check and reset permissions for sensitive "+
			"files and directories.")
	}
	return imageID, nil
}
// isLocalDir reports whether c names an existing path on the local
// filesystem. Note it stats the path, so any existing file — not only a
// directory — satisfies it.
func isLocalDir(c string) bool {
	if _, err := os.Stat(c); err != nil {
		return false
	}
	return true
}
// imageBuildOptions maps a compose service's build section to the engine's
// ImageBuildOptions for the classic (V1) builder. Remove is always true so
// intermediate containers are cleaned up after a successful build.
func imageBuildOptions(dockerCli command.Cli, project *types.Project, service types.ServiceConfig, options api.BuildOptions) dockertypes.ImageBuildOptions {
	config := service.Build
	return dockertypes.ImageBuildOptions{
		Version: dockertypes.BuilderV1,
		Tags: config.Tags,
		NoCache: config.NoCache,
		Remove: true,
		PullParent: config.Pull,
		BuildArgs: resolveAndMergeBuildArgs(dockerCli, project, service, options),
		Labels: config.Labels,
		NetworkMode: config.Network,
		ExtraHosts: config.ExtraHosts.AsList(),
		Target: config.Target,
		Isolation: container.Isolation(config.Isolation),
	}
}
|
package funnystring
// https://www.hackerrank.com/challenges/funny-string
// absDiff returns the absolute difference of two runes after truncating
// each to a uint16 code unit.
func absDiff(ar, br rune) uint16 {
	lo, hi := uint16(ar), uint16(br)
	if hi < lo {
		lo, hi = hi, lo
	}
	return hi - lo
}
// FunnyString - implements the solution to the problem.
// A string is "Funny" when, for every i, the absolute difference between
// consecutive characters equals the same difference in the reversed string.
//
// Fixed: the loop bound previously used len(s) (byte length) while
// indexing the rune slice, which panics on multi-byte UTF-8 input; it now
// uses the rune count.
func FunnyString(s string) string {
	sr := []rune(s)
	n := len(sr)
	for i := 1; i < n; i++ {
		if runeDiff16(sr[i], sr[i-1]) != runeDiff16(sr[n-i-1], sr[n-i]) {
			return "Not Funny"
		}
	}
	return "Funny"
}

// runeDiff16 is the absolute difference of two runes truncated to uint16,
// matching the semantics of the file's absDiff helper.
func runeDiff16(ar, br rune) uint16 {
	a := uint16(ar)
	b := uint16(br)
	if a < b {
		return b - a
	}
	return a - b
}
|
package config
import "testing"
// TestInitConfig smoke-tests configuration loading: it initializes the
// config from a YAML file and logs the resulting service section.
// NOTE(review): the path "/../config/config.yml" begins at the filesystem
// root; presumably InitConfig joins it onto a base directory — confirm
// against its implementation.
func TestInitConfig(t *testing.T) {
	InitConfig("/../config/config.yml")
	t.Log(GetService())
}
|
package main
import (
"database/sql"
"fmt"
_ "github.com/lib/pq"
)
// Connection parameters for the PostgreSQL instance.
// SECURITY(review): credentials and host are hard-coded in source; these
// should come from environment variables or a secrets store.
const (
	user = "postgres"
	password = "pass"
	host = "prix.plus"
	dbname = "admin"
	sslmode = "disable"
)

// InitDB opens a PostgreSQL connection pool using the constants above and
// verifies connectivity with a ping. It returns the ready pool, or nil and
// the first error encountered.
func InitDB() (*sql.DB, error) {
	dbinfo := fmt.Sprintf("user=%s password=%s host=%s dbname=%s sslmode=%s",
		user, password, host, dbname, sslmode)
	db, err := sql.Open("postgres", dbinfo)
	if err != nil {
		return nil, err
	}
	// sql.Open does not dial; Ping verifies the connection. Close the pool
	// on failure so the handle is not leaked (previously it was dropped
	// without Close).
	if err := db.Ping(); err != nil {
		db.Close() //nolint:errcheck // already returning the ping error
		return nil, err
	}
	return db, nil
}
|
package main
import (
"log"
"math/rand"
"sync"
"time"
)
//STARTMAIN, OMIT
// main wires up two players and a game, then runs the referee on the main
// goroutine until one player wins.
func main() {
	r := referree{}
	p1 := newPlayer("ping")
	p2 := newPlayer("pong")
	g := newGame(p1, p2) // HL3
	r.Start(g) //start on main goroutine // HL3
}
//STOPMAIN, OMIT
// ball is the token passed between players over the table channel: it
// counts hits and records the last player to touch it, which the referee
// uses to award a point when the ball is dropped.
type ball struct {
	hits int
	lastPlayer string
}
//STARTNEWGAME, OMIT
// newGame builds a game over the given players with an unbuffered table
// channel and starts the game's control loop on its own goroutine.
func newGame(player ...*player) *game {
	g := &game{ //...
		Score: make(map[string]int), //OMIT
		Players: player, //OMIT
		table: make(chan *ball), //OMIT
		done: make(chan struct{}), //OMIT
	}
	go g.loop() // HL3
	return g
}
//STOPNEWGAME, OMIT
//STARTGAMESTRUCT, OMIT
// game holds the shared match state. The embedded Mutex guards Score
// (see IncrementScore); table carries the ball between players and done
// signals the control loop to stop everyone.
type game struct {
	sync.Mutex // HL3
	Score map[string]int
	Players []*player
	table chan *ball
	done chan struct{}
}
// Table returns the channel used to
// distribute the ball
func (g *game) Table() chan *ball { // HL3
	return g.table
}
//STOPGAMESTRUCT, OMIT
//STARTGAMELOOP, OMIT
// loop runs on its own goroutine (started by newGame): it blocks until the
// done channel fires, then tells every player to stop and exits.
func (g *game) loop() {
	for {
		select {
		case <-g.done: // HL3
			for _, p := range g.Players {
				p.Done()
			}
			return
		}
	}
}
//STOPGAMELOOP, OMIT
//STARTGAMESTART, OMIT
// Start tells all players to start playing;
// internally it spawns a goroutine per player.
// done is where a player sends a ball it dropped, for the referee to score.
func (g *game) Start(done chan *ball) {
	for _, p := range g.Players {
		go p.Play(g.Table(), done) // HL3
	}
}
//STOPGAMESTART, OMIT
//STARTGAMEINCSCORE, OMIT
// IncrementScore increments the score of player p under the game mutex and
// reports whether p has reached the winning score (hard-coded to 3).
func (g *game) IncrementScore(p string) bool {
	g.Lock() // HL3
	defer g.Unlock() // HL3
	log.Println("increment score for", p) // OMIT
	// The existence check is redundant (g.Score[p]++ handles missing keys)
	// but kept for the slide's illustration of the guarded section.
	if _, ok := g.Score[p]; ok { // <- potential race // HL3
		g.Score[p]++
	} else {
		g.Score[p] = 1
	}
	if g.Score[p] == 3 {
		return true
	}
	return false
}
//STOPGAMEINCSCORE, OMIT
//STARTPLAYER, OMIT
// newPlayer creates a named player with its own unbuffered stop channel.
func newPlayer(name string) *player {
	return &player{
		Name: name,
		done: make(chan struct{}),
	}
}
// player hits balls received on the table channel until signalled via done.
type player struct {
	Name string
	done chan struct{} // HL3
}
//STOPPLAYER, OMIT
//STARTPLAYERDONE, OMIT
// Done signals the player's Play loop to stop. It blocks until Play
// receives the signal (the done channel is unbuffered). The value receiver
// is fine here because channels are reference types.
func (p player) Done() {
	p.done <- struct{}{} // HL3
}
//STOPPLAYERDONE, OMIT
//STARTPLAYERPLAY, OMIT
// Play receives the ball from table, randomly "drops" it (sending it to
// the referee on done, roughly 1 in 11 turns) or hits it back, until the
// player's stop channel fires.
// NOTE(review): a fresh rand.Source is created every iteration — one
// source per player would suffice; kept as-is for the slide.
func (p player) Play(table chan *ball, done chan *ball) { // HL3
	for {
		s := rand.NewSource(time.Now().UnixNano()) // OMIT
		r := rand.New(s) // OMIT
		//.....
		select {
		case ball := <-table:
			v := r.Intn(1001) // OMIT
			if v%11 == 0 {
				log.Println(p.Name, "drop the ball") // OMIT
				done <- ball
				continue //continue instead of return // HL3
			}
			ball.hits++
			ball.lastPlayer = p.Name
			time.Sleep(50 * time.Millisecond)
			log.Println(p.Name, "hits ball", ball.hits) // OMIT
			table <- ball
		case <-p.done: // receive from done channel // HL3
			log.Println(p.Name, "done with the game") // OMIT
			return // HL3
		}
	}
}
//STOPPLAYERPLAY, OMIT
//STARTREFERREESTART, OMIT
// referree (sic — spelling kept for the slides) scores dropped balls and
// ends the game when a player reaches the winning score.
type referree struct{} // HL3
// Start serves the first ball, then awards a point to the last hitter each
// time a ball comes back on done; when IncrementScore reports a winner it
// signals the game loop to stop and returns.
func (r *referree) Start(g *game) {
	done := make(chan *ball) // HL3
	g.Start(done)
	table := g.Table()
	table <- new(ball) // HL3
	for {
		select {
		case b := <-done:
			log.Println("ball is returned to referree. point for ", b.lastPlayer) // OMIT
			if g.IncrementScore(b.lastPlayer) {
				log.Println("game is over. winner is", b.lastPlayer) // OMIT
				g.done <- struct{}{} // HL3
				return // HL3
			}
			table <- new(ball) // start new round // HL3
		}
	}
}
//STOPREFERREESTART, OMIT
|
// Copyright 2021 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration_test
import (
"testing"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/buffer"
"gvisor.dev/gvisor/pkg/tcpip/header"
"gvisor.dev/gvisor/pkg/tcpip/link/channel"
"gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
"gvisor.dev/gvisor/pkg/tcpip/network/ipv6"
"gvisor.dev/gvisor/pkg/tcpip/stack"
)
// inputIfNameMatcher is an iptables rule matcher that matches only packets
// arriving on the Input hook via the NIC whose name equals name.
type inputIfNameMatcher struct {
	name string
}
// Compile-time check that inputIfNameMatcher satisfies stack.Matcher.
var _ stack.Matcher = (*inputIfNameMatcher)(nil)
func (*inputIfNameMatcher) Name() string {
	return "inputIfNameMatcher"
}
// Match reports (matched, hotdrop); hotdrop is always false here.
func (im *inputIfNameMatcher) Match(hook stack.Hook, _ *stack.PacketBuffer, inNicName, _ string) (bool, bool) {
	return (hook == stack.Input && im.name != "" && im.name == inNicName), false
}
// Shared fixtures: one NIC, raw (byte-string) IPv4/IPv6 source and
// destination addresses, and a fixed payload size for generated packets.
const (
	nicID = 1
	nicName = "nic1"
	anotherNicName = "nic2"
	linkAddr = tcpip.LinkAddress("\x0a\x0b\x0c\x0d\x0e\x0e")
	srcAddrV4 = "\x0a\x00\x00\x01"
	dstAddrV4 = "\x0a\x00\x00\x02"
	srcAddrV6 = "\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01"
	dstAddrV6 = "\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02"
	payloadSize = 20
)
// genStackV6 builds an IPv6-only stack with one channel endpoint named
// nicName and assigns dstAddrV6 to it, so injected packets addressed there
// are delivered locally.
func genStackV6(t *testing.T) (*stack.Stack, *channel.Endpoint) {
	t.Helper()
	s := stack.New(stack.Options{
		NetworkProtocols: []stack.NetworkProtocolFactory{ipv6.NewProtocol},
	})
	e := channel.New(0, header.IPv6MinimumMTU, linkAddr)
	nicOpts := stack.NICOptions{Name: nicName}
	if err := s.CreateNICWithOptions(nicID, e, nicOpts); err != nil {
		t.Fatalf("CreateNICWithOptions(%d, _, %#v) = %s", nicID, nicOpts, err)
	}
	if err := s.AddAddress(nicID, header.IPv6ProtocolNumber, dstAddrV6); err != nil {
		t.Fatalf("AddAddress(%d, %d, %s) = %s", nicID, header.IPv6ProtocolNumber, dstAddrV6, err)
	}
	return s, e
}
// genStackV4 is the IPv4 counterpart of genStackV6: one NIC named nicName
// with dstAddrV4 assigned.
func genStackV4(t *testing.T) (*stack.Stack, *channel.Endpoint) {
	t.Helper()
	s := stack.New(stack.Options{
		NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol},
	})
	e := channel.New(0, header.IPv4MinimumMTU, linkAddr)
	nicOpts := stack.NICOptions{Name: nicName}
	if err := s.CreateNICWithOptions(nicID, e, nicOpts); err != nil {
		t.Fatalf("CreateNICWithOptions(%d, _, %#v) = %s", nicID, nicOpts, err)
	}
	if err := s.AddAddress(nicID, header.IPv4ProtocolNumber, dstAddrV4); err != nil {
		t.Fatalf("AddAddress(%d, %d, %s) = %s", nicID, header.IPv4ProtocolNumber, dstAddrV4, err)
	}
	return s, e
}
// genPacketV6 builds a minimal IPv6 packet from srcAddrV6 to dstAddrV6
// with an unassigned transport protocol (99) and payloadSize bytes of
// zeroed payload.
func genPacketV6() *stack.PacketBuffer {
	pktSize := header.IPv6MinimumSize + payloadSize
	hdr := buffer.NewPrependable(pktSize)
	ip := header.IPv6(hdr.Prepend(pktSize))
	ip.Encode(&header.IPv6Fields{
		PayloadLength: payloadSize,
		TransportProtocol: 99,
		HopLimit: 255,
		SrcAddr: srcAddrV6,
		DstAddr: dstAddrV6,
	})
	vv := hdr.View().ToVectorisedView()
	return stack.NewPacketBuffer(stack.PacketBufferOptions{Data: vv})
}
// genPacketV4 builds a minimal IPv4 packet from srcAddrV4 to dstAddrV4
// (protocol 99) with a valid header checksum.
// NOTE(review): FragmentOffset is 16, which marks this as a non-initial
// fragment — confirm that is intended for these stat tests.
func genPacketV4() *stack.PacketBuffer {
	pktSize := header.IPv4MinimumSize + payloadSize
	hdr := buffer.NewPrependable(pktSize)
	ip := header.IPv4(hdr.Prepend(pktSize))
	ip.Encode(&header.IPv4Fields{
		TOS: 0,
		TotalLength: uint16(pktSize),
		ID: 1,
		Flags: 0,
		FragmentOffset: 16,
		TTL: 48,
		Protocol: 99,
		SrcAddr: srcAddrV4,
		DstAddr: dstAddrV4,
	})
	// Zero the checksum field before computing the real value.
	ip.SetChecksum(0)
	ip.SetChecksum(^ip.CalculateChecksum())
	vv := hdr.View().ToVectorisedView()
	return stack.NewPacketBuffer(stack.PacketBufferOptions{Data: vv})
}
// TestIPTablesStatsForInput verifies the PacketsReceived and
// IPTablesInputDropped stats for IPv4/IPv6 packets injected through the
// Input hook under various filter-table configurations (interface match,
// invert match, and custom matchers).
//
// Fixed: the t.Fatalf messages previously misspelled ReplaceTable as
// "RelaceTable".
func TestIPTablesStatsForInput(t *testing.T) {
	tests := []struct {
		name string
		setupStack func(*testing.T) (*stack.Stack, *channel.Endpoint)
		setupFilter func(*testing.T, *stack.Stack)
		genPacket func() *stack.PacketBuffer
		proto tcpip.NetworkProtocolNumber
		expectReceived int
		expectInputDropped int
	}{
		{
			name: "IPv6 Accept",
			setupStack: genStackV6,
			setupFilter: func(*testing.T, *stack.Stack) { /* no filter */ },
			genPacket: genPacketV6,
			proto: header.IPv6ProtocolNumber,
			expectReceived: 1,
			expectInputDropped: 0,
		},
		{
			name: "IPv4 Accept",
			setupStack: genStackV4,
			setupFilter: func(*testing.T, *stack.Stack) { /* no filter */ },
			genPacket: genPacketV4,
			proto: header.IPv4ProtocolNumber,
			expectReceived: 1,
			expectInputDropped: 0,
		},
		{
			name: "IPv6 Drop (input interface matches)",
			setupStack: genStackV6,
			setupFilter: func(t *testing.T, s *stack.Stack) {
				t.Helper()
				ipt := s.IPTables()
				filter := ipt.GetTable(stack.FilterID, true /* ipv6 */)
				ruleIdx := filter.BuiltinChains[stack.Input]
				filter.Rules[ruleIdx].Filter = stack.IPHeaderFilter{InputInterface: nicName}
				filter.Rules[ruleIdx].Target = &stack.DropTarget{}
				filter.Rules[ruleIdx].Matchers = []stack.Matcher{&inputIfNameMatcher{nicName}}
				// Make sure the packet is not dropped by the next rule.
				filter.Rules[ruleIdx+1].Target = &stack.AcceptTarget{}
				if err := ipt.ReplaceTable(stack.FilterID, filter, true /* ipv6 */); err != nil {
					t.Fatalf("ipt.ReplaceTable(%d, _, %t): %s", stack.FilterID, true, err)
				}
			},
			genPacket: genPacketV6,
			proto: header.IPv6ProtocolNumber,
			expectReceived: 1,
			expectInputDropped: 1,
		},
		{
			name: "IPv4 Drop (input interface matches)",
			setupStack: genStackV4,
			setupFilter: func(t *testing.T, s *stack.Stack) {
				t.Helper()
				ipt := s.IPTables()
				filter := ipt.GetTable(stack.FilterID, false /* ipv6 */)
				ruleIdx := filter.BuiltinChains[stack.Input]
				filter.Rules[ruleIdx].Filter = stack.IPHeaderFilter{InputInterface: nicName}
				filter.Rules[ruleIdx].Target = &stack.DropTarget{}
				filter.Rules[ruleIdx].Matchers = []stack.Matcher{&inputIfNameMatcher{nicName}}
				filter.Rules[ruleIdx+1].Target = &stack.AcceptTarget{}
				if err := ipt.ReplaceTable(stack.FilterID, filter, false /* ipv6 */); err != nil {
					t.Fatalf("ipt.ReplaceTable(%d, _, %t): %s", stack.FilterID, false, err)
				}
			},
			genPacket: genPacketV4,
			proto: header.IPv4ProtocolNumber,
			expectReceived: 1,
			expectInputDropped: 1,
		},
		{
			name: "IPv6 Accept (input interface does not match)",
			setupStack: genStackV6,
			setupFilter: func(t *testing.T, s *stack.Stack) {
				t.Helper()
				ipt := s.IPTables()
				filter := ipt.GetTable(stack.FilterID, true /* ipv6 */)
				ruleIdx := filter.BuiltinChains[stack.Input]
				filter.Rules[ruleIdx].Filter = stack.IPHeaderFilter{InputInterface: anotherNicName}
				filter.Rules[ruleIdx].Target = &stack.DropTarget{}
				filter.Rules[ruleIdx+1].Target = &stack.AcceptTarget{}
				if err := ipt.ReplaceTable(stack.FilterID, filter, true /* ipv6 */); err != nil {
					t.Fatalf("ipt.ReplaceTable(%d, _, %t): %s", stack.FilterID, true, err)
				}
			},
			genPacket: genPacketV6,
			proto: header.IPv6ProtocolNumber,
			expectReceived: 1,
			expectInputDropped: 0,
		},
		{
			name: "IPv4 Accept (input interface does not match)",
			setupStack: genStackV4,
			setupFilter: func(t *testing.T, s *stack.Stack) {
				t.Helper()
				ipt := s.IPTables()
				filter := ipt.GetTable(stack.FilterID, false /* ipv6 */)
				ruleIdx := filter.BuiltinChains[stack.Input]
				filter.Rules[ruleIdx].Filter = stack.IPHeaderFilter{InputInterface: anotherNicName}
				filter.Rules[ruleIdx].Target = &stack.DropTarget{}
				filter.Rules[ruleIdx+1].Target = &stack.AcceptTarget{}
				if err := ipt.ReplaceTable(stack.FilterID, filter, false /* ipv6 */); err != nil {
					t.Fatalf("ipt.ReplaceTable(%d, _, %t): %s", stack.FilterID, false, err)
				}
			},
			genPacket: genPacketV4,
			proto: header.IPv4ProtocolNumber,
			expectReceived: 1,
			expectInputDropped: 0,
		},
		{
			name: "IPv6 Drop (input interface does not match but invert is true)",
			setupStack: genStackV6,
			setupFilter: func(t *testing.T, s *stack.Stack) {
				t.Helper()
				ipt := s.IPTables()
				filter := ipt.GetTable(stack.FilterID, true /* ipv6 */)
				ruleIdx := filter.BuiltinChains[stack.Input]
				filter.Rules[ruleIdx].Filter = stack.IPHeaderFilter{
					InputInterface: anotherNicName,
					InputInterfaceInvert: true,
				}
				filter.Rules[ruleIdx].Target = &stack.DropTarget{}
				filter.Rules[ruleIdx+1].Target = &stack.AcceptTarget{}
				if err := ipt.ReplaceTable(stack.FilterID, filter, true /* ipv6 */); err != nil {
					t.Fatalf("ipt.ReplaceTable(%d, _, %t): %s", stack.FilterID, true, err)
				}
			},
			genPacket: genPacketV6,
			proto: header.IPv6ProtocolNumber,
			expectReceived: 1,
			expectInputDropped: 1,
		},
		{
			name: "IPv4 Drop (input interface does not match but invert is true)",
			setupStack: genStackV4,
			setupFilter: func(t *testing.T, s *stack.Stack) {
				t.Helper()
				ipt := s.IPTables()
				filter := ipt.GetTable(stack.FilterID, false /* ipv6 */)
				ruleIdx := filter.BuiltinChains[stack.Input]
				filter.Rules[ruleIdx].Filter = stack.IPHeaderFilter{
					InputInterface: anotherNicName,
					InputInterfaceInvert: true,
				}
				filter.Rules[ruleIdx].Target = &stack.DropTarget{}
				filter.Rules[ruleIdx+1].Target = &stack.AcceptTarget{}
				if err := ipt.ReplaceTable(stack.FilterID, filter, false /* ipv6 */); err != nil {
					t.Fatalf("ipt.ReplaceTable(%d, _, %t): %s", stack.FilterID, false, err)
				}
			},
			genPacket: genPacketV4,
			proto: header.IPv4ProtocolNumber,
			expectReceived: 1,
			expectInputDropped: 1,
		},
		{
			name: "IPv6 Accept (input interface does not match using a matcher)",
			setupStack: genStackV6,
			setupFilter: func(t *testing.T, s *stack.Stack) {
				t.Helper()
				ipt := s.IPTables()
				filter := ipt.GetTable(stack.FilterID, true /* ipv6 */)
				ruleIdx := filter.BuiltinChains[stack.Input]
				filter.Rules[ruleIdx].Target = &stack.DropTarget{}
				filter.Rules[ruleIdx].Matchers = []stack.Matcher{&inputIfNameMatcher{anotherNicName}}
				filter.Rules[ruleIdx+1].Target = &stack.AcceptTarget{}
				if err := ipt.ReplaceTable(stack.FilterID, filter, true /* ipv6 */); err != nil {
					t.Fatalf("ipt.ReplaceTable(%d, _, %t): %s", stack.FilterID, true, err)
				}
			},
			genPacket: genPacketV6,
			proto: header.IPv6ProtocolNumber,
			expectReceived: 1,
			expectInputDropped: 0,
		},
		{
			name: "IPv4 Accept (input interface does not match using a matcher)",
			setupStack: genStackV4,
			setupFilter: func(t *testing.T, s *stack.Stack) {
				t.Helper()
				ipt := s.IPTables()
				filter := ipt.GetTable(stack.FilterID, false /* ipv6 */)
				ruleIdx := filter.BuiltinChains[stack.Input]
				filter.Rules[ruleIdx].Target = &stack.DropTarget{}
				filter.Rules[ruleIdx].Matchers = []stack.Matcher{&inputIfNameMatcher{anotherNicName}}
				filter.Rules[ruleIdx+1].Target = &stack.AcceptTarget{}
				if err := ipt.ReplaceTable(stack.FilterID, filter, false /* ipv6 */); err != nil {
					t.Fatalf("ipt.ReplaceTable(%d, _, %t): %s", stack.FilterID, false, err)
				}
			},
			genPacket: genPacketV4,
			proto: header.IPv4ProtocolNumber,
			expectReceived: 1,
			expectInputDropped: 0,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			s, e := test.setupStack(t)
			test.setupFilter(t, s)
			e.InjectInbound(test.proto, test.genPacket())
			if got := int(s.Stats().IP.PacketsReceived.Value()); got != test.expectReceived {
				t.Errorf("got PacketReceived = %d, want = %d", got, test.expectReceived)
			}
			if got := int(s.Stats().IP.IPTablesInputDropped.Value()); got != test.expectInputDropped {
				t.Errorf("got IPTablesInputDropped = %d, want = %d", got, test.expectInputDropped)
			}
		})
	}
}
|
package torproxy
import (
"crypto/rand"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"log"
"net"
"net/http"
"net/url"
"strings"
"time"
"github.com/caddyserver/certmagic"
"github.com/tdex-network/tor-proxy/pkg/registry"
"golang.org/x/net/http2"
"golang.org/x/net/proxy"
)
// TorClient holds the network location of the tor daemon's SOCKS5
// endpoint; both Serve and NewTorProxyFromHostAndPort dial Host:Port
// as a SOCKS5 proxy.
type TorClient struct {
	Host string // SOCKS5 proxy host
	Port int    // SOCKS5 proxy port
}
// TorProxy holds the tor client details and the list of cleartext
// addresses to be redirected to the onion URLs.
type TorProxy struct {
	Address              string            // TCP address the proxy listens on; set by Serve
	Domains              []string          // TLS domains; set by Serve when TLS options are given
	Client               *TorClient        // host/port of the tor SOCKS5 endpoint
	Registry             registry.Registry // source of onion endpoints; set by WithRegistry
	Redirects            []*url.URL        // known onion upstream origins
	Listener             net.Listener      // active listener; set by Serve
	useTLS               bool              // true when Serve was started with TLS options
	closeAutoUpdaterFunc func()            // stops the WithAutoUpdater goroutine; nil if never started
}
// NewTorProxyFromHostAndPort returns a *TorProxy with given host and port.
// It verifies that a SOCKS5 proxy is actually usable at that location by
// fetching https://check.torproject.org through it.
func NewTorProxyFromHostAndPort(torHost string, torPort int) (*TorProxy, error) {
	// dial to check if socks5 proxy is listening
	dialer, err := proxy.SOCKS5("tcp", fmt.Sprintf("%s:%d", torHost, torPort), nil, proxy.Direct)
	if err != nil {
		return nil, fmt.Errorf("couldn't connect to socks proxy: %w", err)
	}
	tr := &http.Transport{Dial: dialer.Dial}
	c := &http.Client{
		Transport: tr,
		// bound the connectivity check so a stalled tor daemon cannot hang
		// the caller forever
		Timeout: 30 * time.Second,
	}
	req, err := http.NewRequest(http.MethodGet, "https://check.torproject.org", nil)
	if err != nil {
		return nil, fmt.Errorf("couldn't create request : %w", err)
	}
	resp, err := c.Do(req)
	if err != nil {
		return nil, fmt.Errorf("couldn't make request: %w", err)
	}
	// BUG FIX: the response body was previously never closed, leaking the
	// underlying connection.
	resp.Body.Close()
	return &TorProxy{
		Client: &TorClient{
			Host: torHost,
			Port: torPort,
		},
	}, nil
}
// WithRegistry attaches the given registry to the proxy and loads its
// current JSON payload into the redirect list.
func (tp *TorProxy) WithRegistry(regis registry.Registry) error {
	tp.Registry = regis
	payload, err := regis.GetJSON()
	if err == nil {
		err = tp.setRedirectsFromRegistry(payload)
	}
	return err
}
// setRedirectsFromRegistry parses the registry JSON and merges the resulting
// onion upstreams into tp.Redirects. A redirect URL is added if and only if
// the tor proxy doesn't already know the origin.
func (tp *TorProxy) setRedirectsFromRegistry(registryJSON []byte) error {
	redirects, err := parseRegistryJSONtoRedirects(registryJSON)
	if err != nil {
		return err
	}
	for _, to := range redirects {
		// we parse the destination upstream which should be a *.onion address
		origin, err := url.Parse(to)
		if err != nil {
			return fmt.Errorf("failed to parse address : %v", err)
		}
		// skip origins the proxy already knows about
		if !tp.includesRedirect(origin) {
			tp.Redirects = append(tp.Redirects, origin)
		}
	}
	// BUG FIX: this previously returned the stale outer `err` (always nil at
	// this point, since the loop shadows it); return nil explicitly.
	return nil
}
// includesRedirect reports whether a redirect with the same host is
// already registered on the proxy.
func (tp TorProxy) includesRedirect(redirect *url.URL) bool {
	for i := range tp.Redirects {
		if tp.Redirects[i].Host == redirect.Host {
			return true
		}
	}
	return false
}
// WithAutoUpdater starts a goroutine that consumes the results of
// registry.Observe (one snapshot per period) and refreshes the redirect
// list from each snapshot. The returned stop function is stored on the
// TorProxy so Close can terminate the goroutine.
func (tp *TorProxy) WithAutoUpdater(period time.Duration, errorHandler func(err error)) {
	observeRegistryChan, stop := registry.Observe(tp.Registry, period)
	go func() {
		// runs until stop() closes observeRegistryChan
		for newGetJSONResult := range observeRegistryChan {
			if newGetJSONResult.Err != nil {
				// report the fetch error and keep observing
				errorHandler(newGetJSONResult.Err)
				continue
			}
			err := tp.setRedirectsFromRegistry(newGetJSONResult.Json)
			if err != nil {
				errorHandler(err)
			}
		}
	}()
	// remembered so Close can shut the observer down
	tp.closeAutoUpdaterFunc = stop
}
// TLSOptions defines the domains we need to obtain and renew a TLS certificate.
type TLSOptions struct {
	Domains    []string // domains to obtain/renew certificates for via certmagic
	Email      string   // ACME account email (optional)
	UseStaging bool     // if true, use the Let's Encrypt staging CA
	TLSKey     string   // filesystem path to an existing TLS key; with TLSCert, bypasses certmagic
	TLSCert    string   // filesystem path to an existing TLS certificate
}
// Serve starts a HTTP/1.x reverse proxy for all cleartext requests to the registered Onion addresses.
// An address to listen for TCP packets must be given.
// TLS will be enabled if a non-nil *TLSOptions is given. CertMagic will obtain, store and renew certificates for the domains.
// By default, CertMagic stores assets on the local file system in $HOME/.local/share/certmagic (and honors $XDG_DATA_HOME if set).
// CertMagic will create the directory if it does not exist.
// If writes are denied, things will not be happy, so make sure CertMagic can write to it!
// For each onion address we get to know thanks the WithRedirects method, we register a URL.path like
// host:port/<just_onion_host_without_dot_onion>/[<grpc_package>.<grpc_service>/<grpc_method>]
// Each incoming request will be proxied to <just_onion_host_without_dot_onion>.onion/[<grpc_package>.<grpc_service>/<grpc_method>]
func (tp *TorProxy) Serve(address string, options *TLSOptions) error {
	if options != nil {
		var tlsConfig *tls.Config
		// if key and certificate filesystem paths are given, do NOT use certmagic.
		if len(options.TLSKey) > 0 && len(options.TLSCert) > 0 {
			certificate, err := tls.LoadX509KeyPair(options.TLSCert, options.TLSKey)
			if err != nil {
				return err
			}
			tlsConfig = &tls.Config{
				MinVersion:   tls.VersionTLS12,
				NextProtos:   []string{"http/1.1", http2.NextProtoTLS, "h2-14"}, // h2-14 is just for compatibility. will be eventually removed.
				Certificates: []tls.Certificate{certificate},
				CipherSuites: []uint16{
					tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
					tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
				},
			}
			tlsConfig.Rand = rand.Reader
		} else {
			// read and agree to your CA's legal documents
			certmagic.DefaultACME.Agreed = true
			// provide an email address
			if len(options.Email) > 0 {
				certmagic.DefaultACME.Email = options.Email
			}
			// use the staging endpoint while we're developing
			if options.UseStaging {
				certmagic.DefaultACME.CA = certmagic.LetsEncryptStagingCA
			}
			// BUG FIX: `tlsConfig, err := certmagic.TLS(...)` shadowed the outer
			// tlsConfig, so this branch previously handed a nil config to
			// tls.Listen below. Assign to the outer variable instead.
			magicConfig, err := certmagic.TLS(options.Domains)
			if err != nil {
				return err
			}
			magicConfig.NextProtos = []string{"http/1.1", http2.NextProtoTLS, "h2-14"} // h2-14 is just for compatibility. will be eventually removed.
			tlsConfig = magicConfig
		}
		// get a TLS listener
		lis, err := tls.Listen("tcp", address, tlsConfig)
		if err != nil {
			return err
		}
		// Set address and listener
		tp.Address = address
		tp.Listener = lis
		// Set with TLS stuff
		tp.Domains = options.Domains
		tp.useTLS = true
	} else {
		lis, err := net.Listen("tcp", address)
		if err != nil {
			return err
		}
		// Set address and listener
		tp.Address = address
		tp.Listener = lis
	}
	// Create a socks5 dialer
	dialer, err := proxy.SOCKS5("tcp", fmt.Sprintf("%s:%d", tp.Client.Host, tp.Client.Port), nil, proxy.Direct)
	if err != nil {
		// BUG FIX: this used log.Fatalf, which killed the whole process from a
		// method that already returns an error; log and propagate instead.
		log.Printf("couldn't connect to socks proxy: %s", err.Error())
		return fmt.Errorf("couldn't connect to socks proxy: %w", err)
	}
	// Now we can reverse proxy all the redirects
	return reverseProxy(tp.Redirects, tp.Listener, dialer)
}
// Close releases the proxy's resources: it stops the registry auto-updater
// (when one was started via WithAutoUpdater) and closes the listener.
func (tp *TorProxy) Close() error {
	// BUG FIX: stop the auto-updater first and unconditionally; previously its
	// goroutine leaked whenever closing the listener returned an error.
	if tp.closeAutoUpdaterFunc != nil {
		tp.closeAutoUpdaterFunc()
	}
	// guard against Close being called before Serve assigned a listener
	if tp.Listener == nil {
		return nil
	}
	return tp.Listener.Close()
}
// reverseProxy takes an address where to listen, a dialer with SOCKS5 proxy and a list of redirects as a list of URLs
// the incoming request should match the pattern host:port/<just_onion_host_without_dot_onion>/<grpc_package>.<grpc_service>/<grpc_method>
func reverseProxy(redirects []*url.URL, lis net.Listener, dialer proxy.Dialer) error {
	for _, to := range redirects {
		// the leading path segment routing to this upstream, e.g. "/<onionhost>"
		removeForUpstream := "/" + withoutOnion(to.Host)
		// get a simple reverse proxy
		revproxy := generateReverseProxy(to, dialer)
		// NOTE(review): handlers are registered on http.DefaultServeMux, so
		// registering the same onion host twice would panic — confirm callers
		// invoke this only once per process.
		http.HandleFunc(removeForUpstream+"/", func(w http.ResponseWriter, r *http.Request) {
			// add cors headers
			addCorsHeader(w, r)
			// Handler pre-flight requests
			if r.Method == http.MethodOptions {
				return
			}
			// prepare request removing useless headers
			if err := prepareRequest(r); err != nil {
				http.Error(w, fmt.Errorf("preparation request in reverse proxy: %w", err).Error(), http.StatusInternalServerError)
				return
			}
			// remove the <just_onion_host_without_dot_onion> from the upstream path
			pathWithOnion := r.URL.Path
			pathWithoutOnion := strings.ReplaceAll(pathWithOnion, removeForUpstream, "")
			r.URL.Path = pathWithoutOnion
			revproxy.ServeHTTP(w, r)
		})
	}
	// blocks, serving the default mux on the provided listener
	return http.Serve(lis, nil)
}
// withoutOnion strips an optional ":port" suffix and every ".onion"
// occurrence from host, returning the bare onion identifier.
func withoutOnion(host string) string {
	hostWithoutPort, _, err := net.SplitHostPort(host)
	if err != nil {
		// BUG FIX: SplitHostPort fails when host carries no port, and the error
		// was ignored, turning such hosts into the empty string. Fall back to
		// the raw host instead.
		hostWithoutPort = host
	}
	return strings.ReplaceAll(hostWithoutPort, ".onion", "")
}
func addCorsHeader(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodOptions {
return
}
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE")
w.Header().Set("Access-Control-Allow-Headers", "*")
}
// parseRegistryJSONtoRedirects decodes the registry JSON — an array of
// string maps with an "endpoint" key — and returns every endpoint whose
// value mentions an onion address. It errors on malformed JSON or when no
// onion endpoint is present.
func parseRegistryJSONtoRedirects(registryJSON []byte) ([]string, error) {
	var entries []map[string]string
	if err := json.Unmarshal(registryJSON, &entries); err != nil {
		return nil, fmt.Errorf("invalid JSON: %w", err)
	}
	onionEndpoints := make([]string, 0)
	for _, entry := range entries {
		endpoint := entry["endpoint"]
		if strings.Contains(endpoint, "onion") {
			onionEndpoints = append(onionEndpoints, endpoint)
		}
	}
	if len(onionEndpoints) == 0 {
		return nil, errors.New("no valid onion endpoints found")
	}
	return onionEndpoints, nil
}
|
package main
import (
"fmt"
"os"
"strconv"
)
// fbzrecur prints the fizz/buzz/fizzbuzz classification for every integer
// from number down to 3, recursing once per value. Numbers divisible by
// neither 3 nor 5 are skipped silently.
//
// BUG FIX: the base case previously called os.Exit(0), terminating the whole
// process and making the function unusable from any caller that has work left
// to do; it now simply returns.
func fbzrecur(number int) {
	if number < 3 {
		return
	}
	switch {
	case number%15 == 0:
		fmt.Println("Number:", number, "is fizzbuzz")
	case number%5 == 0:
		fmt.Println("Number:", number, "is buzz")
	case number%3 == 0:
		fmt.Println("Number:", number, "is fizz")
	}
	fbzrecur(number - 1)
}
// main parses a single integer command-line argument and prints the
// fizzbuzz sequence from that number down to 3.
func main() {
	if len(os.Args) < 2 {
		fmt.Println("Please provide a number")
		os.Exit(1)
	}
	// BUG FIX: the Atoi error was silently discarded, so non-numeric input
	// was treated as 0; reject it explicitly instead.
	num, err := strconv.Atoi(os.Args[1])
	if err != nil {
		fmt.Println("Please provide a valid number")
		os.Exit(1)
	}
	fbzrecur(num)
}
|
// Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package sql
import (
"bytes"
"context"
"fmt"
"net"
"net/url"
"reflect"
"regexp"
"sort"
"strings"
"time"
"github.com/cockroachdb/apd/v2"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/clusterversion"
"github.com/cockroachdb/cockroach/pkg/config"
"github.com/cockroachdb/cockroach/pkg/config/zonepb"
"github.com/cockroachdb/cockroach/pkg/featureflag"
"github.com/cockroachdb/cockroach/pkg/gossip"
"github.com/cockroachdb/cockroach/pkg/jobs"
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord"
"github.com/cockroachdb/cockroach/pkg/kv/kvclient/rangecache"
"github.com/cockroachdb/cockroach/pkg/kv/kvclient/rangefeed"
"github.com/cockroachdb/cockroach/pkg/kv/kvserver/protectedts"
"github.com/cockroachdb/cockroach/pkg/migration"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/rpc"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/server/serverpb"
"github.com/cockroachdb/cockroach/pkg/server/status/statuspb"
"github.com/cockroachdb/cockroach/pkg/settings"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql/catalog"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/accessors"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/hydratedtables"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/lease"
"github.com/cockroachdb/cockroach/pkg/sql/contention"
"github.com/cockroachdb/cockroach/pkg/sql/distsql"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/sql/execinfrapb"
"github.com/cockroachdb/cockroach/pkg/sql/execstats"
"github.com/cockroachdb/cockroach/pkg/sql/gcjob/gcjobnotifier"
"github.com/cockroachdb/cockroach/pkg/sql/opt"
"github.com/cockroachdb/cockroach/pkg/sql/parser"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgnotice"
"github.com/cockroachdb/cockroach/pkg/sql/physicalplan"
"github.com/cockroachdb/cockroach/pkg/sql/querycache"
"github.com/cockroachdb/cockroach/pkg/sql/rowenc"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sessiondata"
"github.com/cockroachdb/cockroach/pkg/sql/sessiondatapb"
"github.com/cockroachdb/cockroach/pkg/sql/sqlliveness"
"github.com/cockroachdb/cockroach/pkg/sql/stats"
"github.com/cockroachdb/cockroach/pkg/sql/stmtdiagnostics"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/bitarray"
"github.com/cockroachdb/cockroach/pkg/util/duration"
"github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/metric"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
"github.com/cockroachdb/cockroach/pkg/util/tracing/tracingpb"
"github.com/cockroachdb/cockroach/pkg/util/uuid"
"github.com/cockroachdb/errors"
)
// ClusterOrganization is the organization name, stored in the public
// "cluster.organization" string cluster setting (empty by default).
var ClusterOrganization = settings.RegisterStringSetting(
	"cluster.organization",
	"organization name",
	"",
).WithPublic()
// ClusterIsInternal returns true if the cluster organization contains
// "Cockroach Labs", indicating an internal cluster.
func ClusterIsInternal(sv *settings.Values) bool {
	org := ClusterOrganization.Get(sv)
	return strings.Contains(org, "Cockroach Labs")
}
// ClusterSecret is a cluster specific secret. This setting is
// non-reportable.
var ClusterSecret = func() *settings.StringSetting {
	s := settings.RegisterStringSetting(
		"cluster.secret",
		"cluster specific secret",
		"",
	)
	// Even though string settings are non-reportable by default, we
	// still mark them explicitly in case a future code change flips the
	// default.
	s.SetReportable(false)
	return s
}()

// defaultIntSize controls how a "naked" INT type will be parsed.
// TODO(bob): Change this to 4 in v2.3; https://github.com/cockroachdb/cockroach/issues/32534
// TODO(bob): Remove or no-op this in v2.4: https://github.com/cockroachdb/cockroach/issues/32844
var defaultIntSize = func() *settings.IntSetting {
	s := settings.RegisterIntSetting(
		"sql.defaults.default_int_size",
		"the size, in bytes, of an INT type", 8, func(i int64) error {
			// a naked INT must map to either INT4 or INT8
			if i != 4 && i != 8 {
				return errors.New("only 4 or 8 are valid values")
			}
			return nil
		})
	s.SetVisibility(settings.Public)
	return s
}()

// allowCrossDatabaseFKsSetting is the name of the public cluster setting
// gating cross-database foreign key references.
const allowCrossDatabaseFKsSetting = "sql.cross_db_fks.enabled"

// allowCrossDatabaseFKs controls whether foreign keys may reference tables
// in a different database (off by default).
var allowCrossDatabaseFKs = settings.RegisterBoolSetting(
	allowCrossDatabaseFKsSetting,
	"if true, creating foreign key references across databases is allowed",
	false,
).WithPublic()

// allowCrossDatabaseViewsSetting is the name of the public cluster setting
// gating cross-database views.
const allowCrossDatabaseViewsSetting = "sql.cross_db_views.enabled"

// allowCrossDatabaseViews controls whether views may refer to objects in a
// different database (off by default).
var allowCrossDatabaseViews = settings.RegisterBoolSetting(
	allowCrossDatabaseViewsSetting,
	"if true, creating views that refer to other databases is allowed",
	false,
).WithPublic()

// allowCrossDatabaseSeqOwnerSetting is the name of the public cluster
// setting gating cross-database sequence ownership.
const allowCrossDatabaseSeqOwnerSetting = "sql.cross_db_sequence_owners.enabled"

// allowCrossDatabaseSeqOwner controls whether sequences may be owned by
// tables in a different database (off by default).
var allowCrossDatabaseSeqOwner = settings.RegisterBoolSetting(
	allowCrossDatabaseSeqOwnerSetting,
	"if true, creating sequences owned by tables from other databases is allowed",
	false,
).WithPublic()
// traceTxnThreshold can be used to log SQL transactions that take
// longer than duration to complete. For example, traceTxnThreshold=1s
// will log the trace for any transaction that takes 1s or longer. To
// log traces for all transactions use traceTxnThreshold=1ns. Note
// that any positive duration will enable tracing and will slow down
// all execution because traces are gathered for all transactions even
// if they are not output.
var traceTxnThreshold = settings.RegisterDurationSetting(
	"sql.trace.txn.enable_threshold",
	// BUG FIX: the concatenation was missing a space after "than", rendering
	// the description as "coarser grained thansql.trace.stmt.enable_threshold".
	"duration beyond which all transactions are traced (set to 0 to "+
		"disable). This setting is coarser grained than "+
		"sql.trace.stmt.enable_threshold because it applies to all statements "+
		"within a transaction as well as client communication (e.g. retries).", 0,
).WithPublic()
// traceStmtThreshold is identical to traceTxnThreshold except it applies to
// individual statements in a transaction. The motivation for this setting is
// to be able to reduce the noise associated with a larger transaction (e.g.
// round trips to client).
var traceStmtThreshold = settings.RegisterDurationSetting(
	"sql.trace.stmt.enable_threshold",
	"duration beyond which all statements are traced (set to 0 to disable). "+
		"This applies to individual statements within a transaction and is therefore "+
		"finer-grained than sql.trace.txn.enable_threshold.",
	0,
).WithPublic()

// traceSessionEventLogEnabled can be used to enable the event log
// that is normally kept for every SQL connection. The event log has a
// non-trivial performance impact and also reveals SQL statements
// which may be a privacy concern.
var traceSessionEventLogEnabled = settings.RegisterBoolSetting(
	"sql.trace.session_eventlog.enabled",
	"set to true to enable session tracing. "+
		"Note that enabling this may have a non-trivial negative performance impact.",
	false,
).WithPublic()

// ReorderJoinsLimitClusterSettingName is the name of the cluster setting for
// the maximum number of joins to reorder.
const ReorderJoinsLimitClusterSettingName = "sql.defaults.reorder_joins_limit"

// ReorderJoinsLimitClusterValue controls the cluster default for the maximum
// number of joins reordered.
var ReorderJoinsLimitClusterValue = settings.RegisterIntSetting(
	ReorderJoinsLimitClusterSettingName,
	"default number of joins to reorder",
	opt.DefaultJoinOrderLimit,
	// validation: the limit must lie in [0, opt.MaxReorderJoinsLimit]
	func(limit int64) error {
		if limit < 0 || limit > opt.MaxReorderJoinsLimit {
			return pgerror.Newf(pgcode.InvalidParameterValue,
				"cannot set %s to a value less than 0 or greater than %v",
				ReorderJoinsLimitClusterSettingName,
				opt.MaxReorderJoinsLimit,
			)
		}
		return nil
	},
)

// requireExplicitPrimaryKeysClusterMode controls the cluster default for the
// require_explicit_primary_keys session setting.
var requireExplicitPrimaryKeysClusterMode = settings.RegisterBoolSetting(
	"sql.defaults.require_explicit_primary_keys.enabled",
	"default value for requiring explicit primary keys in CREATE TABLE statements",
	false,
)

// temporaryTablesEnabledClusterMode controls the cluster default for the
// experimental_enable_temp_tables session setting.
var temporaryTablesEnabledClusterMode = settings.RegisterBoolSetting(
	"sql.defaults.experimental_temporary_tables.enabled",
	"default value for experimental_enable_temp_tables; allows for use of temporary tables by default",
	false,
)
// implicitColumnPartitioningEnabledClusterMode controls the cluster default
// for enabling implicit column partitioning.
var implicitColumnPartitioningEnabledClusterMode = settings.RegisterBoolSetting(
	"sql.defaults.experimental_implicit_column_partitioning.enabled",
	// BUG FIX: the description previously named "experimental_enable_temp_tables",
	// copy-pasted from the temporary-tables setting above.
	"default value for experimental_enable_implicit_column_partitioning; "+
		"allows for the use of implicit column partitioning",
	false,
)
// dropEnumValueEnabledClusterMode controls the cluster default for the
// enable_drop_enum_value session setting.
var dropEnumValueEnabledClusterMode = settings.RegisterBoolSetting(
	"sql.defaults.drop_enum_value.enabled",
	"default value for enable_drop_enum_value; allows for dropping enum values",
	false,
)

// overrideMultiRegionZoneConfigClusterMode controls the cluster default for
// the override_multi_region_zone_config session setting.
var overrideMultiRegionZoneConfigClusterMode = settings.RegisterBoolSetting(
	"sql.defaults.override_multi_region_zone_config.enabled",
	"default value for override_multi_region_zone_config; "+
		"allows for overriding the zone configs of a multi-region table or database",
	false,
)

// hashShardedIndexesEnabledClusterMode controls the cluster default for the
// experimental_enable_hash_sharded_indexes session setting.
var hashShardedIndexesEnabledClusterMode = settings.RegisterBoolSetting(
	"sql.defaults.experimental_hash_sharded_indexes.enabled",
	"default value for experimental_enable_hash_sharded_indexes; allows for creation of hash sharded indexes by default",
	false,
)

// zigzagJoinClusterMode controls the cluster default for the
// enable_zigzag_join session setting.
var zigzagJoinClusterMode = settings.RegisterBoolSetting(
	"sql.defaults.zigzag_join.enabled",
	"default value for enable_zigzag_join session setting; allows use of zig-zag join by default",
	true,
)

// optDrivenFKCascadesClusterLimit controls the cluster default for the
// foreign_key_cascades_limit session setting.
var optDrivenFKCascadesClusterLimit = settings.RegisterIntSetting(
	"sql.defaults.foreign_key_cascades_limit",
	"default value for foreign_key_cascades_limit session setting; limits the number of cascading operations that run as part of a single query",
	10000,
	settings.NonNegativeInt,
)

// preferLookupJoinsForFKs controls the cluster default for the
// prefer_lookup_joins_for_fks session setting.
var preferLookupJoinsForFKs = settings.RegisterBoolSetting(
	"sql.defaults.prefer_lookup_joins_for_fks.enabled",
	"default value for prefer_lookup_joins_for_fks session setting; causes foreign key operations to use lookup joins when possible",
	false,
)

// InterleavedTablesEnabled is the setting that controls whether it's possible
// to create interleaved indexes or tables.
var InterleavedTablesEnabled = settings.RegisterBoolSetting(
	"sql.defaults.interleaved_tables.enabled",
	"allows creation of interleaved tables or indexes",
	false,
)

// optUseHistogramsClusterMode controls the cluster default for whether
// histograms are used by the optimizer for cardinality estimation.
// Note that it does not control histogram collection; regardless of the
// value of this setting, the optimizer cannot use histograms if they
// haven't been collected. Collection of histograms is controlled by the
// cluster setting sql.stats.histogram_collection.enabled.
var optUseHistogramsClusterMode = settings.RegisterBoolSetting(
	"sql.defaults.optimizer_use_histograms.enabled",
	"default value for optimizer_use_histograms session setting; enables usage of histograms in the optimizer by default",
	true,
)

// optUseMultiColStatsClusterMode controls the cluster default for whether
// multi-column stats are used by the optimizer for cardinality estimation.
// Note that it does not control collection of multi-column stats; regardless
// of the value of this setting, the optimizer cannot use multi-column stats
// if they haven't been collected. Collection of multi-column stats is
// controlled by the cluster setting sql.stats.multi_column_collection.enabled.
var optUseMultiColStatsClusterMode = settings.RegisterBoolSetting(
	"sql.defaults.optimizer_use_multicol_stats.enabled",
	"default value for optimizer_use_multicol_stats session setting; enables usage of multi-column stats in the optimizer by default",
	true,
)

// localityOptimizedSearchMode controls the cluster default for the use of
// locality optimized search. If enabled, the optimizer will try to plan scans
// and lookup joins in which local nodes (i.e., nodes in the gateway region) are
// searched for matching rows before remote nodes, in the hope that the
// execution engine can avoid visiting remote nodes.
var localityOptimizedSearchMode = settings.RegisterBoolSetting(
	"sql.defaults.locality_optimized_partitioned_index_scan.enabled",
	"default value for locality_optimized_partitioned_index_scan session setting; "+
		"enables searching for rows in the current region before searching remote regions",
	true,
)

// implicitSelectForUpdateClusterMode controls the cluster default for the
// enable_implicit_select_for_update session setting.
var implicitSelectForUpdateClusterMode = settings.RegisterBoolSetting(
	"sql.defaults.implicit_select_for_update.enabled",
	"default value for enable_implicit_select_for_update session setting; enables FOR UPDATE locking during the row-fetch phase of mutation statements",
	true,
)

// insertFastPathClusterMode controls the cluster default for the
// enable_insert_fast_path session setting.
var insertFastPathClusterMode = settings.RegisterBoolSetting(
	"sql.defaults.insert_fast_path.enabled",
	"default value for enable_insert_fast_path session setting; enables a specialized insert path",
	true,
)

// experimentalAlterColumnTypeGeneralMode controls the cluster default for the
// experimental_alter_column_type session setting.
var experimentalAlterColumnTypeGeneralMode = settings.RegisterBoolSetting(
	"sql.defaults.experimental_alter_column_type.enabled",
	"default value for experimental_alter_column_type session setting; "+
		"enables the use of ALTER COLUMN TYPE for general conversions",
	false,
)
// clusterStatementTimeout controls the cluster default for the
// statement_timeout session setting.
var clusterStatementTimeout = settings.RegisterDurationSetting(
	"sql.defaults.statement_timeout",
	// BUG FIX: the description previously repeated "default value for the
	// statement_timeout" twice.
	"default value for the statement_timeout session setting; controls the "+
		"duration a query is permitted to run before it is canceled; if set to 0, "+
		"there is no timeout",
	0,
	settings.NonNegativeDuration,
).WithPublic()
// clusterIdleInSessionTimeout controls the cluster default for the
// idle_in_session_timeout session setting.
var clusterIdleInSessionTimeout = settings.RegisterDurationSetting(
	"sql.defaults.idle_in_session_timeout",
	// BUG FIX: the description previously repeated "default value for the
	// idle_in_session_timeout" twice.
	"default value for the idle_in_session_timeout session setting; controls the "+
		"duration a session is permitted to idle before the session is terminated; "+
		"if set to 0, there is no timeout",
	0,
	settings.NonNegativeDuration,
).WithPublic()
// clusterIdleInTransactionSessionTimeout controls the cluster default for
// the idle_in_transaction_session_timeout session setting.
var clusterIdleInTransactionSessionTimeout = settings.RegisterDurationSetting(
	"sql.defaults.idle_in_transaction_session_timeout",
	"default value for the idle_in_transaction_session_timeout; controls the "+
		"duration a session is permitted to idle in a transaction before the "+
		"session is terminated; if set to 0, there is no timeout",
	0,
	settings.NonNegativeDuration,
).WithPublic()
// TODO(rytaft): remove this once unique without index constraints are fully
// supported.
var experimentalUniqueWithoutIndexConstraintsMode = settings.RegisterBoolSetting(
	"sql.defaults.experimental_enable_unique_without_index_constraints.enabled",
	// BUG FIX: the concatenation was missing a space after the semicolon,
	// rendering "...session setting;disables...".
	"default value for experimental_enable_unique_without_index_constraints session setting; "+
		"disables unique without index constraints by default",
	false,
)
// experimentalUseNewSchemaChanger controls the cluster default for the
// experimental_use_new_schema_changer session setting.
// (The previous comment described DistSQLClusterExecMode — a copy-paste error.)
var experimentalUseNewSchemaChanger = settings.RegisterEnumSetting(
	"sql.defaults.experimental_new_schema_changer.enabled",
	// BUG FIX: the concatenation was missing a space after the semicolon,
	// rendering "...session setting;disables...".
	"default value for experimental_use_new_schema_changer session setting; "+
		"disables new schema changer by default",
	"off",
	map[int64]string{
		int64(sessiondata.UseNewSchemaChangerOff):          "off",
		int64(sessiondata.UseNewSchemaChangerOn):           "on",
		int64(sessiondata.UseNewSchemaChangerUnsafeAlways): "unsafe_always",
	},
)
// experimentalStreamReplicationEnabled controls the cluster default for the
// experimental_stream_replication session setting.
var experimentalStreamReplicationEnabled = settings.RegisterBoolSetting(
	"sql.defaults.experimental_stream_replication.enabled",
	// BUG FIX: the concatenation was missing a space after the semicolon,
	// rendering "...session setting;enables...".
	"default value for experimental_stream_replication session setting; "+
		"enables the ability to setup a replication stream",
	false,
)
// stubCatalogTablesEnabledClusterValue controls the cluster default for the
// stub_catalog_tables session setting.
var stubCatalogTablesEnabledClusterValue = settings.RegisterBoolSetting(
	`sql.defaults.stub_catalog_tables.enabled`,
	`default value for stub_catalog_tables session setting`,
	true,
)

// ExperimentalDistSQLPlanningClusterSettingName is the name for the cluster
// setting that controls experimentalDistSQLPlanningClusterMode below.
const ExperimentalDistSQLPlanningClusterSettingName = "sql.defaults.experimental_distsql_planning"

// experimentalDistSQLPlanningClusterMode can be used to enable
// optimizer-driven DistSQL planning that sidesteps intermediate planNode
// transition when going from opt.Expr to DistSQL processor specs.
var experimentalDistSQLPlanningClusterMode = settings.RegisterEnumSetting(
	ExperimentalDistSQLPlanningClusterSettingName,
	"default experimental_distsql_planning mode; enables experimental opt-driven DistSQL planning",
	"off",
	map[int64]string{
		int64(sessiondata.ExperimentalDistSQLPlanningOff): "off",
		int64(sessiondata.ExperimentalDistSQLPlanningOn):  "on",
	},
)

// VectorizeClusterSettingName is the name for the cluster setting that controls
// the VectorizeClusterMode below.
const VectorizeClusterSettingName = "sql.defaults.vectorize"

// VectorizeClusterMode controls the cluster default for when automatic
// vectorization is enabled.
var VectorizeClusterMode = settings.RegisterEnumSetting(
	VectorizeClusterSettingName,
	"default vectorize mode",
	"on",
	map[int64]string{
		int64(sessiondatapb.VectorizeOff): "off",
		int64(sessiondatapb.VectorizeOn):  "on",
	},
)

// DistSQLClusterExecMode controls the cluster default for when DistSQL is used.
var DistSQLClusterExecMode = settings.RegisterEnumSetting(
	"sql.defaults.distsql",
	"default distributed SQL execution mode",
	"auto",
	map[int64]string{
		int64(sessiondata.DistSQLOff):  "off",
		int64(sessiondata.DistSQLAuto): "auto",
		int64(sessiondata.DistSQLOn):   "on",
	},
)

// SerialNormalizationMode controls how the SERIAL type is interpreted in table
// definitions.
var SerialNormalizationMode = settings.RegisterEnumSetting(
	"sql.defaults.serial_normalization",
	"default handling of SERIAL in table definitions",
	"rowid",
	map[int64]string{
		int64(sessiondata.SerialUsesRowID):              "rowid",
		int64(sessiondata.SerialUsesVirtualSequences):   "virtual_sequence",
		int64(sessiondata.SerialUsesSQLSequences):       "sql_sequence",
		int64(sessiondata.SerialUsesCachedSQLSequences): "sql_sequence_cached",
	},
).WithPublic()

// disallowFullTableScans controls the cluster default for rejecting queries
// whose plans contain a full table scan.
var disallowFullTableScans = settings.RegisterBoolSetting(
	`sql.defaults.disallow_full_table_scans.enabled`,
	"setting to true rejects queries that have planned a full table scan",
	false,
).WithPublic()

// errNoTransactionInProgress is returned when a txn-scoped statement runs
// outside a transaction; errTransactionInProgress when BEGIN-like statements
// run inside one.
var errNoTransactionInProgress = errors.New("there is no transaction in progress")
var errTransactionInProgress = errors.New("there is already a transaction in progress")

// sqlTxnName is the name given to SQL-issued KV transactions.
const sqlTxnName string = "sql txn"

// metricsSampleInterval is the interval at which metrics are sampled.
const metricsSampleInterval = 10 * time.Second
// Fully-qualified names for metrics.
var (
MetaSQLExecLatency = metric.Metadata{
Name: "sql.exec.latency",
Help: "Latency of SQL statement execution",
Measurement: "Latency",
Unit: metric.Unit_NANOSECONDS,
}
MetaSQLServiceLatency = metric.Metadata{
Name: "sql.service.latency",
Help: "Latency of SQL request execution",
Measurement: "Latency",
Unit: metric.Unit_NANOSECONDS,
}
MetaSQLOptFallback = metric.Metadata{
Name: "sql.optimizer.fallback.count",
Help: "Number of statements which the cost-based optimizer was unable to plan",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaSQLOptPlanCacheHits = metric.Metadata{
Name: "sql.optimizer.plan_cache.hits",
Help: "Number of non-prepared statements for which a cached plan was used",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaSQLOptPlanCacheMisses = metric.Metadata{
Name: "sql.optimizer.plan_cache.misses",
Help: "Number of non-prepared statements for which a cached plan was not used",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaDistSQLSelect = metric.Metadata{
Name: "sql.distsql.select.count",
Help: "Number of DistSQL SELECT statements",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaDistSQLExecLatency = metric.Metadata{
Name: "sql.distsql.exec.latency",
Help: "Latency of DistSQL statement execution",
Measurement: "Latency",
Unit: metric.Unit_NANOSECONDS,
}
MetaDistSQLServiceLatency = metric.Metadata{
Name: "sql.distsql.service.latency",
Help: "Latency of DistSQL request execution",
Measurement: "Latency",
Unit: metric.Unit_NANOSECONDS,
}
MetaTxnAbort = metric.Metadata{
Name: "sql.txn.abort.count",
Help: "Number of SQL transaction abort errors",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaFailure = metric.Metadata{
Name: "sql.failure.count",
Help: "Number of statements resulting in a planning or runtime error",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaSQLTxnLatency = metric.Metadata{
Name: "sql.txn.latency",
Help: "Latency of SQL transactions",
Measurement: "Latency",
Unit: metric.Unit_NANOSECONDS,
}
MetaSQLTxnsOpen = metric.Metadata{
Name: "sql.txns.open",
Help: "Number of currently open SQL transactions",
Measurement: "Open SQL Transactions",
Unit: metric.Unit_COUNT,
}
MetaFullTableOrIndexScan = metric.Metadata{
Name: "sql.full.scan.count",
Help: "Number of full table or index scans",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
// Below are the metadata for the statement started counters.
MetaQueryStarted = metric.Metadata{
Name: "sql.query.started.count",
Help: "Number of SQL queries started",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaTxnBeginStarted = metric.Metadata{
Name: "sql.txn.begin.started.count",
Help: "Number of SQL transaction BEGIN statements started",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaTxnCommitStarted = metric.Metadata{
Name: "sql.txn.commit.started.count",
Help: "Number of SQL transaction COMMIT statements started",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaTxnRollbackStarted = metric.Metadata{
Name: "sql.txn.rollback.started.count",
Help: "Number of SQL transaction ROLLBACK statements started",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaSelectStarted = metric.Metadata{
Name: "sql.select.started.count",
Help: "Number of SQL SELECT statements started",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaUpdateStarted = metric.Metadata{
Name: "sql.update.started.count",
Help: "Number of SQL UPDATE statements started",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaInsertStarted = metric.Metadata{
Name: "sql.insert.started.count",
Help: "Number of SQL INSERT statements started",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaDeleteStarted = metric.Metadata{
Name: "sql.delete.started.count",
Help: "Number of SQL DELETE statements started",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaSavepointStarted = metric.Metadata{
Name: "sql.savepoint.started.count",
Help: "Number of SQL SAVEPOINT statements started",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaReleaseSavepointStarted = metric.Metadata{
Name: "sql.savepoint.release.started.count",
Help: "Number of `RELEASE SAVEPOINT` statements started",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaRollbackToSavepointStarted = metric.Metadata{
Name: "sql.savepoint.rollback.started.count",
Help: "Number of `ROLLBACK TO SAVEPOINT` statements started",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaRestartSavepointStarted = metric.Metadata{
Name: "sql.restart_savepoint.started.count",
Help: "Number of `SAVEPOINT cockroach_restart` statements started",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaReleaseRestartSavepointStarted = metric.Metadata{
Name: "sql.restart_savepoint.release.started.count",
Help: "Number of `RELEASE SAVEPOINT cockroach_restart` statements started",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaRollbackToRestartSavepointStarted = metric.Metadata{
Name: "sql.restart_savepoint.rollback.started.count",
Help: "Number of `ROLLBACK TO SAVEPOINT cockroach_restart` statements started",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaDdlStarted = metric.Metadata{
Name: "sql.ddl.started.count",
Help: "Number of SQL DDL statements started",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaMiscStarted = metric.Metadata{
Name: "sql.misc.started.count",
Help: "Number of other SQL statements started",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
// Below are the metadata for the statement executed counters.
MetaQueryExecuted = metric.Metadata{
Name: "sql.query.count",
Help: "Number of SQL queries executed",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaTxnBeginExecuted = metric.Metadata{
Name: "sql.txn.begin.count",
Help: "Number of SQL transaction BEGIN statements successfully executed",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaTxnCommitExecuted = metric.Metadata{
Name: "sql.txn.commit.count",
Help: "Number of SQL transaction COMMIT statements successfully executed",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaTxnRollbackExecuted = metric.Metadata{
Name: "sql.txn.rollback.count",
Help: "Number of SQL transaction ROLLBACK statements successfully executed",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaSelectExecuted = metric.Metadata{
Name: "sql.select.count",
Help: "Number of SQL SELECT statements successfully executed",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaUpdateExecuted = metric.Metadata{
Name: "sql.update.count",
Help: "Number of SQL UPDATE statements successfully executed",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaInsertExecuted = metric.Metadata{
Name: "sql.insert.count",
Help: "Number of SQL INSERT statements successfully executed",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaDeleteExecuted = metric.Metadata{
Name: "sql.delete.count",
Help: "Number of SQL DELETE statements successfully executed",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaSavepointExecuted = metric.Metadata{
Name: "sql.savepoint.count",
Help: "Number of SQL SAVEPOINT statements successfully executed",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaReleaseSavepointExecuted = metric.Metadata{
Name: "sql.savepoint.release.count",
Help: "Number of `RELEASE SAVEPOINT` statements successfully executed",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaRollbackToSavepointExecuted = metric.Metadata{
Name: "sql.savepoint.rollback.count",
Help: "Number of `ROLLBACK TO SAVEPOINT` statements successfully executed",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaRestartSavepointExecuted = metric.Metadata{
Name: "sql.restart_savepoint.count",
Help: "Number of `SAVEPOINT cockroach_restart` statements successfully executed",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaReleaseRestartSavepointExecuted = metric.Metadata{
Name: "sql.restart_savepoint.release.count",
Help: "Number of `RELEASE SAVEPOINT cockroach_restart` statements successfully executed",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaRollbackToRestartSavepointExecuted = metric.Metadata{
Name: "sql.restart_savepoint.rollback.count",
Help: "Number of `ROLLBACK TO SAVEPOINT cockroach_restart` statements successfully executed",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaDdlExecuted = metric.Metadata{
Name: "sql.ddl.count",
Help: "Number of SQL DDL statements successfully executed",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaMiscExecuted = metric.Metadata{
Name: "sql.misc.count",
Help: "Number of other SQL statements successfully executed",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
)
// getMetricMeta returns the metadata for a SQL statement counter, adjusted
// for internal (non-client-issued) queries when internal is true. The
// Metadata argument is received by value, so the caller's copy is never
// mutated.
func getMetricMeta(meta metric.Metadata, internal bool) metric.Metadata {
	if !internal {
		// External queries use the metadata unchanged.
		return meta
	}
	// Internal queries get a distinct metric name, help text, and
	// measurement label so they can be charted separately.
	meta.Name += ".internal"
	meta.Help += " (internal queries)"
	meta.Measurement = "SQL Internal Statements"
	return meta
}
// NodeInfo contains metadata about the executing node and cluster.
type NodeInfo struct {
	// ClusterID returns the cluster's UUID.
	ClusterID func() uuid.UUID
	// NodeID is the ID container for this SQL instance.
	NodeID *base.SQLIDContainer
	// AdminURL returns the admin UI URL for this node.
	AdminURL func() *url.URL
	// PGURL returns a Postgres-wire connection URL for the given user info.
	PGURL func(*url.Userinfo) (*url.URL, error)
}
// nodeStatusGenerator is a limited portion of the status.MetricsRecorder
// struct, to avoid having to import all of status in sql.
type nodeStatusGenerator interface {
	// GenerateNodeStatus produces a status summary for this node.
	GenerateNodeStatus(ctx context.Context) *statuspb.NodeStatus
}
// An ExecutorConfig encompasses the auxiliary objects and configuration
// required to create an executor.
// All fields holding a pointer or an interface are required to create
// an Executor; the rest will have sane defaults set if omitted.
type ExecutorConfig struct {
	// Settings holds the cluster settings; Organization() and SV() read
	// from Settings.SV.
	Settings *cluster.Settings
	NodeInfo
	Codec keys.SQLCodec
	DefaultZoneConfig *zonepb.ZoneConfig
	Locality roachpb.Locality
	AmbientCtx log.AmbientContext
	DB *kv.DB
	Gossip gossip.OptionalGossip
	SystemConfig config.SystemConfigProvider
	DistSender *kvcoord.DistSender
	RPCContext *rpc.Context
	LeaseManager *lease.Manager
	// Clock is consulted e.g. by EvalAsOfTimestamp to reject AS OF SYSTEM
	// TIME timestamps in the future.
	Clock *hlc.Clock
	DistSQLSrv *distsql.ServerImpl
	// NodesStatusServer gives access to the NodesStatus service and is only
	// available when running as a system tenant.
	NodesStatusServer serverpb.OptionalNodesStatusServer
	// SQLStatusServer gives access to a subset of the Status service and is
	// available when not running as a system tenant.
	SQLStatusServer serverpb.SQLStatusServer
	MetricsRecorder nodeStatusGenerator
	SessionRegistry *SessionRegistry
	SQLLivenessReader sqlliveness.Reader
	JobRegistry *jobs.Registry
	VirtualSchemas *VirtualSchemaHolder
	DistSQLPlanner *DistSQLPlanner
	TableStatsCache *stats.TableStatisticsCache
	StatsRefresher *stats.Refresher
	InternalExecutor *InternalExecutor
	QueryCache *querycache.C
	SchemaChangerMetrics *SchemaChangerMetrics
	FeatureFlagMetrics *featureflag.DenialMetrics
	// Testing knobs follow; all are optional.
	TestingKnobs ExecutorTestingKnobs
	PGWireTestingKnobs *PGWireTestingKnobs
	SchemaChangerTestingKnobs *SchemaChangerTestingKnobs
	TypeSchemaChangerTestingKnobs *TypeSchemaChangerTestingKnobs
	GCJobTestingKnobs *GCJobTestingKnobs
	DistSQLRunTestingKnobs *execinfra.TestingKnobs
	EvalContextTestingKnobs tree.EvalContextTestingKnobs
	TenantTestingKnobs *TenantTestingKnobs
	BackupRestoreTestingKnobs *BackupRestoreTestingKnobs
	// HistogramWindowInterval is (server.Config).HistogramWindowInterval.
	HistogramWindowInterval time.Duration
	// RangeDescriptorCache is updated by DistSQL when it finds out about
	// misplanned spans.
	RangeDescriptorCache *rangecache.RangeCache
	// Role membership cache.
	RoleMemberCache *MembershipCache
	// ProtectedTimestampProvider encapsulates the protected timestamp subsystem.
	ProtectedTimestampProvider protectedts.Provider
	// StmtDiagnosticsRecorder deals with recording statement diagnostics.
	StmtDiagnosticsRecorder *stmtdiagnostics.Registry
	ExternalIODirConfig base.ExternalIODirConfig
	// HydratedTables is a node-level cache of table descriptors which utilize
	// user-defined types.
	HydratedTables *hydratedtables.Cache
	GCJobNotifier *gcjobnotifier.Notifier
	RangeFeedFactory *rangefeed.Factory
	// VersionUpgradeHook is called after validating a `SET CLUSTER SETTING
	// version` but before executing it. It can carry out arbitrary migrations
	// that allow us to eventually remove legacy code. It will only be populated
	// on the system tenant.
	//
	// TODO(tbg,irfansharif,ajwerner): Hook up for secondary tenants.
	VersionUpgradeHook func(ctx context.Context, user security.SQLUsername, from, to clusterversion.ClusterVersion) error
	// MigrationJobDeps is used to drive migrations.
	//
	// TODO(tbg,irfansharif,ajwerner): Hook up for secondary tenants.
	MigrationJobDeps migration.JobDeps
	// IndexBackfiller is used to backfill indexes. It is another rather circular
	// object which mostly just holds on to an ExecConfig.
	IndexBackfiller *IndexBackfillPlanner
	// ContentionRegistry is a node-level registry of contention events used for
	// contention observability.
	ContentionRegistry *contention.Registry
}
// Organization returns the current value of the cluster.organization
// setting.
func (cfg *ExecutorConfig) Organization() string {
	sv := &cfg.Settings.SV
	return ClusterOrganization.Get(sv)
}
// GetFeatureFlagMetrics returns the value of the FeatureFlagMetrics struct.
// Note: the Get prefix is kept for compatibility with existing callers even
// though Go convention would name this FeatureFlagMetricsRef or similar.
func (cfg *ExecutorConfig) GetFeatureFlagMetrics() *featureflag.DenialMetrics {
	return cfg.FeatureFlagMetrics
}
// SV returns the setting values container embedded in the executor's
// cluster settings.
func (cfg *ExecutorConfig) SV() *settings.Values {
	return &cfg.Settings.SV
}
// Compile-time check that *ExecutorTestingKnobs satisfies
// base.ModuleTestingKnobs.
var _ base.ModuleTestingKnobs = &ExecutorTestingKnobs{}
// ModuleTestingKnobs is part of the base.ModuleTestingKnobs interface.
// It is intentionally a no-op marker method.
func (*ExecutorTestingKnobs) ModuleTestingKnobs() {}
// StatementFilter is the type of callback that
// ExecutorTestingKnobs.StatementFilter takes. It receives the context, the
// statement string, and the execution error (nil on success).
type StatementFilter func(context.Context, string, error)
// ExecutorTestingKnobs is part of the context used to control parts of the
// system during testing. All fields are optional; the zero value disables
// every knob.
type ExecutorTestingKnobs struct {
	// StatementFilter can be used to trap execution of SQL statements and
	// optionally change their results. The filter function is invoked after each
	// statement has been executed.
	StatementFilter StatementFilter
	// BeforePrepare can be used to trap execution of SQL statement preparation.
	// If a nil error is returned, planning continues as usual.
	BeforePrepare func(ctx context.Context, stmt string, txn *kv.Txn) error
	// BeforeExecute is called by the Executor before plan execution. It is useful
	// for synchronizing statement execution.
	BeforeExecute func(ctx context.Context, stmt string)
	// AfterExecute is like StatementFilter, but it runs in the same goroutine of the
	// statement.
	AfterExecute func(ctx context.Context, stmt string, err error)
	// AfterExecCmd is called after successful execution of any command.
	AfterExecCmd func(ctx context.Context, cmd Command, buf *StmtBuf)
	// DisableAutoCommit, if set, disables the auto-commit functionality of some
	// SQL statements. That functionality allows some statements to commit
	// directly when they're executed in an implicit SQL txn, without waiting for
	// the Executor to commit the implicit txn.
	// This has to be set in tests that need to abort such statements using a
	// StatementFilter; otherwise, the statement commits immediately after
	// execution so there'll be nothing left to abort by the time the filter runs.
	DisableAutoCommit bool
	// BeforeAutoCommit is called when the Executor is about to commit the KV
	// transaction after running a statement in an implicit transaction, allowing
	// tests to inject errors into that commit.
	// If an error is returned, that error will be considered the result of
	// txn.Commit(), and the txn.Commit() call will not actually be
	// made. If no error is returned, txn.Commit() is called normally.
	//
	// Note that this is not called if the SQL statement representing the implicit
	// transaction has committed the KV txn itself (e.g. if it used the 1-PC
	// optimization). This is only called when the Executor is the one doing the
	// committing.
	BeforeAutoCommit func(ctx context.Context, stmt string) error
	// DisableTempObjectsCleanupOnSessionExit disables cleaning up temporary schemas
	// and tables when a session is closed.
	DisableTempObjectsCleanupOnSessionExit bool
	// TempObjectsCleanupCh replaces the time.Ticker.C channel used for scheduling
	// a cleanup on every temp object in the cluster. If this is set, the job
	// will now trigger when items come into this channel.
	TempObjectsCleanupCh chan time.Time
	// OnTempObjectsCleanupDone will trigger when the temporary objects cleanup
	// job is done.
	OnTempObjectsCleanupDone func()
	// WithStatementTrace is called after the statement is executed in
	// execStmtInOpenState.
	WithStatementTrace func(trace tracing.Recording, stmt string)
	// RunAfterSCJobsCacheLookup is called after the SchemaChangeJobCache is checked for
	// a given table id.
	RunAfterSCJobsCacheLookup func(*jobs.Job)
	// TestingSaveFlows, if set, will be called with the given stmt. The resulting
	// function will be called with the physical plan of that statement's main
	// query (i.e. no subqueries). The physical plan is only safe for use for the
	// lifetime of this function. Note that returning a nil function is
	// unsupported and will lead to a panic.
	TestingSaveFlows func(stmt string) func(map[roachpb.NodeID]*execinfrapb.FlowSpec) error
	// DeterministicExplain, if set, will result in overriding fields in EXPLAIN
	// and EXPLAIN ANALYZE that can vary between runs (like elapsed times).
	//
	// TODO(radu): this flag affects EXPLAIN and EXPLAIN ANALYZE differently. It
	// hides the vectorization, distribution, and cluster nodes in EXPLAIN ANALYZE
	// but not in EXPLAIN. This is just a consequence of how the tests we have are
	// written. We should replace this knob with a session setting that allows
	// exact control of the redaction flags (and have each test set it as
	// necessary).
	DeterministicExplain bool
	// ForceRealTracingSpans, if set, forces the use of real (i.e. not no-op)
	// tracing spans for every statement.
	ForceRealTracingSpans bool
	// DistSQLReceiverPushCallbackFactory, if set, will be called every time a
	// DistSQLReceiver is created for a new query execution, and it should
	// return, possibly nil, a callback that will be called every time
	// DistSQLReceiver.Push is called.
	DistSQLReceiverPushCallbackFactory func(query string) func(rowenc.EncDatumRow, *execinfrapb.ProducerMetadata)
}
// PGWireTestingKnobs contains knobs for the pgwire module. The zero value
// disables all knobs.
type PGWireTestingKnobs struct {
	// CatchPanics causes the pgwire.conn to recover from panics in its execution
	// thread and return them as errors to the client, closing the connection
	// afterward.
	CatchPanics bool
	// AuthHook is used to override the normal authentication handling on new
	// connections.
	AuthHook func(context.Context) error
}
// Compile-time check that *PGWireTestingKnobs satisfies
// base.ModuleTestingKnobs.
var _ base.ModuleTestingKnobs = &PGWireTestingKnobs{}
// ModuleTestingKnobs implements the base.ModuleTestingKnobs interface.
func (*PGWireTestingKnobs) ModuleTestingKnobs() {}
// TenantTestingKnobs contains knobs for tenant behavior. The zero value
// disables all knobs.
type TenantTestingKnobs struct {
	// ClusterSettingsUpdater is a field that if set, allows the tenant to set
	// in-memory cluster settings. SQL tenants are otherwise prohibited from
	// setting cluster settings.
	ClusterSettingsUpdater settings.Updater
	// TenantIDCodecOverride overrides the tenant ID used to construct the SQL
	// server's codec, but nothing else (e.g. its certs). Used for testing.
	TenantIDCodecOverride roachpb.TenantID
	// IdleExitCountdownDuration, if set, overrides the default countdown
	// duration of the countdown timer that leads to shutdown in case of no SQL
	// connections.
	IdleExitCountdownDuration time.Duration
}
// Compile-time check that *TenantTestingKnobs satisfies
// base.ModuleTestingKnobs.
var _ base.ModuleTestingKnobs = &TenantTestingKnobs{}
// ModuleTestingKnobs implements the base.ModuleTestingKnobs interface.
func (*TenantTestingKnobs) ModuleTestingKnobs() {}
// BackupRestoreTestingKnobs contains knobs for backup and restore behavior.
// The zero value disables all knobs.
type BackupRestoreTestingKnobs struct {
	// AllowImplicitAccess allows implicit access to data sources for non-admin
	// users. This enables using nodelocal for testing BACKUP/RESTORE permissions.
	AllowImplicitAccess bool
	// CaptureResolvedTableDescSpans allows for intercepting the spans which are
	// resolved during backup planning, and will eventually be backed up during
	// execution.
	CaptureResolvedTableDescSpans func([]roachpb.Span)
	// RunAfterProcessingRestoreSpanEntry allows blocking the RESTORE job after a
	// single RestoreSpanEntry has been processed and added to the SSTBatcher.
	RunAfterProcessingRestoreSpanEntry func(ctx context.Context)
	// RunAfterExportingSpanEntry allows blocking the BACKUP job after a single
	// span has been exported.
	RunAfterExportingSpanEntry func(ctx context.Context)
}
// Compile-time check that *BackupRestoreTestingKnobs satisfies
// base.ModuleTestingKnobs.
var _ base.ModuleTestingKnobs = &BackupRestoreTestingKnobs{}
// ModuleTestingKnobs implements the base.ModuleTestingKnobs interface.
func (*BackupRestoreTestingKnobs) ModuleTestingKnobs() {}
// shouldDistributeGivenRecAndMode decides whether a plan should be
// distributed, combining the planner's recommendation with the session's
// DistSQL mode. It panics on an unknown mode, which indicates a programming
// error.
func shouldDistributeGivenRecAndMode(
	rec distRecommendation, mode sessiondata.DistSQLExecMode,
) bool {
	switch mode {
	case sessiondata.DistSQLOff:
		// Distribution is disabled outright.
		return false
	case sessiondata.DistSQLOn, sessiondata.DistSQLAlways:
		// Distribute whenever it is at all possible.
		return rec != cannotDistribute
	case sessiondata.DistSQLAuto:
		// Distribute only when the planner affirmatively recommends it.
		return rec == shouldDistribute
	}
	panic(errors.AssertionFailedf("unhandled distsql mode %v", mode))
}
// getPlanDistribution returns the PlanDistribution that plan will have. If
// plan already has physical representation, then the stored PlanDistribution
// is reused, but if plan has logical representation (i.e. it is a planNode
// tree), then we traverse that tree in order to determine the distribution of
// the plan.
// The guards below are ordered cheapest-first; each one forces a local plan.
func getPlanDistribution(
	ctx context.Context,
	p *planner,
	nodeID *base.SQLIDContainer,
	distSQLMode sessiondata.DistSQLExecMode,
	plan planMaybePhysical,
) physicalplan.PlanDistribution {
	// A physical plan already carries its distribution decision.
	if plan.isPhysicalPlan() {
		return plan.physPlan.Distribution
	}
	// If this transaction has modified or created any types, it is not safe to
	// distribute due to limitations around leasing descriptors modified in the
	// current transaction.
	if p.Descriptors().HasUncommittedTypes() {
		return physicalplan.LocalPlan
	}
	// No node ID means we are not running as a single-tenant (system) node;
	// fall back to local execution.
	if _, singleTenant := nodeID.OptionalNodeID(); !singleTenant {
		return physicalplan.LocalPlan
	}
	if distSQLMode == sessiondata.DistSQLOff {
		return physicalplan.LocalPlan
	}
	// Don't try to run empty nodes (e.g. SET commands) with distSQL.
	if _, ok := plan.planNode.(*zeroNode); ok {
		return physicalplan.LocalPlan
	}
	rec, err := checkSupportForPlanNode(plan.planNode)
	if err != nil {
		// Don't use distSQL for this request.
		log.VEventf(ctx, 1, "query not supported for distSQL: %s", err)
		return physicalplan.LocalPlan
	}
	if shouldDistributeGivenRecAndMode(rec, distSQLMode) {
		return physicalplan.FullyDistributedPlan
	}
	return physicalplan.LocalPlan
}
// golangFillQueryArguments transforms Go values into datums.
// Some of the args can be datums (in which case the transformation is a no-op).
// A nil arg maps to tree.DNull. Unsupported argument types cause an
// assertion-failure panic (see the end of the loop body).
//
// TODO: This does not support arguments of the SQL 'Date' type, as there is not
// an equivalent type in Go's standard library. It's not currently needed by any
// of our internal tables.
func golangFillQueryArguments(args ...interface{}) (tree.Datums, error) {
	res := make(tree.Datums, len(args))
	for i, arg := range args {
		if arg == nil {
			res[i] = tree.DNull
			continue
		}
		// A type switch to handle a few explicit types with special semantics:
		// - Datums are passed along as is.
		// - Time datatypes get special representation in the database.
		// - Usernames are assumed pre-normalized for lookup and validation.
		var d tree.Datum
		switch t := arg.(type) {
		case tree.Datum:
			d = t
		case time.Time:
			var err error
			// Timestamps are stored at microsecond precision.
			d, err = tree.MakeDTimestamp(t, time.Microsecond)
			if err != nil {
				return nil, err
			}
		case time.Duration:
			d = &tree.DInterval{Duration: duration.MakeDuration(t.Nanoseconds(), 0, 0)}
		case bitarray.BitArray:
			d = &tree.DBitArray{BitArray: t}
		case *apd.Decimal:
			dd := &tree.DDecimal{}
			dd.Set(t)
			d = dd
		case security.SQLUsername:
			d = tree.NewDString(t.Normalized())
		}
		if d == nil {
			// Handle all types which have an underlying type that can be stored in the
			// database.
			// Note: if this reflection becomes a performance concern in the future,
			// commonly used types could be added explicitly into the type switch above
			// for a performance gain.
			val := reflect.ValueOf(arg)
			switch val.Kind() {
			case reflect.Bool:
				d = tree.MakeDBool(tree.DBool(val.Bool()))
			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
				d = tree.NewDInt(tree.DInt(val.Int()))
			case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
				d = tree.NewDInt(tree.DInt(val.Uint()))
			case reflect.Float32, reflect.Float64:
				d = tree.NewDFloat(tree.DFloat(val.Float()))
			case reflect.String:
				d = tree.NewDString(val.String())
			case reflect.Slice:
				switch {
				case val.IsNil():
					d = tree.DNull
				case val.Type().Elem().Kind() == reflect.String:
					// []string maps to a STRING array.
					a := tree.NewDArray(types.String)
					for v := 0; v < val.Len(); v++ {
						if err := a.Append(tree.NewDString(val.Index(v).String())); err != nil {
							return nil, err
						}
					}
					d = a
				case val.Type().Elem().Kind() == reflect.Uint8:
					// []byte maps to BYTES.
					d = tree.NewDBytes(tree.DBytes(val.Bytes()))
				}
			}
			if d == nil {
				// Every supported conversion above sets d; reaching here is a
				// programming error at the call site.
				panic(errors.AssertionFailedf("unexpected type %T", arg))
			}
		}
		res[i] = d
	}
	return res, nil
}
// checkResultType verifies that a table result can be returned to the
// client. It returns nil for every supported type family, an unimplemented
// error for nested arrays, and plain errors for AnyFamily and unknown
// families.
func checkResultType(typ *types.T) error {
	// Compare all types that can rely on == equality.
	switch typ.Family() {
	case types.UnknownFamily, types.BitFamily, types.BoolFamily, types.IntFamily,
		types.FloatFamily, types.DecimalFamily, types.BytesFamily,
		types.Box2DFamily, types.GeographyFamily, types.GeometryFamily,
		types.StringFamily, types.CollatedStringFamily, types.DateFamily,
		types.TimestampFamily, types.TimeFamily, types.TimeTZFamily,
		types.TimestampTZFamily, types.IntervalFamily, types.JsonFamily,
		types.UuidFamily, types.INetFamily, types.OidFamily, types.TupleFamily,
		types.EnumFamily:
		// All of these scalar families can be returned as-is.
	case types.ArrayFamily:
		if typ.ArrayContents().Family() == types.ArrayFamily {
			// Technically we could probably return arrays of arrays to a
			// client (the encoding exists) but we don't want to give
			// mixed signals -- that nested arrays appear to be supported
			// in this case, and not in other cases (eg. CREATE). So we
			// reject them in every case instead.
			return unimplemented.NewWithIssueDetail(32552,
				"result", "arrays cannot have arrays as element type")
		}
	case types.AnyFamily:
		// Placeholder case.
		return errors.Errorf("could not determine data type of %s", typ)
	default:
		return errors.Errorf("unsupported result type: %s", typ)
	}
	return nil
}
// EvalAsOfTimestamp evaluates and returns the timestamp from an AS OF SYSTEM
// TIME clause. It rejects non-synthetic timestamps that lie in the future
// relative to the node's clock.
func (p *planner) EvalAsOfTimestamp(
	ctx context.Context, asOf tree.AsOfClause,
) (_ hlc.Timestamp, err error) {
	ts, err := tree.EvalAsOfTimestamp(ctx, asOf, &p.semaCtx, p.EvalContext())
	if err != nil {
		return hlc.Timestamp{}, err
	}
	// Synthetic timestamps are exempt from the future-timestamp check.
	if now := p.execCfg.Clock.Now(); now.Less(ts) && !ts.Synthetic {
		return hlc.Timestamp{}, errors.Errorf(
			"AS OF SYSTEM TIME: cannot specify timestamp in the future (%s > %s)", ts, now)
	}
	return ts, nil
}
// ParseHLC parses a string representation of an `hlc.Timestamp`.
// This differs from hlc.ParseTimestamp in that it parses the decimal
// serialization of an hlc timestamp as opposed to the string serialization
// performed by hlc.Timestamp.String().
//
// This function is used to parse:
//
//   1580361670629466905.0000000001
//
// hlc.ParseTimestamp() would be used to parse:
//
//   1580361670.629466905,1
//
func ParseHLC(s string) (hlc.Timestamp, error) {
	decimal, _, parseErr := apd.NewFromString(s)
	if parseErr != nil {
		return hlc.Timestamp{}, parseErr
	}
	return tree.DecimalToHLC(decimal)
}
// isAsOf analyzes a statement to bypass the logic in newPlan(), since
// that requires the transaction to be started already. If the returned
// timestamp is not nil, it is the timestamp to which a transaction
// should be set. The statements that will be checked are Select,
// ShowTrace (of a Select statement), Scrub, Export, and CreateStats.
// A nil timestamp with a nil error means the statement has no AS OF clause.
func (p *planner) isAsOf(ctx context.Context, stmt tree.Statement) (*hlc.Timestamp, error) {
	var asOf tree.AsOfClause
	switch s := stmt.(type) {
	case *tree.Select:
		selStmt := s.Select
		// Unwrap any number of nested parenthesized selects, e.g.
		// ((SELECT ...)), to reach the inner SelectClause.
		var parenSel *tree.ParenSelect
		var ok bool
		for parenSel, ok = selStmt.(*tree.ParenSelect); ok; parenSel, ok = selStmt.(*tree.ParenSelect) {
			selStmt = parenSel.Select.Select
		}
		sc, ok := selStmt.(*tree.SelectClause)
		if !ok {
			return nil, nil
		}
		if sc.From.AsOf.Expr == nil {
			return nil, nil
		}
		asOf = sc.From.AsOf
	case *tree.Scrub:
		if s.AsOf.Expr == nil {
			return nil, nil
		}
		asOf = s.AsOf
	case *tree.Export:
		// EXPORT delegates to its inner query.
		return p.isAsOf(ctx, s.Query)
	case *tree.CreateStats:
		if s.Options.AsOf.Expr == nil {
			return nil, nil
		}
		asOf = s.Options.AsOf
	case *tree.Explain:
		// EXPLAIN delegates to the explained statement.
		return p.isAsOf(ctx, s.Statement)
	default:
		return nil, nil
	}
	ts, err := p.EvalAsOfTimestamp(ctx, asOf)
	return &ts, err
}
// isSavepoint reports whether ast is a SAVEPOINT statement.
func isSavepoint(ast tree.Statement) bool {
	_, ok := ast.(*tree.Savepoint)
	return ok
}
// isSetTransaction reports whether ast is a "SET TRANSACTION ..." statement.
func isSetTransaction(ast tree.Statement) bool {
	_, ok := ast.(*tree.SetTransaction)
	return ok
}
// queryPhase represents a phase during a query's execution.
type queryPhase int

const (
	// preparing covers everything before execution starts (parsing and
	// building a plan).
	preparing queryPhase = iota
	// executing is the execution phase proper.
	executing
)
// queryMeta stores metadata about a query. Stored as reference in
// session.mu.ActiveQueries.
type queryMeta struct {
	// The ID of the transaction that this query is running within.
	txnID uuid.UUID
	// The timestamp when this query began execution.
	start time.Time
	// The string of the SQL statement being executed. This string may
	// contain sensitive information, so it must be converted back into
	// an AST and dumped before use in logging.
	rawStmt string
	// States whether this query is distributed. Note that all queries,
	// including those that are distributed, have this field set to false until
	// start of execution; only at that point can we actually determine whether
	// this query will be distributed. Use the phase variable below
	// to determine whether this query has entered execution yet.
	isDistributed bool
	// Current phase of execution of query (preparing or executing).
	phase queryPhase
	// Cancellation function for the context associated with this query's transaction.
	ctxCancel context.CancelFunc
	// If set, this query will not be reported as part of SHOW QUERIES. This is
	// set based on the statement implementing tree.HiddenFromShowQueries.
	hidden bool
	// progressAtomic is accessed atomically; NOTE(review): semantics of the
	// stored value are not visible in this chunk — confirm at the writers.
	progressAtomic uint64
}
// cancel cancels the query associated with this queryMeta, by closing the associated
// txn context. It is idempotent because context.CancelFunc is.
func (q *queryMeta) cancel() {
	q.ctxCancel()
}
// getStatement parses the raw SQL associated with this queryMeta and
// returns the resulting AST, or the parse error if the statement cannot
// be parsed.
func (q *queryMeta) getStatement() (tree.Statement, error) {
	stmt, parseErr := parser.ParseOne(q.rawStmt)
	if parseErr != nil {
		return nil, parseErr
	}
	return stmt.AST, nil
}
// SessionDefaults mirrors fields in Session, for restoring default
// configuration values in SET ... TO DEFAULT (or RESET ...) statements.
// Keys are session-variable names; values are their default settings.
type SessionDefaults map[string]string
// SessionArgs contains arguments for serving a client connection.
type SessionArgs struct {
	// User is the SQL user the session authenticates as.
	User security.SQLUsername
	// SessionDefaults holds the session-variable defaults for this session.
	SessionDefaults SessionDefaults
	// RemoteAddr is the client's address. This is nil iff this is an internal
	// client.
	RemoteAddr net.Addr
	// ConnResultsBufferSize is the size in bytes of the connection's results
	// buffer.
	ConnResultsBufferSize int64
}
// SessionRegistry stores a set of all sessions on this node.
// Use register() and deregister() to modify this registry.
// The embedded mutex guards the sessions map; all methods lock it.
type SessionRegistry struct {
	syncutil.Mutex
	// sessions maps session IDs to their registered sessions.
	sessions map[ClusterWideID]registrySession
}
// NewSessionRegistry creates a new SessionRegistry with an empty set
// of sessions.
func NewSessionRegistry() *SessionRegistry {
	r := &SessionRegistry{}
	r.sessions = make(map[ClusterWideID]registrySession)
	return r
}
// register adds the session s to the registry under id.
func (r *SessionRegistry) register(id ClusterWideID, s registrySession) {
	r.Lock()
	defer r.Unlock()
	r.sessions[id] = s
}
// deregister removes the session with the given id from the registry.
// Removing an absent id is a no-op.
func (r *SessionRegistry) deregister(id ClusterWideID) {
	r.Lock()
	defer r.Unlock()
	delete(r.sessions, id)
}
// registrySession is the interface a session must implement to live in a
// SessionRegistry.
type registrySession interface {
	// user returns the SQL user the session belongs to.
	user() security.SQLUsername
	// cancelQuery cancels the query with the given ID if it is running in
	// this session, returning whether it was found.
	cancelQuery(queryID ClusterWideID) bool
	// cancelSession cancels the whole session.
	cancelSession()
	// serialize serializes a Session into a serverpb.Session
	// that can be served over RPC.
	serialize() serverpb.Session
}
// CancelQuery looks up the associated query in the session registry and cancels
// it. The caller is responsible for all permission checks.
// It returns true if a session accepted the cancellation, and an error when
// the ID is malformed or no session knows the query.
func (r *SessionRegistry) CancelQuery(queryIDStr string) (bool, error) {
	queryID, err := StringToClusterWideID(queryIDStr)
	if err != nil {
		// Report the raw input string: on a parse failure queryID is only
		// the zero value, so printing it would hide the malformed input.
		return false, fmt.Errorf("query ID %s malformed: %s", queryIDStr, err)
	}
	r.Lock()
	defer r.Unlock()
	// We don't know which session owns the query, so ask each in turn.
	for _, session := range r.sessions {
		if session.cancelQuery(queryID) {
			return true, nil
		}
	}
	return false, fmt.Errorf("query ID %s not found", queryID)
}
// CancelSession looks up the specified session in the session registry and
// cancels it. The caller is responsible for all permission checks.
// A missing session is reported in the response's Error field rather than as
// a Go error, matching the existing contract.
func (r *SessionRegistry) CancelSession(
	sessionIDBytes []byte,
) (*serverpb.CancelSessionResponse, error) {
	if len(sessionIDBytes) != 16 {
		return nil, errors.Errorf("invalid non-16-byte UUID %v", sessionIDBytes)
	}
	sessionID := BytesToClusterWideID(sessionIDBytes)
	r.Lock()
	defer r.Unlock()
	// The registry map is keyed by ClusterWideID, so look the session up
	// directly instead of scanning every entry.
	if session, ok := r.sessions[sessionID]; ok {
		session.cancelSession()
		return &serverpb.CancelSessionResponse{Canceled: true}, nil
	}
	return &serverpb.CancelSessionResponse{
		Error: fmt.Sprintf("session ID %s not found", sessionID),
	}, nil
}
// SerializeAll returns a snapshot of every session in the registry, each
// converted to a serverpb.Session. The registry lock is held for the
// duration of the serialization.
func (r *SessionRegistry) SerializeAll() []serverpb.Session {
	r.Lock()
	defer r.Unlock()
	serialized := make([]serverpb.Session, 0, len(r.sessions))
	for _, sess := range r.sessions {
		serialized = append(serialized, sess.serialize())
	}
	return serialized
}
// newSchemaInterface builds a schemaInterface whose logical accessor wraps
// the given descriptor collection and virtual schemas.
func newSchemaInterface(descsCol *descs.Collection, vs catalog.VirtualSchemas) *schemaInterface {
	return &schemaInterface{logical: accessors.NewLogicalAccessor(descsCol, vs)}
}
// MaxSQLBytes is the maximum length in bytes of SQL statements serialized
// into a serverpb.Session. Exported for testing.
const MaxSQLBytes = 1000
// jobsCollection is a list of job IDs.
type jobsCollection []jobspb.JobID
// truncateStatementStringForTelemetry truncates the string
// representation of a statement to a maximum length, so as to not
// create unduly large logging and error payloads. Statements at or
// below the cutoff are returned unchanged; longer ones are cut at the
// cutoff and marked with a " [...]" suffix.
func truncateStatementStringForTelemetry(stmt string) string {
	// panicLogOutputCutoffChars is the maximum length of the copy of the
	// current statement embedded in telemetry reports and panic errors in
	// logs.
	const panicLogOutputCutoffChars = 10000
	if len(stmt) > panicLogOutputCutoffChars {
		// Slice at the cutoff, not at len(stmt)-6: the latter keeps the
		// string at its original (unbounded) length.
		stmt = stmt[:panicLogOutputCutoffChars] + " [...]"
	}
	return stmt
}
// hideNonVirtualTableNameFunc returns a function that can be used with
// FmtCtx.SetReformatTableNames. It hides all table names that are not virtual
// tables (they are replaced by a single "_" byte); virtual table names are
// kept but their database prefix is scrubbed.
func hideNonVirtualTableNameFunc(vt VirtualTabler) func(ctx *tree.FmtCtx, name *tree.TableName) {
	reformatFn := func(ctx *tree.FmtCtx, tn *tree.TableName) {
		virtual, err := vt.getVirtualTableEntry(tn)
		// A lookup error is treated the same as "not virtual": redact.
		if err != nil || virtual == nil {
			ctx.WriteByte('_')
			return
		}
		// Virtual table: we want to keep the name; however
		// we need to scrub the database name prefix.
		newTn := *tn
		newTn.CatalogName = "_"
		// Format with parsable output and table-name reformatting disabled,
		// so the scrubbed name is emitted verbatim.
		ctx.WithFlags(tree.FmtParsable, func() {
			ctx.WithReformatTableNames(nil, func() {
				ctx.FormatNode(&newTn)
			})
		})
	}
	return reformatFn
}
// anonymizeStmtAndConstants formats stmt with identifiers anonymized and
// constants hidden, additionally redacting non-virtual table names when a
// VirtualTabler is supplied.
func anonymizeStmtAndConstants(stmt tree.Statement, vt VirtualTabler) string {
	fmtCtx := tree.NewFmtCtx(tree.FmtAnonymize | tree.FmtHideConstants)
	if vt != nil {
		fmtCtx.SetReformatTableNames(hideNonVirtualTableNameFunc(vt))
	}
	fmtCtx.FormatNode(stmt)
	return fmtCtx.CloseAndGetString()
}
// WithAnonymizedStatement attaches the anonymized (and length-truncated)
// form of a statement to an error object as safe details.
func WithAnonymizedStatement(err error, stmt tree.Statement, vt VirtualTabler) error {
	anon := truncateStatementStringForTelemetry(
		anonymizeStmtAndConstants(stmt, vt))
	return errors.WithSafeDetails(err,
		"while executing: %s", errors.Safe(anon))
}
// SessionTracing holds the state used by SET TRACING statements in the context
// of one SQL session.
// It holds the current trace being collected (or the last trace collected, if
// tracing is not currently ongoing).
//
// SessionTracing and its interactions with the connExecutor are thread-safe;
// tracing can be turned on at any time.
type SessionTracing struct {
	// enabled is set at times when session tracing is active - i.e. when
	// transactions are being recorded.
	enabled bool
	// kvTracingEnabled is set at times when KV tracing is active. When
	// KV tracing is enabled, the SQL/KV interface logs individual K/V
	// operations to the current context.
	kvTracingEnabled bool
	// showResults, when set, indicates that the result rows produced by
	// the execution statement must be reported in the
	// trace. showResults can be set manually by SET TRACING = ...,
	// results
	showResults bool
	// If recording==true, recordingType indicates the type of the current
	// recording.
	recordingType tracing.RecordingType
	// ex is the connExecutor to which this SessionTracing is tied.
	ex *connExecutor
	// firstTxnSpan is the span of the first txn that was active when session
	// tracing was enabled. May be nil; see getRecording.
	firstTxnSpan *tracing.Span
	// connSpan is the connection's span. This is recording.
	connSpan *tracing.Span
	// lastRecording will collect the recording when stopping tracing.
	lastRecording []traceRow
}
// getSessionTrace returns the session trace. If we're not currently tracing,
// this will be the last recorded trace. If we are currently tracing, we'll
// return whatever was recorded so far.
func (st *SessionTracing) getSessionTrace() ([]traceRow, error) {
	if st.enabled {
		return generateSessionTraceVTable(st.getRecording())
	}
	return st.lastRecording, nil
}
// getRecording returns the recorded spans of the current trace: the first
// txn's span recording (if any) followed by the connection span's recording.
func (st *SessionTracing) getRecording() []tracingpb.RecordedSpan {
	var recorded []tracingpb.RecordedSpan
	if sp := st.firstTxnSpan; sp != nil {
		recorded = append(recorded, sp.GetRecording()...)
	}
	recorded = append(recorded, st.connSpan.GetRecording()...)
	return recorded
}
// StartTracing starts "session tracing". From this moment on, everything
// happening on both the connection's context and the current txn's context (if
// any) will be traced.
// StopTracing() needs to be called to finish this trace.
//
// There's two contexts on which we must record:
// 1) If we're inside a txn, we start recording on the txn's span. We assume
// that the txn's ctx has a recordable span on it.
// 2) Regardless of whether we're in a txn or not, we need to record the
// connection's context. This context generally does not have a span, so we
// "hijack" it with one that does. Whatever happens on that context, plus
// whatever happens in future derived txn contexts, will be recorded.
//
// Args:
// kvTracingEnabled: If set, the traces will also include "KV trace" messages -
// verbose messages around the interaction of SQL with KV. Some of the messages
// are per-row.
// showResults: If set, result rows are reported in the trace.
func (st *SessionTracing) StartTracing(
	recType tracing.RecordingType, kvTracingEnabled, showResults bool,
) error {
	if st.enabled {
		// We're already tracing. Only treat as no-op if the same options
		// are requested.
		if kvTracingEnabled != st.kvTracingEnabled ||
			showResults != st.showResults ||
			recType != st.recordingType {
			// Build the option list the user would have to pass to restart
			// tracing with the requested settings, for the error hint.
			var desiredOptions bytes.Buffer
			comma := ""
			if kvTracingEnabled {
				desiredOptions.WriteString("kv")
				comma = ", "
			}
			if showResults {
				fmt.Fprintf(&desiredOptions, "%sresults", comma)
				comma = ", "
			}
			// NOTE(review): the hint always suggests "cluster" regardless of
			// the requested recType — confirm this is intended.
			recOption := "cluster"
			fmt.Fprintf(&desiredOptions, "%s%s", comma, recOption)
			err := pgerror.Newf(pgcode.ObjectNotInPrerequisiteState,
				"tracing is already started with different options")
			return errors.WithHintf(err,
				"reset with SET tracing = off; SET tracing = %s", desiredOptions.String())
		}
		return nil
	}
	// If we're inside a transaction, hijack the txn's ctx with one that has a
	// recording span.
	if _, ok := st.ex.machine.CurState().(stateNoTxn); !ok {
		txnCtx := st.ex.state.Ctx
		if sp := tracing.SpanFromContext(txnCtx); sp == nil {
			return errors.Errorf("no txn span for SessionTracing")
		}
		newTxnCtx, sp := tracing.EnsureChildSpan(txnCtx, st.ex.server.cfg.AmbientCtx.Tracer,
			"session tracing", tracing.WithForceRealSpan())
		sp.SetVerbose(true)
		st.ex.state.Ctx = newTxnCtx
		st.firstTxnSpan = sp
	}
	st.enabled = true
	st.kvTracingEnabled = kvTracingEnabled
	st.showResults = showResults
	st.recordingType = recType
	// Now hijack the conn's ctx with one that has a recording span.
	connCtx := st.ex.ctxHolder.connCtx
	opName := "session recording"
	newConnCtx, sp := tracing.EnsureChildSpan(
		connCtx,
		st.ex.server.cfg.AmbientCtx.Tracer,
		opName,
		tracing.WithForceRealSpan(),
	)
	sp.SetVerbose(true)
	st.connSpan = sp
	// Hijack the connections context. StopTracing will unhijack it.
	st.ex.ctxHolder.hijack(newConnCtx)
	return nil
}
// StopTracing stops the trace that was started with StartTracing().
// It collects the recording from both the first txn span (if any) and the
// connection span into lastRecording, finishes the connection span, and
// un-hijacks the connection's context.
//
// Despite the comment below, calling it when tracing is not active is a no-op
// that returns nil, not an error.
func (st *SessionTracing) StopTracing() error {
	if !st.enabled {
		// We're not currently tracing. No-op.
		return nil
	}
	st.enabled = false
	st.kvTracingEnabled = false
	st.showResults = false
	st.recordingType = tracing.RecordingOff
	// Accumulate the recordings before turning verbosity off so nothing is
	// lost: first the initial txn's span, then the connection span.
	var spans []tracingpb.RecordedSpan
	if st.firstTxnSpan != nil {
		spans = append(spans, st.firstTxnSpan.GetRecording()...)
		st.firstTxnSpan.SetVerbose(false)
	}
	st.connSpan.Finish()
	spans = append(spans, st.connSpan.GetRecording()...)
	// NOTE: We're stopping recording on the connection's ctx only; the stopping
	// is not inherited by children. If we are inside of a txn, that span will
	// continue recording, even though nobody will collect its recording again.
	st.connSpan.SetVerbose(false)
	st.ex.ctxHolder.unhijack()
	var err error
	st.lastRecording, err = generateSessionTraceVTable(spans)
	return err
}
// KVTracingEnabled checks whether KV tracing is currently enabled.
func (st *SessionTracing) KVTracingEnabled() bool {
	return st.kvTracingEnabled
}

// Enabled checks whether session tracing is currently enabled.
func (st *SessionTracing) Enabled() bool {
	return st.enabled
}
// TracePlanStart conditionally emits a trace message at the moment
// logical planning starts.
func (st *SessionTracing) TracePlanStart(ctx context.Context, stmtTag string) {
	if !st.enabled {
		return
	}
	log.VEventf(ctx, 2, "planning starts: %s", stmtTag)
}
// TracePlanEnd conditionally emits a trace message at the moment
// logical planning ends. Unlike TracePlanStart, it does not gate on
// st.enabled; it relies on the event's verbosity check instead.
func (st *SessionTracing) TracePlanEnd(ctx context.Context, err error) {
	log.VEventfDepth(ctx, 2, 1, "planning ends")
	if err != nil {
		// Emit the planning error as an additional message.
		log.VEventfDepth(ctx, 2, 1, "planning error: %v", err)
	}
}
// TracePlanCheckStart conditionally emits a trace message at the
// moment the test of which execution engine to use starts.
func (st *SessionTracing) TracePlanCheckStart(ctx context.Context) {
	log.VEventfDepth(ctx, 2, 1, "checking distributability")
}
// TracePlanCheckEnd conditionally emits a trace message at the moment
// the engine check ends: the distribution decision on success, or the
// check error on failure.
func (st *SessionTracing) TracePlanCheckEnd(ctx context.Context, err error, dist bool) {
	if err == nil {
		log.VEventfDepth(ctx, 2, 1, "will distribute plan: %v", dist)
		return
	}
	log.VEventfDepth(ctx, 2, 1, "distributability check error: %v", err)
}
// TraceExecStart conditionally emits a trace message at the moment
// plan execution starts, tagged with the execution engine in use.
func (st *SessionTracing) TraceExecStart(ctx context.Context, engine string) {
	log.VEventfDepth(ctx, 2, 1, "execution starts: %s engine", engine)
}
// TraceExecConsume creates a context for TraceExecRowsResult below. When
// tracing is off it returns the input context and a no-op cleanup; when on,
// it returns a child-span context and the span's Finish as cleanup.
func (st *SessionTracing) TraceExecConsume(ctx context.Context) (context.Context, func()) {
	if !st.enabled {
		return ctx, func() {}
	}
	consumeCtx, sp := tracing.ChildSpan(ctx, "consuming rows")
	return consumeCtx, sp.Finish
}
// TraceExecRowsResult conditionally emits a trace message for a single output
// row; it only fires when SET TRACING = ..., results was requested.
func (st *SessionTracing) TraceExecRowsResult(ctx context.Context, values tree.Datums) {
	if !st.showResults {
		return
	}
	log.VEventfDepth(ctx, 2, 1, "output row: %s", values)
}
// TraceExecEnd conditionally emits a trace message at the moment
// plan execution completes, followed by either the row count or the
// execution error.
func (st *SessionTracing) TraceExecEnd(ctx context.Context, err error, count int) {
	log.VEventfDepth(ctx, 2, 1, "execution ends")
	if err == nil {
		log.VEventfDepth(ctx, 2, 1, "rows affected: %d", count)
		return
	}
	log.VEventfDepth(ctx, 2, 1, "execution failed after %d rows: %v", count, err)
}
// Column indices into traceRow, in the order the session_trace vtable
// declares its columns. Previously the message_idx slot was an anonymous
// "_" placeholder; it is now named so readers can see what position 1 is.
const (
	// span_idx INT NOT NULL, -- The span's index.
	traceSpanIdxCol = iota
	// message_idx INT NOT NULL, -- The message's index within its span.
	traceMsgIdxCol
	// timestamp TIMESTAMPTZ NOT NULL,-- The message's timestamp.
	traceTimestampCol
	// duration INTERVAL, -- The span's duration.
	// -- NULL if the span was not finished at the time
	// -- the trace has been collected.
	traceDurationCol
	// operation STRING NULL, -- The span's operation.
	traceOpCol
	// loc STRING NOT NULL, -- The file name / line number prefix, if any.
	traceLocCol
	// tag STRING NOT NULL, -- The logging tag, if any.
	traceTagCol
	// message STRING NOT NULL, -- The logged message.
	traceMsgCol
	// age INTERVAL NOT NULL -- The age of the message.
	traceAgeCol
	// traceNumCols must be the last item in the enumeration.
	traceNumCols
)
// traceRow is the type of a single row in the session_trace vtable.
// It is a fixed-size array indexed by the trace*Col constants above.
type traceRow [traceNumCols]tree.Datum
// A regular expression to split log messages.
// It has three parts:
// - the (optional) code location, with at least one forward slash and a period
// in the file name:
// ((?:[^][ :]+/[^][ :]+\.[^][ :]+:[0-9]+)?)
// - the (optional) tag: ((?:\[(?:[^][]|\[[^]]*\])*\])?)
// - the message itself: the rest.
// The (?s:...) flag lets "." match newlines so multi-line messages are kept
// whole in the third capture group.
var logMessageRE = regexp.MustCompile(
	`(?s:^((?:[^][ :]+/[^][ :]+\.[^][ :]+:[0-9]+)?) *((?:\[(?:[^][]|\[[^]]*\])*\])?) *(.*))`)
// generateSessionTraceVTable generates the rows of said table by using the log
// messages from the session's trace (i.e. the ongoing trace, if any, or the
// last one recorded).
//
// All the log messages from the current recording are returned, in
// the order in which they should be presented in the crdb_internal.session_info
// virtual table. Messages from child spans are inserted as a block in between
// messages from the parent span. Messages from sibling spans are not
// interleaved.
//
// Here's a drawing showing the order in which messages from different spans
// will be interleaved. Each box is a span; inner-boxes are child spans. The
// numbers indicate the order in which the log messages will appear in the
// virtual table.
//
// +-----------------------+
// | 1 |
// | +-------------------+ |
// | | 2 | |
// | | +----+ | |
// | | | | +----+ | |
// | | | 3 | | 4 | | |
// | | | | | | 5 | |
// | | | | | | ++ | |
// | | | | | | | |
// | | +----+ | | | |
// | | +----+ | |
// | | | |
// | | 6 | |
// | +-------------------+ |
// | 7 |
// +-----------------------+
//
// Note that what's described above is not the order in which SHOW TRACE FOR SESSION
// displays the information: SHOW TRACE will sort by the age column.
func generateSessionTraceVTable(spans []tracingpb.RecordedSpan) ([]traceRow, error) {
	// Get all the log messages, in the right order.
	var allLogs []logRecordRow
	// NOTE: The spans are recorded in the order in which they are started.
	// seenSpans tracks spans already emitted as part of an earlier root's
	// subtrace, so each span is processed exactly once.
	seenSpans := make(map[uint64]struct{})
	for spanIdx, span := range spans {
		if _, ok := seenSpans[span.SpanID]; ok {
			continue
		}
		spanWithIndex := spanWithIndex{
			RecordedSpan: &spans[spanIdx],
			index:        spanIdx,
		}
		msgs, err := getMessagesForSubtrace(spanWithIndex, spans, seenSpans)
		if err != nil {
			return nil, err
		}
		allLogs = append(allLogs, msgs...)
	}
	// Transform the log messages into table rows.
	// We need to populate "operation" later because it is only
	// set for the first row in each span.
	opMap := make(map[tree.DInt]*tree.DString)
	durMap := make(map[tree.DInt]*tree.DInterval)
	var res []traceRow
	var minTimestamp, zeroTime time.Time
	for _, lrr := range allLogs {
		// The "operation" column is only set for the first row in span.
		// We'll populate the rest below.
		if lrr.index == 0 {
			spanIdx := tree.DInt(lrr.span.index)
			opMap[spanIdx] = tree.NewDString(lrr.span.Operation)
			// A zero Duration means the span was unfinished when collected;
			// its duration column stays NULL.
			if lrr.span.Duration != 0 {
				durMap[spanIdx] = &tree.DInterval{
					Duration: duration.MakeDuration(lrr.span.Duration.Nanoseconds(), 0, 0),
				}
			}
		}
		// We'll need the lowest timestamp to compute ages below.
		if minTimestamp == zeroTime || lrr.timestamp.Before(minTimestamp) {
			minTimestamp = lrr.timestamp
		}
		// Split the message into component parts.
		//
		// The result of FindStringSubmatchIndex is a 1D array of pairs
		// [start, end) of positions in the input string. The first pair
		// identifies the entire match; the 2nd pair corresponds to the
		// 1st parenthetized expression in the regexp, and so on.
		loc := logMessageRE.FindStringSubmatchIndex(lrr.msg)
		if loc == nil {
			return nil, fmt.Errorf("unable to split trace message: %q", lrr.msg)
		}
		tsDatum, err := tree.MakeDTimestampTZ(lrr.timestamp, time.Nanosecond)
		if err != nil {
			return nil, err
		}
		row := traceRow{
			tree.NewDInt(tree.DInt(lrr.span.index)), // span_idx
			tree.NewDInt(tree.DInt(lrr.index)),      // message_idx
			tsDatum,                                 // timestamp
			tree.DNull,                              // duration, will be populated below
			tree.DNull,                              // operation, will be populated below
			tree.NewDString(lrr.msg[loc[2]:loc[3]]), // location
			tree.NewDString(lrr.msg[loc[4]:loc[5]]), // tag
			tree.NewDString(lrr.msg[loc[6]:loc[7]]), // message
			tree.DNull,                              // age, will be populated below
		}
		res = append(res, row)
	}
	if len(res) == 0 {
		// Nothing to do below. Shortcut.
		return res, nil
	}
	// Populate the operation and age columns.
	for i := range res {
		spanIdx := res[i][traceSpanIdxCol]
		if opStr, ok := opMap[*(spanIdx.(*tree.DInt))]; ok {
			res[i][traceOpCol] = opStr
		}
		if dur, ok := durMap[*(spanIdx.(*tree.DInt))]; ok {
			res[i][traceDurationCol] = dur
		}
		// age = message timestamp minus the earliest timestamp in the trace.
		ts := res[i][traceTimestampCol].(*tree.DTimestampTZ)
		res[i][traceAgeCol] = &tree.DInterval{
			Duration: duration.MakeDuration(ts.Sub(minTimestamp).Nanoseconds(), 0, 0),
		}
	}
	return res, nil
}
// getOrderedChildSpans returns all the spans in allSpans that are children of
// spanID. It assumes the input is ordered by start time, in which case the
// output is also ordered.
func getOrderedChildSpans(spanID uint64, allSpans []tracingpb.RecordedSpan) []spanWithIndex {
	children := make([]spanWithIndex, 0)
	for idx := range allSpans {
		if allSpans[idx].ParentSpanID != spanID {
			continue
		}
		children = append(children, spanWithIndex{
			RecordedSpan: &allSpans[idx],
			index:        idx,
		})
	}
	return children
}
// getMessagesForSubtrace takes a span and interleaves its log messages with
// those from its children (recursively). The order is the one defined in the
// comment on generateSessionTraceVTable().
//
// seenSpans is modified to record all the spans that are part of the subtrace
// rooted at span.
func getMessagesForSubtrace(
	span spanWithIndex, allSpans []tracingpb.RecordedSpan, seenSpans map[uint64]struct{},
) ([]logRecordRow, error) {
	if _, ok := seenSpans[span.SpanID]; ok {
		return nil, errors.Errorf("duplicate span %d", span.SpanID)
	}
	var allLogs []logRecordRow
	const spanStartMsgTemplate = "=== SPAN START: %s ==="
	// spanStartMsgs are metadata about the span, e.g. the operation name and tags
	// contained in the span. They are added as one log message.
	spanStartMsgs := make([]string, 0, len(span.Tags)+1)
	spanStartMsgs = append(spanStartMsgs, fmt.Sprintf(spanStartMsgTemplate, span.Operation))
	// Add recognized tags to the output.
	for name, value := range span.Tags {
		if !strings.HasPrefix(name, tracing.TagPrefix) {
			// Not a tag to be output.
			continue
		}
		spanStartMsgs = append(spanStartMsgs, fmt.Sprintf("%s: %s", name, value))
	}
	// Sort only the tag lines; the "SPAN START" header stays first.
	sort.Strings(spanStartMsgs[1:])
	// This message holds all the spanStartMsgs and marks the beginning of the
	// span, to indicate the start time and duration of the span.
	allLogs = append(
		allLogs,
		logRecordRow{
			timestamp: span.StartTime,
			msg:       strings.Join(spanStartMsgs, "\n"),
			span:      span,
			index:     0,
		},
	)
	seenSpans[span.SpanID] = struct{}{}
	childSpans := getOrderedChildSpans(span.SpanID, allSpans)
	var i, j int
	// Sentinel value - year 6000. Used so an exhausted side of the merge
	// always compares greater than any real timestamp.
	maxTime := time.Date(6000, 0, 0, 0, 0, 0, 0, time.UTC)
	// Merge the logs with the child spans: a two-pointer merge by timestamp,
	// where each child span contributes its whole subtrace as a block.
	for i < len(span.Logs) || j < len(childSpans) {
		logTime := maxTime
		childTime := maxTime
		if i < len(span.Logs) {
			logTime = span.Logs[i].Time
		}
		if j < len(childSpans) {
			childTime = childSpans[j].StartTime
		}
		if logTime.Before(childTime) {
			allLogs = append(allLogs,
				logRecordRow{
					timestamp: logTime,
					msg:       span.Logs[i].Msg(),
					span:      span,
					// Add 1 to the index to account for the first dummy message in a
					// span.
					index: i + 1,
				})
			i++
		} else {
			// Recursively append messages from the trace rooted at the child.
			childMsgs, err := getMessagesForSubtrace(childSpans[j], allSpans, seenSpans)
			if err != nil {
				return nil, err
			}
			allLogs = append(allLogs, childMsgs...)
			j++
		}
	}
	return allLogs, nil
}
// logRecordRow is used to temporarily hold on to log messages and their
// metadata while flattening a trace.
type logRecordRow struct {
	timestamp time.Time
	msg       string
	span      spanWithIndex
	// index of the log message within its span. Index 0 is the synthetic
	// "=== SPAN START ===" message; real log messages start at 1.
	index int
}

// spanWithIndex pairs a recorded span with its position in the original
// recording slice.
type spanWithIndex struct {
	*tracingpb.RecordedSpan
	index int
}
// paramStatusUpdater is a subset of RestrictedCommandResult which allows sending
// status updates.
type paramStatusUpdater interface {
	BufferParamStatusUpdate(string, string)
}

// noopParamStatusUpdater implements paramStatusUpdater by performing a no-op.
type noopParamStatusUpdater struct{}

// Compile-time interface conformance check.
var _ paramStatusUpdater = (*noopParamStatusUpdater)(nil)

func (noopParamStatusUpdater) BufferParamStatusUpdate(string, string) {}
// sessionDataMutator is the interface used by sessionVars to change the session
// state. It mostly mutates the Session's SessionData, but not exclusively (e.g.
// see curTxnReadOnly).
type sessionDataMutator struct {
	// data is the session state being mutated.
	data *sessiondata.SessionData
	// defaults holds the session's default variable values.
	defaults SessionDefaults
	// settings is the cluster settings handle.
	settings *cluster.Settings
	// paramStatusUpdater receives ParamStatusUpdate notifications for
	// variables that report changes to the client (e.g. application_name).
	paramStatusUpdater paramStatusUpdater
	// setCurTxnReadOnly is called when we execute SET transaction_read_only = ...
	setCurTxnReadOnly func(val bool)
	// onTempSchemaCreation is called when the temporary schema is set
	// on the search path (the first and only time).
	onTempSchemaCreation func()
	// onSessionDataChangeListeners stores all the observers to execute when
	// session data is modified, keyed by the value to change on.
	onSessionDataChangeListeners map[string][]func(val string)
}
// RegisterOnSessionDataChange adds a listener to execute when a change on the
// given key is made using the mutator object.
func (m *sessionDataMutator) RegisterOnSessionDataChange(key string, f func(val string)) {
	// Lazily allocate the listener map on first registration.
	listeners := m.onSessionDataChangeListeners
	if listeners == nil {
		listeners = make(map[string][]func(val string))
		m.onSessionDataChangeListeners = listeners
	}
	listeners[key] = append(listeners[key], f)
}
// notifyOnDataChangeListeners invokes, in registration order, every listener
// registered for key, passing it the new value.
func (m *sessionDataMutator) notifyOnDataChangeListeners(key string, val string) {
	listeners := m.onSessionDataChangeListeners[key]
	for _, listener := range listeners {
		listener(val)
	}
}
// SetApplicationName sets the application name. It also notifies any
// registered session-data-change listeners and buffers a ParamStatusUpdate
// so the client learns of the new value.
func (m *sessionDataMutator) SetApplicationName(appName string) {
	m.data.ApplicationName = appName
	m.notifyOnDataChangeListeners("application_name", appName)
	m.paramStatusUpdater.BufferParamStatusUpdate("application_name", appName)
}
// SetBytesEncodeFormat sets the session's bytes-to-string encoding format.
func (m *sessionDataMutator) SetBytesEncodeFormat(val sessiondatapb.BytesEncodeFormat) {
	m.data.DataConversionConfig.BytesEncodeFormat = val
}

// SetExtraFloatDigits sets the extra_float_digits conversion setting.
func (m *sessionDataMutator) SetExtraFloatDigits(val int32) {
	m.data.DataConversionConfig.ExtraFloatDigits = val
}

// SetDatabase sets the session's current database.
func (m *sessionDataMutator) SetDatabase(dbName string) {
	m.data.Database = dbName
}

// SetTemporarySchemaName records the session's temporary schema name on the
// search path. The onTempSchemaCreation callback fires first.
func (m *sessionDataMutator) SetTemporarySchemaName(scName string) {
	m.onTempSchemaCreation()
	m.data.SearchPath = m.data.SearchPath.WithTemporarySchemaName(scName)
}

// SetTemporarySchemaIDForDatabase records the temp schema ID for a database,
// lazily allocating the map on first use.
func (m *sessionDataMutator) SetTemporarySchemaIDForDatabase(dbID uint32, tempSchemaID uint32) {
	if m.data.DatabaseIDToTempSchemaID == nil {
		m.data.DatabaseIDToTempSchemaID = make(map[uint32]uint32)
	}
	m.data.DatabaseIDToTempSchemaID[dbID] = tempSchemaID
}

// SetDefaultIntSize sets the default INT size for the session.
func (m *sessionDataMutator) SetDefaultIntSize(size int32) {
	m.data.DefaultIntSize = size
}

// SetDefaultTransactionPriority sets the default txn priority (stored as int).
func (m *sessionDataMutator) SetDefaultTransactionPriority(val tree.UserPriority) {
	m.data.DefaultTxnPriority = int(val)
}

// SetDefaultTransactionReadOnly sets whether new txns default to read-only.
func (m *sessionDataMutator) SetDefaultTransactionReadOnly(val bool) {
	m.data.DefaultTxnReadOnly = val
}

// SetDefaultTransactionUseFollowerReads sets the follower-reads txn default.
func (m *sessionDataMutator) SetDefaultTransactionUseFollowerReads(val bool) {
	m.data.DefaultTxnUseFollowerReads = val
}

// SetEnableSeqScan sets the enable_seqscan session setting.
func (m *sessionDataMutator) SetEnableSeqScan(val bool) {
	m.data.EnableSeqScan = val
}

// SetSynchronousCommit sets the synchronous_commit session setting.
func (m *sessionDataMutator) SetSynchronousCommit(val bool) {
	m.data.SynchronousCommit = val
}

// SetDistSQLMode sets the session's DistSQL execution mode.
func (m *sessionDataMutator) SetDistSQLMode(val sessiondata.DistSQLExecMode) {
	m.data.DistSQLMode = val
}

// SetForceSavepointRestart sets the force_savepoint_restart setting.
func (m *sessionDataMutator) SetForceSavepointRestart(val bool) {
	m.data.ForceSavepointRestart = val
}

// SetZigzagJoinEnabled toggles planning of zigzag joins.
func (m *sessionDataMutator) SetZigzagJoinEnabled(val bool) {
	m.data.ZigzagJoinEnabled = val
}

// SetExperimentalDistSQLPlanning sets the experimental DistSQL planning mode.
func (m *sessionDataMutator) SetExperimentalDistSQLPlanning(
	val sessiondata.ExperimentalDistSQLPlanningMode,
) {
	m.data.ExperimentalDistSQLPlanningMode = val
}

// SetPartiallyDistributedPlansDisabled toggles partially distributed plans.
func (m *sessionDataMutator) SetPartiallyDistributedPlansDisabled(val bool) {
	m.data.PartiallyDistributedPlansDisabled = val
}

// SetRequireExplicitPrimaryKeys sets the require_explicit_primary_keys setting.
func (m *sessionDataMutator) SetRequireExplicitPrimaryKeys(val bool) {
	m.data.RequireExplicitPrimaryKeys = val
}

// SetReorderJoinsLimit sets the join-reordering search limit.
func (m *sessionDataMutator) SetReorderJoinsLimit(val int) {
	m.data.ReorderJoinsLimit = val
}

// SetVectorize sets the vectorized execution mode.
func (m *sessionDataMutator) SetVectorize(val sessiondatapb.VectorizeExecMode) {
	m.data.VectorizeMode = val
}

// SetTestingVectorizeInjectPanics toggles test-only panic injection in the
// vectorized engine.
func (m *sessionDataMutator) SetTestingVectorizeInjectPanics(val bool) {
	m.data.TestingVectorizeInjectPanics = val
}

// SetOptimizerFKCascadesLimit sets the optimizer's FK cascade limit.
func (m *sessionDataMutator) SetOptimizerFKCascadesLimit(val int) {
	m.data.OptimizerFKCascadesLimit = val
}

// SetOptimizerUseHistograms toggles histogram use in the optimizer.
func (m *sessionDataMutator) SetOptimizerUseHistograms(val bool) {
	m.data.OptimizerUseHistograms = val
}

// SetOptimizerUseMultiColStats toggles multi-column stats in the optimizer.
func (m *sessionDataMutator) SetOptimizerUseMultiColStats(val bool) {
	m.data.OptimizerUseMultiColStats = val
}

// SetLocalityOptimizedSearch toggles locality-optimized search.
func (m *sessionDataMutator) SetLocalityOptimizedSearch(val bool) {
	m.data.LocalityOptimizedSearch = val
}

// SetImplicitSelectForUpdate toggles implicit SELECT FOR UPDATE locking.
func (m *sessionDataMutator) SetImplicitSelectForUpdate(val bool) {
	m.data.ImplicitSelectForUpdate = val
}

// SetInsertFastPath toggles the insert fast path.
func (m *sessionDataMutator) SetInsertFastPath(val bool) {
	m.data.InsertFastPath = val
}

// SetSerialNormalizationMode sets how SERIAL columns are normalized.
func (m *sessionDataMutator) SetSerialNormalizationMode(val sessiondata.SerialNormalizationMode) {
	m.data.SerialNormalizationMode = val
}

// SetSafeUpdates sets the sql_safe_updates setting.
func (m *sessionDataMutator) SetSafeUpdates(val bool) {
	m.data.SafeUpdates = val
}

// SetPreferLookupJoinsForFKs toggles preferring lookup joins for FK checks.
func (m *sessionDataMutator) SetPreferLookupJoinsForFKs(val bool) {
	m.data.PreferLookupJoinsForFKs = val
}

// UpdateSearchPath replaces the session's search path with the given paths.
func (m *sessionDataMutator) UpdateSearchPath(paths []string) {
	m.data.SearchPath = m.data.SearchPath.UpdatePaths(paths)
}

// SetLocation sets the session time zone and reports it to the client via a
// buffered ParamStatusUpdate.
func (m *sessionDataMutator) SetLocation(loc *time.Location) {
	m.data.Location = loc
	m.paramStatusUpdater.BufferParamStatusUpdate("TimeZone", sessionDataTimeZoneFormat(loc))
}

// SetReadOnly sets the read-only state of the current transaction.
func (m *sessionDataMutator) SetReadOnly(val bool) {
	// The read-only state is special; it's set as a session variable (SET
	// transaction_read_only=<>), but it represents per-txn state, not
	// per-session. There's no field for it in the SessionData struct. Instead, we
	// call into the connEx, which modifies its TxnState.
	// NOTE(andrei): I couldn't find good documentation on transaction_read_only,
	// but I've tested its behavior in Postgres 11.
	if m.setCurTxnReadOnly != nil {
		m.setCurTxnReadOnly(val)
	}
}

// SetStmtTimeout sets the per-statement timeout.
func (m *sessionDataMutator) SetStmtTimeout(timeout time.Duration) {
	m.data.StmtTimeout = timeout
}

// SetIdleInSessionTimeout sets the idle-in-session timeout.
func (m *sessionDataMutator) SetIdleInSessionTimeout(timeout time.Duration) {
	m.data.IdleInSessionTimeout = timeout
}

// SetIdleInTransactionSessionTimeout sets the idle-in-transaction timeout.
func (m *sessionDataMutator) SetIdleInTransactionSessionTimeout(timeout time.Duration) {
	m.data.IdleInTransactionSessionTimeout = timeout
}

// SetAllowPrepareAsOptPlan toggles PREPARE ... AS OPT PLAN support.
func (m *sessionDataMutator) SetAllowPrepareAsOptPlan(val bool) {
	m.data.AllowPrepareAsOptPlan = val
}

// SetSaveTablesPrefix sets the prefix used for save_tables.
func (m *sessionDataMutator) SetSaveTablesPrefix(prefix string) {
	m.data.SaveTablesPrefix = prefix
}

// SetTempTablesEnabled toggles temporary table support.
func (m *sessionDataMutator) SetTempTablesEnabled(val bool) {
	m.data.TempTablesEnabled = val
}

// SetImplicitColumnPartitioningEnabled toggles implicit column partitioning.
func (m *sessionDataMutator) SetImplicitColumnPartitioningEnabled(val bool) {
	m.data.ImplicitColumnPartitioningEnabled = val
}

// SetDropEnumValueEnabled toggles DROP of enum values.
func (m *sessionDataMutator) SetDropEnumValueEnabled(val bool) {
	m.data.DropEnumValueEnabled = val
}

// SetOverrideMultiRegionZoneConfigEnabled toggles overriding multi-region
// zone configs.
func (m *sessionDataMutator) SetOverrideMultiRegionZoneConfigEnabled(val bool) {
	m.data.OverrideMultiRegionZoneConfigEnabled = val
}

// SetHashShardedIndexesEnabled toggles hash-sharded index support.
func (m *sessionDataMutator) SetHashShardedIndexesEnabled(val bool) {
	m.data.HashShardedIndexesEnabled = val
}

// SetDisallowFullTableScans toggles rejection of full table scans.
func (m *sessionDataMutator) SetDisallowFullTableScans(val bool) {
	m.data.DisallowFullTableScans = val
}

// SetAlterColumnTypeGeneral toggles general ALTER COLUMN TYPE conversions.
func (m *sessionDataMutator) SetAlterColumnTypeGeneral(val bool) {
	m.data.AlterColumnTypeGeneralEnabled = val
}

// SetUniqueWithoutIndexConstraints toggles UNIQUE WITHOUT INDEX constraints.
// TODO(rytaft): remove this once unique without index constraints are fully
// supported.
func (m *sessionDataMutator) SetUniqueWithoutIndexConstraints(val bool) {
	m.data.EnableUniqueWithoutIndexConstraints = val
}

// SetUseNewSchemaChanger selects the schema changer implementation.
func (m *sessionDataMutator) SetUseNewSchemaChanger(val sessiondata.NewSchemaChangerMode) {
	m.data.NewSchemaChangerMode = val
}

// SetStreamReplicationEnabled toggles stream replication.
func (m *sessionDataMutator) SetStreamReplicationEnabled(val bool) {
	m.data.EnableStreamReplication = val
}

// RecordLatestSequenceVal records that value to which the session incremented
// a sequence.
func (m *sessionDataMutator) RecordLatestSequenceVal(seqID uint32, val int64) {
	m.data.SequenceState.RecordValue(seqID, val)
}

// SetNoticeDisplaySeverity sets the NoticeDisplaySeverity for the given session.
func (m *sessionDataMutator) SetNoticeDisplaySeverity(severity pgnotice.DisplaySeverity) {
	m.data.NoticeDisplaySeverity = severity
}

// initSequenceCache creates an empty sequence cache instance for the session.
func (m *sessionDataMutator) initSequenceCache() {
	m.data.SequenceCache = sessiondata.SequenceCache{}
}

// SetStubCatalogTablesEnabled sets default value for stub_catalog_tables.
func (m *sessionDataMutator) SetStubCatalogTablesEnabled(enabled bool) {
	m.data.StubCatalogTablesEnabled = enabled
}
// sqlStatsCollector bundles the state needed to record per-statement and
// per-transaction statistics for one session.
type sqlStatsCollector struct {
	// sqlStats tracks per-application statistics for all applications on each
	// node.
	sqlStats *sqlStats
	// appStats track per-application SQL usage statistics. This is a pointer
	// into sqlStats set as the session's current app.
	appStats *appStats
	// phaseTimes tracks session-level phase times.
	phaseTimes phaseTimes
	// previousPhaseTimes tracks the session-level phase times for the previous
	// query. This enables the `SHOW LAST QUERY STATISTICS` observer statement.
	previousPhaseTimes phaseTimes
}
// newSQLStatsCollector creates an instance of sqlStatsCollector. Note that
// phaseTimes is an array, not a slice, so dereferencing it here performs a
// copy-by-value; the collector owns its copy.
func newSQLStatsCollector(
	sqlStats *sqlStats, appStats *appStats, phaseTimes *phaseTimes,
) *sqlStatsCollector {
	collector := &sqlStatsCollector{
		sqlStats:   sqlStats,
		appStats:   appStats,
		phaseTimes: *phaseTimes,
	}
	return collector
}
// recordStatement records stats for one statement. samplePlanDescription can
// be nil, as these are only sampled periodically per unique fingerprint. It
// returns the statement ID of the recorded statement.
// This is a thin delegation to appStats.recordStatement with the same
// arguments in the same order.
func (s *sqlStatsCollector) recordStatement(
	stmt *Statement,
	samplePlanDescription *roachpb.ExplainTreePlanNode,
	distSQLUsed bool,
	vectorized bool,
	implicitTxn bool,
	fullScan bool,
	automaticRetryCount int,
	numRows int,
	err error,
	parseLat, planLat, runLat, svcLat, ovhLat float64,
	stats topLevelQueryStats,
) roachpb.StmtID {
	return s.appStats.recordStatement(
		stmt, samplePlanDescription, distSQLUsed, vectorized, implicitTxn, fullScan,
		automaticRetryCount, numRows, err, parseLat, planLat, runLat, svcLat,
		ovhLat, stats,
	)
}
// recordTransaction records statistics for one transaction.
// It first bumps the per-application transaction counters (using txnTimeSec,
// the txn event and implicit-ness), then records the detailed per-transaction
// stats. Note that txnTimeSec is only consumed by the counters call, while
// the latency durations go to recordTransaction.
func (s *sqlStatsCollector) recordTransaction(
	key txnKey,
	txnTimeSec float64,
	ev txnEvent,
	implicit bool,
	retryCount int,
	statementIDs []roachpb.StmtID,
	serviceLat time.Duration,
	retryLat time.Duration,
	commitLat time.Duration,
	numRows int,
	collectedExecStats bool,
	execStats execstats.QueryLevelStats,
	rowsRead int64,
	bytesRead int64,
) {
	s.appStats.recordTransactionCounts(txnTimeSec, ev, implicit)
	s.appStats.recordTransaction(
		key, int64(retryCount), statementIDs, serviceLat, retryLat, commitLat,
		numRows, collectedExecStats, execStats, rowsRead, bytesRead,
	)
}
// reset re-initializes the collector for a new query, carrying the current
// phaseTimes over into previousPhaseTimes (for SHOW LAST QUERY STATISTICS).
func (s *sqlStatsCollector) reset(sqlStats *sqlStats, appStats *appStats, phaseTimes *phaseTimes) {
	// Take the address of the current phase times; the copy into the new
	// struct literal happens before *s is overwritten, so no data is lost.
	previousPhaseTimes := &s.phaseTimes
	*s = sqlStatsCollector{
		sqlStats:           sqlStats,
		appStats:           appStats,
		previousPhaseTimes: *previousPhaseTimes,
		phaseTimes:         *phaseTimes,
	}
}
|
package cmd
import (
"auth/internal/config"
"database/sql"
"errors"
"fmt"
_ "github.com/lib/pq"
migrate "github.com/rubenv/sql-migrate"
"github.com/spf13/cobra"
"go.uber.org/zap"
)
var (
	// migrateCmd dispatches to the "up" / "down" sub-commands.
	migrateCmd = &cobra.Command{
		Use:   "migrate [sub]",
		Short: "migration db auth",
		Run: func(cmd *cobra.Command, args []string) {
			if len(args) == 0 {
				// Fixed typo in the user-facing message ("arrgument").
				zap.L().Fatal("Wrong argument, enter up or down")
			}
			switch args[0] {
			case "up":
				upCmd.Run(cmd, args)
			case "down":
				downCmd.Run(cmd, args)
			default:
				// Previously an unknown sub-command was silently ignored;
				// report it instead.
				zap.L().Fatal("Wrong argument, enter up or down")
			}
		},
	}
	// upCmd applies all pending migrations.
	upCmd = &cobra.Command{
		Use:   "up [no options!]",
		Short: "migrate up",
		Run: func(cmd *cobra.Command, args []string) {
			var err error
			defer func() {
				if err != nil {
					zap.L().Fatal("", zap.Error(err))
				}
			}()
			db, err := migrationInit()
			if err != nil {
				return
			}
			defer db.Close()
			up(db)
		},
	}
	// downCmd rolls back the most recent migration.
	downCmd = &cobra.Command{
		Use:   "down [no options!]",
		Short: "migrate down",
		Run: func(cmd *cobra.Command, args []string) {
			var err error
			defer func() {
				if err != nil {
					zap.L().Fatal("", zap.Error(err))
				}
			}()
			db, err := migrationInit()
			if err != nil {
				return
			}
			defer db.Close()
			down(db)
		},
	}
	// migrations holds the in-memory migration set for the auth database.
	migrations = &migrate.MemoryMigrationSource{
		Migrations: []*migrate.Migration{
			&migrate.Migration{
				Id: "1",
				Up: []string{
					`CREATE TABLE users(
						uid uuid PRIMARY KEY NOT NULL,
						email text NOT NULL,
						password text NOT NULL,
						first_name text,
						last_name text,
						created_at TIMESTAMP
					);`,
					`CREATE TABLE sessions(
						session_hash text PRIMARY KEY,
						user_id uuid references users(uid),
						ip_address text not null,
						user_agent text default '',
						created_at TIMESTAMP
					);`,
					`CREATE EXTENSION pgcrypto;`},
				Down: []string{
					"DROP TABLE sessions;",
					"DROP TABLE users;",
					"DROP EXTENSION pgcrypto;"},
			},
		},
	}
)
// init registers the migrate command on the application's root command.
func init() {
	RootCmd.AddCommand(migrateCmd)
}
// migrationInit reads the service configuration and opens (and verifies) a
// PostgreSQL connection for running migrations. The caller owns the returned
// *sql.DB and must Close it.
func migrationInit() (*sql.DB, error) {
	log, err := zap.NewProduction()
	if err != nil {
		// BUG FIX: the previous code called log.Fatal here, but log is nil
		// when zap.NewProduction fails, which would panic with a nil pointer
		// dereference instead of logging. Use the global logger instead.
		zap.L().Fatal("", zap.Error(err))
	}
	cfg, err := config.Read()
	if err != nil {
		log.Warn("", zap.Error(errors.New("wrong cfg")))
		return nil, err
	}
	args := fmt.Sprintf(
		"sslmode=%s host=%s port=%s user=%s password='%s' dbname=%s",
		cfg.DB.SSL,
		cfg.DB.Host,
		cfg.DB.Port,
		cfg.DB.User,
		cfg.DB.Password,
		cfg.DB.DatabaseName,
	)
	postDB, err := sql.Open("postgres", args)
	if err != nil {
		zap.L().Warn("", zap.Error(err))
		return nil, err
	}
	// sql.Open only validates its arguments; Ping verifies that the server
	// is actually reachable before migrations start.
	if err := postDB.Ping(); err != nil {
		postDB.Close()
		zap.L().Warn("", zap.Error(err))
		return nil, err
	}
	return postDB, nil
}
// up applies every pending migration and logs how many were applied.
func up(postDB *sql.DB) {
	applied, err := migrate.Exec(postDB, "postgres", migrations, migrate.Up)
	if err != nil {
		zap.L().Warn("", zap.Error(err))
		return
	}
	zap.L().Info("", zap.Int("up migration applied:", applied))
}
// down rolls back at most one migration and logs how many were reverted.
func down(postDB *sql.DB) {
	reverted, err := migrate.ExecMax(postDB, "postgres", migrations, migrate.Down, 1)
	if err != nil {
		zap.L().Warn("", zap.Error(err))
		return
	}
	zap.L().Info("", zap.Int("down migration applied:", reverted))
}
|
package fakes
import "github.com/kkallday/tracker-cli/trackerapi"
// Logger is a test double that records invocations of Log and LogStories
// so tests can assert on call counts and captured arguments.
type Logger struct {
	// LogCall records how many times Log was invoked and the last message.
	LogCall struct {
		CallCount int
		Receives  struct {
			Message string
		}
	}
	// LogStoriesCall records how many times LogStories was invoked and the
	// last stories slice.
	LogStoriesCall struct {
		CallCount int
		Receives  struct {
			Stories []trackerapi.Story
		}
	}
}
// Log captures the message and bumps the invocation counter.
func (l *Logger) Log(message string) {
	l.LogCall.Receives.Message = message
	l.LogCall.CallCount++
}
// LogStories captures the variadic stories and bumps the invocation counter.
func (l *Logger) LogStories(stories ...trackerapi.Story) {
	l.LogStoriesCall.Receives.Stories = stories
	l.LogStoriesCall.CallCount++
}
|
package leetcode
import (
"sort"
"strings"
)
// isAnagram reports whether t is an anagram of s, i.e. both strings contain
// exactly the same characters with the same multiplicities. It sorts the
// characters of each string and compares the results.
func isAnagram(s string, t string) bool {
	// Anagrams must have the same byte length; cheap early exit.
	if len(s) != len(t) {
		return false
	}
	s1 := strings.Split(s, "")
	t1 := strings.Split(t, "")
	sort.Strings(s1)
	sort.Strings(t1)
	// Idiom fix: compare with == rather than strings.Compare, which is
	// intended only for three-way comparison (staticcheck flags this).
	return strings.Join(s1, "") == strings.Join(t1, "")
}
|
package contextkeys
// key is an unexported type for context keys defined in this package, which
// prevents collisions with keys defined in other packages.
type key int

const (
	// KeyAPIVersion define the key to the api version value in the context
	KeyAPIVersion key = 1
)
|
package version
// Version is the git version that produced this binary.
// NOTE(review): presumably injected at build time (e.g. via -ldflags);
// confirm against the build scripts.
var Version string

// When is the datestamp that produced this binary.
var When string
|
package utils
// === IMPORTS ===
import (
"regexp"
)
// === PUBLIC METHODS ===
// domainRE matches lowercase ASCII domains: one or more dot-terminated labels
// (1-63 chars each, no leading/trailing hyphen) followed by a final label of
// at least two characters. Compiled once at package init instead of on every
// call, which the previous implementation did.
var domainRE = regexp.MustCompile(`^(?:[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\.)+[a-z0-9][a-z0-9-]{0,61}[a-z0-9]$`)

// IsValidDomain checks if a domain has a valid format.
func IsValidDomain(domain string) bool {
	return domainRE.MatchString(domain)
}
// urlRE matches scheme-prefixed or www-prefixed URLs with an optional port,
// path, query and fragment. Compiled once at package init instead of on every
// call, which the previous implementation did.
var urlRE = regexp.MustCompile(`^((([A-Za-z]{3,9}:(?:\/\/)?)(?:[-;:&=\+\$,\w]+@)?[A-Za-z0-9.-]+(:[0-9]+)?|(?:www.|[-;:&=\+\$,\w]+@)[A-Za-z0-9.-]+)((?:\/[\+~%\/.\w-_]*)?\??(?:[-\+=&;%@.\w_]*)#?(?:[\w]*))?)$`)

// IsValidURL checks if a URL has a valid format.
func IsValidURL(url string) bool {
	return urlRE.MatchString(url)
}
|
package goderp
import (
"fmt"
"github.com/BurntSushi/toml"
"os"
"reflect"
"strconv"
"strings"
"unicode/utf8"
)
// Info holds the metadata registered for a configuration key.
type Info struct {
	// descr is the human-readable description of the key.
	descr string
	// group is the group the key belongs to (used when dumping).
	group string
	// def is the default value the key was defined with.
	def interface{}
}

// Config stores configuration values and their metadata.
type Config struct {
	// Records maps keys to their current values.
	Records map[string]interface{}
	// Descriptions maps keys to their registered metadata.
	Descriptions map[string]Info
	// enableEnvVars, when true, lets Get consult environment variables
	// before falling back to Records.
	enableEnvVars bool
}
// New returns an empty configuration with environment-variable
// overrides disabled.
func New() *Config {
	return &Config{
		Records:       make(map[string]interface{}),
		Descriptions:  make(map[string]Info),
		enableEnvVars: false,
	}
}
// EnableEnv turns on environment-variable overrides: after this call,
// Get consults os.Getenv(key) before the stored record.
func (c *Config) EnableEnv() {
	c.enableEnvVars = true
}
// Define registers key with its default value, a human-readable
// description and a display group. The default is also recorded in
// Descriptions so Dump can still show it after Parse overwrites the
// live value in Records.
func (c *Config) Define(key string, value interface{}, descr string, group string) {
	c.Records[key] = value
	//fmt.Printf("%s:(%s) %s\n", key, reflect.TypeOf(c.Records[key]), c.Records[key])
	c.Descriptions[key] = Info{descr: descr, group: group, def: value}
}
// Get returns the value stored for key. When environment overrides are
// enabled (see EnableEnv) and an environment variable named exactly
// like the key is set and non-empty, that string is coerced to the
// dynamic type of the stored record and returned instead.
//
// NOTE(review): a coercion failure is only printed to stdout; the
// value returned in that case is whatever coerce handed back (the
// stored value), so callers cannot distinguish an override failure.
func (c *Config) Get(key string) interface{} {
	if c.enableEnvVars {
		tmp := os.Getenv(key)
		if tmp != "" {
			value, err := coerce(c.Records[key], tmp)
			if err != nil {
				fmt.Printf("%s\n", err)
			}
			return value
		}
	}
	return c.Records[key]
}
// GetInt returns the value for key asserted to int; it panics if the
// stored value (or its environment override) is not an int.
func (c *Config) GetInt(key string) int {
	return c.Get(key).(int)
}

// GetFloat returns the value for key asserted to float64; it panics on
// any other dynamic type.
func (c *Config) GetFloat(key string) float64 {
	return c.Get(key).(float64)
}

// GetString returns the value for key asserted to string; it panics on
// any other dynamic type.
func (c *Config) GetString(key string) string {
	return c.Get(key).(string)
}

// GetBool returns the value for key asserted to bool; it panics on any
// other dynamic type.
func (c *Config) GetBool(key string) bool {
	return c.Get(key).(bool)
}

// GetDescription returns the description recorded at Define time
// (empty string for unknown keys).
func (c *Config) GetDescription(key string) string {
	return c.Descriptions[key].descr
}

// GetGroup returns the display group recorded at Define time.
func (c *Config) GetGroup(key string) string {
	return c.Descriptions[key].group
}

// GetDefault returns the default value recorded at Define time.
func (c *Config) GetDefault(key string) interface{} {
	return c.Descriptions[key].def
}
// Parse loads key/value pairs from the TOML file at filename into the
// configuration, overwriting any matching existing records.
func (c *Config) Parse(filename string) (err error) {
	_, err = toml.DecodeFile(filename, &c.Records)
	return err
}
// Dump prints a commented, TOML-like listing of every record to
// stdout, grouped by the group given at Define time: a centered
// "#### group ####" banner, then per key its description, default and
// current value. Group and key order follow Go's randomized map
// iteration, so output order varies between runs.
func (c *Config) Dump() {
	// Bucket the keys by their display group.
	groupkeys := make(map[string][]string)
	var group string
	for k := range c.Records {
		group = c.GetGroup(k)
		if _, ok := groupkeys[group]; !ok {
			groupkeys[group] = make([]string, 0)
		}
		groupkeys[group] = append(groupkeys[group], k)
	}
	// Go-style names replace the original ALL_CAPS constants.
	const separator = "#"
	const maxLen = 80
	for k := range groupkeys {
		// Center the group name inside a banner of '#' runes.
		sepSize := ((maxLen - utf8.RuneCountInString(k)) / 2) - 1
		fmt.Print(strings.Repeat(separator, sepSize) + " " + k + " " + strings.Repeat(separator, sepSize))
		if sepSize*2+utf8.RuneCountInString(k)+2 < maxLen {
			// Pad banners that came up one rune short of maxLen.
			fmt.Print(separator)
		}
		fmt.Print("\n")
		for _, i := range groupkeys[k] {
			fmt.Printf("%s %s\n", strings.Repeat(separator, 2), c.GetDescription(i))
			fmt.Printf("%s Defaults to: ", strings.Repeat(separator, 2))
			fmt.Print(c.GetDefault(i))
			fmt.Printf("\n%s=", i)
			v := reflect.ValueOf(c.Get(i))
			// Go switch cases do not fall through; the redundant
			// "break" in the original has been removed.
			switch v.Kind() {
			case reflect.String:
				// Quote string values so the dump stays TOML-parseable.
				fmt.Printf("\"%s\"\n", c.Get(i))
			default:
				fmt.Println(c.Get(i))
			}
		}
		fmt.Print("\n")
	}
}
// coerce converts replacement (typically an environment-variable
// string) to the dynamic type of current. On parse failure the
// original value is returned alongside the error; kinds with no
// conversion rule are returned unchanged with a nil error.
//
// The parsed value is converted back to the concrete type of current
// (e.g. int rather than int64) so type assertions such as
// Get(key).(int) in GetInt keep working after an override — the
// original code returned int64/float64 and made those assertions
// panic.
func coerce(current interface{}, replacement string) (interface{}, error) {
	v := reflect.ValueOf(current)
	switch v.Kind() {
	case reflect.String:
		return replacement, nil
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		// Base 0 accepts decimal, 0x hex and 0 octal prefixes.
		intValue, err := strconv.ParseInt(replacement, 0, 64)
		if err != nil {
			return current, err
		}
		return reflect.ValueOf(intValue).Convert(v.Type()).Interface(), nil
	case reflect.Bool:
		boolValue, err := strconv.ParseBool(replacement)
		if err != nil {
			return current, err
		}
		return boolValue, nil
	case reflect.Float32, reflect.Float64:
		floatValue, err := strconv.ParseFloat(replacement, 64)
		if err != nil {
			return current, err
		}
		return reflect.ValueOf(floatValue).Convert(v.Type()).Interface(), nil
	}
	return current, nil
}
|
package model
import (
"github.com/holywolfchan/yuncang/db"
"github.com/holywolfchan/yuncang/utils/logs"
)
// dropAndCreate, when true, makes Migrate drop an existing table
// before re-creating it — destructive; keep false outside development.
var dropAndCreate bool = false

// DB is the shared database engine taken from the db package (the
// IsTableExist/DropTables/Sync2 API matches xorm).
var DB = db.Engine

// TableStruct lists every model whose table schema is synchronized at
// package init time.
var TableStruct = []interface{}{
	new(User),
	new(Role),
	new(Domain),
	new(MaterialList),
	new(MaterialLogin),
	new(MaterialLogout),
	new(MaterialOrder),
	new(MaterialType),
	new(MaterialWarehouse),
	new(EntityStatus),
	new(UnitType),
	new(Factory),
	new(Supplier),
	new(MaterialStockpartial),
	new(MaterialLogoutdoc),
}
// init synchronizes the schema of every registered model as soon as
// the package is loaded. (Log messages are Chinese for "table schema
// sync started/finished".)
func init() {
	logs.Info("同步表结构开始...")
	for _, v := range TableStruct {
		Migrate(v)
	}
	logs.Info("同步表结构结束!\n")
}
// Migrate synchronizes the table schema for the given model struct.
// When dropAndCreate is set and the table already exists, the table is
// dropped first so Sync2 recreates it from scratch. Errors are logged
// (in Chinese) and abort the migration of this model only.
func Migrate(v interface{}) {
	if has, err := DB.IsTableExist(v); err == nil && has && dropAndCreate {
		if dropErr := DB.DropTables(v); dropErr != nil {
			logs.Errorf("删除表出错:表%T,%v", v, dropErr)
			return
		}
		logs.Infof("删除表成功:表%T", v)
	}
	if syncErr := DB.Sync2(v); syncErr != nil {
		logs.Errorf("同步表结构出错:表[%T],%v", v, syncErr)
		return
	}
	logs.Infof("同步表结构成功:表%T", v)
}
|
package model
import (
"time"
"gorm.io/gorm"
)
// CtxKeyAuthorizedUser is the context key under which the authorized
// user is stored.
const CtxKeyAuthorizedUser = "ckau"

// CtxKeyViewPasswordVerified is the context key marking that the view
// password has been verified.
const CtxKeyViewPasswordVerified = "ckvpv"

// CacheKeyOauth2State is the cache key for the OAuth2 state value.
const CacheKeyOauth2State = "p:a:state"

// Common holds the shared gorm model columns embedded by other models:
// primary key, create/update timestamps and soft-delete marker.
type Common struct {
	ID uint64 `gorm:"primaryKey"`
	CreatedAt time.Time `gorm:"index;<-:create"` // write-once on create
	UpdatedAt time.Time `gorm:"autoUpdateTime"`
	DeletedAt gorm.DeletedAt `gorm:"index"` // soft delete
}

// Response is the generic JSON envelope returned by handlers.
type Response struct {
	Code int `json:"code,omitempty"`
	Message string `json:"message,omitempty"`
	Result interface{} `json:"result,omitempty"`
}
|
package main
import (
"fmt"
"log"
"os"
"runtime"
"time"
"gopkg.in/alecthomas/kingpin.v1"
)
const (
	defaultBufsizeTCP = 128000 // 128K application buffer default for TCP
	defaultBufsizeUDP = 8000 // 8K application buffer default for UDP
)

var (
	// bufsize is the application read/write buffer size in bytes.
	bufsize int
	// unit is the transfer-rate unit selected via --format.
	unit *Unit
	// makeStopTimer builds the channel that ends the whole run.
	makeStopTimer func() <-chan time.Time
	// makeIntervalTimer builds the channel for periodic reports.
	makeIntervalTimer func() <-chan time.Time
)

// One-character unit selectors: lowercase for bit rates, uppercase for
// byte rates, 'A' for automatic selection.
const (
	unitAuto = 'A'
	unitKbps = 'k'
	unitMbps = 'm'
	unitGbps = 'g'
	unitKB_s = 'K'
	unitMB_s = 'M'
	unitGB_s = 'G'
)

// unitMultipliers maps the buffer-size suffixes accepted by --len to
// byte multipliers; only K/M/G are valid there (absent labels yield 0).
var unitMultipliers = map[rune]int{
	unitKB_s: 1024, unitMB_s: 1024 * 1024, unitGB_s: 1024 * 1024 * 1024,
}

// Unit describes how to display a rate: Divisor converts bytes/second
// into the unit (e.g. 125 bytes/s per kbps), Label is its suffix.
type Unit struct {
	Divisor float64
	Label string
}

// autoUnit is the zero-valued sentinel for automatic unit selection.
var autoUnit = &Unit{}

var units = map[rune]*Unit{
	unitAuto: autoUnit,
	unitKbps: &Unit{125, "kbps"}, unitMbps: &Unit{1.25e5, "mbps"},
	unitGbps: &Unit{1.25e8, "gbps"}, unitKB_s: &Unit{1e3, "KB/s"},
	unitMB_s: &Unit{1e6, "MB/s"}, unitGB_s: &Unit{1e9, "GB/s"},
}
// allUnits lists every accepted unit selector as a one-character
// string, in the order auto, bit rates, byte rates — suitable for
// kingpin's Enum validation.
func allUnits() []string {
	selectors := []rune{
		unitAuto,
		unitKbps, unitMbps, unitGbps,
		unitKB_s, unitMB_s, unitGB_s,
	}
	out := make([]string, 0, len(selectors))
	for _, r := range selectors {
		out = append(out, string(r))
	}
	return out
}
// die prints msg to stdout prefixed with "error:" and terminates the
// process with exit status 1.
func die(msg string) {
	fmt.Printf("error: %s\n", msg)
	os.Exit(1)
}
// makeTimer returns a factory producing one-shot timer channels that
// fire after the given number of seconds. For a non-positive duration
// the factory returns a nil channel, which blocks forever in a select
// — i.e. "no timer".
func makeTimer(seconds float64) func() <-chan time.Time {
	if seconds <= 0 {
		return func() <-chan time.Time { return nil }
	}
	d := time.Duration(seconds * float64(time.Second))
	return func() <-chan time.Time { return time.After(d) }
}
// main parses the CLI (kingpin), derives protocol and buffer size,
// then runs either the client or the server until completion.
func main() {
	runtime.GOMAXPROCS(runtime.NumCPU())
	log.SetFlags(log.LstdFlags | log.Lshortfile)
	app := kingpin.New("thruput", "a tcp/udp network throughput measurement tool")
	port := app.Flag("port", "specify port").Short('p').Default("10101").Int()
	udp := app.Flag("udp", "use UDP instead of TCP").Short('u').Bool()
	length := app.Flag("len", "application buffer size (128K for tcp, 8K for udp)").Short('l').PlaceHolder("#[KMG]").String()
	clientCmd := app.Command("client", "run as client")
	host := clientCmd.Arg("host", "host to connect to").Required().String()
	numConns := clientCmd.Flag("nclients", "number of concurrent clients").Short('P').Default("1").Int()
	format := clientCmd.Flag("format", "transfer rate units [A=auto]").Short('f').Default(string(unitAuto)).Enum(allUnits()...)
	duration := clientCmd.Flag("duration", "duration in seconds [0=forever]").Short('t').Default("10").Float()
	interval := clientCmd.Flag("interval", "interval in seconds [0=none]").Short('i').Default("1").Float()
	sysBufSize := clientCmd.Flag("sys", "OS socket buffer size").Int()
	serverCmd := app.Command("server", "run as server")
	parsed := kingpin.MustParse(app.Parse(os.Args[1:]))
	var protocol = "tcp"
	bufsize = defaultBufsizeTCP
	if *udp {
		protocol = "udp"
		bufsize = defaultBufsizeUDP
	}
	// Optional --len override: a number followed by a unit letter, e.g. 64K.
	if len(*length) > 0 {
		var label rune
		_, err := fmt.Sscanf(*length, "%d%c", &bufsize, &label)
		if err != nil {
			log.Fatal(err)
		}
		// NOTE(review): a zero size passes this check and is only
		// rejected below with the misleading "invalid unit label"
		// message; the check presumably meant bufsize <= 0.
		if bufsize < 0 {
			die("buffer size must be > 0")
		}
		// Unknown labels (anything but K/M/G) look up the map's zero
		// value, zeroing bufsize and triggering the check below.
		bufsize *= unitMultipliers[label]
		if bufsize == 0 {
			die("invalid unit label")
		}
	}
	// In server mode *host is the empty default, yielding ":port",
	// which listens on all interfaces.
	address := fmt.Sprintf("%s:%d", *host, *port)
	type Runnable interface {
		Run() error
	}
	var runnable Runnable
	var err error
	switch {
	case parsed == clientCmd.FullCommand():
		u := []rune(*format)[0]
		unit = units[u]
		if unit == nil {
			die("invalid format")
		}
		if *numConns < 1 {
			die("nclients must be > 0")
		}
		makeIntervalTimer = makeTimer(*interval)
		makeStopTimer = makeTimer(*duration)
		runnable, err = NewClient(protocol, address, *sysBufSize, *numConns)
		if err != nil {
			log.Fatal(err)
		}
	case parsed == serverCmd.FullCommand():
		runnable, err = NewServer(protocol, address)
		if err != nil {
			log.Fatal(err)
		}
	default:
		die("Must run as *either* server or client")
	}
	if err = runnable.Run(); err != nil {
		log.Fatal(err)
	}
}
|
package main
import (
"time"
"os"
"fmt"
"context"
"os/signal"
"net/http"
"encoding/json"
"github.com/cap-diego/dfa-minimization-algorithm"
)
func enableCors(w *http.ResponseWriter) {
(*w).Header().Set("Access-Control-Allow-Origin", "*")
}
// minimizeAutomata is the POST /minimize handler: it decodes a DFA
// from the JSON request body, runs Hopcroft's minimization and writes
// the minimized DFA back as JSON. OPTIONS requests (CORS preflight)
// are answered with 202 and no body; every other non-POST method is a
// 400.
//
// NOTE(review): decode/validation failures answer 404 (400 seems
// intended) and success answers 202 Accepted rather than 200 — confirm
// whether clients depend on these codes before changing them.
func minimizeAutomata(w http.ResponseWriter, r *http.Request) {
	enableCors(&w)
	w.Header().Set("Content-type", "application/json")
	w.Header().Add("Access-Control-Allow-Headers", "Content-Type")
	var M dfa.DFA
	if r.Method != http.MethodPost {
		if r.Method == http.MethodOptions {
			w.WriteHeader(http.StatusAccepted)
			return
		}
		http.Error(w, "error, method post expected", 400)
		return
	}
	if r.Body == nil {
		http.Error(w, "error, body expected", 400)
		return
	}
	err := json.NewDecoder(r.Body).Decode(&M)
	if err != nil {
		http.Error(w, fmt.Sprintf("error while decoding dfa, %s\n", err.Error()), 404)
		return
	}
	// Error message is Spanish: "required fields are missing".
	if !hasMinimumFields(&M) {
		http.Error(w, "error, faltan campos obligatorios", 404)
		return
	}
	Min := dfa.HopcroftDFAMin(M)
	w.WriteHeader(http.StatusAccepted)
	json.NewEncoder(w).Encode(Min)
}
// main starts the HTTPS minimization server in a goroutine and blocks
// until the process receives an interrupt signal.
func main() {
	fmt.Print("Inicializando server\n")
	http.HandleFunc("/minimize", minimizeAutomata)
	go func() {
		// err := http.ListenAndServe(":8080", nil)
		err := http.ListenAndServeTLS(":443", "certificate.crt", "private.key", nil)
		if err != nil {
			panic(err)
		}
	}()
	// Buffered channel: signal.Notify never blocks on delivery, so an
	// unbuffered channel can drop a signal (go vet flags this).
	sigChan := make(chan os.Signal, 1)
	// NOTE: os.Kill (SIGKILL) cannot actually be trapped; only
	// Interrupt is deliverable here. Kept for parity with the original.
	signal.Notify(sigChan, os.Interrupt, os.Kill)
	sig := <-sigChan // block until a signal arrives
	fmt.Print("Terminate: ", sig)
	// The original discarded the CancelFunc (go vet "lostcancel"),
	// leaking the context's resources; release it explicitly.
	_, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
}
func hasMinimumFields(M *dfa.DFA) bool {
if M.States.IsEmpty() {
return false
}
if len(M.Alphabet) == 0{
return false
}
if M.FinalStates.IsEmpty() {
return false
}
return true
} |
package main
import (
"bufio"
"fmt"
"log"
"os"
)
var (
	// code maps an input board character to its tile code; the mirror
	// characters ('\', '/', 'X') map to 0 like empty space — rays are
	// seeded from them separately in main.
	code map[rune]int
	// tile is the reverse mapping used when rendering the board.
	tile map[int]rune
)

// init populates the character/tile-code lookup tables.
func init() {
	code = map[rune]int{' ': 0, '\\': 0, '/': 0, 'X': 0, '*': 1, '#': 2, 'o': 3}
	tile = map[int]rune{0: ' ', 1: '*', 2: '#', 3: 'o'}
}
// ray describes a diagonally travelling ray on the 10x10 board.
// dir selects the diagonal: 0 = up-right, 1 = down-right,
// 2 = down-left, 3 = up-left (x grows right, y grows down).
// ttl limits how far the ray propagates.
type ray struct {
	x   int
	y   int
	dir int
	ttl int
}

// nextTile returns the tile code one diagonal step ahead of the ray
// together with that tile's coordinates, or (-1, 0, 0) when the step
// would leave the 10x10 board (or dir is invalid).
func (a ray) nextTile(s []int) (int, int, int) {
	// Per-direction (dx, dy) diagonal step deltas, indexed by dir.
	steps := [4][2]int{{1, -1}, {1, 1}, {-1, 1}, {-1, -1}}
	if a.dir < 0 || a.dir > 3 {
		return -1, 0, 0
	}
	nx := a.x + steps[a.dir][0]
	ny := a.y + steps[a.dir][1]
	if nx < 0 || nx > 9 || ny < 0 || ny > 9 {
		return -1, 0, 0
	}
	return s[nx+ny*10], nx, ny
}

// lrTile returns the tiles flanking the ray's travel direction as
// (leftTile, lx, ly, rightTile, rx, ry); an invalid dir yields
// (-1, 0, 0, 0, 0, 0). Neighbour coordinates are not bounds-checked,
// so the caller must keep the ray off the board edge.
func (a ray) lrTile(s []int) (int, int, int, int, int, int) {
	var lx, ly, rx, ry int
	switch a.dir {
	case 0:
		lx, ly, rx, ry = a.x, a.y-1, a.x+1, a.y
	case 1:
		lx, ly, rx, ry = a.x+1, a.y, a.x, a.y+1
	case 2:
		lx, ly, rx, ry = a.x, a.y+1, a.x-1, a.y
	case 3:
		lx, ly, rx, ry = a.x-1, a.y, a.x, a.y-1
	default:
		return -1, 0, 0, 0, 0, 0
	}
	return s[lx+ly*10], lx, ly, s[rx+ry*10], rx, ry
}
// main reads the file named by the first argument; every line is one
// 100-character 10x10 board. Mirror cells ('\', '/', 'X') seed rays
// which are traced diagonally across the board; cells a ray crossed
// are re-rendered as the mirror character matching the directions seen
// there, other cells are printed via the tile table.
func main() {
	data, err := os.Open(os.Args[1])
	if err != nil {
		log.Fatal(err)
	}
	defer data.Close()
	// s is the flat 10x10 board of tile codes, index = x + y*10.
	s := make([]int, 100)
	scanner := bufio.NewScanner(data)
	for scanner.Scan() {
		var todo []ray       // work queue of rays still propagating
		been := make(map[int][]ray) // rays recorded per board index
		// Seed rays from mirror characters: '\'/'X' emit along the
		// down-right/up-left diagonal, '/'/'X' along the other one.
		for ix, i := range scanner.Text() {
			if i == '\\' || i == '/' || i == 'X' {
				x, y := ix%10, ix/10
				if i == '\\' || i == 'X' {
					todo = append(todo, ray{x, y, 1, 20})
					todo = append(todo, ray{x, y, 3, 20})
				}
				if i == '/' || i == 'X' {
					todo = append(todo, ray{x, y, 0, 20})
					todo = append(todo, ray{x, y, 2, 20})
				}
			}
			s[ix] = code[i]
		}
		// Breadth-first propagation of rays until the queue drains.
		for len(todo) > 0 {
			b := todo[0]
			todo = todo[1:]
			nt, x, y := b.nextTile(s)
			if b.ttl > 1 {
				if nt == 0 || nt == 1 {
					// NOTE(review): this visited-check loop has no
					// effect — "continue" only advances the inner
					// loop and nothing is skipped afterwards.
					// Presumably it was meant to avoid re-enqueueing
					// an already-seen (dir, ttl) state; confirm.
					if nb, f := been[x+10*y]; f {
						for _, i := range nb {
							if i.dir == b.dir && i.ttl >= b.ttl {
								continue
							}
						}
					}
					if nt == 0 {
						// Empty tile: keep going, consuming one ttl.
						todo = append(todo, ray{x, y, b.dir, b.ttl - 1})
					} else {
						// '*' tile: continue straight and also fork
						// into both perpendicular diagonals, ttl kept.
						todo = append(todo, ray{x, y, b.dir, b.ttl})
						todo = append(todo, ray{x, y, (b.dir + 1) % 4, b.ttl})
						todo = append(todo, ray{x, y, (b.dir + 3) % 4, b.ttl})
					}
				} else if nt == 2 {
					// '#' tile ahead: deflect into the flanking tiles.
					lt, lx, ly, rt, rx, ry := b.lrTile(s)
					if lt == 0 {
						todo = append(todo, ray{lx, ly, (b.dir + 3) % 4, b.ttl - 1})
					} else if lt == 1 {
						todo = append(todo, ray{lx, ly, (b.dir + 3) % 4, b.ttl})
					}
					if rt == 0 {
						todo = append(todo, ray{rx, ry, (b.dir + 1) % 4, b.ttl - 1})
					// NOTE(review): this branch tests lt, not rt —
					// looks like a copy-paste slip from the left-hand
					// case above; confirm intended behavior before
					// changing it.
					} else if lt == 1 {
						todo = append(todo, ray{rx, ry, (b.dir + 1) % 4, b.ttl})
					}
				}
			}
			// Record this ray at its position, replacing a previous
			// entry with the same direction.
			if nb, f := been[b.x+10*b.y]; f {
				for ix, i := range nb {
					if i.dir == b.dir {
						been[b.x+10*b.y][ix] = b
						f = false
						break
					}
				}
				if f {
					been[b.x+10*b.y] = append(been[b.x+10*b.y], b)
				}
			} else {
				been[b.x+10*b.y] = []ray{b}
			}
		}
		// Render: empty cells crossed by rays become '/' (dirs 0/2),
		// '\' (dirs 1/3) or 'X' (both); everything else prints its tile.
		for i := 0; i < 100; i++ {
			if s[i] == 0 {
				b, f := been[i]
				if f {
					p := 0
					for _, j := range b {
						if j.dir == 0 || j.dir == 2 {
							p |= 1
						} else {
							p |= 2
						}
					}
					switch p {
					case 1:
						fmt.Printf("/")
					case 2:
						fmt.Printf(`\`)
					case 3:
						fmt.Printf("X")
					}
				} else {
					fmt.Printf(" ")
				}
			} else {
				fmt.Printf("%c", tile[s[i]])
			}
		}
		fmt.Println()
	}
}
|
package v1
import (
"fmt"
"github.com/gin-gonic/gin"
"go.rock.com/rock-platform/rock/server/clients/k8s"
"go.rock.com/rock-platform/rock/server/clients/license"
"go.rock.com/rock-platform/rock/server/database/api"
"go.rock.com/rock-platform/rock/server/utils"
"io/ioutil"
"net/http"
"os"
)
const (
	// LicenseMode is the default license authorization mode used when
	// a request does not specify one.
	LicenseMode = "voucher" // dongle or voucher
)

// CAStatusResp mirrors the license-ca status struct from cactl.go.
type CAStatusResp struct {
	Server uint `json:"server" example:"0"` // 0 is master, 1 is slave
	Mode string `json:"mode" example:"voucher"` // license-ca authorization mode, default is voucher mode
	Disable bool `json:"disable" example:"false"` // is ca disabled, by soft start/stop ca, if disabled, ca can't supply nomal service
	IsActive bool `json:"is_active" example:"true"` // master or standby ca
	ActiveLimit int32 `json:"active_limit" example:"100"` // cluster total active limit
	AloneTime int32 `json:"alone_time" example:"0"` // ca alone time, uint seconds, 0 means forever
	DongleTime int64 `json:"dongle_time" example:"1616762924"` // dongle timestamp
	Status string `json:"status" example:"alone"` // ca status, "alone" or "alive" or "dead", means whether ca is in alive
	AuthID string `json:"auth_id" example:"495788f9-9797-4bf8-a3e1-d65d09b107cd"` // cluster license sn
	Product string `json:"product" example:"IVA-VIPER"` // product name
	DongleID string `json:"dongle_id" example:"494330853"` // dongle id
	ExpiredAt string `json:"expired_at" example:"99991231"` // expire time
	Company string `json:"company" example:"sensetime_SC"` // company name
	FeatureIds []uint64 `json:"feature_ids" example:"22000"` // feature ids
	Quotas map[string]quotaLimit `json:"quotas"` // cluster quotas, used and total
	Consts map[string]interface{} `json:"consts"` // cluster consts, value type will be int32 or string
	Devices []caDeviceInfo `json:"devices"` // the quotas that devices have taken
}

// quotaLimit pairs used and total quota counts for one quota kind.
type quotaLimit struct {
	Used int32 `json:"used" example:"1"` // used quotas
	Total int32 `json:"total" example:"2"` // total quotas
}

// caDeviceInfo reports per-device quota consumption.
type caDeviceInfo struct {
	UdID string `json:"udid,omitempty" example:"engine-face-extract-service-kd4k9-a954a1f74cd23d97248249d04de10221-fba9aae9f524e083"`
	QuotaUsage map[string]int32 `json:"quota_usage,omitempty"`
}

// K8sMasterInfo carries the first two master node IPs and the master
// count of a k8s cluster, as derived by getClusterIp.
type K8sMasterInfo struct {
	Master1IP string `json:"master1_ip" example:"10.151.5.136"`
	Master2IP string `json:"master2_ip" example:"10.151.5.137"`
	MasterTotal uint `json:"master_total" example:"3"`
}

// LicenseModeReq is the optional license_mode query/form parameter.
type LicenseModeReq struct {
	LicenseMode string `json:"license_mode" form:"license_mode" binding:"omitempty,min=1" example:"voucher or dongle"` // default voucher
}

// LicenseServerTypeReq selects the master (0) or slave (1) license-ca.
type LicenseServerTypeReq struct {
	// serverType only takes the values 0 and 1. With a bare `required`
	// binding, 0 — the uint zero value — makes gin's validator think no
	// input was given; constrain the range instead, via min=0,max=1 or
	// oneof=0 1.
	// NOTE(review): the tag below still includes `required`, which per
	// the note above would reject server_type=0 — confirm intended.
	ServerType uint `json:"server_type" form:"server_type" binding:"required,oneof=0 1" example:"0"` // 0 is master, 1 is slave
	LicenseModeReq
}

// ActiveResp is the license-ca activation result envelope.
type ActiveResp struct {
	StatusCode string `json:"status_code,omitempty" example:"000000"`
	StatusMessage string `json:"status_message,omitempty" example:"SUCCESS"`
}

// Client Licenses Response
type ClientLicResp struct {
	Licenses []string `json:"licenses,omitempty"`
}
// @Summary Get license status by cluster id and license mode
// @Description api for get license status by cluster id and license mode
// @Tags CLUSTER
// @Accept json
// @Produce json
// @Param id path integer true "Cluster ID"
// @Param license_mode query string false "license mode"
// @Success 200 {array} v1.CAStatusResp "StatusOK"
// @Failure 400 {object} utils.HTTPError "StatusBadRequest"
// @Failure 404 {object} utils.HTTPError "StatusNotFound"
// @Failure 500 {object} utils.HTTPError "StatusInternalServerError"
// @Router /v1/clusters/{id}/license-status [get]
//
// GetLicenseStatus resolves the cluster's master node IPs, builds a
// license-ca client and returns the CA status: master and slave status
// in cluster mode (>= 3 masters), master status only otherwise.
// Errors are raised via panic, per this package's handler convention.
func (c *Controller) GetLicenseStatus(ctx *gin.Context) {
	var idReq IdReq // cluster id
	if err := ctx.ShouldBindUri(&idReq); err != nil {
		panic(err)
	}
	var req LicenseModeReq
	if err := ctx.ShouldBind(&req); err != nil {
		panic(err)
	}
	// ensure license authorization mode
	mode := getLicenseMode(req.LicenseMode)
	cluster, err := api.GetClusterById(idReq.Id)
	if err != nil {
		panic(err)
	}
	// get the k8s master nodes info
	k8sClusterInfo, err := getClusterIp(cluster.Config)
	if err != nil {
		panic(err)
	}
	// get license-ca status
	var caMasterStatus *license.CAStatus
	var caSlaveStatus *license.CAStatus
	var caStatus []*license.CAStatus
	if k8sClusterInfo.MasterTotal >= 3 { // when cluster mode
		masterCAUrl := utils.GetLicenseCaUrl(k8sClusterInfo.Master1IP)
		slaveCAUrl := utils.GetLicenseCaUrl(k8sClusterInfo.Master2IP)
		caCtl, err := license.NewServiceCtl(masterCAUrl, slaveCAUrl) // get license-ca client
		if err != nil {
			panic(err)
		}
		caMasterStatus, err = caCtl.GetCAStatus(0, mode) // get license-ca master status
		if err != nil {
			panic(err)
		}
		caSlaveStatus, err = caCtl.GetCAStatus(1, mode) // get license-ca slave status
		if err != nil {
			panic(err)
		}
		caStatus = []*license.CAStatus{caMasterStatus, caSlaveStatus}
	} else { // when single node mode
		masterCAUrl := utils.GetLicenseCaUrl(k8sClusterInfo.Master1IP)
		caCtl, err := license.NewServiceCtl(masterCAUrl, masterCAUrl) // get license-ca client
		if err != nil {
			panic(err)
		}
		caMasterStatus, err = caCtl.GetCAStatus(0, mode)
		if err != nil {
			panic(err)
		}
		caStatus = []*license.CAStatus{caMasterStatus}
	}
	// NOTE(review): resp always has length 2; in single-node mode
	// caStatus holds one element, so a zero-valued trailing entry may
	// be returned — confirm MarshalResponse semantics.
	resp := make([]CAStatusResp, 2)
	err = utils.MarshalResponse(caStatus, &resp)
	if err != nil {
		panic(err)
	}
	c.Logger.Infof("Get license ca status by cluster id %v, authorization mode is %v", cluster.Id, mode)
	ctx.JSON(http.StatusOK, resp)
}
// getLicenseMode returns the effective license authorization mode: the
// caller-supplied mode when non-empty, otherwise the package default
// LicenseMode ("voucher").
func getLicenseMode(licenseMode string) string {
	// Guard-clause form instead of else-after-return (Go idiom).
	if licenseMode == "" {
		return LicenseMode
	}
	return licenseMode
}
// getClusterIp inspects the k8s cluster described by k8sConf and
// returns the first (and, in cluster mode, second) master node IP plus
// the master count. Clusters with >= 3 masters report two IPs; with 1
// or 2 masters only the first; with none, a 404 Rock error.
func getClusterIp(k8sConf string) (*K8sMasterInfo, error) {
	// check the total number of K8S cluster master
	nodeList, err := k8s.GetClusterNodes(k8sConf)
	if err != nil {
		return nil, err
	}
	nodes, err := formatNodesResp(nodeList.Items)
	if err != nil {
		return nil, err
	}
	// gets the total number of k8s cluster master nodes
	masterTotal, err := getClusterMaster(nodes)
	if err != nil {
		return nil, err
	}
	// get the master node's IP by k8s cluster
	masterClusterInfo := new(K8sMasterInfo)
	if masterTotal >= 3 { // when cluster mode
		masterClusterInfo.Master1IP = (*nodes)[0].InternalIP
		masterClusterInfo.Master2IP = (*nodes)[1].InternalIP
		masterClusterInfo.MasterTotal = masterTotal
		return masterClusterInfo, nil
	} else if masterTotal == 1 || masterTotal == 2 { // when single node mode
		masterClusterInfo.Master1IP = (*nodes)[0].InternalIP
		masterClusterInfo.MasterTotal = masterTotal
		return masterClusterInfo, nil
	} else {
		// Constant message: fmt.Sprintf with no verbs was redundant
		// (staticcheck S1039).
		err := utils.NewRockError(404, 40400015, "k8s cluster node not found")
		return nil, err
	}
}
// getClusterMaster counts the nodes carrying the Kubernetes master
// label (node-role.kubernetes.io/master with an empty value). The
// error result is always nil and exists only for call-site symmetry.
func getClusterMaster(nodes *[]ClusterNodeResp) (uint, error) {
	var total uint
	for _, node := range *nodes {
		for _, label := range node.Labels {
			if label.Key != "node-role.kubernetes.io/master" || label.Value != "" {
				continue
			}
			total++
		}
	}
	return total, nil
}
// @Summary Download license hardware c2v file
// @Description api for download license hardware c2v file
// @Tags CLUSTER
// @Accept json
// @Produce json
// @Param id path integer true "Cluster ID"
// @Param server_type query uint true "license master or slave type"
// @Param license_mode query string false "license mode"
// @Success 200 {string} string "StatusOK"
// @Failure 400 {object} utils.HTTPError "StatusBadRequest"
// @Failure 404 {object} utils.HTTPError "StatusNotFound"
// @Failure 500 {object} utils.HTTPError "StatusInternalServerError"
// @Router /v1/clusters/{id}/license-c2v [get]
//
// GetC2vFile fetches the selected license-ca's hardware info, writes
// the C2V payload to a temp file and serves it as an attachment named
// "<dongle_id>.c2v" (or "default.c2v" when the dongle id is
// unavailable). Errors are raised via panic, per package convention.
func (c *Controller) GetC2vFile(ctx *gin.Context) {
	var idReq IdReq
	if err := ctx.ShouldBindUri(&idReq); err != nil {
		panic(err)
	}
	var serverTypeReq LicenseServerTypeReq
	if err := ctx.ShouldBind(&serverTypeReq); err != nil {
		panic(err)
	}
	// ensure license authorization mode
	mode := getLicenseMode(serverTypeReq.LicenseMode)
	cluster, err := api.GetClusterById(idReq.Id)
	if err != nil {
		panic(err)
	}
	// get the k8s master nodes info
	k8sClusterInfo, err := getClusterIp(cluster.Config)
	if err != nil {
		panic(err)
	}
	var caCtl *license.CACtl
	if k8sClusterInfo.MasterTotal >= 3 { // when cluster mode
		masterCAUrl := utils.GetLicenseCaUrl(k8sClusterInfo.Master1IP)
		slaveCAUrl := utils.GetLicenseCaUrl(k8sClusterInfo.Master2IP)
		caCtl, err = license.NewServiceCtl(masterCAUrl, slaveCAUrl) // get license-ca client
		if err != nil {
			panic(err)
		}
	} else {
		masterCAUrl := utils.GetLicenseCaUrl(k8sClusterInfo.Master1IP)
		caCtl, err = license.NewServiceCtl(masterCAUrl, masterCAUrl) // get license-ca client
		if err != nil {
			panic(err)
		}
	}
	// ServerType: 0 is master, 1 is slave
	// Type: 0 is c2v + fingerprint
	hardwareInfoResp, err := caCtl.HardwareInfo(license.ServerType(serverTypeReq.ServerType), 0)
	if err != nil {
		panic(err)
	}
	// save c2v file (deferred Close runs before the deferred Remove,
	// so the file is flushed before deletion)
	c2vTmpFile, err := ioutil.TempFile("/tmp", "c2v-file-*")
	if err != nil {
		panic(err)
	}
	defer os.Remove(c2vTmpFile.Name())
	_, err = c2vTmpFile.WriteString(hardwareInfoResp.C2V)
	if err != nil {
		panic(err)
	}
	defer c2vTmpFile.Close()
	fileName := "default.c2v"
	status, err := caCtl.GetCAStatus(license.ServerType(serverTypeReq.ServerType), mode) // get license-ca master/slave status
	if err != nil {
		c.Logger.Warnf("Get dongle id failed, set dongle id to default and skip it: %v", err)
	} else {
		fileName = fmt.Sprintf("%s.c2v", status.DongleID)
	}
	c.Logger.Infof("Download c2v hardware info by cluster id %d, authorization mode is %v", idReq.Id, mode)
	// So the frontend can download the file by calling this endpoint
	// directly, the response must use this exact shape
	// (filename + application/octet-stream):
	ctx.Header("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", fileName))
	ctx.Header("Content-Type", "application/octet-stream")
	ctx.File(c2vTmpFile.Name()) // read the file contents and return them
}
// @Summary Download license hardware fingerprint file
// @Description api for download license hardware fingerprint file
// @Tags CLUSTER
// @Accept json
// @Produce json
// @Param id path integer true "Cluster ID"
// @Param server_type query uint true "license master or slave type"
// @Param license_mode query string false "license mode"
// @Success 200 {string} string "StatusOK"
// @Failure 400 {object} utils.HTTPError "StatusBadRequest"
// @Failure 404 {object} utils.HTTPError "StatusNotFound"
// @Failure 500 {object} utils.HTTPError "StatusInternalServerError"
// @Router /v1/clusters/{id}/license-fingerprint [get]
//
// GetFingerprintFile mirrors GetC2vFile but serves the fingerprint
// part of the hardware info as "<dongle_id>.fingerprint" (or
// "default.fingerprint" when the dongle id is unavailable).
func (c *Controller) GetFingerprintFile(ctx *gin.Context) {
	var idReq IdReq
	if err := ctx.ShouldBindUri(&idReq); err != nil {
		panic(err)
	}
	var serverTypeReq LicenseServerTypeReq
	if err := ctx.ShouldBind(&serverTypeReq); err != nil {
		panic(err)
	}
	// ensure license authorization mode
	mode := getLicenseMode(serverTypeReq.LicenseMode)
	cluster, err := api.GetClusterById(idReq.Id)
	if err != nil {
		panic(err)
	}
	// get the k8s master nodes info
	k8sClusterInfo, err := getClusterIp(cluster.Config)
	if err != nil {
		panic(err)
	}
	var caCtl *license.CACtl
	if k8sClusterInfo.MasterTotal >= 3 { // when cluster mode
		masterCAUrl := utils.GetLicenseCaUrl(k8sClusterInfo.Master1IP)
		slaveCAUrl := utils.GetLicenseCaUrl(k8sClusterInfo.Master2IP)
		caCtl, err = license.NewServiceCtl(masterCAUrl, slaveCAUrl) // get license-ca client
		if err != nil {
			panic(err)
		}
	} else {
		masterCAUrl := utils.GetLicenseCaUrl(k8sClusterInfo.Master1IP)
		caCtl, err = license.NewServiceCtl(masterCAUrl, masterCAUrl) // get license-ca client
		if err != nil {
			panic(err)
		}
	}
	// ServerType: 0 is master, 1 is slave
	// Type: 0 is c2v + fingerprint
	hardwareInfoResp, err := caCtl.HardwareInfo(license.ServerType(serverTypeReq.ServerType), 0)
	if err != nil {
		panic(err)
	}
	// save fingerprint file (deferred Close runs before the deferred
	// Remove, so the file is flushed before deletion)
	fpTmpFile, err := ioutil.TempFile("/tmp", "fingerprint-file-*")
	if err != nil {
		panic(err)
	}
	defer os.Remove(fpTmpFile.Name())
	_, err = fpTmpFile.WriteString(hardwareInfoResp.FingerPrint)
	if err != nil {
		panic(err)
	}
	defer fpTmpFile.Close()
	fileName := "default.fingerprint"
	status, err := caCtl.GetCAStatus(license.ServerType(serverTypeReq.ServerType), mode) // get license-ca master/slave status
	if err != nil {
		c.Logger.Warnf("Get dongle id failed, set dongle id to default and skip it: %v", err)
	} else {
		fileName = fmt.Sprintf("%s.fingerprint", status.DongleID)
	}
	c.Logger.Infof("Download fingerprint hardware info by cluster id %d, authorization mode is %v", idReq.Id, mode)
	// So the frontend can download the file by calling this endpoint
	// directly, the response must use this exact shape
	// (filename + application/octet-stream):
	ctx.Header("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", fileName))
	ctx.Header("Content-Type", "application/octet-stream")
	ctx.File(fpTmpFile.Name()) // read the file contents and return them
}
// @Summary Online active the license-ca
// @Description api for online active the license-ca
// @Tags CLUSTER
// @Accept json
// @Produce json
// @Param id path integer true "Cluster ID"
// @Param server_type query uint true "license master or slave type"
// @Param license_mode query string false "license mode"
// @Success 200 {object} v1.ActiveResp "StatusOK"
// @Failure 400 {object} utils.HTTPError "StatusBadRequest"
// @Failure 404 {object} utils.HTTPError "StatusNotFound"
// @Failure 500 {object} utils.HTTPError "StatusInternalServerError"
// @Router /v1/clusters/{id}/license-online [post]
//
// ActiveOnline performs an online activation against the selected
// license-ca, verifies the CA reports active, propagates the client
// license into the license-config configmap and restarts the engine
// pods.
func (c *Controller) ActiveOnline(ctx *gin.Context) {
	// If online activation fails, look the error up on the vendor site:
	// https://dongle.sensetime.com/errors. The license-ca source is
	// closed; only core R&D can inspect it.
	// Example failure:
	//   "status_code": "000002",
	//   "status_message": "get online data from sdk error of -2014281727"
	// Lookup result:
	//   ID 154, code -2014281727, vendor code 0x8001,
	//   vendor description: failed to connect to the activation server.
	var idReq IdReq
	if err := ctx.ShouldBindUri(&idReq); err != nil {
		panic(err)
	}
	var serverTypeReq LicenseServerTypeReq
	if err := ctx.ShouldBind(&serverTypeReq); err != nil {
		panic(err)
	}
	// ensure license authorization mode
	mode := getLicenseMode(serverTypeReq.LicenseMode)
	cluster, err := api.GetClusterById(idReq.Id)
	if err != nil {
		panic(err)
	}
	// get the k8s master nodes info
	k8sClusterInfo, err := getClusterIp(cluster.Config)
	if err != nil {
		panic(err)
	}
	var caCtl *license.CACtl
	if k8sClusterInfo.MasterTotal >= 3 { // when cluster mode
		masterCAUrl := utils.GetLicenseCaUrl(k8sClusterInfo.Master1IP)
		slaveCAUrl := utils.GetLicenseCaUrl(k8sClusterInfo.Master2IP)
		caCtl, err = license.NewServiceCtl(masterCAUrl, slaveCAUrl) // get license-ca client
		if err != nil {
			panic(err)
		}
	} else {
		masterCAUrl := utils.GetLicenseCaUrl(k8sClusterInfo.Master1IP)
		caCtl, err = license.NewServiceCtl(masterCAUrl, masterCAUrl) // get license-ca client
		if err != nil {
			panic(err)
		}
	}
	// online active
	activeResp, err := caCtl.OnlineActivate(license.ServerType(serverTypeReq.ServerType), "activate")
	if err != nil {
		panic(err)
	}
	if activeResp.StatusCode != "000000" {
		message := activeResp.StatusMessage + ", You can use the error code to query the reason in here: https://dongle.sensetime.com/errors"
		err := utils.NewRockError(500, 50000007, message)
		panic(err)
	}
	// check license-ca status is active
	status, err := caCtl.GetCAStatus(license.ServerType(serverTypeReq.ServerType), mode)
	if err != nil {
		panic(err)
	}
	if status.IsActive == false {
		err := utils.NewRockError(500, 50000008, "License is not active even after executing license online activation")
		panic(err)
	}
	resp := ActiveResp{}
	if err := utils.MarshalResponse(activeResp, &resp); err != nil {
		panic(err)
	}
	// get client licenses data from license-ca
	// NOTE(review): the two blocks below panic with a *string* (the
	// shadowed err), unlike the error panics elsewhere — confirm the
	// recovery middleware handles both shapes.
	clientLicResp, err := caCtl.GetClientLics(license.ServerType(serverTypeReq.ServerType))
	if err != nil || len(clientLicResp.Licenses) == 0 {
		err := fmt.Sprintf("get client license data failed: %v", err)
		panic(err)
	}
	// update license-config configmap
	err = k8s.UpdateConfigmapWithLicense(cluster.Config, mode, clientLicResp.Licenses[0])
	if err != nil {
		err := fmt.Sprintf("update configmap failed: %v", err)
		panic(err)
	}
	// restart engine namespace pods
	err = k8s.RestartPodsWithLicense(cluster.Config, 60)
	if err != nil {
		panic(err)
	}
	c.Logger.Infof("Online active license success, active by cluster_id %d, authorization mode is %v", idReq.Id, mode)
	ctx.JSON(http.StatusOK, resp)
}
// @Summary Offline active the license-ca
// @Description api for offline active the license-ca
// @Tags CLUSTER
// @Accept json
// @Produce json
// @Param id path integer true "Cluster ID"
// @Param server_type query uint true "license master or slave type"
// @Param license_mode query string false "license mode"
// @Success 200 {object} v1.ActiveResp "StatusOK"
// @Failure 400 {object} utils.HTTPError "StatusBadRequest"
// @Failure 404 {object} utils.HTTPError "StatusNotFound"
// @Failure 500 {object} utils.HTTPError "StatusInternalServerError"
// @Router /v1/clusters/{id}/license-offline [post]
//
// ActiveOffline activates the selected license-ca from an uploaded
// v2c file (multipart field "v2c"), verifies the CA reports active,
// propagates the client license into the license-config configmap and
// restarts the engine pods.
func (c *Controller) ActiveOffline(ctx *gin.Context) {
	var idReq IdReq
	if err := ctx.ShouldBindUri(&idReq); err != nil {
		panic(err)
	}
	var serverTypeReq LicenseServerTypeReq
	if err := ctx.ShouldBind(&serverTypeReq); err != nil {
		panic(err)
	}
	// get the authorized v2c file
	file, err := ctx.FormFile("v2c")
	if err != nil {
		panic(err)
	}
	// create a temporary file
	tmpFile, err := ioutil.TempFile("", "v2c-file-*")
	if err != nil {
		panic(err)
	}
	defer tmpFile.Close()
	defer os.Remove(tmpFile.Name())
	// save v2c file to temporary file
	if err := ctx.SaveUploadedFile(file, tmpFile.Name()); err != nil {
		panic(err)
	}
	// read temporary file content
	v2cData, err := ioutil.ReadFile(tmpFile.Name())
	if err != nil {
		panic(err)
	}
	// ensure license authorization mode
	mode := getLicenseMode(serverTypeReq.LicenseMode)
	cluster, err := api.GetClusterById(idReq.Id)
	if err != nil {
		panic(err)
	}
	// get the k8s master nodes info
	k8sClusterInfo, err := getClusterIp(cluster.Config)
	if err != nil {
		panic(err)
	}
	var caCtl *license.CACtl
	if k8sClusterInfo.MasterTotal >= 3 { // when cluster mode
		masterCAUrl := utils.GetLicenseCaUrl(k8sClusterInfo.Master1IP)
		slaveCAUrl := utils.GetLicenseCaUrl(k8sClusterInfo.Master2IP)
		caCtl, err = license.NewServiceCtl(masterCAUrl, slaveCAUrl) // get license-ca client
		if err != nil {
			panic(err)
		}
	} else {
		masterCAUrl := utils.GetLicenseCaUrl(k8sClusterInfo.Master1IP)
		caCtl, err = license.NewServiceCtl(masterCAUrl, masterCAUrl) // get license-ca client
		if err != nil {
			panic(err)
		}
	}
	// offline active
	activeResp, err := caCtl.OfflineActivate(license.ServerType(serverTypeReq.ServerType), string(v2cData))
	if err != nil {
		panic(err)
	}
	if activeResp.StatusCode != "000000" {
		message := activeResp.StatusMessage + ", You can use the error code to query the reason in here: https://dongle.sensetime.com/errors"
		err := utils.NewRockError(500, 50000009, message)
		panic(err)
	}
	// check license-ca status is active
	status, err := caCtl.GetCAStatus(license.ServerType(serverTypeReq.ServerType), mode)
	if err != nil {
		panic(err)
	}
	if status.IsActive == false {
		err := utils.NewRockError(500, 50000010, "License is not active even after executing license offline activation")
		panic(err)
	}
	resp := ActiveResp{}
	if err := utils.MarshalResponse(activeResp, &resp); err != nil {
		panic(err)
	}
	// get client licenses data from license-ca
	// NOTE(review): the two blocks below panic with a *string* (the
	// shadowed err), unlike the error panics elsewhere — confirm the
	// recovery middleware handles both shapes.
	clientLicResp, err := caCtl.GetClientLics(license.ServerType(serverTypeReq.ServerType))
	if err != nil || len(clientLicResp.Licenses) == 0 {
		err := fmt.Sprintf("get client license data failed: %v", err)
		panic(err)
	}
	// update license-config configmap
	err = k8s.UpdateConfigmapWithLicense(cluster.Config, mode, clientLicResp.Licenses[0])
	if err != nil {
		err := fmt.Sprintf("update configmap failed: %v", err)
		panic(err)
	}
	// restart engine namespace pods
	err = k8s.RestartPodsWithLicense(cluster.Config, 60)
	if err != nil {
		panic(err)
	}
	c.Logger.Infof("Offline active license success, active by cluster_id %d, authorization mode is %v", idReq.Id, mode)
	ctx.JSON(http.StatusOK, resp)
}
// @Summary Get client licenses
// @Description api for get client licenses
// @Tags CLUSTER
// @Accept json
// @Produce json
// @Param id path integer true "Cluster ID"
// @Success 200 {object} v1.ClientLicResp "StatusOK"
// @Failure 400 {object} utils.HTTPError "StatusBadRequest"
// @Failure 404 {object} utils.HTTPError "StatusNotFound"
// @Failure 500 {object} utils.HTTPError "StatusInternalServerError"
// @Router /v1/clusters/{id}/license-clics [get]
func (c *Controller) GetClientLicenses(ctx *gin.Context) {
	// Fetches the client_license.lic certificate content.
	// CLI equivalent: license_client license | sed '1,6d'
	// Regardless of whether serverType is 0 or 1, the license-ca license.lic
	// certificate is identical across the cluster because it is stored in the
	// license-config configmap, so no serverType request parameter is needed here.
	var idReq IdReq // cluster id
	if err := ctx.ShouldBindUri(&idReq); err != nil {
		panic(err)
	}
	cluster, err := api.GetClusterById(idReq.Id)
	if err != nil {
		panic(err)
	}
	// Look up the k8s master node addresses for this cluster.
	k8sClusterInfo, err := getClusterIp(cluster.Config)
	if err != nil {
		panic(err)
	}
	// Single-master deployments reuse the primary CA endpoint as the fallback;
	// HA clusters (>= 3 masters) point the fallback at the second master.
	primaryCAUrl := utils.GetLicenseCaUrl(k8sClusterInfo.Master1IP)
	secondaryCAUrl := primaryCAUrl
	if k8sClusterInfo.MasterTotal >= 3 { // cluster (HA) mode
		secondaryCAUrl = utils.GetLicenseCaUrl(k8sClusterInfo.Master2IP)
	}
	caCtl, err := license.NewServiceCtl(primaryCAUrl, secondaryCAUrl) // get license-ca client
	if err != nil {
		panic(err)
	}
	clientLicResp, err := caCtl.GetClientLics(0)
	if err != nil {
		panic(err)
	}
	resp := ClientLicResp{}
	if err := utils.MarshalResponse(clientLicResp, &resp); err != nil {
		panic(err)
	}
	c.Logger.Infof("Get client licenses by cluster_id %v", idReq.Id)
	ctx.JSON(http.StatusOK, resp)
}
|
package client
import (
"net/http"
"github.com/farzadrastegar/simple-cab/gateway"
"github.com/spf13/viper"
)
// RequestService abstracts the execution of an HTTP request so the transport
// can be swapped out (e.g. mocked) in tests.
type RequestService interface {
	ExecuteRequest(req *http.Request) (*http.Response, error)
}
// Client represents a client to connect to the HTTP server.
type Client struct {
	cabService CabService // service implementation handed out by Connect
	Handler *Handler // HTTP handler wired into cabService by NewClient
}
// NewClient returns a new instance of Client with its handler constructed and
// wired into the cab service. Server address/port are read from viper config.
func NewClient() *Client {
	// Read CheckZombieStatus service's address and port.
	// NOTE(review): these assign package-level variables declared elsewhere in
	// this package (plain `=`, not `:=`).
	localServerAddr = viper.GetString("servers.zombie_driver.address")
	localServerPort = viper.GetString("servers.zombie_driver.port")
	c := &Client{
		Handler: NewHandler(),
	}
	//c.cabService.client = c
	// NOTE(review): &c.Handler is a **Handler since the field is already a
	// pointer — presumably cabService.handler has that type; confirm.
	c.cabService.handler = &c.Handler
	return c
}
// Connect returns the cabservice from client.
// The returned value satisfies gateway.CabService and shares state with c.
func (c *Client) Connect() gateway.CabService {
	return &c.cabService
}
|
package gitreceive
import (
"fmt"
)
const (
	// slugTGZName is the filename of the built slug tarball inside the push key.
	slugTGZName = "slug.tgz"
	// CacheKeyPattern is the template for location cache dirs.
	CacheKeyPattern = "home/%s/cache"
	// GitKeyPattern is the template for storing git key files.
	GitKeyPattern = "home/%s:git-%s"
)
// SlugBuilderInfo contains all of the object storage related information needed to pass to a
// slug builder.
type SlugBuilderInfo struct {
	pushKey string // object storage key where the built slug is uploaded
	tarKey string // object storage key the source tarball is downloaded from
	cacheKey string // object storage key for the persisted buildpack cache
	disableCaching bool // when true, the buildpack cache is not persisted
}
// NewSlugBuilderInfo creates and populates a new SlugBuilderInfo based on the given data.
func NewSlugBuilderInfo(appName string, shortSha string, disableCaching bool) *SlugBuilderInfo {
	base := fmt.Sprintf(GitKeyPattern, appName, shortSha)
	// Workflow tells the slugrunner to download the slug from the push key, so
	// the slugbuilder must be told to upload it to the same place.
	return &SlugBuilderInfo{
		pushKey:        base + "/push",
		tarKey:         base + "/tar",
		cacheKey:       fmt.Sprintf(CacheKeyPattern, appName),
		disableCaching: disableCaching,
	}
}
// PushKey returns the object storage key that the slug builder will store the slug in.
// The returned value only contains the path to the folder, not including the final filename.
func (s SlugBuilderInfo) PushKey() string { return s.pushKey }
// TarKey returns the object storage key from which the slug builder will download for the tarball
// (from which it uses to build the slug). The returned value only contains the path to the
// folder, not including the final filename.
func (s SlugBuilderInfo) TarKey() string { return s.tarKey }
// CacheKey returns the object storage key that the slug builder will use to store the cache in.
// It's application specific and persisted between deploys (doesn't contain the git sha).
func (s SlugBuilderInfo) CacheKey() string { return s.cacheKey }
// DisableCaching dictates whether or not the slugbuilder should persist the buildpack cache.
func (s SlugBuilderInfo) DisableCaching() bool { return s.disableCaching }
// AbsoluteSlugObjectKey returns the PushKey plus the final filename of the slug.
func (s SlugBuilderInfo) AbsoluteSlugObjectKey() string { return s.PushKey() + "/" + slugTGZName }
// AbsoluteProcfileKey returns the PushKey plus the standard procfile name.
func (s SlugBuilderInfo) AbsoluteProcfileKey() string { return s.PushKey() + "/Procfile" }
|
package autotag
import (
"fmt"
"testing"
"time"
"github.com/gogits/git-module"
)
// newRepo creates and seeds a throwaway git repository on branch master and
// wraps it in a GitRepo configured with the given pre-release name and
// timestamp layout; prefix toggles the "v" tag prefix.
func newRepo(t *testing.T, preName, preLayout string, prefix bool) GitRepo {
	path := createTestRepo(t)
	repo, err := git.OpenRepository(path)
	checkFatal(t, err)
	seedTestRepoPrefixToggle(t, repo, prefix)
	r, err := NewRepo(GitRepoConfig{
		RepoPath: repo.Path,
		Branch: "master",
		PreReleaseName: preName,
		PreReleaseTimestampLayout: preLayout,
		Prefix: prefix,
	})
	if err != nil {
		t.Fatal("Error creating repo", err)
	}
	return *r
}
// newRepoWithPreReleasedTag creates a seeded test repository that already has
// a pre-release tag (1.0.2-pre, "v"-prefixed when prefix is true).
func newRepoWithPreReleasedTag(t *testing.T, prefix bool) GitRepo {
	path := createTestRepo(t)
	repo, err := git.OpenRepository(path)
	checkFatal(t, err)
	seedTestRepoPrefixToggle(t, repo, prefix)
	if prefix {
		makeTag(repo, "v1.0.2-pre")
	} else {
		makeTag(repo, "1.0.2-pre")
	}
	r, err := NewRepo(GitRepoConfig{RepoPath: repo.Path, Branch: "master", Prefix: prefix})
	if err != nil {
		t.Fatal("Error creating repo", err)
	}
	return *r
}
// TestBumpers verifies that MajorBump produces 2.0.0 from the seeded repo.
func TestBumpers(t *testing.T) {
	r := newRepo(t, "", "", true)
	defer cleanupTestRepo(t, r.repo)
	majorTag(t, r.repo)
	v, err := r.MajorBump()
	if err != nil {
		t.Fatal("MajorBump failed: ", err)
	}
	if v.String() != "2.0.0" {
		// Fixed: the failure message previously claimed '2.0.1' was expected,
		// contradicting the assertion above.
		t.Fatalf("MajorBump failed expected '2.0.0' got '%s' ", v)
	}
	fmt.Printf("Major is now %s\n", v)
}
// TestMinor verifies that MinorBump produces 1.1.0 from the seeded repo.
func TestMinor(t *testing.T) {
	r := newRepo(t, "", "", true)
	defer cleanupTestRepo(t, r.repo)
	majorTag(t, r.repo)
	v, err := r.MinorBump()
	if err != nil {
		t.Fatal("MinorBump failed: ", err)
	}
	if v.String() != "1.1.0" {
		t.Fatalf("MinorBump failed expected '1.1.0' got '%s' \n", v)
	}
}
// TestPatch verifies that PatchBump produces 1.0.2 from the seeded repo.
func TestPatch(t *testing.T) {
	r := newRepo(t, "", "", true)
	defer cleanupTestRepo(t, r.repo)
	majorTag(t, r.repo)
	v, err := r.PatchBump()
	if err != nil {
		t.Fatal("PatchBump failed: ", err)
	}
	if v.String() != "1.0.2" {
		t.Fatalf("PatchBump failed expected '1.0.2' got '%s' \n", v)
	}
}
// TestAutoTag checks AutoTag with the default "v" prefix.
func TestAutoTag(t *testing.T) {
	expected := []string{"v1.0.2", "v1.0.1"}
	test := "TestAutoTag"
	tags := prepareRepository(t, true)
	if !compareValues(expected, tags) {
		t.Fatalf("%s expected '%+v' got '%+v'\n", test, expected, tags)
	}
}
// TestAutoTagNoPrefix checks AutoTag with the "v" prefix disabled.
func TestAutoTagNoPrefix(t *testing.T) {
	expected := []string{"1.0.2", "1.0.1"}
	test := "TestAutoTagNoPrefix"
	tags := prepareRepository(t, false)
	if !compareValues(expected, tags) {
		t.Fatalf("%s expected '%+v' got '%+v'\n", test, expected, tags)
	}
}
// TestAutoTagCommits checks AutoTag against a repo seeded with a major bump commit.
func TestAutoTagCommits(t *testing.T) {
	tags := prepareRepositoryMajor(t, true)
	expect := []string{"v2.0.0", "v1.0.1"}
	test := "TestAutoTagCommits"
	if !compareValues(expect, tags) {
		t.Fatalf("%s expected '%+v' got '%+v'\n", test, expect, tags)
	}
}
// prepareRepositoryMajor builds a major-bump test repo, runs AutoTag on it,
// and returns the resulting tag list.
func prepareRepositoryMajor(t *testing.T, prefix bool) []string {
	r := newRepoMajorPrefixToggle(t, prefix)
	defer cleanupTestRepo(t, r.repo)
	err := r.AutoTag()
	if err != nil {
		t.Fatal("AutoTag failed ", err)
	}
	tags, err := r.repo.GetTags()
	checkFatal(t, err)
	return tags
}
// TestAutoTagCommitsNoPrefix is the prefix-less variant of TestAutoTagCommits.
func TestAutoTagCommitsNoPrefix(t *testing.T) {
	tags := prepareRepositoryMajor(t, false)
	expect := []string{"2.0.0", "1.0.1"}
	test := "TestAutoTagCommitsNoPrefix"
	if !compareValues(expect, tags) {
		t.Fatalf("%s expected '%+v' got '%+v'\n", test, expect, tags)
	}
}
// TestAutoTagWithPreReleasedTag checks AutoTag on a repo that already has a
// pre-release tag; the final version must still be produced.
func TestAutoTagWithPreReleasedTag(t *testing.T) {
	tags := prepareRepositoryPreReleasedTag(t, true)
	expect := []string{"v1.0.2-pre", "v1.0.2", "v1.0.1"}
	test := "TestAutoTagWithPreReleasedTag"
	if !compareValues(expect, tags) {
		t.Fatalf("%s expected '%+v' got '%+v'\n", test, expect, tags)
	}
}
// TestAutoTagWithPreReleasedTagNoPrefix is the prefix-less variant of
// TestAutoTagWithPreReleasedTag.
func TestAutoTagWithPreReleasedTagNoPrefix(t *testing.T) {
	tags := prepareRepositoryPreReleasedTag(t, false)
	// Fixed: the label previously said "TestAutoTagWithPreReleasedTag"
	// (copy-paste), so failures reported the wrong test name.
	test := "TestAutoTagWithPreReleasedTagNoPrefix"
	expect := []string{"1.0.2-pre", "1.0.2", "1.0.1"}
	if !compareValues(expect, tags) {
		t.Fatalf("%s expected '%+v' got '%+v'\n", test, expect, tags)
	}
}
// TestAutoTagWithPreReleaseName checks that AutoTag appends the configured
// pre-release name to the new tag.
func TestAutoTagWithPreReleaseName(t *testing.T) {
	r := newRepo(t, "test", "", true)
	defer cleanupTestRepo(t, r.repo)
	err := r.AutoTag()
	if err != nil {
		t.Fatal("AutoTag failed ", err)
	}
	tags, err := r.repo.GetTags()
	checkFatal(t, err)
	expect := []string{"v1.0.2-test", "v1.0.1"}
	if !compareValues(expect, tags) {
		t.Fatalf("TestAutoTagWithPreReleaseName expected '%+v' got '%+v'\n", expect, tags)
	}
}
// TestAutoTagWithPreReleaseNameNoPrefix is the prefix-less variant of the above.
func TestAutoTagWithPreReleaseNameNoPrefix(t *testing.T) {
	r := newRepo(t, "test", "", false)
	defer cleanupTestRepo(t, r.repo)
	err := r.AutoTag()
	if err != nil {
		t.Fatal("AutoTag failed ", err)
	}
	tags, err := r.repo.GetTags()
	checkFatal(t, err)
	expect := []string{"1.0.2-test", "1.0.1"}
	if !compareValues(expect, tags) {
		t.Fatalf("TestAutoTagWithPreReleaseNameNoPrefix expected '%+v' got '%+v'\n", expect, tags)
	}
}
// TestAutoTagWithPreReleaseTimestampLayout_Epoch checks that AutoTag appends an
// epoch-seconds pre-release timestamp.
func TestAutoTagWithPreReleaseTimestampLayout_Epoch(t *testing.T) {
	r := newRepo(t, "", "epoch", true)
	defer cleanupTestRepo(t, r.repo)
	err := r.AutoTag()
	// NOTE(review): the expected timestamp is captured after AutoTag runs; if
	// the clock crosses a second boundary in between, the expectation will not
	// match the created tag — confirm this cannot flake.
	timeNow := time.Now().UTC()
	if err != nil {
		t.Fatal("AutoTag failed ", err)
	}
	tags, err := r.repo.GetTags()
	checkFatal(t, err)
	expect := []string{fmt.Sprintf("v1.0.2-%d", timeNow.Unix()), "v1.0.1"}
	if !compareValues(expect, tags) {
		t.Fatalf("TestAutoTagWithPreReleaseTimestampLayout_Epoch expected '%+v' got '%+v'\n", expect, tags)
	}
}
// TestAutoTagWithPreReleaseTimestampLayout_EpochNoPrefix is the prefix-less
// variant of the above (same timing caveat applies).
func TestAutoTagWithPreReleaseTimestampLayout_EpochNoPrefix(t *testing.T) {
	r := newRepo(t, "", "epoch", false)
	defer cleanupTestRepo(t, r.repo)
	err := r.AutoTag()
	timeNow := time.Now().UTC()
	if err != nil {
		t.Fatal("AutoTag failed ", err)
	}
	tags, err := r.repo.GetTags()
	checkFatal(t, err)
	expect := []string{fmt.Sprintf("1.0.2-%d", timeNow.Unix()), "1.0.1"}
	if !compareValues(expect, tags) {
		t.Fatalf("TestAutoTagWithPreReleaseTimestampLayout_EpochNoPrefix expected '%+v' got '%+v'\n", expect, tags)
	}
}
// testDatetimeLayout is the Go reference-time layout (yyyyMMddHHmmss) used by
// the datetime pre-release timestamp tests.
const testDatetimeLayout = "20060102150405"
// TestAutoTagWithPreReleaseTimestampLayout_Datetime checks that AutoTag appends
// a datetime-formatted pre-release timestamp.
func TestAutoTagWithPreReleaseTimestampLayout_Datetime(t *testing.T) {
	r := newRepo(t, "", testDatetimeLayout, true)
	defer cleanupTestRepo(t, r.repo)
	err := r.AutoTag()
	// NOTE(review): the expected timestamp is captured after AutoTag runs; a
	// second-boundary crossing in between would make this flake — confirm.
	timeNow := time.Now().UTC()
	if err != nil {
		t.Fatal("AutoTag failed ", err)
	}
	tags, err := r.repo.GetTags()
	checkFatal(t, err)
	expect := []string{fmt.Sprintf("v1.0.2-%s", timeNow.Format(testDatetimeLayout)), "v1.0.1"}
	if !compareValues(expect, tags) {
		// Fixed: message previously said "AutoBump", inconsistent with the
		// sibling tests which report their own name.
		t.Fatalf("TestAutoTagWithPreReleaseTimestampLayout_Datetime expected '%+v' got '%+v'\n", expect, tags)
	}
}
// TestAutoTagWithPreReleaseTimestampLayout_DatetimeNoPrefix is the prefix-less
// variant of the datetime pre-release timestamp test.
func TestAutoTagWithPreReleaseTimestampLayout_DatetimeNoPrefix(t *testing.T) {
	r := newRepo(t, "", testDatetimeLayout, false)
	defer cleanupTestRepo(t, r.repo)
	err := r.AutoTag()
	timeNow := time.Now().UTC()
	if err != nil {
		t.Fatal("AutoTag failed ", err)
	}
	tags, err := r.repo.GetTags()
	checkFatal(t, err)
	expect := []string{fmt.Sprintf("1.0.2-%s", timeNow.Format(testDatetimeLayout)), "1.0.1"}
	if !compareValues(expect, tags) {
		// Fixed: message previously said "AutoBump", inconsistent with the
		// sibling tests which report their own name.
		t.Fatalf("TestAutoTagWithPreReleaseTimestampLayout_DatetimeNoPrefix expected '%+v' got '%+v'\n", expect, tags)
	}
}
// TestAutoTagWithPreReleaseNameAndPreReleaseTimestampLayout checks that a
// pre-release name and an epoch timestamp are combined as "name.timestamp".
func TestAutoTagWithPreReleaseNameAndPreReleaseTimestampLayout(t *testing.T) {
	r := newRepo(t, "test", "epoch", true)
	defer cleanupTestRepo(t, r.repo)
	err := r.AutoTag()
	timeNow := time.Now().UTC()
	if err != nil {
		t.Fatal("AutoTag failed ", err)
	}
	tags, err := r.repo.GetTags()
	checkFatal(t, err)
	expect := []string{fmt.Sprintf("v1.0.2-test.%d", timeNow.Unix()), "v1.0.1"}
	if !compareValues(expect, tags) {
		t.Fatalf("TestAutoTagWithPreReleaseNameAndPreReleaseTimestampLayout expected '%+v' got '%+v'\n", expect, tags)
	}
}
// TestAutoTagWithPreReleaseNameAndPreReleaseTimestampLayoutNoPrefix is the
// prefix-less variant of the above.
func TestAutoTagWithPreReleaseNameAndPreReleaseTimestampLayoutNoPrefix(t *testing.T) {
	r := newRepo(t, "test", "epoch", false)
	defer cleanupTestRepo(t, r.repo)
	err := r.AutoTag()
	timeNow := time.Now().UTC()
	if err != nil {
		t.Fatal("AutoTag failed ", err)
	}
	tags, err := r.repo.GetTags()
	checkFatal(t, err)
	expect := []string{fmt.Sprintf("1.0.2-test.%d", timeNow.Unix()), "1.0.1"}
	if !compareValues(expect, tags) {
		t.Fatalf("TestAutoTagWithPreReleaseNameAndPreReleaseTimestampLayoutNoPrefix expected '%+v' got '%+v'\n", expect, tags)
	}
}
// compareValues reports whether every entry of expect is present in tags.
func compareValues(expect []string, tags []string) bool {
	for _, want := range expect {
		if !hasValue(tags, want) {
			return false
		}
	}
	return true
}
// hasValue reports whether value occurs in tags.
func hasValue(tags []string, value string) bool {
	for i := range tags {
		if tags[i] == value {
			return true
		}
	}
	return false
}
// prepareRepository builds a plain seeded test repo, runs AutoTag, and returns
// the resulting tag list.
func prepareRepository(t *testing.T, prefix bool) []string {
	r := newRepo(t, "", "", prefix)
	defer cleanupTestRepo(t, r.repo)
	err := r.AutoTag()
	if err != nil {
		t.Fatal("AutoTag failed ", err)
	}
	tags, err := r.repo.GetTags()
	checkFatal(t, err)
	return tags
}
// prepareRepositoryPreReleasedTag builds a test repo that already carries a
// pre-release tag, runs AutoTag, and returns the resulting tag list.
func prepareRepositoryPreReleasedTag(t *testing.T, prefix bool) []string {
	r := newRepoWithPreReleasedTag(t, prefix)
	defer cleanupTestRepo(t, r.repo)
	err := r.AutoTag()
	if err != nil {
		t.Fatal("AutoTag failed ", err)
	}
	tags, err := r.repo.GetTags()
	checkFatal(t, err)
	return tags
}
|
package backend
import "github.com/anabiozz/yotunheim/backend/common/datastore"
// Metric type identifiers.
//
// Fixed: Gauge, Untyped, and Summary were previously declared without a value;
// in a Go const block an omitted expression repeats the previous one, so all
// three silently evaluated to "counter".
const (
	// Counter is the counter metric type.
	Counter = "counter"
	// Gauge is the gauge metric type.
	Gauge = "gauge"
	// Untyped is the untyped metric type.
	Untyped = "untyped"
	// Summary is the summary metric type.
	Summary = "summary"
	// Histogram is the histogram metric type.
	Histogram = "histogram"
	// Table is the table metric type.
	Table = "table"
)
// Accumulator collects metrics: single InfluxDB metrics via AddMetric and
// whole datastore responses via AddMetrics.
type Accumulator interface {
	AddMetric(datastore.InfluxMetrics)
	AddMetrics(datastore.Response)
}
|
package main
import "fmt"
// Complete the alternatingCharacters function below.
func alternatingCharacters(s string) int32 {
count := int32(0)
last := s[0]
for i := 1; i < len(s); i++ {
if last == s[i] {
count++
}
last = s[i]
}
return count
}
// main runs alternatingCharacters over a fixed set of sample inputs and prints
// each input alongside its result.
func main() {
	samples := []string{"AAAA", "BBBBB", "ABABABAB", "BABABA", "AAABBB"}
	for _, sample := range samples {
		fmt.Println(sample, alternatingCharacters(sample))
	}
}
|
package model
// OrderItem represents a single line of a hotel order; JSON field names are
// the Portuguese identifiers used by the upstream API.
type OrderItem struct {
	HotelId int64 `json:"hotel,omitempty"`
	OrderItemId int64 `json:"id,omitempty"`
	OrderId int `json:"pedido,omitempty"`
	ProductId int `json:"id_produto,omitempty"`
	Product Product `json:"produto,omitempty"`
	UserId int `json:"usuario,omitempty"`
	Sequence int64 `json:"sequencia,omitempty"`
	EntryDateTime string `json:"data_lancto,omitempty"`
	DeliveryDateTime string `json:"data_entrega,omitempty"`
	Quantity float64 `json:"quantidade,omitempty"`
	UnitValue float64 `json:"valor_unitario,omitempty"`
	TotalValue float64 `json:"valor_total,omitempty"`
	ServiceTax float64 `json:"taxa_servico,omitempty"`
	ServiceTaxValue float64 `json:"valor_taxa_servico,omitempty"`
	GeneralTotalValue float64 `json:"valor_total_geral,omitempty"`
	Canceled string `json:"cancelado,omitempty"`
	PreparingTime int64 `json:"tempo_preparo,omitempty"`
	Comment string `json:"observacao,omitempty"`
	Printed string `json:"impresso,omitempty"`
	Discount float64 `json:"desconto,omitempty"`
	DiscoultValue float64 `json:"valor_desconto,omitempty"` // NOTE(review): name is a typo of "DiscountValue", but it is exported — renaming would break callers
	Accrual float64 `json:"acrescimo,omitempty"`
	AccrualValue float64 `json:"valor_acrescimo,omitempty"`
}
|
// Copyright (c) Mainflux
// SPDX-License-Identifier: Apache-2.0
package http
import "github.com/mainflux/mainflux/things"
// identifyReq carries the access token used to identify a thing.
type identifyReq struct {
	Token string `json:"token"`
}
// validate checks that the identify request carries a non-empty token.
func (req identifyReq) validate() error {
	if req.Token != "" {
		return nil
	}
	return things.ErrUnauthorizedAccess
}
// canAccessByKeyReq carries the channel id (from the URL) and the thing's key
// used to check channel access.
type canAccessByKeyReq struct {
	chanID string
	Token string `json:"token"`
}
// validate checks that both the token and the channel id are present.
func (req canAccessByKeyReq) validate() error {
	if req.Token != "" && req.chanID != "" {
		return nil
	}
	return things.ErrUnauthorizedAccess
}
// canAccessByIDReq carries the channel id (from the URL) and the thing id used
// to check channel access.
type canAccessByIDReq struct {
	chanID string
	ThingID string `json:"thing_id"`
}
// validate checks that both the thing id and the channel id are present.
func (req canAccessByIDReq) validate() error {
	if req.ThingID != "" && req.chanID != "" {
		return nil
	}
	return things.ErrUnauthorizedAccess
}
|
package concepts
import (
"encoding/json"
"errors"
"fmt"
"net/http"
fthealth "github.com/Financial-Times/go-fthealth/v1_1"
)
// conceptSearchQueryParam is the query parameter used to pass concept uuids.
const conceptSearchQueryParam = "ids"
var (
	// ErrNoConceptsToSearch indicates the provided uuids array was empty
	ErrNoConceptsToSearch = errors.New("no concept ids to search for")
	// ErrConceptIDsAreEmpty indicates the provided uuids array only contained empty string
	ErrConceptIDsAreEmpty = errors.New("provided concept ids are empty")
)
// Search retrieves concepts by uuid from the concept-search API and exposes a
// health check for the downstream service.
type Search interface {
	ByIDs(tid string, uuids ...string) (map[string]Concept, error)
	Check() fthealth.Check
}
// conceptSearchAPI is the HTTP-backed implementation of Search.
type conceptSearchAPI struct {
	client *http.Client // shared HTTP client used for all calls
	uri string // base URI of the concept-search service
}
// conceptSearchResponse mirrors the JSON body returned by /concepts.
type conceptSearchResponse struct {
	Concepts []Concept `json:"concepts"`
}
// NewSearch returns a Search backed by the concept-search API at uri, using
// the supplied HTTP client.
func NewSearch(client *http.Client, uri string) Search {
	return &conceptSearchAPI{client: client, uri: uri}
}
// ByIDs fetches the concepts for the given uuids from the concept-search API
// and returns them keyed by uuid (thing prefix stripped). The transaction id
// tid is propagated to the downstream service via request headers.
func (c *conceptSearchAPI) ByIDs(tid string, uuids ...string) (map[string]Concept, error) {
	if err := validateIDs(uuids); err != nil {
		return nil, err
	}
	req, err := http.NewRequest("GET", c.uri+"/concepts", nil)
	if err != nil {
		return nil, err
	}
	query := req.URL.Query()
	for _, id := range uuids {
		query.Add(conceptSearchQueryParam, id)
	}
	req.URL.RawQuery = query.Encode()
	stampRequest(req, tid)
	resp, err := c.client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, decodeResponseError(resp)
	}
	var payload conceptSearchResponse
	if err := json.NewDecoder(resp.Body).Decode(&payload); err != nil {
		return nil, err
	}
	byUUID := make(map[string]Concept, len(payload.Concepts))
	for _, concept := range payload.Concepts {
		if uuid, ok := stripThingPrefix(concept.ID); ok {
			byUUID[uuid] = concept
		}
	}
	return byUUID, nil
}
func stampRequest(req *http.Request, tid string) {
req.Header.Add("User-Agent", "UPP internal-concordances")
req.Header.Add("X-Request-Id", tid)
}
// validateIDs rejects an empty uuid list and a list that contains only empty
// strings; any single non-empty id makes the list valid.
func validateIDs(ids []string) error {
	if len(ids) == 0 {
		return ErrNoConceptsToSearch
	}
	allEmpty := true
	for _, id := range ids {
		if id != "" {
			allEmpty = false
			break
		}
	}
	if allEmpty {
		return ErrConceptIDsAreEmpty
	}
	return nil
}
// Check returns the fthealth check descriptor for the concept-search API; its
// Checker delegates to the /__gtg probe.
func (c *conceptSearchAPI) Check() fthealth.Check {
	return fthealth.Check{
		ID: "concept-search-api",
		BusinessImpact: "Concept information can not be returned to clients",
		Name: "Concept Search API Healthcheck",
		PanicGuide: "https://runbooks.in.ft.com/internal-concordances",
		Severity: 2,
		TechnicalSummary: "Concept Search API is not available",
		Checker: c.gtg,
	}
}
// gtg probes the concept-search API's /__gtg endpoint; any non-200 response or
// transport error is reported as a failed check.
func (c *conceptSearchAPI) gtg() (string, error) {
	req, err := http.NewRequest("GET", c.uri+"/__gtg", nil)
	if err != nil {
		return "", err
	}
	req.Header.Add("User-Agent", "UPP internal-concordances")
	resp, err := c.client.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("GTG returned a non-200 HTTP status: %v", resp.StatusCode)
	}
	return "Concept Search API is good to go", nil
}
|
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
package patch
import (
"reflect"
"testing"
"github.com/DataDog/datadog-operator/apis/datadoghq/v1alpha1"
apiutils "github.com/DataDog/datadog-operator/apis/utils"
"github.com/google/go-cmp/cmp"
)
// TestDatadogAgent is a table-driven test for CopyAndPatchDatadogAgent: it
// must mirror Spec.Agent.Log into Spec.Features.LogCollection when the latter
// is unset, leave an existing Features.LogCollection untouched, and report
// whether any patch was applied.
func TestDatadogAgent(t *testing.T) {
	tests := []struct {
		name string
		da *v1alpha1.DatadogAgent
		want *v1alpha1.DatadogAgent
		wantPatched bool
	}{
		{
			name: "nothing to patch",
			da: &v1alpha1.DatadogAgent{},
			want: &v1alpha1.DatadogAgent{},
			wantPatched: false,
		},
		{
			name: "patch logCollection",
			da: &v1alpha1.DatadogAgent{
				Spec: v1alpha1.DatadogAgentSpec{
					Agent: v1alpha1.DatadogAgentSpecAgentSpec{
						Log: &v1alpha1.LogCollectionConfig{
							Enabled: apiutils.NewBoolPointer(true),
						},
					},
				},
			},
			want: &v1alpha1.DatadogAgent{
				Spec: v1alpha1.DatadogAgentSpec{
					Features: v1alpha1.DatadogFeatures{
						LogCollection: &v1alpha1.LogCollectionConfig{
							Enabled: apiutils.NewBoolPointer(true),
						},
					},
					Agent: v1alpha1.DatadogAgentSpecAgentSpec{
						Log: &v1alpha1.LogCollectionConfig{
							Enabled: apiutils.NewBoolPointer(true),
						},
					},
				},
			},
			wantPatched: true,
		},
		{
			name: "don't patch existing LogCollection",
			da: &v1alpha1.DatadogAgent{
				Spec: v1alpha1.DatadogAgentSpec{
					Features: v1alpha1.DatadogFeatures{
						LogCollection: &v1alpha1.LogCollectionConfig{
							Enabled: apiutils.NewBoolPointer(false),
						},
					},
					Agent: v1alpha1.DatadogAgentSpecAgentSpec{
						Log: &v1alpha1.LogCollectionConfig{
							Enabled: apiutils.NewBoolPointer(true),
						},
					},
				},
			},
			want: &v1alpha1.DatadogAgent{
				Spec: v1alpha1.DatadogAgentSpec{
					Features: v1alpha1.DatadogFeatures{
						LogCollection: &v1alpha1.LogCollectionConfig{
							Enabled: apiutils.NewBoolPointer(false),
						},
					},
					Agent: v1alpha1.DatadogAgentSpecAgentSpec{
						Log: &v1alpha1.LogCollectionConfig{
							Enabled: apiutils.NewBoolPointer(true),
						},
					},
				},
			},
			wantPatched: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// got1 reports whether the CR was modified by the patch.
			got, got1 := CopyAndPatchDatadogAgent(tt.da)
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("DatadogAgent() %s", cmp.Diff(got, tt.want))
			}
			if got1 != tt.wantPatched {
				t.Errorf("DatadogAgent() got1 = %v, want %v", got1, tt.wantPatched)
			}
		})
	}
}
|
// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package util
import (
"bufio"
"context"
"encoding/json"
"io/ioutil"
"math"
"os"
"regexp"
"strconv"
"strings"
"chromiumos/tast/common/testexec"
"chromiumos/tast/errors"
"chromiumos/tast/testing"
)
// devRegExp matches device names treated as fixed storage: names ending in
// "sda", "nvmeXnY", or "mmcblkX".
var devRegExp = regexp.MustCompile(`(sda|nvme\dn\d|mmcblk\d)$`)
// Blockdevice represents information about a single storage device as reported by lsblk.
// Size is in bytes (lsblk is invoked with -b in ReadDiskInfo).
type Blockdevice struct {
	Name string `json:"name"`
	Type string `json:"type"`
	Hotplug bool `json:"hotplug"`
	Size int64 `json:"size"`
	State string `json:"state"`
}
// DiskInfo is a serializable structure representing output of lsblk command.
type DiskInfo struct {
	Blockdevices []*Blockdevice `json:"blockdevices"`
}
// MainDevice returns the main storage device from the list of available
// devices — the one with the largest size when multiple are present. An error
// is returned when the list is empty.
func (d DiskInfo) MainDevice() (*Blockdevice, error) {
	var largest *Blockdevice
	for _, dev := range d.Blockdevices {
		if largest == nil || dev.Size > largest.Size {
			largest = dev
		}
	}
	if largest == nil {
		return nil, errors.Errorf("unable to identify main storage device from devices: %+v", d)
	}
	return largest, nil
}
// DeviceCount returns number of found valid block devices on the system.
func (d DiskInfo) DeviceCount() int {
	return len(d.Blockdevices)
}
// CheckMainDeviceSize verifies that the size of the main storage disk is more than
// the given minimal size (bytes). Otherwise, an error is returned.
func (d DiskInfo) CheckMainDeviceSize(minSize int64) error {
	device, err := d.MainDevice()
	if err != nil {
		return errors.Wrap(err, "failed getting main storage disk")
	}
	if device.Size < minSize {
		return errors.Errorf("main storage device size too small: %v", device.Size)
	}
	return nil
}
// SaveDiskInfo writes the disk information to fileName as indented JSON.
func (d DiskInfo) SaveDiskInfo(fileName string) error {
	data, err := json.MarshalIndent(d, "", " ")
	if err != nil {
		return errors.Wrap(err, "failed marshalling disk info to JSON")
	}
	if err := ioutil.WriteFile(fileName, data, 0644); err != nil {
		return errors.Wrap(err, "failed saving disk info to file")
	}
	return nil
}
// SizeInGB returns size of the main block device in whole GB's
// (decimal gigabytes, rounded to the nearest integer).
func (d DiskInfo) SizeInGB() (int, error) {
	device, err := d.MainDevice()
	if err != nil {
		return 0, errors.Wrap(err, "failed getting main storage disk")
	}
	return int(math.Round(float64(device.Size) / 1e9)), nil
}
// IsEMMC returns whether the device is an eMMC device
// (the path contains an mmcblk device name).
func IsEMMC(testPath string) bool {
	return strings.Contains(testPath, "mmcblk")
}
// IsNVME returns whether the device is an NVMe device
// (the path contains an nvme device name).
func IsNVME(testPath string) bool {
	return strings.Contains(testPath, "nvme")
}
// IsUFS returns whether the device is a UFS device: it must look like
// /dev/sdX and expose a unit_descriptor directory in sysfs.
func IsUFS(testPath string) bool {
	if !strings.Contains(testPath, "/dev/sd") {
		return false
	}
	// Extract the "sdX" part out of testPath, guarding against paths that are
	// too short to hold a full device name (e.g. a bare "/dev/sd" previously
	// caused an out-of-range panic on the [0:3] slice).
	parts := strings.Split(testPath, "/")
	if len(parts) < 3 || len(parts[2]) < 3 {
		return false
	}
	dev := parts[2][0:3]
	// UFS device should have a unit descriptor directory in sysfs.
	target := "/sys/block/" + dev + "/device/unit_descriptor"
	if _, err := os.Stat(target); os.IsNotExist(err) {
		return false
	}
	return true
}
// GetNVMEIdNSFeature returns the feature value for the NVMe disk using
// nvme id-ns: it greps the feature line and returns the trimmed text after
// the first ':'.
func GetNVMEIdNSFeature(ctx context.Context, diskPath, feature string) (string, error) {
	cmd := "nvme id-ns -n 1 " + diskPath + " | grep " + feature
	out, err := testexec.CommandContext(ctx, "sh", "-c", cmd).Output(testexec.DumpLogOnError)
	if err != nil {
		return "0", errors.Wrap(err, "failed to read NVMe id-ns feature")
	}
	value := string(out)
	// NOTE(review): assumes the grep output contains a ':'; the [1] index
	// would panic otherwise — confirm nvme-cli always prints "name : value".
	return strings.TrimSpace(strings.Split(strings.TrimSpace(value), ":")[1]), nil
}
// PartitionSize returns size (in bytes) of the given disk partition by looking
// up its 1 KiB block count in /proc/partitions.
func PartitionSize(ctx context.Context, partition string) (uint64, error) {
	devNames := strings.Split(partition, "/")
	partitionDevName := devNames[len(devNames)-1]
	f, err := os.Open("/proc/partitions")
	if err != nil {
		return 0, errors.Wrap(err, "failed to open /proc/partitions file")
	}
	defer f.Close()
	scanner := bufio.NewScanner(f)
	re := regexp.MustCompile(`\s+`)
	var blocksStr string
	for scanner.Scan() {
		line := scanner.Text()
		if strings.HasSuffix(line, partitionDevName) {
			// Third whitespace-separated column is the block count.
			blocksStr = re.Split(strings.TrimSpace(line), -1)[2]
			break
		}
	}
	if err := scanner.Err(); err != nil {
		return 0, errors.Wrap(err, "failed to read disk partitions file")
	}
	if len(blocksStr) == 0 {
		// Fixed: this previously used errors.Wrapf(err, ...) with a nil err,
		// which yields a nil error and silently returned (0, nil) when the
		// partition was missing.
		return 0, errors.Errorf("partition %s not found in partitions file", partitionDevName)
	}
	blocks, err := strconv.ParseFloat(blocksStr, 64)
	if err != nil {
		return 0, errors.Wrapf(err, "failed parsing size of partition: %s", partition)
	}
	return uint64(blocks) * 1024, nil
}
// RootPartitionForTest returns root partition to use for the tests.
// When the fixed destination drive is the current root device, the spare
// (inactive) root partition is returned; otherwise the drive itself is used.
func RootPartitionForTest(ctx context.Context) (string, error) {
	diskName, err := fixedDstDrive(ctx)
	if err != nil {
		return "", errors.Wrap(err, "failed selecting free root partition")
	}
	rootDev, err := rootDevice(ctx)
	if err != nil {
		return "", errors.Wrap(err, "failed selecting free root device")
	}
	testing.ContextLog(ctx, "Diskname: ", diskName, ", root: ", rootDev)
	if diskName == rootDev {
		freeRootPart, err := freeRootPartition(ctx)
		if err != nil {
			return "", errors.Wrap(err, "failed selecting free root partition")
		}
		return freeRootPart, nil
	}
	return diskName, nil
}
// fixedDstDrive returns the fixed destination drive device name as reported by
// the ChromeOS test system scripts.
func fixedDstDrive(ctx context.Context) (string, error) {
	// Reading fixed drive device name as reported by ChromeOS test system scripts.
	const command = ". /usr/sbin/write_gpt.sh;. /usr/share/misc/chromeos-common.sh;load_base_vars;get_fixed_dst_drive"
	out, err := testexec.CommandContext(ctx, "sh", "-c", command).Output(testexec.DumpLogOnError)
	if err != nil {
		return "", errors.Wrap(err, "failed to read fixed DST drive info")
	}
	return strings.TrimSpace(string(out)), nil
}
// rootDevice returns the base root device (e.g. /dev/nvme0n1) via `rootdev -s -d`.
func rootDevice(ctx context.Context) (string, error) {
	out, err := testexec.CommandContext(ctx, "rootdev", "-s", "-d").Output(testexec.DumpLogOnError)
	if err != nil {
		return "", errors.Wrap(err, "failed to read root device info")
	}
	return strings.TrimSpace(string(out)), nil
}
// rootDevicePartitionName returns the active root partition name
// (e.g. /dev/nvme0n1p3) via `rootdev -s`.
func rootDevicePartitionName(ctx context.Context) (string, error) {
	out, err := testexec.CommandContext(ctx, "rootdev", "-s").Output(testexec.DumpLogOnError)
	if err != nil {
		// Fixed typo: "parition" -> "partition".
		return "", errors.Wrap(err, "failed to read root device partition name info")
	}
	return strings.TrimSpace(string(out)), nil
}
// freeRootPartition returns the spare (inactive) root partition that pairs
// with the currently active one. For the main storage device the mapping is
// fixed: root partition index 3 pairs with 5 and vice versa (e.g. the free
// partition is /dev/nvme0n1p5 when the root partition is /dev/nvme0n1p3).
func freeRootPartition(ctx context.Context) (string, error) {
	partition, err := rootDevicePartitionName(ctx)
	if err != nil {
		return "", errors.Wrap(err, "failed to read root partition info")
	}
	if len(partition) == 0 {
		return "", errors.New("error reading root partition info")
	}
	partitionIndex := partition[len(partition)-1:]
	if partitionIndex != "3" && partitionIndex != "5" {
		// Fixed typo: "parition" -> "partition".
		return "", errors.Errorf("invalid index of root partition: %s", partitionIndex)
	}
	spareRootMap := map[string]string{"3": "5", "5": "3"}
	return partition[:len(partition)-1] + spareRootMap[partitionIndex], nil
}
// ReadDiskInfo returns storage information as reported by lsblk tool
// (sizes in bytes via -b). Only allowed disk devices are returned.
func ReadDiskInfo(ctx context.Context) (*DiskInfo, error) {
	cmd := testexec.CommandContext(ctx, "lsblk", "-b", "-d", "-J", "-o", "NAME,TYPE,HOTPLUG,SIZE,STATE")
	out, err := cmd.Output(testexec.DumpLogOnError)
	if err != nil {
		return nil, errors.Wrap(err, "failed to run lsblk")
	}
	diskInfo, err := parseDiskInfo(out)
	if err != nil {
		return nil, err
	}
	return removeDisallowedDevices(diskInfo), nil
}
// parseDiskInfo decodes lsblk's JSON output into a DiskInfo.
func parseDiskInfo(out []byte) (*DiskInfo, error) {
	var result DiskInfo
	// TODO(dlunev): make sure the format is the same for all kernel versions.
	if err := json.Unmarshal(out, &result); err != nil {
		return nil, errors.Wrap(err, "failed to parse lsblk result")
	}
	return &result, nil
}
// removeDisallowedDevices drops every entry that is not a "disk" or whose name
// does not match devRegExp, returning a new DiskInfo with the survivors.
// TODO(dlunev): We should consider mmc devices only if they are 'root' devices
// for there is no reliable way to differentiate removable mmc.
func removeDisallowedDevices(diskInfo *DiskInfo) *DiskInfo {
	filtered := &DiskInfo{}
	for _, dev := range diskInfo.Blockdevices {
		if dev.Type != "disk" || !devRegExp.MatchString(dev.Name) {
			continue
		}
		filtered.Blockdevices = append(filtered.Blockdevices, dev)
	}
	return filtered
}
|
package templates
import (
"text/template"
)
// Parse compiles t as a text/template with this package's numeric and string
// helper function maps installed.
func Parse(t string) (*template.Template, error) {
	return template.New("").Funcs(numericFuncs()).Funcs(stringFuncs()).Parse(t)
}
|
package odoo
import (
"fmt"
)
// CalendarEvent represents calendar.event model.
//
// Fix: every struct tag previously used the misspelling "omptempty", which
// the xmlrpc marshaller does not recognize, so unset fields were never
// omitted. Corrected to the standard "omitempty" option.
type CalendarEvent struct {
	LastUpdate               *Time      `xmlrpc:"__last_update,omitempty"`
	Active                   *Bool      `xmlrpc:"active,omitempty"`
	ActivityIds              *Relation  `xmlrpc:"activity_ids,omitempty"`
	AlarmIds                 *Relation  `xmlrpc:"alarm_ids,omitempty"`
	Allday                   *Bool      `xmlrpc:"allday,omitempty"`
	AttendeeIds              *Relation  `xmlrpc:"attendee_ids,omitempty"`
	AttendeeStatus           *Selection `xmlrpc:"attendee_status,omitempty"`
	Byday                    *Selection `xmlrpc:"byday,omitempty"`
	CategIds                 *Relation  `xmlrpc:"categ_ids,omitempty"`
	Count                    *Int       `xmlrpc:"count,omitempty"`
	CreateDate               *Time      `xmlrpc:"create_date,omitempty"`
	CreateUid                *Many2One  `xmlrpc:"create_uid,omitempty"`
	Day                      *Int       `xmlrpc:"day,omitempty"`
	Description              *String    `xmlrpc:"description,omitempty"`
	DisplayName              *String    `xmlrpc:"display_name,omitempty"`
	DisplayStart             *String    `xmlrpc:"display_start,omitempty"`
	DisplayTime              *String    `xmlrpc:"display_time,omitempty"`
	Duration                 *Float     `xmlrpc:"duration,omitempty"`
	EndType                  *Selection `xmlrpc:"end_type,omitempty"`
	FinalDate                *Time      `xmlrpc:"final_date,omitempty"`
	Fr                       *Bool      `xmlrpc:"fr,omitempty"`
	Id                       *Int       `xmlrpc:"id,omitempty"`
	Interval                 *Int       `xmlrpc:"interval,omitempty"`
	IsAttendee               *Bool      `xmlrpc:"is_attendee,omitempty"`
	IsHighlighted            *Bool      `xmlrpc:"is_highlighted,omitempty"`
	Location                 *String    `xmlrpc:"location,omitempty"`
	MessageChannelIds        *Relation  `xmlrpc:"message_channel_ids,omitempty"`
	MessageFollowerIds       *Relation  `xmlrpc:"message_follower_ids,omitempty"`
	MessageIds               *Relation  `xmlrpc:"message_ids,omitempty"`
	MessageIsFollower        *Bool      `xmlrpc:"message_is_follower,omitempty"`
	MessageLastPost          *Time      `xmlrpc:"message_last_post,omitempty"`
	MessageNeedaction        *Bool      `xmlrpc:"message_needaction,omitempty"`
	MessageNeedactionCounter *Int       `xmlrpc:"message_needaction_counter,omitempty"`
	MessagePartnerIds        *Relation  `xmlrpc:"message_partner_ids,omitempty"`
	MessageUnread            *Bool      `xmlrpc:"message_unread,omitempty"`
	MessageUnreadCounter     *Int       `xmlrpc:"message_unread_counter,omitempty"`
	Mo                       *Bool      `xmlrpc:"mo,omitempty"`
	MonthBy                  *Selection `xmlrpc:"month_by,omitempty"`
	Name                     *String    `xmlrpc:"name,omitempty"`
	OpportunityId            *Many2One  `xmlrpc:"opportunity_id,omitempty"`
	PartnerId                *Many2One  `xmlrpc:"partner_id,omitempty"`
	PartnerIds               *Relation  `xmlrpc:"partner_ids,omitempty"`
	Privacy                  *Selection `xmlrpc:"privacy,omitempty"`
	Recurrency               *Bool      `xmlrpc:"recurrency,omitempty"`
	RecurrentId              *Int       `xmlrpc:"recurrent_id,omitempty"`
	RecurrentIdDate          *Time      `xmlrpc:"recurrent_id_date,omitempty"`
	ResId                    *Int       `xmlrpc:"res_id,omitempty"`
	ResModel                 *String    `xmlrpc:"res_model,omitempty"`
	ResModelId               *Many2One  `xmlrpc:"res_model_id,omitempty"`
	Rrule                    *String    `xmlrpc:"rrule,omitempty"`
	RruleType                *Selection `xmlrpc:"rrule_type,omitempty"`
	Sa                       *Bool      `xmlrpc:"sa,omitempty"`
	ShowAs                   *Selection `xmlrpc:"show_as,omitempty"`
	Start                    *Time      `xmlrpc:"start,omitempty"`
	StartDate                *Time      `xmlrpc:"start_date,omitempty"`
	StartDatetime            *Time      `xmlrpc:"start_datetime,omitempty"`
	State                    *Selection `xmlrpc:"state,omitempty"`
	Stop                     *Time      `xmlrpc:"stop,omitempty"`
	StopDate                 *Time      `xmlrpc:"stop_date,omitempty"`
	StopDatetime             *Time      `xmlrpc:"stop_datetime,omitempty"`
	Su                       *Bool      `xmlrpc:"su,omitempty"`
	Th                       *Bool      `xmlrpc:"th,omitempty"`
	Tu                       *Bool      `xmlrpc:"tu,omitempty"`
	UserId                   *Many2One  `xmlrpc:"user_id,omitempty"`
	We                       *Bool      `xmlrpc:"we,omitempty"`
	WebsiteMessageIds        *Relation  `xmlrpc:"website_message_ids,omitempty"`
	WeekList                 *Selection `xmlrpc:"week_list,omitempty"`
	WriteDate                *Time      `xmlrpc:"write_date,omitempty"`
	WriteUid                 *Many2One  `xmlrpc:"write_uid,omitempty"`
}
// CalendarEvents represents array of calendar.event model.
type CalendarEvents []CalendarEvent

// CalendarEventModel is the odoo model name.
const CalendarEventModel = "calendar.event"

// Many2One convert CalendarEvent to *Many2One.
// The display-name argument is left empty; only the id is carried over.
func (ce *CalendarEvent) Many2One() *Many2One {
	return NewMany2One(ce.Id.Get(), "")
}
// CreateCalendarEvent creates a new calendar.event model and returns its id.
func (c *Client) CreateCalendarEvent(ce *CalendarEvent) (int64, error) {
	ids, err := c.CreateCalendarEvents([]*CalendarEvent{ce})
	if err != nil {
		return -1, err
	}
	if len(ids) == 0 {
		// No id reported back by the server; -1 signals "unknown id" to callers.
		return -1, nil
	}
	return ids[0], nil
}

// CreateCalendarEvents creates new calendar.event models and returns their ids.
func (c *Client) CreateCalendarEvents(ces []*CalendarEvent) ([]int64, error) {
	// Repack into []interface{} as required by the generic Create call.
	var vv []interface{}
	for _, v := range ces {
		vv = append(vv, v)
	}
	return c.Create(CalendarEventModel, vv)
}
// UpdateCalendarEvent updates an existing calendar.event record.
// The record to update is identified by ce.Id.
func (c *Client) UpdateCalendarEvent(ce *CalendarEvent) error {
	return c.UpdateCalendarEvents([]int64{ce.Id.Get()}, ce)
}

// UpdateCalendarEvents updates existing calendar.event records.
// All records (represented by ids) will be updated by ce values.
func (c *Client) UpdateCalendarEvents(ids []int64, ce *CalendarEvent) error {
	return c.Update(CalendarEventModel, ids, ce)
}

// DeleteCalendarEvent deletes an existing calendar.event record.
func (c *Client) DeleteCalendarEvent(id int64) error {
	return c.DeleteCalendarEvents([]int64{id})
}

// DeleteCalendarEvents deletes existing calendar.event records.
func (c *Client) DeleteCalendarEvents(ids []int64) error {
	return c.Delete(CalendarEventModel, ids)
}
// GetCalendarEvent gets calendar.event existing record.
// A missing id is reported as an error.
func (c *Client) GetCalendarEvent(id int64) (*CalendarEvent, error) {
	records, err := c.GetCalendarEvents([]int64{id})
	if err != nil {
		return nil, err
	}
	if records == nil || len(*records) == 0 {
		return nil, fmt.Errorf("id %v of calendar.event not found", id)
	}
	return &((*records)[0]), nil
}
// GetCalendarEvents gets calendar.event existing records.
func (c *Client) GetCalendarEvents(ids []int64) (*CalendarEvents, error) {
	var result CalendarEvents
	if err := c.Read(CalendarEventModel, ids, nil, &result); err != nil {
		return nil, err
	}
	return &result, nil
}
// FindCalendarEvent finds calendar.event record by querying it with criteria.
// Only the first match (Limit(1)) is returned; no match is an error.
func (c *Client) FindCalendarEvent(criteria *Criteria) (*CalendarEvent, error) {
	result := &CalendarEvents{}
	if err := c.SearchRead(CalendarEventModel, criteria, NewOptions().Limit(1), result); err != nil {
		return nil, err
	}
	if len(*result) == 0 {
		return nil, fmt.Errorf("calendar.event was not found with criteria %v", criteria)
	}
	return &((*result)[0]), nil
}
// FindCalendarEvents finds calendar.event records by querying it
// and filtering it with criteria and options.
func (c *Client) FindCalendarEvents(criteria *Criteria, options *Options) (*CalendarEvents, error) {
	var found CalendarEvents
	if err := c.SearchRead(CalendarEventModel, criteria, options, &found); err != nil {
		return nil, err
	}
	return &found, nil
}
// FindCalendarEventIds finds records ids by querying it
// and filtering it with criteria and options.
func (c *Client) FindCalendarEventIds(criteria *Criteria, options *Options) ([]int64, error) {
	ids, err := c.Search(CalendarEventModel, criteria, options)
	if err != nil {
		// Non-nil empty slice keeps callers that range over the result safe.
		return []int64{}, err
	}
	return ids, nil
}

// FindCalendarEventId finds record id by querying it with criteria.
// Only the first matching id is returned; no match is an error.
func (c *Client) FindCalendarEventId(criteria *Criteria, options *Options) (int64, error) {
	ids, err := c.Search(CalendarEventModel, criteria, options)
	if err != nil {
		return -1, err
	}
	if len(ids) > 0 {
		return ids[0], nil
	}
	return -1, fmt.Errorf("calendar.event was not found with criteria %v and options %v", criteria, options)
}
|
// Copyright 2017 The Cockroach Authors.
//
// Licensed as a CockroachDB Enterprise file under the Cockroach Community
// License (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
package main
import (
"context"
"crypto/aes"
"encoding/binary"
"encoding/hex"
"flag"
"fmt"
"io/ioutil"
"path/filepath"
"github.com/cockroachdb/cockroach/pkg/ccl/storageccl/engineccl/enginepbccl"
"github.com/cockroachdb/cockroach/pkg/storage/enginepb"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/protoutil"
"github.com/cockroachdb/errors"
)
// Well-known file names inside a CockroachDB store directory.
const fileRegistryPath = "COCKROACHDB_REGISTRY"
const keyRegistryPath = "COCKROACHDB_DATA_KEYS"
const currentPath = "CURRENT"
const optionsPathGlob = "OPTIONS-*"

// Command-line flags selecting the store directory and the active store key.
var dbDir = flag.String("db-dir", "", "path to the db directory")
var storeKeyPath = flag.String("store-key", "", "path to the active store key")

// fileEntry is one entry of the file registry: the env type of a file plus
// its (possibly plaintext) encryption settings.
type fileEntry struct {
	envType  enginepb.EnvType
	settings enginepbccl.EncryptionSettings
}
// String renders the entry for logging; key details (id, nonce, counter)
// are only included for encrypted entries.
func (f fileEntry) String() string {
	ret := fmt.Sprintf(" env type: %d, %s\n",
		f.envType, f.settings.EncryptionType)
	if f.settings.EncryptionType != enginepbccl.EncryptionType_Plaintext {
		ret += fmt.Sprintf(" keyID: %s\n nonce: % x\n counter: %d\n",
			f.settings.KeyId,
			f.settings.Nonce,
			f.settings.Counter)
	}
	return ret
}

// keyEntry is an in-memory record of an encryption key: its cipher type
// and raw key bytes.
type keyEntry struct {
	encryptionType enginepbccl.EncryptionType
	rawKey         []byte
}

// String prints the key type and length only — never the raw key material.
func (k keyEntry) String() string {
	return fmt.Sprintf("%s len: %d", k.encryptionType, len(k.rawKey))
}

// Global registries populated by the load* functions below and consumed
// by readFile for decryption.
var fileRegistry = map[string]fileEntry{}
var keyRegistry = map[string]keyEntry{}
func loadFileRegistry() {
data, err := ioutil.ReadFile(filepath.Join(*dbDir, fileRegistryPath))
if err != nil {
log.Fatalf(context.Background(), "could not read %s: %v", fileRegistryPath, err)
}
var reg enginepb.FileRegistry
if err := protoutil.Unmarshal(data, ®); err != nil {
log.Fatalf(context.Background(), "could not unmarshal %s: %v", fileRegistryPath, err)
}
log.Infof(context.Background(), "file registry version: %s", reg.Version)
log.Infof(context.Background(), "file registry contains %d entries", len(reg.Files))
for name, entry := range reg.Files {
var encSettings enginepbccl.EncryptionSettings
settings := entry.EncryptionSettings
if err := protoutil.Unmarshal(settings, &encSettings); err != nil {
log.Fatalf(context.Background(), "could not unmarshal encryption setting for %s: %v", name, err)
}
fileRegistry[name] = fileEntry{entry.EnvType, encSettings}
log.Infof(context.Background(), " %-30s level: %-8s type: %-12s keyID: %s", name, entry.EnvType, encSettings.EncryptionType, encSettings.KeyId[:8])
}
}
// loadStoreKey reads the active store key file named by -store-key and
// registers it in keyRegistry under its hex-encoded id.
// A missing flag or the literal value "plain" means no store key.
// Any read or format failure is fatal.
func loadStoreKey() {
	if len(*storeKeyPath) == 0 || *storeKeyPath == "plain" {
		log.Infof(context.Background(), "no store key specified")
		return
	}
	data, err := ioutil.ReadFile(*storeKeyPath)
	if err != nil {
		log.Fatalf(context.Background(), "could not read %s: %v", *storeKeyPath, err)
	}
	var k keyEntry
	// Key file layout: 32-byte id followed by the raw AES key, so the
	// total length determines the cipher strength.
	switch len(data) {
	case 48:
		k.encryptionType = enginepbccl.EncryptionType_AES128_CTR
	case 56:
		k.encryptionType = enginepbccl.EncryptionType_AES192_CTR
	case 64:
		k.encryptionType = enginepbccl.EncryptionType_AES256_CTR
	default:
		log.Fatalf(context.Background(), "wrong key length %d, want 32 bytes + AES length", len(data))
	}
	// Hexadecimal representation of the first 32 bytes.
	id := hex.EncodeToString(data[0:32])
	// Raw key is the rest.
	k.rawKey = data[32:]
	keyRegistry[id] = k
	log.Infof(context.Background(), "store key: %s", k)
}
// loadKeyRegistry reads the COCKROACHDB_DATA_KEYS file (decrypting it via
// readFile if needed), logs its store and data keys, and adds every data
// key to the global keyRegistry. Any failure is fatal.
func loadKeyRegistry() {
	data, err := readFile(keyRegistryPath)
	if err != nil {
		log.Fatalf(context.Background(), "could not read %s: %v", keyRegistryPath, err)
	}
	var reg enginepbccl.DataKeysRegistry
	if err := protoutil.Unmarshal(data, &reg); err != nil {
		log.Fatalf(context.Background(), "could not unmarshal %s: %v", keyRegistryPath, err)
	}
	log.Infof(context.Background(), "key registry contains %d store keys(s) and %d data key(s)",
		len(reg.StoreKeys), len(reg.DataKeys))
	for _, e := range reg.StoreKeys {
		log.Infof(context.Background(), " store key: type: %-12s %v", e.EncryptionType, e)
	}
	for _, e := range reg.DataKeys {
		log.Infof(context.Background(), " data key: type: %-12s %v", e.Info.EncryptionType, e.Info)
	}
	for k, e := range reg.DataKeys {
		keyRegistry[k] = keyEntry{e.Info.EncryptionType, e.Key}
	}
}
// loadCurrent reads the CURRENT file (decrypting if registered as
// encrypted) and logs its contents. Any failure is fatal.
func loadCurrent() {
	data, err := readFile(currentPath)
	if err != nil {
		log.Fatalf(context.Background(), "could not read %s: %v", currentPath, err)
	}
	log.Infof(context.Background(), "current: %s", string(data))
}
// loadOptions finds every OPTIONS-* file in the db directory and logs the
// beginning of each one (decrypting via readFile where needed).
// Any failure is fatal.
func loadOptions() {
	absGlob := filepath.Join(*dbDir, optionsPathGlob)
	paths, err := filepath.Glob(absGlob)
	if err != nil {
		log.Fatalf(context.Background(), "problem finding files matching %s: %v", absGlob, err)
	}
	for _, f := range paths {
		fname := filepath.Base(f)
		data, err := readFile(fname)
		if err != nil {
			log.Fatalf(context.Background(), "could not read %s: %v", fname, err)
		}
		// Bug fix: data[:100] panicked with a bounds error for OPTIONS
		// files shorter than 100 bytes; clamp the preview length.
		n := 100
		if len(data) < n {
			n = len(data)
		}
		log.Infof(context.Background(), "options file: %s starts with: %s", fname, string(data[:n]))
	}
}
// readFile reads filename (relative names are resolved against -db-dir)
// and, when the file registry marks it as encrypted, decrypts it in place
// using AES-CTR with the key looked up in keyRegistry.
// Returns the plaintext bytes, or an error if the file or its key cannot
// be found or the cipher cannot be built.
func readFile(filename string) ([]byte, error) {
	if len(filename) == 0 {
		return nil, errors.Errorf("filename is empty")
	}
	absPath := filename
	if filename[0] != '/' {
		absPath = filepath.Join(*dbDir, filename)
	}
	data, err := ioutil.ReadFile(absPath)
	if err != nil {
		return nil, errors.Errorf("could not read %s: %v", absPath, err)
	}
	// Registry lookup is by the (possibly relative) name, not the absolute path.
	reg, ok := fileRegistry[filename]
	if !ok || reg.settings.EncryptionType == enginepbccl.EncryptionType_Plaintext {
		// Plaintext: do nothing.
		log.Infof(context.Background(), "reading plaintext %s", absPath)
		return data, nil
	}
	// Encrypted: find the key.
	key, ok := keyRegistry[reg.settings.KeyId]
	if !ok {
		return nil, errors.Errorf("could not find key %s for file %s", reg.settings.KeyId, absPath)
	}
	log.Infof(context.Background(), "decrypting %s with %s key %s...", filename, reg.settings.EncryptionType, reg.settings.KeyId[:8])
	cipher, err := aes.NewCipher(key.rawKey)
	if err != nil {
		return nil, errors.Errorf("could not build AES cipher for file %s: %v", absPath, err)
	}
	size := len(data)
	// CTR mode: each 16-byte block is XORed with the encryption of
	// (nonce || counter), with the counter incremented per block.
	counter := reg.settings.Counter
	nonce := reg.settings.Nonce
	if len(nonce) != 12 {
		log.Fatalf(context.Background(), "nonce has wrong length: %d, expected 12", len(nonce))
	}
	iv := make([]byte, aes.BlockSize)
	for offset := 0; offset < size; offset += aes.BlockSize {
		// Put nonce at beginning of IV.
		copy(iv, nonce)
		// Write counter to end of IV in network byte order.
		binary.BigEndian.PutUint32(iv[12:], counter)
		// Increment counter for next block.
		counter++
		// Encrypt IV (AES CTR mode is always encrypt).
		cipher.Encrypt(iv, iv)
		// XOR data with decrypted IV. We may have a partial block at the end of 'data'.
		for i := 0; i < aes.BlockSize; i++ {
			pos := offset + i
			if pos >= size {
				// Partial block.
				break
			}
			data[pos] = data[pos] ^ iv[i]
		}
	}
	return data, nil
}
// main loads and prints the store key, file registry, key registry,
// CURRENT file and OPTIONS files of the store at -db-dir.
// Order matters: the store key and file registry must be populated before
// readFile (used by the later load functions) can decrypt anything.
func main() {
	flag.Parse()
	loadStoreKey()
	loadFileRegistry()
	loadKeyRegistry()
	loadCurrent()
	loadOptions()
}
|
package v1alpha1
import (
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"code.cloudfoundry.org/quarks-operator/pkg/kube/apis"
)
// This file is safe to edit
// It's used as input for the Kube code generator
// Run "make generate" after modifying this file
// ReferenceType lists all the types of Reference we can support.
type ReferenceType = string

// Valid values for ref types
const (
	// ConfigMapReference represents ConfigMap reference
	ConfigMapReference ReferenceType = "configmap"
	// SecretReference represents Secret reference
	SecretReference ReferenceType = "secret"
	// URLReference represents URL reference
	URLReference ReferenceType = "url"

	// ManifestSpecName is the name used for the manifest spec.
	ManifestSpecName string = "manifest"
	// OpsSpecName is the name used for the ops files spec.
	OpsSpecName string = "ops"
	// ImplicitVariableKeyName is the key name used for implicit variable values.
	ImplicitVariableKeyName string = "value"
)
// DeploymentSecretType lists all the types of secrets used in
// the lifecycle of a BOSHDeployment
type DeploymentSecretType int

const (
	// DeploymentSecretTypeManifestWithOps is a manifest that has ops files applied
	DeploymentSecretTypeManifestWithOps DeploymentSecretType = iota
	// DeploymentSecretTypeDesiredManifest is a manifest whose variables have been interpolated
	DeploymentSecretTypeDesiredManifest
	// DeploymentSecretTypeVariable is a BOSH variable generated using an QuarksSecret
	DeploymentSecretTypeVariable
	// DeploymentSecretTypeInstanceGroupResolvedProperties is a YAML file containing all properties needed to render an Instance Group
	DeploymentSecretTypeInstanceGroupResolvedProperties
	// DeploymentSecretBPMInformation is a YAML file containing the BPM information for one instance group
	DeploymentSecretBPMInformation
)

// String returns the short name of the secret type.
// NOTE(review): the array below must stay in sync with the iota constants
// above; an out-of-range value panics with "index out of range".
func (s DeploymentSecretType) String() string {
	return [...]string{
		"with-ops",
		"desired",
		"var",
		"ig-resolved",
		"bpm"}[s]
}

// Prefix returns the prefix used for our k8s secrets:
// `<secretType>.`
func (s DeploymentSecretType) Prefix() string {
	return s.String() + "."
}
// Label and annotation keys, all namespaced under the operator's API group.
var (
	// LabelDeploymentName is the label key for the deployment manifest name
	LabelDeploymentName = fmt.Sprintf("%s/deployment-name", apis.GroupName)
	// LabelDeploymentSecretType is the label key for secret type
	LabelDeploymentSecretType = fmt.Sprintf("%s/secret-type", apis.GroupName)
	// LabelInstanceGroupName is the name of a label for an instance group name.
	LabelInstanceGroupName = fmt.Sprintf("%s/instance-group-name", apis.GroupName)
	// LabelDeploymentVersion is the name of a label for the deployment's version.
	LabelDeploymentVersion = fmt.Sprintf("%s/deployment-version", apis.GroupName)
	// LabelReferencedJobName is the name key for dependent job
	LabelReferencedJobName = fmt.Sprintf("%s/referenced-job-name", apis.GroupName)
	// AnnotationLinkProvidesKey is the key for the quarks links 'provides' JSON
	AnnotationLinkProvidesKey = fmt.Sprintf("%s/provides", apis.GroupName)
	// AnnotationLinkProviderService is the annotation key used on services to identify the link provider
	AnnotationLinkProviderService = fmt.Sprintf("%s/link-provider-name", apis.GroupName)
	// LabelEntanglementKey to identify a quarks link
	LabelEntanglementKey = fmt.Sprintf("%s/entanglement", apis.GroupName)
)
// BOSHDeploymentSpec defines the desired state of BOSHDeployment
type BOSHDeploymentSpec struct {
	// Manifest references the BOSH manifest to deploy.
	Manifest ResourceReference `json:"manifest"`
	// Ops references ops files applied to the manifest, in order.
	Ops []ResourceReference `json:"ops,omitempty"`
	// Vars lists user-provided secrets for explicit variables.
	Vars []VarReference `json:"vars,omitempty"`
}

// VarReference represents a user-defined secret for an explicit variable
type VarReference struct {
	Name   string `json:"name"`
	Secret string `json:"secret"`
}

// ResourceReference defines the resource reference type and location
type ResourceReference struct {
	Name string `json:"name"`
	// Type is one of the ReferenceType values (configmap, secret, url).
	Type ReferenceType `json:"type"`
}

// BOSHDeploymentStatus defines the observed state of BOSHDeployment
type BOSHDeploymentStatus struct {
	// Timestamp for the last reconcile
	LastReconcile *metav1.Time `json:"lastReconcile"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// BOSHDeployment is the Schema for the boshdeployments API
// +k8s:openapi-gen=true
type BOSHDeployment struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   BOSHDeploymentSpec   `json:"spec,omitempty"`
	Status BOSHDeploymentStatus `json:"status,omitempty"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// BOSHDeploymentList contains a list of BOSHDeployment
type BOSHDeploymentList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []BOSHDeployment `json:"items"`
}
// GetNamespacedName returns the resource name with its namespace,
// in the conventional "namespace/name" form.
func (bdpl *BOSHDeployment) GetNamespacedName() string {
	return bdpl.Namespace + "/" + bdpl.Name
}
|
package solutions
// longestValidParentheses returns the length of the longest substring of s
// that forms a balanced sequence of '(' and ')'.
//
// A stack of start markers is kept: for every '(' the previous candidate
// start is pushed; a ')' either resets the start (no opener available) or
// pops back to the enclosing start and measures the span ending at i.
func longestValidParentheses(s string) int {
	starts := make([]int, len(s)+1)
	depth := 0
	start := -1
	best := 0
	for i := 0; i < len(s); i++ {
		switch {
		case s[i] == '(':
			starts[depth] = start
			depth++
			start = i
		case depth == 0:
			// Unmatched ')': everything up to and including i is dead.
			start = i
		default:
			depth--
			start = starts[depth]
			if span := i - start; span > best {
				best = span
			}
		}
	}
	return best
}
|
package dinosql
import (
"fmt"
"io/ioutil"
"path/filepath"
"sort"
"strconv"
"strings"
"github.com/davecgh/go-spew/spew"
"github.com/kyleconroy/dinosql/internal/catalog"
core "github.com/kyleconroy/dinosql/internal/pg"
"github.com/kyleconroy/dinosql/internal/postgres"
pg "github.com/lfittl/pg_query_go"
nodes "github.com/lfittl/pg_query_go/nodes"
)
// keepSpew references the spew package so its import survives while it is
// only used for ad-hoc debugging.
func keepSpew() {
	spew.Dump("hello world")
}
// ParseCatalog builds a catalog from every .sql file in dir.
// Hidden files and files without a .sql suffix are skipped; file contents
// are passed through RemoveGooseRollback before parsing.
// On a per-file error the catalog built so far is returned with the error.
func ParseCatalog(dir string, settings GenerateSettings) (core.Catalog, error) {
	files, err := ioutil.ReadDir(dir)
	if err != nil {
		return core.Catalog{}, err
	}
	c := core.NewCatalog()
	for _, f := range files {
		if !strings.HasSuffix(f.Name(), ".sql") {
			continue
		}
		if strings.HasPrefix(f.Name(), ".") {
			continue
		}
		blob, err := ioutil.ReadFile(filepath.Join(dir, f.Name()))
		if err != nil {
			return c, err
		}
		contents := RemoveGooseRollback(string(blob))
		tree, err := pg.Parse(contents)
		if err != nil {
			return c, err
		}
		if err := updateCatalog(&c, tree); err != nil {
			return c, err
		}
	}
	return c, nil
}
// updateCatalog applies every statement of tree to the catalog, first
// rejecting statements whose function calls fail validation.
func updateCatalog(c *core.Catalog, tree pg.ParsetreeList) error {
	for _, stmt := range tree.Statements {
		if err := validateFuncCall(stmt); err != nil {
			return err
		}
		if err := catalog.Update(c, stmt); err != nil {
			return err
		}
	}
	return nil
}
// join concatenates the nodes.String items of list with sep; items of any
// other node type are skipped.
func join(list nodes.List, sep string) string {
	return strings.Join(stringSlice(list), sep)
}
// stringSlice extracts the string values of list, skipping items that are
// not nodes.String. The result is never nil.
func stringSlice(list nodes.List) []string {
	out := make([]string, 0, len(list.Items))
	for _, item := range list.Items {
		if s, ok := item.(nodes.String); ok {
			out = append(out, s.Str)
		}
	}
	return out
}
// Parameter describes one $N placeholder of a query and the column it was
// resolved against.
type Parameter struct {
	Number   int
	DataType string
	Name     string // TODO: Relation?
	NotNull  bool
}

// Query is a parsed, named SQL query ready for code generation.
// Name and Cmd may be empty
// Maybe I don't need the SQL string if I have the raw Stmt?
type Query struct {
	SQL     string
	Columns []core.Column
	Params  []Parameter
	Name    string
	Cmd     string // TODO: Pick a better name. One of: one, many, exec, execrows

	// XXX: Hack
	NeedsEdit bool
}

// Result bundles everything the generator needs: the settings, the parsed
// queries and the catalog they were resolved against.
type Result struct {
	Settings GenerateSettings
	Queries  []*Query
	Catalog  core.Catalog
}
// ParseQueries parses every .sql file in settings.QueryDir against the
// catalog and returns the collected queries. Hidden and non-.sql files are
// skipped; statements that are not supported query types yield no Query
// (parseQuery returns nil, nil for them).
func ParseQueries(c core.Catalog, settings GenerateSettings) (*Result, error) {
	files, err := ioutil.ReadDir(settings.QueryDir)
	if err != nil {
		return nil, err
	}
	var q []*Query
	for _, f := range files {
		if !strings.HasSuffix(f.Name(), ".sql") {
			continue
		}
		if strings.HasPrefix(f.Name(), ".") {
			continue
		}
		blob, err := ioutil.ReadFile(filepath.Join(settings.QueryDir, f.Name()))
		if err != nil {
			return nil, err
		}
		source := string(blob)
		tree, err := pg.Parse(source)
		if err != nil {
			return nil, err
		}
		for _, stmt := range tree.Statements {
			queryTwo, err := parseQuery(c, stmt, source)
			if err != nil {
				return nil, err
			}
			if queryTwo != nil {
				q = append(q, queryTwo)
			}
		}
	}
	return &Result{Catalog: c, Queries: q, Settings: settings}, nil
}
// pluckQuery extracts the raw SQL text of statement n from source using the
// statement's recorded location and length, trimming surrounding whitespace.
// Fix for the old "TODO: Bounds checking": spans that fall outside source
// now return an error instead of panicking with a slice bounds error.
func pluckQuery(source string, n nodes.RawStmt) (string, error) {
	head := n.StmtLocation
	tail := n.StmtLocation + n.StmtLen
	if head < 0 || tail < head || tail > len(source) {
		return "", fmt.Errorf("pluckQuery: statement span [%d, %d) out of range for source of length %d", head, tail, len(source))
	}
	return strings.TrimSpace(source[head:tail]), nil
}
// rangeVars walks the statement tree and collects every RangeVar
// (table reference) it contains.
func rangeVars(root nodes.Node) []nodes.RangeVar {
	var vars []nodes.RangeVar
	find := VisitorFunc(func(node nodes.Node) {
		switch n := node.(type) {
		case nodes.RangeVar:
			vars = append(vars, n)
		}
	})
	Walk(find, root)
	return vars
}
// parseMetadata scans the query text for a "-- name: <Name> <:cmd>" comment
// line and returns the query name and command from the first one found.
// No metadata line at all returns empty strings with no error.
// Fix: a malformed metadata line (fewer than two tokens after "-- name:")
// previously panicked with an index-out-of-range; it is now an error.
func parseMetadata(t string) (string, string, error) {
	for _, line := range strings.Split(t, "\n") {
		if !strings.HasPrefix(line, "-- name:") {
			continue
		}
		part := strings.Split(line, " ")
		if len(part) < 4 {
			return "", "", fmt.Errorf("invalid query metadata: %s", line)
		}
		return part[2], strings.TrimSpace(part[3]), nil
	}
	return "", "", nil
}
// parseQuery turns one parsed statement into a *Query: it validates the
// statement, extracts its raw SQL and metadata, resolves its $N parameters
// against the catalog and computes its output columns.
// Returns (nil, nil) for statements that are not SELECT/DELETE/INSERT/UPDATE.
func parseQuery(c core.Catalog, stmt nodes.Node, source string) (*Query, error) {
	if err := validateParamRef(stmt); err != nil {
		return nil, err
	}
	raw, ok := stmt.(nodes.RawStmt)
	if !ok {
		return nil, nil
	}
	// Only the four DML statement types are supported; anything else is
	// silently skipped.
	switch raw.Stmt.(type) {
	case nodes.SelectStmt:
	case nodes.DeleteStmt:
	case nodes.InsertStmt:
	case nodes.UpdateStmt:
	default:
		return nil, nil
	}
	if err := validateFuncCall(raw); err != nil {
		return nil, err
	}
	rawSQL, err := pluckQuery(source, raw)
	if err != nil {
		return nil, err
	}
	name, cmd, err := parseMetadata(rawSQL)
	if err != nil {
		return nil, err
	}
	rvs := rangeVars(raw.Stmt)
	refs := findParameters(raw.Stmt)
	params, err := resolveCatalogRefs(c, rvs, refs)
	if err != nil {
		return nil, err
	}
	cols, err := outputColumns(c, raw.Stmt)
	if err != nil {
		return nil, err
	}
	return &Query{
		Cmd:       cmd,
		Name:      name,
		Params:    params,
		Columns:   cols,
		SQL:       rawSQL,
		NeedsEdit: needsEdit(stmt),
	}, nil
}
// QueryCatalog augments the schema catalog with the CTEs of a single
// query, so table lookups see both.
type QueryCatalog struct {
	catalog core.Catalog
	ctes    map[string]core.Table
}

// NewQueryCatalog builds a QueryCatalog from c and the query's optional
// WITH clause; each CTE becomes a pseudo-table whose columns are the CTE
// query's output columns.
// NOTE(review): an outputColumns failure for a CTE panics rather than
// returning an error.
func NewQueryCatalog(c core.Catalog, with *nodes.WithClause) QueryCatalog {
	ctes := map[string]core.Table{}
	if with != nil {
		for _, item := range with.Ctes.Items {
			if cte, ok := item.(nodes.CommonTableExpr); ok {
				cols, err := outputColumns(c, cte.Ctequery)
				if err != nil {
					panic(err.Error())
				}
				ctes[*cte.Ctename] = core.Table{
					Name:    *cte.Ctename,
					Columns: cols,
				}
			}
		}
	}
	return QueryCatalog{catalog: c, ctes: ctes}
}
// GetTable resolves fqn, checking the query's CTEs before the catalog's
// schemas, and reports schema/relation-not-found errors.
func (qc QueryCatalog) GetTable(fqn core.FQN) (core.Table, error) {
	if cte, ok := qc.ctes[fqn.Rel]; ok {
		return cte, nil
	}
	schema, ok := qc.catalog.Schemas[fqn.Schema]
	if !ok {
		return core.Table{}, core.ErrorSchemaDoesNotExist(fqn.Schema)
	}
	table, ok := schema.Tables[fqn.Rel]
	if !ok {
		return core.Table{}, core.ErrorRelationDoesNotExist(fqn.Rel)
	}
	return table, nil
}
// sourceTables resolves the tables a statement reads from or writes to
// (the target relation for DELETE/INSERT/UPDATE, plus FROM-clause entries),
// consulting the statement's CTEs via NewQueryCatalog.
// (The previous comment here described outputColumns; it was a copy-paste.)
//
// Returns an error for unsupported statement types or unresolvable tables.
func sourceTables(c core.Catalog, node nodes.Node) ([]core.Table, error) {
	var list nodes.List
	var with *nodes.WithClause
	switch n := node.(type) {
	case nodes.DeleteStmt:
		list = nodes.List{
			Items: []nodes.Node{*n.Relation},
		}
	case nodes.InsertStmt:
		list = nodes.List{
			Items: []nodes.Node{*n.Relation},
		}
	case nodes.UpdateStmt:
		list = nodes.List{
			Items: append(n.FromClause.Items, *n.Relation),
		}
	case nodes.SelectStmt:
		with = n.WithClause
		list = n.FromClause
	default:
		return nil, fmt.Errorf("sourceTables: unsupported node type: %T", n)
	}
	qc := NewQueryCatalog(c, with)
	var tables []core.Table
	for _, item := range list.Items {
		switch n := item.(type) {
		case nodes.RangeVar:
			fqn, err := catalog.ParseRange(&n)
			if err != nil {
				return nil, err
			}
			table, err := qc.GetTable(fqn)
			if err != nil {
				return nil, err
			}
			tables = append(tables, table)
		default:
			return nil, fmt.Errorf("sourceTable: unsupported list item type: %T", n)
		}
	}
	return tables, nil
}
// IsStarRef reports whether cf is a bare "*" column reference.
func IsStarRef(cf nodes.ColumnRef) bool {
	if len(cf.Fields.Items) == 1 {
		if _, ok := cf.Fields.Items[0].(nodes.A_Star); ok {
			return true
		}
	}
	return false
}
// Compute the output columns for a statement.
//
// Return an error if column references are ambiguous
// Return an error if column references don't exist
func outputColumns(c core.Catalog, node nodes.Node) ([]core.Column, error) {
	tables, err := sourceTables(c, node)
	if err != nil {
		fmt.Println(tables)
		return nil, err
	}

	// The target list is the SELECT list or, for DML, the RETURNING list.
	var targets nodes.List
	switch n := node.(type) {
	case nodes.DeleteStmt:
		targets = n.ReturningList
	case nodes.InsertStmt:
		targets = n.ReturningList
	case nodes.SelectStmt:
		targets = n.TargetList
	case nodes.UpdateStmt:
		targets = n.ReturningList
	default:
		return nil, fmt.Errorf("outputColumns: unsupported node type: %T", n)
	}

	var cols []core.Column
	for _, target := range targets.Items {
		res, ok := target.(nodes.ResTarget)
		if !ok {
			continue
		}
		switch n := res.Val.(type) {
		case nodes.A_Expr:
			// Expressions: only comparison operators are typed (bool);
			// other expressions are currently dropped.
			name := "_"
			if res.Name != nil {
				name = *res.Name
			}
			if postgres.IsComparisonOperator(join(n.Name, "")) {
				// TODO: Generate a name for these operations
				cols = append(cols, core.Column{Name: name, DataType: "bool", NotNull: true})
			}
		case nodes.ColumnRef:
			parts := stringSlice(n.Fields)
			var name, alias string
			switch {
			case IsStarRef(n):
				// "*" expands to every column of every source table.
				// TODO: Disambiguate columns
				for _, t := range tables {
					for _, c := range t.Columns {
						cname := c.Name
						if res.Name != nil {
							cname = *res.Name
						}
						cols = append(cols, core.Column{
							Name:     cname,
							DataType: c.DataType,
							NotNull:  c.NotNull,
						})
					}
				}
				continue
			case len(parts) == 1:
				name = parts[0]
			case len(parts) == 2:
				alias = parts[0]
				name = parts[1]
			default:
				panic(fmt.Sprintf("unknown number of fields: %d", len(parts)))
			}
			// Resolve the (possibly aliased) column against the source
			// tables, counting matches to detect missing/ambiguous refs.
			var found int
			for _, t := range tables {
				if alias != "" && t.Name != alias {
					continue
				}
				for _, c := range t.Columns {
					if c.Name == name {
						found += 1
						cname := c.Name
						if res.Name != nil {
							cname = *res.Name
						}
						cols = append(cols, core.Column{
							Name:     cname,
							DataType: c.DataType,
							NotNull:  c.NotNull,
						})
					}
				}
			}
			if found == 0 {
				return nil, Error{
					Code:    "42703",
					Message: fmt.Sprintf("column \"%s\" does not exist", name),
				}
			}
			if found > 1 {
				return nil, Error{
					Code:    "42703",
					Message: fmt.Sprintf("column reference \"%s\" is ambiguous", name),
				}
			}
		case nodes.FuncCall:
			// Function calls are currently hard-coded to "integer".
			name := join(n.Funcname, ".")
			if res.Name != nil {
				name = *res.Name
			}
			cols = append(cols, core.Column{Name: name, DataType: "integer"})
		}
	}
	return cols, nil
}
// paramRef records one $N reference along with the AST node it appeared
// under and the table (RangeVar) in scope at that point.
type paramRef struct {
	parent nodes.Node
	rv     *nodes.RangeVar
	ref    nodes.ParamRef
}

// paramSearch is a Visitor that accumulates paramRefs keyed by parameter
// number, tracking the most recent parent expression and range var seen.
type paramSearch struct {
	parent   nodes.Node
	rangeVar *nodes.RangeVar
	refs     map[int]paramRef
}
// Visit implements the Visitor interface for paramSearch.
// INSERT statements are special-cased so each $N in the VALUES/SELECT list
// is paired with the corresponding target column; otherwise a ParamRef is
// recorded under the last seen parent expression and range var.
func (p *paramSearch) Visit(node nodes.Node) Visitor {
	switch n := node.(type) {
	case nodes.A_Expr:
		p.parent = node
	case nodes.InsertStmt:
		if s, ok := n.SelectStmt.(nodes.SelectStmt); ok {
			for i, item := range s.TargetList.Items {
				target, ok := item.(nodes.ResTarget)
				if !ok {
					continue
				}
				ref, ok := target.Val.(nodes.ParamRef)
				if !ok {
					continue
				}
				// TODO: Out-of-bounds panic
				p.refs[ref.Number] = paramRef{parent: n.Cols.Items[i], ref: ref, rv: p.rangeVar}
			}
			for _, vl := range s.ValuesLists {
				for i, v := range vl {
					ref, ok := v.(nodes.ParamRef)
					if !ok {
						continue
					}
					// TODO: Out-of-bounds panic
					p.refs[ref.Number] = paramRef{parent: n.Cols.Items[i], ref: ref, rv: p.rangeVar}
				}
			}
		}
	case nodes.RangeVar:
		p.rangeVar = &n
	case nodes.ResTarget:
		p.parent = node
	case nodes.ParamRef:
		// First sighting wins; INSERT handling above may already have
		// recorded this number with a more precise parent.
		if _, found := p.refs[n.Number]; !found {
			p.refs[n.Number] = paramRef{parent: p.parent, ref: n, rv: p.rangeVar}
		}
		return nil
	}
	return p
}
func findParameters(root nodes.Node) []paramRef {
v := ¶mSearch{refs: map[int]paramRef{}}
Walk(v, root)
refs := make([]paramRef, 0)
for _, r := range v.refs {
refs = append(refs, r)
}
sort.Slice(refs, func(i, j int) bool { return refs[i].ref.Number < refs[j].ref.Number })
return refs
}
// starWalker is a Visitor that records whether any A_Star ("*") node
// exists in the walked tree.
type starWalker struct {
	found bool
}

// Visit stops descending as soon as a "*" is found.
func (s *starWalker) Visit(node nodes.Node) Visitor {
	if _, ok := node.(nodes.A_Star); ok {
		s.found = true
		return nil
	}
	return s
}
// needsEdit reports whether the statement contains a "*" reference, which
// the generator cannot expand without manual editing.
func needsEdit(root nodes.Node) bool {
	w := starWalker{}
	Walk(&w, root)
	return w.found
}
// argName converts a snake_case column name into a camelCase Go argument
// name, upper-casing "id" segments to "ID".
func argName(name string) string {
	var b strings.Builder
	for i, part := range strings.Split(name, "_") {
		switch {
		case i == 0:
			b.WriteString(strings.ToLower(part))
		case part == "id":
			b.WriteString("ID")
		default:
			b.WriteString(strings.Title(part))
		}
	}
	return b.String()
}
// resolveCatalogRefs maps each $N reference to a typed Parameter by
// resolving the column it is compared to (or inserted into) against the
// public-schema tables in the catalog, honoring table aliases from rvs.
// An unresolvable column yields a Postgres-style 42703 error; a bare
// ParamRef with no resolvable context falls back to interface{}.
func resolveCatalogRefs(c core.Catalog, rvs []nodes.RangeVar, args []paramRef) ([]Parameter, error) {
	// table name -> column name -> column, for the public schema only.
	typeMap := map[string]map[string]core.Column{}
	for _, t := range c.Schemas["public"].Tables {
		typeMap[t.Name] = map[string]core.Column{}
		for _, c := range t.Columns {
			cc := c
			typeMap[t.Name][c.Name] = cc
		}
	}

	// alias -> real table name; the first named table is the default.
	aliasMap := map[string]string{}
	defaultTable := ""
	for _, rv := range rvs {
		if rv.Relname == nil {
			continue
		}
		if defaultTable == "" {
			defaultTable = *rv.Relname
		}
		if rv.Alias == nil {
			continue
		}
		aliasMap[*rv.Alias.Aliasname] = *rv.Relname
	}

	var a []Parameter
	for _, ref := range args {
		switch n := ref.parent.(type) {
		case nodes.A_Expr:
			// Comparison like "col = $1": type comes from the left-hand column.
			switch n := n.Lexpr.(type) {
			case nodes.ColumnRef:
				items := stringSlice(n.Fields)
				var key, alias string
				switch len(items) {
				case 1:
					key = items[0]
				case 2:
					alias = items[0]
					key = items[1]
				default:
					panic("too many field items: " + strconv.Itoa(len(items)))
				}
				// Resolution order: alias map, the ref's own range var,
				// then the default (first) table.
				table := aliasMap[alias]
				if table == "" && ref.rv != nil && ref.rv.Relname != nil {
					table = *ref.rv.Relname
				}
				if table == "" {
					table = defaultTable
				}
				if c, ok := typeMap[table][key]; ok {
					a = append(a, Parameter{
						Number:   ref.ref.Number,
						Name:     argName(key),
						DataType: c.DataType,
						NotNull:  c.NotNull,
					})
				} else {
					return nil, Error{
						Code:    "42703",
						Message: fmt.Sprintf("column \"%s\" does not exist", key),
					}
				}
			}
		case nodes.ResTarget:
			// INSERT target column: type comes from the named column of
			// the default table.
			if n.Name == nil {
				return nil, fmt.Errorf("nodes.ResTarget has nil name")
			}
			key := *n.Name
			if c, ok := typeMap[defaultTable][key]; ok {
				a = append(a, Parameter{
					Number:   ref.ref.Number,
					Name:     argName(key),
					DataType: c.DataType,
					NotNull:  c.NotNull,
				})
			} else {
				return nil, Error{
					Code:    "42703",
					Message: fmt.Sprintf("column \"%s\" does not exist", key),
				}
			}
		case nodes.ParamRef:
			// No usable context: emit an untyped placeholder.
			a = append(a, Parameter{Number: ref.ref.Number, Name: "_", DataType: "interface{}"})
		default:
			// return nil, fmt.Errorf("unsupported type: %T", n)
		}
	}
	return a, nil
}
// TypeOverride maps a Postgres type to a custom Go type, optionally provided
// by another package. Loaded from the generator's JSON configuration.
type TypeOverride struct {
	Package      string `json:"package"`
	PostgresType string `json:"postgres_type"`
	GoType       string `json:"go_type"`
	Null         bool   `json:"null"` // presumably selects the nullable variant — TODO confirm
}
// GenerateSettings mirrors the generator's JSON configuration file: input
// locations, output target, and code-generation toggles.
type GenerateSettings struct {
	SchemaDir           string         `json:"schema"`
	QueryDir            string         `json:"queries"`
	Out                 string         `json:"out"`
	Package             string         `json:"package"`
	EmitPreparedQueries bool           `json:"emit_prepared_queries"`
	EmitTags            bool           `json:"emit_tags"`
	Overrides           []TypeOverride `json:"overrides"`
}
|
package main
import (
"fmt"
"log"
"net/http"
"github.com/gorilla/websocket"
"github.com/sajallimbu/main/pubsub"
uuid "github.com/satori/go.uuid"
)
// upgrader turns incoming HTTP requests into WebSocket connections using
// 1 KiB read/write buffers.
var upgrader = websocket.Upgrader{
	ReadBufferSize:  1024,
	WriteBufferSize: 1024,
}
// autoID returns a new time-based (V1) UUID string, used as a client ID.
// NOTE(review): the extra nil argument implies this vendored satori/go.uuid
// version has NewV1() returning only a UUID and Must taking (UUID, error) —
// confirm against the pinned dependency before upgrading it.
func autoID() string {
	return uuid.Must(uuid.NewV1(), nil).String()
}
// ps is the process-wide pub/sub registry shared by every WebSocket client.
var ps = &pubsub.PubSub{}
// websocketHandler upgrades the request to a WebSocket connection, registers
// the peer with the pub/sub registry, and pumps incoming messages until the
// connection drops, at which point the client and all of its subscriptions
// are removed.
func websocketHandler(w http.ResponseWriter, r *http.Request) {
	ws, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		log.Println(err)
		return
	}
	fmt.Println("Client is connected")

	cl := pubsub.Client{
		ID:         autoID(),
		Connection: ws,
	}
	ps.AddClient(cl)

	for {
		msgType, payload, err := ws.ReadMessage()
		if err == nil {
			ps.HandleReceiveMessage(cl, msgType, payload)
			continue
		}
		// if the client is disconnected we need to unsubscribe the user and remove the client from the PubSub repo
		fmt.Printf("Client: %s disconnected\n", cl.ID)
		fmt.Println("Removing client and its subscriptions")
		ps.RemoveClient(cl)
		fmt.Printf("Total client: %d | Subscriptions: %d\n", len(ps.Clients), len(ps.Subscriptions))
		return
	}
}
// main wires up the static-file handler and the /ws WebSocket endpoint, then
// serves on port 3000. ListenAndServe only ever returns a non-nil error, so
// that error is now logged fatally instead of being silently discarded.
func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		http.ServeFile(w, r, "static")
	})
	http.HandleFunc("/ws", websocketHandler)
	fmt.Println("Server is running: http://localhost:3000")
	// FIX: the ListenAndServe error (e.g. port already in use) was dropped,
	// causing a silent exit; surface it.
	log.Fatal(http.ListenAndServe(":3000", nil))
}
|
// cache.go
//
// Author: blinklv <blinklv@icloud.com>
// Create Time: 2018-08-22
// Maintainer: blinklv <blinklv@icloud.com>
// Last Change: 2018-10-15
// A concurrency-safe cache for applications running on a single machine. It supports
// set operation with expiration. Elements are not stored in a single pool (map) but
// distributed in many separate regions, which called shard. This design allows us
// to perform some massive operations (like cleaning expired elements) progressively.
package cache
import (
"fmt"
"sync"
"time"
)
// Config carries the cache configuration.
type Config struct {
	// The elements are not stored in a single pool but distributed in many
	// separate regions, called shards. ShardNumber specifies how many shards
	// there are; there must be at least one. If you don't specify this field
	// (its value is zero), DefaultShardNumber will be used.
	ShardNumber int

	// Cache cleans expired elements periodically; this parameter controls the
	// frequency of cleaning operations. It can't be less than 1 minute in this
	// version, otherwise too many CPU cycles are occupied by the clean task.
	// If you don't specify this field (its value is zero),
	// DefaultCleanInterval will be used.
	CleanInterval time.Duration

	// When an element is out of date, it is cleaned silently. But an element
	// may be complicated and need additional work to release its resources,
	// which is why the Finalizer field exists. When an element is deleted
	// (automatically or manually), this function is applied to it.
	Finalizer func(string, interface{})
}
const (
	// DefaultShardNumber is the default ShardNumber used when creating a Cache.
	DefaultShardNumber = 32
	// DefaultCleanInterval is the default CleanInterval used when creating a Cache.
	DefaultCleanInterval = time.Hour

	// Lower bounds enforced by Config.validate.
	minShardNumber   = 1
	minCleanInterval = time.Minute
)
// validate checks the configuration and returns a normalized copy: zero-value
// fields are replaced by their defaults and out-of-range fields produce a
// non-nil error. The receiver is never modified (and may be nil, which simply
// yields all defaults) so callers can keep using their Config elsewhere.
func (cfg *Config) validate() (*Config, error) {
	out := &Config{
		ShardNumber:   DefaultShardNumber,
		CleanInterval: DefaultCleanInterval,
	}
	if cfg == nil {
		return out, nil
	}

	switch {
	case cfg.ShardNumber >= minShardNumber:
		out.ShardNumber = cfg.ShardNumber
	case cfg.ShardNumber != 0:
		return nil, fmt.Errorf("the number of shards (%d) can't be less than %d",
			cfg.ShardNumber, minShardNumber)
	}

	switch {
	case cfg.CleanInterval >= minCleanInterval:
		out.CleanInterval = cfg.CleanInterval
	case cfg.CleanInterval != 0:
		return nil, fmt.Errorf("the clean interval (%s) can't be less than %s",
			cfg.CleanInterval, minCleanInterval)
	}

	out.Finalizer = cfg.Finalizer
	return out, nil
}
// Cache is a concurrency-safe cache for applications running on a single machine.
type Cache struct {
	shards   []*shard           // element storage, selected by fnv32a(key) % n
	n        uint32             // len(shards), cached as uint32 for hashing
	interval time.Duration      // period between background clean passes
	exit     chan chan struct{} // Close sends a done-channel here to stop clean()
	exitOnce *sync.Once         // ensures Close's shutdown runs exactly once
}
// New creates a Cache instance. The configuration parameter should be valid
// when it's not nil: the number of shards must be at least 1 and the clean
// interval can't be less than 1 minute. Zero-valued ShardNumber/CleanInterval
// fields (or a nil cfg) fall back to DefaultShardNumber/DefaultCleanInterval.
// A background goroutine is started to clean expired elements; stop it (and
// release all elements) with Close.
func New(cfg *Config) (c *Cache, err error) {
	if cfg, err = cfg.validate(); err != nil {
		return nil, err
	}
	c = &Cache{
		shards:   make([]*shard, cfg.ShardNumber),
		n:        uint32(cfg.ShardNumber),
		interval: cfg.CleanInterval,
		exit:     make(chan chan struct{}),
		exitOnce: &sync.Once{},
	}
	// FIX: "for i, _ := range" used a redundant blank identifier (go vet /
	// gofmt -s flag it); the index-only form is the idiomatic spelling.
	for i := range c.shards {
		c.shards[i] = &shard{
			elements:  make(map[string]element),
			finalizer: cfg.Finalizer,
			q:         &queue{},
		}
	}
	go c.clean()
	return c, nil
}
// Add inserts an element without expiration; it returns an error if the key
// already holds a live (non-expired) element.
func (c *Cache) Add(k string, v interface{}) error {
	s := c.shards[fnv32a(k)%c.n]
	return s.add(k, v, 0)
}
// Set stores an element without expiration, replacing any existing element
// under the same key.
func (c *Cache) Set(k string, v interface{}) {
	s := c.shards[fnv32a(k)%c.n]
	s.set(k, v, 0)
}
// EAdd inserts an element with an expiration; it returns an error if the key
// already holds a live element. A zero duration behaves exactly like Add;
// otherwise the element can no longer be retrieved once it expires.
func (c *Cache) EAdd(k string, v interface{}, d time.Duration) error {
	s := c.shards[fnv32a(k)%c.n]
	return s.add(k, v, d)
}
// ESet stores an element with an expiration, replacing any existing element
// under the same key. A zero duration behaves exactly like Set; otherwise
// the element can no longer be retrieved once it expires.
func (c *Cache) ESet(k string, v interface{}, d time.Duration) {
	s := c.shards[fnv32a(k)%c.n]
	s.set(k, v, d)
}
// Get returns the element stored under k, or nil if it is absent or has
// already expired.
func (c *Cache) Get(k string) interface{} {
	s := c.shards[fnv32a(k)%c.n]
	return s.get(k)
}
// Exist reports whether a live (non-expired) element is stored under k.
func (c *Cache) Exist(k string) bool {
	s := c.shards[fnv32a(k)%c.n]
	return s.exist(k)
}
// Del removes the element stored under k. If the cache's Finalizer has been
// set, it is applied to the removed element.
func (c *Cache) Del(k string) {
	s := c.shards[fnv32a(k)%c.n]
	s.del(k)
}
// Size returns the number of elements in the cache. The count may include
// expired elements that have not yet been cleaned up.
func (c *Cache) Size() int {
	total := 0
	for _, sh := range c.shards {
		total += sh.size()
	}
	return total
}
// Close deletes every element in the cache (applying the Finalizer to each,
// if set) and stops the background cleaner. The cache must not be used after
// Close. Calling Close multiple times is safe; only the first call performs
// the shutdown, and it blocks until the cleaner goroutine has finished.
func (c *Cache) Close() error {
	c.exitOnce.Do(func() {
		done := make(chan struct{})
		c.exit <- done
		<-done
	})
	return nil
}
// clean deletes expired elements from the cache periodically until Close
// signals shutdown through the exit channel.
func (c *Cache) clean() {
	for {
		select {
		case <-time.After(c.interval):
			// Shards are cleaned one by one rather than simultaneously: a
			// shard's clean pass takes its lock repeatedly, so cleaning all
			// shards at once would block user requests across the whole
			// cache; doing it sequentially spreads the cost out.
			for _, s := range c.shards {
				s.clean()
			}
		case exitDone := <-c.exit:
			// Shutdown requested by Close: release every shard, then signal
			// completion so Close can return.
			for _, s := range c.shards {
				s.close()
			}
			close(exitDone)
			return
		}
	}
}
// A shard contains a part of the elements of the entire cache, guarded by
// its embedded read/write mutex.
type shard struct {
	sync.RWMutex
	elements  map[string]element
	finalizer func(string, interface{})

	// If an element is set with an expiration, its index is saved in this
	// queue. The primary objective of this field is iterating expired
	// elements in an incremental way.
	q *queue
}
// size returns the number of elements in the shard, possibly including
// expired elements that have not yet been cleaned up.
func (s *shard) size() int {
	s.RLock()
	defer s.RUnlock()
	return len(s.elements)
}
// add inserts an element; if a live (non-expired) element already exists
// under the same key, an error is returned. A zero duration means the
// element never expires.
func (s *shard) add(k string, v interface{}, d time.Duration) error {
	s.Lock()
	defer s.Unlock()
	old, ok := s.elements[k]
	if ok && !old.expired() {
		return fmt.Errorf("element (%s) has already existed", k)
	}
	s._set(k, v, d)
	return nil
}
// set stores an element unconditionally, replacing any existing one. A zero
// duration means the element never expires.
func (s *shard) set(k string, v interface{}, d time.Duration) {
	s.Lock()
	defer s.Unlock()
	s._set(k, v, d)
}
// _set writes the element into the map; the caller must hold the shard lock.
// When d > 0 an index is also queued so the cleaner can find the element.
func (s *shard) _set(k string, v interface{}, d time.Duration) {
	var exp int64
	if d > 0 {
		exp = time.Now().Add(d).UnixNano()
		// NOTE: If the element already existed, the queue may hold several
		// indices with the same key but different expirations; only the one
		// matching the stored expiration is valid.
		s.q.push(index{k, exp})
	}
	s.elements[k] = element{v, exp}
}
// get returns the element stored under k, or nil if it is absent or has
// already expired.
func (s *shard) get(k string) interface{} {
	s.RLock()
	e, ok := s.elements[k]
	s.RUnlock()
	if ok && !e.expired() {
		return e.data
	}
	return nil
}
// exist reports whether a live (non-expired) element is stored under k.
func (s *shard) exist(k string) bool {
	s.RLock()
	defer s.RUnlock()
	e, ok := s.elements[k]
	return ok && !e.expired()
}
// del removes the element stored under k and, if the shard's finalizer is
// set, applies it to the removed element outside the critical section.
func (s *shard) del(k string) {
	s.Lock()
	e, ok := s.elements[k]
	// NOTE: Any indices for this element left in the queue become stale;
	// the cleaner detects and drops them, so they need not be removed here.
	delete(s.elements, k)
	s.Unlock()
	if ok && s.finalizer != nil {
		s.finalizer(k, e.data)
	}
}
// clean removes all expired elements from the shard and returns how many were
// removed. Indices are consumed block by block, taking and releasing the lock
// per index, so user requests are never blocked for a whole pass. NOTE: this
// method must not run concurrently with itself on the same shard instance.
func (s *shard) clean() int {
	q, n := &queue{}, 0
	for b := s.pop(); b != nil; b = s.pop() {
		for _, i := range b.indices {
			expired := false
			s.Lock()
			e, found := s.elements[i.key]
			// First, ensure there is still an element behind this index. Two
			// cases make an index invalid:
			//   1. The element has already been deleted manually.
			//   2. The index is dirty: the element was updated since, in which
			//      case the stored and indexed expirations differ.
			if found && e.expiration == i.expiration {
				// Then check whether the element has expired. If it has,
				// remove it from the map; otherwise keep the index in the
				// temporary queue for the next clean pass.
				if expired = e.expired(); expired {
					delete(s.elements, i.key)
					n++
				} else {
					q.push(i)
				}
			}
			s.Unlock()
			// The finalizer may take arbitrarily long, so it is deliberately
			// invoked outside the critical section above.
			if expired && s.finalizer != nil {
				s.finalizer(i.key, e.data)
			}
		}
	}
	// Install the rebuilt queue of still-pending indices.
	s.Lock()
	s.q = q
	s.Unlock()
	return n
}
// pop removes and returns the first block of the shard's index queue under
// the shard lock; it returns nil when the queue is empty.
func (s *shard) pop() *block {
	s.Lock()
	defer s.Unlock()
	return s.q.pop()
}
// close deletes every element in the shard, applying the finalizer (if set)
// to each one first.
func (s *shard) close() {
	s.Lock()
	defer s.Unlock()
	if fin := s.finalizer; fin != nil {
		for k, e := range s.elements {
			fin(k, e.data)
		}
	}
	// Swapping in fresh empty values is enough; the GC reclaims the old
	// map and queue contents.
	s.elements = make(map[string]element)
	s.q = &queue{}
}
// element is a stored value plus its absolute expiration instant in UnixNano
// nanoseconds; a zero expiration means the element never expires.
type element struct {
	data       interface{}
	expiration int64
}
// expired reports whether the element's deadline has passed. An element whose
// expiration field is zero has unlimited life and never expires.
func (e element) expired() bool {
	if e.expiration == 0 {
		return false
	}
	return time.Now().UnixNano() > e.expiration
}
// A queue of indices, stored as a singly linked list of fixed-capacity blocks.
type queue struct {
	top, tail *block
	bn        int // number of blocks currently in the list
}
// push appends an index to the queue's tail, growing the block list on demand.
func (q *queue) push(i index) {
	if q.tail == nil {
		// Empty queue: create the first block; top and tail share it.
		b := &block{}
		q.top, q.tail, q.bn = b, b, 1
	}
	if len(q.tail.indices) == blockCapacity {
		// The tail block is full: link a fresh block and advance tail.
		b := &block{}
		q.tail.next = b
		q.tail = b
		q.bn++
	}
	q.tail.indices = append(q.tail.indices, i)
}
// pop removes and returns the first BLOCK of the queue (not a single index),
// or nil when the queue is empty.
func (q *queue) pop() *block {
	if q.top == nil {
		// No block in the queue.
		return nil
	}
	b := q.top
	if q.top == q.tail {
		// Exactly one block: top and tail reference the same block, so both
		// must be reset to nil.
		q.top, q.tail = nil, nil
	} else {
		// Two or more blocks: just advance top.
		q.top = q.top.next
	}
	q.bn--
	return b
}
// size returns the number of indices in the queue. Every block except the
// tail is full, so the count is derived from the block count plus the tail's
// fill level.
func (q *queue) size() int {
	if q.tail == nil {
		return 0
	}
	return (q.bn-1)*blockCapacity + len(q.tail.indices)
}
// _bn counts the queue's blocks by walking the list; it's only used in tests.
func (q *queue) _bn() int {
	n := 0
	for b := q.top; b != nil; b = b.next {
		n++
	}
	return n
}
// _tailSize returns the number of indices in the tail block (zero for an
// empty queue); it's only used in tests.
func (q *queue) _tailSize() int {
	if q.tail == nil {
		return 0
	}
	return len(q.tail.indices)
}
// blockCapacity is the maximal number of indices stored in one block.
const blockCapacity = 32
// block is the basic unit for cleaning out expired elements. Multiple indices
// are stored in a common block, and blocks are chained into a linked list.
type block struct {
	indices []index
	next    *block
}
// index records where to find a potentially-expiring element; one is built
// only for elements whose expiration field is not zero.
type index struct {
	key        string
	expiration int64
}
// FNV-1a 32-bit parameters: offset basis and prime.
const (
	offset32 = 0x811c9dc5
	prime32  = 0x1000193
)

// fnv32a returns the 32-bit FNV-1a hash of s. It makes no memory allocations
// (the []byte conversion in the range clause is elided by the compiler).
func fnv32a(s string) uint32 {
	h := uint32(offset32)
	for _, c := range []byte(s) {
		h ^= uint32(c)
		h *= prime32
	}
	return h
}
|
package client
import (
"encoding/json"
"io/ioutil"
"net/http"
"github.com/go-openwrks/openwrks/response"
)
// TransformResponse decodes the JSON body of src into dst. Responses with an
// error status (StatusCode > 399) are instead decoded into a *response.Error,
// which is returned as the error. The response body is always closed.
func TransformResponse(src *http.Response, dst interface{}) error {
	defer src.Body.Close()

	body, err := ioutil.ReadAll(src.Body)
	if err != nil {
		return err
	}

	if src.StatusCode > 399 {
		// Best effort: decode whatever error payload the server sent; an
		// undecodable body still yields a (zero-valued) *response.Error.
		apiErr := &response.Error{}
		json.Unmarshal(body, apiErr)
		return apiErr
	}

	return json.Unmarshal(body, dst)
}
|
package lantern_cache
import (
"bytes"
"fmt"
"sync"
"sync/atomic"
"time"
)
// bucketConfig carries the parameters needed to build a single bucket.
type bucketConfig struct {
	maxCapacity  uint64          // upper bound, in bytes, on the bucket's chunk ring
	initCapacity uint64          // bytes to pre-allocate eagerly (defaults to maxCapacity/4)
	chunkAlloc   *chunkAllocator // source of fixed-size chunks
	statistics   *Stats          // shared counters, updated atomically
}
// bucket is one shard of the cache: a ring of fixed-size chunks holding the
// encoded entries, plus a map from key hash to each entry's packed location
// (wrap counter in the high bits, byte offset in the low bits).
type bucket struct {
	mutex      sync.RWMutex
	m          map[uint64]uint64 // key hash -> (loop << OffsetSizeOf) | offset
	offset     uint64            // next write position in the ring (bytes)
	loop       uint32            // how many times the write cursor has wrapped
	chunks     [][]byte          // ring storage; entries allocated lazily
	chunkAlloc *chunkAllocator
	statistics *Stats
}
// newBucket builds a bucket sized for cfg.maxCapacity bytes and eagerly
// allocates chunks covering cfg.initCapacity bytes (defaulting to a quarter
// of the maximum); the remaining chunks are allocated lazily on first write.
func newBucket(cfg *bucketConfig) *bucket {
	ensure(cfg.maxCapacity > 0, "bucket max capacity need > 0")
	if cfg.initCapacity == 0 {
		cfg.initCapacity = cfg.maxCapacity / 4
	}
	ret := &bucket{}
	ret.statistics = cfg.statistics
	needChunkCount := (cfg.maxCapacity + chunkSize - 1) / chunkSize
	ensure(needChunkCount > 0, "max bucket chunk count need > 0")
	initChunkCount := (cfg.initCapacity + chunkSize - 1) / chunkSize
	if initChunkCount == 0 {
		initChunkCount = 1
	}
	// BUG FIX: when cfg.initCapacity exceeded cfg.maxCapacity, the eager
	// allocation loop below indexed past len(ret.chunks) and panicked with
	// "index out of range"; clamp the eager count to the ring size.
	if initChunkCount > needChunkCount {
		initChunkCount = needChunkCount
	}
	ret.chunks = make([][]byte, needChunkCount)
	ret.chunkAlloc = cfg.chunkAlloc
	ret.offset = 0
	ret.loop = 0
	ret.m = make(map[uint64]uint64)
	for i := uint64(0); i < initChunkCount; i++ {
		chunk, err := ret.chunkAlloc.getChunk()
		if err != nil {
			panic(err)
		}
		ret.chunks[i] = chunk
	}
	return ret
}
// put encodes key/val (with expire as a unix timestamp; 0 means no expiry)
// into the bucket's chunk ring and records the packed position in the hash
// map. Every CleanCount-th put first sweeps stale map entries via clean.
func (b *bucket) put(keyHash uint64, key, val []byte, expire int64) error {
	puts := atomic.AddUint64(&b.statistics.Puts, 1)
	if puts%(CleanCount) == 0 {
		b.clean()
	}
	b.mutex.Lock()
	defer b.mutex.Unlock()
	// Reject empty or oversized entries: one entry must fit in one chunk.
	entrySize := uint64(EntryHeadFieldSizeOf + len(key) + len(val))
	if len(key) == 0 || len(val) == 0 || len(key) > MaxKeySize || len(val) > MaxValueSize || entrySize > chunkSize {
		atomic.AddUint64(&b.statistics.Errors, 1)
		return ErrorInvalidEntry
	}
	offset := b.offset
	nextOffset := offset + entrySize
	chunkIndex := offset / chunkSize
	nextChunkIndex := nextOffset / chunkSize
	if nextChunkIndex > chunkIndex {
		// The entry would straddle a chunk boundary: skip to the start of the
		// next chunk, or wrap to chunk 0 (bumping loop) at the ring's end.
		if int(nextChunkIndex) >= len(b.chunks) {
			b.loop++
			fmt.Printf("chunk(%v) need loop:%d offset:%d nextOffset:%d chunkIndex:%d nextChunkIndex:%d len(b.chunks):%d\n", &b, b.loop, offset, nextOffset, chunkIndex, nextChunkIndex, len(b.chunks))
			chunkIndex = 0
			offset = 0
		} else {
			//b.logger.Printf("bucket chunk[%d] no space to write so jump next chunk[%d] continue loop:%d", chunkIndex, nextChunkIndex, b.loop)
			chunkIndex = nextChunkIndex
			offset = chunkIndex * chunkSize
		}
		nextOffset = offset + entrySize
	}
	// Chunks are allocated lazily on first write.
	if b.chunks[chunkIndex] == nil {
		chunk, err := b.chunkAlloc.getChunk()
		if err != nil {
			atomic.AddUint64(&b.statistics.Errors, 1)
			return ErrorChunkAlloc
		}
		b.chunks[chunkIndex] = chunk
	}
	chunkOffset := offset & (chunkSize - 1)
	wrapEntry(b.chunks[chunkIndex][chunkOffset:], expire, key, val)
	// Pack the wrap counter together with the byte offset so readers can tell
	// whether this slot has since been overwritten by a later lap of the ring.
	b.m[keyHash] = (uint64(b.loop) << OffsetSizeOf) | offset
	b.offset = nextOffset
	//fmt.Printf("[%v] key:%s loop:%d offset:%d", &b, key, b.loop, offset)
	return nil
}
// get looks keyHash up in the map, verifies the recorded slot has not been
// overwritten by a later lap of the ring, checks expiry and the stored key
// (to detect hash collisions), and appends the value to blob on success.
func (b *bucket) get(blob []byte, keyHash uint64, key []byte) ([]byte, error) {
	b.mutex.RLock()
	defer b.mutex.RUnlock()
	atomic.AddUint64(&b.statistics.Gets, 1)
	v, ok := b.m[keyHash]
	if !ok {
		atomic.AddUint64(&b.statistics.Misses, 1)
		return nil, ErrorNotFound
	}
	loop := uint32(v >> OffsetSizeOf)
	offset := v & 0x000000ffffffffff
	//b.logger.Printf("[%v] get key:%s loop:%d now loop:%d offset:%d now offset:%d", &b, key, loop, b.loop, offset, b.offset)
	// The entry is still readable in either of two situations:
	// 1. loop == b.loop && offset < b.offset:
	//    no wrap has happened since the write; offset is where the entry was
	//    written and b.offset is the current write cursor (possibly advanced
	//    by several later writes).
	// 2. loop+1 == b.loop && offset >= b.offset:
	//    the ring has wrapped exactly once since the write, but luckily the
	//    write cursor has not yet reached (overwritten) this entry's slot.
	if loop == b.loop && offset < b.offset || (loop+1 == b.loop && offset >= b.offset) {
		chunkIndex := offset / chunkSize
		if int(chunkIndex) >= len(b.chunks) {
			atomic.AddUint64(&b.statistics.Errors, 1)
			return nil, ErrorChunkIndexOutOfRange
		}
		chunkOffset := offset & (chunkSize - 1) // or offset % chunkSize
		timestamp := readTimeStamp(b.chunks[chunkIndex][chunkOffset:])
		if timestamp > 0 && timestamp < time.Now().Unix() {
			return nil, ErrorValueExpire
		}
		readKey := readKey(b.chunks[chunkIndex][chunkOffset:])
		if !bytes.Equal(readKey, key) {
			atomic.AddUint64(&b.statistics.Collisions, 1)
			return nil, ErrorNotFound
		}
		blob = append(blob, readValue(b.chunks[chunkIndex][chunkOffset:], uint16(len(readKey)))...)
		atomic.AddUint64(&b.statistics.Hits, 1)
		return blob, nil
	}
	atomic.AddUint64(&b.statistics.Misses, 1)
	return nil, ErrorNotFound
}
// clean removes map entries whose recorded slot has already been overwritten
// by the ring writer, i.e. entries that can no longer be read back.
// FIX: the previous version kept a local deletion counter that was
// incremented but never read (dead code, flagged by staticcheck); it has
// been removed. Deleting from the map while ranging over it is safe in Go.
func (b *bucket) clean() {
	b.mutex.Lock()
	defer b.mutex.Unlock()
	for k, v := range b.m {
		loop := uint32(v >> OffsetSizeOf)
		offset := v & 0x000000ffffffffff
		// Same liveness test as get: keep entries that are still readable.
		if loop == b.loop && offset < b.offset || (loop+1 == b.loop && offset >= b.offset) {
			continue
		}
		delete(b.m, k)
	}
}
// size returns the number of entries in the bucket's index map.
// FIX: this is a read-only operation, so it now takes the read lock
// (matching stats and scan) instead of the exclusive write lock, which
// needlessly blocked concurrent readers.
func (b *bucket) size() int {
	b.mutex.RLock()
	defer b.mutex.RUnlock()
	return len(b.m)
}
// del drops keyHash from the index map. The entry's chunk bytes are left in
// place; they are reclaimed naturally as the ring wraps.
func (b *bucket) del(keyHash uint64) {
	b.mutex.Lock()
	delete(b.m, keyHash)
	b.mutex.Unlock()
}
// reset returns every chunk to the allocator and clears the index map and
// the write cursor, leaving the bucket empty.
// BUG FIX: this method mutates b.chunks, b.m, b.offset and b.loop, so it must
// hold the exclusive write lock; the previous RLock allowed concurrent
// readers (and other writers) to observe and race with the teardown.
func (b *bucket) reset() {
	b.mutex.Lock()
	defer b.mutex.Unlock()
	for i := range b.chunks {
		b.chunkAlloc.putChunk(b.chunks[i])
		b.chunks[i] = nil
	}
	for k := range b.m {
		delete(b.m, k)
	}
	b.offset = 0
	b.loop = 0
}
// stats reports, under the read lock:
//   - the number of index-map entries,
//   - an approximate map footprint in bytes (16 bytes per entry),
//   - the bytes of chunks actually allocated so far,
//   - the theoretical maximum chunk capacity of the bucket.
func (b *bucket) stats() (uint64, uint64, uint64, uint64) {
	b.mutex.RLock()
	defer b.mutex.RUnlock()
	size := uint64(0)
	for i := range b.chunks {
		if b.chunks[i] != nil {
			size += uint64(len(b.chunks[i]))
		}
	}
	return uint64(len(b.m)), uint64(len(b.m) * 16), size, uint64(len(b.chunks)) * chunkSize
}
// scan returns up to count keys currently readable from the bucket. Entries
// whose slots have been overwritten by the ring writer are skipped, and the
// iteration order is the map's (randomized) order.
func (b *bucket) scan(count int) ([][]byte, error) {
	b.mutex.RLock()
	defer b.mutex.RUnlock()
	ret := make([][]byte, 0, count)
	i := 0
	for _, v := range b.m {
		i++
		if i > count {
			break
		}
		loop := uint32(v >> OffsetSizeOf)
		offset := v & 0x000000ffffffffff
		// Same liveness test as get: only decode slots that are still valid.
		if loop == b.loop && offset < b.offset || (loop+1 == b.loop && offset >= b.offset) {
			chunkIndex := offset / chunkSize
			if int(chunkIndex) < len(b.chunks) {
				chunkOffset := offset & (chunkSize - 1)
				readKey := readKey(b.chunks[chunkIndex][chunkOffset:])
				ret = append(ret, readKey)
			}
		}
	}
	return ret, nil
}
|
package geoip
import (
"context"
"github.com/alecthomas/kingpin"
"github.com/apex/log"
"github.com/ooni/probe-cli/v3/cmd/ooniprobe/internal/cli/root"
"github.com/ooni/probe-cli/v3/cmd/ooniprobe/internal/ooni"
"github.com/ooni/probe-cli/v3/cmd/ooniprobe/internal/output"
)
// init registers the "geoip" subcommand with the root CLI; its action runs
// the lookup using the package-level default configuration.
func init() {
	cmd := root.Command("geoip", "Perform a geoip lookup")
	cmd.Action(func(_ *kingpin.ParseContext) error {
		return dogeoip(defaultconfig)
	})
}
// dogeoipconfig groups dogeoip's dependencies so tests can inject fakes.
type dogeoipconfig struct {
	Logger       log.Interface                 // destination for the result table
	NewProbeCLI  func() (ooni.ProbeCLI, error) // constructs the probe CLI
	SectionTitle func(string)                  // prints the section heading
}
// defaultconfig is the production wiring of dogeoipconfig.
var defaultconfig = dogeoipconfig{
	Logger:       log.Log,
	NewProbeCLI:  root.NewProbeCLI,
	SectionTitle: output.SectionTitle,
}
// dogeoip builds a probe engine, performs the geolocation lookup, and logs
// the ASN, network name, country code and IP as a table.
func dogeoip(config dogeoipconfig) error {
	config.SectionTitle("GeoIP lookup")
	probeCLI, err := config.NewProbeCLI()
	if err != nil {
		return err
	}
	engine, err := probeCLI.NewProbeEngine(context.Background())
	if err != nil {
		return err
	}
	defer engine.Close()
	if err := engine.MaybeLookupLocation(); err != nil {
		return err
	}
	fields := log.Fields{
		"type":         "table",
		"asn":          engine.ProbeASNString(),
		"network_name": engine.ProbeNetworkName(),
		"country_code": engine.ProbeCC(),
		"ip":           engine.ProbeIP(),
	}
	config.Logger.WithFields(fields).Info("Looked up your location")
	return nil
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.