text
stringlengths 11
4.05M
|
|---|
package output
import (
"context"
"fmt"
"github.com/benthosdev/benthos/v4/public/service"
)
// init registers the "blue_stdout" output plugin with the Benthos service
// registry. Registration failure is a programming error, so it panics.
func init() {
	if err := service.RegisterOutput(
		"blue_stdout", service.NewConfigSpec(),
		func(conf *service.ParsedConfig, mgr *service.Resources) (out service.Output, maxInFlight int, err error) {
			// Stateless output; a max in-flight of 1 keeps writes ordered.
			return &blueOutput{}, 1, nil
		}); err != nil {
		panic(err)
	}
}
//------------------------------------------------------------------------------

// blueOutput is a Benthos output that prints each message to stdout wrapped
// in ANSI blue color codes. It is stateless, hence the empty struct.
type blueOutput struct{}

// Connect implements service.Output. Writing to stdout needs no setup, so
// this always succeeds.
func (b *blueOutput) Connect(ctx context.Context) error {
	return nil
}
// Write implements service.Output: it renders the raw message bytes to
// stdout between ANSI bright-blue escape sequences, one message per line.
// Errors from extracting the message body are propagated to the caller.
func (b *blueOutput) Write(ctx context.Context, msg *service.Message) error {
	body, err := msg.AsBytes()
	if err != nil {
		return err
	}
	fmt.Printf("\033[01;34m%s\033[m\n", body)
	return nil
}
// Close implements service.Output. There is nothing to release for a
// stdout-backed output.
func (b *blueOutput) Close(ctx context.Context) error {
	return nil
}
|
package ice
import (
"bytes"
"fmt"
)
/*
SessionCheckState describes the state of an ICE connectivity check
(the candidate-pair states of RFC 5245 — TODO confirm section reference).
*/
type SessionCheckState int

const (
	/**
	 * A check for this pair hasn't been performed, and it can't
	 * yet be performed until some other check succeeds, allowing this
	 * pair to unfreeze and move into the Waiting state.
	 */
	checkStateFrozen SessionCheckState = iota
	/**
	 * A check has not been performed for this pair, and can be
	 * performed as soon as it is the highest priority Waiting pair on
	 * the check list.
	 */
	checkStateWaiting
	/**
	 * A check has been sent for this pair, but the transaction is
	 * still in progress and no result has been recorded yet.
	 * (Previous comment was a copy/paste of the Waiting text.)
	 */
	checkStateInProgress
	/**
	 * A check for this pair was already done and produced a
	 * successful result. NOTE(review): the name looks like a typo for
	 * "Succeeded"; kept unchanged for compatibility with other files.
	 */
	checkStateSucced
	/**
	 * A check for this pair was already done and failed, either
	 * never producing any response or producing an unrecoverable failure
	 * response.
	 */
	checkStateFailed
)
// String returns the human-readable label for the check state, or "unknown"
// for any value outside the defined constants.
func (s SessionCheckState) String() string {
	switch s {
	case checkStateFailed:
		return "failed"
	case checkStateSucced:
		return "success"
	case checkStateInProgress:
		return "inprogress"
	case checkStateWaiting:
		return "waiting"
	case checkStateFrozen:
		return "frozen"
	default:
		return "unknown"
	}
}
/**
 * This structure describes an ICE connectivity check. An ICE check
 * contains a candidate pair, and will involve sending STUN Binding
 * Request transaction for the purposes of verifying connectivity.
 * A check is sent from the local candidate to the remote candidate
 * of a candidate pair.
 */
type sessionCheck struct {
	localCandidate  *Candidate
	remoteCandidate *Candidate
	// key is a simple way to tell this check apart from others; it is mostly
	// used for debugging. (Translated from the original Chinese comment.)
	key      string
	priority uint64 // pair priority — presumably per RFC 5245 §5.7.2, confirm
	state    SessionCheckState
	/**
	 * Flag to indicate whether this check is nominated. A nominated check
	 * contains USE-CANDIDATE attribute in its STUN Binding request.
	 */
	nominated bool
	// err records why the check failed, if it did; nil otherwise.
	err error
}
// String returns a compact one-line debug description of the check, e.g.
// "{l=<local>,r=<remote>,priority=<hex>,state=...,nominated=...,err=...}".
func (s *sessionCheck) String() string {
	// "priority" fixes the "priorit" typo in the original format string.
	// %v (not %s) for err: a nil error prints "<nil>" instead of "%!s(<nil>)".
	return fmt.Sprintf("{l=%s,r=%s,priority=%x,state=%s,nominated=%v,err=%v}",
		s.localCandidate.addr, s.remoteCandidate.addr, s.priority, s.state, s.nominated, s.err)
}
// sessionCheckList is an ordered list of connectivity checks. It implements
// sort.Interface (Len/Less/Swap below), ordering by descending priority.
type sessionCheckList struct {
	checks []*sessionCheck
}
// String renders the list for debugging: one indexed line per check followed
// by a closing brace.
// NOTE(review): only the closing "}" is emitted here — the opening brace is
// presumably written by the caller; confirm before changing.
func (sc *sessionCheckList) String() string {
	w := new(bytes.Buffer)
	for i, v := range sc.checks {
		fmt.Fprintf(w, "\t [%d]=%s\n", i, v)
	}
	fmt.Fprintf(w, "}")
	return w.String()
}
// Len returns the number of checks in the list (sort.Interface).
func (sc *sessionCheckList) Len() int {
	return len(sc.checks)
}
// Less orders checks by descending priority, so sorting places the
// highest-priority check first (sort.Interface).
func (sc *sessionCheckList) Less(i, j int) bool {
	return sc.checks[i].priority > sc.checks[j].priority
}
// Swap exchanges the checks at positions i and j (sort.Interface).
func (sc *sessionCheckList) Swap(i, j int) {
	// Idiomatic parallel assignment replaces the explicit temporary.
	sc.checks[i], sc.checks[j] = sc.checks[j], sc.checks[i]
}
|
package main
import (
"fmt"
"io"
"net/http"
"github.com/jbenet/go-ipfs/core"
"github.com/jbenet/go-ipfs/core/coreunix"
"github.com/jbenet/go-ipfs/importer"
"github.com/jbenet/go-ipfs/importer/chunk"
"github.com/jbenet/go-ipfs/repo/fsrepo"
uio "github.com/jbenet/go-ipfs/unixfs/io"
u "github.com/jbenet/go-ipfs/util"
"code.google.com/p/go.net/context"
)
// gnode is the process-wide IPFS node shared by all HTTP handlers; it is
// assigned once in main before the server starts listening.
var gnode *core.IpfsNode
// ServeIpfsRand adds 2048 bytes of time-seeded random data to the IPFS node
// and writes the resulting content key to the client. On failure it responds
// with status 504 and the error text.
func ServeIpfsRand(w http.ResponseWriter, r *http.Request) {
	rnd := io.LimitReader(u.NewTimeSeededRand(), 2048)
	key, err := coreunix.Add(gnode, rnd)
	if err != nil {
		w.WriteHeader(504)
		w.Write([]byte(err.Error()))
		return
	}
	w.Write([]byte(key))
}
// ServeRandDir builds a unixfs directory containing 50 random 512-byte files
// and writes the directory's base58 key to the client.
//
// Any failure is reported to the client with status 504 and the error text —
// matching the error handling of the final Add in the original — instead of
// panicking, which only killed the serving goroutine and gave the client an
// empty response.
func ServeRandDir(w http.ResponseWriter, r *http.Request) {
	fail := func(err error) {
		w.WriteHeader(504)
		w.Write([]byte(err.Error()))
	}
	db := uio.NewDirectory(gnode.DAG)
	for i := 0; i < 50; i++ {
		read := io.LimitReader(u.NewTimeSeededRand(), 512)
		nd, err := importer.BuildDagFromReader(read, gnode.DAG, nil, chunk.DefaultSplitter)
		if err != nil {
			fail(err)
			return
		}
		k, err := gnode.DAG.Add(nd)
		if err != nil {
			fail(err)
			return
		}
		// Child names are just the loop index: "0" .. "49".
		if err := db.AddChild(fmt.Sprint(i), k); err != nil {
			fail(err)
			return
		}
	}
	k, err := gnode.DAG.Add(db.GetNode())
	if err != nil {
		fail(err)
		return
	}
	w.Write([]byte(k.B58String()))
}
// main opens the default IPFS repo, builds an online node, and serves two
// demo endpoints (/ipfsobject and /ipfsdir) on :8080.
func main() {
	builder := core.NewNodeBuilder().Online()
	r := fsrepo.At("~/.go-ipfs")
	if err := r.Open(); err != nil {
		panic(err)
	}
	builder.SetRepo(r)
	ctx, cancel := context.WithCancel(context.Background())
	// defer guarantees the node context is cancelled on every exit path; the
	// original called cancel() after ListenAndServe, which blocks forever on
	// success, so cancellation effectively never ran.
	defer cancel()
	node, err := builder.Build(ctx)
	if err != nil {
		panic(err)
	}
	gnode = node
	http.HandleFunc("/ipfsobject", ServeIpfsRand)
	http.HandleFunc("/ipfsdir", ServeRandDir)
	// ListenAndServe only returns on failure; surface that instead of
	// silently exiting with status 0.
	if err := http.ListenAndServe(":8080", nil); err != nil {
		panic(err)
	}
}
|
// Copyright 2020 MongoDB Inc
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build e2e cloudmanager,generic
package cloud_manager_test
import (
"encoding/json"
"fmt"
"math/rand"
"os"
"os/exec"
"strings"
"testing"
"time"
"github.com/mongodb/go-client-mongodb-atlas/mongodbatlas"
)
// TestDBUsers exercises the dbusers lifecycle (create, list, delete) through
// the CLI binary under test. The username is randomized per run so repeated
// executions against the same project do not collide. The subtests are
// order-dependent: List and Delete assume Create succeeded.
func TestDBUsers(t *testing.T) {
	cliPath, err := cli()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	const dbUsersEntity = "dbusers"
	username := fmt.Sprintf("user-%v", r.Uint32())
	t.Run("Create", func(t *testing.T) {
		cmd := exec.Command(cliPath,
			entity,
			dbUsersEntity,
			"create",
			"--username="+username,
			"--password=passW0rd",
			"--role=readWriteAnyDatabase",
			// No trailing space: the original passed the literal mechanism
			// "SCRAM-SHA-256 " (with a space), which the API would not match.
			"--mechanisms=SCRAM-SHA-256")
		cmd.Env = os.Environ()
		resp, err := cmd.CombinedOutput()
		if err != nil {
			t.Fatalf("unexpected error: %v, resp: %v", err, string(resp))
		}
		if !strings.Contains(string(resp), "Changes are being applied") {
			t.Errorf("got=%#v\nwant=%#v\n", string(resp), "Changes are being applied")
		}
	})
	t.Run("List", func(t *testing.T) {
		cmd := exec.Command(cliPath, entity, dbUsersEntity, "ls")
		cmd.Env = os.Environ()
		resp, err := cmd.CombinedOutput()
		if err != nil {
			t.Fatalf("unexpected error: %v, resp: %v", err, string(resp))
		}
		var users []mongodbatlas.DatabaseUser
		if err := json.Unmarshal(resp, &users); err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		if len(users) == 0 {
			t.Fatalf("expected len(users) > 0, got 0")
		}
	})
	t.Run("Delete", func(t *testing.T) {
		cmd := exec.Command(cliPath, entity, dbUsersEntity, "delete", username, "--force", "--authDB", "admin")
		cmd.Env = os.Environ()
		resp, err := cmd.CombinedOutput()
		if err != nil {
			t.Fatalf("unexpected error: %v, resp: %v", err, string(resp))
		}
		if !strings.Contains(string(resp), "Changes are being applied") {
			t.Errorf("got=%#v\nwant=%#v\n", string(resp), "Changes are being applied")
		}
	})
}
|
package internal
import (
"context"
"github.com/matrix-org/dendrite/roomserver/api"
)
// PerformPublish updates a room's published flag in the roomserver database:
// published when the requested visibility is exactly "public", unpublished
// otherwise. Failures are reported through res.Error rather than a return
// value.
func (r *RoomserverInternalAPI) PerformPublish(
	ctx context.Context,
	req *api.PerformPublishRequest,
	res *api.PerformPublishResponse,
) {
	if err := r.DB.PublishRoom(ctx, req.RoomID, req.Visibility == "public"); err != nil {
		res.Error = &api.PerformError{
			Msg: err.Error(),
		}
	}
}
|
/*
For each row and then column of a matrix, we can add an extra entry with the sum of the last two entries in that row or column. For example with the following input matrix:
[ 1 1 1 ]
[ 2 3 4 ]
The resulting matrix would be:
[ 1 1 1 2 ]
[ 2 3 4 7 ]
[ 3 4 5 9 ]
Given an input of an integer N and an [X,Y] matrix of size at least 2x2, perform the above expansion N times and output the result. The resulting matrix will always be of size [X+N,Y+N].
Examples:
Input: Output:
2, [ 0 0 ] [ 0 0 0 0 ]
[ 0 0 ] [ 0 0 0 0 ]
[ 0 0 0 0 ]
[ 0 0 0 0 ]
3, [ 1 1 1 ] [ 1 1 1 2 3 5 ]
[ 2 3 4 ] [ 2 3 4 7 11 18 ]
[ 3 4 5 9 14 23 ]
[ 5 7 9 16 25 41 ]
[ 8 11 14 25 39 64 ]
*/
package main
import (
"fmt"
"reflect"
)
// main runs the fibmex expansion against three fixed examples, printing each
// result and panicking (via test/assert) on any mismatch.
func main() {
	cases := []struct {
		in   [][]int
		n    int
		want [][]int
	}{
		{
			in:   [][]int{{0, 0}, {0, 0}},
			n:    2,
			want: [][]int{{0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}},
		},
		{
			in:   [][]int{{1, 1, 1}, {2, 3, 4}},
			n:    1,
			want: [][]int{{1, 1, 1, 2}, {2, 3, 4, 7}, {3, 4, 5, 9}},
		},
		{
			in: [][]int{{1, 1, 1}, {2, 3, 4}},
			n:  3,
			want: [][]int{
				{1, 1, 1, 2, 3, 5},
				{2, 3, 4, 7, 11, 18},
				{3, 4, 5, 9, 14, 23},
				{5, 7, 9, 16, 25, 41},
				{8, 11, 14, 25, 39, 64},
			},
		},
	}
	for _, c := range cases {
		test(c.in, c.n, c.want)
	}
}
// assert panics with "assertion failed" when the condition is false; a true
// condition is a no-op.
func assert(ok bool) {
	if ok {
		return
	}
	panic("assertion failed")
}
// test runs fibmex(m, n), prints the result, and panics (via assert) if it
// differs from the expected matrix r.
func test(m [][]int, n int, r [][]int) {
	got := fibmex(m, n)
	dump(got)
	assert(reflect.DeepEqual(got, r))
}
func fibmex(m [][]int, n int) [][]int {
if len(m) == 0 || len(m[0]) == 0 {
return [][]int{}
}
c, r := len(m[0]), len(m)
f := alloc(r+n, c+n)
for i := range m {
for j := range m[i] {
f[i][j] = m[i][j]
}
}
for i := range f {
for j := 0; j < n; j++ {
f[i][c+j] = f[i][c+j-2] + f[i][c+j-1]
}
}
for i := 0; i < c+n; i++ {
for j := 0; j < n; j++ {
f[r+j][i] = f[r+j-2][i] + f[r+j-1][i]
}
}
return f
}
// alloc returns an r x c matrix of zeros whose rows are consecutive slices
// of a single contiguous backing array.
func alloc(r, c int) [][]int {
	cells := make([]int, r*c)
	rows := make([][]int, r)
	for i, off := 0, 0; i < r; i, off = i+1, off+c {
		rows[i] = cells[off : off+c]
	}
	return rows
}
// dump prints each row of m on its own line (fmt's default slice rendering,
// e.g. "[1 2 3]") followed by one blank line.
func dump(m [][]int) {
	for _, row := range m {
		fmt.Println(row)
	}
	fmt.Println()
}
|
package main
import (
"io/ioutil"
"github.com/op/go-logging"
"os"
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022/pacs"
)
// LOGGER is the process-wide go-logging logger for this example program.
var LOGGER = logging.MustGetLogger("main")
// main reads ./example-message.xml (a pacs.008 ISO 20022 payment message),
// unmarshals it, and logs the interbank settlement date from its group
// header. Read or parse failures terminate the process via Fatalf.
func main() {
	pacsMessage, err := ioutil.ReadFile("./example-message.xml")
	if err != nil {
		LOGGER.Fatalf("Unable to read file: %v", err)
		// NOTE(review): go-logging's Fatalf is expected to call os.Exit(1)
		// itself, which would make this line unreachable — confirm; it is
		// kept as a defensive fallback.
		os.Exit(1)
	}
	var pacsMessageParsed pacs.Document00800106
	err = xml.Unmarshal(pacsMessage, &pacsMessageParsed)
	if err != nil {
		LOGGER.Fatalf("Unable to parse file: %v", err)
		os.Exit(1) // see note above
	}
	LOGGER.Infof("Interbank Settlement Date: %v", pacsMessageParsed.Message.GroupHeader.InterbankSettlementDate)
}
|
// miscellaneous utility functions used for the landing page of the application
package newRequest
import (
"glsamaker/pkg/models"
"glsamaker/pkg/models/users"
"html/template"
"net/http"
)
// renderNewTemplate renders the "new request" page: it parses the shared
// layout templates plus the new/ templates, then executes "new.tmpl" with
// page data for the given user and request id. (The previous comment said
// "renderIndexTemplate" — a copy/paste leftover.)
func renderNewTemplate(w http.ResponseWriter, user *users.User, newID string) {
	templates := template.Must(
		template.Must(
			template.New("Show").
				ParseGlob("web/templates/layout/*.tmpl")).
			ParseGlob("web/templates/new/*.tmpl"))
	// NOTE(review): the ExecuteTemplate error is ignored; a mid-render
	// failure leaves the client with a truncated page.
	templates.ExecuteTemplate(w, "new.tmpl", createPageData("new", user, newID))
}
// createPageData assembles the anonymous view-model consumed by the landing
// page templates: the page name, the default global application settings,
// the current user, and the id of the request being created.
func createPageData(page string, user *users.User, newID string) interface{} {
	data := struct {
		Page        string
		Application *models.GlobalSettings
		User        *users.User
		NewID       string
	}{page, models.GetDefaultGlobalSettings(), user, newID}
	return data
}
|
package emasmav1
import (
"context"
"fmt"
"math"
"strconv"
"strings"
"sync"
"github.com/markcheno/go-talib"
"github.com/mhereman/cryptotrader/algorithms"
"github.com/mhereman/cryptotrader/interfaces"
"github.com/mhereman/cryptotrader/logger"
"github.com/mhereman/cryptotrader/types"
)
// name is the registry identifier of this algorithm; the cfg* keys below are
// the configuration entries it understands: SMA/EMA/RSI lookback lengths,
// the RSI buy window and sell threshold, and a backtest toggle.
const (
	name        string = "Ema/Sma"
	cfgSmaLen          = "Ema/Sma.sma_len"
	cfgEmaLen          = "Ema/Sma.ema_len"
	cfgRsiLen          = "Ema/Sma.rsi_len"
	cfgRsiBuyMin       = "Ema/Sma.rsi_buy_min"
	cfgRsiBuyMax       = "Ema/Sma.rsi_buy_max"
	cfgRsiSell         = "Ema/Sma.rsi_sell"
	cfgBacktest        = "Ema/Sma.backtest"
)
// defaultConfig holds the stringly-typed defaults applied by NewAlgorithm:
// SMA 15, EMA 7, RSI 14, RSI buy window [45, 70), sell threshold 90, and
// live (non-backtest) mode.
var defaultConfig types.AlgorithmConfig = types.AlgorithmConfig{
	cfgSmaLen:    "15",
	cfgEmaLen:    "7",
	cfgRsiLen:    "14",
	cfgRsiBuyMin: "45.0",
	cfgRsiBuyMax: "70.0",
	cfgRsiSell:   "90.0",
	cfgBacktest:  "false",
}
// init registers the factory under the algorithm's public name so the
// algorithms registry can instantiate it on demand.
func init() {
	algorithms.RegisterAlgorithm(name, createAlgorithm)
}
// Algorithm represents the Ema/Sma trading algorithm: it buys when the EMA
// crosses over the SMA while RSI is inside a configured window, and sells on
// EMA cross-under, sustained EMA decline while in profit, or RSI falling
// back through the sell threshold (see check).
type Algorithm struct {
	smaLen    int     // SMA lookback length in bars
	emaLen    int     // EMA lookback length in bars
	rsiLen    int     // RSI lookback length in bars
	rsiBuyMin float64 // lower RSI bound (inclusive) permitting buys
	rsiBuyMax float64 // upper RSI bound (exclusive) permitting buys
	rsiSell   float64 // RSI level whose downward cross triggers a sell
	backtest  bool    // emit time-stamped backtest signals instead of live ones

	seriesChannel types.SeriesChannel // incoming candle series
	signalChannel types.SignalChannel // outgoing buy/sell signals
	lastBuyPrice  float64             // close price of the last buy; 0 when flat
}
// NewAlgorithm creates an Ema/Sma algorithm initialised from defaultConfig,
// with no recorded buy price. A configuration error yields (nil, err).
func NewAlgorithm() (*Algorithm, error) {
	algo := &Algorithm{lastBuyPrice: 0.0}
	if err := algo.configure(defaultConfig); err != nil {
		return nil, err
	}
	return algo, nil
}
// createAlgorithm is the factory registered with the algorithms package.
//
// On failure it returns a literal nil interface: the original assigned the
// (*Algorithm)(nil) result into the named interface return, producing a
// non-nil interface wrapping a nil pointer (the classic typed-nil trap),
// which would defeat callers' err/nil checks.
func createAlgorithm() (interfaces.IAlgorithm, error) {
	a, err := NewAlgorithm()
	if err != nil {
		return nil, err
	}
	return a, nil
}
// Name returns the registry name of the algorithm ("Ema/Sma").
func (a Algorithm) Name() string {
	return name
}
// DefaultConfig returns the package-level default configuration map.
// Note: callers receive the shared map itself, not a copy.
func (a Algorithm) DefaultConfig() types.AlgorithmConfig {
	return defaultConfig
}
// Config returns the current configuration rendered back into the string
// form used by AlgorithmConfig (integers for lengths, decimal floats for
// RSI thresholds, "true"/"false" for the backtest flag).
func (a Algorithm) Config() types.AlgorithmConfig {
	return types.AlgorithmConfig{
		cfgSmaLen: fmt.Sprintf("%d", a.smaLen),
		cfgEmaLen: fmt.Sprintf("%d", a.emaLen),
		cfgRsiLen: fmt.Sprintf("%d", a.rsiLen),
		// %f, not %d: rsiBuyMin is a float64 — the original's %d rendered
		// it as "%!d(float64=45)", which configure could not parse back.
		cfgRsiBuyMin: fmt.Sprintf("%f", a.rsiBuyMin),
		cfgRsiBuyMax: fmt.Sprintf("%f", a.rsiBuyMax),
		cfgRsiSell:   fmt.Sprintf("%f", a.rsiSell),
		cfgBacktest:  fmt.Sprintf("%t", a.backtest),
	}
}
// RunAsync applies config and then starts the algorithm's event loop in a
// goroutine that consumes seriesChannel and emits to signalChannel.
// waitGroup is incremented before the goroutine starts and released when the
// loop exits on ctx cancellation. A configuration error is logged and
// returned without starting the loop.
func (a *Algorithm) RunAsync(ctx context.Context, config types.AlgorithmConfig, seriesChannel types.SeriesChannel, signalChannel types.SignalChannel, waitGroup *sync.WaitGroup) (err error) {
	a.seriesChannel = seriesChannel
	a.signalChannel = signalChannel
	if err = a.configure(config); err != nil {
		logger.Errorf("Algorithm[%s]::RunAsync Error %v", name, err)
		return
	}
	waitGroup.Add(1)
	go runRoutine(ctx, waitGroup, a.seriesChannel, a)
	return
}
// emit pushes a signal to the consumer; blocks until the signal is received.
func (a *Algorithm) emit(signal types.Signal) {
	a.signalChannel <- signal
}
// check runs one evaluation of the strategy over series.
//
// It drops the most recent candle (presumably still forming — confirm
// SubSeries semantics against the types package), computes SMA, EMA and RSI
// over the remaining closes, then derives:
//   - buy: EMA crossing over SMA while RSI is in [rsiBuyMin, rsiBuyMax)
//   - sell: EMA crossing under SMA; OR two consecutive EMA declines while
//     the close is above the last buy price; OR RSI dropping back through
//     the rsiSell threshold.
// A buy records the close in lastBuyPrice; a sell resets it to 0. Signals
// (backtest-stamped when a.backtest is set) go out via emit.
func (a *Algorithm) check(ctx context.Context, series types.Series) {
	var sma, ema, rsi []float64
	var in_rsi_range, buySignal, sellSignal1, sellSignal2, sellSignal3 bool
	var calcSeries types.Series
	// Exclude the last (open) candle from all indicator input.
	calcSeries = series.SubSeries(0, series.Length()-1)
	sma = talib.Sma(calcSeries.Close(), a.smaLen)
	ema = talib.Ema(calcSeries.Close(), a.emaLen)
	rsi = talib.Rsi(calcSeries.Close(), a.rsiLen)
	// RSI buy gate: strictly below the max, at or above the min.
	in_rsi_range = rsi[len(rsi)-1] < a.rsiBuyMax && rsi[len(rsi)-1] >= a.rsiBuyMin
	buySignal = talib.Crossover(ema, sma) && in_rsi_range
	sellSignal1 = talib.Crossunder(ema, sma)
	// Two consecutive EMA declines while in profit vs. the last buy.
	sellSignal2 = ema[len(ema)-3] > ema[len(ema)-2] && ema[len(ema)-2] > ema[len(ema)-1] && calcSeries.CurrentClose() > a.lastBuyPrice
	// RSI falling back through the sell threshold between the last two bars.
	sellSignal3 = rsi[len(rsi)-2] > a.rsiSell && rsi[len(rsi)-1] <= a.rsiSell
	if buySignal {
		logger.Debugf("EMIT BUY")
		a.lastBuyPrice = calcSeries.CurrentClose()
		if a.backtest {
			a.emit(types.NewBacktestSignal(name, series.Symbol, types.Buy, calcSeries.CurrentCandleTime()))
		} else {
			a.emit(types.NewSignal(name, series.Symbol, types.Buy))
		}
	}
	if sellSignal1 || sellSignal2 || sellSignal3 {
		logger.Debugf("EMIT SELL")
		a.lastBuyPrice = 0.0
		if a.backtest {
			a.emit(types.NewBacktestSignal(name, series.Symbol, types.Sell, calcSeries.CurrentCandleTime()))
		} else {
			a.emit(types.NewSignal(name, series.Symbol, types.Sell))
		}
	}
}
// checkBacktest replays the series incrementally: for every prefix that is
// long enough to feed all three indicators (longest lookback + 2 bars), it
// runs the regular live check, so historical signals are emitted in order.
func (a *Algorithm) checkBacktest(ctx context.Context, series types.Series) {
	// Shortest usable prefix: the longest indicator lookback plus two bars.
	minSampleLen := int(math.Max(math.Max(float64(a.smaLen), float64(a.emaLen)), float64(a.rsiLen))) + 2
	for end := minSampleLen; end <= series.Length(); end++ {
		a.check(ctx, series.SubSeries(0, end))
	}
}
// configure applies the key/value configuration to the algorithm's fields.
// Lengths parse via strconv.Atoi, thresholds via ParseFloat, and the
// backtest flag is true for any case-variant of "true". Unknown keys are
// silently ignored; the first parse failure aborts with that error.
func (a *Algorithm) configure(config types.AlgorithmConfig) error {
	for key, value := range config {
		var err error
		switch key {
		case cfgSmaLen:
			a.smaLen, err = strconv.Atoi(value)
		case cfgEmaLen:
			a.emaLen, err = strconv.Atoi(value)
		case cfgRsiLen:
			a.rsiLen, err = strconv.Atoi(value)
		case cfgRsiBuyMin:
			a.rsiBuyMin, err = strconv.ParseFloat(value, 64)
		case cfgRsiBuyMax:
			a.rsiBuyMax, err = strconv.ParseFloat(value, 64)
		case cfgRsiSell:
			a.rsiSell, err = strconv.ParseFloat(value, 64)
		case cfgBacktest:
			a.backtest = strings.ToLower(value) == "true"
		}
		if err != nil {
			return err
		}
	}
	return nil
}
// runRoutine is the algorithm's event loop: it consumes candle series from
// seriesChannel and dispatches each one to the backtest or live check until
// ctx is cancelled. The wait group is released when the loop exits.
func runRoutine(ctx context.Context, wg *sync.WaitGroup, seriesChannel types.SeriesChannel, a *Algorithm) {
	defer wg.Done()
	for {
		select {
		case <-ctx.Done():
			return
		case series := <-seriesChannel:
			logger.Debugf("Algorithm :%s received new data\n", a.Name())
			if a.backtest {
				a.checkBacktest(ctx, series)
			} else {
				a.check(ctx, series)
			}
		}
	}
}
|
//File : ${NAME}.go
//Author: 燕人Lee&骚气又迷人的反派
//Date : ${DATE}
package main
import (
"fmt"
)
// main prints a startup marker to stdout.
func main() {
	fmt.Println("start")
}
package ${GO_PACKAGE_NAME}
|
package influxql
import (
"encoding/binary"
"errors"
"fmt"
"hash/fnv"
"sort"
"strings"
"time"
)
// DB represents an interface to the underlying storage.
type DB interface {
	// MatchSeries returns a list of series data ids matching a name and tags.
	MatchSeries(name string, tags map[string]string) []uint32

	// SeriesTagValues returns a slice of tag values for a series.
	SeriesTagValues(seriesID uint32, keys []string) []string

	// Field returns the id and data type for a series field.
	// A zero field id means the name is not a field.
	Field(name, field string) (fieldID uint8, typ DataType)

	// CreateIterator returns an iterator for a series data id and field over
	// the given time range, grouped by interval.
	CreateIterator(id uint32, fieldID uint8, typ DataType, min, max time.Time, interval time.Duration) Iterator
}
// Planner represents an object for creating execution plans.
type Planner struct {
	// The underlying storage that holds series and field meta data.
	DB DB

	// Returns the current time. Defaults to time.Now(); replaceable so
	// tests can pin "now".
	Now func() time.Time
}
// NewPlanner returns a Planner backed by db that uses wall-clock time.
func NewPlanner(db DB) *Planner {
	planner := &Planner{DB: db}
	planner.Now = time.Now
	return planner
}
// Plan creates an Executor for stmt: it folds the condition against the
// current time, validates the resulting time range, resolves the group-by
// interval and tag keys, and builds one processor per selected field.
func (p *Planner) Plan(stmt *SelectStatement) (*Executor, error) {
	// Create the executor.
	e := &Executor{
		db:         p.DB,
		stmt:       stmt,
		processors: make([]processor, len(stmt.Fields)),
	}

	// Fold conditional against "now" so time expressions become constants.
	now := p.Now()
	stmt.Condition = Fold(stmt.Condition, &now)

	// Extract the time range; an open upper bound defaults to now.
	min, max := TimeRange(stmt.Condition)
	if max.IsZero() {
		max = now
	}
	if max.Before(min) {
		return nil, fmt.Errorf("invalid time range: %s - %s", min.Format(DateTimeFormat), max.Format(DateTimeFormat))
	}
	e.min, e.max = min, max

	// Determine group by interval and remaining tag dimensions.
	interval, tags, err := p.normalizeDimensions(stmt.Dimensions)
	if err != nil {
		return nil, err
	}
	e.interval, e.tags = interval, tags

	// Generate a processor for each field. The loop variable is named proc
	// (the original reused p, shadowing the Planner receiver inside the
	// loop body — confusing and a classic shadowing hazard).
	for i, f := range stmt.Fields {
		proc, err := p.planField(e, f)
		if err != nil {
			return nil, err
		}
		e.processors[i] = proc
	}
	return e, nil
}
// normalizeDimensions extracts the optional leading "time(duration)"
// dimension and returns the interval plus the remaining dimensions' tag
// keys. With no dimensions at all it returns zero values.
func (p *Planner) normalizeDimensions(dimensions Dimensions) (time.Duration, []string, error) {
	// Ignore if there are no dimensions.
	if len(dimensions) == 0 {
		return 0, nil, nil
	}
	// Only a first dimension of the form time(...) carries an interval.
	call, ok := dimensions[0].Expr.(*Call)
	if !ok || strings.ToLower(call.Name) != "time" {
		return 0, dimensionKeys(dimensions), nil
	}
	// time() takes exactly one duration argument.
	if len(call.Args) != 1 {
		return 0, nil, errors.New("time dimension expected one argument")
	}
	lit, ok := call.Args[0].(*DurationLiteral)
	if !ok {
		return 0, nil, errors.New("time dimension must have one duration argument")
	}
	return lit.Val, dimensionKeys(dimensions[1:]), nil
}
// planField returns a processor for a single selected field's expression.
func (p *Planner) planField(e *Executor, f *Field) (processor, error) {
	return p.planExpr(e, f.Expr)
}
// planExpr returns a processor for an expression: function calls become
// reducers, binary expressions become join evaluators, parentheses recurse,
// and literals become constant-emitting processors. Bare field references
// (VarRef) are not implemented yet.
func (p *Planner) planExpr(e *Executor, expr Expr) (processor, error) {
	switch expr := expr.(type) {
	case *VarRef:
		panic("TODO")
	case *Call:
		return p.planCall(e, expr)
	case *BinaryExpr:
		return p.planBinaryExpr(e, expr)
	case *ParenExpr:
		return p.planExpr(e, expr.Expr)
	case *NumberLiteral:
		return newLiteralProcessor(expr.Val), nil
	case *StringLiteral:
		return newLiteralProcessor(expr.Val), nil
	case *BooleanLiteral:
		return newLiteralProcessor(expr.Val), nil
	case *TimeLiteral:
		return newLiteralProcessor(expr.Val), nil
	case *DurationLiteral:
		return newLiteralProcessor(expr.Val), nil
	}
	// All Expr variants are handled above; a new AST node type would panic.
	panic("unreachable")
}
// planCall generates a processor for a function call (currently only count
// and sum). It validates the single field-reference argument, strips tag
// equality filters out of the substatement's condition, resolves the field,
// and wires one mapper per matching series under a single reducer.
func (p *Planner) planCall(e *Executor, c *Call) (processor, error) {
	// Ensure there is a single argument.
	if len(c.Args) != 1 {
		return nil, fmt.Errorf("expected one argument for %s()", c.Name)
	}
	// Ensure the argument is a variable reference.
	ref, ok := c.Args[0].(*VarRef)
	if !ok {
		return nil, fmt.Errorf("expected field argument in %s()", c.Name)
	}
	// Extract the substatement for the call.
	sub, err := e.stmt.Substatement(ref)
	if err != nil {
		return nil, err
	}
	name := sub.Source.(*Measurement).Name
	// Extract tags from conditional. Matched tag filters are removed from
	// the condition and recorded in tags for series matching below.
	tags := make(map[string]string)
	condition, err := p.extractTags(name, sub.Condition, tags)
	if err != nil {
		return nil, err
	}
	sub.Condition = condition
	// Find field: the reference is "<measurement>.<field>".
	fname := strings.TrimPrefix(ref.Val, name+".")
	fieldID, typ := e.db.Field(name, fname)
	if fieldID == 0 {
		// Field() reports id 0 for names that are not fields.
		return nil, fmt.Errorf("field not found: %s.%s", name, fname)
	}
	// Generate a reducer for the given function.
	r := newReducer(e)
	r.stmt = sub
	// Retrieve a list of series data ids.
	seriesIDs := p.DB.MatchSeries(name, tags)
	// Generate mappers for each id.
	r.mappers = make([]*mapper, len(seriesIDs))
	for i, seriesID := range seriesIDs {
		m := newMapper(e, seriesID, fieldID, typ)
		m.min, m.max = e.min.UnixNano(), e.max.UnixNano()
		m.interval = int64(e.interval)
		// Key layout: 8 bytes reserved for the timestamp (filled in emit),
		// followed by the marshalled group-by tag values of this series.
		m.key = append(make([]byte, 8), marshalStrings(p.DB.SeriesTagValues(seriesID, e.tags))...)
		r.mappers[i] = m
	}
	// Set the appropriate reducer function.
	switch strings.ToLower(c.Name) {
	case "count":
		// count = sum of per-series counts.
		r.fn = reduceSum
		for _, m := range r.mappers {
			m.fn = mapCount
		}
	case "sum":
		r.fn = reduceSum
		for _, m := range r.mappers {
			m.fn = mapSum
		}
	default:
		return nil, fmt.Errorf("function not found: %q", c.Name)
	}
	return r, nil
}
// planBinaryExpr generates a processor for a binary expression — a join
// operator combining the processors of its two operands. Operand planning
// errors are wrapped with an "lhs:"/"rhs:" prefix.
func (p *Planner) planBinaryExpr(e *Executor, expr *BinaryExpr) (processor, error) {
	lhs, lerr := p.planExpr(e, expr.LHS)
	if lerr != nil {
		return nil, fmt.Errorf("lhs: %s", lerr)
	}
	rhs, rerr := p.planExpr(e, expr.RHS)
	if rerr != nil {
		return nil, fmt.Errorf("rhs: %s", rerr)
	}
	return newBinaryExprEvaluator(e, expr.Op, lhs, rhs), nil
}
// extractTags extracts a tag key/value map from a statement's condition.
// Extracted tag-equality comparisons are removed from the expression tree
// (a nil return from a recursive call prunes that subtree); everything else
// is returned unchanged.
func (p *Planner) extractTags(name string, expr Expr, tags map[string]string) (Expr, error) {
	// TODO: Refactor into a walk-like Replace().
	switch expr := expr.(type) {
	case *BinaryExpr:
		// If the LHS is a variable ref then check for tag equality.
		if lhs, ok := expr.LHS.(*VarRef); ok && expr.Op == EQ {
			return p.extractBinaryExprTags(name, expr, lhs, expr.RHS, tags)
		}
		// If the RHS is a variable ref then check for tag equality.
		if rhs, ok := expr.RHS.(*VarRef); ok && expr.Op == EQ {
			return p.extractBinaryExprTags(name, expr, rhs, expr.LHS, tags)
		}
		// Recursively process LHS.
		lhs, err := p.extractTags(name, expr.LHS, tags)
		if err != nil {
			return nil, err
		}
		expr.LHS = lhs
		// Recursively process RHS.
		rhs, err := p.extractTags(name, expr.RHS, tags)
		if err != nil {
			return nil, err
		}
		expr.RHS = rhs
		return expr, nil
	case *ParenExpr:
		e, err := p.extractTags(name, expr.Expr, tags)
		if err != nil {
			return nil, err
		}
		expr.Expr = e
		return expr, nil
	default:
		// Leaves and other node types carry no tag filters.
		return expr, nil
	}
}
// extractBinaryExprTags records a ref = "literal" comparison as a tag filter
// in tags and returns nil so the caller removes the expression. Non-string
// values leave the expression untouched; filtering the same key twice is an
// error.
func (p *Planner) extractBinaryExprTags(name string, expr Expr, ref *VarRef, value Expr, tags map[string]string) (Expr, error) {
	lit, isString := value.(*StringLiteral)
	if !isString {
		// Not a string literal — keep the expression as-is.
		return expr, nil
	}
	// Strip the "<measurement>." prefix to get the bare tag key.
	key := strings.TrimPrefix(ref.Val, name+".")
	if _, dup := tags[key]; dup {
		return nil, fmt.Errorf("duplicate tag filter: %s.%s", name, key)
	}
	tags[key] = lit.Val
	// nil removes the expression from the condition tree.
	return nil, nil
}
// Executor represents the implementation of Executor.
// It executes all reducers and combines their result into a row.
type Executor struct {
	db         DB               // source database
	stmt       *SelectStatement // original statement
	processors []processor      // per-field processors
	min, max   time.Time        // time range
	interval   time.Duration    // group by duration
	tags       []string         // group by tag keys
}
// Execute starts every field processor, launches the streaming goroutine,
// and returns the unbuffered channel on which result rows will arrive.
func (e *Executor) Execute() (<-chan *Row, error) {
	for _, proc := range e.processors {
		proc.start()
	}
	out := make(chan *Row)
	go e.execute(out)
	return out, nil
}
// execute runs in a separate goroutine and streams data from processors.
// Each iteration reads one key/value map per processor and routes every
// value into a per-tagset row (the key's first 8 bytes are a big-endian
// timestamp, the rest the encoded tagset). When any processor's channel
// closes, rows are normalized, sorted, sent on out, and out is closed.
func (e *Executor) execute(out chan *Row) {
	// TODO: Support multi-value rows.
	// Initialize map of rows by encoded tagset.
	rows := make(map[string]*Row)
	// Combine values from each processor.
loop:
	for {
		// Retrieve values from processors and write them to the appropriate
		// row based on their tagset.
		for i, p := range e.processors {
			// Retrieve data from the processor.
			m, ok := <-p.C()
			if !ok {
				break loop
			}
			// Set values on returned row.
			for k, v := range m {
				// Extract timestamp and tag values from key.
				b := []byte(k)
				timestamp := int64(binary.BigEndian.Uint64(b[0:8]))
				// Lookup row values and populate data. Column 0 is the
				// timestamp, so processor i writes to column i+1.
				values := e.createRowValuesIfNotExists(rows, e.processors[0].name(), b[8:], timestamp)
				values[i+1] = v
			}
		}
	}
	// Normalize rows and values.
	// This converts the timestamps from nanoseconds to microseconds.
	a := make(Rows, 0, len(rows))
	for _, row := range rows {
		for _, values := range row.Values {
			values[0] = values[0].(int64) / int64(time.Microsecond)
		}
		a = append(a, row)
	}
	sort.Sort(a)
	// Send rows to the channel.
	for _, row := range a {
		out <- row
	}
	// Mark the end of the output channel.
	close(out)
}
// createRowValuesIfNotExists returns the value slice for the given tagset +
// timestamp, creating the row (with its tag map and column names) and/or a
// fresh value slice when they do not exist yet. Column 0 of each value slice
// is the timestamp; the remaining columns are one per processor.
func (e *Executor) createRowValuesIfNotExists(rows map[string]*Row, name string, tagset []byte, timestamp int64) []interface{} {
	// TODO: Add "name" to lookup key.
	// Find row by tagset.
	var row *Row
	if row = rows[string(tagset)]; row == nil {
		row = &Row{Name: name}
		// Create tag map: decoded values pair positionally with e.tags keys.
		row.Tags = make(map[string]string)
		for i, v := range unmarshalStrings(tagset) {
			row.Tags[e.tags[i]] = v
		}
		// Create column names: "time" first, then one per selected field,
		// falling back to "colN" for unnamed fields.
		row.Columns = make([]string, 1, len(e.stmt.Fields)+1)
		row.Columns[0] = "time"
		for i, f := range e.stmt.Fields {
			name := f.Name()
			if name == "" {
				name = fmt.Sprintf("col%d", i)
			}
			row.Columns = append(row.Columns, name)
		}
		// Save to lookup.
		rows[string(tagset)] = row
	}
	// If no values exist or last value doesn't match the timestamp then create new.
	if len(row.Values) == 0 || row.Values[len(row.Values)-1][0] != timestamp {
		values := make([]interface{}, len(e.processors)+1)
		values[0] = timestamp
		row.Values = append(row.Values, values)
	}
	return row.Values[len(row.Values)-1]
}
// dimensionKeys returns the tag key name of each dimension. Every dimension
// must be a *VarRef (anything else panics on the type assertion). Returns
// nil when there are no dimensions.
func dimensionKeys(dimensions Dimensions) []string {
	var keys []string
	for _, d := range dimensions {
		keys = append(keys, d.Expr.(*VarRef).Val)
	}
	return keys
}
// mapper represents an object for processing iterators.
type mapper struct {
	executor *Executor // parent executor
	seriesID uint32    // series id
	fieldID  uint8     // field id
	typ      DataType  // field data type
	itr      Iterator  // series iterator
	min, max int64     // time range
	interval int64     // group by interval
	key      []byte    // encoded timestamp + dimensional values
	fn       mapFunc   // map function
	c        chan map[string]interface{} // output stream: encoded key -> value
	done     chan chan struct{}          // shutdown handshake (see syncClose)
}
// newMapper returns a mapper for one series/field pair with unbuffered
// output and done channels. Time range, interval, key and map function are
// filled in by the caller (see planCall).
func newMapper(e *Executor, seriesID uint32, fieldID uint8, typ DataType) *mapper {
	m := &mapper{
		executor: e,
		seriesID: seriesID,
		fieldID:  fieldID,
		typ:      typ,
	}
	m.c = make(chan map[string]interface{})
	m.done = make(chan chan struct{})
	return m
}
// start opens the series iterator over the executor's time range and
// interval, then begins mapping in a new goroutine.
func (m *mapper) start() {
	m.itr = m.executor.db.CreateIterator(m.seriesID, m.fieldID, m.typ,
		m.executor.min, m.executor.max, m.executor.interval)
	go m.run()
}
// stop synchronously stops the mapper via the done-channel handshake.
func (m *mapper) stop() { syncClose(m.done) }

// C returns the streaming data channel.
func (m *mapper) C() <-chan map[string]interface{} { return m.c }
// run executes the map function once per iterator interval and closes the
// output channel when the iterator is exhausted. (NextIterval presumably
// advances to the next group-by interval — the name looks like a typo for
// "NextInterval"; confirm against the Iterator definition.)
func (m *mapper) run() {
	for m.itr.NextIterval() {
		m.fn(m.itr, m)
	}
	close(m.c)
}
// emit sends a value to the mapper's output channel, keyed by the encoded
// timestamp followed by the mapper's dimensional values (set in planCall).
func (m *mapper) emit(key int64, value interface{}) {
	// Encode the timestamp into the first 8 bytes of the key.
	binary.BigEndian.PutUint64(m.key, uint64(key))
	// OPTIMIZE: Collect emit calls and flush all at once.
	m.c <- map[string]interface{}{string(m.key): value}
}
// mapFunc represents a function used for mapping iterators: it consumes the
// iterator's current interval and emits results through the mapper.
type mapFunc func(Iterator, *mapper)
// mapCount emits (as float64) how many values the iterator yields for the
// current interval; iteration ends at the zero timestamp sentinel.
func mapCount(itr Iterator, m *mapper) {
	count := 0
	for k, _ := itr.Next(); k != 0; k, _ = itr.Next() {
		count++
	}
	m.emit(itr.Time(), float64(count))
}
// mapSum emits the sum of all float64 values the iterator yields for the
// current interval; iteration ends at the zero timestamp sentinel.
func mapSum(itr Iterator, m *mapper) {
	total := float64(0)
	for k, v := itr.Next(); k != 0; k, v = itr.Next() {
		total += v.(float64)
	}
	m.emit(itr.Time(), total)
}
// processor represents an object for joining reducer output: something that
// can be started and stopped and streams key -> value maps via C().
// Implemented in this file by reducer and binaryExprEvaluator.
type processor interface {
	start()
	stop()
	name() string
	C() <-chan map[string]interface{}
}
// reducer represents an object for processing mapper output.
// Implements processor.
type reducer struct {
	executor *Executor        // parent executor
	stmt     *SelectStatement // substatement
	mappers  []*mapper        // child mappers
	fn       reduceFunc       // reduce function
	c        chan map[string]interface{} // reduced output stream
	done     chan chan struct{}          // shutdown handshake (see syncClose)
}
// newReducer returns a reducer bound to e with unbuffered output and done
// channels; stmt, mappers and fn are filled in by the caller (planCall).
func newReducer(e *Executor) *reducer {
	r := &reducer{executor: e}
	r.c = make(chan map[string]interface{})
	r.done = make(chan chan struct{})
	return r
}
// start begins streaming values from the mappers and reducing them.
func (r *reducer) start() {
	for _, m := range r.mappers {
		m.start()
	}
	go r.run()
}

// stop stops every child mapper, then the reducer itself via the
// done-channel handshake.
func (r *reducer) stop() {
	for _, m := range r.mappers {
		m.stop()
	}
	syncClose(r.done)
}
// C returns the streaming data channel.
func (r *reducer) C() <-chan map[string]interface{} { return r.c }

// name returns the source measurement name of the substatement.
func (r *reducer) name() string { return r.stmt.Source.(*Measurement).Name }
// run reads one key/value map from every mapper per iteration, groups the
// values by key, applies the reduce function per key, and emits the results
// on c. The loop ends — and c is closed — when any mapper's channel closes.
func (r *reducer) run() {
loop:
	for {
		// Combine all data from the mappers.
		data := make(map[string][]interface{})
		for _, m := range r.mappers {
			kv, ok := <-m.C()
			if !ok {
				break loop
			}
			for k, v := range kv {
				data[k] = append(data[k], v)
			}
		}
		// Reduce each key. r.fn emits through r.emit.
		for k, v := range data {
			r.fn(k, v, r)
		}
	}
	// Mark the channel as complete.
	close(r.c)
}
// emit sends a single reduced key/value pair to the reducer's output channel.
func (r *reducer) emit(key string, value interface{}) {
	r.c <- map[string]interface{}{key: value}
}

// reduceFunc represents a function used for reducing mapper output: it
// receives all values collected under one key and emits via the reducer.
type reduceFunc func(string, []interface{}, *reducer)
// reduceSum emits the float64 sum of all values collected under key.
func reduceSum(key string, values []interface{}, r *reducer) {
	total := 0.0
	for _, v := range values {
		total += v.(float64)
	}
	r.emit(key, total)
}
// binaryExprEvaluator represents a processor for combining two processors
// with an arithmetic operator (see eval). Implements processor.
type binaryExprEvaluator struct {
	executor *Executor // parent executor
	lhs, rhs processor // operand processors
	op       Token     // operation
	c        chan map[string]interface{} // combined output stream
	done     chan chan struct{}          // shutdown handshake (see syncClose)
}
// newBinaryExprEvaluator returns a new instance of binaryExprEvaluator.
func newBinaryExprEvaluator(e *Executor, op Token, lhs, rhs processor) *binaryExprEvaluator {
return &binaryExprEvaluator{
executor: e,
op: op,
lhs: lhs,
rhs: rhs,
c: make(chan map[string]interface{}, 0),
done: make(chan chan struct{}, 0),
}
}
// start begins streaming values from the lhs/rhs processors
func (e *binaryExprEvaluator) start() {
e.lhs.start()
e.rhs.start()
go e.run()
}
// stop stops the processor.
func (e *binaryExprEvaluator) stop() {
e.lhs.stop()
e.rhs.stop()
syncClose(e.done)
}
// C returns the streaming data channel.
func (e *binaryExprEvaluator) C() <-chan map[string]interface{} { return e.c }
// name returns the source name.
func (e *binaryExprEvaluator) name() string { return "" }
// run runs the processor loop to read subprocessor output and combine it.
func (e *binaryExprEvaluator) run() {
for {
// Read LHS value.
lhs, ok := <-e.lhs.C()
if !ok {
break
}
// Read RHS value.
rhs, ok := <-e.rhs.C()
if !ok {
break
}
// Merge maps.
m := make(map[string]interface{})
for k, v := range lhs {
m[k] = e.eval(v, rhs[k])
}
for k, v := range rhs {
// Skip value if already processed in lhs loop.
if _, ok := m[k]; ok {
continue
}
m[k] = e.eval(float64(0), v)
}
// Return value.
e.c <- m
}
// Mark the channel as complete.
close(e.c)
}
// eval evaluates two values using the evaluator's operation.
func (e *binaryExprEvaluator) eval(lhs, rhs interface{}) interface{} {
switch e.op {
case ADD:
return lhs.(float64) + rhs.(float64)
case SUB:
return lhs.(float64) - rhs.(float64)
case MUL:
return lhs.(float64) * rhs.(float64)
case DIV:
rhs := rhs.(float64)
if rhs == 0 {
return float64(0)
}
return lhs.(float64) / rhs
default:
// TODO: Validate operation & data types.
panic("invalid operation: " + e.op.String())
}
}
// literalProcessor represents a processor that continually sends a literal value.
type literalProcessor struct {
	val  interface{}                 // the literal to emit
	c    chan map[string]interface{} // streaming output
	done chan chan struct{}          // shutdown handshake channel
}

// newLiteralProcessor returns a literalProcessor for a given value.
func newLiteralProcessor(val interface{}) *literalProcessor {
	return &literalProcessor{
		val:  val,
		c:    make(chan map[string]interface{}, 0),
		done: make(chan chan struct{}, 0),
	}
}

// C returns the streaming data channel.
func (p *literalProcessor) C() <-chan map[string]interface{} { return p.c }

// process continually returns a literal value with a "0" key.
func (p *literalProcessor) start() { go p.run() }

// run executes the processor loop.
// It alternates between emitting the literal and checking for shutdown;
// on shutdown it closes the acknowledgement channel received via done.
func (p *literalProcessor) run() {
	for {
		select {
		case ch := <-p.done:
			close(ch)
			return
		case p.c <- map[string]interface{}{"": p.val}:
		}
	}
}

// stop stops the processor from sending values.
func (p *literalProcessor) stop() { syncClose(p.done) }

// name returns the source name.
func (p *literalProcessor) name() string { return "" }
// syncClose performs a synchronous shutdown handshake: it sends a fresh
// acknowledgement channel over done and blocks until the receiving
// goroutine closes it, guaranteeing the receiver has observed the stop.
func syncClose(done chan chan struct{}) {
	ack := make(chan struct{})
	done <- ack
	<-ack
}
// Iterator represents a forward-only iterator over a set of points.
// The iterator groups points together in interval sets.
type Iterator interface {
	// Next returns the next value from the iterator.
	Next() (key int64, value interface{})

	// NextIterval moves to the next iterval. Returns true unless EOF.
	// NOTE(review): "Iterval" looks like a typo for "Interval", but renaming
	// would break implementers — flagging rather than changing.
	NextIterval() bool

	// Time returns start time of the current interval.
	Time() int64

	// Interval returns the group by duration.
	Interval() time.Duration
}
// Row represents a single row returned from the execution of a statement.
type Row struct {
	Name    string            `json:"name,omitempty"`
	Tags    map[string]string `json:"tags,omitempty"`
	Columns []string          `json:"columns"`
	Values  [][]interface{}   `json:"values,omitempty"`
	Err     error             `json:"err,omitempty"`
}

// tagsHash returns a hash of tag key/value pairs.
// Keys are visited in sorted order, so the hash is deterministic for a
// given tag set. Used only to give Rows a stable sort order.
func (r *Row) tagsHash() uint64 {
	h := fnv.New64a()
	keys := r.tagsKeys()
	for _, k := range keys {
		h.Write([]byte(k))
		h.Write([]byte(r.Tags[k]))
	}
	return h.Sum64()
}
// tagsKeys returns a sorted list of the row's tag keys.
func (r *Row) tagsKeys() []string {
	// Allocate with zero length and capacity len(r.Tags). The original used
	// make([]string, len(r.Tags)) and then appended, which returned a slice
	// of 2*len(r.Tags) elements with len(r.Tags) empty strings at the front.
	a := make([]string, 0, len(r.Tags))
	for k := range r.Tags {
		a = append(a, k)
	}
	sort.Strings(a)
	return a
}
// Rows represents a list of rows that can be sorted consistently by name/tag.
// It implements sort.Interface.
type Rows []*Row

// Len returns the number of rows.
func (p Rows) Len() int { return len(p) }

// Less orders rows by name, breaking ties with a hash of the tag set.
func (p Rows) Less(i, j int) bool {
	// Sort by name first.
	if p[i].Name != p[j].Name {
		return p[i].Name < p[j].Name
	}

	// Sort by tag set hash. Tags don't have a meaningful sort order so we
	// just compute a hash and sort by that instead. This allows the tests
	// to receive rows in a predictable order every time.
	return p[i].tagsHash() < p[j].tagsHash()
}

// Swap exchanges the rows at indexes i and j.
func (p Rows) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
// marshalStrings encodes a slice of strings into a single byte slice.
// Each entry is written as a big-endian uint16 length prefix followed by
// the string's bytes; unmarshalStrings reverses the encoding.
func marshalStrings(a []string) (ret []byte) {
	for _, s := range a {
		var prefix [2]byte
		binary.BigEndian.PutUint16(prefix[:], uint16(len(s)))
		ret = append(ret, prefix[:]...)
		ret = append(ret, s...)
	}
	return
}
// unmarshalStrings decodes a byte slice produced by marshalStrings back
// into a slice of strings. Decoding stops as soon as the remaining data
// is too short to hold a complete entry, so truncated or malformed input
// never causes an out-of-range panic (the original indexed b[0:2] and
// b[2:n+2] unconditionally, and n+2 could also wrap as uint16).
func unmarshalStrings(b []byte) (ret []string) {
	for len(b) >= 2 {
		// Decode the big-endian uint16 length prefix.
		n := int(binary.BigEndian.Uint16(b[0:2]))
		if len(b) < 2+n {
			// Truncated entry: stop rather than read past the buffer.
			return
		}
		ret = append(ret, string(b[2:2+n]))
		// Move the byte slice forward and continue with the remainder.
		b = b[2+n:]
	}
	return
}
|
/*
# -*- coding: utf-8 -*-
# @Author : joker
# @Time : 2021/8/22 7:47 上午
# @File : lt_21_合并2个有序链表.go
# @Description :
# @Attention :
*/
package offer
// Key idea: a single linear pass over both lists suffices.
//
// mergeTwoLists merges two sorted linked lists and returns the head of
// the combined sorted list. Existing nodes are relinked, not copied.
func mergeTwoLists(l1 *ListNode, l2 *ListNode) *ListNode {
	sentinel := &ListNode{}
	tail := sentinel
	for l1 != nil && l2 != nil {
		if l1.Val < l2.Val {
			tail.Next, l1 = l1, l1.Next
		} else {
			tail.Next, l2 = l2, l2.Next
		}
		tail = tail.Next
	}
	// Splice in whatever remains of the non-exhausted list.
	if l1 != nil {
		tail.Next = l1
	} else {
		tail.Next = l2
	}
	return sentinel.Next
}
|
package Problem0355
import "sort"
import "time"
// tweet records a single posted tweet: its id and the wall-clock time it
// was posted (used for recency ordering).
type tweet struct {
	id   int
	time int64
}

// tweets implements sort.Interface so a feed can be ordered newest-first.
type tweets []tweet

// Len returns the number of tweets.
func (t tweets) Len() int {
	return len(t)
}

// Less orders tweets by descending timestamp (most recent first).
func (t tweets) Less(i, j int) bool {
	return t[i].time > t[j].time
}

// Swap exchanges the tweets at indexes i and j.
func (t tweets) Swap(i, j int) {
	t[i], t[j] = t[j], t[i]
}
// Twitter is twitter user
// userTweets maps a user id to the tweets that user has posted;
// follow maps a follower id to the ids of the users they follow.
type Twitter struct {
	userTweets map[int]tweets
	follow     map[int][]int
}
// Constructor initialize your data structure here.
// It returns a Twitter value with both lookup maps ready for use.
func Constructor() Twitter {
	return Twitter{
		userTweets: make(map[int]tweets),
		follow:     make(map[int][]int),
	}
}
// PostTweet compose a new tweet.
// The tweet is stamped with the current time in nanoseconds, which
// GetNewsFeed later uses to order feeds most-recent-first.
func (t *Twitter) PostTweet(userID int, tweetID int) {
	t.userTweets[userID] = append(
		t.userTweets[userID],
		tweet{
			id:   tweetID,
			time: time.Now().UnixNano(),
		},
	)
}
// GetNewsFeed retrieve the 10 most recent tweet ids in the user's news feed. Each item in the news feed must be posted by users who the user followed or by the user herself. Tweets must be ordered from most recent to least recent.
func (t *Twitter) GetNewsFeed(userID int) []int {
	// Start from a copy of the user's own tweets so sorting does not
	// disturb the stored slice.
	own := t.userTweets[userID]
	feed := make(tweets, len(own))
	copy(feed, own)

	// Add every followee's tweets.
	for _, followee := range t.follow[userID] {
		feed = append(feed, t.userTweets[followee]...)
	}

	// Newest first.
	sort.Sort(feed)

	// Keep at most the ten most recent ids.
	limit := len(feed)
	if limit > 10 {
		limit = 10
	}
	ids := make([]int, 0, limit)
	for _, tw := range feed[:limit] {
		ids = append(ids, tw.id)
	}
	return ids
}
// Follow followee. If the operation is invalid, it should be a no-op.
func (t *Twitter) Follow(followerID int, followeeID int) {
	// Users may not follow themselves.
	if followerID == followeeID {
		return
	}
	// Ignore the request if the relationship already exists, so the
	// follow list never contains duplicates.
	for _, existing := range t.follow[followerID] {
		if existing == followeeID {
			return
		}
	}
	t.follow[followerID] = append(t.follow[followerID], followeeID)
}
// Unfollow follower unfollows a followee. If the operation is invalid, it should be a no-op.
func (t *Twitter) Unfollow(followerID int, followeeID int) {
	for i, id := range t.follow[followerID] {
		if id == followeeID {
			// 删除 followeeID 记录 (remove the followee record).
			t.follow[followerID] = append(t.follow[followerID][:i], t.follow[followerID][i+1:]...)
			// Stop immediately: Follow guarantees no duplicates, and
			// continuing to range over a slice we just mutated in place
			// would inspect shifted elements at stale indexes.
			return
		}
	}
}
/**
* Your Twitter object will be instantiated and called as such:
* obj := Constructor();
* obj.PostTweet(userID,tweetID);
* param_2 := obj.GetNewsFeed(userID);
* obj.Follow(followerID,followeeID);
* obj.Unfollow(followerID,followeeID);
*/
|
package mhfpacket
import (
"errors"
"github.com/Andoryuuta/Erupe/network"
"github.com/Andoryuuta/Erupe/network/clientctx"
"github.com/Andoryuuta/byteframe"
)
// MsgSysLogin represents the MSG_SYS_LOGIN
// Field order mirrors the wire layout parsed in Parse.
type MsgSysLogin struct {
	AckHandle              uint32
	CharID0                uint32
	LoginTokenNumber       uint32
	HardcodedZero0         uint16
	RequestVersion         uint16
	CharID1                uint32
	HardcodedZero1         uint16
	LoginTokenStringLength uint16 // Hardcoded to 0x11
	LoginTokenString       string
}
// Opcode returns the ID associated with this packet type.
func (m *MsgSysLogin) Opcode() network.PacketID {
	return network.MSG_SYS_LOGIN
}
// Parse parses the packet from binary
// Fields are read in wire order; the token string is a fixed 17 bytes
// (matching the hardcoded 0x11 length field).
func (m *MsgSysLogin) Parse(bf *byteframe.ByteFrame, ctx *clientctx.ClientContext) error {
	m.AckHandle = bf.ReadUint32()
	m.CharID0 = bf.ReadUint32()
	m.LoginTokenNumber = bf.ReadUint32()
	m.HardcodedZero0 = bf.ReadUint16()
	m.RequestVersion = bf.ReadUint16()
	m.CharID1 = bf.ReadUint32()
	m.HardcodedZero1 = bf.ReadUint16()
	m.LoginTokenStringLength = bf.ReadUint16()
	m.LoginTokenString = string(bf.ReadBytes(17)) // TODO(Andoryuuta): What encoding is this string?
	return nil
}
// Build builds a binary packet from the current data.
// Building this message is not yet supported and always returns an error.
func (m *MsgSysLogin) Build(bf *byteframe.ByteFrame, ctx *clientctx.ClientContext) error {
	// Error string lowercased per Go convention (errors are often wrapped
	// mid-sentence); previously "Not implemented".
	return errors.New("not implemented")
}
|
package main
// main is intentionally empty; this file exists to host averageOfLevels.
func main() {
}
// averageOfLevels returns the average of the node values on each level
// of the binary tree rooted at root, top level first, via an iterative
// breadth-first traversal. Assumes root is non-nil (a nil root would be
// dereferenced on the first level).
func averageOfLevels(root *TreeNode) (averages []float64) {
	queue := []*TreeNode{root}
	for len(queue) > 0 {
		var (
			total int
			next  []*TreeNode
		)
		for _, n := range queue {
			total += n.Val
			if n.Left != nil {
				next = append(next, n.Left)
			}
			if n.Right != nil {
				next = append(next, n.Right)
			}
		}
		averages = append(averages, float64(total)/float64(len(queue)))
		queue = next
	}
	return
}
|
package main
import (
"bufio"
"fmt"
"io/ioutil"
"log"
"os"
"strings"
"github.com/nlopes/slack"
"github.com/tarm/serial"
)
const (
	// slackUserID is the Slack member ID whose status drives the light.
	slackUserID = "YOURUSERID"
	// slackToken is a Slack legacy API token used for both the REST and
	// RTM connections.
	slackToken = "YOURLEGACYTOKEN"
)
// initializePort opens the serial device at path configured for
// 115200 baud, 8 data bits, no parity, 1 stop bit, and returns the open
// port. It terminates the process via log.Fatal if the port cannot be
// opened.
func initializePort(path string) *serial.Port {
	c := new(serial.Config)
	c.Name = path
	c.Baud = 115200
	c.Size = 8
	c.Parity = 'N'
	c.StopBits = 1
	stream, err := serial.OpenPort(c)
	if err != nil {
		log.Fatal(err)
	}
	return stream
}
// findTrinket scans /dev for USB modem devices, probes each candidate
// with a "hey" command, and returns the path of the one that answers
// "go away" (the trinket's signature reply). Returns "" when no device
// matches. Every probed port is closed before returning so candidates
// are not leaked and the caller can reopen the winner.
func findTrinket() string {
	// grab the contents of /dev
	contents, err := ioutil.ReadDir("/dev")
	if err != nil {
		log.Fatal(err)
	}
	// look for what is mostly likely the trinket device
	for _, f := range contents {
		if !strings.Contains(f.Name(), "tty.usbmodem") {
			continue
		}
		// initialize stream and scanner
		path := "/dev/" + f.Name()
		stream := initializePort(path)
		scanner := bufio.NewScanner(stream)
		// check for 'go away response' to confirm it's our guy
		stream.Write([]byte("hey\r"))
		found := false
		for scanner.Scan() {
			if scanner.Text() == "go away" {
				found = true
				break
			}
		}
		// Close the port in every case: the original leaked ports for
		// non-matching candidates, and left the matching port open even
		// though the caller reopens it via initializePort.
		stream.Close()
		if found {
			fmt.Println("found trinket:", f.Name())
			return path
		}
	}
	// unable to find any candidates
	return ""
}
// setLightStatus maps the current Slack status to a color/mode pair and
// writes the corresponding command sequence to the serial device.
// An emoji-only status takes precedence; otherwise the text status is
// consulted; anything unrecognized falls back to solid green.
func setLightStatus(stream *serial.Port, statusText string, statusEmoji string) {
	// Default: solid green (available).
	color, mode := "green", "@solid"
	if statusEmoji != "" && statusText == "" {
		// Only an emoji status is set.
		switch statusEmoji {
		case ":middle_finger:":
			color, mode = "red", "@pulse"
		case ":triangular_flag_on_post:", ":red_circle:", ":woman-gesturing-no:", ":man-gesturing-no:", ":male-technologist:", ":female-technologist:":
			color, mode = "red", "@solid"
		case ":thinking_face:", ":sleeping:", ":shushing_face:":
			color, mode = "yellow", "@solid"
		}
	} else if statusText != "" {
		// Otherwise fall back to the text status.
		switch statusText {
		case "in a meeting", "on a call":
			color, mode = "red", "@pulse"
		case "focused", "busy":
			color, mode = "red", "@solid"
		case "thinking":
			color, mode = "yellow", "@solid"
		}
	}
	// set the light
	fmt.Printf("Setting Light: %v-%v\n", mode, color)
	if _, err := stream.Write([]byte(color + "\r" + mode + "\r")); err != nil {
		log.Fatal(err)
	}
}
// main connects to the trinket over serial and to Slack, sets the light
// from the user's current status, then watches the RTM event stream and
// updates the light on every status change for slackUserID.
func main() {
	// establish our serial stream
	stream := initializePort(findTrinket())

	// establish our slack connection, generate a legacy user token for this connection
	api := slack.New(
		slackToken,
		slack.OptionDebug(false),
		slack.OptionLog(log.New(os.Stdout, "slack-bot: ", log.Lshortfile|log.LstdFlags)),
	)

	// grab our current user status and set the light
	user, err := api.GetUserInfo(slackUserID)
	if err != nil {
		fmt.Printf("%s\n", err)
		return
	}
	setLightStatus(stream, strings.ToLower(user.Profile.StatusText), user.Profile.StatusEmoji)

	// start new rtm api connection to monitor for status changes
	rtm := api.NewRTM()
	go rtm.ManageConnection()

	// loop through events
	for msg := range rtm.IncomingEvents {
		fmt.Print("Event Received: ")
		switch ev := msg.Data.(type) {
		case *slack.UserChangeEvent:
			fmt.Printf("%T\n", ev)
			//fmt.Printf("EventVals: %+v\n", ev)
			// Only react to changes for the configured user.
			if ev.User.ID == slackUserID {
				setLightStatus(stream, strings.ToLower(ev.User.Profile.StatusText), ev.User.Profile.StatusEmoji)
			}
		default:
			// Other event types are logged and ignored.
			fmt.Printf("%T\n", ev)
		}
	}
}
|
package log
import "sync"
// byteArrayPool represents a reusable byte pool. It is a centralized global instance for this package and can be
// accessed by calling log.BytePool(). It is intended to be used by Handlers.
type byteArrayPool struct {
	pool *sync.Pool
}

// Get returns a Buffer from the pool.
// NOTE(review): assumes the underlying sync.Pool's New always yields a
// *Buffer; any other type panics on the assertion.
func (p *byteArrayPool) Get() *Buffer {
	return p.pool.Get().(*Buffer)
}

// Put truncates the buffer (keeping its backing capacity for reuse) and
// returns it to the pool.
func (p *byteArrayPool) Put(buff *Buffer) {
	buff.B = buff.B[:0]
	p.pool.Put(buff)
}
// Buffer is a mere wrapper for a byte slice. It is intended to be used by Handlers.
type Buffer struct {
	// B is the wrapped byte slice; Handlers append to it directly.
	B []byte
}
|
package goreq
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"reflect"
)
// RespHandler you can implement some special cases
// TIPS: Usually JsonResp, RawResp and HybridResp handle most situations
type RespHandler interface {
	// HandleResponse consumes the HTTP response, optionally routing it
	// through respWrapper, and reports any processing error.
	HandleResponse(resp *http.Response, respWrapper Wrapper) error
}
// RawResp use http.Response and []byes to accept response
// Either destination may be nil to skip that part of the capture.
func RawResp(resp *http.Response, bs *[]byte) *RawRespHandler {
	return &RawRespHandler{
		resp: resp,
		bs:   bs,
	}
}
// RawRespHandler is a wrapper to implement AgentOp and RespHandler
type RawRespHandler struct {
	resp *http.Response // destination for a shallow copy of the response; may be nil
	bs   *[]byte        // destination for the raw body bytes; may be nil
}
// HandleResponse copies the response into the configured destinations:
// a shallow copy of *resp when h.resp is set, and the fully read body
// when h.bs is set. respWrapper is unused by this handler.
func (h *RawRespHandler) HandleResponse(resp *http.Response, respWrapper Wrapper) error {
	if h.resp != nil {
		*h.resp = *resp
	}
	if h.bs == nil {
		return nil
	}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return fmt.Errorf("read http body failed: %w", err)
	}
	*h.bs = body
	return nil
}
// InitialAgent installs this handler as the agent's response handler.
func (h *RawRespHandler) InitialAgent(a *Agent) error {
	a.respHandler = h
	return nil
}
// HybridResp can handle hybrid response such as Json and Raw
// you can use RespHandlerPredicate to indicate when use which resp handler with Predicate
func HybridResp(predicate ...RespHandlerPredicate) *HybridHandler {
	return &HybridHandler{predicates: predicate}
}
// RespHandlerPredicate pairs a response predicate with the handler to
// run when the predicate matches.
type RespHandlerPredicate struct {
	Predicate   func(response *http.Response) bool
	RespHandler RespHandler
}
// HybridHandler is a wrapper to implement AgentOp and RespHandler.
// (The previous comment was a copy-paste of RawRespHandler's.)
// It dispatches the response to every predicate-matched handler.
type HybridHandler struct {
	predicates []RespHandlerPredicate
}
// HandleResponse runs every predicate in order and invokes the handler
// of each one that matches; all matching handlers run, not just the
// first. The first handler error aborts processing.
func (h *HybridHandler) HandleResponse(resp *http.Response, respWrapper Wrapper) error {
	for i, p := range h.predicates {
		if !p.Predicate(resp) {
			continue
		}
		if err := p.RespHandler.HandleResponse(resp, respWrapper); err != nil {
			// Wrap with %w (previously %s) so callers can still inspect
			// the underlying error with errors.Is / errors.As, matching
			// the wrapping style used elsewhere in this package.
			return fmt.Errorf("hybrid resp handle failed at %d, err: %w", i, err)
		}
	}
	return nil
}
// InitialAgent installs this handler as the agent's response handler.
func (h *HybridHandler) InitialAgent(a *Agent) error {
	a.respHandler = h
	return nil
}
// JsonResp use to handler json response, ret must be a ptr
// (the pointer requirement is enforced in InitialAgent).
func JsonResp(ret interface{}) *JsonRespHandler {
	return &JsonRespHandler{ret: ret}
}
// JsonRespHandler is a wrapper to implement AgentOp and RespHandler
type JsonRespHandler struct {
	ret interface{} // pointer the decoded JSON payload is stored into
}
// HandleResponse decodes the JSON body into h.ret. When respWrapper is
// non-nil, the wrapper is interposed: the original target is registered
// via SetData, the wrapper itself becomes the unmarshal destination, and
// its Validate result is returned after decoding.
func (h *JsonRespHandler) HandleResponse(resp *http.Response, respWrapper Wrapper) error {
	if respWrapper != nil {
		respWrapper.SetData(h.ret)
		h.ret = respWrapper
	}
	// json.Decoder is very well, but it can not get invalid content when unmarshal failed
	// so we need to read all body, so can return it when unmarshal failed
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return fmt.Errorf("read body failed: %w", err)
	}
	if err := json.Unmarshal(body, &h.ret); err != nil {
		return fmt.Errorf("unmarshal body failed: %w, body: %s", err, body)
	}
	if respWrapper != nil {
		return respWrapper.Validate()
	}
	return nil
}
// InitialAgent validates that the decode target is a pointer (a value
// target would make json.Unmarshal silently useless) and installs this
// handler as the agent's response handler.
func (h *JsonRespHandler) InitialAgent(a *Agent) error {
	if reflect.TypeOf(h.ret).Kind() != reflect.Ptr {
		return fmt.Errorf("result payload should be ptr")
	}
	a.respHandler = h
	return nil
}
|
package server
import (
"net/http"
"github.com/cinus-ue/securekit/internal/webapps/fileserver/util"
)
// hsts applies HTTP Strict Transport Security for requests addressed to
// the default port, redirecting plain-HTTP requests to HTTPS. It returns
// true when a redirect was written and the caller should stop handling
// the request.
func (h *handler) hsts(w http.ResponseWriter, r *http.Request) (needRedirect bool) {
	// Skip hosts carrying an explicit port — presumably a non-standard
	// deployment where forcing HSTS is undesirable. TODO confirm intent.
	_, port := util.ExtractHostnamePort(r.Host)
	if len(port) > 0 {
		return
	}
	header := w.Header()
	header.Set("Strict-Transport-Security", "max-age=31536000")
	if r.TLS != nil {
		return
	}
	// Plain HTTP: redirect to the HTTPS equivalent of the same URL.
	// NOTE(review): browsers ignore the HSTS header on non-TLS responses,
	// so setting it before this redirect has no effect there.
	location := "https://" + r.Host + r.RequestURI
	http.Redirect(w, r, location, http.StatusMovedPermanently)
	return true
}
// https redirects plain-HTTP requests to the HTTPS endpoint, preserving
// the request URI and appending the configured HTTPS port when it is not
// the default :443. Returns true when a redirect was written.
func (h *handler) https(w http.ResponseWriter, r *http.Request) (needRedirect bool) {
	// Already TLS: nothing to do.
	if r.TLS != nil {
		return
	}
	hostname, _ := util.ExtractHostnamePort(r.Host)
	var targetPort string
	// h.httpsPort is expected in ":port" form; omit it for the default.
	if len(h.httpsPort) > 0 && h.httpsPort != ":443" {
		targetPort = h.httpsPort
	}
	targetHost := hostname + targetPort
	location := "https://" + targetHost + r.RequestURI
	http.Redirect(w, r, location, http.StatusMovedPermanently)
	return true
}
|
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package expression
import (
"fmt"
"strconv"
"strings"
"testing"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/errno"
"github.com/pingcap/tidb/parser/ast"
"github.com/pingcap/tidb/parser/charset"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/parser/terror"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/testkit/testutil"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/mock"
"github.com/stretchr/testify/require"
)
// TestLengthAndOctetLength exercises the LENGTH and OCTET_LENGTH builtins
// over a range of argument types (strings, numerics, time/duration, set,
// binary literal), plus NULL and error propagation, and then verifies
// byte lengths of GBK-encoded vs default-charset strings.
func TestLengthAndOctetLength(t *testing.T) {
	ctx := createContext(t)
	cases := []struct {
		args     interface{}
		expected int64
		isNil    bool
		getErr   bool
	}{
		{"abc", 3, false, false},
		{"你好", 6, false, false},
		{1, 1, false, false},
		{3.14, 4, false, false},
		{types.NewDecFromFloatForTest(123.123), 7, false, false},
		{types.NewTime(types.FromGoTime(time.Now()), mysql.TypeDatetime, 6), 26, false, false},
		{types.NewBinaryLiteralFromUint(0x01, -1), 1, false, false},
		{types.Set{Value: 1, Name: "abc"}, 3, false, false},
		{types.Duration{Duration: 12*time.Hour + 1*time.Minute + 1*time.Second, Fsp: types.DefaultFsp}, 8, false, false},
		{nil, 0, true, false},
		{errors.New("must error"), 0, false, true},
	}
	lengthMethods := []string{ast.Length, ast.OctetLength}
	for _, lengthMethod := range lengthMethods {
		for _, c := range cases {
			f, err := newFunctionForTest(ctx, lengthMethod, primitiveValsToConstants(ctx, []interface{}{c.args})...)
			require.NoError(t, err)
			d, err := f.Eval(chunk.Row{})
			if c.getErr {
				require.Error(t, err)
			} else {
				require.NoError(t, err)
				if c.isNil {
					require.Equal(t, types.KindNull, d.Kind())
				} else {
					require.Equal(t, c.expected, d.GetInt64())
				}
			}
		}
	}
	_, err := funcs[ast.Length].getFunction(ctx, []Expression{NewZero()})
	require.NoError(t, err)

	// Test GBK String
	tbl := []struct {
		input  string
		chs    string
		result int64
	}{
		{"abc", "gbk", 3},
		{"一二三", "gbk", 6},
		{"一二三", "", 9},
		{"一二三!", "gbk", 7},
		{"一二三!", "", 10},
	}
	for _, lengthMethod := range lengthMethods {
		for _, c := range tbl {
			err := ctx.GetSessionVars().SetSystemVarWithoutValidation(variable.CharacterSetConnection, c.chs)
			require.NoError(t, err)
			f, err := newFunctionForTest(ctx, lengthMethod, primitiveValsToConstants(ctx, []interface{}{c.input})...)
			require.NoError(t, err)
			d, err := f.Eval(chunk.Row{})
			require.NoError(t, err)
			require.Equal(t, c.result, d.GetInt64())
		}
	}
}
// TestASCII checks the ASCII builtin: the code of the first byte of the
// string form of each argument, NULL handling, and first-byte values of
// GBK-encoded vs default-charset strings.
func TestASCII(t *testing.T) {
	ctx := createContext(t)
	cases := []struct {
		args     interface{}
		expected int64
		isNil    bool
		getErr   bool
	}{
		{"2", 50, false, false},
		{2, 50, false, false},
		{"23", 50, false, false},
		{23, 50, false, false},
		{2.3, 50, false, false},
		{nil, 0, true, false},
		{"", 0, false, false},
		{"你好", 228, false, false},
	}
	for _, c := range cases {
		f, err := newFunctionForTest(ctx, ast.ASCII, primitiveValsToConstants(ctx, []interface{}{c.args})...)
		require.NoError(t, err)

		d, err := f.Eval(chunk.Row{})
		if c.getErr {
			require.Error(t, err)
		} else {
			require.NoError(t, err)
			if c.isNil {
				require.Equal(t, types.KindNull, d.Kind())
			} else {
				require.Equal(t, c.expected, d.GetInt64())
			}
		}
	}
	_, err := funcs[ast.Length].getFunction(ctx, []Expression{NewZero()})
	require.NoError(t, err)

	// Test GBK String
	tbl := []struct {
		input  string
		chs    string
		result int64
	}{
		{"abc", "gbk", 97},
		{"你好", "gbk", 196},
		{"你好", "", 228},
		{"世界", "gbk", 202},
		{"世界", "", 228},
	}
	for _, c := range tbl {
		err := ctx.GetSessionVars().SetSystemVarWithoutValidation(variable.CharacterSetConnection, c.chs)
		require.NoError(t, err)
		f, err := newFunctionForTest(ctx, ast.ASCII, primitiveValsToConstants(ctx, []interface{}{c.input})...)
		require.NoError(t, err)
		d, err := f.Eval(chunk.Row{})
		require.NoError(t, err)
		require.Equal(t, c.result, d.GetInt64())
	}
}
// TestConcat verifies the CONCAT builtin: concatenation across mixed
// argument types, NULL propagation (any NULL argument yields NULL), and
// error propagation, together with the inferred result field type.
func TestConcat(t *testing.T) {
	ctx := createContext(t)
	cases := []struct {
		args    []interface{}
		isNil   bool
		getErr  bool
		res     string
		retType *types.FieldType
	}{
		{
			[]interface{}{nil},
			true, false, "",
			types.NewFieldTypeBuilder().SetType(mysql.TypeVarString).SetFlag(mysql.BinaryFlag).SetDecimal(types.UnspecifiedLength).SetCharset(charset.CharsetBin).SetCollate(charset.CollationBin).BuildP(),
		},
		{
			[]interface{}{"a", "b",
				1, 2,
				1.1, 1.2,
				types.NewDecFromFloatForTest(1.1),
				types.NewTime(types.FromDate(2000, 1, 1, 12, 01, 01, 0), mysql.TypeDatetime, types.DefaultFsp),
				types.Duration{
					Duration: 12*time.Hour + 1*time.Minute + 1*time.Second,
					Fsp:      types.DefaultFsp},
			},
			false, false, "ab121.11.21.12000-01-01 12:01:0112:01:01",
			types.NewFieldTypeBuilder().SetType(mysql.TypeVarString).SetFlag(mysql.BinaryFlag).SetFlen(40).SetDecimal(types.UnspecifiedLength).SetCharset(charset.CharsetBin).SetCollate(charset.CollationBin).BuildP(),
		},
		{
			[]interface{}{"a", "b", nil, "c"},
			true, false, "",
			types.NewFieldTypeBuilder().SetType(mysql.TypeVarString).SetFlag(mysql.BinaryFlag).SetFlen(3).SetDecimal(types.UnspecifiedLength).SetCharset(charset.CharsetBin).SetCollate(charset.CollationBin).BuildP(),
		},
		{
			[]interface{}{errors.New("must error")},
			false, true, "",
			types.NewFieldTypeBuilder().SetType(mysql.TypeVarString).SetFlag(mysql.BinaryFlag).SetFlen(types.UnspecifiedLength).SetDecimal(types.UnspecifiedLength).SetCharset(charset.CharsetBin).SetCollate(charset.CollationBin).BuildP(),
		},
	}
	fcName := ast.Concat
	for _, c := range cases {
		f, err := newFunctionForTest(ctx, fcName, primitiveValsToConstants(ctx, c.args)...)
		require.NoError(t, err)
		v, err := f.Eval(chunk.Row{})
		if c.getErr {
			require.Error(t, err)
		} else {
			require.NoError(t, err)
			if c.isNil {
				require.Equal(t, types.KindNull, v.Kind())
			} else {
				require.Equal(t, c.res, v.GetString())
			}
		}
	}
}
// TestConcatSig drives builtinConcatSig directly with a tiny
// maxAllowedPacket (5 bytes) to confirm that over-long results become
// NULL and append an errWarnAllowedPacketOverflowed warning.
func TestConcatSig(t *testing.T) {
	ctx := createContext(t)
	colTypes := []*types.FieldType{
		types.NewFieldType(mysql.TypeVarchar),
		types.NewFieldType(mysql.TypeVarchar),
	}
	resultType := &types.FieldType{}
	resultType.SetType(mysql.TypeVarchar)
	resultType.SetFlen(1000)
	args := []Expression{
		&Column{Index: 0, RetType: colTypes[0]},
		&Column{Index: 1, RetType: colTypes[1]},
	}
	base := baseBuiltinFunc{args: args, ctx: ctx, tp: resultType}
	concat := &builtinConcatSig{base, 5}
	cases := []struct {
		args     []interface{}
		warnings int
		res      string
	}{
		{[]interface{}{"a", "b"}, 0, "ab"},
		{[]interface{}{"aaa", "bbb"}, 1, ""},
		{[]interface{}{"中", "a"}, 0, "中a"},
		{[]interface{}{"中文", "a"}, 2, ""},
	}
	for _, c := range cases {
		input := chunk.NewChunkWithCapacity(colTypes, 10)
		input.AppendString(0, c.args[0].(string))
		input.AppendString(1, c.args[1].(string))
		res, isNull, err := concat.evalString(input.GetRow(0))
		require.Equal(t, c.res, res)
		require.NoError(t, err)
		if c.warnings == 0 {
			require.False(t, isNull)
		} else {
			require.True(t, isNull)
			warnings := ctx.GetSessionVars().StmtCtx.GetWarnings()
			require.Len(t, warnings, c.warnings)
			lastWarn := warnings[len(warnings)-1]
			require.True(t, terror.ErrorEqual(errWarnAllowedPacketOverflowed, lastWarn.Err))
		}
	}
}
// TestConcatWS verifies CONCAT_WS: NULL separator yields NULL, NULL
// arguments are skipped (unlike CONCAT), mixed-type arguments are
// stringified, errors propagate, and the arity check rejects a single
// argument.
func TestConcatWS(t *testing.T) {
	ctx := createContext(t)
	cases := []struct {
		args     []interface{}
		isNil    bool
		getErr   bool
		expected string
	}{
		{
			[]interface{}{nil, nil},
			true, false, "",
		},
		{
			[]interface{}{nil, "a", "b"},
			true, false, "",
		},
		{
			[]interface{}{",", "a", "b", "hello", `$^%`},
			false, false,
			`a,b,hello,$^%`,
		},
		{
			[]interface{}{"|", "a", nil, "b", "c"},
			false, false,
			"a|b|c",
		},
		{
			[]interface{}{",", "a", ",", "b", "c"},
			false, false,
			"a,,,b,c",
		},
		{
			[]interface{}{errors.New("must error"), "a", "b"},
			false, true, "",
		},
		{
			[]interface{}{",", "a", "b", 1, 2, 1.1, 0.11,
				types.NewDecFromFloatForTest(1.1),
				types.NewTime(types.FromDate(2000, 1, 1, 12, 01, 01, 0), mysql.TypeDatetime, types.DefaultFsp),
				types.Duration{
					Duration: 12*time.Hour + 1*time.Minute + 1*time.Second,
					Fsp:      types.DefaultFsp},
			},
			false, false, "a,b,1,2,1.1,0.11,1.1,2000-01-01 12:01:01,12:01:01",
		},
	}

	fcName := ast.ConcatWS
	// ERROR 1582 (42000): Incorrect parameter count in the call to native function 'concat_ws'
	_, err := newFunctionForTest(ctx, fcName, primitiveValsToConstants(ctx, []interface{}{nil})...)
	require.Error(t, err)

	for _, c := range cases {
		f, err := newFunctionForTest(ctx, fcName, primitiveValsToConstants(ctx, c.args)...)
		require.NoError(t, err)
		val, err1 := f.Eval(chunk.Row{})
		if c.getErr {
			require.NotNil(t, err1)
		} else {
			require.Nil(t, err1)
			if c.isNil {
				require.Equal(t, types.KindNull, val.Kind())
			} else {
				require.Equal(t, c.expected, val.GetString())
			}
		}
	}

	_, err = funcs[ast.ConcatWS].getFunction(ctx, primitiveValsToConstants(ctx, []interface{}{nil, nil}))
	require.NoError(t, err)
}
// TestConcatWSSig drives builtinConcatWSSig directly with a tiny
// maxAllowedPacket (6 bytes) to confirm that over-long results become
// NULL and append an errWarnAllowedPacketOverflowed warning.
func TestConcatWSSig(t *testing.T) {
	ctx := createContext(t)
	colTypes := []*types.FieldType{
		types.NewFieldType(mysql.TypeVarchar),
		types.NewFieldType(mysql.TypeVarchar),
		types.NewFieldType(mysql.TypeVarchar),
	}
	resultType := &types.FieldType{}
	resultType.SetType(mysql.TypeVarchar)
	resultType.SetFlen(1000)
	args := []Expression{
		&Column{Index: 0, RetType: colTypes[0]},
		&Column{Index: 1, RetType: colTypes[1]},
		&Column{Index: 2, RetType: colTypes[2]},
	}
	base := baseBuiltinFunc{args: args, ctx: ctx, tp: resultType}
	concat := &builtinConcatWSSig{base, 6}
	cases := []struct {
		args     []interface{}
		warnings int
		res      string
	}{
		{[]interface{}{",", "a", "b"}, 0, "a,b"},
		{[]interface{}{",", "aaa", "bbb"}, 1, ""},
		{[]interface{}{",", "中", "a"}, 0, "中,a"},
		{[]interface{}{",", "中文", "a"}, 2, ""},
	}
	for _, c := range cases {
		input := chunk.NewChunkWithCapacity(colTypes, 10)
		input.AppendString(0, c.args[0].(string))
		input.AppendString(1, c.args[1].(string))
		input.AppendString(2, c.args[2].(string))
		res, isNull, err := concat.evalString(input.GetRow(0))
		require.Equal(t, c.res, res)
		require.NoError(t, err)
		if c.warnings == 0 {
			require.False(t, isNull)
		} else {
			require.True(t, isNull)
			warnings := ctx.GetSessionVars().StmtCtx.GetWarnings()
			require.Len(t, warnings, c.warnings)
			lastWarn := warnings[len(warnings)-1]
			require.True(t, terror.ErrorEqual(errWarnAllowedPacketOverflowed, lastWarn.Err))
		}
	}
}
// TestLeft verifies the LEFT builtin: prefix extraction with integral,
// fractional, negative, oversized, string and NULL length arguments,
// numeric and binary-literal first arguments, and error propagation.
// Truncation errors are suppressed for the duration of the test.
func TestLeft(t *testing.T) {
	ctx := createContext(t)
	stmtCtx := ctx.GetSessionVars().StmtCtx
	origin := stmtCtx.IgnoreTruncate.Load()
	stmtCtx.IgnoreTruncate.Store(true)
	defer func() {
		stmtCtx.IgnoreTruncate.Store(origin)
	}()

	cases := []struct {
		args   []interface{}
		isNil  bool
		getErr bool
		res    string
	}{
		{[]interface{}{"abcde", 3}, false, false, "abc"},
		{[]interface{}{"abcde", 0}, false, false, ""},
		{[]interface{}{"abcde", 1.2}, false, false, "a"},
		{[]interface{}{"abcde", 1.9}, false, false, "ab"},
		{[]interface{}{"abcde", -1}, false, false, ""},
		{[]interface{}{"abcde", 100}, false, false, "abcde"},
		{[]interface{}{"abcde", nil}, true, false, ""},
		{[]interface{}{nil, 3}, true, false, ""},
		{[]interface{}{"abcde", "3"}, false, false, "abc"},
		{[]interface{}{"abcde", "a"}, false, false, ""},
		{[]interface{}{1234, 3}, false, false, "123"},
		{[]interface{}{12.34, 3}, false, false, "12."},
		{[]interface{}{types.NewBinaryLiteralFromUint(0x0102, -1), 1}, false, false, string([]byte{0x01})},
		{[]interface{}{errors.New("must err"), 0}, false, true, ""},
	}
	for _, c := range cases {
		f, err := newFunctionForTest(ctx, ast.Left, primitiveValsToConstants(ctx, c.args)...)
		require.NoError(t, err)
		v, err := f.Eval(chunk.Row{})
		if c.getErr {
			require.Error(t, err)
		} else {
			require.NoError(t, err)
			if c.isNil {
				require.Equal(t, types.KindNull, v.Kind())
			} else {
				require.Equal(t, c.res, v.GetString())
			}
		}
	}
	_, err := funcs[ast.Left].getFunction(ctx, []Expression{getVarcharCon(), getInt8Con()})
	require.NoError(t, err)
}
// TestRight mirrors TestLeft for the RIGHT builtin: suffix extraction
// with integral, fractional, negative, oversized, string and NULL length
// arguments, numeric and binary-literal inputs, and error propagation.
func TestRight(t *testing.T) {
	ctx := createContext(t)
	stmtCtx := ctx.GetSessionVars().StmtCtx
	origin := stmtCtx.IgnoreTruncate.Load()
	stmtCtx.IgnoreTruncate.Store(true)
	defer func() {
		stmtCtx.IgnoreTruncate.Store(origin)
	}()

	cases := []struct {
		args   []interface{}
		isNil  bool
		getErr bool
		res    string
	}{
		{[]interface{}{"abcde", 3}, false, false, "cde"},
		{[]interface{}{"abcde", 0}, false, false, ""},
		{[]interface{}{"abcde", 1.2}, false, false, "e"},
		{[]interface{}{"abcde", 1.9}, false, false, "de"},
		{[]interface{}{"abcde", -1}, false, false, ""},
		{[]interface{}{"abcde", 100}, false, false, "abcde"},
		{[]interface{}{"abcde", nil}, true, false, ""},
		{[]interface{}{nil, 1}, true, false, ""},
		{[]interface{}{"abcde", "3"}, false, false, "cde"},
		{[]interface{}{"abcde", "a"}, false, false, ""},
		{[]interface{}{1234, 3}, false, false, "234"},
		{[]interface{}{12.34, 3}, false, false, ".34"},
		{[]interface{}{types.NewBinaryLiteralFromUint(0x0102, -1), 1}, false, false, string([]byte{0x02})},
		{[]interface{}{errors.New("must err"), 0}, false, true, ""},
	}
	for _, c := range cases {
		f, err := newFunctionForTest(ctx, ast.Right, primitiveValsToConstants(ctx, c.args)...)
		require.NoError(t, err)
		v, err := f.Eval(chunk.Row{})
		if c.getErr {
			require.Error(t, err)
		} else {
			require.NoError(t, err)
			if c.isNil {
				require.Equal(t, types.KindNull, v.Kind())
			} else {
				require.Equal(t, c.res, v.GetString())
			}
		}
	}
	_, err := funcs[ast.Right].getFunction(ctx, []Expression{getVarcharCon(), getInt8Con()})
	require.NoError(t, err)
}
// TestRepeat verifies the REPEAT builtin for signed and unsigned counts,
// counts around the 16MB boundary, and non-positive counts (which yield
// the empty string).
func TestRepeat(t *testing.T) {
	ctx := createContext(t)
	args := []interface{}{"a", int64(2)}
	fc := funcs[ast.Repeat]
	f, err := fc.getFunction(ctx, datumsToConstants(types.MakeDatums(args...)))
	require.NoError(t, err)
	v, err := evalBuiltinFunc(f, chunk.Row{})
	require.NoError(t, err)
	require.Equal(t, "aa", v.GetString())

	// Unsigned count behaves identically.
	args = []interface{}{"a", uint64(2)}
	f, err = fc.getFunction(ctx, datumsToConstants(types.MakeDatums(args...)))
	require.NoError(t, err)
	v, err = evalBuiltinFunc(f, chunk.Row{})
	require.NoError(t, err)
	require.Equal(t, "aa", v.GetString())

	// Counts just above and at 16MB still produce a non-NULL result here.
	args = []interface{}{"a", uint64(16777217)}
	f, err = fc.getFunction(ctx, datumsToConstants(types.MakeDatums(args...)))
	require.NoError(t, err)
	v, err = evalBuiltinFunc(f, chunk.Row{})
	require.NoError(t, err)
	require.False(t, v.IsNull())

	args = []interface{}{"a", uint64(16777216)}
	f, err = fc.getFunction(ctx, datumsToConstants(types.MakeDatums(args...)))
	require.NoError(t, err)
	v, err = evalBuiltinFunc(f, chunk.Row{})
	require.NoError(t, err)
	require.False(t, v.IsNull())

	// Zero or negative counts yield the empty string.
	args = []interface{}{"a", int64(-1)}
	f, err = fc.getFunction(ctx, datumsToConstants(types.MakeDatums(args...)))
	require.NoError(t, err)
	v, err = evalBuiltinFunc(f, chunk.Row{})
	require.NoError(t, err)
	require.Equal(t, "", v.GetString())

	args = []interface{}{"a", int64(0)}
	f, err = fc.getFunction(ctx, datumsToConstants(types.MakeDatums(args...)))
	require.NoError(t, err)
	v, err = evalBuiltinFunc(f, chunk.Row{})
	require.NoError(t, err)
	require.Equal(t, "", v.GetString())

	args = []interface{}{"a", uint64(0)}
	f, err = fc.getFunction(ctx, datumsToConstants(types.MakeDatums(args...)))
	require.NoError(t, err)
	v, err = evalBuiltinFunc(f, chunk.Row{})
	require.NoError(t, err)
	require.Equal(t, "", v.GetString())
}
// TestRepeatSig exercises builtinRepeatSig.evalString directly, verifying
// that results longer than maxAllowedPacket (1000 here) become NULL and
// raise an errWarnAllowedPacketOverflowed warning.
func TestRepeatSig(t *testing.T) {
	ctx := createContext(t)
	colTypes := []*types.FieldType{
		types.NewFieldType(mysql.TypeVarchar),
		types.NewFieldType(mysql.TypeLonglong),
	}
	resultType := &types.FieldType{}
	resultType.SetType(mysql.TypeVarchar)
	resultType.SetFlen(1000)
	args := []Expression{
		&Column{Index: 0, RetType: colTypes[0]},
		&Column{Index: 1, RetType: colTypes[1]},
	}
	base := baseBuiltinFunc{args: args, ctx: ctx, tp: resultType}
	repeat := &builtinRepeatSig{base, 1000}
	cases := []struct {
		args    []interface{}
		warning int // cumulative warning count expected after this case runs
		res     string
	}{
		{[]interface{}{"a", int64(6)}, 0, "aaaaaa"},
		{[]interface{}{"a", int64(10001)}, 1, ""},
		{[]interface{}{"毅", int64(6)}, 0, "毅毅毅毅毅毅"},
		{[]interface{}{"毅", int64(334)}, 2, ""},
	}
	for _, c := range cases {
		input := chunk.NewChunkWithCapacity(colTypes, 10)
		input.AppendString(0, c.args[0].(string))
		input.AppendInt64(1, c.args[1].(int64))
		res, isNull, err := repeat.evalString(input.GetRow(0))
		require.Equal(t, c.res, res)
		require.NoError(t, err)
		if c.warning == 0 {
			require.False(t, isNull)
		} else {
			// Overflowed results are NULL; warnings accumulate in the
			// statement context across loop iterations, hence the
			// cumulative expected count. (The redundant second
			// require.NoError on err was dropped — err is asserted above.)
			require.True(t, isNull)
			warnings := ctx.GetSessionVars().StmtCtx.GetWarnings()
			require.Len(t, warnings, c.warning)
			lastWarn := warnings[len(warnings)-1]
			require.True(t, terror.ErrorEqual(errWarnAllowedPacketOverflowed, lastWarn.Err))
		}
	}
}
// TestLower checks the LOWER() builtin: NULL propagation, numeric input
// coerced to string, multi-byte text, and charset-dependent behavior for
// a GBK connection charset versus the default.
func TestLower(t *testing.T) {
	ctx := createContext(t)
	cases := []struct {
		args   []interface{}
		isNil  bool
		getErr bool
		res    string
	}{
		{[]interface{}{nil}, true, false, ""},
		{[]interface{}{"ab"}, false, false, "ab"},
		{[]interface{}{1}, false, false, "1"},
		{[]interface{}{"one week’s time TEST"}, false, false, "one week’s time test"},
		{[]interface{}{"one week's time TEST"}, false, false, "one week's time test"},
		{[]interface{}{"ABC测试DEF"}, false, false, "abc测试def"},
		{[]interface{}{"ABCテストDEF"}, false, false, "abcテストdef"},
	}
	for _, c := range cases {
		f, err := newFunctionForTest(ctx, ast.Lower, primitiveValsToConstants(ctx, c.args)...)
		require.NoError(t, err)
		v, err := f.Eval(chunk.Row{})
		if c.getErr {
			require.Error(t, err)
		} else {
			require.NoError(t, err)
			if c.isNil {
				require.Equal(t, types.KindNull, v.Kind())
			} else {
				require.Equal(t, c.res, v.GetString())
			}
		}
	}
	_, err := funcs[ast.Lower].getFunction(ctx, []Expression{getVarcharCon()})
	require.NoError(t, err)
	// Test GBK String: with a GBK connection charset, characters whose case
	// mapping falls outside GBK are left unchanged (last two rows differ
	// only in the trailing Roman numerals).
	tbl := []struct {
		input  string
		chs    string
		result string
	}{
		{"ABC", "gbk", "abc"},
		{"一二三", "gbk", "一二三"},
		{"àáèéêìíòóùúüāēěīńňōūǎǐǒǔǖǘǚǜⅪⅫ", "gbk", "àáèéêìíòóùúüāēěīńňōūǎǐǒǔǖǘǚǜⅪⅫ"},
		{"àáèéêìíòóùúüāēěīńňōūǎǐǒǔǖǘǚǜⅪⅫ", "", "àáèéêìíòóùúüāēěīńňōūǎǐǒǔǖǘǚǜⅺⅻ"},
	}
	for _, c := range tbl {
		err := ctx.GetSessionVars().SetSystemVarWithoutValidation(variable.CharacterSetConnection, c.chs)
		require.NoError(t, err)
		f, err := newFunctionForTest(ctx, ast.Lower, primitiveValsToConstants(ctx, []interface{}{c.input})...)
		require.NoError(t, err)
		d, err := f.Eval(chunk.Row{})
		require.NoError(t, err)
		require.Equal(t, c.result, d.GetString())
	}
}
// TestUpper checks the UPPER() builtin: NULL propagation, numeric input
// coerced to string, multi-byte text, and charset-dependent behavior for
// a GBK connection charset versus the default.
func TestUpper(t *testing.T) {
	ctx := createContext(t)
	cases := []struct {
		args   []interface{}
		isNil  bool
		getErr bool
		res    string
	}{
		{[]interface{}{nil}, true, false, ""},
		{[]interface{}{"ab"}, false, false, "AB"},
		{[]interface{}{1}, false, false, "1"},
		{[]interface{}{"one week’s time TEST"}, false, false, "ONE WEEK’S TIME TEST"},
		{[]interface{}{"one week's time TEST"}, false, false, "ONE WEEK'S TIME TEST"},
		{[]interface{}{"abc测试def"}, false, false, "ABC测试DEF"},
		{[]interface{}{"abcテストdef"}, false, false, "ABCテストDEF"},
	}
	for _, c := range cases {
		f, err := newFunctionForTest(ctx, ast.Upper, primitiveValsToConstants(ctx, c.args)...)
		require.NoError(t, err)
		v, err := f.Eval(chunk.Row{})
		if c.getErr {
			require.Error(t, err)
		} else {
			require.NoError(t, err)
			if c.isNil {
				require.Equal(t, types.KindNull, v.Kind())
			} else {
				// Compare against the literal expectation. The old code
				// wrapped c.res in strings.ToUpper, which silently fixed up
				// wrongly-cased table entries and could hide a bad expectation.
				require.Equal(t, c.res, v.GetString())
			}
		}
	}
	_, err := funcs[ast.Upper].getFunction(ctx, []Expression{getVarcharCon()})
	require.NoError(t, err)
	// Test GBK String: with a GBK connection charset, characters whose case
	// mapping falls outside GBK are left unchanged.
	tbl := []struct {
		input  string
		chs    string
		result string
	}{
		{"abc", "gbk", "ABC"},
		{"一二三", "gbk", "一二三"},
		{"àbc", "gbk", "àBC"},
		{"àáèéêìíòóùúüāēěīńňōūǎǐǒǔǖǘǚǜⅪⅫ", "gbk", "àáèéêìíòóùúüāēěīńňōūǎǐǒǔǖǘǚǜⅪⅫ"},
		{"àáèéêìíòóùúüāēěīńňōūǎǐǒǔǖǘǚǜⅪⅫ", "", "ÀÁÈÉÊÌÍÒÓÙÚÜĀĒĚĪŃŇŌŪǍǏǑǓǕǗǙǛⅪⅫ"},
	}
	for _, c := range tbl {
		err := ctx.GetSessionVars().SetSystemVarWithoutValidation(variable.CharacterSetConnection, c.chs)
		require.NoError(t, err)
		f, err := newFunctionForTest(ctx, ast.Upper, primitiveValsToConstants(ctx, []interface{}{c.input})...)
		require.NoError(t, err)
		d, err := f.Eval(chunk.Row{})
		require.NoError(t, err)
		require.Equal(t, c.result, d.GetString())
	}
}
// TestReverse checks the REVERSE() builtin: NULL input yields NULL, and any
// other value is cast to string and returned with its characters reversed.
func TestReverse(t *testing.T) {
	ctx := createContext(t)
	fc := funcs[ast.Reverse]

	// NULL propagates through REVERSE.
	nullFn, err := fc.getFunction(ctx, datumsToConstants(types.MakeDatums(nil)))
	require.NoError(t, err)
	d, err := evalBuiltinFunc(nullFn, chunk.Row{})
	require.NoError(t, err)
	require.Equal(t, types.KindNull, d.Kind())

	tbl := []struct {
		Input  interface{}
		Expect string
	}{
		{"abc", "cba"},
		{"LIKE", "EKIL"},
		{123, "321"},
		{"", ""},
	}
	for _, row := range tblToDtbl(tbl) {
		fn, err := fc.getFunction(ctx, datumsToConstants(row["Input"]))
		require.NoError(t, err)
		require.NotNil(t, fn)
		got, err := evalBuiltinFunc(fn, chunk.Row{})
		require.NoError(t, err)
		testutil.DatumEqual(t, row["Expect"][0], got)
	}
}
// TestStrcmp checks the STRCMP() builtin: 0/1/-1 for equal/greater/less
// string comparisons (non-string arguments are coerced to string), NULL when
// either argument is NULL, and error surfacing for bad arguments.
func TestStrcmp(t *testing.T) {
	ctx := createContext(t)
	cases := []struct {
		args   []interface{}
		isNil  bool
		getErr bool
		res    int64
	}{
		{[]interface{}{"123", "123"}, false, false, 0},
		{[]interface{}{"123", "1"}, false, false, 1},
		{[]interface{}{"1", "123"}, false, false, -1},
		{[]interface{}{"123", "45"}, false, false, -1},
		{[]interface{}{123, "123"}, false, false, 0},
		{[]interface{}{"12.34", 12.34}, false, false, 0},
		{[]interface{}{nil, "123"}, true, false, 0},
		{[]interface{}{"123", nil}, true, false, 0},
		{[]interface{}{"", "123"}, false, false, -1},
		{[]interface{}{"123", ""}, false, false, 1},
		{[]interface{}{"", ""}, false, false, 0},
		{[]interface{}{"", nil}, true, false, 0},
		{[]interface{}{nil, ""}, true, false, 0},
		{[]interface{}{nil, nil}, true, false, 0},
		{[]interface{}{"123", errors.New("must err")}, false, true, 0},
	}
	for _, c := range cases {
		f, err := newFunctionForTest(ctx, ast.Strcmp, primitiveValsToConstants(ctx, c.args)...)
		require.NoError(t, err)
		d, err := f.Eval(chunk.Row{})
		if c.getErr {
			require.Error(t, err)
		} else {
			require.NoError(t, err)
			if c.isNil {
				require.Equal(t, types.KindNull, d.Kind())
			} else {
				require.Equal(t, c.res, d.GetInt64())
			}
		}
	}
}
// TestReplace checks the REPLACE() builtin: substring substitution, the
// inferred flen of the result type for each argument combination, NULL
// propagation, and error surfacing.
func TestReplace(t *testing.T) {
	ctx := createContext(t)
	cases := []struct {
		args   []interface{}
		isNil  bool
		getErr bool
		res    string
		flen   int // expected flen on the function's inferred return type
	}{
		{[]interface{}{"www.mysql.com", "mysql", "pingcap"}, false, false, "www.pingcap.com", 17},
		{[]interface{}{"www.mysql.com", "w", 1}, false, false, "111.mysql.com", 260},
		{[]interface{}{1234, 2, 55}, false, false, "15534", 20},
		{[]interface{}{"", "a", "b"}, false, false, "", 0},
		{[]interface{}{"abc", "", "d"}, false, false, "abc", 3},
		{[]interface{}{"aaa", "a", ""}, false, false, "", 3},
		{[]interface{}{nil, "a", "b"}, true, false, "", 0},
		{[]interface{}{"a", nil, "b"}, true, false, "", 1},
		{[]interface{}{"a", "b", nil}, true, false, "", 1},
		{[]interface{}{errors.New("must err"), "a", "b"}, false, true, "", -1},
	}
	for i, c := range cases {
		f, err := newFunctionForTest(ctx, ast.Replace, primitiveValsToConstants(ctx, c.args)...)
		require.NoError(t, err)
		// Check the flen inferred at function-construction time, before eval.
		require.Equalf(t, c.flen, f.GetType().GetFlen(), "test %v", i)
		d, err := f.Eval(chunk.Row{})
		if c.getErr {
			require.Error(t, err)
		} else {
			require.NoError(t, err)
			if c.isNil {
				require.Equalf(t, types.KindNull, d.Kind(), "test %v", i)
			} else {
				require.Equalf(t, c.res, d.GetString(), "test %v", i)
			}
		}
	}
	_, err := funcs[ast.Replace].getFunction(ctx, []Expression{NewZero(), NewZero(), NewZero()})
	require.NoError(t, err)
}
// TestSubstring checks the SUBSTRING() builtin in its two- and three-argument
// forms: positive/negative/zero start positions, lengths past the end of the
// string, NULL propagation, and error surfacing.
func TestSubstring(t *testing.T) {
	ctx := createContext(t)
	cases := []struct {
		args   []interface{}
		isNil  bool
		getErr bool
		res    string
	}{
		{[]interface{}{"Quadratically", 5}, false, false, "ratically"},
		{[]interface{}{"Sakila", 1}, false, false, "Sakila"},
		{[]interface{}{"Sakila", 2}, false, false, "akila"},
		{[]interface{}{"Sakila", -3}, false, false, "ila"},
		{[]interface{}{"Sakila", 0}, false, false, ""},
		{[]interface{}{"Sakila", 100}, false, false, ""},
		{[]interface{}{"Sakila", -100}, false, false, ""},
		{[]interface{}{"Quadratically", 5, 6}, false, false, "ratica"},
		{[]interface{}{"Sakila", -5, 3}, false, false, "aki"},
		{[]interface{}{"Sakila", 2, 0}, false, false, ""},
		{[]interface{}{"Sakila", 2, -1}, false, false, ""},
		{[]interface{}{"Sakila", 2, 100}, false, false, "akila"},
		{[]interface{}{nil, 2, 3}, true, false, ""},
		{[]interface{}{"Sakila", nil, 3}, true, false, ""},
		{[]interface{}{"Sakila", 2, nil}, true, false, ""},
		{[]interface{}{errors.New("must error"), 2, 3}, false, true, ""},
	}
	for _, c := range cases {
		f, err := newFunctionForTest(ctx, ast.Substring, primitiveValsToConstants(ctx, c.args)...)
		require.NoError(t, err)
		d, err := f.Eval(chunk.Row{})
		if c.getErr {
			require.Error(t, err)
		} else {
			require.NoError(t, err)
			if c.isNil {
				require.Equal(t, types.KindNull, d.Kind())
			} else {
				require.Equal(t, c.res, d.GetString())
			}
		}
	}
	// Both the 3-arg and 2-arg signatures must be constructible.
	_, err := funcs[ast.Substring].getFunction(ctx, []Expression{NewZero(), NewZero(), NewZero()})
	require.NoError(t, err)
	_, err = funcs[ast.Substring].getFunction(ctx, []Expression{NewZero(), NewZero()})
	require.NoError(t, err)
}
// TestConvert checks the CONVERT(... USING charset) builtin: the result
// type's charset/collation/binary flag, the converted value, rejection of
// unknown or unsupported charsets at construction time, and the error raised
// when an unknown charset is injected at evaluation time.
func TestConvert(t *testing.T) {
	ctx := createContext(t)
	tbl := []struct {
		str           interface{}
		cs            string
		result        string
		hasBinaryFlag bool
	}{
		{"haha", "utf8", "haha", false},
		{"haha", "ascii", "haha", false},
		{"haha", "binary", "haha", true},
		{"haha", "bInAry", "haha", true}, // charset name matching is case-insensitive
		{types.NewBinaryLiteralFromUint(0x7e, -1), "BiNarY", "~", true},
		{types.NewBinaryLiteralFromUint(0xe4b8ade696870a, -1), "uTf8", "中文\n", false},
	}
	for _, v := range tbl {
		fc := funcs[ast.Convert]
		f, err := fc.getFunction(ctx, datumsToConstants(types.MakeDatums(v.str, v.cs)))
		require.NoError(t, err)
		require.NotNil(t, f)
		retType := f.getRetTp()
		// The return type carries the target charset and its default collation.
		require.Equal(t, strings.ToLower(v.cs), retType.GetCharset())
		collate, err := charset.GetDefaultCollation(strings.ToLower(v.cs))
		require.NoError(t, err)
		require.Equal(t, collate, retType.GetCollate())
		require.Equal(t, v.hasBinaryFlag, mysql.HasBinaryFlag(retType.GetFlag()))
		r, err := evalBuiltinFunc(f, chunk.Row{})
		require.NoError(t, err)
		require.Equal(t, types.KindString, r.Kind())
		require.Equal(t, v.result, r.GetString())
	}
	// Test case for getFunction() error
	errTbl := []struct {
		str interface{}
		cs  string
		err string
	}{
		{"haha", "wrongcharset", "[expression:1115]Unknown character set: 'wrongcharset'"},
		{"haha", "cp866", "[expression:1115]Unknown character set: 'cp866'"},
	}
	for _, v := range errTbl {
		fc := funcs[ast.Convert]
		f, err := fc.getFunction(ctx, datumsToConstants(types.MakeDatums(v.str, v.cs)))
		require.Equal(t, v.err, err.Error())
		require.Nil(t, f)
	}
	// Test wrong charset while evaluating.
	fc := funcs[ast.Convert]
	f, err := fc.getFunction(ctx, datumsToConstants(types.MakeDatums("haha", "utf8")))
	require.NoError(t, err)
	require.NotNil(t, f)
	// Corrupt the already-built signature's target charset to force the
	// evaluation-time error path.
	wrongFunction := f.(*builtinConvertSig)
	wrongFunction.tp.SetCharset("wrongcharset")
	_, err = evalBuiltinFunc(wrongFunction, chunk.Row{})
	require.Error(t, err)
	require.Equal(t, "[expression:1115]Unknown character set: 'wrongcharset'", err.Error())
}
// TestSubstringIndex checks the SUBSTRING_INDEX() builtin: positive counts
// take everything before the Nth delimiter, negative counts take everything
// after the Nth-from-the-end delimiter, zero or an empty delimiter yields "",
// and NULL arguments propagate.
func TestSubstringIndex(t *testing.T) {
	ctx := createContext(t)
	cases := []struct {
		args   []interface{}
		isNil  bool
		getErr bool
		res    string
	}{
		{[]interface{}{"www.pingcap.com", ".", 2}, false, false, "www.pingcap"},
		{[]interface{}{"www.pingcap.com", ".", -2}, false, false, "pingcap.com"},
		{[]interface{}{"www.pingcap.com", ".", 0}, false, false, ""},
		{[]interface{}{"www.pingcap.com", ".", 100}, false, false, "www.pingcap.com"},
		{[]interface{}{"www.pingcap.com", ".", -100}, false, false, "www.pingcap.com"},
		{[]interface{}{"www.pingcap.com", "d", 0}, false, false, ""},
		{[]interface{}{"www.pingcap.com", "d", 1}, false, false, "www.pingcap.com"},
		{[]interface{}{"www.pingcap.com", "d", -1}, false, false, "www.pingcap.com"},
		{[]interface{}{"www.pingcap.com", "", 0}, false, false, ""},
		{[]interface{}{"www.pingcap.com", "", 1}, false, false, ""},
		{[]interface{}{"www.pingcap.com", "", -1}, false, false, ""},
		{[]interface{}{"", ".", 0}, false, false, ""},
		{[]interface{}{"", ".", 1}, false, false, ""},
		{[]interface{}{"", ".", -1}, false, false, ""},
		{[]interface{}{nil, ".", 1}, true, false, ""},
		{[]interface{}{"www.pingcap.com", nil, 1}, true, false, ""},
		{[]interface{}{"www.pingcap.com", ".", nil}, true, false, ""},
		{[]interface{}{errors.New("must error"), ".", 1}, false, true, ""},
	}
	for _, c := range cases {
		f, err := newFunctionForTest(ctx, ast.SubstringIndex, primitiveValsToConstants(ctx, c.args)...)
		require.NoError(t, err)
		d, err := f.Eval(chunk.Row{})
		if c.getErr {
			require.Error(t, err)
		} else {
			require.NoError(t, err)
			if c.isNil {
				require.Equal(t, types.KindNull, d.Kind())
			} else {
				require.Equal(t, c.res, d.GetString())
			}
		}
	}
	_, err := funcs[ast.SubstringIndex].getFunction(ctx, []Expression{NewZero(), NewZero(), NewZero()})
	require.NoError(t, err)
}
// TestSpace checks the SPACE() builtin: non-numeric strings coerce to 0,
// floats are rounded, counts above mysql.MaxBlobWidth yield NULL, and
// negative counts yield "". Truncation warnings are suppressed for the
// duration of the test so the string->number coercions don't error.
func TestSpace(t *testing.T) {
	ctx := createContext(t)
	stmtCtx := ctx.GetSessionVars().StmtCtx
	origin := stmtCtx.IgnoreTruncate.Load()
	stmtCtx.IgnoreTruncate.Store(true)
	defer func() {
		// Restore the original truncate behavior for later tests.
		stmtCtx.IgnoreTruncate.Store(origin)
	}()
	cases := []struct {
		arg    interface{}
		isNil  bool
		getErr bool
		res    string
	}{
		{0, false, false, ""},
		{3, false, false, "   "},
		{mysql.MaxBlobWidth + 1, true, false, ""},
		{-1, false, false, ""},
		{"abc", false, false, ""},
		{"3", false, false, "   "},
		{1.2, false, false, " "},
		{1.9, false, false, "  "},
		{nil, true, false, ""},
		{errors.New("must error"), false, true, ""},
	}
	for _, c := range cases {
		f, err := newFunctionForTest(ctx, ast.Space, primitiveValsToConstants(ctx, []interface{}{c.arg})...)
		require.NoError(t, err)
		d, err := f.Eval(chunk.Row{})
		if c.getErr {
			require.Error(t, err)
		} else {
			require.NoError(t, err)
			if c.isNil {
				require.Equal(t, types.KindNull, d.Kind())
			} else {
				require.Equal(t, c.res, d.GetString())
			}
		}
	}
	_, err := funcs[ast.Space].getFunction(ctx, []Expression{NewZero()})
	require.NoError(t, err)
}
// TestSpaceSig exercises builtinSpaceSig.evalString directly: a count within
// maxAllowedPacket (1000 here) yields that many spaces, while a larger count
// yields NULL plus an errWarnAllowedPacketOverflowed warning.
func TestSpaceSig(t *testing.T) {
	ctx := createContext(t)
	colTypes := []*types.FieldType{
		types.NewFieldType(mysql.TypeLonglong),
	}
	resultType := &types.FieldType{}
	resultType.SetType(mysql.TypeVarchar)
	resultType.SetFlen(1000)
	args := []Expression{
		&Column{Index: 0, RetType: colTypes[0]},
	}
	base := baseBuiltinFunc{args: args, ctx: ctx, tp: resultType}
	space := &builtinSpaceSig{base, 1000}
	input := chunk.NewChunkWithCapacity(colTypes, 10)
	input.AppendInt64(0, 6)
	input.AppendInt64(0, 1001)
	// Row 0: within the packet limit -> six spaces, not NULL.
	res, isNull, err := space.evalString(input.GetRow(0))
	require.Equal(t, "      ", res)
	require.False(t, isNull)
	require.NoError(t, err)
	// Row 1: over the limit -> NULL plus a warning, but no error.
	res, isNull, err = space.evalString(input.GetRow(1))
	require.Equal(t, "", res)
	require.True(t, isNull)
	require.NoError(t, err)
	warnings := ctx.GetSessionVars().StmtCtx.GetWarnings()
	require.Len(t, warnings, 1)
	lastWarn := warnings[len(warnings)-1]
	require.True(t, terror.ErrorEqual(errWarnAllowedPacketOverflowed, lastWarn.Err))
}
// TestLocate checks the LOCATE() builtin: the 1-based position of a substring
// (character-counted for multi-byte text), the optional third start-position
// argument, NULL propagation, and the switch from case-insensitive to
// case-sensitive matching when either argument is binary.
func TestLocate(t *testing.T) {
	ctx := createContext(t)
	// 1. Test LOCATE without binary input.
	tbl := []struct {
		Args []interface{}
		Want interface{}
	}{
		{[]interface{}{"bar", "foobarbar"}, 4},
		{[]interface{}{"xbar", "foobar"}, 0},
		{[]interface{}{"", "foobar"}, 1},
		{[]interface{}{"foobar", ""}, 0},
		{[]interface{}{"", ""}, 1},
		{[]interface{}{"好世", "你好世界"}, 2},
		{[]interface{}{"界面", "你好世界"}, 0},
		{[]interface{}{"b", "中a英b文"}, 4},
		{[]interface{}{"bAr", "foobArbar"}, 4},
		{[]interface{}{nil, "foobar"}, nil},
		{[]interface{}{"bar", nil}, nil},
		{[]interface{}{"bar", "foobarbar", 5}, 7},
		{[]interface{}{"xbar", "foobar", 1}, 0},
		{[]interface{}{"", "foobar", 2}, 2},
		{[]interface{}{"foobar", "", 1}, 0},
		{[]interface{}{"", "", 2}, 0},
		{[]interface{}{"A", "大A写的A", 0}, 0},
		{[]interface{}{"A", "大A写的A", 1}, 2},
		{[]interface{}{"A", "大A写的A", 2}, 2},
		{[]interface{}{"A", "大A写的A", 3}, 5},
		{[]interface{}{"BaR", "foobarBaR", 5}, 7},
		{[]interface{}{nil, nil}, nil},
		{[]interface{}{"", nil}, nil},
		{[]interface{}{nil, ""}, nil},
		{[]interface{}{nil, nil, 1}, nil},
		{[]interface{}{"", nil, 1}, nil},
		{[]interface{}{nil, "", 1}, nil},
		{[]interface{}{"foo", nil, -1}, nil},
		{[]interface{}{nil, "bar", 0}, nil},
	}
	Dtbl := tblToDtbl(tbl)
	instr := funcs[ast.Locate]
	for i, c := range Dtbl {
		f, err := instr.getFunction(ctx, datumsToConstants(c["Args"]))
		require.NoError(t, err)
		got, err := evalBuiltinFunc(f, chunk.Row{})
		require.NoError(t, err)
		require.NotNil(t, f)
		require.Equalf(t, c["Want"][0], got, "[%d]: args: %v", i, c["Args"])
	}
	// 2. Test LOCATE with binary input: matching becomes case-sensitive.
	tbl2 := []struct {
		Args []interface{}
		Want interface{}
	}{
		{[]interface{}{[]byte("BaR"), "foobArbar"}, 0},
		{[]interface{}{"BaR", []byte("foobArbar")}, 0},
		{[]interface{}{[]byte("bAr"), "foobarBaR", 5}, 0},
		{[]interface{}{"bAr", []byte("foobarBaR"), 5}, 0},
		{[]interface{}{"bAr", []byte("foobarbAr"), 5}, 7},
	}
	Dtbl2 := tblToDtbl(tbl2)
	for i, c := range Dtbl2 {
		exprs := datumsToConstants(c["Args"])
		// Force binary charset/collation on both arguments.
		types.SetBinChsClnFlag(exprs[0].GetType())
		types.SetBinChsClnFlag(exprs[1].GetType())
		f, err := instr.getFunction(ctx, exprs)
		require.NoError(t, err)
		got, err := evalBuiltinFunc(f, chunk.Row{})
		require.NoError(t, err)
		require.NotNil(t, f)
		require.Equalf(t, c["Want"][0], got, "[%d]: args: %v", i, c["Args"])
	}
}
// TestTrim checks the TRIM() builtin in its one-, two-, and three-argument
// forms: by default only spaces are stripped (not tabs/newlines), a custom
// remstr can be given, and a direction (LEADING/TRAILING/BOTH) can be
// selected via the third argument. NULL arguments propagate.
func TestTrim(t *testing.T) {
	ctx := createContext(t)
	cases := []struct {
		args   []interface{}
		isNil  bool
		getErr bool
		res    string
	}{
		{[]interface{}{" bar "}, false, false, "bar"},
		{[]interface{}{"\t bar \n"}, false, false, "\t bar \n"},
		{[]interface{}{"\r bar \t"}, false, false, "\r bar \t"},
		{[]interface{}{" \tbar\n "}, false, false, "\tbar\n"},
		{[]interface{}{""}, false, false, ""},
		{[]interface{}{nil}, true, false, ""},
		{[]interface{}{"xxxbarxxx", "x"}, false, false, "bar"},
		{[]interface{}{"bar", "x"}, false, false, "bar"},
		{[]interface{}{" bar ", ""}, false, false, " bar "},
		{[]interface{}{"", "x"}, false, false, ""},
		{[]interface{}{"bar", nil}, true, false, ""},
		{[]interface{}{nil, "x"}, true, false, ""},
		{[]interface{}{"xxxbarxxx", "x", int(ast.TrimLeading)}, false, false, "barxxx"},
		{[]interface{}{"barxxyz", "xyz", int(ast.TrimTrailing)}, false, false, "barx"},
		{[]interface{}{"xxxbarxxx", "x", int(ast.TrimBoth)}, false, false, "bar"},
		{[]interface{}{"bar", nil, int(ast.TrimLeading)}, true, false, ""},
		{[]interface{}{errors.New("must error")}, false, true, ""},
	}
	for _, c := range cases {
		f, err := newFunctionForTest(ctx, ast.Trim, primitiveValsToConstants(ctx, c.args)...)
		require.NoError(t, err)
		d, err := f.Eval(chunk.Row{})
		if c.getErr {
			require.Error(t, err)
		} else {
			require.NoError(t, err)
			if c.isNil {
				require.Equal(t, types.KindNull, d.Kind())
			} else {
				require.Equal(t, c.res, d.GetString())
			}
		}
	}
	// All three arities of TRIM must be constructible.
	_, err := funcs[ast.Trim].getFunction(ctx, []Expression{NewZero()})
	require.NoError(t, err)
	_, err = funcs[ast.Trim].getFunction(ctx, []Expression{NewZero(), NewZero()})
	require.NoError(t, err)
	_, err = funcs[ast.Trim].getFunction(ctx, []Expression{NewZero(), NewZero(), NewZero()})
	require.NoError(t, err)
}
// TestLTrim verifies LTRIM(): only leading ASCII spaces are removed, while
// leading tabs, carriage returns and newlines are preserved; NULL propagates
// and evaluation errors surface.
func TestLTrim(t *testing.T) {
	ctx := createContext(t)
	tests := []struct {
		input    interface{}
		isNull   bool
		wantErr  bool
		expected string
	}{
		{" bar ", false, false, "bar "},
		{"\t bar ", false, false, "\t bar "},
		{" \tbar ", false, false, "\tbar "},
		{"\t bar ", false, false, "\t bar "},
		{" \tbar ", false, false, "\tbar "},
		{"\r bar ", false, false, "\r bar "},
		{" \rbar ", false, false, "\rbar "},
		{"\n bar ", false, false, "\n bar "},
		{" \nbar ", false, false, "\nbar "},
		{"bar", false, false, "bar"},
		{"", false, false, ""},
		{nil, true, false, ""},
		{errors.New("must error"), false, true, ""},
	}
	for _, tc := range tests {
		fn, err := newFunctionForTest(ctx, ast.LTrim, primitiveValsToConstants(ctx, []interface{}{tc.input})...)
		require.NoError(t, err)
		d, err := fn.Eval(chunk.Row{})
		switch {
		case tc.wantErr:
			require.Error(t, err)
		case tc.isNull:
			require.NoError(t, err)
			require.Equal(t, types.KindNull, d.Kind())
		default:
			require.NoError(t, err)
			require.Equal(t, tc.expected, d.GetString())
		}
	}
	_, err := funcs[ast.LTrim].getFunction(ctx, []Expression{NewZero()})
	require.NoError(t, err)
}
// TestRTrim verifies RTRIM(): only trailing ASCII spaces are removed, while
// trailing tabs, carriage returns and newlines are preserved; NULL propagates
// and evaluation errors surface.
func TestRTrim(t *testing.T) {
	ctx := createContext(t)
	cases := []struct {
		arg    interface{}
		isNil  bool
		getErr bool
		res    string
	}{
		{" bar ", false, false, " bar"},
		{"bar", false, false, "bar"},
		{"bar \n", false, false, "bar \n"},
		{"bar\n ", false, false, "bar\n"},
		{"bar \r", false, false, "bar \r"},
		{"bar\r ", false, false, "bar\r"},
		{"bar \t", false, false, "bar \t"},
		{"bar\t ", false, false, "bar\t"},
		{"", false, false, ""},
		{nil, true, false, ""},
		{errors.New("must error"), false, true, ""},
	}
	for _, c := range cases {
		f, err := newFunctionForTest(ctx, ast.RTrim, primitiveValsToConstants(ctx, []interface{}{c.arg})...)
		require.NoError(t, err)
		d, err := f.Eval(chunk.Row{})
		if c.getErr {
			require.Error(t, err)
		} else {
			require.NoError(t, err)
			if c.isNil {
				require.Equal(t, types.KindNull, d.Kind())
			} else {
				require.Equal(t, c.res, d.GetString())
			}
		}
	}
	_, err := funcs[ast.RTrim].getFunction(ctx, []Expression{NewZero()})
	require.NoError(t, err)
}
// TestHexFunc checks the HEX() builtin: strings hex-encode their bytes,
// numbers format their (rounded) integer value in uppercase hex with
// negatives shown as 64-bit two's complement, NULL propagates, and encoding
// depends on the connection charset (GBK vs default), erroring on characters
// not representable in GBK.
func TestHexFunc(t *testing.T) {
	ctx := createContext(t)
	cases := []struct {
		arg    interface{}
		isNil  bool
		getErr bool
		res    string
	}{
		{"abc", false, false, "616263"},
		{"你好", false, false, "E4BDA0E5A5BD"},
		{12, false, false, "C"},
		{12.3, false, false, "C"},
		{12.8, false, false, "D"},
		{-1, false, false, "FFFFFFFFFFFFFFFF"},
		{-12.3, false, false, "FFFFFFFFFFFFFFF4"},
		{-12.8, false, false, "FFFFFFFFFFFFFFF3"},
		{types.NewBinaryLiteralFromUint(0xC, -1), false, false, "0C"},
		{0x12, false, false, "12"},
		{nil, true, false, ""},
		{errors.New("must err"), false, true, ""},
	}
	for _, c := range cases {
		f, err := newFunctionForTest(ctx, ast.Hex, primitiveValsToConstants(ctx, []interface{}{c.arg})...)
		require.NoError(t, err)
		d, err := f.Eval(chunk.Row{})
		if c.getErr {
			require.Error(t, err)
		} else {
			require.NoError(t, err)
			if c.isNil {
				require.Equal(t, types.KindNull, d.Kind())
			} else {
				require.Equal(t, c.res, d.GetString())
			}
		}
	}
	// HEX encodes the string's bytes in the connection charset, so the same
	// text hex-encodes differently under GBK, and fails for characters GBK
	// cannot represent.
	strCases := []struct {
		arg     string
		chs     string
		res     string
		errCode int
	}{
		{"你好", "", "E4BDA0E5A5BD", 0},
		{"你好", "gbk", "C4E3BAC3", 0},
		{"一忒(๑•ㅂ•)و✧", "", "E4B880E5BF9228E0B991E280A2E38582E280A229D988E29CA7", 0},
		{"一忒(๑•ㅂ•)و✧", "gbk", "", errno.ErrInvalidCharacterString},
	}
	for _, c := range strCases {
		err := ctx.GetSessionVars().SetSystemVarWithoutValidation(variable.CharacterSetConnection, c.chs)
		require.NoError(t, err)
		f, err := newFunctionForTest(ctx, ast.Hex, primitiveValsToConstants(ctx, []interface{}{c.arg})...)
		require.NoError(t, err)
		d, err := f.Eval(chunk.Row{})
		if c.errCode != 0 {
			require.Error(t, err)
			require.True(t, strings.Contains(err.Error(), strconv.Itoa(c.errCode)))
		} else {
			require.NoError(t, err)
			require.Equal(t, c.res, d.GetString())
		}
	}
	// HEX accepts both integer and string argument signatures.
	_, err := funcs[ast.Hex].getFunction(ctx, []Expression{getInt8Con()})
	require.NoError(t, err)
	_, err = funcs[ast.Hex].getFunction(ctx, []Expression{getVarcharCon()})
	require.NoError(t, err)
}
// TestUnhexFunc checks the UNHEX() builtin: hex digit pairs decode to bytes,
// odd-length input gets a leading zero nibble, numbers are first formatted as
// strings, and non-hex input (including floats with a fractional part and
// multi-byte text) yields NULL.
func TestUnhexFunc(t *testing.T) {
	ctx := createContext(t)
	cases := []struct {
		arg    interface{}
		isNil  bool
		getErr bool
		res    string
	}{
		{"4D7953514C", false, false, "MySQL"},
		{"1267", false, false, string([]byte{0x12, 0x67})},
		{"126", false, false, string([]byte{0x01, 0x26})},
		{"", false, false, ""},
		{1267, false, false, string([]byte{0x12, 0x67})},
		{126, false, false, string([]byte{0x01, 0x26})},
		{1267.3, true, false, ""},
		{"string", true, false, ""},
		{"你好", true, false, ""},
		{nil, true, false, ""},
		{errors.New("must error"), false, true, ""},
	}
	for _, c := range cases {
		f, err := newFunctionForTest(ctx, ast.Unhex, primitiveValsToConstants(ctx, []interface{}{c.arg})...)
		require.NoError(t, err)
		d, err := f.Eval(chunk.Row{})
		if c.getErr {
			require.Error(t, err)
		} else {
			require.NoError(t, err)
			if c.isNil {
				require.Equal(t, types.KindNull, d.Kind())
			} else {
				require.Equal(t, c.res, d.GetString())
			}
		}
	}
	_, err := funcs[ast.Unhex].getFunction(ctx, []Expression{NewZero()})
	require.NoError(t, err)
}
// TestBitLength checks the BIT_LENGTH() builtin: the result is 8x the byte
// length of the argument encoded in the connection charset, so the same text
// differs between GBK (2 bytes/CJK char) and the default utf8mb4 (3 bytes).
func TestBitLength(t *testing.T) {
	ctx := createContext(t)
	cases := []struct {
		args     interface{}
		chs      string
		expected int64
		isNil    bool
		getErr   bool
	}{
		{"hi", "", 16, false, false},
		{"你好", "", 48, false, false},
		{"", "", 0, false, false},
		{"abc", "gbk", 24, false, false},
		{"一二三", "gbk", 48, false, false},
		{"一二三", "", 72, false, false},
		{"一二三!", "gbk", 56, false, false},
		{"一二三!", "", 80, false, false},
	}
	for _, c := range cases {
		err := ctx.GetSessionVars().SetSystemVarWithoutValidation(variable.CharacterSetConnection, c.chs)
		require.NoError(t, err)
		f, err := newFunctionForTest(ctx, ast.BitLength, primitiveValsToConstants(ctx, []interface{}{c.args})...)
		require.NoError(t, err)
		d, err := f.Eval(chunk.Row{})
		if c.getErr {
			require.Error(t, err)
		} else {
			require.NoError(t, err)
			if c.isNil {
				require.Equal(t, types.KindNull, d.Kind())
			} else {
				require.Equal(t, c.expected, d.GetInt64())
			}
		}
	}
	_, err := funcs[ast.BitLength].getFunction(ctx, []Expression{NewZero()})
	require.NoError(t, err)
}
// TestChar checks the CHAR(... USING charset) builtin: each numeric argument
// (strings and floats are coerced/rounded) is interpreted as code-point
// bytes, optionally validated against a target charset. Invalid results
// yield NULL plus a truncation warning; under non-strict SQL mode the raw
// bytes are returned instead. Truncation errors are suppressed up front so
// the coercions don't fail the test.
func TestChar(t *testing.T) {
	ctx := createContext(t)
	ctx.GetSessionVars().StmtCtx.IgnoreTruncate.Store(true)
	tbl := []struct {
		str      string
		iNum     int64
		fNum     float64
		charset  interface{}
		result   interface{}
		warnings int
	}{
		{"65", 66, 67.5, "utf8", "ABD", 0},                               // float
		{"65", 16740, 67.5, "utf8", "AAdD", 0},                           // large num
		{"65", -1, 67.5, nil, "A\xff\xff\xff\xffD", 0},                   // negative int
		{"a", -1, 67.5, nil, "\x00\xff\xff\xff\xffD", 0},                 // invalid 'a'
		{"65", -1, 67.5, "utf8", nil, 1},                                 // with utf8, return nil
		{"a", -1, 67.5, "utf8", nil, 1},                                  // with utf8, return nil
		{"1234567", 1234567, 1234567, "gbk", "\u0012謬\u0012謬\u0012謬", 0}, // test char for gbk
		{"123456789", 123456789, 123456789, "gbk", nil, 1},               // invalid 123456789 in gbk
	}
	// run evaluates CHAR over dts and checks both the result datum and the
	// number of truncation warnings recorded on the statement context.
	run := func(i int, result interface{}, warnCnt int, dts ...interface{}) {
		fc := funcs[ast.CharFunc]
		f, err := fc.getFunction(ctx, datumsToConstants(types.MakeDatums(dts...)))
		require.NoError(t, err, i)
		require.NotNil(t, f, i)
		r, err := evalBuiltinFunc(f, chunk.Row{})
		require.NoError(t, err, i)
		testutil.DatumEqual(t, types.NewDatum(result), r, i)
		if warnCnt != 0 {
			warnings := ctx.GetSessionVars().StmtCtx.TruncateWarnings(0)
			require.Equal(t, warnCnt, len(warnings), fmt.Sprintf("%d: %v", i, warnings))
		}
	}
	for i, v := range tbl {
		run(i, v.result, v.warnings, v.str, v.iNum, v.fNum, v.charset)
	}
	// char() returns null only when the sql_mode is strict.
	ctx.GetSessionVars().StrictSQLMode = true
	run(-1, nil, 1, 123456, "utf8")
	ctx.GetSessionVars().StrictSQLMode = false
	run(-2, string([]byte{1}), 1, 123456, "utf8")
}
// TestCharLength checks the CHAR_LENGTH() builtin: for non-binary strings it
// counts characters (multi-byte text counts each character once), while for
// binary strings it counts bytes.
func TestCharLength(t *testing.T) {
	ctx := createContext(t)
	tbl := []struct {
		input  interface{}
		result interface{}
	}{
		{"33", 2},   // string
		{"你好", 2},   // mb string
		{33, 2},     // int
		{3.14, 4},   // float
		{nil, nil},  // nil
	}
	for _, v := range tbl {
		fc := funcs[ast.CharLength]
		f, err := fc.getFunction(ctx, datumsToConstants(types.MakeDatums(v.input)))
		require.NoError(t, err)
		r, err := evalBuiltinFunc(f, chunk.Row{})
		require.NoError(t, err)
		testutil.DatumEqual(t, types.NewDatum(v.result), r)
	}
	// Test binary string: with a binary charset the length is in bytes, so
	// multi-byte characters count more than one.
	tbl = []struct {
		input  interface{}
		result interface{}
	}{
		{"33", 2},   // string
		{"你好", 6},   // mb string
		{"CAFÉ", 5}, // mb string
		{"", 0},     // mb string
		{nil, nil},  // nil
	}
	for _, v := range tbl {
		fc := funcs[ast.CharLength]
		arg := datumsToConstants(types.MakeDatums(v.input))
		// Rewrite the argument's type to a binary varstring.
		tp := arg[0].GetType()
		tp.SetType(mysql.TypeVarString)
		tp.SetCharset(charset.CharsetBin)
		tp.SetCollate(charset.CollationBin)
		tp.SetFlen(types.UnspecifiedLength)
		tp.SetFlag(mysql.BinaryFlag)
		f, err := fc.getFunction(ctx, arg)
		require.NoError(t, err)
		r, err := evalBuiltinFunc(f, chunk.Row{})
		require.NoError(t, err)
		testutil.DatumEqual(t, types.NewDatum(v.result), r)
	}
}
// TestFindInSet verifies FIND_IN_SET(str, strlist): the 1-based position of
// str within the comma-separated strlist, 0 when absent or when str itself
// contains a comma, and NULL when either argument is NULL.
func TestFindInSet(t *testing.T) {
	ctx := createContext(t)
	fc := funcs[ast.FindInSet]
	tbl := []struct {
		str    interface{}
		strlst interface{}
		ret    interface{}
	}{
		{"foo", "foo,bar", 1},
		{"foo", "foobar,bar", 0},
		{" foo ", "foo, foo ", 2},
		{"", "foo,bar,", 3},
		{"", "", 0},
		{1, 1, 1},
		{1, "1", 1},
		{"1", 1, 1},
		{"a,b", "a,b,c", 0},
		{"foo", nil, nil},
		{nil, "bar", nil},
	}
	for _, c := range tbl {
		f, err := fc.getFunction(ctx, datumsToConstants(types.MakeDatums(c.str, c.strlst)))
		require.NoError(t, err)
		got, err := evalBuiltinFunc(f, chunk.Row{})
		require.NoError(t, err)
		testutil.DatumEqual(t, types.NewDatum(c.ret), got, fmt.Sprintf("FindInSet(%s, %s)", c.str, c.strlst))
	}
}
// TestField checks the FIELD() builtin: the 1-based index of the first
// argument within the remaining arguments, 0 when absent or when the first
// argument is NULL, with numeric/string comparison coercions. Truncation is
// ignored for the test so lossy coercions (e.g. "1.1a") don't error.
func TestField(t *testing.T) {
	ctx := createContext(t)
	stmtCtx := ctx.GetSessionVars().StmtCtx
	origin := stmtCtx.IgnoreTruncate.Load()
	stmtCtx.IgnoreTruncate.Store(true)
	defer func() {
		// Restore the original truncate behavior for later tests.
		stmtCtx.IgnoreTruncate.Store(origin)
	}()
	tbl := []struct {
		argLst []interface{}
		ret    interface{}
	}{
		{[]interface{}{"ej", "Hej", "ej", "Heja", "hej", "foo"}, int64(2)},
		{[]interface{}{"fo", "Hej", "ej", "Heja", "hej", "foo"}, int64(0)},
		{[]interface{}{"ej", "Hej", "ej", "Heja", "ej", "hej", "foo"}, int64(2)},
		{[]interface{}{1, 2, 3, 11, 1}, int64(4)},
		{[]interface{}{nil, 2, 3, 11, 1}, int64(0)},
		{[]interface{}{1.1, 2.1, 3.1, 11.1, 1.1}, int64(4)},
		{[]interface{}{1.1, "2.1", "3.1", "11.1", "1.1"}, int64(4)},
		{[]interface{}{"1.1a", 2.1, 3.1, 11.1, 1.1}, int64(4)},
		{[]interface{}{1.10, 0, 11e-1}, int64(2)},
		{[]interface{}{"abc", 0, 1, 11.1, 1.1}, int64(1)},
	}
	for _, c := range tbl {
		fc := funcs[ast.Field]
		f, err := fc.getFunction(ctx, datumsToConstants(types.MakeDatums(c.argLst...)))
		require.NoError(t, err)
		require.NotNil(t, f)
		r, err := evalBuiltinFunc(f, chunk.Row{})
		require.NoError(t, err)
		testutil.DatumEqual(t, types.NewDatum(c.ret), r)
	}
}
// TestLpad verifies LPAD(str, len, padstr): pads str on the left to len
// characters (repeating padstr as needed), truncates when len is shorter
// than str, and returns NULL when len is negative or when padding is needed
// but padstr is empty.
func TestLpad(t *testing.T) {
	ctx := createContext(t)
	fc := funcs[ast.Lpad]
	cases := []struct {
		input  string
		length int64
		pad    string
		want   interface{} // nil means the result must be NULL
	}{
		{"hi", 5, "?", "???hi"},
		{"hi", 1, "?", "h"},
		{"hi", 0, "?", ""},
		{"hi", -1, "?", nil},
		{"hi", 1, "", "h"},
		{"hi", 5, "", nil},
		{"hi", 5, "ab", "abahi"},
		{"hi", 6, "ab", "ababhi"},
	}
	for _, c := range cases {
		args := []types.Datum{
			types.NewStringDatum(c.input),
			types.NewIntDatum(c.length),
			types.NewStringDatum(c.pad),
		}
		f, err := fc.getFunction(ctx, datumsToConstants(args))
		require.NoError(t, err)
		require.NotNil(t, f)
		got, err := evalBuiltinFunc(f, chunk.Row{})
		require.NoError(t, err)
		if c.want == nil {
			require.Equal(t, types.KindNull, got.Kind())
		} else {
			require.Equal(t, c.want.(string), got.GetString())
		}
	}
}
// TestRpad checks RPAD(str, len, padstr): pads str on the right to len
// characters (repeating padstr as needed), truncates when len is shorter
// than str, and returns NULL when len is negative or when padding is needed
// but padstr is empty.
func TestRpad(t *testing.T) {
	ctx := createContext(t)
	tests := []struct {
		str    string
		len    int64
		padStr string
		expect interface{} // nil means the result must be NULL
	}{
		{"hi", 5, "?", "hi???"},
		{"hi", 1, "?", "h"},
		{"hi", 0, "?", ""},
		{"hi", -1, "?", nil},
		{"hi", 1, "", "h"},
		{"hi", 5, "", nil},
		{"hi", 5, "ab", "hiaba"},
		{"hi", 6, "ab", "hiabab"},
	}
	fc := funcs[ast.Rpad]
	for _, test := range tests {
		str := types.NewStringDatum(test.str)
		length := types.NewIntDatum(test.len)
		padStr := types.NewStringDatum(test.padStr)
		f, err := fc.getFunction(ctx, datumsToConstants([]types.Datum{str, length, padStr}))
		require.NoError(t, err)
		require.NotNil(t, f)
		result, err := evalBuiltinFunc(f, chunk.Row{})
		require.NoError(t, err)
		if test.expect == nil {
			require.Equal(t, types.KindNull, result.Kind())
		} else {
			expect, _ := test.expect.(string)
			require.Equal(t, expect, result.GetString())
		}
	}
}
// TestRpadSig exercises builtinRpadUTF8Sig.evalString directly: a target
// length within maxAllowedPacket (1000 here) pads normally, while a larger
// target yields NULL plus an errWarnAllowedPacketOverflowed warning.
func TestRpadSig(t *testing.T) {
	ctx := createContext(t)
	colTypes := []*types.FieldType{
		types.NewFieldType(mysql.TypeVarchar),
		types.NewFieldType(mysql.TypeLonglong),
		types.NewFieldType(mysql.TypeVarchar),
	}
	resultType := &types.FieldType{}
	resultType.SetType(mysql.TypeVarchar)
	resultType.SetFlen(1000)
	args := []Expression{
		&Column{Index: 0, RetType: colTypes[0]},
		&Column{Index: 1, RetType: colTypes[1]},
		&Column{Index: 2, RetType: colTypes[2]},
	}
	base := baseBuiltinFunc{args: args, ctx: ctx, tp: resultType}
	rpad := &builtinRpadUTF8Sig{base, 1000}
	input := chunk.NewChunkWithCapacity(colTypes, 10)
	input.AppendString(0, "abc")
	input.AppendString(0, "abc")
	input.AppendInt64(1, 6)
	input.AppendInt64(1, 10000)
	input.AppendString(2, "123")
	input.AppendString(2, "123")
	// Row 0: within the packet limit -> padded result, not NULL.
	res, isNull, err := rpad.evalString(input.GetRow(0))
	require.Equal(t, "abc123", res)
	require.False(t, isNull)
	require.NoError(t, err)
	// Row 1: over the limit -> NULL plus a warning, but no error.
	res, isNull, err = rpad.evalString(input.GetRow(1))
	require.Equal(t, "", res)
	require.True(t, isNull)
	require.NoError(t, err)
	warnings := ctx.GetSessionVars().StmtCtx.GetWarnings()
	require.Len(t, warnings, 1)
	lastWarn := warnings[len(warnings)-1]
	require.Truef(t, terror.ErrorEqual(errWarnAllowedPacketOverflowed, lastWarn.Err), "err %v", lastWarn.Err)
}
// TestInsertBinarySig exercises builtinInsertSig.evalString directly on a
// hand-built chunk, covering the replace case, a result exceeding the
// signature's maxAllowedPacket (3 bytes here), an out-of-range position, and a
// NULL in each of the four arguments.
func TestInsertBinarySig(t *testing.T) {
	ctx := createContext(t)
	colTypes := []*types.FieldType{
		types.NewFieldType(mysql.TypeVarchar),
		types.NewFieldType(mysql.TypeLonglong),
		types.NewFieldType(mysql.TypeLonglong),
		types.NewFieldType(mysql.TypeVarchar),
	}
	resultType := &types.FieldType{}
	resultType.SetType(mysql.TypeVarchar)
	resultType.SetFlen(3)
	args := []Expression{
		&Column{Index: 0, RetType: colTypes[0]},
		&Column{Index: 1, RetType: colTypes[1]},
		&Column{Index: 2, RetType: colTypes[2]},
		&Column{Index: 3, RetType: colTypes[3]},
	}
	base := baseBuiltinFunc{args: args, ctx: ctx, tp: resultType}
	// Packet limit of 3 bytes: any longer result must become NULL + warning.
	insert := &builtinInsertSig{base, 3}
	// Rows are appended column by column; per-row values (str, pos, len, newstr):
	//   0: ("abc", 3, -1, "d")    -> "abd"
	//   1: ("abc", 3, -1, "de")   -> NULL (result longer than 3 bytes)
	//   2: ("abc", 0, -1, "d")    -> "abc" (position out of range, unchanged)
	//   3: (NULL,  3, -1, "d")    -> NULL
	//   4: ("abc", NULL, -1, "d") -> NULL
	//   5: ("abc", 3, NULL, "d")  -> NULL
	//   6: ("abc", 3, -1, NULL)   -> NULL
	input := chunk.NewChunkWithCapacity(colTypes, 2)
	input.AppendString(0, "abc")
	input.AppendString(0, "abc")
	input.AppendString(0, "abc")
	input.AppendNull(0)
	input.AppendString(0, "abc")
	input.AppendString(0, "abc")
	input.AppendString(0, "abc")
	input.AppendInt64(1, 3)
	input.AppendInt64(1, 3)
	input.AppendInt64(1, 0)
	input.AppendInt64(1, 3)
	input.AppendNull(1)
	input.AppendInt64(1, 3)
	input.AppendInt64(1, 3)
	input.AppendInt64(2, -1)
	input.AppendInt64(2, -1)
	input.AppendInt64(2, -1)
	input.AppendInt64(2, -1)
	input.AppendInt64(2, -1)
	input.AppendNull(2)
	input.AppendInt64(2, -1)
	input.AppendString(3, "d")
	input.AppendString(3, "de")
	input.AppendString(3, "d")
	input.AppendString(3, "d")
	input.AppendString(3, "d")
	input.AppendString(3, "d")
	input.AppendNull(3)
	res, isNull, err := insert.evalString(input.GetRow(0))
	require.Equal(t, "abd", res)
	require.False(t, isNull)
	require.NoError(t, err)
	res, isNull, err = insert.evalString(input.GetRow(1))
	require.Equal(t, "", res)
	require.True(t, isNull)
	require.NoError(t, err)
	res, isNull, err = insert.evalString(input.GetRow(2))
	require.Equal(t, "abc", res)
	require.False(t, isNull)
	require.NoError(t, err)
	res, isNull, err = insert.evalString(input.GetRow(3))
	require.Equal(t, "", res)
	require.True(t, isNull)
	require.NoError(t, err)
	res, isNull, err = insert.evalString(input.GetRow(4))
	require.Equal(t, "", res)
	require.True(t, isNull)
	require.NoError(t, err)
	res, isNull, err = insert.evalString(input.GetRow(5))
	require.Equal(t, "", res)
	require.True(t, isNull)
	require.NoError(t, err)
	res, isNull, err = insert.evalString(input.GetRow(6))
	require.Equal(t, "", res)
	require.True(t, isNull)
	require.NoError(t, err)
	// Only the packet-overflow row may produce a warning; NULL-argument rows
	// must not add more.
	warnings := ctx.GetSessionVars().StmtCtx.GetWarnings()
	require.Equal(t, 1, len(warnings))
	lastWarn := warnings[len(warnings)-1]
	require.Truef(t, terror.ErrorEqual(errWarnAllowedPacketOverflowed, lastWarn.Err), "err %v", lastWarn.Err)
}
// TestInstr verifies the INSTR() builtin against string, numeric, multibyte,
// and NULL inputs via the datum-table helper.
func TestInstr(t *testing.T) {
	ctx := createContext(t)
	tbl := []struct {
		Args []interface{}
		Want interface{}
	}{
		{[]interface{}{"foobarbar", "bar"}, 4},
		{[]interface{}{"xbar", "foobar"}, 0},
		{[]interface{}{123456234, 234}, 2},
		{[]interface{}{123456, 567}, 0},
		{[]interface{}{1e10, 1e2}, 1},
		{[]interface{}{1.234, ".234"}, 2},
		{[]interface{}{1.234, ""}, 1},
		{[]interface{}{"", 123}, 0},
		{[]interface{}{"", ""}, 1},
		{[]interface{}{"中文美好", "美好"}, 3},
		{[]interface{}{"中文美好", "世界"}, 0},
		{[]interface{}{"中文abc", "a"}, 3},
		{[]interface{}{"live long and prosper", "long"}, 6},
		{[]interface{}{"not binary string", "binary"}, 5},
		{[]interface{}{"upper case", "upper"}, 1},
		{[]interface{}{"UPPER CASE", "CASE"}, 7},
		{[]interface{}{"中文abc", "abc"}, 3},
		{[]interface{}{"foobar", nil}, nil},
		{[]interface{}{nil, "foobar"}, nil},
		{[]interface{}{nil, nil}, nil},
	}
	fc := funcs[ast.Instr]
	for idx, row := range tblToDtbl(tbl) {
		fn, err := fc.getFunction(ctx, datumsToConstants(row["Args"]))
		require.NoError(t, err)
		require.NotNil(t, fn)
		res, err := evalBuiltinFunc(fn, chunk.Row{})
		require.NoError(t, err)
		require.Equalf(t, row["Want"][0], res, "[%d]: args: %v", idx, row["Args"])
	}
}
// TestLoadFile checks that LOAD_FILE() yields NULL for unreadable paths and
// for a NULL argument, and that the builtin can be constructed from a
// constant expression.
func TestLoadFile(t *testing.T) {
	ctx := createContext(t)
	cases := []struct {
		arg    interface{}
		isNil  bool
		getErr bool
		res    string
	}{
		{"", true, false, ""},
		{"/tmp/tikv/tikv.frm", true, false, ""},
		{"tidb.sql", true, false, ""},
		{nil, true, false, ""},
	}
	for _, tc := range cases {
		fn, err := newFunctionForTest(ctx, ast.LoadFile, primitiveValsToConstants(ctx, []interface{}{tc.arg})...)
		require.NoError(t, err)
		val, err := fn.Eval(chunk.Row{})
		if tc.getErr {
			require.Error(t, err)
			continue
		}
		require.NoError(t, err)
		if tc.isNil {
			require.Equal(t, types.KindNull, val.Kind())
		} else {
			require.Equal(t, tc.res, val.GetString())
		}
	}
	// Construction from a constant argument must also succeed.
	_, err := funcs[ast.LoadFile].getFunction(ctx, []Expression{NewZero()})
	require.NoError(t, err)
}
// TestMakeSet verifies MAKE_SET() bit selection, including NULL elements in
// the list, a zero bitmask, a NULL bitmask, and negative bitmasks.
func TestMakeSet(t *testing.T) {
	ctx := createContext(t)
	fc := funcs[ast.MakeSet]
	cases := []struct {
		argList []interface{}
		ret     interface{}
	}{
		{[]interface{}{1, "a", "b", "c"}, "a"},
		{[]interface{}{1 | 4, "hello", "nice", "world"}, "hello,world"},
		{[]interface{}{1 | 4, "hello", "nice", nil, "world"}, "hello"},
		{[]interface{}{0, "a", "b", "c"}, ""},
		{[]interface{}{nil, "a", "b", "c"}, nil},
		{[]interface{}{-100 | 4, "hello", "nice", "abc", "world"}, "abc,world"},
		{[]interface{}{-1, "hello", "nice", "abc", "world"}, "hello,nice,abc,world"},
	}
	for _, tc := range cases {
		fn, err := fc.getFunction(ctx, datumsToConstants(types.MakeDatums(tc.argList...)))
		require.NoError(t, err)
		require.NotNil(t, fn)
		out, err := evalBuiltinFunc(fn, chunk.Row{})
		require.NoError(t, err)
		testutil.DatumEqual(t, types.NewDatum(tc.ret), out)
	}
}
// TestOct checks the OCT() builtin against string, float, integer, and
// binary-literal inputs — including malformed numeric strings and values that
// overflow uint64 — plus the NULL-input case.
//
// Fixes: the errors returned by getFunction were previously discarded with
// `f, _ :=`; they are now asserted. The stale "for sha" comment (copy-pasted
// from another test) is corrected.
func TestOct(t *testing.T) {
	ctx := createContext(t)
	octTests := []struct {
		origin interface{}
		ret    string
	}{
		{"-2.7", "1777777777777777777776"},
		{-1.5, "1777777777777777777777"},
		{-1, "1777777777777777777777"},
		{"0", "0"},
		{"1", "1"},
		{"8", "10"},
		{"12", "14"},
		{"20", "24"},
		{"100", "144"},
		{"1024", "2000"},
		{"2048", "4000"},
		{1.0, "1"},
		{9.5, "11"},
		{13, "15"},
		{1025, "2001"},
		{"8a8", "10"},
		{"abc", "0"},
		// overflow uint64
		{"9999999999999999999999999", "1777777777777777777777"},
		{"-9999999999999999999999999", "1777777777777777777777"},
		{types.NewBinaryLiteralFromUint(255, -1), "377"}, // b'11111111'
		{types.NewBinaryLiteralFromUint(10, -1), "12"},   // b'1010'
		{types.NewBinaryLiteralFromUint(5, -1), "5"},     // b'0101'
	}
	fc := funcs[ast.Oct]
	for _, tt := range octTests {
		in := types.NewDatum(tt.origin)
		f, err := fc.getFunction(ctx, datumsToConstants([]types.Datum{in}))
		require.NoError(t, err)
		require.NotNil(t, f)
		r, err := evalBuiltinFunc(f, chunk.Row{})
		require.NoError(t, err)
		res, err := r.ToString()
		require.NoError(t, err)
		require.Equalf(t, tt.ret, res, "select oct(%v);", tt.origin)
	}
	// NULL input for OCT must yield NULL.
	var argNull types.Datum
	f, err := fc.getFunction(ctx, datumsToConstants([]types.Datum{argNull}))
	require.NoError(t, err)
	r, err := evalBuiltinFunc(f, chunk.Row{})
	require.NoError(t, err)
	require.True(t, r.IsNull())
}
// TestFormat checks the FORMAT() builtin: the three-argument form with an
// explicit locale, the two-argument form (including malformed numeric and
// precision strings that should produce truncation warnings), and the error
// or warning behavior for unimplemented, unsupported, and NULL locales.
func TestFormat(t *testing.T) {
	ctx := createContext(t)
	// Three-argument FORMAT(number, precision, locale) cases.
	formatTests := []struct {
		number    interface{}
		precision interface{}
		locale    string
		ret       interface{}
	}{
		{12332.12341111111111111111111111111111111111111, 4, "en_US", "12,332.1234"},
		{nil, 22, "en_US", nil},
	}
	// Two-argument FORMAT cases; `warnings` is the number of truncation
	// warnings each case is expected to leave on the statement context.
	formatTests1 := []struct {
		number    interface{}
		precision interface{}
		ret       interface{}
		warnings  int
	}{
		// issue #8796
		{1.12345, 4, "1.1235", 0},
		{9.99999, 4, "10.0000", 0},
		{1.99999, 4, "2.0000", 0},
		{1.09999, 4, "1.1000", 0},
		{-2.5000, 0, "-3", 0},
		{12332.123444, 4, "12,332.1234", 0},
		{12332.123444, 0, "12,332", 0},
		{12332.123444, -4, "12,332", 0},
		{-12332.123444, 4, "-12,332.1234", 0},
		{-12332.123444, 0, "-12,332", 0},
		{-12332.123444, -4, "-12,332", 0},
		{"12332.123444", "4", "12,332.1234", 0},
		{"12332.123444A", "4", "12,332.1234", 1},
		{"-12332.123444", "4", "-12,332.1234", 0},
		{"-12332.123444A", "4", "-12,332.1234", 1},
		{"A123345", "4", "0.0000", 1},
		{"-A123345", "4", "0.0000", 1},
		{"-12332.123444", "A", "-12,332", 1},
		{"12332.123444", "A", "12,332", 1},
		{"-12332.123444", "4A", "-12,332.1234", 1},
		{"12332.123444", "4A", "12,332.1234", 1},
		{"-A12332.123444", "A", "0", 2},
		{"A12332.123444", "A", "0", 2},
		{"-A12332.123444", "4A", "0.0000", 2},
		{"A12332.123444", "4A", "0.0000", 2},
		{"-.12332.123444", "4A", "-0.1233", 2},
		{".12332.123444", "4A", "0.1233", 2},
		{"12332.1234567890123456789012345678901", 22, "12,332.1234567890110000000000", 0},
		{nil, 22, nil, 0},
		{1, 1024, "1.000000000000000000000000000000", 0},
		{"", 1, "0.0", 0},
		{1, "", "1", 1},
	}
	// "zh_CN" locale: expected to fail with a "not implemented" error.
	formatTests2 := struct {
		number    interface{}
		precision interface{}
		locale    string
		ret       interface{}
	}{-12332.123456, -4, "zh_CN", "-12,332"}
	// "de_GE" locale: expected to fail with a "not support ..." error.
	formatTests3 := struct {
		number    interface{}
		precision interface{}
		locale    string
		ret       interface{}
	}{"-12332.123456", "4", "de_GE", "-12,332.1235"}
	// NULL locale: evaluation succeeds but errUnknownLocale warnings accrue.
	formatTests4 := struct {
		number    interface{}
		precision interface{}
		locale    interface{}
		ret       interface{}
	}{1, 4, nil, "1.0000"}
	fc := funcs[ast.Format]
	for _, tt := range formatTests {
		f, err := fc.getFunction(ctx, datumsToConstants(types.MakeDatums(tt.number, tt.precision, tt.locale)))
		require.NoError(t, err)
		require.NotNil(t, f)
		r, err := evalBuiltinFunc(f, chunk.Row{})
		require.NoError(t, err)
		testutil.DatumEqual(t, types.NewDatum(tt.ret), r)
	}
	// Turn truncation errors into warnings for the formatTests1 loop; the
	// original setting is restored after the loop.
	origConfig := ctx.GetSessionVars().StmtCtx.TruncateAsWarning
	ctx.GetSessionVars().StmtCtx.TruncateAsWarning = true
	for _, tt := range formatTests1 {
		f, err := fc.getFunction(ctx, datumsToConstants(types.MakeDatums(tt.number, tt.precision)))
		require.NoError(t, err)
		require.NotNil(t, f)
		r, err := evalBuiltinFunc(f, chunk.Row{})
		require.NoError(t, err)
		testutil.DatumEqual(t, types.NewDatum(tt.ret), r, fmt.Sprintf("test %v", tt))
		if tt.warnings > 0 {
			warnings := ctx.GetSessionVars().StmtCtx.GetWarnings()
			require.Lenf(t, warnings, tt.warnings, "test %v", tt)
			for i := 0; i < tt.warnings; i++ {
				require.Truef(t, terror.ErrorEqual(types.ErrTruncatedWrongVal, warnings[i].Err), "test %v", tt)
			}
			// Clear warnings so the next case's count starts from zero.
			ctx.GetSessionVars().StmtCtx.SetWarnings([]stmtctx.SQLWarn{})
		}
	}
	ctx.GetSessionVars().StmtCtx.TruncateAsWarning = origConfig
	f2, err := fc.getFunction(ctx, datumsToConstants(types.MakeDatums(formatTests2.number, formatTests2.precision, formatTests2.locale)))
	require.NoError(t, err)
	require.NotNil(t, f2)
	r2, err := evalBuiltinFunc(f2, chunk.Row{})
	testutil.DatumEqual(t, types.NewDatum(errors.New("not implemented")), types.NewDatum(err))
	testutil.DatumEqual(t, types.NewDatum(formatTests2.ret), r2)
	f3, err := fc.getFunction(ctx, datumsToConstants(types.MakeDatums(formatTests3.number, formatTests3.precision, formatTests3.locale)))
	require.NoError(t, err)
	require.NotNil(t, f3)
	r3, err := evalBuiltinFunc(f3, chunk.Row{})
	testutil.DatumEqual(t, types.NewDatum(errors.New("not support for the specific locale")), types.NewDatum(err))
	testutil.DatumEqual(t, types.NewDatum(formatTests3.ret), r3)
	f4, err := fc.getFunction(ctx, datumsToConstants(types.MakeDatums(formatTests4.number, formatTests4.precision, formatTests4.locale)))
	require.NoError(t, err)
	require.NotNil(t, f4)
	r4, err := evalBuiltinFunc(f4, chunk.Row{})
	require.NoError(t, err)
	testutil.DatumEqual(t, types.NewDatum(formatTests4.ret), r4)
	// By this point exactly three errUnknownLocale warnings are expected on
	// the statement context (accumulated by the locale cases above).
	warnings := ctx.GetSessionVars().StmtCtx.GetWarnings()
	require.Equal(t, 3, len(warnings))
	for i := 0; i < 3; i++ {
		require.True(t, terror.ErrorEqual(errUnknownLocale, warnings[i].Err))
	}
	ctx.GetSessionVars().StmtCtx.SetWarnings([]stmtctx.SQLWarn{})
}
// TestFromBase64 verifies FROM_BASE64() decoding, including padded input,
// embedded newlines/tabs/spaces inside the encoded text, and long
// multi-line payloads.
func TestFromBase64(t *testing.T) {
	ctx := createContext(t)
	tests := []struct {
		args   interface{}
		expect interface{}
	}{
		{"", ""},
		{"YWJj", "abc"},
		{"YWIgYw==", "ab c"},
		{"YWIKYw==", "ab\nc"},
		{"YWIJYw==", "ab\tc"},
		{"cXdlcnR5MTIzNDU2", "qwerty123456"},
		{
			"QUJDREVGR0hJSktMTU5PUFFSU1RVVldYWVphYmNkZWZnaGlqa2xtbm9wcXJzdHV2d3h5ejAxMjM0\nNTY3ODkrL0FCQ0RFRkdISUpLTE1OT1BRUlNUVVZXWFlaYWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4\neXowMTIzNDU2Nzg5Ky9BQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWmFiY2RlZmdoaWprbG1ub3Bx\ncnN0dXZ3eHl6MDEyMzQ1Njc4OSsv",
			"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",
		},
		{
			"QUJDREVGR0hJSktMTU5PUFFSU1RVVldYWVphYmNkZWZnaGlqa2xtbm9wcXJzdHV2d3h5ejAxMjM0NTY3ODkrLw==",
			"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",
		},
		{
			"QUJDREVGR0hJSktMTU5PUFFSU1RVVldYWVphYmNkZWZnaGlqa2xtbm9wcXJzdHV2d3h5ejAxMjM0NTY3ODkrLw==",
			"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",
		},
		{
			"QUJDREVGR0hJSkt\tMTU5PUFFSU1RVVld\nYWVphYmNkZ\rWZnaGlqa2xt bm9wcXJzdHV2d3h5ejAxMjM0NTY3ODkrLw==",
			"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",
		},
	}
	fc := funcs[ast.FromBase64]
	for _, tc := range tests {
		fn, err := fc.getFunction(ctx, datumsToConstants(types.MakeDatums(tc.args)))
		require.NoError(t, err)
		require.NotNil(t, fn)
		out, err := evalBuiltinFunc(fn, chunk.Row{})
		require.NoError(t, err)
		if tc.expect == nil {
			require.Equal(t, types.KindNull, out.Kind())
			continue
		}
		want, _ := tc.expect.(string)
		require.Equal(t, want, out.GetString())
	}
}
// TestFromBase64Sig exercises builtinFromBase64Sig.evalString directly,
// checking the maxAllowedPacket boundary exactly: each input appears once
// with a packet limit that just fits the decoded result and once with a limit
// one byte smaller (which must yield NULL plus an overflow warning).
func TestFromBase64Sig(t *testing.T) {
	ctx := createContext(t)
	colTypes := []*types.FieldType{
		types.NewFieldType(mysql.TypeVarchar),
	}
	tests := []struct {
		args           string
		expect         string
		isNil          bool
		maxAllowPacket uint64
	}{
		{"YWJj", "abc", false, 3},
		{"YWJj", "", true, 2},
		{
			"QUJDREVGR0hJSkt\tMTU5PUFFSU1RVVld\nYWVphYmNkZ\rWZnaGlqa2xt bm9wcXJzdHV2d3h5ejAxMjM0NTY3ODkrLw==",
			"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",
			false,
			70,
		},
		{
			"QUJDREVGR0hJSkt\tMTU5PUFFSU1RVVld\nYWVphYmNkZ\rWZnaGlqa2xt bm9wcXJzdHV2d3h5ejAxMjM0NTY3ODkrLw==",
			"",
			true,
			69,
		},
	}
	args := []Expression{
		&Column{Index: 0, RetType: colTypes[0]},
	}
	for _, test := range tests {
		resultType := &types.FieldType{}
		resultType.SetType(mysql.TypeVarchar)
		resultType.SetFlen(mysql.MaxBlobWidth)
		base := baseBuiltinFunc{args: args, ctx: ctx, tp: resultType}
		// A fresh signature per case so each carries its own packet limit.
		fromBase64 := &builtinFromBase64Sig{base, test.maxAllowPacket}
		input := chunk.NewChunkWithCapacity(colTypes, 1)
		input.AppendString(0, test.args)
		res, isNull, err := fromBase64.evalString(input.GetRow(0))
		require.NoError(t, err)
		require.Equal(t, test.isNil, isNull)
		if isNull {
			// Overflow must be reported as a single warning, then cleared so
			// the next case starts from a clean statement context.
			warnings := ctx.GetSessionVars().StmtCtx.GetWarnings()
			require.Equal(t, 1, len(warnings))
			lastWarn := warnings[len(warnings)-1]
			require.True(t, terror.ErrorEqual(errWarnAllowedPacketOverflowed, lastWarn.Err))
			ctx.GetSessionVars().StmtCtx.SetWarnings([]stmtctx.SQLWarn{})
		}
		require.Equal(t, test.expect, res)
	}
}
// TestInsert verifies the INSERT() string builtin for ASCII and multibyte
// strings, out-of-range positions and lengths, and a NULL in any argument.
func TestInsert(t *testing.T) {
	ctx := createContext(t)
	fc := funcs[ast.InsertFunc]
	cases := []struct {
		args   []interface{}
		expect interface{}
	}{
		{[]interface{}{"Quadratic", 3, 4, "What"}, "QuWhattic"},
		{[]interface{}{"Quadratic", -1, 4, "What"}, "Quadratic"},
		{[]interface{}{"Quadratic", 3, 100, "What"}, "QuWhat"},
		{[]interface{}{nil, 3, 100, "What"}, nil},
		{[]interface{}{"Quadratic", nil, 4, "What"}, nil},
		{[]interface{}{"Quadratic", 3, nil, "What"}, nil},
		{[]interface{}{"Quadratic", 3, 4, nil}, nil},
		{[]interface{}{"Quadratic", 3, -1, "What"}, "QuWhat"},
		{[]interface{}{"Quadratic", 3, 1, "What"}, "QuWhatdratic"},
		{[]interface{}{"Quadratic", -1, nil, "What"}, nil},
		{[]interface{}{"Quadratic", -1, 4, nil}, nil},
		{[]interface{}{"我叫小雨呀", 3, 2, "王雨叶"}, "我叫王雨叶呀"},
		{[]interface{}{"我叫小雨呀", -1, 2, "王雨叶"}, "我叫小雨呀"},
		{[]interface{}{"我叫小雨呀", 3, 100, "王雨叶"}, "我叫王雨叶"},
		{[]interface{}{nil, 3, 100, "王雨叶"}, nil},
		{[]interface{}{"我叫小雨呀", nil, 4, "王雨叶"}, nil},
		{[]interface{}{"我叫小雨呀", 3, nil, "王雨叶"}, nil},
		{[]interface{}{"我叫小雨呀", 3, 4, nil}, nil},
		{[]interface{}{"我叫小雨呀", 3, -1, "王雨叶"}, "我叫王雨叶"},
		{[]interface{}{"我叫小雨呀", 3, 1, "王雨叶"}, "我叫王雨叶雨呀"},
		{[]interface{}{"我叫小雨呀", -1, nil, "王雨叶"}, nil},
		{[]interface{}{"我叫小雨呀", -1, 2, nil}, nil},
	}
	for _, tc := range cases {
		fn, err := fc.getFunction(ctx, datumsToConstants(types.MakeDatums(tc.args...)))
		require.NoError(t, err)
		require.NotNil(t, fn)
		out, err := evalBuiltinFunc(fn, chunk.Row{})
		require.NoError(t, err)
		if tc.expect == nil {
			require.Equal(t, types.KindNull, out.Kind())
			continue
		}
		want, _ := tc.expect.(string)
		require.Equal(t, want, out.GetString())
	}
}
// TestOrd checks the ORD() builtin for ASCII, numeric, multibyte (utf8mb4 and
// gbk), and NULL inputs. The connection character set is switched per case so
// each string constant is interpreted in the intended encoding.
func TestOrd(t *testing.T) {
	ctx := createContext(t)
	cases := []struct {
		args     interface{}
		expected int64
		chs      string
		isNil    bool
		getErr   bool
	}{
		{"2", 50, "", false, false},
		{2, 50, "", false, false},
		{"23", 50, "", false, false},
		{23, 50, "", false, false},
		{2.3, 50, "", false, false},
		{nil, 0, "", true, false},
		{"", 0, "", false, false},
		{"你好", 14990752, "utf8mb4", false, false},
		{"にほん", 14909867, "utf8mb4", false, false},
		{"한국", 15570332, "utf8mb4", false, false},
		{"👍", 4036989325, "utf8mb4", false, false},
		{"א", 55184, "utf8mb4", false, false},
		{"abc", 97, "gbk", false, false},
		{"一二三", 53947, "gbk", false, false},
		{"àáèé", 43172, "gbk", false, false},
		{"数据库", 51965, "gbk", false, false},
	}
	for _, c := range cases {
		// Set character_set_connection for this case (an empty chs leaves the
		// default); validation is skipped so "" is accepted.
		err := ctx.GetSessionVars().SetSystemVarWithoutValidation(variable.CharacterSetConnection, c.chs)
		require.NoError(t, err)
		f, err := newFunctionForTest(ctx, ast.Ord, primitiveValsToConstants(ctx, []interface{}{c.args})...)
		require.NoError(t, err)
		d, err := f.Eval(chunk.Row{})
		if c.getErr {
			require.Error(t, err)
		} else {
			require.NoError(t, err)
			if c.isNil {
				require.Equal(t, types.KindNull, d.Kind())
			} else {
				require.Equal(t, c.expected, d.GetInt64())
			}
		}
	}
	// Construction from a constant argument must succeed.
	_, err := funcs[ast.Ord].getFunction(ctx, []Expression{NewZero()})
	require.NoError(t, err)
}
// TestElt verifies ELT() index selection, including out-of-range and
// non-integer indices (which return NULL or are truncated, per the table).
func TestElt(t *testing.T) {
	ctx := createContext(t)
	fc := funcs[ast.Elt]
	cases := []struct {
		argLst []interface{}
		ret    interface{}
	}{
		{[]interface{}{1, "Hej", "ej", "Heja", "hej", "foo"}, "Hej"},
		{[]interface{}{9, "Hej", "ej", "Heja", "hej", "foo"}, nil},
		{[]interface{}{-1, "Hej", "ej", "Heja", "ej", "hej", "foo"}, nil},
		{[]interface{}{0, 2, 3, 11, 1}, nil},
		{[]interface{}{3, 2, 3, 11, 1}, "11"},
		{[]interface{}{1.1, "2.1", "3.1", "11.1", "1.1"}, "2.1"},
	}
	for _, tc := range cases {
		fn, err := fc.getFunction(ctx, datumsToConstants(types.MakeDatums(tc.argLst...)))
		require.NoError(t, err)
		out, err := evalBuiltinFunc(fn, chunk.Row{})
		require.NoError(t, err)
		testutil.DatumEqual(t, types.NewDatum(tc.ret), out)
	}
}
// TestExportSet verifies EXPORT_SET() for positive, negative, and very large
// bit arguments, across the 3-, 4-, and 5-argument forms.
func TestExportSet(t *testing.T) {
	ctx := createContext(t)
	fc := funcs[ast.ExportSet]
	cases := []struct {
		argLst []interface{}
		res    string
	}{
		{[]interface{}{-9223372036854775807, "Y", "N", ",", 5}, "Y,N,N,N,N"},
		{[]interface{}{-6, "Y", "N", ",", 5}, "N,Y,N,Y,Y"},
		{[]interface{}{5, "Y", "N", ",", 4}, "Y,N,Y,N"},
		{[]interface{}{5, "Y", "N", ",", 0}, ""},
		{[]interface{}{5, "Y", "N", ",", 1}, "Y"},
		{[]interface{}{6, "1", "0", ",", 10}, "0,1,1,0,0,0,0,0,0,0"},
		{[]interface{}{333333, "Ysss", "sN", "---", 9}, "Ysss---sN---Ysss---sN---Ysss---sN---sN---sN---sN"},
		{[]interface{}{7, "Y", "N"}, "Y,Y,Y,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N"},
		{[]interface{}{7, "Y", "N", 6}, "Y6Y6Y6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N"},
		{[]interface{}{7, "Y", "N", 6, 133}, "Y6Y6Y6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N6N"},
	}
	for _, tc := range cases {
		fn, err := fc.getFunction(ctx, datumsToConstants(types.MakeDatums(tc.argLst...)))
		require.NoError(t, err)
		require.NotNil(t, fn)
		out, err := evalBuiltinFunc(fn, chunk.Row{})
		require.NoError(t, err)
		str, err := out.ToString()
		require.NoError(t, err)
		require.Equal(t, tc.res, str)
	}
}
func TestBin(t *testing.T) {
tbl := []struct {
Input interface{}
Expected interface{}
}{
{"10", "1010"},
{"10.2", "1010"},
{"10aa", "1010"},
{"10.2aa", "1010"},
{"aaa", "0"},
{"", nil},
{10, "1010"},
{10.0, "1010"},
{-1, "1111111111111111111111111111111111111111111111111111111111111111"},
{"-1", "1111111111111111111111111111111111111111111111111111111111111111"},
{nil, nil},
}
fc := funcs[ast.Bin]
dtbl := tblToDtbl(tbl)
ctx := mock.NewContext()
ctx.GetSessionVars().StmtCtx.IgnoreTruncate.Store(true)
for _, c := range dtbl {
f, err := fc.getFunction(ctx, datumsToConstants(c["Input"]))
require.NoError(t, err)
require.NotNil(t, f)
r, err := evalBuiltinFunc(f, chunk.Row{})
require.NoError(t, err)
testutil.DatumEqual(t, types.NewDatum(c["Expected"][0]), r)
}
}
// TestQuote verifies QUOTE() escaping of single quotes, backslashes, NUL and
// Ctrl-Z bytes, multibyte text, and the NULL input (which yields "NULL").
func TestQuote(t *testing.T) {
	ctx := createContext(t)
	fc := funcs[ast.Quote]
	cases := []struct {
		arg interface{}
		ret interface{}
	}{
		{`Don\'t!`, `'Don\\\'t!'`},
		{`Don't`, `'Don\'t'`},
		{`Don"`, `'Don"'`},
		{`Don\"`, `'Don\\"'`},
		{`\'`, `'\\\''`},
		{`\"`, `'\\"'`},
		{`萌萌哒(๑•ᴗ•๑)😊`, `'萌萌哒(๑•ᴗ•๑)😊'`},
		{`㍿㌍㍑㌫`, `'㍿㌍㍑㌫'`},
		{string([]byte{0, 26}), `'\0\Z'`},
		{nil, "NULL"},
	}
	for _, tc := range cases {
		fn, err := fc.getFunction(ctx, datumsToConstants(types.MakeDatums(tc.arg)))
		require.NoError(t, err)
		require.NotNil(t, fn)
		out, err := evalBuiltinFunc(fn, chunk.Row{})
		require.NoError(t, err)
		testutil.DatumEqual(t, types.NewDatum(tc.ret), out)
	}
}
// TestToBase64 verifies TO_BASE64() encoding: short strings, numbers, strings
// containing whitespace/control characters, long inputs that wrap across
// 76-character output lines, NULL input, and GBK-encoded input selected via
// the connection character set.
func TestToBase64(t *testing.T) {
	ctx := createContext(t)
	tests := []struct {
		args   interface{}
		expect string
		isNil  bool
		getErr bool
	}{
		{"", "", false, false},
		{"abc", "YWJj", false, false},
		{"ab c", "YWIgYw==", false, false},
		{1, "MQ==", false, false},
		{1.1, "MS4x", false, false},
		{"ab\nc", "YWIKYw==", false, false},
		{"ab\tc", "YWIJYw==", false, false},
		{"qwerty123456", "cXdlcnR5MTIzNDU2", false, false},
		{
			"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",
			"QUJDREVGR0hJSktMTU5PUFFSU1RVVldYWVphYmNkZWZnaGlqa2xtbm9wcXJzdHV2d3h5ejAxMjM0\nNTY3ODkrLw==",
			false,
			false,
		},
		{
			"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",
			"QUJDREVGR0hJSktMTU5PUFFSU1RVVldYWVphYmNkZWZnaGlqa2xtbm9wcXJzdHV2d3h5ejAxMjM0\nNTY3ODkrL0FCQ0RFRkdISUpLTE1OT1BRUlNUVVZXWFlaYWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4\neXowMTIzNDU2Nzg5Ky9BQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWmFiY2RlZmdoaWprbG1ub3Bx\ncnN0dXZ3eHl6MDEyMzQ1Njc4OSsv",
			false,
			false,
		},
		{
			"ABCD  EFGHI\nJKLMNOPQRSTUVWXY\tZabcdefghijklmnopqrstuv  wxyz012\r3456789+/",
			"QUJDRCAgRUZHSEkKSktMTU5PUFFSU1RVVldYWQlaYWJjZGVmZ2hpamtsbW5vcHFyc3R1diAgd3h5\nejAxMg0zNDU2Nzg5Ky8=",
			false,
			false,
		},
		{nil, "", true, false},
	}
	// On 32-bit platforms an input this long cannot be encoded (the needed
	// output length overflows int), so the result is NULL.
	if strconv.IntSize == 32 {
		tests = append(tests, struct {
			args   interface{}
			expect string
			isNil  bool
			getErr bool
		}{
			strings.Repeat("a", 1589695687),
			"",
			true,
			false,
		})
	}
	for _, test := range tests {
		f, err := newFunctionForTest(ctx, ast.ToBase64, primitiveValsToConstants(ctx, []interface{}{test.args})...)
		require.NoError(t, err)
		d, err := f.Eval(chunk.Row{})
		if test.getErr {
			require.Error(t, err)
		} else {
			require.NoError(t, err)
			if test.isNil {
				require.Equal(t, types.KindNull, d.Kind())
			} else {
				require.Equal(t, test.expect, d.GetString())
			}
		}
	}
	// Construction from a constant argument must succeed.
	_, err := funcs[ast.ToBase64].getFunction(ctx, []Expression{NewZero()})
	require.NoError(t, err)
	// Test GBK String: the same literal encodes differently depending on the
	// connection character set (gbk vs. the default).
	tbl := []struct {
		input  string
		chs    string
		result string
	}{
		{"abc", "gbk", "YWJj"},
		{"一二三", "gbk", "0ru2/sj9"},
		{"一二三", "", "5LiA5LqM5LiJ"},
		{"一二三!", "gbk", "0ru2/sj9IQ=="},
		{"一二三!", "", "5LiA5LqM5LiJIQ=="},
	}
	for _, c := range tbl {
		err := ctx.GetSessionVars().SetSystemVarWithoutValidation(variable.CharacterSetConnection, c.chs)
		require.NoError(t, err)
		f, err := newFunctionForTest(ctx, ast.ToBase64, primitiveValsToConstants(ctx, []interface{}{c.input})...)
		require.NoError(t, err)
		d, err := f.Eval(chunk.Row{})
		require.NoError(t, err)
		require.Equal(t, c.result, d.GetString())
	}
}
// TestToBase64Sig exercises builtinToBase64Sig.evalString directly, probing
// the maxAllowedPacket boundary exactly: each input appears once with a
// packet limit that just fits the encoded result and once with a limit one
// byte smaller (which must yield NULL plus an overflow warning).
func TestToBase64Sig(t *testing.T) {
	ctx := createContext(t)
	colTypes := []*types.FieldType{
		types.NewFieldType(mysql.TypeVarchar),
	}
	tests := []struct {
		args           string
		expect         string
		isNil          bool
		maxAllowPacket uint64
	}{
		{"abc", "YWJj", false, 4},
		{"abc", "", true, 3},
		{
			"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",
			"QUJDREVGR0hJSktMTU5PUFFSU1RVVldYWVphYmNkZWZnaGlqa2xtbm9wcXJzdHV2d3h5ejAxMjM0\nNTY3ODkrLw==",
			false,
			89,
		},
		{
			"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",
			"",
			true,
			88,
		},
		{
			"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",
			"QUJDREVGR0hJSktMTU5PUFFSU1RVVldYWVphYmNkZWZnaGlqa2xtbm9wcXJzdHV2d3h5ejAxMjM0\nNTY3ODkrL0FCQ0RFRkdISUpLTE1OT1BRUlNUVVZXWFlaYWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4\neXowMTIzNDU2Nzg5Ky9BQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWmFiY2RlZmdoaWprbG1ub3Bx\ncnN0dXZ3eHl6MDEyMzQ1Njc4OSsv",
			false,
			259,
		},
		{
			"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",
			"",
			true,
			258,
		},
	}
	args := []Expression{
		&Column{Index: 0, RetType: colTypes[0]},
	}
	for _, test := range tests {
		resultType := &types.FieldType{}
		resultType.SetType(mysql.TypeVarchar)
		// Size the result column to the exact encoded length for this input.
		resultType.SetFlen(base64NeededEncodedLength(len(test.args)))
		base := baseBuiltinFunc{args: args, ctx: ctx, tp: resultType}
		// A fresh signature per case so each carries its own packet limit.
		toBase64 := &builtinToBase64Sig{base, test.maxAllowPacket}
		input := chunk.NewChunkWithCapacity(colTypes, 1)
		input.AppendString(0, test.args)
		res, isNull, err := toBase64.evalString(input.GetRow(0))
		require.NoError(t, err)
		if test.isNil {
			require.True(t, isNull)
			// Overflow must be reported as a single warning, then cleared so
			// the next case starts from a clean statement context.
			warnings := ctx.GetSessionVars().StmtCtx.GetWarnings()
			require.Equal(t, 1, len(warnings))
			lastWarn := warnings[len(warnings)-1]
			require.True(t, terror.ErrorEqual(errWarnAllowedPacketOverflowed, lastWarn.Err))
			ctx.GetSessionVars().StmtCtx.SetWarnings([]stmtctx.SQLWarn{})
		} else {
			require.False(t, isNull)
		}
		require.Equal(t, test.expect, res)
	}
}
// TestStringRight checks the RIGHT() builtin: normal extraction, lengths at
// and beyond the string length, a negative length, the empty string, and NULL.
//
// Fixes: the error returned by getFunction was previously discarded with
// `f, _ :=`; it is now asserted.
func TestStringRight(t *testing.T) {
	ctx := createContext(t)
	fc := funcs[ast.Right]
	tests := []struct {
		str    interface{}
		length interface{}
		expect interface{}
	}{
		{"helloworld", 5, "world"},
		{"helloworld", 10, "helloworld"},
		{"helloworld", 11, "helloworld"},
		{"helloworld", -1, ""},
		{"", 2, ""},
		{nil, 2, nil},
	}
	for _, test := range tests {
		str := types.NewDatum(test.str)
		length := types.NewDatum(test.length)
		f, err := fc.getFunction(ctx, datumsToConstants([]types.Datum{str, length}))
		require.NoError(t, err)
		result, err := evalBuiltinFunc(f, chunk.Row{})
		require.NoError(t, err)
		if result.IsNull() {
			// A NULL result is only acceptable when the table expects nil.
			require.Nil(t, test.expect)
			continue
		}
		res, err := result.ToString()
		require.NoError(t, err)
		require.Equal(t, test.expect, res)
	}
}
// TestWeightString checks the WEIGHT_STRING() builtin in its one-argument
// form (padding "NONE") and its three-argument AS CHAR(n)/AS BINARY(n) forms.
// It also asserts that the return collation is always binary and that BINARY
// truncation produces the documented warning.
func TestWeightString(t *testing.T) {
	ctx := createContext(t)
	fc := funcs[ast.WeightString]
	tests := []struct {
		expr    interface{}
		padding string
		length  int
		expect  interface{}
	}{
		{nil, "NONE", 0, nil},
		{7, "NONE", 0, nil},
		{7.0, "NONE", 0, nil},
		{"a", "NONE", 0, "a"},
		{"a ", "NONE", 0, "a"},
		{"中", "NONE", 0, "中"},
		{"中 ", "NONE", 0, "中"},
		{nil, "CHAR", 5, nil},
		{7, "CHAR", 5, nil},
		{7.0, "NONE", 0, nil},
		{"a", "CHAR", 5, "a"},
		{"a ", "CHAR", 5, "a"},
		{"中", "CHAR", 5, "中"},
		{"中 ", "CHAR", 5, "中"},
		{nil, "BINARY", 5, nil},
		{7, "BINARY", 2, "7\x00"},
		{7.0, "NONE", 0, nil},
		{"a", "BINARY", 1, "a"},
		{"ab", "BINARY", 1, "a"},
		{"a", "BINARY", 5, "a\x00\x00\x00\x00"},
		{"a ", "BINARY", 5, "a \x00\x00\x00"},
		{"中", "BINARY", 1, "\xe4"},
		{"中", "BINARY", 2, "\xe4\xb8"},
		{"中", "BINARY", 3, "中"},
		{"中", "BINARY", 5, "中\x00\x00"},
	}
	for _, test := range tests {
		str := types.NewDatum(test.expr)
		var f builtinFunc
		var err error
		// "NONE" marks the one-argument form; otherwise build the
		// three-argument (expr, padding, length) form.
		if test.padding == "NONE" {
			f, err = fc.getFunction(ctx, datumsToConstants([]types.Datum{str}))
		} else {
			padding := types.NewDatum(test.padding)
			length := types.NewDatum(test.length)
			f, err = fc.getFunction(ctx, datumsToConstants([]types.Datum{str, padding, length}))
		}
		require.NoError(t, err)
		// WEIGHT_STRING always returns a binary-collated value.
		retType := f.getRetTp()
		require.Equal(t, charset.CollationBin, retType.GetCollate())
		// Reset warnings.
		ctx.GetSessionVars().StmtCtx.ResetForRetry()
		result, err := evalBuiltinFunc(f, chunk.Row{})
		require.NoError(t, err)
		if result.IsNull() {
			require.Nil(t, test.expect)
			continue
		}
		res, err := result.ToString()
		require.NoError(t, err)
		require.Equal(t, test.expect, res)
		if test.expr == nil {
			continue
		}
		// AS BINARY(n) with n shorter than the input must raise exactly one
		// truncation warning.
		strExpr := fmt.Sprintf("%v", test.expr)
		if test.padding == "BINARY" && test.length < len(strExpr) {
			expectWarn := fmt.Sprintf("[expression:1292]Truncated incorrect BINARY(%d) value: '%s'", test.length, strExpr)
			obtainedWarns := ctx.GetSessionVars().StmtCtx.GetWarnings()
			require.Equal(t, 1, len(obtainedWarns))
			require.Equal(t, "Warning", obtainedWarns[0].Level)
			require.Equal(t, expectWarn, obtainedWarns[0].Err.Error())
		}
	}
}
// TestTranslate verifies the TRANSLATE() builtin: per-character mapping,
// deletion when the to-set is shorter than the from-set, multibyte text,
// NULL arguments, and raw non-UTF-8 byte sequences.
func TestTranslate(t *testing.T) {
	ctx := createContext(t)
	cases := []struct {
		args  []interface{}
		isNil bool
		isErr bool
		res   string
	}{
		{[]interface{}{"ABC", "A", "B"}, false, false, "BBC"},
		{[]interface{}{"ABC", "Z", "ABC"}, false, false, "ABC"},
		{[]interface{}{"A.B.C", ".A", "|"}, false, false, "|B|C"},
		{[]interface{}{"中文", "文", "国"}, false, false, "中国"},
		{[]interface{}{"UPPERCASE", "ABCDEFGHIJKLMNOPQRSTUVWXYZ", "abcdefghijklmnopqrstuvwxyz"}, false, false, "uppercase"},
		{[]interface{}{"lowercase", "abcdefghijklmnopqrstuvwxyz", "ABCDEFGHIJKLMNOPQRSTUVWXYZ"}, false, false, "LOWERCASE"},
		{[]interface{}{"aaaaabbbbb", "aaabbb", "xyzXYZ"}, false, false, "xxxxxXXXXX"},
		{[]interface{}{"Ti*DB User's Guide", " */'", "___"}, false, false, "Ti_DB_Users_Guide"},
		{[]interface{}{"abc", "ab", ""}, false, false, "c"},
		{[]interface{}{"aaa", "a", ""}, false, false, ""},
		{[]interface{}{"", "null", "null"}, false, false, ""},
		{[]interface{}{"null", "", "null"}, false, false, "null"},
		{[]interface{}{"null", "null", ""}, false, false, ""},
		{[]interface{}{nil, "error", "error"}, true, false, ""},
		{[]interface{}{"error", nil, "error"}, true, false, ""},
		{[]interface{}{"error", "error", nil}, true, false, ""},
		{[]interface{}{nil, nil, nil}, true, false, ""},
		{[]interface{}{[]byte{255}, []byte{255}, []byte{255}}, false, false, string([]byte{255})},
		{[]interface{}{[]byte{255, 255}, []byte{255}, []byte{254}}, false, false, string([]byte{254, 254})},
		{[]interface{}{[]byte{255, 255}, []byte{255, 255}, []byte{254, 253}}, false, false, string([]byte{254, 254})},
		{[]interface{}{[]byte{255, 254, 253, 252, 251}, []byte{253, 252, 251}, []byte{254, 253}}, false, false, string([]byte{255, 254, 254, 253})},
	}
	for _, tc := range cases {
		fn, err := newFunctionForTest(ctx, ast.Translate, primitiveValsToConstants(ctx, tc.args)...)
		require.NoError(t, err)
		val, err := fn.Eval(chunk.Row{})
		if tc.isErr {
			require.Error(t, err)
			continue
		}
		require.NoError(t, err)
		if tc.isNil {
			require.Equal(t, types.KindNull, val.Kind())
		} else {
			require.Equal(t, tc.res, val.GetString())
		}
	}
}
// TestCIWeightString checks WEIGHT_STRING() results for case-insensitive
// collations: utf8mb4_general_ci, utf8mb4_unicode_ci, and utf8mb4_0900_ai_ci.
// Each collation gets its own expectation table; the shared closure runs the
// one-argument ("NONE") and three-argument (CHAR/BINARY padding) forms.
func TestCIWeightString(t *testing.T) {
	ctx := createContext(t)
	type weightStringTest struct {
		str     string
		padding string
		length  int
		expect  interface{}
	}
	// checkResult evaluates every test case with the input string bound to
	// the given collation and compares against the expected weight string.
	checkResult := func(collation string, tests []weightStringTest) {
		fc := funcs[ast.WeightString]
		for _, test := range tests {
			str := types.NewCollationStringDatum(test.str, collation)
			var f builtinFunc
			var err error
			// "NONE" marks the one-argument form; otherwise build the
			// three-argument (expr, padding, length) form.
			if test.padding == "NONE" {
				f, err = fc.getFunction(ctx, datumsToConstants([]types.Datum{str}))
			} else {
				padding := types.NewDatum(test.padding)
				length := types.NewDatum(test.length)
				f, err = fc.getFunction(ctx, datumsToConstants([]types.Datum{str, padding, length}))
			}
			require.NoError(t, err)
			result, err := evalBuiltinFunc(f, chunk.Row{})
			require.NoError(t, err)
			if result.IsNull() {
				require.Nil(t, test.expect)
				continue
			}
			res, err := result.ToString()
			require.NoError(t, err)
			require.Equal(t, test.expect, res, "test case: '%s' '%s' %d", test.str, test.padding, test.length)
		}
	}
	// general_ci: accented variants of 'a' collapse to the same weight.
	generalTests := []weightStringTest{
		{"aAÁàãăâ", "NONE", 0, "\x00A\x00A\x00A\x00A\x00A\x00A\x00A"},
		{"中", "NONE", 0, "\x4E\x2D"},
		{"a", "CHAR", 5, "\x00A"},
		{"a ", "CHAR", 5, "\x00A"},
		{"中", "CHAR", 5, "\x4E\x2D"},
		{"中 ", "CHAR", 5, "\x4E\x2D"},
		{"a", "BINARY", 1, "a"},
		{"ab", "BINARY", 1, "a"},
		{"a", "BINARY", 5, "a\x00\x00\x00\x00"},
		{"a ", "BINARY", 5, "a \x00\x00\x00"},
		{"中", "BINARY", 1, "\xe4"},
		{"中", "BINARY", 2, "\xe4\xb8"},
		{"中", "BINARY", 3, "中"},
		{"中", "BINARY", 5, "中\x00\x00"},
	}
	// unicode_ci: UCA weights; CHAR padding does not extend the result.
	unicodeTests := []weightStringTest{
		{"aAÁàãăâ", "NONE", 0, "\x0e3\x0e3\x0e3\x0e3\x0e3\x0e3\x0e3"},
		{"中", "NONE", 0, "\xfb\x40\xce\x2d"},
		{"a", "CHAR", 5, "\x0e3"},
		{"a ", "CHAR", 5, "\x0e3"},
		{"中", "CHAR", 5, "\xfb\x40\xce\x2d"},
		{"中 ", "CHAR", 5, "\xfb\x40\xce\x2d"},
		{"a", "BINARY", 1, "a"},
		{"ab", "BINARY", 1, "a"},
		{"a", "BINARY", 5, "a\x00\x00\x00\x00"},
		{"a ", "BINARY", 5, "a \x00\x00\x00"},
		{"中", "BINARY", 1, "\xe4"},
		{"中", "BINARY", 2, "\xe4\xb8"},
		{"中", "BINARY", 3, "中"},
		{"中", "BINARY", 5, "中\x00\x00"},
	}
	// 0900_ai_ci: unlike unicode_ci, CHAR(n) pads the weight string out to n
	// code points with the space weight (\x02\x09).
	unicode0900Tests := []weightStringTest{
		{"aAÁàãăâ", "NONE", 0, "\x1cG\x1cG\x1cG\x1cG\x1cG\x1cG\x1cG"},
		{"中", "NONE", 0, "\xfb\x40\xce\x2d"},
		{"a", "CHAR", 5, "\x1c\x47\x02\x09\x02\x09\x02\x09\x02\x09"},
		{"a ", "CHAR", 5, "\x1c\x47\x02\x09\x02\x09\x02\x09\x02\x09"},
		{"中", "CHAR", 5, "\xfb\x40\xce\x2d\x02\x09\x02\x09\x02\x09\x02\x09"},
		{"中 ", "CHAR", 5, "\xfb\x40\xce\x2d\x02\x09\x02\x09\x02\x09\x02\x09"},
		{"a", "BINARY", 1, "a"},
		{"ab", "BINARY", 1, "a"},
		{"a", "BINARY", 5, "a\x00\x00\x00\x00"},
		{"a ", "BINARY", 5, "a \x00\x00\x00"},
		{"中", "BINARY", 1, "\xe4"},
		{"中", "BINARY", 2, "\xe4\xb8"},
		{"中", "BINARY", 3, "中"},
		{"中", "BINARY", 5, "中\x00\x00"},
	}
	checkResult("utf8mb4_general_ci", generalTests)
	checkResult("utf8mb4_unicode_ci", unicodeTests)
	checkResult("utf8mb4_0900_ai_ci", unicode0900Tests)
}
|
package users
import (
mock_user_repo "2019_2_IBAT/pkg/app/users/service/mock_user_repo"
"fmt"
"testing"
"time"
. "2019_2_IBAT/pkg/pkg/models"
"github.com/golang/mock/gomock"
"github.com/google/uuid"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
)
// TestUserService_CreateFavorite verifies UserService.CreateFavorite against a
// mocked repository: a seeker can add a favorite, a storage failure surfaces
// an error, and a non-seeker role is rejected before the storage is touched.
func TestUserService_CreateFavorite(t *testing.T) {
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()

	mockUserRepo := mock_user_repo.NewMockRepository(mockCtrl)
	h := UserService{
		Storage: mockUserRepo,
	}

	tests := []struct {
		name             string
		record           AuthStorageValue
		wantFail         bool
		wantErrorMessage string
		vacancyId        uuid.UUID
	}{
		{
			name: "Test1",
			record: AuthStorageValue{
				ID:      uuid.MustParse("6ba7b810-9dad-11d1-0000-00004fd430c8"),
				Role:    SeekerStr,
				Expires: time.Now().In(Loc).Add(24 * time.Hour).Format(TimeFormat),
			},
			vacancyId: uuid.New(),
		},
		{
			name: "Test2",
			record: AuthStorageValue{
				ID:      uuid.MustParse("6ba7b810-9dad-11d1-0000-00004fd430c8"),
				Role:    SeekerStr,
				Expires: time.Now().In(Loc).Add(24 * time.Hour).Format(TimeFormat),
			},
			vacancyId:        uuid.New(),
			wantFail:         true,
			wantErrorMessage: "Error while creating favorite_vacancy",
		},
		{
			name:             "Test3",
			wantFail:         true,
			wantErrorMessage: "Invalid action",
			record: AuthStorageValue{
				ID:      uuid.MustParse("6ba7b810-9dad-11d1-0000-00004fd430c8"),
				Role:    EmployerStr,
				Expires: time.Now().In(Loc).Add(24 * time.Hour).Format(TimeFormat),
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// The "Invalid action" case must fail before reaching the
			// storage, so no mock expectation is registered for it.
			if !tt.wantFail {
				mockUserRepo.
					EXPECT().
					CreateFavorite(gomock.Any()).
					Return(true)
			} else if tt.wantErrorMessage != "Invalid action" {
				mockUserRepo.
					EXPECT().
					CreateFavorite(gomock.Any()).
					Return(false)
			}
			err := h.CreateFavorite(tt.vacancyId, tt.record)
			if !tt.wantFail {
				// Fixed: was require.Equal(t, err, nil), which reverses
				// testify's (expected, actual) order and compares a typed
				// error interface to untyped nil.
				require.NoError(t, err)
			} else {
				require.Equal(t, tt.wantErrorMessage, err.Error(), "The two values should be the same.")
			}
		})
	}
}
// TestUserService_GetFavoriteVacancies verifies UserService.GetFavoriteVacancies
// against a mocked repository: a seeker gets their favorites back unchanged,
// while an empty/invalid record yields an "Invalid action" error.
func TestUserService_GetFavoriteVacancies(t *testing.T) {
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()

	mockUserRepo := mock_user_repo.NewMockRepository(mockCtrl)
	h := UserService{
		Storage: mockUserRepo,
	}

	expVacancies := []Vacancy{
		{
			ID:           uuid.MustParse("11111111-9dad-11d1-80b1-00c04fd430c8"),
			OwnerID:      uuid.MustParse("6ba7b810-9dad-11d1-80b1-00c04fd430c8"),
			CompanyName:  "MCDonalds",
			Experience:   "None",
			Position:     "",
			Tasks:        "bring food to costumers",
			Requirements: "middle school education",
			WageFrom:     "1000 USD",
			Conditions:   "nice team",
			About:        "nice job",
		},
		{
			ID:           uuid.MustParse("11111111-9dad-11d1-1111-00c04fd430c8"),
			OwnerID:      uuid.MustParse("6ba7b810-9bbb-1111-1111-00c04fd430c8"),
			CompanyName:  "PETUH",
			Experience:   "None",
			Position:     "driver",
			Tasks:        "drive",
			Requirements: "middle school education",
			WageFrom:     "50000 RUB",
			Conditions:   "nice team",
			About:        "nice job",
		},
	}

	tests := []struct {
		name             string
		record           AuthStorageValue
		wantFail         bool
		wantRecomms      bool
		wantErrorMessage string
	}{
		{
			name: "Test1",
			record: AuthStorageValue{
				ID:   uuid.New(),
				Role: SeekerStr,
			},
		},
		{
			name:             "Test2",
			wantFail:         true,
			wantErrorMessage: "Invalid action",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if !tt.wantFail {
				mockUserRepo.
					EXPECT().
					GetFavoriteVacancies(gomock.Any()).
					Return(expVacancies, nil)
			} else {
				mockUserRepo.
					EXPECT().
					GetFavoriteVacancies(tt.record).
					Return([]Vacancy{}, fmt.Errorf("Invalid action"))
			}
			gotVacs, err := h.GetFavoriteVacancies(tt.record)
			if !tt.wantFail {
				// Fixed: replaced a manual err != nil check + t.Error with
				// the idiomatic require.NoError, which also stops the test.
				require.NoError(t, err)
				require.Equal(t, expVacancies, gotVacs, "The two values should be the same.")
			} else {
				require.Equal(t, tt.wantErrorMessage, err.Error(), "The two values should be the same.")
			}
		})
	}
}
// func (h *UserService) DeleteFavoriteVacancy(vacancyId uuid.UUID, authInfo AuthStorageValue) error {
// err := h.Storage.DeleteFavoriteVacancy(vacancyId, authInfo)
// if err != nil {
// return errors.New(InternalErrorMsg)
// }
// return nil
// }
// TestUserService_DeleteFavoriteVacancy verifies UserService.DeleteFavoriteVacancy:
// a successful repository delete yields no error, a failing delete is wrapped
// into InternalErrorMsg.
func TestUserService_DeleteFavoriteVacancy(t *testing.T) {
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()

	mockUserRepo := mock_user_repo.NewMockRepository(mockCtrl)
	h := UserService{
		Storage: mockUserRepo,
	}

	tests := []struct {
		name             string
		vacancyId        uuid.UUID
		record           AuthStorageValue
		vacancy          Vacancy
		wantFail         bool
		wantUnauth       bool
		wantErrorMessage string
	}{
		{
			name:      "Test1",
			vacancyId: uuid.MustParse("1ba7b811-9dad-11d1-0000-00004fd430c8"),
			record: AuthStorageValue{
				ID:      uuid.MustParse("6ba7b810-9dad-11d1-0000-00004fd430c8"),
				Role:    EmployerStr,
				Expires: time.Now().In(Loc).Add(24 * time.Hour).Format(TimeFormat),
			},
		},
		{
			name:             "Test2",
			wantFail:         true,
			wantErrorMessage: InternalErrorMsg,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if !tt.wantFail {
				mockUserRepo.
					EXPECT().
					DeleteFavoriteVacancy(tt.vacancyId, tt.record).
					Return(nil)
			} else {
				mockUserRepo.
					EXPECT().
					DeleteFavoriteVacancy(tt.vacancyId, tt.record).
					Return(errors.New(InternalErrorMsg))
			}
			err := h.DeleteFavoriteVacancy(tt.vacancyId, tt.record)
			if !tt.wantFail {
				// Fixed: replaced a manual err != nil check + t.Error with
				// the idiomatic require.NoError.
				require.NoError(t, err)
			} else {
				require.Equal(t, tt.wantErrorMessage, err.Error(), "The two values should be the same.")
			}
		})
	}
}
|
package util
import (
	"crypto/md5"
	"crypto/sha256"
	"fmt"
	"strings"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/crypto/sha3"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/sanguohot/medichain/zap"
)
// RlpHash returns the Keccak-256 hash of the RLP encoding of x.
func RlpHash(x interface{}) (h common.Hash) {
	hasher := sha3.NewKeccak256()
	// NOTE(review): rlp.Encode's error is ignored here; an un-encodable
	// value yields the hash of whatever bytes were written so far.
	rlp.Encode(hasher, x)
	hasher.Sum(h[:0])
	return h
}
// Sha256Hash returns the SHA-256 digest of input as a common.Hash.
func Sha256Hash(input []byte) common.Hash {
	sum := sha256.Sum256(input)
	return common.BytesToHash(sum[:])
}
// Md5 returns the lowercase hex-encoded MD5 digest of data.
func Md5(data []byte) string {
	sum := md5.Sum(data)
	return fmt.Sprintf("%x", sum)
}
// PrintHexUseComma logs data as a comma-separated list of 0x-prefixed hex
// bytes with a trailing comma, e.g.
// "0xed, 0x88, 0x60, ... , 0x52,".
// An empty slice logs a single ",", matching the previous behavior.
func PrintHexUseComma(data []byte) {
	// Fixed: the old implementation rebuilt the whole string with
	// fmt.Sprintf on every iteration (O(n^2)); strings.Builder appends
	// in amortized O(1).
	var b strings.Builder
	for i, item := range data {
		if i > 0 {
			b.WriteString(", ")
		}
		fmt.Fprintf(&b, "0x%02x", item)
	}
	b.WriteString(",")
	zap.Sugar.Info(b.String())
}
// Bytes32_4Hash returns the Keccak-256 hash of the four 32-byte values
// concatenated in order.
func Bytes32_4Hash(input [4][32]byte) common.Hash {
	parts := make([][]byte, 0, len(input))
	for i := range input {
		parts = append(parts, input[i][:])
	}
	return crypto.Keccak256Hash(parts...)
}
|
package main
/*
* @lc app=leetcode id=46 lang=golang
*
* [46] Permutations
*/
// Solution: selection with a used[] bookkeeping array. path[depth] holds
// the element chosen at each depth; when the path is full it is copied
// into the result set.
func permute(nums []int) [][]int {
	res := make([][]int, 0, len(nums))
	helper_46_2(&res, nums, make([]int, len(nums)), 0, make([]bool, len(nums)))
	return res
}

// helper_46_2 fills path[depth:] with every permutation of the not-yet-used
// elements of nums, appending each completed path to *res.
func helper_46_2(res *[][]int, nums, path []int, depth int, used []bool) {
	if depth == len(nums) {
		// copy path before storing it: the backing array is mutated by
		// later recursion.
		snapshot := make([]int, len(path))
		copy(snapshot, path)
		*res = append(*res, snapshot)
		return
	}
	for idx, taken := range used {
		if taken {
			continue
		}
		used[idx] = true
		path[depth] = nums[idx]
		helper_46_2(res, nums, path, depth+1, used)
		used[idx] = false // backtrack
	}
}
// Solution 1: swap-based generation — at each depth, swap every remaining
// element into position `depth`, recurse, then swap back.
func permute_Solution1(nums []int) [][]int {
	res := make([][]int, 0, len(nums))
	helper_46(&res, nums, 0)
	return res
}

// helper_46 generates all permutations of nums[depth:] in place, appending
// each full arrangement of nums to *res.
func helper_46(res *[][]int, nums []int, depth int) {
	if depth == len(nums) {
		snapshot := make([]int, len(nums))
		copy(snapshot, nums)
		*res = append(*res, snapshot)
		return
	}
	for i := depth; i < len(nums); i++ {
		swap_46(&nums[depth], &nums[i])
		helper_46(res, nums, depth+1)
		swap_46(&nums[depth], &nums[i]) // restore order before next pick
	}
}

// swap_46 exchanges the two ints pointed to by a and b.
func swap_46(a, b *int) {
	*a, *b = *b, *a
}
|
package light
import (
"github.com/calbim/ray-tracer/src/color"
"github.com/calbim/ray-tracer/src/tuple"
)
// Light represents a light source of a given intensity at a position.
type Light struct {
	Intensity color.Color // color/intensity of the emitted light
	Position  tuple.Tuple // location of the light (presumably a point tuple — confirm with callers)
}
// PointLight constructs a Light located at point p with intensity i.
func PointLight(p tuple.Tuple, i color.Color) Light {
	return Light{Position: p, Intensity: i}
}
|
package handler
import (
"net/http"
"github.com/teejays/clog"
"github.com/teejays/n-factor-vault/backend/library/go-api"
"github.com/teejays/n-factor-vault/backend/library/id"
"github.com/teejays/n-factor-vault/backend/src/totp"
)
// CreateAccountRequest is the JSON body expected when creating a TOTP
// account: a display name and the TOTP shared secret (private key).
type CreateAccountRequest struct {
	Name       string
	PrivateKey string
}
// HandleCreateTOTPAccount (POST) creates a new TOTP account from the JSON
// request body and responds 201 with the new account's ID.
func HandleCreateTOTPAccount(w http.ResponseWriter, r *http.Request) {
	var body CreateAccountRequest
	err := api.UnmarshalJSONFromRequest(r, &body)
	if err != nil {
		api.WriteError(w, http.StatusBadRequest, err, false, nil)
		return
	}

	// Translate the transport payload into the totp service request
	// (the secret travels as a string but the service wants bytes).
	var req = totp.CreateAccountRequest{
		Name:       body.Name,
		PrivateKey: []byte(body.PrivateKey),
	}

	// Create the TOTP account
	a, err := totp.CreateAccount(req)
	if err != nil {
		api.WriteError(w, http.StatusBadRequest, err, false, nil)
		return
	}

	api.WriteResponse(w, http.StatusCreated, a.ID)
}
// HandleTOTPGetCode (GET) returns the current TOTP code for the account
// identified by the totp_account_id URL parameter.
func HandleTOTPGetCode(w http.ResponseWriter, r *http.Request) {
	var req totp.GetCodeRequest

	// TODO: Verify that the requesting user has access to this TOTP account
	// AND the user has been approved by peers to access the code

	// Get the ID of the TOTP account for which we need the code
	accountID, err := api.GetMuxParamStr(r, "totp_account_id")
	if err != nil {
		api.WriteError(w, http.StatusBadRequest, err, false, nil)
		return
	}
	req.AccountID, err = id.StrToID(accountID)
	if err != nil {
		api.WriteError(w, http.StatusBadRequest, err, false, nil)
		return
	}

	// Fetch/compute the code for the account
	code, err := totp.GetCode(req)
	if err != nil {
		api.WriteError(w, http.StatusInternalServerError, err, true, nil)
		return
	}

	clog.Debugf("%s: HandleGetCode(): returning:\n%+v", "HandleTOTPGetCode", code)
	api.WriteResponse(w, http.StatusOK, code)
}
|
package goSolution
import (
"reflect"
"runtime/debug"
"strconv"
"strings"
"testing"
"unicode/utf8"
)
// AssertEqual fails the test (printing a stack trace) when a is not deeply
// equal to b; b is the expected value, a the actual one.
func AssertEqual(t *testing.T, b interface{}, a interface{}) {
	if !reflect.DeepEqual(a, b) {
		debug.PrintStack()
		t.Errorf("Received %v (type %v), expected %v (type %v)", a, reflect.TypeOf(a), b, reflect.TypeOf(b))
	}
}
// max returns the largest of its arguments; panics when called with none.
func max(vars ...int) int {
	best := vars[0]
	for _, v := range vars[1:] {
		if v > best {
			best = v
		}
	}
	return best
}
// min returns the smallest of its arguments; panics when called with none.
func min(vars ...int) int {
	best := vars[0]
	for _, v := range vars[1:] {
		if v < best {
			best = v
		}
	}
	return best
}
// ReverseString reverses s rune-by-rune, keeping multi-byte UTF-8
// sequences intact.
func ReverseString(s string) string {
	n := len(s)
	rev := make([]byte, n)
	read := 0
	for read < n {
		r, width := utf8.DecodeRuneInString(s[read:])
		read += width
		// The rune occupied bytes [read-width, read) of s; mirror it so it
		// ends exactly `read` bytes before the end of the output.
		utf8.EncodeRune(rev[n-read:], r)
	}
	return string(rev)
}
// CompareStringAsInt compares two non-negative decimal strings (equal-width
// or without leading zeros) numerically: the shorter string is smaller,
// equal lengths fall back to lexicographic order. Returns -1, 0 or 1.
func CompareStringAsInt(x, y string) int {
	switch {
	case len(x) < len(y):
		return -1
	case len(x) > len(y):
		return 1
	default:
		return strings.Compare(x, y)
	}
}
// IsPalindrome reports whether the byte sequence s reads the same forwards
// and backwards (byte-wise, not rune-wise).
func IsPalindrome(s string) bool {
	for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
		if s[i] != s[j] {
			return false
		}
	}
	return true
}
// sum returns the total of all elements of a (0 for an empty/nil slice).
func sum(a []int) int {
	total := 0
	for i := range a {
		total += a[i]
	}
	return total
}
// GetPrefixSum returns the prefix-sum array of a: ret[0] = 0 and
// ret[i+1] = a[0] + ... + a[i], so the sum of a[l:r] is ret[r]-ret[l].
func GetPrefixSum(a []int) []int {
	ret := make([]int, len(a)+1)
	for i := range a {
		ret[i+1] = ret[i] + a[i]
	}
	return ret
}
// GetLastBit returns the lowest set bit of x (0 when x == 0).
// x & -x isolates the lowest set bit in two's complement, equivalent to
// the x ^ (x & (x-1)) form.
func GetLastBit(x int) int {
	return x & (-x)
}
// LG2MAP maps a power of two 1<<k to k+1 (offset so that a missing key,
// which reads as 0, makes lg2 return -1). Fill it via initLg2Map.
var LG2MAP = make(map[int]int)

// DX and DY enumerate the 8 neighbor offsets of a grid cell
// (4 orthogonal first, then 4 diagonal).
var DX = []int{1, -1, 0, 0, 1, 1, -1, -1}
var DY = []int{0, 0, 1, -1, 1, -1, 1, -1}

// initLg2Map fills LG2MAP for exponents 0..n-1.
func initLg2Map(n int) {
	for i, p := 0, 1; i < n; i, p = i+1, p<<1 {
		LG2MAP[p] = i + 1
	}
}

// lg2 returns log2(x) for a power of two covered by initLg2Map,
// and -1 for any other value.
func lg2(x int) int {
	return LG2MAP[x] - 1
}
// Initialize2DIntSlice returns an n x m matrix with every cell set to v.
func Initialize2DIntSlice(n, m, v int) [][]int {
	ret := make([][]int, n)
	for i := range ret {
		row := make([]int, m)
		for j := range row {
			row[j] = v
		}
		ret[i] = row
	}
	return ret
}
// Initialize2DBoolSlice returns an n x m matrix with every cell set to v.
func Initialize2DBoolSlice(n, m int, v bool) [][]bool {
	ret := make([][]bool, n)
	for i := range ret {
		row := make([]bool, m)
		for j := range row {
			row[j] = v
		}
		ret[i] = row
	}
	return ret
}
// IsNumeric reports whether x parses as a signed base-10 64-bit integer.
// Note that values overflowing int64 and float literals are rejected.
func IsNumeric(x string) bool {
	// Fixed idiom: return the boolean directly instead of
	// if err != nil { return false } return true.
	_, err := strconv.ParseInt(x, 10, 64)
	return err == nil
}
const MODULO = 1000000007
|
package main
import (
"bytes"
"fmt"
"regexp"
"strings"
corev2 "github.com/sensu/sensu-go/api/core/v2"
"github.com/sensu/sensu-plugins-go-library/sensu"
"github.com/bluele/slack"
)
// HandlerConfig holds all runtime configuration for the handler. Fields are
// populated from CLI flags / environment variables through the Value
// pointers declared in slackConfigOptions.
type HandlerConfig struct {
	sensu.PluginConfig
	SlackWebhookUrl          string // destination Slack webhook URL
	SlackChannel             string // channel to post to
	SlackUsername            string // username messages are sent as
	SlackIconUrl             string // avatar image URL
	redactMatch              string // regex matching label keys whose values get redacted
	redact                   bool   // master switch for label redaction
	SlackIncludeCheckLabels  bool   // append check labels to the attachment
	SlackIncludeEntityLabels bool   // append entity labels to the attachment
}
// Names of the CLI flags (also used as config keyspace paths).
const (
	webHookUrl      = "webhook-url"
	channel         = "channel"
	userName        = "username"
	iconUrl         = "icon-url"
	incCheckLabels  = "include-check-labels"
	incEntityLabels = "include-entity-labels"
	redactMatch     = "redact-match"
	redact          = "redact"
)
var (
	// config is the plugin configuration, populated from flags/environment
	// variables by the sensu plugin framework via slackConfigOptions.
	config = HandlerConfig{
		PluginConfig: sensu.PluginConfig{
			Name:     "sensu-slack-handler",
			Short:    "The Sensu Go Slack handler for notifying a channel",
			Timeout:  10,
			Keyspace: "sensu.io/plugins/slack/config",
		},
	}

	// slackConfigOptions declares every CLI flag / environment variable and
	// binds each to the matching field of config.
	slackConfigOptions = []*sensu.PluginConfigOption{
		{
			Path:      webHookUrl,
			Env:       "SENSU_SLACK_WEBHOOK_URL",
			Argument:  webHookUrl,
			Shorthand: "w",
			Default:   "",
			Usage:     "The webhook url to send messages to, defaults to value of SLACK_WEBHOOK_URL env variable",
			Value:     &config.SlackWebhookUrl,
		},
		{
			Path:      redactMatch,
			Env:       "SENSU_SLACK_REDACTMATCH",
			Argument:  redactMatch,
			Shorthand: "m",
			Default:   "(?i).*(pass|key).*",
			Usage:     "Regex to redact values of matching labels",
			Value:     &config.redactMatch,
		},
		{
			Path: redact,
			// Bug fix: this option previously reused SENSU_SLACK_REDACTMATCH
			// as its Env, colliding with the redact-match option above and
			// making the boolean impossible to set via its own variable.
			// checkArgs' error message already refers to SENSU_SLACK_REDACT.
			Env:       "SENSU_SLACK_REDACT",
			Argument:  redact,
			Shorthand: "r",
			Default:   false,
			Usage:     "Enable redaction of labels",
			Value:     &config.redact,
		},
		{
			Path:      channel,
			Env:       "SENSU_SLACK_CHANNEL",
			Argument:  channel,
			Shorthand: "c",
			Default:   "#general",
			Usage:     "The channel to post messages to",
			Value:     &config.SlackChannel,
		},
		{
			Path:      userName,
			Env:       "SENSU_SLACK_USERNAME",
			Argument:  userName,
			Shorthand: "u",
			Default:   "sensu",
			Usage:     "The username that messages will be sent as",
			Value:     &config.SlackUsername,
		},
		{
			Path:      iconUrl,
			Env:       "SENSU_SLACK_ICON_URL",
			Argument:  iconUrl,
			Shorthand: "i",
			Default:   "http://s3-us-west-2.amazonaws.com/sensuapp.org/sensu.png",
			Usage:     "A URL to an image to use as the user avatar",
			Value:     &config.SlackIconUrl,
		},
		{
			Path:      incCheckLabels,
			Env:       "SENSU_SLACK_INCLUDE_CHECK_LABELS",
			Argument:  incCheckLabels,
			Shorthand: "l",
			Default:   false,
			Usage:     "Include check labels in slack message?",
			Value:     &config.SlackIncludeCheckLabels,
		},
		{
			Path:      incEntityLabels,
			Env:       "SENSU_SLACK_INCLUDE_ENTITY_LABELS",
			Argument:  incEntityLabels,
			Shorthand: "e",
			Default:   false,
			Usage:     "Include entity labels in slack message?",
			Value:     &config.SlackIncludeEntityLabels,
		},
	}
)
// main hands the plugin config and options to the sensu handler runtime,
// which parses flags, runs checkArgs, then sendMessage for each event.
func main() {
	goHandler := sensu.NewGoHandler(&config.PluginConfig, slackConfigOptions, checkArgs, sendMessage)
	goHandler.Execute()
}
// checkArgs validates the resolved configuration before any event is
// handled: a webhook URL must be set and the redaction regex must compile.
func checkArgs(_ *corev2.Event) error {
	if len(config.SlackWebhookUrl) == 0 {
		return fmt.Errorf("--webhook-url or SENSU_SLACK_WEBHOOK_URL environment variable is required")
	}
	// Fixed idiom: validate with regexp.Compile and check the error instead
	// of the previous MustCompile + defer/recover dance.
	if _, err := regexp.Compile(config.redactMatch); err != nil {
		return fmt.Errorf("regexp (%s) specified by SENSU_SLACK_REDACT or --redact is invalid", config.redactMatch)
	}
	return nil
}
// formattedEventAction maps a check status to the headline word:
// "RESOLVED" for status 0, "ALERT" for everything else.
func formattedEventAction(event *corev2.Event) string {
	if event.Check.Status == 0 {
		return "RESOLVED"
	}
	return "ALERT"
}
// chomp strips all leading and trailing CR/LF characters from s.
func chomp(s string) string {
	// A single Trim with the "\r\n" cutset subsumes the previous chain of
	// three Trim calls (cutsets "\n", then "\r", then "\r\n").
	return strings.Trim(s, "\r\n")
}
// eventKey returns the canonical "entity/check" identifier of an event.
func eventKey(event *corev2.Event) string {
	return event.Entity.Name + "/" + event.Check.Name
}
// eventSummary returns "entity/check:output", with the chomped check output
// truncated to maxLength bytes plus a "..." marker.
func eventSummary(event *corev2.Event, maxLength int) string {
	output := chomp(event.Check.Output)
	// Bug fix: measure the chomped string that is actually sliced. The old
	// code compared len(event.Check.Output); when trailing CR/LFs made the
	// raw output longer than maxLength while the chomped output was
	// shorter, output[0:maxLength] panicked with an out-of-range slice.
	// (Note: byte slicing may still split a multi-byte UTF-8 rune.)
	if len(output) > maxLength {
		output = output[0:maxLength] + "..."
	}
	return fmt.Sprintf("%s:%s", eventKey(event), output)
}
// formattedMessage builds the one-line fallback message: "ACTION - summary".
func formattedMessage(event *corev2.Event) string {
	action := formattedEventAction(event)
	summary := eventSummary(event, 100)
	return fmt.Sprintf("%s - %s", action, summary)
}
// attachCheckLabels appends a "Check Labels" field listing every check label
// as a "k=v" line, replacing values whose keys match the configured
// redaction regex (when redaction is enabled).
func attachCheckLabels(event *corev2.Event, attachment *slack.Attachment, config HandlerConfig) {
	// Check for labels before compiling the regex: with no labels there is
	// nothing to do, so skip the comparatively expensive MustCompile.
	if event.Check.Labels == nil {
		return
	}
	re := regexp.MustCompile(config.redactMatch)
	buf := bytes.Buffer{}
	for k, v := range event.Check.Labels {
		if config.redact && re.MatchString(k) {
			v = "**REDACTED**"
		}
		fmt.Fprintf(&buf, "%s=%s\n", k, v)
	}
	attachment.Fields = append(attachment.Fields, &slack.AttachmentField{
		Title: "Check Labels",
		Value: buf.String(),
		Short: false,
	})
}
// attachEntityLabels appends an "Entity Labels" field listing every entity
// label as a "k=v" line, replacing values whose keys match the configured
// redaction regex (when redaction is enabled).
func attachEntityLabels(event *corev2.Event, attachment *slack.Attachment, config HandlerConfig) {
	// Check for labels before compiling the regex: with no labels there is
	// nothing to do, so skip the comparatively expensive MustCompile.
	if event.Entity.Labels == nil {
		return
	}
	re := regexp.MustCompile(config.redactMatch)
	buf := bytes.Buffer{}
	for k, v := range event.Entity.Labels {
		if config.redact && re.MatchString(k) {
			v = "**REDACTED**"
		}
		fmt.Fprintf(&buf, "%s=%s\n", k, v)
	}
	attachment.Fields = append(attachment.Fields, &slack.AttachmentField{
		Title: "Entity Labels",
		Value: buf.String(),
		Short: false,
	})
}
// messageColor picks the Slack attachment color for a check status:
// "good" when resolved (0), "danger" for critical (2), "warning" otherwise.
func messageColor(event *corev2.Event) string {
	if event.Check.Status == 0 {
		return "good"
	}
	if event.Check.Status == 2 {
		return "danger"
	}
	return "warning"
}
// messageStatus renders a check status as human-readable text:
// "Resolved" (0), "Critical" (2), "Warning" for anything else.
func messageStatus(event *corev2.Event) string {
	if event.Check.Status == 0 {
		return "Resolved"
	}
	if event.Check.Status == 2 {
		return "Critical"
	}
	return "Warning"
}
// messageAttachment builds the Slack attachment for an event: the check
// output as description, a severity-based color, Status/Entity/Check
// fields, and (per the package-level config flags) the entity and check
// labels.
func messageAttachment(event *corev2.Event) *slack.Attachment {
	attachment := &slack.Attachment{
		Title:    "Description",
		Text:     event.Check.Output,
		Fallback: formattedMessage(event),
		Color:    messageColor(event),
		Fields: []*slack.AttachmentField{
			{
				Title: "Status",
				Value: messageStatus(event),
				Short: false,
			},
			{
				Title: "Entity",
				Value: event.Entity.Name,
				Short: true,
			},
			{
				Title: "Check",
				Value: event.Check.Name,
				Short: true,
			},
		},
	}
	// Optional label sections, gated by the include-*-labels flags.
	if config.SlackIncludeEntityLabels {
		attachEntityLabels(event, attachment, config)
	}
	if config.SlackIncludeCheckLabels {
		attachCheckLabels(event, attachment, config)
	}
	return attachment
}
// sendMessage posts the formatted event attachment to the configured Slack
// webhook, using the channel/icon/username from the package-level config.
func sendMessage(event *corev2.Event) error {
	hook := slack.NewWebHook(config.SlackWebhookUrl)
	return hook.PostMessage(&slack.WebHookPostPayload{
		Attachments: []*slack.Attachment{messageAttachment(event)},
		Channel:     config.SlackChannel,
		IconUrl:     config.SlackIconUrl,
		Username:    config.SlackUsername,
	})
}
|
package models
import "time"
// SysLog is the GORM model for a system/audit log entry.
type SysLog struct {
	ID              int       `gorm:"primary_key" json:"id"` // log id
	UserId          int       `json:"user_id"`               // id of the acting user
	Description     string    `json:"description"`           // description
	LogType         int       `json:"log_type"`              // log type
	Method          string    `json:"method"`                // method name
	Params          string    `json:"params"`                // parameters
	RequestIp       string    `json:"request_ip"`            // request ip
	RequestTime     int       `json:"request_time"`          // request duration (milliseconds)
	Address         string    `json:"address"`               // address
	Browser         string    `json:"browser"`               // browser
	ExceptionDetail string    `json:"exception_detail"`      // detailed exception info
	CreateBy        int       `json:"create_by"`             // creator user id
	UpdateBy        int       `json:"update_by"`             // last updater user id
	CreateTime      time.Time `json:"create_time"`           // creation time
	UpdateTime      time.Time `json:"update_time"`           // update time
	IsDeleted       []byte    `json:"is_deleted"`            // soft-delete flag (0 = live, 1 = deleted); []byte presumably maps a BIT column — TODO confirm
}
|
package main
import (
"bufio"
"flag"
"fmt"
"io"
"os"
"sync"
"github.com/fatih/color"
"golang.org/x/net/websocket"
)
// Current version number
const Version = "0.0.0"
var (
	origin         string // Origin header for the WebSocket handshake
	url            string // server address to dial
	protocol       string // optional WebSocket subprotocol
	displayHelp    bool   // print usage and exit
	displayVersion bool   // print version and exit

	// colorized sprint helpers for terminal output
	red     = color.New(color.FgRed).SprintFunc()
	magenta = color.New(color.FgMagenta).SprintFunc()
	green   = color.New(color.FgGreen).SprintFunc()
	yellow  = color.New(color.FgYellow).SprintFunc()
	cyan    = color.New(color.FgCyan).SprintFunc()

	// NOTE(review): wg is Add(3)'d in main but no goroutine calls Done.
	wg sync.WaitGroup
)

// init registers all command-line flags.
func init() {
	flag.StringVar(&origin, "origin", "http://localhost/", "origin of WebSocket client")
	flag.StringVar(&url, "url", "ws://localhost:1337/ws", "WebSocket server address to connect to")
	flag.StringVar(&protocol, "protocol", "", "WebSocket subprotocol")
	flag.BoolVar(&displayHelp, "help", false, "Display help information about wsd")
	flag.BoolVar(&displayVersion, "version", false, "Display version number")
}
// inLoop continuously reads frames from the WebSocket and forwards each
// payload on the in channel; read errors go to the errors channel.
func inLoop(ws *websocket.Conn, errors chan<- error, in chan<- []byte) {
	buf := make([]byte, 512)
	for {
		n, err := ws.Read(buf)
		if err != nil {
			errors <- err
			continue
		}
		// Bug fix: copy the payload out of the shared read buffer. The old
		// code sent msg[:n], which aliases the buffer that the next Read
		// overwrites — a data race with the receiving goroutine.
		msg := make([]byte, n)
		copy(msg, buf[:n])
		in <- msg
	}
}
func printErrors(errors <-chan error) {
for err := range errors {
if err == io.EOF {
fmt.Printf("\r✝ %v - connection closed by remote\n", magenta(err))
os.Exit(0)
} else {
fmt.Printf("\rerr %v\n> ", red(err))
}
}
}
// printReceivedMessages echoes every incoming payload with a "<" marker
// and re-prints the input prompt.
func printReceivedMessages(in <-chan []byte) {
	for msg := range in {
		text := cyan(string(msg))
		fmt.Printf("\r< %s\n> ", text)
	}
}
// outLoop writes every payload from out to the WebSocket, reporting write
// failures on the errors channel.
func outLoop(ws *websocket.Conn, out <-chan []byte, errors chan<- error) {
	for payload := range out {
		if _, err := ws.Write(payload); err != nil {
			errors <- err
		}
	}
}
// main parses flags, dials the WebSocket server, and bridges stdin to the
// socket: typed lines are sent, received frames and errors are printed.
func main() {
	flag.Parse()

	if displayVersion {
		fmt.Fprintf(os.Stdout, "%s version %s\n", os.Args[0], Version)
		os.Exit(0)
	}

	if displayHelp {
		fmt.Fprintf(os.Stdout, "Usage of %s:\n", os.Args[0])
		flag.PrintDefaults()
		os.Exit(0)
	}

	ws, err := websocket.Dial(url, protocol, origin)
	if err != nil {
		fmt.Println(red("Connection Error:"), err.Error())
		os.Exit(0)
	}
	if protocol != "" {
		fmt.Println(yellow("Connecting to:"), url, "via", protocol, "from", origin, "...")
	} else {
		fmt.Println(yellow("Connecting to:"), url, "from", origin, "...")
	}

	defer ws.Close()

	fmt.Println(green("Successfully connected\n"))

	// NOTE(review): wg.Add(3) has no matching wg.Done calls in the worker
	// goroutines, so wg.Wait() below blocks forever once stdin is
	// exhausted; the process effectively only exits via os.Exit in
	// printErrors. Confirm whether this is intended before changing it.
	wg.Add(3)

	errors := make(chan error)
	in := make(chan []byte)
	out := make(chan []byte)
	defer close(errors)
	defer close(out)
	defer close(in)

	go inLoop(ws, errors, in)
	go printReceivedMessages(in)
	go printErrors(errors)
	go outLoop(ws, out, errors)

	// Forward each stdin line to the socket, re-printing the prompt.
	scanner := bufio.NewScanner(os.Stdin)
	fmt.Print("> ")
	for scanner.Scan() {
		out <- []byte(scanner.Text())
		fmt.Print("> ")
	}

	wg.Wait()
}
|
// Copyright 2022 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
alphapb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/vertex/alpha/vertex_alpha_go_proto"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/vertex/alpha"
)
// Machine-generated DCL gRPC bindings; comments below are aligned with the
// actual function names (several were stale).

// ModelDeploymentServer implements the gRPC interface for ModelDeployment.
type ModelDeploymentServer struct{}

// ProtoToVertexAlphaModelDeploymentDedicatedResources converts a ModelDeploymentDedicatedResources object from its proto representation.
func ProtoToVertexAlphaModelDeploymentDedicatedResources(p *alphapb.VertexAlphaModelDeploymentDedicatedResources) *alpha.ModelDeploymentDedicatedResources {
	if p == nil {
		return nil
	}
	obj := &alpha.ModelDeploymentDedicatedResources{
		MachineSpec:     ProtoToVertexAlphaModelDeploymentDedicatedResourcesMachineSpec(p.GetMachineSpec()),
		MinReplicaCount: dcl.Int64OrNil(p.GetMinReplicaCount()),
		MaxReplicaCount: dcl.Int64OrNil(p.GetMaxReplicaCount()),
	}
	return obj
}

// ProtoToVertexAlphaModelDeploymentDedicatedResourcesMachineSpec converts a ModelDeploymentDedicatedResourcesMachineSpec object from its proto representation.
func ProtoToVertexAlphaModelDeploymentDedicatedResourcesMachineSpec(p *alphapb.VertexAlphaModelDeploymentDedicatedResourcesMachineSpec) *alpha.ModelDeploymentDedicatedResourcesMachineSpec {
	if p == nil {
		return nil
	}
	obj := &alpha.ModelDeploymentDedicatedResourcesMachineSpec{
		MachineType: dcl.StringOrNil(p.GetMachineType()),
	}
	return obj
}

// ProtoToModelDeployment converts a ModelDeployment resource from its proto representation.
// NOTE(review): unlike the object converters above there is no nil guard;
// generated callers appear to always pass a resource — confirm.
func ProtoToModelDeployment(p *alphapb.VertexAlphaModelDeployment) *alpha.ModelDeployment {
	obj := &alpha.ModelDeployment{
		Model:              dcl.StringOrNil(p.GetModel()),
		Id:                 dcl.StringOrNil(p.GetId()),
		DedicatedResources: ProtoToVertexAlphaModelDeploymentDedicatedResources(p.GetDedicatedResources()),
		Endpoint:           dcl.StringOrNil(p.GetEndpoint()),
		Location:           dcl.StringOrNil(p.GetLocation()),
		Project:            dcl.StringOrNil(p.GetProject()),
	}
	return obj
}
// VertexAlphaModelDeploymentDedicatedResourcesToProto converts a ModelDeploymentDedicatedResources object to its proto representation.
func VertexAlphaModelDeploymentDedicatedResourcesToProto(o *alpha.ModelDeploymentDedicatedResources) *alphapb.VertexAlphaModelDeploymentDedicatedResources {
	if o == nil {
		return nil
	}
	p := &alphapb.VertexAlphaModelDeploymentDedicatedResources{}
	p.SetMachineSpec(VertexAlphaModelDeploymentDedicatedResourcesMachineSpecToProto(o.MachineSpec))
	p.SetMinReplicaCount(dcl.ValueOrEmptyInt64(o.MinReplicaCount))
	p.SetMaxReplicaCount(dcl.ValueOrEmptyInt64(o.MaxReplicaCount))
	return p
}

// VertexAlphaModelDeploymentDedicatedResourcesMachineSpecToProto converts a ModelDeploymentDedicatedResourcesMachineSpec object to its proto representation.
func VertexAlphaModelDeploymentDedicatedResourcesMachineSpecToProto(o *alpha.ModelDeploymentDedicatedResourcesMachineSpec) *alphapb.VertexAlphaModelDeploymentDedicatedResourcesMachineSpec {
	if o == nil {
		return nil
	}
	p := &alphapb.VertexAlphaModelDeploymentDedicatedResourcesMachineSpec{}
	p.SetMachineType(dcl.ValueOrEmptyString(o.MachineType))
	return p
}

// ModelDeploymentToProto converts a ModelDeployment resource to its proto representation.
func ModelDeploymentToProto(resource *alpha.ModelDeployment) *alphapb.VertexAlphaModelDeployment {
	p := &alphapb.VertexAlphaModelDeployment{}
	p.SetModel(dcl.ValueOrEmptyString(resource.Model))
	p.SetId(dcl.ValueOrEmptyString(resource.Id))
	p.SetDedicatedResources(VertexAlphaModelDeploymentDedicatedResourcesToProto(resource.DedicatedResources))
	p.SetEndpoint(dcl.ValueOrEmptyString(resource.Endpoint))
	p.SetLocation(dcl.ValueOrEmptyString(resource.Location))
	p.SetProject(dcl.ValueOrEmptyString(resource.Project))
	return p
}
// applyModelDeployment handles the gRPC request by passing it to the underlying ModelDeployment Apply() method.
func (s *ModelDeploymentServer) applyModelDeployment(ctx context.Context, c *alpha.Client, request *alphapb.ApplyVertexAlphaModelDeploymentRequest) (*alphapb.VertexAlphaModelDeployment, error) {
	p := ProtoToModelDeployment(request.GetResource())
	res, err := c.ApplyModelDeployment(ctx, p)
	if err != nil {
		return nil, err
	}
	r := ModelDeploymentToProto(res)
	return r, nil
}

// ApplyVertexAlphaModelDeployment handles the gRPC request by passing it to the underlying ModelDeployment Apply() method.
func (s *ModelDeploymentServer) ApplyVertexAlphaModelDeployment(ctx context.Context, request *alphapb.ApplyVertexAlphaModelDeploymentRequest) (*alphapb.VertexAlphaModelDeployment, error) {
	cl, err := createConfigModelDeployment(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return s.applyModelDeployment(ctx, cl, request)
}

// DeleteVertexAlphaModelDeployment handles the gRPC request by passing it to the underlying ModelDeployment Delete() method.
func (s *ModelDeploymentServer) DeleteVertexAlphaModelDeployment(ctx context.Context, request *alphapb.DeleteVertexAlphaModelDeploymentRequest) (*emptypb.Empty, error) {
	cl, err := createConfigModelDeployment(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return &emptypb.Empty{}, cl.DeleteModelDeployment(ctx, ProtoToModelDeployment(request.GetResource()))
}

// ListVertexAlphaModelDeployment handles the gRPC request by passing it to the underlying ModelDeploymentList() method.
func (s *ModelDeploymentServer) ListVertexAlphaModelDeployment(ctx context.Context, request *alphapb.ListVertexAlphaModelDeploymentRequest) (*alphapb.ListVertexAlphaModelDeploymentResponse, error) {
	cl, err := createConfigModelDeployment(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	resources, err := cl.ListModelDeployment(ctx, request.GetProject(), request.GetLocation(), request.GetEndpoint())
	if err != nil {
		return nil, err
	}
	// Convert each listed resource back to its proto form for the response.
	var protos []*alphapb.VertexAlphaModelDeployment
	for _, r := range resources.Items {
		rp := ModelDeploymentToProto(r)
		protos = append(protos, rp)
	}
	p := &alphapb.ListVertexAlphaModelDeploymentResponse{}
	p.SetItems(protos)
	return p, nil
}
// createConfigModelDeployment builds an alpha API client authenticated with
// the given service account file. The error return is always nil today but
// kept for interface symmetry with the generated callers.
func createConfigModelDeployment(ctx context.Context, serviceAccountFile string) (*alpha.Client, error) {
	// Fixed: parameter renamed from snake_case (service_account_file) to
	// Go-idiomatic mixedCaps; parameter names are not part of the call
	// interface, so callers are unaffected.
	conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(serviceAccountFile))
	return alpha.NewClient(conf), nil
}
|
package handlers
import (
"encoding/json"
"io"
"net/http"
"github.com/root-gg/plik/server/common"
"github.com/root-gg/plik/server/context"
)
// LoginParams is the JSON body POSTed by clients to authenticate a local user.
type LoginParams struct {
	Login    string `json:"login"`
	Password string `json:"password"`
}
// LocalLogin handler to authenticate local users.
// It validates the posted login/password against the metadata backend and,
// on success, sets the Plik session and XSRF cookies and writes "ok".
func LocalLogin(ctx *context.Context, resp http.ResponseWriter, req *http.Request) {
	config := ctx.GetConfig()
	if config.FeatureAuthentication == common.FeatureDisabled {
		ctx.BadRequest("authentication is disabled")
		return
	}

	// Read request body (capped at 1 MiB)
	defer func() { _ = req.Body.Close() }()
	req.Body = http.MaxBytesReader(resp, req.Body, 1048576)
	body, err := io.ReadAll(req.Body)
	if err != nil {
		ctx.BadRequest("unable to read request body : %s", err)
		return
	}

	loginParams := &LoginParams{}
	err = json.Unmarshal(body, loginParams)
	if err != nil {
		ctx.BadRequest("unable to deserialize request body : %s", err)
		return
	}

	if loginParams.Login == "" {
		ctx.MissingParameter("login")
		return
	}
	if loginParams.Password == "" {
		ctx.MissingParameter("password")
		return
	}

	// Get user from metadata backend
	user, err := ctx.GetMetadataBackend().GetUser(common.GetUserID(common.ProviderLocal, loginParams.Login))
	if err != nil {
		ctx.InternalServerError("unable to get user from metadata backend", err)
		return
	}

	// Same message for "unknown user" and "bad password" so the endpoint
	// does not leak which logins exist.
	if user == nil {
		ctx.Forbidden("invalid credentials")
		return
	}
	if !common.CheckPasswordHash(loginParams.Password, user.Password) {
		ctx.Forbidden("invalid credentials")
		return
	}

	// Set Plik session cookie and xsrf cookie
	sessionCookie, xsrfCookie, err := ctx.GetAuthenticator().GenAuthCookies(user)
	if err != nil {
		ctx.InternalServerError("unable to generate session cookies", err)
		// Bug fix: this error path previously fell through, setting nil
		// cookies and writing "ok" after already reporting a 500.
		return
	}
	http.SetCookie(resp, sessionCookie)
	http.SetCookie(resp, xsrfCookie)

	_, _ = resp.Write([]byte("ok"))
}
|
// Copyright (c) 2016, Gerasimos Maropoulos
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse
// or promote products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER AND CONTRIBUTOR, GERASIMOS MAROPOULOS
// BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package pongo
import (
"compress/gzip"
"github.com/flosch/pongo2"
"github.com/kataras/iris/context"
"github.com/kataras/iris/utils"
)
var (
	// buffer is a shared pool of byte buffers used to render templates
	// before copying them to the response; lazily initialized by New.
	buffer *utils.BufferPool
)
type (
	// Config holds the pongo2 engine configuration.
	Config struct {
		Directory string // root directory of the template files
		// Filters for pongo2, map[name of the filter] the filter function . The filters are auto register
		Filters map[string]pongo2.FilterFunction
	}

	// Engine renders pongo2 templates for iris.
	Engine struct {
		Config    *Config
		Templates *pongo2.TemplateSet // compiled-template cache
	}
)
// New creates a pongo2-backed Engine with the default configuration:
// templates are read from the "templates" directory and no filters are
// registered. The shared buffer pool is initialised on first use.
func New() *Engine {
	if buffer == nil {
		buffer = utils.NewBufferPool(64)
	}
	cfg := &Config{
		Directory: "templates",
		Filters:   make(map[string]pongo2.FilterFunction, 0),
	}
	return &Engine{Config: cfg}
}
// Execute renders the cached template `name` with `binding` (which must be a
// pongo2.Context) into a pooled buffer and copies the result into the
// response body. The buffer is returned to the pool on both the error and
// the success path.
func (p *Engine) Execute(ctx context.IContext, name string, binding interface{}) error {
	// get the template from cache, I never used pongo2 but I think reading its code helps me to understand that this is the best way to do it with the best performance.
	tmpl, err := p.Templates.FromCache(name)
	if err != nil {
		return err
	}
	// Retrieve a buffer from the pool to write to.
	out := buffer.Get()
	err = tmpl.ExecuteWriter(binding.(pongo2.Context), out)
	if err != nil {
		buffer.Put(out)
		return err
	}
	w := ctx.GetRequestCtx().Response.BodyWriter()
	// NOTE(review): the WriteTo error is discarded; a partial body write
	// would go unnoticed here.
	out.WriteTo(w)
	// Return the buffer to the pool.
	buffer.Put(out)
	return nil
}
// ExecuteGzip renders the named template with the given binding (which must
// be a pongo2.Context, mirroring Execute) and writes the gzip-compressed
// result directly into the response body, tagging the response with a
// "Content-Encoding: gzip" header.
func (p *Engine) ExecuteGzip(ctx context.IContext, name string, binding interface{}) error {
	tmpl, err := p.Templates.FromCache(name)
	if err != nil {
		return err
	}
	// Compress straight into the response body writer.
	out := gzip.NewWriter(ctx.GetRequestCtx().Response.BodyWriter())
	err = tmpl.ExecuteWriter(binding.(pongo2.Context), out)
	if err != nil {
		// Fix: close the gzip writer on the error path too, so its internal
		// resources are released (previously it was leaked here).
		out.Close()
		return err
	}
	// Close flushes the remaining compressed data; a failure here means the
	// body is truncated, so report it instead of ignoring it.
	if err := out.Close(); err != nil {
		return err
	}
	ctx.GetRequestCtx().Response.Header.Add("Content-Encoding", "gzip")
	return nil
}
// BuildTemplates prepares the template set. It is currently an effective
// no-op (both the configured and unconfigured directory cases succeed),
// because templates are loaded lazily through the pongo2 cache; the method
// is kept as a hook for future precompilation work.
func (p *Engine) BuildTemplates() error {
	if p.Config.Directory == "" {
		return nil
	}
	return nil
}
|
package main
import (
"fmt"
"io"
"strings"
)
/*
io.Reader interface is used by lots of go libraries.
it has a .Read() method
strings.NewReader() is one library that's satisfies io.Reader interface
byte-by-byte chunks are read, and io.EOF error marks when stream ends.
*/
// main demonstrates the io.Reader interface: it drains a strings.Reader in
// 8-byte chunks, printing the byte count, error and buffer contents of every
// Read call until the stream is exhausted.
func main() {
	r := strings.NewReader("Hello, Reader!")
	b := make([]byte, 8)
	for {
		n, err := r.Read(b)
		fmt.Printf("n = %v err = %v b = %v\n", n, err, b)
		fmt.Printf("b[:n] = %q\n", b[:n])
		if err == io.EOF {
			break
		}
		if err != nil {
			// Fix: a non-EOF read error also terminates the loop; previously
			// a persistently failing reader would have spun forever.
			break
		}
	}
}
|
package base
import (
"errors"
"fmt"
"gengine/context"
"gengine/internal/core"
"reflect"
"runtime"
"strings"
)
// := or =
// Assignment represents one assignment statement in a rule: a plain variable
// or a map/array element on the left, an assignment operator
// (=, :=, +=, -=, *=, /=) and either a math expression or an expression on
// the right. Exactly one of Variable/MapVar and one of
// MathExpression/Expression is expected to be populated by the parser.
type Assignment struct {
	SourceCode
	Variable string
	MapVar *MapVar
	AssignOperator string
	MathExpression *MathExpression
	Expression *Expression
}
// Evaluate executes the assignment. It first evaluates the right-hand side,
// then, for compound operators (+=, -=, *=, /=), fetches the current
// left-hand-side value and combines the two, and finally stores the result
// into either the named variable or the map/array element.
//
// Any panic raised during evaluation is recovered and converted into an
// error carrying the source position and a size-capped stack trace, with the
// word "panic" rewritten to "error".
//
// Idiom fix: errors.New(fmt.Sprintf(...)) calls were replaced with the
// equivalent fmt.Errorf(...); all format strings are unchanged.
func (a *Assignment) Evaluate(dc *context.DataContext, Vars map[string]reflect.Value) (value reflect.Value, err error) {
	defer func() {
		if e := recover(); e != nil {
			// Capture at most 10 KiB of stack trace.
			size := 1 << 10 * 10
			buf := make([]byte, size)
			rs := runtime.Stack(buf, false)
			if rs > size {
				rs = size
			}
			buf = buf[:rs]
			eMsg := fmt.Sprintf("line %d, column %d, code: %s, %+v \n%s", a.LineNum, a.Column, a.Code, e, string(buf))
			eMsg = strings.ReplaceAll(eMsg, "panic", "error")
			err = errors.New(eMsg)
		}
	}()
	// Evaluate the right-hand side; exactly one of MathExpression /
	// Expression is expected to be set.
	var mv reflect.Value
	if a.MathExpression != nil {
		mv, err = a.MathExpression.Evaluate(dc, Vars)
		if err != nil {
			return reflect.ValueOf(nil), err
		}
	}
	if a.Expression != nil {
		mv, err = a.Expression.Evaluate(dc, Vars)
		if err != nil {
			return reflect.ValueOf(nil), err
		}
	}
	var sv reflect.Value
	// Plain (re)assignment needs no current value; go straight to the store.
	if a.AssignOperator == "=" || a.AssignOperator == ":=" {
		goto END
	}
	// Compound operators read the current left-hand-side value first.
	if len(a.Variable) > 0 {
		sv, err = dc.GetValue(Vars, a.Variable)
		if err != nil {
			return reflect.ValueOf(nil), fmt.Errorf("line %d, column:%d, code: %s, %+v:", a.LineNum, a.Column, a.Code, err)
		}
	}
	if a.MapVar != nil {
		sv, err = a.MapVar.Evaluate(dc, Vars)
		if err != nil {
			return reflect.ValueOf(nil), fmt.Errorf("line %d, column:%d, code: %s, %+v:", a.LineNum, a.Column, a.Code, err)
		}
	}
	if a.AssignOperator == "+=" {
		_mv, err := core.Add(sv, mv)
		if err != nil {
			return reflect.ValueOf(nil), fmt.Errorf("line %d, column:%d, code: %s, %+v:", a.LineNum, a.Column, a.Code, err)
		}
		mv = reflect.ValueOf(_mv)
		goto END
	}
	if a.AssignOperator == "-=" {
		_mv, err := core.Sub(sv, mv)
		if err != nil {
			return reflect.ValueOf(nil), fmt.Errorf("line %d, column:%d, code: %s, %+v:", a.LineNum, a.Column, a.Code, err)
		}
		mv = reflect.ValueOf(_mv)
		goto END
	}
	if a.AssignOperator == "*=" {
		_mv, err := core.Mul(sv, mv)
		if err != nil {
			return reflect.ValueOf(nil), fmt.Errorf("line %d, column:%d, code: %s, %+v:", a.LineNum, a.Column, a.Code, err)
		}
		mv = reflect.ValueOf(_mv)
		goto END
	}
	if a.AssignOperator == "/=" {
		_mv, err := core.Div(sv, mv)
		if err != nil {
			return reflect.ValueOf(nil), fmt.Errorf("line %d, column:%d, code: %s, %+v:", a.LineNum, a.Column, a.Code, err)
		}
		mv = reflect.ValueOf(_mv)
		goto END
	}
END:
	// Store the computed value into the variable or the map/array element.
	if len(a.Variable) > 0 {
		err = dc.SetValue(Vars, a.Variable, mv)
		if err != nil {
			return reflect.ValueOf(nil), fmt.Errorf("line %d, column %d, code: %s, %+v", a.LineNum, a.Column, a.Code, err)
		}
		return
	}
	if a.MapVar != nil {
		err = dc.SetMapVarValue(Vars, a.MapVar.Name, a.MapVar.Strkey, a.MapVar.Varkey, a.MapVar.Intkey, mv)
		if err != nil {
			return reflect.ValueOf(nil), fmt.Errorf("line %d, column:%d, code: %s, %+v:", a.LineNum, a.Column, a.Code, err)
		}
		return
	}
	return
}
// AcceptMathExpression records the right-hand-side math expression; it may
// only be set once per assignment.
func (a *Assignment) AcceptMathExpression(me *MathExpression) error {
	if a.MathExpression != nil {
		return errors.New("MathExpression already set twice! ")
	}
	a.MathExpression = me
	return nil
}
// AcceptVariable records the left-hand-side variable name; it may only be
// set once per assignment.
func (a *Assignment) AcceptVariable(name string) error {
	if len(a.Variable) > 0 {
		return errors.New("Variable already set twice! ")
	}
	a.Variable = name
	return nil
}
// AcceptMapVar records the left-hand-side map/array element; it may only be
// set once per assignment.
func (a *Assignment) AcceptMapVar(mapVar *MapVar) error {
	if a.MapVar != nil {
		return errors.New("MapVar already set twice")
	}
	a.MapVar = mapVar
	return nil
}
// AcceptExpression records the right-hand-side expression; it may only be
// set once per assignment.
func (a *Assignment) AcceptExpression(exp *Expression) error {
	if a.Expression != nil {
		return errors.New("Expression already set twice! ")
	}
	a.Expression = exp
	return nil
}
|
package slow_tasks
import (
"encoding/base32"
ldap_client "github.com/lucabodd/go-ldap-client"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"log"
"context"
. "github.com/lucabodd/maicsd/pkg/utils"
"strings"
)
// User mirrors a document of the Mongo "users" collection; only the fields
// this package reads/writes are mapped.
type User struct {
	Sys_username string `bson:"sys_username"`
	Email string `bson:"email"`
	Role string `bson:"role"`
	// Key_last_unlock is the timestamp of the key's last unlock, consumed by
	// TimeHoursDiff.
	Key_last_unlock string `bson:"key_last_unlock"`
	// PubKey is either a plaintext "ssh-rsa ..." key or its AES-ciphered form.
	PubKey string `bson:"pubKey"`
	Password string `bson:"password"`
	// Otp_secret is the user's base32-encoded OTP secret, reused as AES key
	// material when locking the SSH key.
	Otp_secret string `bson:"otp_secret"`
	PwdChangedTime string `bson:"pwdChangedTime"`
	PwdAccountLockedTime *string `bson:"pwdAccountLockedTime"`
}
//slow tasks
/************************************
Task executed every 10 minutes
*************************************/
// SshKeyExpire locks expired SSH public keys. It scans every Mongo user with
// a non-empty pubKey and, when the key's last unlock is at least
// expirationDelta in the past (units come from TimeHoursDiff — presumably
// hours, confirm), AES-encrypts the plaintext key using the user's
// base32-decoded OTP secret as key material and writes the ciphered value
// back to both MongoDB and LDAP, disabling the key until it is unlocked.
func SshKeyExpire(mdb *mongo.Client, mongo_instance string, ldap *ldap_client.LDAPClient){
	log.Println("[*] Undergoing key expiration procedure")
	log.Println(" |___")
	// vars
	users := mdb.Database(mongo_instance).Collection("users")
	// Age threshold before a key is locked.
	expirationDelta := 9
	// Only fetch the fields this task needs.
	findOptProj := options.Find().SetProjection(bson.M{"sys_username":1, "email":1, "pubKey": 1, "otp_secret":1, "key_last_unlock":1})
	// Users with a non-empty public key are candidates for expiration.
	cur, err := users.Find(context.TODO(), bson.M{ "pubKey": bson.M{ "$exists": true, "$nin": bson.A{nil, ""} }}, findOptProj)
	Check(err)
	defer cur.Close(context.TODO())
	for cur.Next(context.TODO()) {
		var user User
		err := cur.Decode(&user)
		Check(err)
		diff := TimeHoursDiff(user.Key_last_unlock)
		if (diff >= expirationDelta) {
			//cipher string only if it is unciphered
			if(strings.Contains(user.PubKey, "ssh-rsa")) {
				//return a byte string
				b32_decoded_otp_secret, err := base32.StdEncoding.WithPadding(base32.NoPadding).DecodeString(user.Otp_secret)
				Check(err)
				key := b32_decoded_otp_secret
				encKey := AESencrypt(string(key), user.PubKey)
				// Persist the ciphered key in both stores so the key is
				// unusable everywhere.
				_, err = users.UpdateOne(context.TODO(), bson.M{"email":user.Email }, bson.M{ "$set": bson.M{ "pubKey" : encKey}})
				Check(err)
				_, err = ldap.SetUserAttribute(user.Sys_username, "sshPublicKey", encKey)
				Check(err)
				log.Println(" |- SSH public key for user "+user.Sys_username+" Locked due to expiration")
			}
		}
	}
	log.Println("[+] Expired keys locked successfully")
}
|
package B
import "fmt"
// Call prints the package marker "B!" (with a trailing newline) to standard
// output.
func Call() {
	fmt.Print("B!\n")
}
|
package main
// initializeRoutes registers the application's HTTP routes: a collection
// endpoint listing all companies and a detail endpoint for one company
// addressed by its numeric id.
func (a *App) initializeRoutes() {
	a.Router.HandleFunc("/companies", a.getCompanies).Methods("GET")
	a.Router.HandleFunc("/company/{id:[0-9]+}", a.getCompany).Methods("GET")
}
|
package util
import (
"context"
"time"
)
// Sleeper is a device that facilitates Context-cancellable sleeping.
//
// Sleeper is not safe for concurrent usage.
type Sleeper struct {
	// t is the reusable timer backing Sleep; nil until the first positive
	// sleep, and expected to be stopped/drained between uses.
	t *time.Timer
}
// Sleep sleeps until either the specified period, d, has expired, or the
// supplied Context has been cancelled.
//
// If Sleep exits naturally, it will return nil. Otherwise, if it is cancelled
// prematurely, the Context's error will be returned.
func (s *Sleeper) Sleep(c context.Context, d time.Duration) error {
	// If we're not sleeping for a positive amount of time, return immediately.
	if d <= 0 {
		return nil
	}
	// If our Context is already cancelled, don't do anything.
	select {
	case <-c.Done():
		return c.Err()
	default:
	}
	// Lazily create the timer on first use; afterwards Reset reuses it.
	// We assume that t is in a triggered state from previous use.
	if s.t == nil {
		s.t = time.NewTimer(d)
	} else {
		s.t.Reset(d)
	}
	select {
	case <-c.Done():
		// Our Context has finished before our timer has finished. Cancel the timer.
		// If Stop reports the timer already fired, drain its channel so the
		// next Reset starts from a clean state.
		if !s.t.Stop() {
			<-s.t.C
		}
		return c.Err()
	case <-s.t.C:
		// The timer has ticked, our sleep completed successfully.
		return nil
	}
}
// Close closes the Sleeper, releasing any resources that it owns.
//
// Close is optional, but may offer better resource management if called.
// It is safe to call on a Sleeper that never actually slept.
func (s *Sleeper) Close() {
	// Fix: Sleep creates the timer lazily, so s.t is nil when Sleep was
	// never called (or never slept for a positive duration / uncancelled
	// Context). The previous version dereferenced it unconditionally and
	// panicked in that case.
	if s.t != nil {
		s.t.Stop()
		s.t = nil
	}
}
// Sleep is a shortcut for a single-use Sleeper.
func Sleep(c context.Context, d time.Duration) error {
	var s Sleeper
	err := s.Sleep(c, d)
	// Fix: release the timer only if one was actually created. s.Sleep
	// returns without allocating it for non-positive durations or an
	// already-cancelled Context, and the previous `defer s.Close()` then
	// panicked on the nil timer.
	if s.t != nil {
		s.t.Stop()
		s.t = nil
	}
	return err
}
|
package validation
import (
"fmt"
"reflect"
validator "gopkg.in/validator.v2"
)
// NewValidator registers a validator.v2 validation function under the given
// name. The function accepts a string (or *string) field only when its value
// equals one of the entries in registry.
//
// NOTE(review): only reflect.Slice registries are handled by the switch; any
// other registry kind makes every non-empty value fail with "unknown value".
func NewValidator(name string, registry interface{}) {
	validator.SetValidationFunc(
		name,
		func(v interface{}, param string) error {
			st := reflect.ValueOf(v)
			if st.Kind() == reflect.Ptr {
				// A nil pointer is treated as valid (nothing to validate).
				if st.Pointer() == 0 {
					return nil
				}
				st = st.Elem()
			}
			// Only string-backed fields are supported.
			if st.Kind() != reflect.String {
				return validator.ErrUnsupported
			}
			value := st.String()
			switch reflect.TypeOf(registry).Kind() {
			case reflect.Slice:
				enum := reflect.ValueOf(registry)
				for i := 0; i < enum.Len(); i++ {
					if value == enum.Index(i).String() {
						return nil
					}
				}
			}
			return fmt.Errorf("unknown value '%s'", value)
		},
	)
}
|
package main
import (
"encoding/json"
"fmt"
"os"
"github.com/SoftwareAG/adabas-go-api/adabas"
"github.com/SoftwareAG/adabas-go-api/adatypes"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
// Employees example exmployee native inmap usage
type Employees struct {
	Index uint64 `adabas:":isn"`
	ID string `adabas:":key:AA"`
	FullName *FullNameInMap `adabas:"::AB"`
	Income []*IncomeInMap `adabas:"::AQ"`
}
// FullNameInMap full name inmap database reference
type FullNameInMap struct {
	FirstName string `adabas:"::AC"`
	MiddleName string `adabas:"::AD"`
	Name string `adabas:"::AE"`
}
// IncomeInMap income inmap database reference
type IncomeInMap struct {
	Salary uint64 `adabas:"::AS"`
	Bonus []uint64 `adabas:"::AT"`
	Currency string `adabas:"::AR"`
	Summary uint64 `adabas:":ignore"`
}
// streamStruct carries the store request into the per-record read-stream
// callback (updateStream).
type streamStruct struct {
	store *adabas.StoreRequest
}
// initLogLevelWithFile configures the zap-based logging used by the adatypes
// package: log lines go to fileName inside $LOGPATH (or the current
// directory) at the given level.
//
// NOTE(review): the named return err is never assigned — configuration
// failures call os.Exit(1) instead. Also, `defer logger.Sync()` flushes when
// THIS function returns, not at program exit, so writes logged later may not
// be flushed — confirm this is intended.
func initLogLevelWithFile(fileName string, level zapcore.Level) (err error) {
	p := os.Getenv("LOGPATH")
	if p == "" {
		p = "."
	}
	name := p + string(os.PathSeparator) + fileName
	// Base configuration; level and output path are overridden below.
	rawJSON := []byte(`{
	"level": "error",
	"encoding": "console",
	"outputPaths": [ "XXX"],
	"errorOutputPaths": ["stderr"],
	"encoderConfig": {
	"messageKey": "message",
	"levelKey": "level",
	"levelEncoder": "lowercase"
	}
	}`)
	var cfg zap.Config
	if err := json.Unmarshal(rawJSON, &cfg); err != nil {
		fmt.Printf("Initial logging JSON configuration error: %v\n", err)
		os.Exit(1)
	}
	cfg.Level.SetLevel(level)
	cfg.OutputPaths = []string{name}
	logger, err := cfg.Build()
	if err != nil {
		fmt.Printf("Initial logging error: %v\n", err)
		os.Exit(1)
	}
	defer logger.Sync()
	sugar := logger.Sugar()
	// Hand the sugared logger to the Adabas type layer.
	adatypes.Central.SetDebugLevel(true)
	sugar.Infof("Start logging with level %v", level)
	adatypes.Central.Log = sugar
	return
}
// updateStream is the per-record callback for ReadLogicalWithStream: it
// locates the last AS (salary) occurrence of the record, adds 1000 to it,
// and stores the modified record through the StoreRequest carried in x
// (a *streamStruct).
func updateStream(record *adabas.Record, x interface{}) error {
	tc := x.(*streamStruct)
	// Index of the last AS occurrence (presumably 1-based — confirm against
	// SearchValueIndex semantics).
	last := uint32(record.ValueQuantity("AS"))
	vi, err := record.SearchValueIndex("AS", []uint32{last})
	if err != nil {
		return err
	}
	sv, _ := vi.Int32()
	sv += 1000
	err = record.SetValueWithIndex("AS", []uint32{last}, sv)
	if err != nil {
		return err
	}
	fmt.Println("Updated record", record)
	return tc.store.Update(record)
}
// main demonstrates the Adabas "inmap" API: it connects to the database id
// given as the first CLI argument (default "1"), prepares a read request and
// a store request over file 11 using the Employees mapping, streams every
// record with AE='SMITH' through updateStream (which raises the last salary
// value by 1000 and updates the record), and finally commits the
// transaction.
func main() {
	initLogLevelWithFile("employees.log", zapcore.DebugLevel)
	adabasModDBIDs := "1"
	if len(os.Args) > 1 {
		adabasModDBIDs = os.Args[1]
	}
	fmt.Println("Open connection to", adabasModDBIDs)
	connection, err := adabas.NewConnection(fmt.Sprintf("acj;inmap=%s", adabasModDBIDs))
	if err != nil {
		fmt.Println("Error connecting database:", err)
		return
	}
	defer connection.Close()
	// Read request over file 11, mapped through the Employees struct tags.
	readRequest, rerr := connection.CreateMapReadRequest(&Employees{}, 11)
	if rerr != nil {
		fmt.Println("Error creating read request:", rerr)
		return
	}
	err = readRequest.QueryFields("AA,AB,AS")
	if err != nil {
		fmt.Println("Error query field:", err)
		return
	}
	// Store request used by the stream callback to write salary updates.
	storeRequest, serr := connection.CreateMapStoreRequest(&Employees{}, 11)
	if serr != nil {
		fmt.Println("Error creating store request:", serr)
		return
	}
	serr = storeRequest.StoreFields("AS")
	if serr != nil {
		fmt.Println("Error define store fields:", serr)
		return
	}
	fmt.Println("Read logical search...")
	tc := &streamStruct{store: storeRequest}
	_, err = readRequest.ReadLogicalWithStream("AE='SMITH'", updateStream, tc)
	if err != nil {
		fmt.Println("Error updating records:", err)
		return
	}
	// Commit all updates performed by the stream callback.
	err = storeRequest.EndTransaction()
	if err != nil {
		fmt.Println("Error end of transaction:", err)
		return
	}
}
|
package genstruct
import (
"io/ioutil"
"strings"
)
// Go type names (and their sql / guregu nullable counterparts) emitted into
// generated struct fields.
const (
	golangByteArray = "[]byte"
	gureguNullInt = "null.Int"
	sqlNullInt = "sql.NullInt64"
	golangInt = "int"
	golangInt64 = "int64"
	gureguNullFloat = "null.Float"
	sqlNullFloat = "sql.NullFloat64"
	golangFloat = "float"
	golangFloat32 = "float32"
	golangFloat64 = "float64"
	gureguNullString = "null.String"
	sqlNullString = "sql.NullString"
	gureguNullTime = "null.Time"
	golangTime = "time.Time"
)
// MainStruct is the root of the generator configuration: global version info
// plus the set of modules to generate.
type MainStruct struct {
	Version string `json:"version"`
	Revision int `json:"revision"`
	TemplatePath string `json:"-"`
	OutputPath string `json:"-"`
	Modules map[string]Module `json:"modules,omitempty"`
}
// Module describes one generated module: its generation mode, field-level
// policies, caching, plugins, relations and column definitions.
type Module struct {
	Name string `json:"name"`
	Mode string `json:"mode"`
	InjectionMode string `json:"injection_mode"`
	UnpublishField []string `json:"unpublish_field"`
	UneditableField []string `json:"uneditable_field"`
	FilterList []string `json:"filter_list"`
	FilterDetail []string `json:"filter_detail"`
	UniqueKey []string `json:"unique_key"`
	Validator map[string]string `json:"validator"`
	CacheGroupBackend []string `json:"cache_group_backend"`
	CacheGroupFrontend []string `json:"cache_group_frontend"`
	CacheDuration int `json:"cache_duration"`
	Plugin map[string]map[string]string `json:"plugin"`
	Relation map[string][]string `json:"relation"`
	Columns map[string]Column `json:"columns"`
}
// Column describes one database column as consumed by the type-mapping
// helpers below.
type Column struct {
	Name string `json:"name"`
	Type string `json:"type"`
	NullAble string `json:"nullable"`
	Key string `json:"key"`
	Default string `json:"default"`
}
// Plugin is an external plugin manifest with its schema entries.
type Plugin struct {
	Version string `json:"version"`
	Name string `json:"name"`
	Schema []Schema `json:"schema"`
}
// Schema is one provider/type entry of a plugin, with its column set.
type Schema struct {
	Provider string `json:"provider"`
	Type string `json:"type"`
	Columns map[string]Column `json:"columns"`
}
// title converts a snake_case identifier into CamelCase: underscores become
// word boundaries, each word is title-cased, and the separators are removed.
func title(s string) string {
	spaced := strings.Replace(s, "_", " ", -1)
	capped := strings.Title(spaced)
	return strings.Replace(capped, " ", "", -1)
}
// readTemplate loads the named template file from the configured
// TemplatePath and returns its contents as a string; a read failure aborts
// via check.
func readTemplate(file string) string {
	raw, err := ioutil.ReadFile(TemplatePath + "/" + file)
	check(err)
	return string(raw)
}
// defultStructType returns the literal used as the default value for a
// struct field generated from the given MySQL column type: "0" for numeric
// types, an empty quoted string for textual, temporal and binary types, and
// "" when the type is unrecognised.
func defultStructType(mysqlType string) string {
	switch mysqlType {
	case "tinyint", "int", "smallint", "mediumint", "bigint",
		"decimal", "double", "float":
		return "0"
	case "char", "enum", "varchar", "longtext", "mediumtext", "text", "tinytext",
		"date", "datetime", "time", "timestamp",
		"binary", "blob", "longblob", "mediumblob", "varbinary":
		return "\"\""
	default:
		return ""
	}
}
func convertStructType(mysqlType string) string {
switch mysqlType {
case "tinyint", "int", "smallint", "mediumint":
return golangInt
case "bigint":
return golangInt64
case "char", "enum", "varchar", "longtext", "mediumtext", "text", "tinytext":
return "string"
case "date", "datetime", "time", "timestamp":
return golangTime
case "decimal", "double":
return golangFloat64
case "float":
return golangFloat32
case "binary", "blob", "longblob", "mediumblob", "varbinary":
return golangByteArray
}
return ""
}
func mysqlTypeToGoType(mysqlType string, nullable bool, gureguTypes bool) string {
switch mysqlType {
case "tinyint", "int", "smallint", "mediumint":
if nullable {
if gureguTypes {
return gureguNullInt
}
return sqlNullInt
}
return golangInt
case "bigint", "timestamp":
if nullable {
if gureguTypes {
return gureguNullInt
}
return sqlNullInt
}
return golangInt64
case "char", "enum", "varchar", "longtext", "mediumtext", "text", "tinytext", "time":
if nullable {
if gureguTypes {
return gureguNullString
}
return sqlNullString
}
return "string"
case "date", "datetime":
if nullable && gureguTypes {
return gureguNullTime
}
return golangTime
case "decimal", "double":
if nullable {
if gureguTypes {
return gureguNullFloat
}
return sqlNullFloat
}
return golangFloat64
case "float":
if nullable {
if gureguTypes {
return gureguNullFloat
}
return sqlNullFloat
}
return golangFloat32
case "binary", "blob", "longblob", "mediumblob", "varbinary":
return golangByteArray
}
return ""
}
// contains reports whether str is an element of arr.
func contains(arr []string, str string) bool {
	for i := range arr {
		if arr[i] == str {
			return true
		}
	}
	return false
}
|
package _783_Minimum_Distance_Between_BST_Nodes
import "math"
// TreeNode is a node of the binary search tree.
type TreeNode struct {
	Val int
	Left *TreeNode
	Right *TreeNode
}
// minDiffInBST returns the minimum difference between the values of any two
// different nodes of the BST; it delegates to the non-recursive in-order
// implementation.
func minDiffInBST(root *TreeNode) int {
	return minDiffInBSTInOrderNR(root)
}
// minDiffInBSTInOrderNR returns the minimum absolute difference between the
// values of any two nodes in the BST using a non-recursive in-order walk:
// in-order visits the values in sorted order, so the answer is the smallest
// gap between consecutive visited values.
func minDiffInBSTInOrderNR(root *TreeNode) int {
	best := math.MaxInt32
	var stack []*TreeNode
	var prev *TreeNode
	cur := root
	for cur != nil || len(stack) > 0 {
		// Descend as far left as possible, stacking the path.
		for cur != nil {
			stack = append(stack, cur)
			cur = cur.Left
		}
		// Visit the node on top of the stack, then continue with its right
		// subtree.
		cur = stack[len(stack)-1]
		stack = stack[:len(stack)-1]
		if prev != nil {
			best = minInt(best, absInt(cur.Val, prev.Val))
		}
		prev = cur
		cur = cur.Right
	}
	return best
}
// absInt returns the absolute difference |a-b|.
func absInt(a, b int) int {
	if a < b {
		a, b = b, a
	}
	return a - b
}
// minInt returns the smaller of a and b.
func minInt(a, b int) int {
	if b < a {
		return b
	}
	return a
}
|
package handlers
import (
"forum/internal/handlers/dashboard"
"github.com/gin-gonic/gin"
)
// dashboardRouter mounts the dashboard's forum CRUD endpoints onto the given
// route group: create/list on "forum" and show/delete/update on
// "forum/:forum".
func dashboardRouter(r *gin.RouterGroup) {
	r.POST("forum", dashboard.CreateForum)
	r.GET("forum", dashboard.AllForum)
	r.GET("forum/:forum", dashboard.ShowForum)
	r.DELETE("forum/:forum", dashboard.DeleteForum)
	r.PUT("forum/:forum", dashboard.UpdateForum)
}
|
/*
You are driving a little too fast, and a police officer stops you. Write code to compute the result, encoded as an int value: 0=no ticket, 1=small ticket, 2=big ticket. If speed is 60 or less, the result is 0. If speed is between 61 and 80 inclusive, the result is 1. If speed is 81 or more, the result is 2. Unless it is your birthday -- on that day, your speed can be 5 higher in all cases.
*/
package main
import (
"fmt"
)
// caught_speeding encodes the ticket for a given speed: 0 = no ticket
// (<= 60), 1 = small ticket (61..80), 2 = big ticket (>= 81). On the
// driver's birthday every limit is 5 higher, modelled by lowering the
// effective speed.
func caught_speeding(speed int, is_birthday bool) int {
	if is_birthday {
		// Birthday allowance: all thresholds shift up by 5.
		speed -= 5
	}
	switch {
	case speed <= 60:
		return 0
	case speed <= 80:
		// Bug fix: the original condition was `speed > 60 || speed <= 80`,
		// which is true for every speed above 60, making the big ticket (2)
		// unreachable. The intended range check is 61..80.
		return 1
	default:
		return 2
	}
}
// main runs four spot checks against caught_speeding and prints "OK" only
// when every check produces the expected ticket code.
func main() {
	checks := []struct {
		speed    int
		birthday bool
		want     int
	}{
		{60, false, 0},
		{84, true, 1},
		{65, false, 1},
		{65, true, 0},
	}
	status := 0
	for _, c := range checks {
		if caught_speeding(c.speed, c.birthday) == c.want {
			status += 1
		}
	}
	if status == 4 {
		fmt.Println("OK")
	} else {
		fmt.Println("NOT OK")
	}
}
|
package pget
import (
"bytes"
"context"
"crypto/md5"
"encoding/hex"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestMain silences the package's progress output (written through the
// package-level stdout writer) for the whole test run.
func TestMain(m *testing.M) {
	stdout = ioutil.Discard
	os.Exit(m.Run())
}
// TestPget exercises the full download pipeline against a local file server:
// Check must follow the redirect chain down to the real tarball URL, and
// Download must produce a file byte-identical to the fixture.
func TestPget(t *testing.T) {
	// listening file server
	mux := http.NewServeMux()
	// Three chained redirects before the real file, so Check has something
	// to resolve.
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		http.Redirect(w, r, "/moo", http.StatusFound)
	})
	mux.HandleFunc("/moo", func(w http.ResponseWriter, r *http.Request) {
		http.Redirect(w, r, "/mooo", http.StatusFound)
	})
	mux.HandleFunc("/mooo", func(w http.ResponseWriter, r *http.Request) {
		http.Redirect(w, r, "/test.tar.gz", http.StatusFound)
	})
	mux.HandleFunc("/test.tar.gz", func(w http.ResponseWriter, r *http.Request) {
		fp := "_testdata/test.tar.gz"
		data, err := ioutil.ReadFile(fp)
		if err != nil {
			t.Errorf("failed to readfile: %s", err)
		}
		http.ServeContent(w, r, fp, time.Now(), bytes.NewReader(data))
	})
	ts := httptest.NewServer(mux)
	defer ts.Close()
	// begin tests
	url := ts.URL
	tmpdir := t.TempDir()
	cfg := &DownloadConfig{
		Filename: "test.tar.gz",
		ContentLength: 1719652,
		Dirname: tmpdir,
		Procs: 4,
		URLs: []string{ts.URL},
		Client: newDownloadClient(1),
	}
	t.Run("check", func(t *testing.T) {
		target, err := Check(context.Background(), &CheckConfig{
			URLs: []string{url},
			Timeout: 10 * time.Second,
		})
		if err != nil {
			t.Fatalf("failed to check header: %s", err)
		}
		if len(target.URLs) == 0 {
			t.Fatalf("invalid URL length %d", len(target.URLs))
		}
		// could redirect?
		assert.NotEqual(t, target.URLs[0], url, "failed to get of the last url in the redirect")
	})
	t.Run("download", func(t *testing.T) {
		err := Download(context.Background(), cfg)
		if err != nil {
			t.Fatal(err)
		}
		// check of the file to exists
		// NOTE(review): this fails when os.Stat SUCCEEDS, i.e. it asserts
		// the per-proc chunk files were removed after merging — but the
		// "%q does not exist" message reads backwards for that intent;
		// confirm which direction was meant.
		for i := 0; i < cfg.Procs; i++ {
			filename := filepath.Join(tmpdir, "_test.tar.gz.4", fmt.Sprintf("test.tar.gz.2.%d", i))
			_, err := os.Stat(filename)
			if err == nil {
				t.Errorf("%q does not exist: %v", filename, err)
			}
		}
		cmpFileChecksum(t, "_testdata/test.tar.gz", filepath.Join(tmpdir, cfg.Filename))
	})
}
func get2md5(path string) (string, error) {
f, err := os.Open(path)
if err != nil {
return "", err
}
defer f.Close()
hash := md5.New()
if _, err := io.Copy(hash, f); err != nil {
return "", err
}
// get the 16 bytes hash
bytes := hash.Sum(nil)[:16]
return hex.EncodeToString(bytes), nil
}
// cmpFileChecksum fails the test when the MD5 checksums of the two files
// differ, i.e. when the file at gotPath is not byte-identical to the one at
// wantPath.
func cmpFileChecksum(t *testing.T, wantPath, gotPath string) {
	t.Helper()
	want, err := get2md5(wantPath)
	if err != nil {
		t.Fatalf("failed to md5sum of original file: %s", err)
	}
	got, err := get2md5(gotPath)
	if err != nil {
		t.Fatalf("failed to md5sum of result file: %s", err)
	}
	if want != got {
		t.Errorf("expected %s got %s", want, got)
	}
}
|
package main
import(
"fmt"
"log"
"net/http"
"encoding/json"
)
// Passage is one flight ticket offer: the airline and its price.
type Passage struct {
	Company string `json:"Company"`
	Money string `json:"Money"`
}
// Passages is a list of Passage values, serialised as a JSON array.
type Passages []Passage
func enableCors(w *http.ResponseWriter) {
(*w).Header().Set("Access-Control-Allow-Origin", "*")
}
// allPassages answers with the full (hard-coded) list of flight passages as
// a JSON array, after enabling CORS on the response. It also logs the data
// to stdout.
func allPassages(w http.ResponseWriter, r *http.Request) {
	enableCors(&w)
	list := Passages{
		{Company: "GOL", Money: "1000"},
		{Company: "TAM", Money: "2000"},
		{Company: "AIR", Money: "500"},
	}
	fmt.Println("Todos os dados mostrados", list)
	json.NewEncoder(w).Encode(list)
}
func home(w http.ResponseWriter, r *http.Request){
fmt.Fprintf(w, "Pagina inicial")
}
// handleRequests wires the HTTP routes and starts the server on port 8686.
// ListenAndServe blocks; log.Fatal exits the process if it ever returns.
func handleRequests(){
	http.HandleFunc("/", home)
	http.HandleFunc("/passages", allPassages)
	log.Fatal(http.ListenAndServe(":8686", nil))
}
// main starts the passages API server (blocks forever).
func main(){
	handleRequests()
}
|
package main
import (
"os"
"path"
"path/filepath"
"strings"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"github.com/mattn/go-shellwords"
"github.com/ayufan/docker-composer/cmds"
"github.com/ayufan/docker-composer/compose"
)
// init detects when this binary is being invoked as a git hook
// (GIT_WORK_TREE set and the executable living under .git/hooks). In that
// case it rewrites os.Args so that main dispatches to the matching
// "git-<hook>" subcommand for the app named after the work-tree directory.
func init() {
	workTree := os.Getenv("GIT_WORK_TREE")
	if workTree == "" {
		// Not running as a git hook: normal CLI startup.
		return
	}
	workTree = filepath.Clean(workTree)
	hookPath, err := filepath.Abs(os.Args[0])
	if err != nil {
		logrus.Fatalln("Abs:", err)
	}
	hookName := filepath.Base(hookPath)
	hooksDir := filepath.Dir(hookPath)
	if !strings.HasSuffix(hooksDir, "/.git/hooks") {
		return
	}
	// The hook is expected to be a symlink back to this binary.
	link, err := os.Readlink(os.Args[0])
	if err != nil {
		logrus.Fatalln("Readlink:", err)
	}
	appName := filepath.Base(workTree)
	// Rewrite argv: <binary> git-<hook> <app> <original args...>.
	os.Args = append([]string{link, "git-" + hookName, appName}, os.Args[1:]...)
}
// main configures and runs the docker-composer CLI application: global flags
// for logging and the apps directory, a pre-run hook applying the log level,
// and a default action that supports `-c "<command>"` by re-parsing the
// given string into CLI arguments and re-running the app with them.
func main() {
	app := cli.NewApp()
	app.Name = path.Base(os.Args[0])
	app.Usage = "a Docker Composer Service"
	app.Author = "Kamil Trzciński"
	app.Email = "ayufan@ayufan.eu"
	app.Flags = []cli.Flag{
		cli.BoolFlag{
			Name: "debug",
			Usage: "debug mode",
			EnvVar: "DEBUG",
		},
		cli.StringFlag{
			Name: "log-level, l",
			Value: "info",
			Usage: "Log level (options: debug, info, warn, error, fatal, panic)",
			EnvVar: "LOG_LEVEL",
		},
		cli.StringFlag{
			Name: "apps-dir",
			Value: "/srv/apps",
			Usage: "Directory where all the apps are stored",
			Destination: &compose.AppsDirectory,
			EnvVar: "APPS_DIR",
		},
		cli.StringFlag{
			Name: "c",
			Usage: "Custom command to execute",
		},
	}
	// logs
	app.Before = func(c *cli.Context) error {
		logrus.SetOutput(os.Stderr)
		level, err := logrus.ParseLevel(c.String("log-level"))
		if err != nil {
			logrus.Fatalf(err.Error())
		}
		logrus.SetLevel(level)
		logrus.SetFormatter(&logrus.TextFormatter{
			ForceColors: true,
		})
		// If a log level wasn't specified and we are running in debug mode,
		// enforce log-level=debug.
		if !c.IsSet("log-level") && !c.IsSet("l") && c.Bool("debug") {
			logrus.SetLevel(logrus.DebugLevel)
		}
		return nil
	}
	defaultAction := app.Action
	app.Action = func(c *cli.Context) error {
		// `-c "<command>"`: split the string shell-style and re-run the app
		// with those arguments.
		if command := c.String("c"); command != "" {
			args, err := shellwords.Parse(command)
			if err != nil {
				logrus.Fatalln(err)
			}
			args = append([]string{os.Args[0]}, args...)
			return app.Run(args)
		}
		return cli.HandleAction(defaultAction, c)
	}
	app.Commands = cmds.Commands
	err := app.Run(os.Args)
	if err != nil {
		logrus.Fatalln(err)
	}
}
|
package 链表
// hasCycle reports whether the linked list starting at head contains a
// cycle, using Floyd's tortoise-and-hare: the slow pointer advances one node
// per step and the fast pointer two, so they can only meet if the list loops.
func hasCycle(head *ListNode) bool {
	slow := head
	fast := head
	for fast != nil && fast.Next != nil {
		slow = slow.Next
		fast = fast.Next.Next
		if slow == fast {
			return true
		}
	}
	// The fast pointer reached the end of the list: no cycle.
	return false
}
/*
Problem link: https://leetcode-cn.com/problems/linked-list-cycle/comments/
*/
|
package TmxTileset
import (
"testing"
)
// testTilesets lists the tileset fixture files exercised by the parsing test.
var testTilesets= [...]string{"../../../tilesets/jumper.tsx"}
// TestTilesetParsingp runs ReadTileSetFile over every fixture; it currently
// only verifies that parsing completes without panicking (no assertions on
// the parsed result).
func TestTilesetParsingp(t *testing.T) {
	for _, filename := range testTilesets {
		ReadTileSetFile(filename)
	}
}
|
package moviedetail
import (
"context"
"github.com/ariefrpm/movies2/gen/go/proto/v1"
"github.com/ariefrpm/movies2/pkg/library/router"
"google.golang.org/grpc"
)
// request carries the decoded input of the movie-detail endpoint: the movie id.
type request struct {
	ID string
}
// response wraps the movie handed back to the transport layer.
type response struct {
	*Movie
}
// endpoint adapts the MovieDetail service method to the transport-agnostic
// router.Endpoint signature: it casts the decoded request, looks up the
// movie and wraps it in a response.
func endpoint(s Service) router.Endpoint {
	return func(ctx context.Context, req interface{}) (interface{}, error) {
		r := req.(request)
		m, err := s.MovieDetail(r.ID)
		return response{m}, err
	}
}
// RestHandler mounts the movie-detail endpoint on the REST router at
// GET /api/movie_detail, wiring the REST codec functions around it.
func RestHandler(svc Service, r router.Router) {
	handler := router.NewHandler(endpoint(svc), restDecodeRequest, restEncodeResponse, restEncodeError)
	r.GET("/api/movie_detail", handler)
}
// GrpcHandler registers the movie-detail endpoint with the gRPC server,
// wiring the gRPC codec functions around the same endpoint.
func GrpcHandler(svc Service, grpcServer *grpc.Server) {
	t := &grpcTransport{handler:router.NewGrpcHandler(endpoint(svc), grpcDecodeRequest, grpcEncodeResponse)}
	proto.RegisterMovieDetailServiceServer(grpcServer, t)
}
|
package logger
import (
"log"
"os"
)
var (
	// outfile is the append-mode log destination. NOTE(review): the error
	// from OpenFile is discarded — if ./logger/info.log cannot be opened,
	// outfile is nil and every log write will fail; confirm the directory is
	// guaranteed to exist.
	outfile, _ = os.OpenFile("./logger/info.log", os.O_RDWR|os.O_CREATE|os.O_APPEND, 0755)
	// LogFile is a bare logger (no prefix, no flags) writing to outfile.
	LogFile = log.New(outfile, "", 0)
)
// ForError writes err to the log file when it is non-nil; nil errors are
// silently ignored.
func ForError(err error) {
	if err == nil {
		return
	}
	LogFile.Println(err)
}
// LogCommandResult writes a command's textual output to the log file,
// skipping empty strings.
func LogCommandResult(str string) {
	if str == "" {
		return
	}
	LogFile.Println(str)
}
|
package firewall
import "fmt"
// Mock is a no-op firewall implementation used for testing: AddIP does
// nothing (the real ipset add command stays disabled) and RemoveIP only
// echoes the ipset command it would run.
type Mock struct{}

// AddIP pretends to blacklist ip and always succeeds.
func (f *Mock) AddIP(ip string) error {
	return nil
}

// RemoveIP prints the ipset deletion command for ip without executing it and
// always succeeds.
func (f *Mock) RemoveIP(ip string) error {
	fmt.Printf("sudo /sbin/ipset del blacklist %s\n", ip)
	return nil
}
|
package main
import (
"KServer/manage"
"KServer/manage/config"
"KServer/server/utils"
"KServer/server/utils/msg"
"KServer/server/websocket/response"
"KServer/server/websocket/services"
"fmt"
"os"
"os/signal"
"syscall"
)
// main boots the websocket agent server: it builds a manager with websocket
// client/server and Kafka messaging enabled, opens the Kafka send channel,
// registers connection hooks, message routers and service-discovery
// callbacks, starts the websocket server, and then blocks until a
// termination signal arrives, at which point the Kafka channels are closed.
func main() {
	mConf := config.NewManageConfig()
	//mConf.Socket.Client = true
	//mConf.Socket.Server = true
	//mConf.DB.Redis = true
	mConf.WebSocket.Client = true
	mConf.WebSocket.Server = true
	mConf.Server.Head = msg.AgentServerTopic
	mConf.Message.Kafka = true
	// Create the manager.
	m := manage.NewManage(mConf)
	// Manager-started redis pools (currently disabled).
	//redisConf := config.NewRedisConfig(utils.RedisConFile)
	//m.DB().Redis().StartMasterPool(redisConf.GetMasterAddr(), redisConf.Master.PassWord, redisConf.Master.MaxIdle, redisConf.Master.MaxActive)
	//m.DB().Redis().StartSlavePool(redisConf.GetSlaveAddr(), redisConf.Slave.PassWord, redisConf.Slave.MaxIdle, redisConf.Slave.MaxActive)
	// Open the Kafka message send channel.
	kafkaConf := config.NewKafkaConfig(utils.KafkaConFile)
	err := m.Message().Kafka().Send().Open([]string{kafkaConf.GetAddr()})
	if err != nil {
		fmt.Println("消息通道启动失败")
		return
	}
	// Build the message response handlers.
	is := response.NewIServerResponse(m)
	alls := response.NewAllServerResponse(m)
	// Plain socket server (disabled).
	//socketServer := socket.NewSocket()
	// Register connection hooks and the connection-auth route.
	connect := services.NewConnect(m)
	// Register the connection lifecycle hook callbacks.
	m.WebSocket().Server().SetOnConnStart(connect.DoConnectionBegin)
	m.WebSocket().Server().SetOnConnStop(connect.DoConnectionLost)
	// Register socket routes.
	m.WebSocket().Server().AddHandle(msg.OauthId, connect) // route for initial connections
	// Register a custom handler used to forward unregistered msg ids,
	// cooperating with service discovery.
	CustomHandle := services.NewWebSocketCustomHandle(m)
	m.WebSocket().Server().AddCustomHandle(CustomHandle)
	// Add listening routers.
	m.Message().Kafka().AddRouter(m.Server().GetId(), msg.OauthId, connect.ResponseOauth)
	m.Message().Kafka().AddRouter(m.Server().GetId(), msg.AgentSendAllClient, is.SendAllClient) // broadcast messages to all clients
	// All servers receive messages on the shared topic.
	m.Message().Kafka().AddRouter(msg.AgentServerAllTopic, msg.AgentAllServerId, alls.ResponseAllServer)
	// Register service-discovery callbacks.
	// Global service discovery.
	m.Message().Kafka().AddRouter(msg.ServiceDiscoveryListenTopic, msg.ServiceDiscoveryID, CustomHandle.DiscoverHandle)
	// Initial service-discovery fetch for this server.
	m.Message().Kafka().AddRouter(m.Server().GetId(), msg.ServiceDiscoveryID, CustomHandle.DiscoverHandle)
	//m.Discover().CallRegisterService()
	// Start listening; closeFunc later shuts the listen channel down.
	closeFunc := m.Message().Kafka().StartListen([]string{kafkaConf.GetAddr()}, m.Server().GetId(), -1)
	m.Message().Kafka().CallCheckAllService(m.Server().GetId()) // query all services
	// Start the socket service (websocket).
	//s.Serve()
	m.WebSocket().Server().Serve()
	fmt.Println("[服务器加载完毕]")
	// Block until SIGHUP/SIGINT/SIGTERM/SIGQUIT, then shut down.
	sigs := make(chan os.Signal, 1)
	done := make(chan bool, 1)
	signal.Notify(sigs, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
	go func() {
		<-sigs
		//fmt.Println()
		//fmt.Println(sig)
		done <- true
	}()
	//fmt.Println("awaiting signal")
	<-done
	fmt.Println("Server Close...")
	// Close message listening.
	// Close the socket (disabled).
	//socket.Stop()
	// Close redis (disabled).
	//_ = m.DB().Redis().CloseMaster()
	//_ = m.DB().Redis().CloseSlave()
	// Close the Kafka send channel and the listener.
	m.Message().Kafka().Send().Close()
	closeFunc()
}
|
/*
# -*- coding: utf-8 -*-
# @Author : joker
# @Time : 2021/7/7 9:47 上午
# @File : jz_24_二叉树中某个值的路径.go
# @Description :
# @Attention :
*/
package offer
// FindPath returns every root-to-leaf path of the tree whose node values sum
// to expectNumber; each path is listed as the sequence of node values from
// the root down.
func FindPath(root *TreeNode, expectNumber int) [][]int {
	r := make([][]int, 0)
	dfsFindPath(root, expectNumber, &r, make([]int, 0))
	return r
}
// dfsFindPath walks the tree rooted at root, extending path with each
// visited node and decrementing `left` (the remaining target sum). Whenever
// a leaf is reached with left == 0, a snapshot of the current path is
// appended to res.
//
// The search prunes as soon as `left` goes negative, which assumes node
// values are non-negative — confirm against the caller's data.
func dfsFindPath(root *TreeNode, left int, res *[][]int, path []int) {
	if nil == root {
		return
	}
	val := root.Val
	path = append(path, val)
	left -= val
	if left < 0 {
		return
	}
	if left == 0 && root.Left == nil && root.Right == nil {
		// Bug fix: store a COPY of path. The original appended the shared
		// slice itself; sibling branches later append into the same backing
		// array and silently overwrite previously recorded answers.
		snapshot := make([]int, len(path))
		copy(snapshot, path)
		*res = append(*res, snapshot)
		return
	}
	dfsFindPath(root.Left, left, res, path)
	dfsFindPath(root.Right, left, res, path)
}
|
package command
import (
"errors"
"fmt"
"github.com/opsgenie/opsgenie-go-sdk-v2/logs"
gcli "github.com/urfave/cli"
"io"
"net/http"
"os"
"strings"
"time"
)
// NewCustomerLogClient builds an Opsgenie logs API client from the CLI
// configuration. On failure it prints the reason and returns an error so
// callers can abort.
func NewCustomerLogClient(c *gcli.Context) (*logs.Client, error) {
	logsCli, cliErr := logs.NewClient(getConfigurations(c))
	if cliErr != nil {
		message := "Can not create the logs client. " + cliErr.Error()
		printMessage(INFO, message)
		return nil, errors.New(message)
	}
	printMessage(DEBUG,"Logs Client created.")
	return logsCli, nil
}
// DownloadLogs pages through the account's downloadable log files and saves
// each one below the --path directory (default: current directory). Paging
// starts at the --start marker and stops when the API reports no next marker
// or when a file older than --end is reached.
func DownloadLogs(c *gcli.Context) {
	cli, err := NewCustomerLogClient(c)
	if err != nil {
		os.Exit(1)
	}
	req := logs.ListLogFilesRequest{}
	if val, success := getVal("start", c); success {
		req.Marker = val
	}
	filePath := "."
	if val, success := getVal("path", c); success {
		filePath = val
		printMessage(DEBUG,fmt.Sprintf("Downloading log files under: %s", filePath))
	} else {
		printMessage(DEBUG,"Downloading log files into current directory..")
	}
	req.Limit = getListLogsCommandDefaultSize()
	endDate := ""
	if val, success := getVal("end", c); success {
		endDate = val
	}
	printMessage(DEBUG,"List Downloadable Logs request prepared from flags, sending request to Opsgenie..")
	for {
		response, err := cli.ListLogFiles(nil, &req)
		if err != nil {
			printMessage(ERROR, err.Error())
			os.Exit(1)
		}
		// An empty API marker means the last page was reached.
		if response.Marker == "" {
			printMessage(DEBUG,"Successfully downloaded all the files")
			break
		}
		// Download this page; the helper returns the marker to continue
		// from, or "" once the end date was passed.
		req.Marker = getLinksAndDownloadTheFile(response.Logs, endDate, filePath, cli)
		if req.Marker == "" {
			printMessage(DEBUG,"Successfully downloaded all the files")
			break
		}
	}
}
// getLinksAndDownloadTheFile generates a download link for each received log
// file and downloads it into filePath. It returns the date prefix of the
// last file processed (used as the next pagination marker), or "" when the
// endDate cut-off was reached so the caller stops paging.
func getLinksAndDownloadTheFile(receivedLogs []logs.Log, endDate string, filePath string, cli *logs.Client) string {
	currentFileDate := ""
	for _, log := range receivedLogs {
		downloadResponse, err := cli.GenerateLogFileDownloadLink(nil, &logs.GenerateLogFileDownloadLinkRequest{
			FileName: log.FileName,
		})
		// Throttle link-generation requests to 2 per second regardless of
		// whether the call succeeded.
		time.Sleep(time.Duration(500 * time.Millisecond))
		if err != nil {
			// Best effort: a failed link generation skips this file only.
			printMessage(DEBUG,fmt.Sprintf("Error: %s while downloading log file: %s, but proceding rest of the log files", err.Error(), log.FileName))
			continue
		}
		// Strip the last 5 characters of the file name to obtain its date —
		// presumably a 5-char extension such as ".json"; TODO confirm.
		currentFileDate = log.FileName[:len(log.FileName)-5]
		if endDate == "" || checkDate(endDate, currentFileDate) {
			err := downloadFile(filePath+fmt.Sprintf("/%s", log.FileName), downloadResponse.LogFileDownloadLink)
			if err != nil {
				printMessage(ERROR,err.Error())
				os.Exit(1)
			}
			printMessage(DEBUG,fmt.Sprintf("Successfully downloaded file: %s", log.FileName))
		} else {
			// File is past the requested end date: signal the caller to stop.
			currentFileDate = ""
			break
		}
	}
	return currentFileDate
}
// downloadFile fetches url via HTTP GET and writes the response body to
// filepath, streaming it with io.Copy.
//
// Fixes over the original:
//   - the HTTP status code is now checked, so an error page is no longer
//     silently saved as if it were log content;
//   - the request is issued before the destination file is created, so a
//     failed request no longer leaves an empty file behind.
func downloadFile(filepath string, url string) error {
	// Get the data first.
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected HTTP status %s while downloading %s", resp.Status, url)
	}
	// Create the destination file.
	out, err := os.Create(filepath)
	if err != nil {
		return err
	}
	defer out.Close()
	// Stream the body to the file.
	_, err = io.Copy(out, resp.Body)
	return err
}
// checkDate compares two dash-separated dates component by component and
// reports whether endDate is on or after currentFileDate. Components are
// parsed as integers; equal prefixes fall through to the next component,
// and fully equal dates yield true.
func checkDate(endDate string, currentFileDate string) bool {
	endParts := strings.Split(endDate, "-")
	fileParts := strings.Split(currentFileDate, "-")
	for idx := range endParts {
		var endNum, fileNum int
		fmt.Sscanf(endParts[idx], "%d", &endNum)
		fmt.Sscanf(fileParts[idx], "%d", &fileNum)
		switch {
		case endNum > fileNum:
			return true
		case fileNum > endNum:
			return false
		}
	}
	return true
}
|
package main
import (
"fmt"
"strings"
)
// wordFrequency counts case-insensitive word occurrences in text (after
// trimming the punctuation characters .!?;', from each word) and prints
// every word that appears more than once, one "word count" pair per line.
// Map iteration order makes the output order unspecified.
func wordFrequency(text string) {
	counts := make(map[string]int)
	for _, raw := range strings.Fields(text) {
		cleaned := strings.ToLower(strings.Trim(raw, ".!?;',"))
		counts[cleaned]++
	}
	for word, count := range counts {
		if count > 1 {
			fmt.Printf("%s %v\n", word, count)
		}
	}
}
// countWords runs wordFrequency on a small fixed sample sentence.
func countWords() {
	const sample = "A aa b aaa, aaaa a aa. B bb!"
	wordFrequency(sample)
}
|
package autoscaler
import "fmt"
// InstanceVariety identifies one launchable combination of an instance type
// and the subnet it would run in (presumably AWS EC2 — the Subnet type has
// a SubnetID; confirm against the Subnet definition).
type InstanceVariety struct {
	InstanceType string
	Subnet Subnet
}
// Capacity returns the capacity associated with the variety's instance
// type, delegating to CapacityFromInstanceType; any lookup error is
// passed through unchanged.
func (v InstanceVariety) Capacity() (float64, error) {
	return CapacityFromInstanceType(v.InstanceType)
}
// SortInstanceVarietiesByCapacity implements sort.Interface, ordering
// varieties by ascending capacity with subnet ID and instance type as
// tie-breakers (see Less).
type SortInstanceVarietiesByCapacity []InstanceVariety

// Len returns the number of varieties (sort.Interface).
func (s SortInstanceVarietiesByCapacity) Len() int {
	return len(s)
}

// Swap exchanges elements i and j in place (sort.Interface).
func (s SortInstanceVarietiesByCapacity) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}
// Less orders varieties by capacity, then subnet ID, then instance type.
// It panics if a capacity lookup fails or if the two elements compare
// equal on every key (duplicates are treated as a programming error).
func (s SortInstanceVarietiesByCapacity) Less(i, j int) bool {
	left, err := s[i].Capacity()
	if err != nil {
		panic(err)
	}
	right, err := s[j].Capacity()
	if err != nil {
		panic(err)
	}
	switch {
	case left != right:
		return left < right
	case s[i].Subnet != s[j].Subnet:
		return s[i].Subnet.SubnetID < s[j].Subnet.SubnetID
	case s[i].InstanceType != s[j].InstanceType:
		return s[i].InstanceType < s[j].InstanceType
	}
	panic(fmt.Sprintf("%#v and %#v must be different", s[i], s[j]))
}
|
package main
import (
"os"
"github.com/nyks06/go-logger"
)
// main is a walkthrough of the go-logger API: registering output backends,
// toggling color and enabled state, emitting one message per severity
// level, and closing the logger.
func main() {
	// First of all, you need to init the logger.
	// You only have to do it once in your program, saving the returned pointer.
	// These calls show how to add each logger type.
	// You can add as many loggers as you want, even several of the same type
	// (except for syslog, for the moment).
	logger.AddConsoleLogger(os.Stderr)
	logger.AddFileLogger("./log.file")
	// Windows way to add a syslog logger (explicit network address).
	logger.AddSyslogLogger("udp", "127.0.0.1:514", "syslog-Tag")
	// Unix way to add a syslog logger (empty network/address uses the local syslog).
	logger.AddSyslogLogger("", "", "syslog-Tag")
	// You can enable or disable the color display with a simple function.
	// By default, the text is in color (configurable only for console output).
	// You can check whether color display is enabled via a bool-returning function.
	logger.DisableColor()
	logger.EnableColor()
	if logger.CheckColorStatus() {
		//Color is enabled
	} else {
		//Color is not enabled
	}
	// You can also enable or disable the whole logger at any moment.
	// As with color, you can query the logger's enabled status.
	// When the logger is disabled, messages are discarded. It is enabled by default.
	logger.Disable()
	logger.Enable()
	if logger.IsEnabled() {
		//Logger is enabled
	} else {
		//Logger is disabled
	}
	// You can also toggle each backend (file, console, syslog) individually.
	logger.DisableConsoleLogger()
	logger.DisableFileLogger()
	logger.DisableSyslogLogger()
	logger.EnableConsoleLogger()
	logger.EnableFileLogger()
	logger.EnableSyslogLogger()
	// Messages are emitted with the function named after the desired level.
	// Messages accept printf-style formatting, as with fmt.Printf.
	logger.Debug("This is a %s message", "debug")
	logger.Info("This is an info message - %d", 42)
	logger.Notice("This is a notice message")
	logger.Warning("This is a warning message")
	logger.Error("This is an error message")
	logger.Critical("This is a critical error message")
	logger.Alert("This is an alert error message")
	logger.Emergency("This is a emergency message")
	// Finally, close all opened files with logger.Close().
	logger.Close()
}
|
/*
Copyright © 2021 NAME HERE <EMAIL ADDRESS>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"context"
"fmt"
"github.com/hejianlai/kubeimg/pkg/client"
"github.com/hejianlai/kubeimg/pkg/tools"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
var (
	// example is the verbatim help text shown for the image subcommand.
	// Note it contains no printf-style verbs.
	example = `
# List all deployment images version in ps output format.
kubeimg image
# List the deployment images version of the specified namespaces
kubeimg image -n <namespace>
`
)
// Images lists every deployment container image in the targeted namespace
// (or all namespaces when the flag is empty) as tab-separated rows with a
// NAMESPACE/DEPLOY/CONTAINER/IMAGE header, printed via tools.Fprint.
//
// Fix: the original discarded the errors from the flag lookup and the
// Deployments List call; a failed List would then dereference a nil list.
// Errors are now returned to the caller.
func Images() error {
	clientset := client.Clientset()
	namespace, err := rootCmd.Flags().GetString("namespace")
	if err != nil {
		return err
	}
	deploymentList, err := clientset.AppsV1().Deployments(namespace).List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		return err
	}
	rows := make([]string, 0, len(deploymentList.Items)+1)
	rows = append(rows, "NAMESPACE\tDEPLOY\tCONTAINER\tIMAGE")
	for i := 0; i < len(deploymentList.Items); i++ {
		d := deploymentList.Items[i]
		for j := 0; j < len(d.Spec.Template.Spec.Containers); j++ {
			c := d.Spec.Template.Spec.Containers[j]
			rows = append(rows, d.Namespace+"\t"+d.Name+"\t"+c.Name+"\t"+c.Image)
		}
	}
	tools.Fprint(rows)
	return nil
}
// imageCmd represents the image command.
//
// Fix: the original used fmt.Sprintf(example, "kubeimg"), but example has no
// format verbs, so fmt appended "%!(EXTRA string=kubeimg)" to the help text.
// fmt.Sprint(example) yields the text verbatim.
var imageCmd = &cobra.Command{
	Use:     "image",
	Short:   "List the deployment images version in ps output format",
	Long:    `List the deployment images version in ps output format.`,
	Example: fmt.Sprint(example),
	Run: func(cmd *cobra.Command, args []string) {
		if err := Images(); err != nil {
			panic(err.Error())
		}
	},
}
// init wires the image subcommand into the root command.
func init() {
	rootCmd.AddCommand(imageCmd)
	// Here you will define your flags and configuration settings.
	// Cobra supports Persistent Flags which will work for this command
	// and all subcommands, e.g.:
	// imageCmd.PersistentFlags().String("foo", "", "A help for foo")
	// Cobra supports local flags which will only run when this command
	// is called directly, e.g.:
	// imageCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
}
|
package main
import (
"testing"
)
// The benchmark name suffix encodes the two benchmarkRegister arguments
// as <first>_<second> (their semantics are defined by benchmarkRegister,
// which lives elsewhere in this package).
func BenchmarkMasterRegister1_5(b *testing.B) {
	benchmarkRegister(1, 5, b)
}
func BenchmarkMasterRegister10_5(b *testing.B) {
	benchmarkRegister(10, 5, b)
}
func BenchmarkMasterRegister100_5(b *testing.B) {
	benchmarkRegister(100, 5, b)
}
func BenchmarkMasterRegister1000_5(b *testing.B) {
	benchmarkRegister(1000, 5, b)
}
// BenchmarkMasterRegister5000_5 benchmarks registration at the 5000_5
// configuration. Fix: the original passed 1000 despite the 5000 in the
// name (copy-paste from the 1000_5 sibling), so this case duplicated the
// 1000_5 benchmark instead of measuring 5000.
func BenchmarkMasterRegister5000_5(b *testing.B) {
	benchmarkRegister(5000, 5, b)
}
// Same benchmark family with 10 as the second benchmarkRegister argument.
func BenchmarkMasterRegister1_10(b *testing.B) {
	benchmarkRegister(1, 10, b)
}
func BenchmarkMasterRegister10_10(b *testing.B) {
	benchmarkRegister(10, 10, b)
}
func BenchmarkMasterRegister100_10(b *testing.B) {
	benchmarkRegister(100, 10, b)
}
func BenchmarkMasterRegister1000_10(b *testing.B) {
	benchmarkRegister(1000, 10, b)
}
// BenchmarkMasterRegister5000_10 benchmarks registration at the 5000_10
// configuration. Fix: the original passed 1000 despite the 5000 in the
// name, duplicating the 1000_10 benchmark.
func BenchmarkMasterRegister5000_10(b *testing.B) {
	benchmarkRegister(5000, 10, b)
}
|
package logs
import (
"errors"
"fmt"
"os"
"strings"
"sync"
"testing"
"time"
"github.com/10gen/realm-cli/internal/cli"
"github.com/10gen/realm-cli/internal/cloud/realm"
"github.com/10gen/realm-cli/internal/utils/test/assert"
"github.com/10gen/realm-cli/internal/utils/test/mock"
)
const (
	// testDateFormat is the timestamp layout used when comparing polling
	// start times in these tests; it deliberately omits sub-second
	// precision so small scheduling jitter does not fail assertions.
	testDateFormat = "2006-01-02T15:04:05-0700" // avoid millisecond precision
)
// TestLogsList covers the non-tail `logs list` handler: propagation of app
// lookup and log fetch errors, and the exact rendered output for a
// successful fetch.
func TestLogsList(t *testing.T) {
	t.Run("should return an error when client fails to find app", func(t *testing.T) {
		realmClient := mock.RealmClient{}
		realmClient.FindAppsFn = func(filter realm.AppFilter) ([]realm.App, error) {
			return nil, errors.New("something bad happened")
		}
		cmd := &CommandList{listInputs{}}
		err := cmd.Handler(nil, nil, cli.Clients{Realm: realmClient})
		assert.Equal(t, errors.New("something bad happened"), err)
	})
	t.Run("should return an error when client fails to get logs", func(t *testing.T) {
		realmClient := mock.RealmClient{}
		realmClient.FindAppsFn = func(filter realm.AppFilter) ([]realm.App, error) {
			return []realm.App{{}}, nil
		}
		realmClient.LogsFn = func(groupID, appID string, opts realm.LogsOptions) (realm.Logs, error) {
			return nil, errors.New("something bad happened")
		}
		cmd := &CommandList{}
		err := cmd.Handler(nil, nil, cli.Clients{Realm: realmClient})
		assert.Equal(t, errors.New("something bad happened"), err)
	})
	t.Run("should print logs returned by the client", func(t *testing.T) {
		realmClient := mock.RealmClient{}
		realmClient.FindAppsFn = func(filter realm.AppFilter) ([]realm.App, error) {
			return []realm.App{{}}, nil
		}
		// The mock returns logs newest-first; the expected output below shows
		// they are printed oldest-first with duration, type, name and status.
		realmClient.LogsFn = func(groupID, appID string, opts realm.LogsOptions) (realm.Logs, error) {
			return realm.Logs{
				{
					Type: realm.LogTypeServiceStreamFunction,
					Messages: []interface{}{"a test log message"},
					Started: time.Date(2021, time.June, 22, 7, 54, 42, 0, time.UTC),
					Completed: time.Date(2021, time.June, 22, 7, 54, 43, 234_000_000, time.UTC),
					FunctionName: "func0",
				},
				{
					Type: realm.LogTypeScheduledTrigger,
					Messages: []interface{}{"one message", "two message", "red message", "blue message"},
					Started: time.Date(2020, time.June, 22, 7, 54, 42, 0, time.UTC),
					Completed: time.Date(2020, time.June, 22, 7, 54, 42, 123_000_000, time.UTC),
					EventSubscriptionName: "suessTrigger",
				},
				{
					Type: realm.LogTypeAuth,
					Error: "something bad happened",
					ErrorCode: "Test",
					Started: time.Date(2019, time.June, 22, 7, 54, 42, 0, time.UTC),
					Completed: time.Date(2019, time.June, 22, 7, 54, 42, 5_000_000, time.UTC),
				},
			}, nil
		}
		out, ui := mock.NewUI()
		cmd := &CommandList{listInputs{ProjectInputs: cli.ProjectInputs{Project: "project", App: "test-app"}}}
		err := cmd.Handler(nil, ui, cli.Clients{Realm: realmClient})
		assert.Nil(t, err)
		assert.Equal(t, `2019-06-22T07:54:42.000+0000 [5ms] Authentication: TestError - something bad happened
2020-06-22T07:54:42.000+0000 [123ms] Trigger -> Scheduled suessTrigger: OK
one message
two message
red message
blue message
2021-06-22T07:54:42.000+0000 [1.234s] Stream Function -> Service func0: OK
a test log message
`, out.String())
	})
}
// TestLogsListTail covers the --tail polling loop: one sub-test stops the
// loop via a shutdown signal after a fixed number of polls, the other
// stops it when a Logs API call fails, also verifying the tailLookBehind
// cap on the first batch.
func TestLogsListTail(t *testing.T) {
	t.Run("should poll for logs until a shutdown signal is received", func(t *testing.T) {
		var logIdx int
		// One batch per poll; each batch carries a distinct year so the
		// printed output identifies the poll it came from.
		testLogs := []realm.Logs{
			{{
				Type: realm.LogTypeAuth,
				Started: time.Date(2019, time.June, 22, 7, 54, 42, 0, time.UTC),
				Completed: time.Date(2019, time.June, 22, 7, 54, 42, 5_000_000, time.UTC),
				Messages: []interface{}{"initial log"},
			}},
			{{
				Type: realm.LogTypeAuth,
				Started: time.Date(2020, time.June, 22, 7, 54, 42, 0, time.UTC),
				Completed: time.Date(2020, time.June, 22, 7, 54, 42, 5_000_000, time.UTC),
				Messages: []interface{}{"second log"},
			}},
			{{
				Type: realm.LogTypeAuth,
				Started: time.Date(2021, time.June, 22, 7, 54, 42, 0, time.UTC),
				Completed: time.Date(2021, time.June, 22, 7, 54, 42, 5_000_000, time.UTC),
				Messages: []interface{}{"third log"},
			}},
		}
		// Record the Start option of every poll to check the polling cadence.
		startDates := make([]time.Time, len(testLogs))
		var wg sync.WaitGroup
		wg.Add(len(testLogs))
		realmClient := mock.RealmClient{}
		realmClient.FindAppsFn = func(filter realm.AppFilter) ([]realm.App, error) {
			return []realm.App{{}}, nil
		}
		realmClient.LogsFn = func(groupID, appID string, opts realm.LogsOptions) (realm.Logs, error) {
			logs := testLogs[logIdx]
			startDates[logIdx] = opts.Start
			wg.Done()
			logIdx++
			return logs, nil
		}
		out, ui := mock.NewUI()
		// Deliver the interrupt only after all three polls have happened.
		sigShutdown := make(chan os.Signal, 1)
		go func() {
			wg.Wait()
			sigShutdown <- os.Interrupt
		}()
		cmd := &CommandList{listInputs{sigShutdown: sigShutdown, Tail: true}}
		cmdStart := time.Now()
		assert.Nil(t, cmd.Handler(nil, ui, cli.Clients{Realm: realmClient}))
		assert.Equal(t, `2019-06-22T07:54:42.000+0000 [5ms] Authentication: OK
initial log
2020-06-22T07:54:42.000+0000 [5ms] Authentication: OK
second log
2021-06-22T07:54:42.000+0000 [5ms] Authentication: OK
third log
`, out.String())
		// The first poll has no lower bound; subsequent polls advance the
		// Start option by roughly 5 seconds each (compared at second
		// precision via testDateFormat).
		assert.Equal(t, time.Time{}, startDates[0])
		for i := 0; i < len(startDates)-1; i++ {
			expectedStart := cmdStart.Add(time.Duration(5*i) * time.Second).Format(testDateFormat)
			actualStart := startDates[i+1].Format(testDateFormat)
			assert.Equal(t, expectedStart, actualStart)
		}
	})
	t.Run("should poll for logs until an api call returns an error", func(t *testing.T) {
		// Shrink the look-behind so the third (oldest) entry of the first
		// batch is skipped; restore the package-level value afterwards.
		origTailLookBehind := tailLookBehind
		defer func() { tailLookBehind = origTailLookBehind }()
		tailLookBehind = 2
		testLogs := []realm.Logs{
			{
				{
					Type: realm.LogTypeAuth,
					Started: time.Date(2021, time.June, 22, 7, 54, 42, 0, time.UTC),
					Completed: time.Date(2021, time.June, 22, 7, 54, 42, 5_000_000, time.UTC),
					Messages: []interface{}{"lower log"},
				},
				{
					Type: realm.LogTypeAuth,
					Started: time.Date(2020, time.June, 22, 7, 54, 42, 0, time.UTC),
					Completed: time.Date(2020, time.June, 22, 7, 54, 42, 5_000_000, time.UTC),
					Messages: []interface{}{"upper log"},
				},
				{
					Type: realm.LogTypeAuth,
					Started: time.Date(2019, time.June, 22, 7, 54, 42, 0, time.UTC),
					Completed: time.Date(2019, time.June, 22, 7, 54, 42, 5_000_000, time.UTC),
					Messages: []interface{}{"skipped log"},
				},
			},
			{{
				Type: realm.LogTypeAuth,
				Started: time.Date(2022, time.June, 22, 7, 54, 42, 0, time.UTC),
				Completed: time.Date(2022, time.June, 22, 7, 54, 42, 5_000_000, time.UTC),
				Messages: []interface{}{"tailed log"},
			}},
		}
		realmClient := mock.RealmClient{}
		realmClient.FindAppsFn = func(filter realm.AppFilter) ([]realm.App, error) {
			return []realm.App{{}}, nil
		}
		// Serve the prepared batches, then fail to terminate the loop.
		var counter int
		realmClient.LogsFn = func(groupID, appID string, opts realm.LogsOptions) (realm.Logs, error) {
			defer func() { counter++ }()
			if counter < len(testLogs) {
				return testLogs[counter], nil
			}
			return nil, errors.New("something bad happened")
		}
		out, ui := mock.NewUI()
		cmd := &CommandList{listInputs{Tail: true}}
		err := cmd.Handler(nil, ui, cli.Clients{Realm: realmClient})
		assert.Equal(t, errors.New("something bad happened"), err)
		assert.Equal(t, `2020-06-22T07:54:42.000+0000 [5ms] Authentication: OK
upper log
2021-06-22T07:54:42.000+0000 [5ms] Authentication: OK
lower log
2022-06-22T07:54:42.000+0000 [5ms] Authentication: OK
tailed log
`, out.String())
	})
}
// TestLogNameDisplay is a table test for logNameDisplay: for each log type
// the human-readable name is preferred, falling back to the corresponding
// ID when no name is set; types with no name concept yield "".
// Expected values include the leading space the display helper emits.
func TestLogNameDisplay(t *testing.T) {
	for _, tc := range []struct {
		description string
		log realm.Log
		name string
	}{
		{
			description: "nothing for a log type that has no name",
		},
		{
			description: "name for an auth trigger log",
			log: realm.Log{Type: realm.LogTypeAuthTrigger, EventSubscriptionID: "id", EventSubscriptionName: "name"},
			name: " name",
		},
		{
			description: "id for an auth trigger log without a name",
			log: realm.Log{Type: realm.LogTypeAuthTrigger, EventSubscriptionID: "id"},
			name: " id",
		},
		{
			description: "name for a database trigger log",
			log: realm.Log{Type: realm.LogTypeDBTrigger, EventSubscriptionID: "id", EventSubscriptionName: "name"},
			name: " name",
		},
		{
			description: "id for a database trigger log without a name",
			log: realm.Log{Type: realm.LogTypeDBTrigger, EventSubscriptionID: "id"},
			name: " id",
		},
		{
			description: "name for a scheduled trigger log",
			log: realm.Log{Type: realm.LogTypeScheduledTrigger, EventSubscriptionID: "id", EventSubscriptionName: "name"},
			name: " name",
		},
		{
			description: "id for a scheduled trigger log without a name",
			log: realm.Log{Type: realm.LogTypeScheduledTrigger, EventSubscriptionID: "id"},
			name: " id",
		},
		{
			description: "name for a function log",
			log: realm.Log{Type: realm.LogTypeFunction, FunctionID: "id", FunctionName: "name"},
			name: " name",
		},
		{
			description: "id for a function log without a name",
			log: realm.Log{Type: realm.LogTypeFunction, FunctionID: "id"},
			name: " id",
		},
		{
			description: "name for a service stream function log",
			log: realm.Log{Type: realm.LogTypeServiceStreamFunction, FunctionName: "name"},
			name: " name",
		},
		{
			description: "name for a service function log",
			log: realm.Log{Type: realm.LogTypeServiceFunction, FunctionName: "name"},
			name: " name",
		},
		{
			description: "name for an auth log",
			log: realm.Log{Type: realm.LogTypeAuth, AuthEvent: realm.LogAuthEvent{Provider: "provider"}},
			name: " provider",
		},
		{
			description: "name for a webhook log",
			log: realm.Log{Type: realm.LogTypeWebhook, IncomingWebhookID: "id", IncomingWebhookName: "name"},
			name: " name",
		},
		{
			description: "id for a webhook log without a name",
			log: realm.Log{Type: realm.LogTypeWebhook, IncomingWebhookID: "id"},
			name: " id",
		},
	} {
		t.Run("should display name for "+tc.description, func(t *testing.T) {
			assert.Equal(t, tc.name, logNameDisplay(tc.log))
		})
	}
}
// TestLogStatusDisplay is a table test for logStatusDisplay: "OK" for a log
// with no error, and "<ErrorCode>Error - <message>" otherwise, with the
// code defaulting to empty for a generic error.
func TestLogStatusDisplay(t *testing.T) {
	for _, tc := range []struct {
		description string
		log realm.Log
		status string
	}{
		{
			description: "a log without error",
			status: "OK",
		},
		{
			description: "a log with a generic error",
			log: realm.Log{Error: "something bad happened"},
			status: "Error - something bad happened",
		},
		{
			description: "a log with a custom error",
			log: realm.Log{Error: "something bad happened", ErrorCode: "Custom"},
			status: "CustomError - something bad happened",
		},
	} {
		t.Run("should display status for "+tc.description, func(t *testing.T) {
			assert.Equal(t, tc.status, logStatusDisplay(tc.log))
		})
	}
}
// TestLogTypeDisplay checks the human-readable label for every known log
// type, and logs the widest label (useful when tuning column alignment).
func TestLogTypeDisplay(t *testing.T) {
	var maxWidth int
	for _, tc := range []struct {
		logType string
		display string
	}{
		{realm.LogTypeAPI, "Other"},
		{realm.LogTypeAPIKey, "API Key"},
		{realm.LogTypeAuth, "Authentication"},
		{realm.LogTypeAuthTrigger, "Trigger -> Auth"},
		{realm.LogTypeDBTrigger, "Trigger -> Database"},
		{realm.LogTypeFunction, "Function"},
		{realm.LogTypeGraphQL, "GraphQL"},
		{realm.LogTypePush, "Push Notification"},
		{realm.LogTypeScheduledTrigger, "Trigger -> Scheduled"},
		{realm.LogTypeSchemaAdditiveChange, "Schema -> Additive Change"},
		{realm.LogTypeSchemaGeneration, "Schema -> Generation"},
		{realm.LogTypeSchemaValidation, "Schema -> Validation"},
		{realm.LogTypeServiceFunction, "Function -> Service"},
		{realm.LogTypeServiceStreamFunction, "Stream Function -> Service"},
		{realm.LogTypeStreamFunction, "Stream Function"},
		{realm.LogTypeSyncClientWrite, "Sync -> Write"},
		{realm.LogTypeSyncConnectionEnd, "Sync -> Connection End"},
		{realm.LogTypeSyncConnectionStart, "Sync -> Connection Start"},
		{realm.LogTypeSyncError, "Sync -> Error"},
		{realm.LogTypeSyncOther, "Sync -> Other"},
		{realm.LogTypeSyncSessionEnd, "Sync -> Session End"},
		{realm.LogTypeSyncSessionStart, "Sync -> Session Start"},
		{realm.LogTypeWebhook, "Webhook"},
	} {
		t.Run(fmt.Sprintf("should show proper display for log type %s", strings.ToLower(tc.logType)), func(t *testing.T) {
			display := logTypeDisplay(realm.Log{Type: tc.logType})
			assert.Equal(t, tc.display, display)
			if maxWidth < len(display) {
				maxWidth = len(display)
			}
		})
	}
	t.Logf("the max width for log type display is: %d", maxWidth)
}
// TestLogTypeFormat verifies the "schema" type filter expands into all
// three concrete schema log types in the Logs request options.
func TestLogTypeFormat(t *testing.T) {
	t.Run("should query for all types of schema logs for log type schema", func(t *testing.T) {
		realmClient := mock.RealmClient{}
		realmClient.FindAppsFn = func(filter realm.AppFilter) ([]realm.App, error) {
			return []realm.App{{}}, nil
		}
		// Capture the options the handler passes to the Logs API.
		var logsOpts realm.LogsOptions
		realmClient.LogsFn = func(groupID, appID string, opts realm.LogsOptions) (realm.Logs, error) {
			logsOpts = opts
			return realm.Logs{}, nil
		}
		typeInputs := listInputs{Types: []string{logTypeSchema}}
		logTypes := []string{realm.LogTypeSchemaAdditiveChange, realm.LogTypeSchemaGeneration, realm.LogTypeSchemaValidation}
		cmd := &CommandList{typeInputs}
		assert.Nil(t, cmd.Handler(nil, nil, cli.Clients{Realm: realmClient}))
		assert.Equal(t, logTypes, logsOpts.Types)
	})
}
|
// This file was generated for SObject ContentVersionComment, API Version v43.0 at 2018-07-30 03:47:50.575198719 -0400 EDT m=+36.919190462
package sobjects
import (
"fmt"
"strings"
)
// ContentVersionComment mirrors the Salesforce ContentVersionComment
// SObject (generated for API v43.0); common fields come from the embedded
// BaseSObject.
type ContentVersionComment struct {
	BaseSObject
	ContentDocumentId string `force:",omitempty"`
	ContentVersionId string `force:",omitempty"`
	CreatedDate string `force:",omitempty"`
	Id string `force:",omitempty"`
	UserComment string `force:",omitempty"`
}
// ApiName returns the Salesforce API name of this SObject.
func (t *ContentVersionComment) ApiName() string {
	return "ContentVersionComment"
}
// String renders the record as a multi-line, human-readable description.
// t.Name is presumably provided by the embedded BaseSObject — confirm
// against its definition.
func (t *ContentVersionComment) String() string {
	var sb strings.Builder
	fmt.Fprintf(&sb, "ContentVersionComment #%s - %s\n", t.Id, t.Name)
	fmt.Fprintf(&sb, "\tContentDocumentId: %v\n", t.ContentDocumentId)
	fmt.Fprintf(&sb, "\tContentVersionId: %v\n", t.ContentVersionId)
	fmt.Fprintf(&sb, "\tCreatedDate: %v\n", t.CreatedDate)
	fmt.Fprintf(&sb, "\tId: %v\n", t.Id)
	fmt.Fprintf(&sb, "\tUserComment: %v\n", t.UserComment)
	return sb.String()
}
// ContentVersionCommentQueryResponse is the SOQL query response envelope
// whose Records carry ContentVersionComment rows.
type ContentVersionCommentQueryResponse struct {
	BaseQuery
	Records []ContentVersionComment `json:"Records" force:"records"`
}
|
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package local
import (
"cmp"
"slices"
"github.com/google/uuid"
"github.com/pingcap/tidb/br/pkg/lightning/backend"
)
// DiskUsage is an interface to obtain the size occupied locally of all engines
type DiskUsage interface {
// EngineFileSizes obtains the size occupied locally of all engines managed
// by this backend. This method is used to compute disk quota.
// It can return nil if the content are all stored remotely.
EngineFileSizes() (res []backend.EngineFileSize)
}
// CheckDiskQuota verifies if the total engine file size is below the given
// quota. If the quota is exceeded, this method returns an array of engines,
// which after importing can decrease the total size below quota. Engines
// already importing are sorted first (and counted rather than returned);
// the rest are visited smallest-first so the largest engines end up in
// largeEngines once the running total crosses the quota.
func CheckDiskQuota(mgr DiskUsage, quota int64) (
	largeEngines []uuid.UUID,
	inProgressLargeEngines int,
	totalDiskSize int64,
	totalMemSize int64,
) {
	engineSizes := mgr.EngineFileSizes()
	slices.SortFunc(engineSizes, func(a, b backend.EngineFileSize) int {
		switch {
		case a.IsImporting == b.IsImporting:
			return cmp.Compare(a.DiskSize+a.MemSize, b.DiskSize+b.MemSize)
		case a.IsImporting:
			return -1
		default:
			return 1
		}
	})
	for _, engine := range engineSizes {
		totalDiskSize += engine.DiskSize
		totalMemSize += engine.MemSize
		// Every engine encountered after the quota is crossed is "large".
		if totalDiskSize+totalMemSize > quota {
			if engine.IsImporting {
				inProgressLargeEngines++
			} else {
				largeEngines = append(largeEngines, engine.UUID)
			}
		}
	}
	return
}
|
package gov
import (
"github.com/irisnet/irishub/app/v1/asset"
"github.com/irisnet/irishub/app/v1/auth"
distr "github.com/irisnet/irishub/app/v1/distribution"
"github.com/irisnet/irishub/app/v1/gov"
"github.com/irisnet/irishub/app/v1/mint"
"github.com/irisnet/irishub/app/v1/params"
"github.com/irisnet/irishub/app/v1/service"
"github.com/irisnet/irishub/app/v1/slashing"
"github.com/irisnet/irishub/app/v1/stake"
"github.com/irisnet/irishub/app/v2/coinswap"
sdk "github.com/irisnet/irishub/types"
)
// ParamSets maps a parameter subspace name to its ParamSet; ValidateParam
// consults it when validating governance parameter-change proposals.
var ParamSets = make(map[string]params.ParamSet)

// init registers every governable module's ParamSet into ParamSets.
func init() {
	params.RegisterParamSet(ParamSets, &mint.Params{}, &slashing.Params{}, &service.Params{}, &auth.Params{}, &stake.Params{}, &distr.Params{}, &asset.Params{}, &gov.GovParams{}, &coinswap.Params{})
}
// DepositOutput is the JSON-serializable view of a governance deposit.
type DepositOutput struct {
	Depositor sdk.AccAddress `json:"depositor"` // Address of the depositor
	ProposalID int64 `json:"proposal_id"` // proposalID of the proposal
	Amount []string `json:"amount"` // Deposit amount
}
// KvPair is a simple JSON key/value pair.
type KvPair struct {
	K string `json:"key"`
	V string `json:"value"`
}
// NormalizeVoteOption - normalize user specified vote option.
// Only the exact canonical and lowercase/snake_case spellings are mapped;
// anything else is returned unchanged.
func NormalizeVoteOption(option string) string {
	canonical := map[string]string{
		"Yes": "Yes", "yes": "Yes",
		"Abstain": "Abstain", "abstain": "Abstain",
		"No": "No", "no": "No",
		"NoWithVeto": "NoWithVeto", "no_with_veto": "NoWithVeto",
	}
	if normalized, ok := canonical[option]; ok {
		return normalized
	}
	return option
}
// NormalizeProposalType - normalize user specified proposal type.
// Maps the exact canonical and snake_case spellings; anything else is
// returned unchanged.
func NormalizeProposalType(proposalType string) string {
	canonical := map[string]string{
		"Parameter": "Parameter", "parameter": "Parameter",
		"SoftwareUpgrade": "SoftwareUpgrade", "software_upgrade": "SoftwareUpgrade",
		"SystemHalt": "SystemHalt", "system_halt": "SystemHalt",
		"CommunityTaxUsage": "CommunityTaxUsage", "community_tax_usage": "CommunityTaxUsage",
		"TokenAddition": "TokenAddition", "token_addition": "TokenAddition",
	}
	if normalized, ok := canonical[proposalType]; ok {
		return normalized
	}
	return proposalType
}
// NormalizeProposalStatus - normalize user specified proposal status.
// Maps the exact canonical and snake_case spellings; anything else is
// returned unchanged.
func NormalizeProposalStatus(status string) string {
	canonical := map[string]string{
		"DepositPeriod": "DepositPeriod", "deposit_period": "DepositPeriod",
		"VotingPeriod": "VotingPeriod", "voting_period": "VotingPeriod",
		"Passed": "Passed", "passed": "Passed",
		"Rejected": "Rejected", "rejected": "Rejected",
	}
	if normalized, ok := canonical[status]; ok {
		return normalized
	}
	return status
}
// ValidateParam checks a governance parameter-change proposal entry: the
// subspace must be registered in ParamSets, must not be read-only, and the
// key/value pair must pass the subspace's own validation.
func ValidateParam(param gov.Param) error {
	set, registered := ParamSets[param.Subspace]
	if !registered {
		return gov.ErrInvalidParam(gov.DefaultCodespace, param.Subspace)
	}
	if set.ReadOnly() {
		return gov.ErrInvalidParam(gov.DefaultCodespace, param.Subspace)
	}
	_, err := set.Validate(param.Key, param.Value)
	return err
}
|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"bytes"
"context"
"encoding/hex"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/pingcap/log"
"github.com/pingcap/tidb/executor/internal/exec"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/store/driver/backoff"
"github.com/pingcap/tidb/util/chunk"
tikverr "github.com/tikv/client-go/v2/error"
"github.com/tikv/client-go/v2/tikv"
"github.com/tikv/client-go/v2/tikvrpc"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
var _ exec.Executor = &CompactTableTiFlashExec{}
const (
compactRequestTimeout = time.Minute * 60 // A single compact request may take at most 1 hour.
compactMaxBackoffSleepMs = 5 * 1000 // Backoff at most 5 seconds for each request.
compactProgressReportInterval = time.Second * 10
)
// getTiFlashStores returns the alive TiFlash stores known to infoschema.
// TODO: maybe we can cache it.
// TODO: Don't use infoschema, to preserve StoreID information.
func getTiFlashStores(ctx sessionctx.Context) ([]infoschema.ServerInfo, error) {
	stores, err := infoschema.GetStoreServerInfo(ctx)
	if err != nil {
		return nil, err
	}
	tiflashStores := make([]infoschema.ServerInfo, 0, len(stores))
	for _, s := range stores {
		if s.ServerType == kv.TiFlash.Name() {
			tiflashStores = append(tiflashStores, s)
		}
	}
	return tiflashStores, nil
}
// CompactTableTiFlashExec represents an executor for "ALTER TABLE [NAME] COMPACT TIFLASH REPLICA" statement.
type CompactTableTiFlashExec struct {
	exec.BaseExecutor
	tableInfo *model.TableInfo
	// partitionIDs holds the user-specified partitions; empty means the
	// whole table (see doCompact / storeCompactTask.work).
	partitionIDs []int64
	// done guards against running the compaction more than once per statement.
	done bool
	tikvStore tikv.Storage
}
// Next implements the Executor Next interface. The statement produces no
// rows; the first call performs the compaction and subsequent calls are
// no-ops.
func (e *CompactTableTiFlashExec) Next(ctx context.Context, chk *chunk.Chunk) error {
	chk.Reset()
	if !e.done {
		e.done = true
		return e.doCompact(ctx)
	}
	return nil
}
// doCompact fans one storeCompactTask out per alive TiFlash store and waits
// for all of them. Per-store failures are surfaced as statement warnings by
// the tasks themselves, so the statement succeeds even when stores fail.
func (e *CompactTableTiFlashExec) doCompact(execCtx context.Context) error {
	vars := e.Ctx().GetSessionVars()
	// Nothing to do when the table has no TiFlash replica.
	if e.tableInfo.TiFlashReplica == nil || e.tableInfo.TiFlashReplica.Count == 0 {
		vars.StmtCtx.AppendWarning(errors.Errorf("compact skipped: no tiflash replica in the table"))
		return nil
	}
	// We will do a TiFlash compact in this way:
	// For each TiFlash instance (in parallel):  <--- This is called "storeCompactTask"
	//   For each partition (in series):
	//     Send a series of compact request for this partition. <--- Handled by "compactOnePhysicalTable"
	tiFlashStores, err := getTiFlashStores(e.Ctx())
	if err != nil {
		return err
	}
	// errgroup.WithContext lets one store's fatal error cancel the others.
	g, ctx := errgroup.WithContext(execCtx)
	// TODO: We may add concurrency control in future.
	for _, store := range tiFlashStores {
		task := &storeCompactTask{
			ctx: ctx,
			parentExec: e,
			targetStore: store,
		}
		g.Go(task.work)
	}
	_ = g.Wait() // Errors have been turned into warnings, let's simply discard them.
	return nil
}
// storeCompactTask compacts a logical table described by parentExec in a targetStore.
type storeCompactTask struct {
	ctx context.Context // Maybe cancelled by other tasks, or parentExec is killed.
	parentExec *CompactTableTiFlashExec
	targetStore infoschema.ServerInfo
	// startAt is recorded when work() begins, for elapsed-time logging.
	startAt time.Time
	// Fields below are used to output the progress in the log.
	allPhysicalTables int
	compactedPhysicalTables int
	lastProgressOutputAt time.Time
}
// work runs the per-store compaction: each physical table (every partition,
// or the single table when unpartitioned) is compacted in series via
// compactOnePhysicalTable. A non-nil return propagates through the errgroup
// only when the failure should also cancel the tasks of other stores
// (stopAllTasks); store-local failures return nil after logging.
func (task *storeCompactTask) work() error {
	// We will :
	// For each partition (in series):
	//   Send a series of compact request for this partition. <--- Handled by "compactOnePhysicalTable"
	var stopAllTasks bool
	var err error
	log.Info("Begin compacting table in a store",
		zap.String("table", task.parentExec.tableInfo.Name.O),
		zap.Int64("table-id", task.parentExec.tableInfo.ID),
		zap.Int64s("partition-id", task.parentExec.partitionIDs),
		zap.String("store-address", task.targetStore.Address),
	)
	task.startAt = time.Now()
	task.lastProgressOutputAt = task.startAt
	if task.parentExec.tableInfo.Partition != nil {
		// There is no need for partition-level concurrency, as TiFlash will limit table compaction one at a time.
		allPartitions := task.parentExec.partitionIDs
		if len(allPartitions) == 0 {
			// There are partitions, but user did not specify partitions.
			for _, definition := range task.parentExec.tableInfo.Partition.Definitions {
				allPartitions = append(allPartitions, definition.ID)
			}
		}
		task.allPhysicalTables = len(allPartitions)
		task.compactedPhysicalTables = 0
		for _, partitionID := range allPartitions {
			stopAllTasks, err = task.compactOnePhysicalTable(partitionID)
			task.compactedPhysicalTables++
			if err != nil {
				// Stop remaining partitions when error happens.
				break
			}
		}
	} else {
		// Unpartitioned table: the table itself is the only physical table.
		task.allPhysicalTables = 1
		task.compactedPhysicalTables = 0
		stopAllTasks, err = task.compactOnePhysicalTable(task.parentExec.tableInfo.ID)
		task.compactedPhysicalTables++
	}
	if err == nil {
		log.Info("Compact table finished in a store",
			zap.Duration("elapsed", time.Since(task.startAt)),
			zap.String("table", task.parentExec.tableInfo.Name.O),
			zap.Int64("table-id", task.parentExec.tableInfo.ID),
			zap.Int64s("partition-id", task.parentExec.partitionIDs),
			zap.String("store-address", task.targetStore.Address),
		)
	}
	if err != nil && stopAllTasks {
		// Propagate the error to the errgroup, to stop tasks for other stores.
		return err
	}
	return nil
}
// logFailure emits a uniform "Compact table failed" warning, tagging it with
// the task's table/store identity plus any caller-supplied fields.
func (task *storeCompactTask) logFailure(otherFields ...zap.Field) {
	fields := make([]zap.Field, 0, 4+len(otherFields))
	fields = append(fields,
		zap.String("table", task.parentExec.tableInfo.Name.O),
		zap.Int64("table-id", task.parentExec.tableInfo.ID),
		zap.Int64s("partition-id", task.parentExec.partitionIDs),
		zap.String("store-address", task.targetStore.Address),
	)
	fields = append(fields, otherFields...)
	log.Warn("Compact table failed", fields...)
}
// logProgressOptionally logs compaction progress, rate-limited to at most
// once per compactProgressReportInterval.
func (task *storeCompactTask) logProgressOptionally() {
	if time.Since(task.lastProgressOutputAt) <= compactProgressReportInterval {
		return
	}
	task.lastProgressOutputAt = time.Now()
	log.Info("Compact table in progress",
		zap.Float64("compacted-ratio", float64(task.compactedPhysicalTables)/float64(task.allPhysicalTables)),
		zap.Duration("elapsed", time.Since(task.startAt)),
		zap.String("table", task.parentExec.tableInfo.Name.O),
		zap.Int64("table-id", task.parentExec.tableInfo.ID),
		zap.Int64s("partition-id", task.parentExec.partitionIDs),
		zap.String("store-address", task.targetStore.Address),
		zap.Int("all-physical-tables", task.allPhysicalTables),
		zap.Int("compacted-physical-tables", task.compactedPhysicalTables),
	)
}
// compactOnePhysicalTable compacts one physical table in the TiFlash store, in an incremental way.
// Returns when compaction is finished. When there are network problems it will retry internally.
//
// There are two kind of errors may be returned:
// A. Error only cancel tasks related with this store, e.g. this store is down even after retry.
//
//	The remaining partitions in this store should be cancelled.
//
// B. Error that should cancel tasks of other stores, e.g. CompactErrorCompactInProgress.
//
//	The remaining partitions in this store should be cancelled, and tasks of other stores should also be cancelled.
//
// During this function, some "problems" will cause it to early return, e.g. physical table not exist in this
// store any more (maybe caused by DDL). No errors will be produced so that remaining partitions will continue
// being compacted.
//
// Returns: (stopAllTasks, err)
func (task *storeCompactTask) compactOnePhysicalTable(physicalTableID int64) (bool, error) {
	var startKey []byte
	for { // This loop is to compact incrementally for all data. Each RPC request will only compact a partial of data.
		// Honor statement kill / context cancellation between rounds; a
		// cancelled context stops tasks for other stores as well.
		if task.ctx.Err() != nil {
			return true, task.ctx.Err()
		}
		task.logProgressOptionally()
		// Each round resumes from where the previous round ended (startKey).
		resp, err := task.sendRequestWithRetry(&tikvrpc.Request{
			Type:    tikvrpc.CmdCompact,
			StoreTp: tikvrpc.TiFlash,
			Req: &kvrpcpb.CompactRequest{
				LogicalTableId:  task.parentExec.tableInfo.ID,
				PhysicalTableId: physicalTableID,
				StartKey:        startKey,
			},
		})
		if err != nil {
			// Even after backoff, the request is still failed.., or the request is cancelled or timed out
			// For example, the store is down. Let's simply don't compact other partitions.
			warn := errors.Errorf("compact on store %s failed: %v", task.targetStore.Address, err)
			task.parentExec.Ctx().GetSessionVars().StmtCtx.AppendWarning(warn)
			task.logFailure(
				zap.Int64("physical-table-id", physicalTableID),
				zap.Error(err))
			return false, warn
		}
		if resp.GetError() != nil {
			switch resp.GetError().GetError().(type) {
			case *kvrpcpb.CompactError_ErrCompactInProgress:
				warn := errors.Errorf("compact on store %s failed: table is compacting in progress", task.targetStore.Address)
				task.parentExec.Ctx().GetSessionVars().StmtCtx.AppendWarning(warn)
				task.logFailure(
					zap.Int64("physical-table-id", physicalTableID),
					zap.Error(warn))
				// TiFlash reported that there are existing compacting for the same table.
				// We should stop the whole SQL execution, including compacting requests to other stores, as repeatedly
				// compacting the same table is a waste of resource.
				return true, warn
			case *kvrpcpb.CompactError_ErrTooManyPendingTasks:
				// The store is already very busy, don't retry and don't compact other partitions.
				warn := errors.Errorf("compact on store %s failed: store is too busy", task.targetStore.Address)
				task.parentExec.Ctx().GetSessionVars().StmtCtx.AppendWarning(warn)
				task.logFailure(
					zap.Int64("physical-table-id", physicalTableID),
					zap.Error(warn))
				return false, warn
			case *kvrpcpb.CompactError_ErrPhysicalTableNotExist:
				// The physical table does not exist, don't retry this partition, but other partitions should still be compacted.
				// This may happen when partition or table is dropped during the long compaction.
				log.Info("Compact physical table skipped",
					zap.String("table", task.parentExec.tableInfo.Name.O),
					zap.Int64("table-id", task.parentExec.tableInfo.ID),
					zap.String("store-address", task.targetStore.Address),
					zap.Any("response-error", resp.GetError().GetError()))
				// We don't need to produce any user warnings.
				return false, nil
			default:
				// Others are unexpected errors, don't retry and don't compact other partitions.
				warn := errors.Errorf("compact on store %s failed: internal error (check logs for details)", task.targetStore.Address)
				task.parentExec.Ctx().GetSessionVars().StmtCtx.AppendWarning(warn)
				task.logFailure(
					zap.Int64("physical-table-id", physicalTableID),
					zap.Any("response-error", resp.GetError().GetError()))
				return false, warn
			}
		}
		if !resp.HasRemaining {
			// All data of this physical table has been compacted.
			return false, nil
		}
		// Let's send more compact requests, as there are remaining data to compact.
		lastEndKey := resp.GetCompactedEndKey()
		// Sanity check: the end key must strictly advance, otherwise the loop
		// would never terminate.
		if len(lastEndKey) == 0 || bytes.Compare(lastEndKey, startKey) <= 0 {
			// The TiFlash server returned an invalid compacted end key.
			// This is unexpected...
			warn := errors.Errorf("compact on store %s failed: internal error (check logs for details)", task.targetStore.Address)
			task.parentExec.Ctx().GetSessionVars().StmtCtx.AppendWarning(warn)
			task.logFailure(
				zap.Int64("physical-table-id", physicalTableID),
				zap.String("compacted-start-key", hex.EncodeToString(resp.GetCompactedStartKey())),
				zap.String("compacted-end-key", hex.EncodeToString(resp.GetCompactedEndKey())),
			)
			return false, warn
		}
		startKey = lastEndKey
	}
}
// sendRequestWithRetry issues a single Compact RPC to the target store,
// backing off and retrying on transient network errors.
func (task *storeCompactTask) sendRequestWithRetry(req *tikvrpc.Request) (*kvrpcpb.CompactResponse, error) {
	bo := backoff.NewBackoffer(task.ctx, compactMaxBackoffSleepMs)
	for {
		resp, err := task.parentExec.tikvStore.
			GetTiKVClient().
			SendRequest(task.ctx, task.targetStore.Address, req, compactRequestTimeout)
		if err != nil {
			cause := errors.Cause(err)
			// Cancellation (e.g. the statement was killed) and deadline
			// expiry are final: do not retry.
			if cause == context.Canceled || cause == context.DeadlineExceeded || status.Code(cause) == codes.Canceled {
				return nil, err
			}
			// Transient failure: sleep via the backoffer; give up once the
			// accumulated sleep budget is exhausted.
			if bo.Backoff(tikv.BoTiFlashRPC(), err) != nil {
				return nil, err
			}
			continue
		}
		if resp.Resp == nil {
			// A response without a body is unexpected; do not retry.
			return nil, tikverr.ErrBodyMissing
		}
		return resp.Resp.(*kvrpcpb.CompactResponse), nil
	}
}
|
package common
import (
"fmt"
"time"
)
// Authentication providers supported by Plik.
const (
	// ProviderGoogle for authentication
	ProviderGoogle = "google"
	// ProviderOVH for authentication
	ProviderOVH = "ovh"
	// ProviderLocal for authentication
	ProviderLocal = "local"
)
// User is a Plik user
type User struct {
	ID       string `json:"id,omitempty"` // "provider:providerID", see GetUserID
	Provider string `json:"provider"`     // one of ProviderGoogle / ProviderOVH / ProviderLocal
	Login    string `json:"login,omitempty"`
	Password string `json:"-"` // bcrypt-style hash set by CreateUserFromParams/UpdateUser; never serialized
	Name     string `json:"name,omitempty"`
	Email    string `json:"email,omitempty"`
	IsAdmin  bool   `json:"admin"`
	MaxFileSize int64 `json:"maxFileSize"` // quota limits; presumably 0 means "use server default" — confirm with callers
	MaxUserSize int64 `json:"maxUserSize"`
	MaxTTL      int   `json:"maxTTL"`
	Tokens    []*Token  `json:"tokens,omitempty"` // API tokens issued via (*User).NewToken
	CreatedAt time.Time `json:"createdAt"`
}
// NewUser create a new user object with its ID derived from the provider
// and the provider-specific identifier.
func NewUser(provider string, providerID string) (user *User) {
	user = &User{
		ID:       GetUserID(provider, providerID),
		Provider: provider,
	}
	return user
}
// GetUserID return user ID from provider and login, in the form
// "provider:providerID".
func GetUserID(provider string, providerID string) string {
	// Plain concatenation instead of fmt.Sprintf: no boxing/reflection for a
	// simple two-part join.
	return provider + ":" + providerID
}
// IsValidProvider return true if the provider string is one of the known
// authentication providers.
func IsValidProvider(provider string) bool {
	return provider == ProviderLocal ||
		provider == ProviderGoogle ||
		provider == ProviderOVH
}
// NewToken creates a new token bound to this user and appends it to the
// user's token list.
func (user *User) NewToken() *Token {
	token := NewToken()
	token.UserID = user.ID
	user.Tokens = append(user.Tokens, token)
	return token
}
// String returns a human readable representation of the user:
// "provider:login", optionally followed by the name and the email.
// (The previous doc comment was copy-pasted from NewToken.)
func (user *User) String() string {
	str := user.Provider + ":" + user.Login
	if user.Name != "" {
		str += " " + user.Name
	}
	if user.Email != "" {
		str += " " + user.Email
	}
	return str
}
// CreateUserFromParams return a user object ready to be inserted in the metadata backend
func CreateUserFromParams(userParams *User) (user *User, err error) {
	// Validate identity parameters first.
	if !IsValidProvider(userParams.Provider) {
		return nil, fmt.Errorf("invalid provider")
	}
	if len(userParams.Login) < 4 {
		return nil, fmt.Errorf("login is too short (min 4 chars)")
	}

	// Copy over the caller-controlled profile and quota fields.
	user = NewUser(userParams.Provider, userParams.Login)
	user.Login = userParams.Login
	user.Name = userParams.Name
	user.Email = userParams.Email
	user.IsAdmin = userParams.IsAdmin
	user.MaxFileSize = userParams.MaxFileSize
	user.MaxUserSize = userParams.MaxUserSize
	user.MaxTTL = userParams.MaxTTL

	// Local accounts authenticate with a password, stored only as a hash.
	if user.Provider == ProviderLocal {
		if len(userParams.Password) < 8 {
			return nil, fmt.Errorf("password is too short (min 8 chars)")
		}
		var hash string
		if hash, err = HashPassword(userParams.Password); err != nil {
			return nil, fmt.Errorf("unable to hash password : %s", err)
		}
		user.Password = hash
	}

	return user, nil
}
// UpdateUser update a user object with the params
// - prevent to update provider, user ID or login
// - only update password if a new one is provided
func UpdateUser(user *User, userParams *User) (err error) {
	// A non-empty password on a local account means "change the password".
	if user.Provider == ProviderLocal && len(userParams.Password) > 0 {
		if len(userParams.Password) < 8 {
			return fmt.Errorf("password is too short (min 8 chars)")
		}
		var hash string
		if hash, err = HashPassword(userParams.Password); err != nil {
			return fmt.Errorf("unable to hash password : %s", err)
		}
		user.Password = hash
	}

	// Provider, ID and login are deliberately left untouched.
	user.Name = userParams.Name
	user.Email = userParams.Email
	user.IsAdmin = userParams.IsAdmin
	user.MaxFileSize = userParams.MaxFileSize
	user.MaxUserSize = userParams.MaxUserSize
	user.MaxTTL = userParams.MaxTTL
	return nil
}
|
// Copyright 2020 MongoDB Inc
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package atlas
import (
"errors"
"github.com/mongodb/mongocli/internal/cli"
"github.com/AlecAivazis/survey/v2"
"github.com/mongodb/mongocli/internal/config"
"github.com/mongodb/mongocli/internal/convert"
"github.com/mongodb/mongocli/internal/description"
"github.com/mongodb/mongocli/internal/flag"
"github.com/mongodb/mongocli/internal/json"
"github.com/mongodb/mongocli/internal/store"
"github.com/mongodb/mongocli/internal/usage"
"github.com/spf13/cobra"
atlas "go.mongodb.org/atlas/mongodbatlas"
)
// DBUsersCreateOpts holds the flag values and state used by
// "mongocli atlas dbusers create".
type DBUsersCreateOpts struct {
	cli.GlobalOpts
	username string   // --username
	password string   // --password; prompted for interactively when empty
	authDB   string   // --authDB, the authentication database of the new user
	roles    []string // --role values plus positional role arguments
	store    store.DatabaseUserCreator
}
// initStore wires opts.store to a store built from the default config
// profile. Note the store field is assigned whatever store.New returns,
// even when err is non-nil.
func (opts *DBUsersCreateOpts) initStore() error {
	var err error
	opts.store, err = store.New(config.Default())
	return err
}
// Run creates the database user in Atlas and pretty-prints the API result.
func (opts *DBUsersCreateOpts) Run() error {
	result, err := opts.store.CreateDatabaseUser(opts.newDatabaseUser())
	if err != nil {
		return err
	}
	return json.PrettyPrint(result)
}
// newDatabaseUser assembles the Atlas API payload from the collected flags.
func (opts *DBUsersCreateOpts) newDatabaseUser() *atlas.DatabaseUser {
	u := atlas.DatabaseUser{
		DatabaseName: opts.authDB,
		Roles:        convert.BuildAtlasRoles(opts.roles),
		GroupID:      opts.ConfigProjectID(),
		Username:     opts.username,
		Password:     opts.password,
	}
	return &u
}
// Prompt interactively asks for a password unless one was already supplied
// via the --password flag.
func (opts *DBUsersCreateOpts) Prompt() error {
	if opts.password != "" {
		// Nothing to ask.
		return nil
	}
	p := &survey.Password{Message: "Password:"}
	return survey.AskOne(p, &opts.password)
}
// mongocli atlas dbuser(s) create --username username --password password --role roleName@dbName [--projectId projectId]
//
// DBUsersCreateBuilder assembles the cobra command that creates an Atlas
// database user. Roles may be given either as positional arguments
// (restricted to ValidArgs) or via the --role flag.
func DBUsersCreateBuilder() *cobra.Command {
	opts := &DBUsersCreateOpts{}
	cmd := &cobra.Command{
		Use:   "create",
		Short: description.CreateDBUser,
		Example: `
Create an Atlas admin user
  $ mongocli atlas dbuser create atlasAdmin --username <username> --projectId <projectId>
Create user with read/write access to any database
  $ mongocli atlas dbuser create readWriteAnyDatabase --username <username> --projectId <projectId>
Create user with multiple roles
  $ mongocli atlas dbuser create --username <username> --role clusterMonitor,backup --projectId <projectId>`,
		Args:      cobra.OnlyValidArgs,
		ValidArgs: []string{"atlasAdmin", "readWriteAnyDatabase", "readAnyDatabase", "clusterMonitor", "backup", "dbAdminAnyDatabase", "enableSharding"},
		PreRunE: func(cmd *cobra.Command, args []string) error {
			// Initialize the store, require at least one role (positional or
			// flag), then prompt for a password if none was given.
			if err := opts.PreRunE(opts.initStore); err != nil {
				return err
			}
			if len(args) == 0 && len(opts.roles) == 0 {
				return errors.New("no role specified for the user")
			}
			opts.roles = append(opts.roles, args...)
			return opts.Prompt()
		},
		RunE: func(cmd *cobra.Command, args []string) error {
			return opts.Run()
		},
	}
	cmd.Flags().StringVarP(&opts.username, flag.Username, flag.UsernameShort, "", usage.Username)
	cmd.Flags().StringVarP(&opts.password, flag.Password, flag.PasswordShort, "", usage.Password)
	cmd.Flags().StringSliceVar(&opts.roles, flag.Role, []string{}, usage.Roles)
	cmd.Flags().StringVar(&opts.authDB, flag.AuthDB, convert.AdminDB, usage.AuthDB)
	cmd.Flags().StringVar(&opts.ProjectID, flag.ProjectID, "", usage.ProjectID)
	_ = cmd.MarkFlagRequired(flag.Username)
	return cmd
}
|
package main
import "fmt"
// main demonstrates longestStrChain on the classic example input.
func main() {
	words := []string{"a", "b", "ba", "bca", "bda", "bdca"}
	fmt.Println(longestStrChain(words))
}
// longestStrChain returns the length of the longest word chain, where each
// word's predecessor is obtained by deleting exactly one of its characters.
// Implemented as a memoized depth-first search over the word set.
func longestStrChain(words []string) int {
	// memo[w] == 0 means "not computed yet"; otherwise it holds the longest
	// chain ending at w. Seeding every word also makes memo double as the
	// membership set.
	memo := make(map[string]int, len(words))
	for _, w := range words {
		memo[w] = 0
	}

	var chainEndingAt func(word string) int
	chainEndingAt = func(word string) int {
		if cached := memo[word]; cached > 0 {
			return cached
		}
		best := 0
		// Try every single-character deletion; any hit in the set is a
		// valid predecessor.
		for i := 0; i < len(word); i++ {
			pred := word[:i] + word[i+1:]
			if _, ok := memo[pred]; ok {
				if l := chainEndingAt(pred); l > best {
					best = l
				}
			}
		}
		memo[word] = best + 1
		return best + 1
	}

	longest := 0
	for w := range memo {
		if l := chainEndingAt(w); l > longest {
			longest = l
		}
	}
	return longest
}
|
package git
import (
"testing"
)
// TestResolveBaseBranch checks that the base branch resolves to "main".
// NOTE(review): this is environment-dependent — as the original comment
// said, it will fail when run from a feature branch.
func TestResolveBaseBranch(t *testing.T) {
	want := "main"
	got, err := ResolveBaseBranch(nil)
	if err != nil {
		t.Errorf("error resolving branch: %s", err)
	}
	if got != want {
		t.Errorf("got %q, wanted %q", got, want)
	}
}
|
package errors
// Kind classifies an error into a broad category.
type Kind int

const (
	// KindUnexpected is the catch-all kind for unclassified errors.
	KindUnexpected Kind = iota + 1
	// KindUnmarshal is the kind for unmarshalling errors.
	KindUnmarshal
	// KindUser is the kind for errors caused by user input.
	KindUser
)

// String returns the lower-case label of the kind; any unrecognized value
// (including KindUnexpected) maps to "unexpected".
func (k Kind) String() string {
	switch k {
	case KindUnmarshal:
		return "unmarshal"
	case KindUser:
		return "user"
	default:
		return "unexpected"
	}
}
|
package cpu
import (
_"fmt"
"time"
_"encoding/json"
"sysmonitor/profile"
"sysmonitor/common"
"github.com/shirou/gopsutil/cpu"
)
// CpuMonitor samples CPU model/core information and one second of aggregate
// CPU usage, and returns them as a JSON-encoded item tagged "CPU".
func CpuMonitor() string {
	// Errors from gopsutil are deliberately tolerated: a failed probe simply
	// yields an empty/zero payload instead of aborting monitoring.
	cpuInfos, _ := cpu.Info()
	cpuPercent, _ := cpu.Percent(time.Second, false)

	cpustatus := new(profile.CpuStatus)
	cpustatus.CPU = make([]profile.CpuInfo, len(cpuInfos))
	for i, ci := range cpuInfos {
		cpustatus.CPU[i].ModelName = ci.ModelName
		cpustatus.CPU[i].Cores = ci.Cores
	}
	// Guard the index: cpu.Percent returns an empty slice on failure, which
	// previously caused an index-out-of-range panic.
	if len(cpuPercent) > 0 {
		cpustatus.CPUUseage = cpuPercent[0]
	}

	cpuResult := new(profile.Item)
	cpuResult.Tag = "CPU"
	cpuResult.Payload = cpustatus
	return common.JsonMarshal(cpuResult)
}
|
package usecase
import (
"backend/models"
"backend/api"
"errors"
)
// ObjectCreator serves paginated object reads out of an underlying repository.
type ObjectCreator struct {
	repo api.Repository // backing store queried for all objects
}
// NewObjectCreator returns an ObjectCreator backed by the given repository.
func NewObjectCreator(repo api.Repository) *ObjectCreator {
	oc := ObjectCreator{repo: repo}
	return &oc
}
// GetObjects returns up to count objects starting at index firstNumber.
// The window is clamped to the repository size; a start index past the end
// yields an error.
func (o *ObjectCreator) GetObjects(firstNumber, count int) ([]models.Object, error) {
	// Propagate repository failures; previously this error was silently
	// discarded with `objects, _ :=`.
	objects, err := o.repo.GetAllObjects()
	if err != nil {
		return nil, err
	}
	if firstNumber > len(objects) {
		return []models.Object{}, errors.New("No more objects in repo")
	}
	lastObjectNumber := firstNumber + count
	if lastObjectNumber > len(objects) {
		lastObjectNumber = len(objects)
	}
	// Copy the window out so callers cannot mutate the repository's slice.
	result := make([]models.Object, lastObjectNumber-firstNumber)
	copy(result, objects[firstNumber:lastObjectNumber])
	return result, nil
}
|
package main
import(
"github.com/griddb/go_client"
"fmt"
"os"
"strconv"
)
// main is a GridDB sample: it connects to a cluster, creates a collection
// with an indexed schema, exercises put/remove inside a manual transaction,
// then queries and updates a row.
// NOTE(review): several errors below (GetStore, Put, Remove, SetAutoCommit,
// CreateIndex, Commit) are ignored or only logged without aborting — confirm
// this is acceptable for sample code.
func main() {
	factory := griddb_go.StoreFactoryGetInstance()
	blob := []byte{65, 66, 67, 68, 69, 70, 71, 72, 73, 74}
	// Get GridStore object
	port, err := strconv.Atoi(os.Args[2])
	if err != nil {
		fmt.Println(err)
		os.Exit(2)
	}
	// Connection parameters come from the command line:
	// host, port, cluster name, username, password.
	gridstore := factory.GetStore(map[string]interface{} {
		"host" :os.Args[1],
		"port" :port,
		"cluster_name":os.Args[3],
		"username" :os.Args[4],
		"password" :os.Args[5],
	})
	// Create Collection with a string row key, a bool, a long and a blob column.
	conInfo, err := griddb_go.CreateContainerInfo("col01",
		[][]interface{}{
			{"name", griddb_go.TYPE_STRING},
			{"status" ,griddb_go.TYPE_BOOL},
			{"count", griddb_go.TYPE_LONG},
			{"lob", griddb_go.TYPE_BLOB}},
		griddb_go.CONTAINER_COLLECTION,
		true)
	if(err != nil) {
		fmt.Println("Create containerInfo failed")
	}
	col, err := gridstore.PutContainer(conInfo, false)
	if(err != nil) {
		fmt.Println("put container failed")
	}
	// Change auto commit mode to false, so changes below are transactional.
	col.SetAutoCommit(false)
	// Set an index on the Row-key Column
	col.CreateIndex("name", griddb_go.INDEX_DEFAULT)
	// Set an index on the Column
	col.CreateIndex("count", griddb_go.INDEX_DEFAULT)
	//Put row: RowKey is "name01"
	row1 := []interface{}{"name01", false, 1, blob}
	err = col.Put(row1)
	//Remove row with RowKey "name01"
	col.Remove("name01")
	//Put row: RowKey is "name02"
	row2 := []interface{}{"name02", false, 1, blob}
	err = col.Put(row2)
	// Commit the put/remove batch above.
	col.Commit();
	// Create normal query
	query, err := col.Query("select * where name = 'name02'")
	if(err != nil) {
		fmt.Println("create query failed")
	}
	//Execute query (true requests updatable results)
	rs, err := query.Fetch(true)
	if(err != nil) {
		fmt.Println("create rs from query failed")
	}
	for rs.HasNext(){
		// Update row
		rrow, err := rs.NextRow()
		if(err != nil) {
			fmt.Println("NextRow from rs failed")
		}
		fmt.Println("Person: name=", rrow[0]," status=", rrow[1]," count=", rrow[2]," lob=", rrow[3])
		tmpRow := []interface{}{"name02", false, 2, blob}
		rs.Update(tmpRow)
	}
	// End transaction
	col.Commit()
}
|
package main
import (
"runtime"
"time"
"github.com/akosgarai/opengl_playground/pkg/application"
wrapper "github.com/akosgarai/opengl_playground/pkg/glwrapper"
"github.com/akosgarai/opengl_playground/pkg/primitives/camera"
"github.com/akosgarai/opengl_playground/pkg/primitives/cuboid"
"github.com/akosgarai/opengl_playground/pkg/primitives/light"
"github.com/akosgarai/opengl_playground/pkg/primitives/rectangle"
trans "github.com/akosgarai/opengl_playground/pkg/primitives/transformations"
"github.com/akosgarai/opengl_playground/pkg/shader"
"github.com/akosgarai/opengl_playground/pkg/window"
"github.com/go-gl/glfw/v3.3/glfw"
"github.com/go-gl/mathgl/mgl32"
)
const (
	// Window geometry and title.
	WindowWidth = 800
	WindowHeight = 800
	WindowTitle = "Example - cubes with light source"
	// Camera movement key bindings.
	FORWARD = glfw.KeyW
	BACKWARD = glfw.KeyS
	LEFT = glfw.KeyA
	RIGHT = glfw.KeyD
	UP = glfw.KeyQ
	DOWN = glfw.KeyE
	// moveSpeed is the camera translation speed (units per millisecond of
	// frame time — see Update).
	moveSpeed = 0.005
)
var (
	app *application.Application
	// lastUpdate is the previous frame timestamp (UnixNano), used to derive
	// per-frame movement deltas in Update.
	lastUpdate int64
	// cameraDistance is the edge-band width (in normalized device coords)
	// within which the cursor rotates the camera.
	cameraDistance = 0.1
	cameraDirectionSpeed = float32(0.00500)
)
// CreateCamera builds the scene camera at (0, 0, 10) with yaw -90 / pitch 0,
// and a 45-degree perspective projection matching the window aspect ratio.
func CreateCamera() *camera.Camera {
	// Named "cam" so the local does not shadow the imported camera package.
	cam := camera.NewCamera(mgl32.Vec3{0, 0, 10.0}, mgl32.Vec3{0, 1, 0}, -90.0, 0.0)
	cam.SetupProjection(45, float32(WindowWidth)/float32(WindowHeight), 0.1, 100.0)
	return cam
}
// SetupKeyMap returns the key-state map with every movement key released.
func SetupKeyMap() map[glfw.Key]bool {
	keyDowns := make(map[glfw.Key]bool)
	for _, key := range []glfw.Key{FORWARD, LEFT, RIGHT, BACKWARD, UP, DOWN} {
		keyDowns[key] = false
	}
	return keyDowns
}
// GenerateWhiteCube adds a unit white cube (bottom face at y = -0.5, offset
// around x,z = -3) to the application.
func GenerateWhiteCube(shaderProgram *shader.Shader) {
	white := mgl32.Vec3{1.0, 1.0, 1.0}
	corners := [4]mgl32.Vec3{
		{-3.5, -0.5, -3.5},
		{-3.5, -0.5, -2.5},
		{-2.5, -0.5, -2.5},
		{-2.5, -0.5, -3.5},
	}
	colors := [4]mgl32.Vec3{white, white, white, white}
	bottomRect := rectangle.New(corners, colors, shaderProgram)
	app.AddItem(cuboid.New(bottomRect, 1.0, shaderProgram))
}
// GenerateColoredCube adds a unit cube centered at the origin whose six
// sides each get a distinct color.
func GenerateColoredCube(shaderProgram *shader.Shader) {
	sideColors := [6]mgl32.Vec3{
		{1.0, 0.0, 0.0},
		{1.0, 1.0, 0.0},
		{0.0, 1.0, 0.0},
		{0.0, 1.0, 1.0},
		{0.0, 0.0, 1.0},
		{1.0, 0.0, 1.0},
	}
	corners := [4]mgl32.Vec3{
		{-0.5, -0.5, -0.5},
		{-0.5, -0.5, 0.5},
		{0.5, -0.5, 0.5},
		{0.5, -0.5, -0.5},
	}
	// The bottom rectangle starts with the first side color; every side is
	// recolored below anyway.
	bottomColors := [4]mgl32.Vec3{sideColors[0], sideColors[0], sideColors[0], sideColors[0]}
	bottomRect := rectangle.New(corners, bottomColors, shaderProgram)
	cube := cuboid.New(bottomRect, 1.0, shaderProgram)
	for side, color := range sideColors {
		cube.SetSideColor(side, color)
	}
	app.AddItem(cube)
}
// Update advances the camera once per frame: WASD/QE keys translate it and
// the cursor hovering near a window edge rotates it.
func Update() {
	nowNano := time.Now().UnixNano()
	moveTime := float64(nowNano-lastUpdate) / float64(time.Millisecond)
	lastUpdate = nowNano

	// axisDelta maps a (positive, negative) key pair to a signed distance;
	// pressing both keys cancels out to zero.
	axisDelta := func(posKey, negKey glfw.Key) float64 {
		if app.GetKeyState(posKey) && !app.GetKeyState(negKey) {
			return moveSpeed * moveTime
		}
		if app.GetKeyState(negKey) && !app.GetKeyState(posKey) {
			return -moveSpeed * moveTime
		}
		return 0.0
	}

	if forward := axisDelta(FORWARD, BACKWARD); forward != 0 {
		app.GetCamera().Walk(float32(forward))
	}
	if horizontal := axisDelta(RIGHT, LEFT); horizontal != 0 {
		app.GetCamera().Strafe(float32(horizontal))
	}
	if vertical := axisDelta(DOWN, UP); vertical != 0 {
		app.GetCamera().Lift(float32(vertical))
	}

	// Rotate when the cursor sits inside the edge band (width cameraDistance,
	// in normalized [-1, 1] window coordinates).
	currX, currY := app.GetWindow().GetCursorPos()
	x, y := trans.MouseCoordinates(currX, currY, WindowWidth, WindowHeight)
	nearTop := y > 1.0-cameraDistance && y < 1.0
	nearBottom := y < -1.0+cameraDistance && y > -1.0
	nearLeft := x < -1.0+cameraDistance && x > -1.0
	nearRight := x > 1.0-cameraDistance && x < 1.0

	dX := float32(0.0)
	dY := float32(0.0)
	if nearTop && !nearBottom {
		dY = cameraDirectionSpeed
	} else if nearBottom && !nearTop {
		dY = -cameraDirectionSpeed
	}
	if nearLeft && !nearRight {
		dX = -cameraDirectionSpeed
	} else if nearRight && !nearLeft {
		dX = cameraDirectionSpeed
	}
	app.GetCamera().UpdateDirection(dX, dY)
}
// main wires up the window, camera, shader and scene, then runs the render
// loop until the window is closed.
func main() {
	// GLFW/OpenGL calls must stay on one OS thread.
	runtime.LockOSThread()
	app = application.New()
	app.SetWindow(window.InitGlfw(WindowWidth, WindowHeight, WindowTitle))
	defer glfw.Terminate()
	wrapper.InitOpenGL()
	app.SetCamera(CreateCamera())
	shaderProgram := shader.NewShader("examples/08-colors/vertexshader.vert", "examples/08-colors/fragmentshader.frag")
	// Point light at the origin; only the ambient component is bound to a
	// shader uniform ("light.ambient") here.
	lightSource := light.NewPointLight([4]mgl32.Vec3{mgl32.Vec3{0, 0, 0}, mgl32.Vec3{1, 1, 1}, mgl32.Vec3{1, 1, 1}, mgl32.Vec3{1, 1, 1}}, [3]float32{1.0, 1.0, 1.0})
	shaderProgram.AddPointLightSource(lightSource, [7]string{"", "light.ambient", "", "", "", "", ""})
	GenerateColoredCube(shaderProgram)
	GenerateWhiteCube(shaderProgram)
	wrapper.Enable(wrapper.DEPTH_TEST)
	wrapper.DepthFunc(wrapper.LESS)
	wrapper.ClearColor(0.3, 0.3, 0.3, 1.0)
	lastUpdate = time.Now().UnixNano()
	// register keyboard button callback
	app.GetWindow().SetKeyCallback(app.KeyCallback)
	// Render loop: clear, poll input, update the camera, draw, present.
	for !app.GetWindow().ShouldClose() {
		wrapper.Clear(wrapper.COLOR_BUFFER_BIT | wrapper.DEPTH_BUFFER_BIT)
		glfw.PollEvents()
		Update()
		app.DrawWithUniforms()
		app.GetWindow().SwapBuffers()
	}
}
|
package osbuild1
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestNewKernelCmdlineStage(t *testing.T) {
expectedStage := &Stage{
Name: "org.osbuild.kernel-cmdline",
Options: &KernelCmdlineStageOptions{},
}
actualStage := NewKernelCmdlineStage(&KernelCmdlineStageOptions{})
assert.Equal(t, expectedStage, actualStage)
}
|
package conformance
import (
"fmt"
"os"
"strconv"
"github.com/bloodorangeio/reggie"
godigest "github.com/opencontainers/go-digest"
)
// TODO: import from opencontainers/distribution-spec
type (
	// TagList mirrors the registry tag-listing response body.
	TagList struct {
		Name string   `json:"name"`
		Tags []string `json:"tags"`
	}
)
const (
	// nonexistentManifest is a manifest reference that is expected to never
	// exist in the registry under test.
	nonexistentManifest string = ".INVALID_MANIFEST_NAME"
)
var (
	// Blob fixtures: blobA is uploaded whole; blobB is uploaded in two
	// chunks (lengths and Content-Range values precomputed in init).
	blobA []byte
	blobALength string
	blobADigest string
	blobB []byte
	blobBDigest string
	blobBChunk1 []byte
	blobBChunk1Length string
	blobBChunk2 []byte
	blobBChunk2Length string
	blobBChunk1Range string
	blobBChunk2Range string
	// client is the shared registry client configured from OCI_* env vars.
	client *reggie.Client
	// Config/manifest fixtures referenced by the manifest tests.
	configContent []byte
	configContentLength string
	configDigest string
	dummyDigest string
	// Cross-test state shared between conformance steps.
	lastResponse *reggie.Response
	lastTagList TagList
	manifestContent []byte
	manifestDigest string
	numTags int
	// Report output and debug plumbing.
	reportJUnitFilename string
	reportHTMLFilename string
	httpWriter *httpDebugWriter
	suiteDescription string
)
// init configures the conformance client from OCI_* environment variables
// and precomputes all blob/config/manifest fixtures.
func init() {
	hostname := os.Getenv("OCI_ROOT_URL")
	namespace := os.Getenv("OCI_NAMESPACE")
	username := os.Getenv("OCI_USERNAME")
	password := os.Getenv("OCI_PASSWORD")
	debug := os.Getenv("OCI_DEBUG") == "true"
	var err error
	httpWriter = newHTTPDebugWriter(debug)
	logger := newHTTPDebugLogger(httpWriter)
	client, err = reggie.NewClient(hostname,
		reggie.WithDefaultName(namespace),
		reggie.WithUsernamePassword(username, password),
		reggie.WithDebug(true),
		reggie.WithUserAgent("distribution-spec-conformance-tests"))
	if err != nil {
		panic(err)
	}
	// BUG FIX: SetLogger was previously called before the error check above,
	// dereferencing a nil client when NewClient failed.
	client.SetLogger(logger)
	// Minimal image config and a layerless manifest referencing it.
	configContent = []byte("{}\n")
	configContentLength = strconv.Itoa(len(configContent))
	configDigest = godigest.FromBytes(configContent).String()
	manifestContent = []byte(fmt.Sprintf(
		"{ \"mediaType\": \"application/vnd.oci.image.manifest.v1+json\", \"config\": { \"digest\": \"%s\", "+
			"\"mediaType\": \"application/vnd.oci.image.config.v1+json\","+" \"size\": %s }, \"layers\": [], "+
			"\"schemaVersion\": 2 }",
		configDigest, configContentLength))
	manifestDigest = godigest.FromBytes(manifestContent).String()
	// blobA is uploaded monolithically; blobB is split into two chunks with
	// matching Content-Range values.
	blobA = []byte("NBA Jam on my NBA toast")
	blobALength = strconv.Itoa(len(blobA))
	blobADigest = godigest.FromBytes(blobA).String()
	blobB = []byte("Hello, how are you today?")
	blobBDigest = godigest.FromBytes(blobB).String()
	blobBChunk1 = blobB[:3]
	blobBChunk1Length = strconv.Itoa(len(blobBChunk1))
	blobBChunk1Range = fmt.Sprintf("0-%d", len(blobBChunk1)-1)
	blobBChunk2 = blobB[3:]
	blobBChunk2Length = strconv.Itoa(len(blobBChunk2))
	blobBChunk2Range = fmt.Sprintf("%d-%d", len(blobBChunk1), len(blobB)-1)
	dummyDigest = godigest.FromString("hello world").String()
	reportJUnitFilename = "junit.xml"
	reportHTMLFilename = "report.html"
	suiteDescription = "OCI Distribution Conformance Tests"
}
|
package p2p
import (
"fmt"
)
// NetworkID represents the P2P network we are participating in (eg: test, nmain, etc.)
type NetworkID uint32
// NetworkID are specific uint32s to identify separate networks
//
// The default identifiers are MainNet (the main production network), TestNet (for network=TESTNET)
// and LocalNet (for network=LOCAL).
//
// Custom NetworkIDs (network=CUSTOM) are generated from the "customnet" command line flag
const (
	MainNet NetworkID = 0xfeedbeef
	TestNet NetworkID = 0xdeadbeef
	LocalNet NetworkID = 0xbeaded
)
// NewNetworkID converts a string to a network id by hashing it to a uint32.
func NewNetworkID(name string) NetworkID {
	id := StringToUint32(name)
	return NetworkID(id)
}
// String returns the well-known name for the network id, or a "CustomNet"
// description for any other value.
// NOTE(review): the custom branch embeds a trailing newline, and the pointer
// receiver means a plain NetworkID value does not satisfy fmt.Stringer —
// confirm both are intentional before changing.
func (n *NetworkID) String() string {
	switch *n {
	case MainNet:
		return "MainNet"
	case TestNet:
		return "TestNet"
	case LocalNet:
		return "LocalNet"
	default:
		return fmt.Sprintf("CustomNet ID: %x\n", *n)
	}
}
|
package factory
import (
"errors"
)
// Pokemon interface to implement on each pokemon object produced by the
// factory.
type Pokemon interface {
	Spawn()
}
// List of Pokemon we have; these ids are the accepted inputs of CreatePokemon.
const (
	CHARMANDER = iota + 1
	PIKACHU
)
// CreatePokemon is the factory of pokemon: it maps a pokemon id constant to
// a freshly allocated instance, or returns an error for unknown ids.
func CreatePokemon(poke int) (Pokemon, error) {
	switch poke {
	case CHARMANDER:
		return new(Charmander), nil
	case PIKACHU:
		return new(Pikachu), nil
	default:
		// Fixed typos in the message ("Ops", "dont") and lower-cased it per
		// Go error-string convention.
		return nil, errors.New("oops, we don't have that pokemon yet")
	}
}
|
package main
import (
api_ctrl "./api/controllers"
"github.com/astaxie/beego"
clog "github.com/cihub/seelog"
)
// jzh: This exists so the tests can be run standalone; it can be enabled or
// disabled via the [http]:[fake_api] entry in ts.ini.

// RootController serves the fake root endpoint ("/").
type RootController struct {
	beego.Controller
}
// Get answers ?query=alias with a fixed fake alias payload; any other query
// gets an empty response. (Receiver renamed from "self" to "c" per Go
// convention.)
func (c *RootController) Get() {
	query := c.GetString("query")
	if query == "alias" {
		c.Ctx.Output.Body([]byte(`{"error":0, "alias":"fake_server"}`))
	}
}
// UserController serves the fake per-user endpoint ("/:uid/user").
type UserController struct {
	beego.Controller
}
// Get answers ?query=availuser with a canned single-user payload.
// (Receiver renamed from "self" to "c" per Go convention; the JSON body is
// unchanged.)
func (c *UserController) Get() {
	query := c.GetString("query")
	if query == "availuser" {
		json := `{
"error":0,
"user":[{
"username": "receiver",
"uid": "rr00000000rr",
"state": 1,
"expire": null,
"last_login": "2016-07-29 03:07:21",
"u_level": 0
}]
}`
		c.Ctx.Output.Body([]byte(json))
	}
}
// LoginController serves the fake login endpoint ("/login").
type LoginController struct {
	api_ctrl.BaseController
}
// Post fakes the login endpoint: it decodes the posted credentials and
// returns the canned session payload for the matching fake user (an empty
// body for unknown usernames). Receiver renamed from "self" to "c" per Go
// convention; comments translated from Chinese.
func (c *LoginController) Post() {
	type LoginReq struct {
		Username string `json:"username"`
		Password string `json:"password"`
	}
	req := new(LoginReq)
	if err := c.FetchJsonBody(req); err != nil {
		return
	}
	// Simulate multiple users.
	result := make(map[string]string)
	result["sender"] = `{"usr": {"username": "sender", "uid": "ss00000000ss", "state": 1, "expire": null, "last_login": "2016-07-29 03:07:21", "u_level": 0}, "session_id": "xyacqrbo5z3kqqf3hadovsio0gz7407q", "error": 0}`
	result["receiver"] = `{"usr": {"username": "receiver", "uid": "rr00000000rr", "state": 1, "expire": null, "last_login": "2016-07-29 03:07:21", "u_level": 0}, "session_id": "zyacqrbo5z3kqqf3hadovsio0gz7407q", "error": 0}`
	// Return the result.
	c.Ctx.Output.Body([]byte(result[req.Username]))
}
// TestController serves the fake probe endpoint ("/test").
type TestController struct {
	api_ctrl.BaseController
}
// Get traces the caller's user level and writes a static probe response.
// (Receiver renamed from "self" to "c" per Go convention.)
func (c *TestController) Get() {
	clog.Tracef("user level:%d", c.UserLevel)
	c.Ctx.WriteString("this is test")
}
// EnableFakeApi registers all fake endpoints on beego's default router.
func EnableFakeApi() {
	beego.Router("/", &RootController{})
	beego.Router("/:uid/user", &UserController{})
	beego.Router("/login", &LoginController{})
	beego.Router("/test", &TestController{})
}
|
package util
import (
"github.com/sanguohot/chardet"
"net/http"
"strings"
)
// The only two charsets this package distinguishes between.
var defaultCharset = "utf-8"
var gbk = "gbk"
// DetectCharsetWithOnlyUtf8OrGbk classifies data as either "utf-8" or "gbk":
// utf-8 when it is the detector's top candidate (or nothing was detected),
// gbk when it appears anywhere among the candidates, utf-8 otherwise.
func DetectCharsetWithOnlyUtf8OrGbk(data []byte) string {
	strs := chardet.Possible(data)
	// Guard against an empty candidate list, which previously panicked on
	// strs[0].
	if len(strs) == 0 || strs[0] == defaultCharset {
		return defaultCharset
	}
	for _, value := range strs {
		if value == gbk {
			return gbk
		}
	}
	return defaultCharset
}
// DetectContentType sniffs the MIME type of data and rewrites a "utf-8"
// charset suffix to the detected charset (utf-8 or gbk).
func DetectContentType(data []byte) string {
	contentType := http.DetectContentType(data)
	// strings.Contains is the idiomatic form of strings.Index(...) >= 0.
	if strings.Contains(contentType, defaultCharset) {
		contentType = strings.Replace(contentType, defaultCharset, DetectCharsetWithOnlyUtf8OrGbk(data), -1)
	}
	return contentType
}
|
package timer
import (
"container/list"
"context"
"time"
)
// eventType enumerates the commands accepted by the wheel's event loop.
type eventType int
const (
	// add schedules a timer on the wheel.
	add eventType = iota
	// remove unschedules a timer.
	remove
	// reset re-schedules an existing timer (handled as remove then add).
	reset
)
// event is a command envelope delivered to the wheel goroutine over eventCh.
type event struct {
	typ eventType   // which operation to perform
	wt *wheelTimer // the timer the operation applies to
}
// wheel is a timing wheel. All mutable state is owned by the single loop
// goroutine; other goroutines interact with it only through eventCh.
type wheel struct {
	ctx context.Context
	stop context.CancelFunc // cancels ctx and thereby terminates loop
	interval time.Duration // tick length (time represented by one slot)
	ticker *time.Ticker
	pos int // index of the slot the next tick will process
	slots []*list.List // each slot holds the *wheelTimers due at that tick
	timers map[interface{}]int // timer -> slot index, for removal
	eventCh chan *event // command channel feeding the loop goroutine
}
var (
	// defaultWheel is the package-wide wheel: 1-second resolution with 600
	// slots, i.e. 10 minutes per full revolution.
	defaultWheel = newWheel(context.Background(), time.Second, 600)
)
// newWheel builds a timing wheel with slotNum slots ticking every interval,
// and starts its event-loop goroutine.
func newWheel(ctx context.Context, interval time.Duration, slotNum int) *wheel {
	w := &wheel{
		interval: interval,
		slots:    make([]*list.List, slotNum),
		timers:   make(map[interface{}]int),
		eventCh:  make(chan *event, 100),
	}
	w.ctx, w.stop = context.WithCancel(ctx)
	for i := range w.slots {
		w.slots[i] = list.New()
	}
	w.ticker = time.NewTicker(interval)
	go w.loop()
	return w
}
// addTimer queues an add request for t on the wheel's event loop.
func (w *wheel) addTimer(t *wheelTimer) {
	evt := &event{typ: add, wt: t}
	w.eventCh <- evt
}
// removeTimer queues a remove request for t on the wheel's event loop.
func (w *wheel) removeTimer(t *wheelTimer) {
	evt := &event{typ: remove, wt: t}
	w.eventCh <- evt
}
// resetTimer queues a reset request (remove then re-add) for t.
func (w *wheel) resetTimer(t *wheelTimer) {
	evt := &event{typ: reset, wt: t}
	w.eventCh <- evt
}
// loop is the wheel's single owner goroutine: it advances the wheel on
// each tick, applies add/remove/reset requests from eventCh, and stops
// the ticker and exits when the context is cancelled.
func (w *wheel) loop() {
	for {
		select {
		case <-w.ctx.Done():
			w.ticker.Stop()
			return
		case <-w.ticker.C:
			w.tickerHandler()
		case evt := <-w.eventCh:
			switch evt.typ {
			case add:
				w.add(evt.wt)
			case remove:
				w.remove(evt.wt)
			case reset:
				// Reset is implemented as remove + add.
				w.remove(evt.wt)
				w.add(evt.wt)
			}
		}
	}
}
// tickerHandler drains the current slot on each tick: timers that still
// have whole wheel rotations remaining are decremented and kept; the
// rest are fired and removed from both the slot and the lookup map.
func (w *wheel) tickerHandler() {
	slot := w.slots[w.pos]
	// Advance the wheel position before draining, wrapping at the end.
	w.pos = (w.pos + 1) % len(w.slots)
	for el := slot.Front(); el != nil; {
		wt := el.Value.(*wheelTimer)
		if wt.circle > 0 {
			wt.circle--
			el = el.Next()
			continue
		}
		// Fire event
		w.fire(wt)
		// Capture the successor before unlinking the element.
		next := el.Next()
		slot.Remove(el)
		el = next
		delete(w.timers, wt)
	}
}
// fire delivers an expired timer: callback timers run t.f in their own
// goroutine; channel timers receive the current time via a non-blocking
// send, so a slow receiver drops the tick rather than stalling the wheel.
func (w *wheel) fire(t *wheelTimer) {
	if t.f != nil {
		go t.f()
		return
	}
	select {
	case t.c <- time.Now():
	default:
	}
}
// add places t into the slot that will fire after t.dur. multi is the
// number of ticks until expiry; circle is how many full wheel rotations
// must pass before the slot drain is allowed to fire it.
//
// NOTE(review): durations shorter than the wheel interval give multi==0,
// so the timer fires on the very next tick — confirm that is intended.
// Re-adding a timer that is already tracked overwrites its map entry
// without unlinking the old list element.
func (w *wheel) add(t *wheelTimer) {
	multi := int(t.dur.Nanoseconds() / w.interval.Nanoseconds())
	pos := (w.pos + multi) % len(w.slots)
	t.circle = multi / len(w.slots)
	w.slots[pos].PushBack(t)
	w.timers[t] = pos
}
// remove unlinks t from the slot recorded for it in w.timers. When t is
// not tracked (already fired or never added), it instead drains any
// pending tick from t.c with a non-blocking receive so a later reuse of
// the timer does not observe a stale value.
func (w *wheel) remove(t *wheelTimer) {
	pos, ok := w.timers[t]
	if !ok {
		// Not tracked: best-effort drain of the timer's channel.
		select {
		case <-t.c:
		default:
		}
		return
	}
	// Scan the recorded slot for the element holding this timer.
	slot := w.slots[pos]
	for el := slot.Front(); el != nil; {
		wt := el.Value.(*wheelTimer)
		if wt == t {
			slot.Remove(el)
			delete(w.timers, t)
			return
		}
		el = el.Next()
	}
}
|
package coding
import (
"fmt"
"github.com/sujit-baniya/smpp/coding/gsm7bit"
. "unicode"
. "golang.org/x/text/encoding"
"golang.org/x/text/encoding/charmap"
"golang.org/x/text/encoding/japanese"
"golang.org/x/text/encoding/korean"
"golang.org/x/text/encoding/unicode"
"golang.org/x/text/unicode/rangetable"
)
// DataCoding is the SMPP data_coding byte; see SMPP v5, section 4.7.7 (123p).
type DataCoding byte

// GoString implements fmt.GoStringer; identical to String.
func (c DataCoding) GoString() string {
	return c.String()
}

// String renders the coding as its 8-bit binary pattern, e.g. "00001000".
func (c DataCoding) String() string {
	return fmt.Sprintf("%08b", byte(c))
}
// MessageWaitingInfo decodes the Message Waiting Indication groups of the
// data-coding byte (high nibble 1100/1101/1110, per GSM 03.38 / SMPP 4.7.7).
//
// coding is the alphabet of the accompanying text: GSM 7-bit for both the
// "discard message" (1100) and "store message" (1101) groups, UCS-2 for
// group 1110. active reports the indication-sense flag in bit 3, and kind
// is the indication type in bits 0-1 (voicemail/fax/e-mail/other). When c
// is not an MWI coding it returns NoCoding, false, and kind == -1.
func (c DataCoding) MessageWaitingInfo() (coding DataCoding, active bool, kind int) {
	kind = -1
	coding = NoCoding
	switch c >> 4 & 0b1111 {
	case 0b1100, 0b1101:
		// Both the discard (1100) and store (1101) groups carry GSM
		// 7-bit text. The previous empty "case 0b1100:" fell through to
		// nothing, leaving 1100 mapped to NoCoding.
		coding = GSM7BitCoding
	case 0b1110:
		coding = UCS2Coding
	default:
		return
	}
	// Only bit 3 holds the indication-sense flag. The previous test
	// (c>>3 == 1) also kept the group bits, so it could never be true
	// for any of the codings matched above.
	active = c>>3&0b1 == 1
	kind = int(c & 0b11)
	return
}
// MessageClass decodes the "data coding / message class" group of the
// data-coding byte (high nibble 1111). class is the GSM message class in
// bits 0-1; when the high nibble is not 1111 it returns NoCoding and
// class == -1.
//
// NOTE(review): when bit 2 is set the spec denotes 8-bit data, but this
// package maps it to UCS2Coding — presumably a deliberate substitution
// since no 8-bit coding is defined here; confirm against callers.
func (c DataCoding) MessageClass() (coding DataCoding, class int) {
	class = int(c & 0b11)
	coding = GSM7BitCoding
	if c>>4&0b1111 != 0b1111 {
		coding = NoCoding
		class = -1
	} else if c>>2&0b1 == 1 {
		coding = UCS2Coding
	}
	return
}
// Encoding resolves the text encoder/decoder for this data coding. The
// message-waiting and message-class composite codings take priority;
// otherwise the coding is looked up directly.
func (c DataCoding) Encoding() Encoding {
	if coding, _, kind := c.MessageWaitingInfo(); kind != -1 {
		return encodingMap[coding]
	}
	if coding, class := c.MessageClass(); class != -1 {
		return encodingMap[coding]
	}
	return encodingMap[c]
}
// Splitter resolves the message-segmentation strategy for this data
// coding, giving the message-waiting and message-class composite codings
// priority over the direct table lookup.
func (c DataCoding) Splitter() Splitter {
	if coding, _, kind := c.MessageWaitingInfo(); kind != -1 {
		return splitterMap[coding]
	}
	if coding, class := c.MessageClass(); class != -1 {
		return splitterMap[coding]
	}
	return splitterMap[c]
}
// Validate reports whether every rune of input is representable in the
// coding's alphabet. UCS-2 accepts any input.
//
// NOTE(review): codings without an entry in alphabetMap (e.g.
// ISO2022JPCoding, EUCJPCoding) produce a nil *RangeTable here, and
// unicode.Is dereferences it — confirm callers only pass table-backed
// codings, or this will panic.
func (c DataCoding) Validate(input string) bool {
	if c == UCS2Coding {
		return true
	}
	for _, r := range input {
		if !Is(alphabetMap[c], r) {
			return false
		}
	}
	return true
}
// Data-coding values from SMPP v5 section 4.7.7. NoCoding is a
// package-internal sentinel, not a value defined by the specification.
const (
	GSM7BitCoding   DataCoding = 0b00000000 // GSM 7Bit
	ASCIICoding     DataCoding = 0b00000001 // ASCII
	Latin1Coding    DataCoding = 0b00000011 // ISO-8859-1 (Latin-1)
	ShiftJISCoding  DataCoding = 0b00000101 // Shift-JIS
	CyrillicCoding  DataCoding = 0b00000110 // ISO-8859-5 (Cyrillic)
	HebrewCoding    DataCoding = 0b00000111 // ISO-8859-8 (Hebrew)
	UCS2Coding      DataCoding = 0b00001000 // UCS-2
	ISO2022JPCoding DataCoding = 0b00001010 // ISO-2022-JP
	EUCJPCoding     DataCoding = 0b00001101 // Extended Kanji JIS (X 0212-1990)
	EUCKRCoding     DataCoding = 0b00001110 // KS X 1001 (KS C 5601)
	NoCoding        DataCoding = 0b10111111 // Reserved (Non-specification definition)
)

// encodingMap maps each data coding to its text encoding.
// NOTE(review): ASCIICoding reuses ISO8859_1 (an ASCII superset) —
// presumably intentional; confirm.
var encodingMap = map[DataCoding]Encoding{
	GSM7BitCoding:   gsm7bit.Packed,
	ASCIICoding:     charmap.ISO8859_1,
	Latin1Coding:    charmap.ISO8859_1,
	ShiftJISCoding:  japanese.ShiftJIS,
	CyrillicCoding:  charmap.ISO8859_5,
	HebrewCoding:    charmap.ISO8859_8,
	UCS2Coding:      unicode.UTF16(unicode.BigEndian, unicode.IgnoreBOM),
	ISO2022JPCoding: japanese.ISO2022JP,
	EUCJPCoding:     japanese.EUCJP,
	EUCKRCoding:     korean.EUCKR,
}

// alphabetMap lists the validatable rune ranges per coding; codings
// absent here cannot be checked by Validate (see the note there).
var alphabetMap = map[DataCoding]*RangeTable{
	GSM7BitCoding:  gsm7bit.DefaultAlphabet,
	ASCIICoding:    _ASCII,
	Latin1Coding:   rangetable.Merge(_ASCII, Latin),
	CyrillicCoding: rangetable.Merge(_ASCII, Cyrillic),
	HebrewCoding:   rangetable.Merge(_ASCII, Hebrew),
	ShiftJISCoding: rangetable.Merge(_ASCII, _Shift_JIS_Definition),
	EUCKRCoding:    rangetable.Merge(_ASCII, _EUC_KR_Definition),
}

// splitterMap maps each coding to its message-segmentation strategy.
var splitterMap = map[DataCoding]Splitter{
	GSM7BitCoding:   _7BitSplitter,
	ASCIICoding:     _1ByteSplitter,
	HebrewCoding:    _1ByteSplitter,
	CyrillicCoding:  _1ByteSplitter,
	Latin1Coding:    _1ByteSplitter,
	ShiftJISCoding:  _MultibyteSplitter,
	ISO2022JPCoding: _MultibyteSplitter,
	EUCJPCoding:     _MultibyteSplitter,
	EUCKRCoding:     _MultibyteSplitter,
	UCS2Coding:      _UTF16Splitter,
}
|
package appStruct
import "github.com/therecipe/qt/widgets"
// GuiComponent holds the UI widgets and communication channels that can
// change while the program is running (translated from the original
// Russian comment).
type GuiComponent struct{
	Application *widgets.QApplication
	WordList *widgets.QListWidget
	MainWindow *widgets.QMainWindow
	MainWidget *widgets.QWidget
	FileProgress *CustomProgressBar
	FileProgressUpdate chan int
	InfoAboutScanningFiles *CustomLabel
	StartDirectoryForScan *CustomLabel
	ScanningTimeInfo *CustomLabel
	FileTree *CustomTreeWidget
	ErrorTable *widgets.QTableWidget
	NonScanTable *widgets.QTableWidget
	SearchIsActive bool
	SkipItem bool
	SkipItemNonArch bool
	StartDirectoryName string
	AddTempDir chan string
	DeleteTempDir chan string
	EndDeleteTemp chan bool
	UpdateTime string
	IsTimeUpdate bool
	UpdateLabel string
	ProgressBarValue int
	EndUIUpdate chan string
	ErrorTableUpdate chan string
}
// NewGui returns a GuiComponent with every widget pointer left at its
// zero value (nil) and all communication channels pre-allocated with
// their usual capacities.
func NewGui() *GuiComponent {
	// Fields not listed here (widgets, flags, strings) keep their Go
	// zero values, which matches explicitly assigning nil/false/"".
	return &GuiComponent{
		FileProgressUpdate: make(chan int, 1000),
		ErrorTableUpdate:   make(chan string, 1000),
		EndUIUpdate:        make(chan string, 2),
		AddTempDir:         make(chan string, 1000),
		DeleteTempDir:      make(chan string, 1000),
		EndDeleteTemp:      make(chan bool, 1000),
	}
}
|
package gojobs
import (
"sync"
)
// Func is the skeleton of an executable job function: it receives the
// job's input data and returns a result or an error.
type Func func(data interface{}) (result interface{}, err error)

// Job carries one unit of work: its input Data and, after Run, the
// Result and Error produced by the executed function.
type Job struct {
	Data   interface{}
	Result interface{}
	Error  error
}

// New creates a new Job wrapping the given input data.
func New(data interface{}) *Job {
	return &Job{
		Data: data,
	}
}

// Run executes f with the job's data in a goroutine and blocks until it
// finishes, storing the outcome in j.Result and j.Error.
//
// The WaitGroup is local to each call: the previous package-level
// WaitGroup was shared by every Job, so concurrent Run calls raced on
// Add/Wait and could panic or return early.
func (j *Job) Run(f Func) {
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		j.Result, j.Error = f(j.Data)
	}()
	wg.Wait()
}
|
////////////////////////////////////////////////////////////////////////////////
// //
// Copyright 2019 Dell, Inc. //
// //
// Licensed under the Apache License, Version 2.0 (the "License"); //
// you may not use this file except in compliance with the License. //
// You may obtain a copy of the License at //
// //
// http://www.apache.org/licenses/LICENSE-2.0 //
// //
// Unless required by applicable law or agreed to in writing, software //
// distributed under the License is distributed on an "AS IS" BASIS, //
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. //
// See the License for the specific language governing permissions and //
// limitations under the License. //
// //
////////////////////////////////////////////////////////////////////////////////
package transformer
import (
"github.com/Azure/sonic-mgmt-common/translib/db"
)
// XfmrDbTblCbkParams bundles the inputs handed to a delete-dependency
// table callback.
type XfmrDbTblCbkParams struct {
	d *db.DB //Config DB handler
	oper int // operation being performed
	delDepRefKey string // key of the referring entry being deleted
	tblName string // table the callback is invoked for
	dbKey string // key within tblName
	delDepEntry map[string]string // fields of the dependent entry
	dbDataMap map[db.DBNum]map[string]map[string]db.Value // request payload per DB
	delDepDataMap map[int]*RedisDbMap // Call back methods can add the data
}
// formXfmrDbTblCbkParams bundles the given arguments into an
// XfmrDbTblCbkParams value, with an empty delDepDataMap ready for the
// callback to populate.
func formXfmrDbTblCbkParams(d *db.DB, oper int, delDepRefKey string, tblName string, dbKey string, delDepEntry map[string]string, dbDataMap RedisDbMap) XfmrDbTblCbkParams {
	return XfmrDbTblCbkParams{
		d:             d,
		oper:          oper,
		delDepRefKey:  delDepRefKey,
		tblName:       tblName,
		dbKey:         dbKey,
		delDepEntry:   delDepEntry,
		dbDataMap:     dbDataMap,
		delDepDataMap: make(map[int]*RedisDbMap),
	}
}
type XfmrDbTblCbkMethod func (inParams XfmrDbTblCbkParams) error
|
package fmc
import (
"runtime"
"strings"
)
// Caller prints the current call stack using this package's colored
// Printf/Printfln helpers, skipping runtime and fmc.Caller frames. The
// first printed frame is labelled "Run>", the rest "From>".
func Caller() {
	pc := make([]uintptr, 40)
	n := runtime.Callers(0, pc)
	// Printfln("#rbtn= #gbt%d", n)
	pc = pc[0:n] // pass only valid pcs to runtime.CallersFrames
	frames := runtime.CallersFrames(pc)
	i := 0
	for {
		frame, more := frames.Next()
		name, line, file := frame.Func, frame.Line, frame.File
		// fmt.Println(name)
		nm := ""
		if name == nil {
			// frame.Func is nil for inlined/synthetic frames; fall back
			// to a direct PC lookup to recover the function name.
			f := runtime.FuncForPC(frame.PC)
			//file, line := f.FileLine(frame.PC)
			//fmt.Printf("%s:%d %s\n", file, line, f.Name())
			nm = f.Name()
			//Printfln("#gbt%s#wbt:#gbt%d #ybt%s", file, line, f.Name())
		} else {
			nm = name.Name()
		}
		// Suppress runtime plumbing and this helper itself.
		if strings.Contains(nm, "fmc.Caller") || strings.Contains(nm, "runtime.Callers") || strings.Contains(nm, "runtime.goexit") || strings.Contains(nm, "runtime.main") {
		} else {
			if i == 0 {
				Printf("#wbtRun> ")
				i++
			} else {
				Printf("\t#rbtFrom> ")
				i++
			}
			Printfln("#gbt%s#wbt:#gbt%d #ybt%s", file, line, nm)
		}
		// }
		if !more {
			break
		}
	}
}
// WhoCallerIs reports the fully qualified name of the function that
// called it (e.g. "main.main"). It returns "" when no caller frame can
// be resolved.
func WhoCallerIs() string {
	pc := make([]uintptr, 40)
	// Skip runtime.Callers itself and this function, so the first
	// collected frame is our caller. The previous version collected
	// from skip 0 and sliced pc[2:n-2], which panics whenever the
	// stack is shallower than four frames.
	n := runtime.Callers(2, pc)
	if n == 0 {
		return ""
	}
	frames := runtime.CallersFrames(pc[:n])
	frame, _ := frames.Next()
	if frame.Func == nil {
		// Inlined or otherwise symbol-less frame: fall back to a
		// direct PC lookup. (*runtime.Func).Name returns "" on nil.
		return runtime.FuncForPC(frame.PC).Name()
	}
	return frame.Func.Name()
}
|
package main
import "unicode/utf8"
// main demonstrates that len counts bytes while utf8.RuneCountInString
// counts runes: the two CJK characters are three bytes each.
func main() {
	const s = "雨.痕"
	byteLen := len(s)
	runeLen := utf8.RuneCountInString(s)
	println(byteLen, runeLen)
}
|
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//go:build ignore
// +build ignore
package main
import (
"context"
"expvar"
"flag"
"fmt"
"log"
"net/http"
"os"
"os/signal"
"strings"
"sync"
"time"
_ "net/http/pprof"
"github.com/dustin/go-humanize"
"github.com/elastic/go-elasticsearch/v8"
"github.com/elastic/go-elasticsearch/v8/esutil"
"go.elastic.co/apm"
"go.elastic.co/apm/module/apmelasticsearch"
"github.com/elastic/go-elasticsearch/v8/_examples/bulk/kafka/consumer"
"github.com/elastic/go-elasticsearch/v8/_examples/bulk/kafka/producer"
)
var (
brokerURL string
topicName = "stocks"
topicParts = 4
msgRate int
indexName = "stocks"
numProducers = 1
numConsumers = 4
numIndexers = 1
flushBytes = 0 // Default
numWorkers = 0 // Default
indexerError error
mapping = `{
"mappings": {
"properties": {
"time": { "type": "date" },
"symbol": { "type": "keyword" },
"side": { "type": "keyword" },
"account": { "type": "keyword" },
"quantity": { "type": "long" },
"price": { "type": "long" },
"amount": { "type": "long" }
}
}}}`
)
// init resolves the Kafka broker address from $KAFKA_URL (defaulting to
// localhost:9092) and registers and parses the workload-sizing flags.
func init() {
	if v := os.Getenv("KAFKA_URL"); v != "" {
		brokerURL = v
	} else {
		brokerURL = "localhost:9092"
	}

	flag.IntVar(&msgRate, "rate", 1000, "Producer rate (msg/sec)")
	flag.IntVar(&numProducers, "producers", numProducers, "Number of producers")
	flag.IntVar(&numConsumers, "consumers", numConsumers, "Number of consumers")
	flag.IntVar(&numIndexers, "indexers", numIndexers, "Number of indexers")
	flag.Parse()
}
// main wires the demo pipeline together: Kafka producers feed a topic,
// consumers drain it into Elasticsearch bulk indexers, and a ticker
// redraws a status dashboard until the process is interrupted.
func main() {
	log.SetFlags(0)

	// Serve the "/debug/pprof/" and "/debug/vars" pages
	//
	go func() { log.Println(http.ListenAndServe("localhost:6060", nil)) }()

	var (
		wg  sync.WaitGroup
		ctx = context.Background()

		producers []*producer.Producer
		consumers []*consumer.Consumer
		indexers  []esutil.BulkIndexer
	)

	// The signal channel must be buffered: signal.Notify sends without
	// blocking, so a signal arriving before the receiver is ready would
	// be lost on an unbuffered channel (go vet flags this misuse).
	done := make(chan os.Signal, 1)
	signal.Notify(done, os.Interrupt)
	go func() { <-done; log.Println("\n"); os.Exit(0) }()

	// Set up producers
	//
	for i := 1; i <= numProducers; i++ {
		producers = append(producers,
			&producer.Producer{
				BrokerURL:   brokerURL,
				TopicName:   topicName,
				TopicParts:  topicParts,
				MessageRate: msgRate})
	}

	// Create an Elasticsearch client
	//
	es, err := elasticsearch.NewClient(elasticsearch.Config{
		RetryOnStatus: []int{502, 503, 504, 429}, // Add 429 to the list of retryable statuses
		RetryBackoff:  func(i int) time.Duration { return time.Duration(i) * 100 * time.Millisecond },
		MaxRetries:    5,
		EnableMetrics: true,
		Transport:     apmelasticsearch.WrapRoundTripper(http.DefaultTransport),
	})
	if err != nil {
		log.Fatalf("Error: NewClient(): %s", err)
	}

	// Export client metrics to the "expvar" package
	expvar.Publish("go-elasticsearch", expvar.Func(func() interface{} { m, _ := es.Metrics(); return m }))

	// Create the "stocks" index with correct mappings
	//
	res, err := es.Indices.Exists([]string{indexName})
	if err != nil {
		log.Fatalf("Error: Indices.Exists: %s", err)
	}
	res.Body.Close()
	if res.StatusCode == 404 {
		res, err := es.Indices.Create(
			indexName,
			es.Indices.Create.WithBody(strings.NewReader(mapping)),
			es.Indices.Create.WithWaitForActiveShards("1"),
		)
		if err != nil {
			log.Fatalf("Error: Indices.Create: %s", err)
		}
		if res.IsError() {
			log.Fatalf("Error: Indices.Create: %s", res)
		}
	}

	// Set up indexers
	//
	for i := 1; i <= numIndexers; i++ {
		idx, err := esutil.NewBulkIndexer(esutil.BulkIndexerConfig{
			Index:      indexName,
			Client:     es,
			NumWorkers: numWorkers,
			FlushBytes: int(flushBytes),
			// Elastic APM: Instrument the flush operations and capture errors
			OnFlushStart: func(ctx context.Context) context.Context {
				txn := apm.DefaultTracer.StartTransaction("Bulk", "indexing")
				return apm.ContextWithTransaction(ctx, txn)
			},
			OnFlushEnd: func(ctx context.Context) {
				apm.TransactionFromContext(ctx).End()
			},
			OnError: func(ctx context.Context, err error) {
				indexerError = err
				apm.CaptureError(ctx, err).Send()
			},
		})
		if err != nil {
			log.Fatalf("ERROR: NewBulkIndexer(): %s", err)
		}
		indexers = append(indexers, idx)
	}

	// Set up consumers; each one feeds an indexer, assigned round-robin.
	//
	for i := 1; i <= numConsumers; i++ {
		consumers = append(consumers,
			&consumer.Consumer{
				BrokerURL: brokerURL,
				TopicName: topicName,
				Indexer:   indexers[i%numIndexers]})
	}

	// Set up reporting output
	//
	reporter := time.NewTicker(500 * time.Millisecond)
	defer reporter.Stop()
	go func() {
		fmt.Printf("Initializing... producers=%d consumers=%d indexers=%d\n", numProducers, numConsumers, numIndexers)
		// Ranging over the ticker replaces the former single-case select.
		for range reporter.C {
			fmt.Print(report(producers, consumers, indexers))
		}
	}()

	// Periodically clear the last indexer error so stale errors drop
	// off the dashboard.
	errcleaner := time.NewTicker(10 * time.Second)
	defer errcleaner.Stop()
	go func() {
		for range errcleaner.C {
			indexerError = nil
		}
	}()

	// Create the Kafka topic
	//
	if len(producers) > 0 {
		if err := producers[0].CreateTopic(ctx); err != nil {
			log.Fatalf("ERROR: Producer: %s", err)
		}
	}

	// Launch consumers
	//
	for _, c := range consumers {
		wg.Add(1)
		go func(c *consumer.Consumer) {
			defer wg.Done()
			if err := c.Run(ctx); err != nil {
				log.Fatalf("ERROR: Consumer: %s", err)
			}
		}(c)
	}

	// Launch producers
	//
	time.Sleep(5 * time.Second) // Leave some room for consumers to connect
	for _, p := range producers {
		wg.Add(1)
		go func(p *producer.Producer) {
			defer wg.Done()
			if err := p.Run(ctx); err != nil {
				log.Fatalf("ERROR: Producer: %s", err)
			}
		}(p)
	}

	wg.Wait()
	fmt.Print(report(producers, consumers, indexers))
}
// report renders the dashboard as a single string: a box-drawn table
// with one row per producer, consumer and indexer, positioned with ANSI
// "\033[row;colH" cursor escapes so each refresh redraws in place. The
// indexer rows merge the last two columns to leave room for error text.
func report(
	producers []*producer.Producer,
	consumers []*consumer.Consumer,
	indexers []esutil.BulkIndexer,
) string {
	var (
		b strings.Builder
		value string
		currRow = 1
		numCols = 6
		colWidth = 20
		// divider draws a horizontal rule; when last is true the joint
		// before the final column is "┷" so the merged two-column rows
		// below line up.
		divider = func(last bool) {
			fmt.Fprintf(&b, "\033[%d;0H", currRow)
			fmt.Fprint(&b, "┣")
			for i := 1; i <= numCols; i++ {
				fmt.Fprint(&b, strings.Repeat("━", colWidth))
				if last && i == 5 {
					fmt.Fprint(&b, "┷")
					continue
				}
				if i < numCols {
					fmt.Fprint(&b, "┿")
				}
			}
			fmt.Fprint(&b, "┫")
			currRow++
		}
	)

	// Clear the screen, then draw the top border.
	fmt.Print("\033[2J\033[K")
	fmt.Printf("\033[%d;0H", currRow)

	fmt.Fprint(&b, "┏")
	for i := 1; i <= numCols; i++ {
		fmt.Fprint(&b, strings.Repeat("━", colWidth))
		if i < numCols {
			fmt.Fprint(&b, "┯")
		}
	}
	fmt.Fprint(&b, "┓")
	currRow++

	// One row per producer: duration, throughput, totals, errors.
	for i, p := range producers {
		fmt.Fprintf(&b, "\033[%d;0H", currRow)
		value = fmt.Sprintf("Producer %d", i+1)
		fmt.Fprintf(&b, "┃ %-*s│", colWidth-1, value)
		s := p.Stats()
		value = fmt.Sprintf("duration=%s", s.Duration.Truncate(time.Second))
		fmt.Fprintf(&b, " %-*s│", colWidth-1, value)
		value = fmt.Sprintf("msg/sec=%s", humanize.FtoaWithDigits(s.Throughput, 2))
		fmt.Fprintf(&b, " %-*s│", colWidth-1, value)
		value = fmt.Sprintf("sent=%s", humanize.Comma(int64(s.TotalMessages)))
		fmt.Fprintf(&b, " %-*s│", colWidth-1, value)
		value = fmt.Sprintf("bytes=%s", humanize.Bytes(uint64(s.TotalBytes)))
		fmt.Fprintf(&b, " %-*s│", colWidth-1, value)
		value = fmt.Sprintf("errors=%s", humanize.Comma(int64(s.TotalErrors)))
		fmt.Fprintf(&b, " %-*s┃", colWidth-1, value)
		currRow++
		divider(i == len(producers)-1)
	}

	// One row per consumer: lag, throughput, totals, errors.
	for i, c := range consumers {
		fmt.Fprintf(&b, "\033[%d;0H", currRow)
		value = fmt.Sprintf("Consumer %d", i+1)
		fmt.Fprintf(&b, "┃ %-*s│", colWidth-1, value)
		s := c.Stats()
		value = fmt.Sprintf("lagging=%s", humanize.Comma(s.TotalLag))
		fmt.Fprintf(&b, " %-*s│", colWidth-1, value)
		value = fmt.Sprintf("msg/sec=%s", humanize.FtoaWithDigits(s.Throughput, 2))
		fmt.Fprintf(&b, " %-*s│", colWidth-1, value)
		value = fmt.Sprintf("received=%s", humanize.Comma(s.TotalMessages))
		fmt.Fprintf(&b, " %-*s│", colWidth-1, value)
		value = fmt.Sprintf("bytes=%s", humanize.Bytes(uint64(s.TotalBytes)))
		fmt.Fprintf(&b, " %-*s│", colWidth-1, value)
		value = fmt.Sprintf("errors=%s", humanize.Comma(s.TotalErrors))
		fmt.Fprintf(&b, " %-*s┃", colWidth-1, value)
		currRow++
		divider(i == len(consumers)-1)
	}

	// One row per indexer; the last two columns are merged to show the
	// most recent indexer error (truncated to the merged width).
	for i, x := range indexers {
		fmt.Fprintf(&b, "\033[%d;0H", currRow)
		value = fmt.Sprintf("Indexer %d", i+1)
		fmt.Fprintf(&b, "┃ %-*s│", colWidth-1, value)
		s := x.Stats()
		value = fmt.Sprintf("added=%s", humanize.Comma(int64(s.NumAdded)))
		fmt.Fprintf(&b, " %-*s│", colWidth-1, value)
		value = fmt.Sprintf("flushed=%s", humanize.Comma(int64(s.NumFlushed)))
		fmt.Fprintf(&b, " %-*s│", colWidth-1, value)
		value = fmt.Sprintf("failed=%s", humanize.Comma(int64(s.NumFailed)))
		fmt.Fprintf(&b, " %-*s│", colWidth-1, value)
		if indexerError != nil {
			value = "err=" + indexerError.Error()
			if len(value) > 2*colWidth {
				value = value[:2*colWidth]
			}
		} else {
			value = ""
		}
		fmt.Fprintf(&b, " %-*s┃", 2*colWidth, value)
		currRow++
		if i < len(indexers)-1 {
			divider(true)
		}
	}

	// Bottom border, with the merged-column joint at position 5.
	fmt.Fprintf(&b, "\033[%d;0H", currRow)
	fmt.Fprint(&b, "┗")
	for i := 1; i <= numCols; i++ {
		fmt.Fprint(&b, strings.Repeat("━", colWidth))
		if i == 5 {
			fmt.Fprint(&b, "━")
			continue
		}
		if i < numCols {
			fmt.Fprint(&b, "┷")
		}
	}
	fmt.Fprint(&b, "┛")
	currRow++
	return b.String()
}
|
package loadgen
import (
"fmt"
"github.com/prometheus/client_golang/prometheus"
"math/rand"
"strconv"
)
type (
	// incCollector exposes its descriptors as gauges whose value equals
	// the number of scrapes performed so far (cycle), so every series
	// increases by one per scrape.
	incCollector struct {
		descs []*prometheus.Desc
		labelCount int
		cycle int
	}
)
// NewIncCollector builds a collector exposing nmetrics gauges named
// test0..testN-1, each with nlabels values of the "lab" label, whose
// reported value grows by one on every scrape.
func NewIncCollector(nmetrics, nlabels int) *incCollector {
	descs := make([]*prometheus.Desc, nmetrics)
	for i := range descs {
		name := fmt.Sprintf("test%d", i)
		descs[i] = prometheus.NewDesc(name, name, []string{"lab"}, nil)
	}
	return &incCollector{descs: descs, labelCount: nlabels}
}
// Describe implements prometheus.Collector.
func (t *incCollector) Describe(ch chan<- *prometheus.Desc) {
	for i := range t.descs {
		ch <- t.descs[i]
	}
}
// Collect implements prometheus.Collector; each scrape bumps the cycle
// counter and reports it as the gauge value for every series.
func (t *incCollector) Collect(ch chan<- prometheus.Metric) {
	t.cycle++
	v := float64(t.cycle)
	for _, d := range t.descs {
		for lab := 0; lab < t.labelCount; lab++ {
			ch <- prometheus.MustNewConstMetric(d,
				prometheus.GaugeValue, v, strconv.Itoa(lab))
		}
	}
}
// Sum returns the expected total of all values emitted so far: each of
// the len(descs)*labelCount series reported 1, 2, …, cycle, i.e. the
// arithmetic series cycle*(cycle+1)/2 per series.
func (t *incCollector) Sum() (int, error) {
	return len(t.descs) * (t.labelCount) * t.cycle * (t.cycle + 1) / 2, nil
}
type (
	// staticCollector pre-builds its metrics once (constant value 1)
	// and replays the same set on every scrape, counting scrapes in
	// cycle for Sum.
	staticCollector struct {
		descs []*prometheus.Desc
		metrics []prometheus.Metric
		labelCount int
		cycle int
	}
)
// NewStaticCollector builds a collector whose nmetrics*nlabels series
// are created once, all with constant value 1, and re-emitted verbatim
// on every scrape.
func NewStaticCollector(nmetrics, nlabels int) *staticCollector {
	descs := make([]*prometheus.Desc, nmetrics)
	metrics := make([]prometheus.Metric, 0, nlabels*nmetrics)
	for i := range descs {
		name := fmt.Sprintf("test%d", i)
		d := prometheus.NewDesc(name, name, []string{"lab"}, nil)
		descs[i] = d
		for lab := 0; lab < nlabels; lab++ {
			metrics = append(metrics, prometheus.MustNewConstMetric(d,
				prometheus.GaugeValue, 1, strconv.Itoa(lab)))
		}
	}
	return &staticCollector{descs: descs, metrics: metrics, labelCount: nlabels}
}
// Describe implements prometheus.Collector.
func (t *staticCollector) Describe(ch chan<- *prometheus.Desc) {
	for i := range t.descs {
		ch <- t.descs[i]
	}
}
// Collect implements prometheus.Collector; it counts the scrape and
// replays the pre-built constant metrics unchanged.
func (t *staticCollector) Collect(ch chan<- prometheus.Metric) {
	t.cycle++
	for i := range t.metrics {
		ch <- t.metrics[i]
	}
}
// Sum returns the expected total of all emitted values: every one of
// the len(descs)*labelCount series reports the constant 1 once per
// scrape (cycle scrapes so far).
func (t *staticCollector) Sum() (int, error) {
	return len(t.descs) * (t.labelCount) * t.cycle, nil
}
type (
	// randCyclicCollector emits a fixed pool of random values, rotating
	// the starting offset on each scrape so individual series change
	// while the per-scrape total stays constant (sumvalues).
	randCyclicCollector struct {
		descs []*prometheus.Desc
		values []int
		labelCount int
		cycle int
		sumvalues int
	}
)
// NewRandCyclicCollector builds a collector over nmetrics*nlabels
// series whose values are drawn once from [0, maxvalue); their total is
// cached in sumvalues for Sum.
func NewRandCyclicCollector(nmetrics, nlabels, maxvalue int) *randCyclicCollector {
	descs := make([]*prometheus.Desc, nmetrics)
	for i := range descs {
		name := fmt.Sprintf("test%d", i)
		descs[i] = prometheus.NewDesc(name, name, []string{"lab"}, nil)
	}
	values := make([]int, nlabels*nmetrics)
	total := 0
	for i := range values {
		v := rand.Intn(maxvalue)
		values[i] = v
		total += v
	}
	return &randCyclicCollector{descs: descs, values: values, labelCount: nlabels, sumvalues: total}
}
// Describe implements prometheus.Collector.
func (t *randCyclicCollector) Describe(ch chan<- *prometheus.Desc) {
	for i := range t.descs {
		ch <- t.descs[i]
	}
}
// Collect implements prometheus.Collector. Each scrape walks the whole
// value pool exactly once (len(values) == len(descs)*labelCount),
// starting at an offset derived from the scrape count and wrapping, so
// individual series change between scrapes while the emitted total
// remains sumvalues.
func (t *randCyclicCollector) Collect(ch chan<- prometheus.Metric) {
	i := t.cycle
	t.cycle++
	for _, desc := range t.descs {
		for j := 0; j < t.labelCount; j++ {
			// Wrap the pool index; also normalizes an offset that has
			// grown past the pool length.
			if i >= len(t.values) {
				i = 0
			}
			ch <- prometheus.MustNewConstMetric(desc,
				prometheus.GaugeValue, float64(t.values[i]), strconv.Itoa(j))
			i++
		}
	}
}
// Sum returns sumvalues*cycle: every scrape emits each pooled value
// exactly once, so the running total is the pool sum per scrape.
func (t *randCyclicCollector) Sum() (int, error) {
	return t.sumvalues * t.cycle, nil
}
|
// Copyright 2022 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
osconfigpb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/osconfig/osconfig_go_proto"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/osconfig"
)
// PatchDeploymentServer implements the gRPC interface for PatchDeployment.
// It is stateless; the conversion helpers below translate between the
// proto representation and the DCL osconfig types.
type PatchDeploymentServer struct{}
// ProtoToOsconfigPatchDeploymentPatchConfigRebootConfigEnum converts a PatchDeploymentPatchConfigRebootConfigEnum enum from its proto representation.
// (Doc comments here previously omitted the "Osconfig" prefix and did not
// start with the function name.) A zero enum value means "unspecified" and
// maps to nil; otherwise the proto name is stripped of its type prefix.
func ProtoToOsconfigPatchDeploymentPatchConfigRebootConfigEnum(e osconfigpb.OsconfigPatchDeploymentPatchConfigRebootConfigEnum) *osconfig.PatchDeploymentPatchConfigRebootConfigEnum {
	if e == 0 {
		return nil
	}
	if n, ok := osconfigpb.OsconfigPatchDeploymentPatchConfigRebootConfigEnum_name[int32(e)]; ok {
		e := osconfig.PatchDeploymentPatchConfigRebootConfigEnum(n[len("OsconfigPatchDeploymentPatchConfigRebootConfigEnum"):])
		return &e
	}
	return nil
}

// ProtoToOsconfigPatchDeploymentPatchConfigAptTypeEnum converts a PatchDeploymentPatchConfigAptTypeEnum enum from its proto representation.
func ProtoToOsconfigPatchDeploymentPatchConfigAptTypeEnum(e osconfigpb.OsconfigPatchDeploymentPatchConfigAptTypeEnum) *osconfig.PatchDeploymentPatchConfigAptTypeEnum {
	if e == 0 {
		return nil
	}
	if n, ok := osconfigpb.OsconfigPatchDeploymentPatchConfigAptTypeEnum_name[int32(e)]; ok {
		e := osconfig.PatchDeploymentPatchConfigAptTypeEnum(n[len("OsconfigPatchDeploymentPatchConfigAptTypeEnum"):])
		return &e
	}
	return nil
}

// ProtoToOsconfigPatchDeploymentPatchConfigWindowsUpdateClassificationsEnum converts a PatchDeploymentPatchConfigWindowsUpdateClassificationsEnum enum from its proto representation.
func ProtoToOsconfigPatchDeploymentPatchConfigWindowsUpdateClassificationsEnum(e osconfigpb.OsconfigPatchDeploymentPatchConfigWindowsUpdateClassificationsEnum) *osconfig.PatchDeploymentPatchConfigWindowsUpdateClassificationsEnum {
	if e == 0 {
		return nil
	}
	if n, ok := osconfigpb.OsconfigPatchDeploymentPatchConfigWindowsUpdateClassificationsEnum_name[int32(e)]; ok {
		e := osconfig.PatchDeploymentPatchConfigWindowsUpdateClassificationsEnum(n[len("OsconfigPatchDeploymentPatchConfigWindowsUpdateClassificationsEnum"):])
		return &e
	}
	return nil
}

// ProtoToOsconfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigInterpreterEnum converts a PatchDeploymentPatchConfigPreStepLinuxExecStepConfigInterpreterEnum enum from its proto representation.
func ProtoToOsconfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigInterpreterEnum(e osconfigpb.OsconfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigInterpreterEnum) *osconfig.PatchDeploymentPatchConfigPreStepLinuxExecStepConfigInterpreterEnum {
	if e == 0 {
		return nil
	}
	if n, ok := osconfigpb.OsconfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigInterpreterEnum_name[int32(e)]; ok {
		e := osconfig.PatchDeploymentPatchConfigPreStepLinuxExecStepConfigInterpreterEnum(n[len("OsconfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigInterpreterEnum"):])
		return &e
	}
	return nil
}

// ProtoToOsconfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigInterpreterEnum converts a PatchDeploymentPatchConfigPreStepWindowsExecStepConfigInterpreterEnum enum from its proto representation.
func ProtoToOsconfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigInterpreterEnum(e osconfigpb.OsconfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigInterpreterEnum) *osconfig.PatchDeploymentPatchConfigPreStepWindowsExecStepConfigInterpreterEnum {
	if e == 0 {
		return nil
	}
	if n, ok := osconfigpb.OsconfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigInterpreterEnum_name[int32(e)]; ok {
		e := osconfig.PatchDeploymentPatchConfigPreStepWindowsExecStepConfigInterpreterEnum(n[len("OsconfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigInterpreterEnum"):])
		return &e
	}
	return nil
}

// ProtoToOsconfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigInterpreterEnum converts a PatchDeploymentPatchConfigPostStepLinuxExecStepConfigInterpreterEnum enum from its proto representation.
func ProtoToOsconfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigInterpreterEnum(e osconfigpb.OsconfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigInterpreterEnum) *osconfig.PatchDeploymentPatchConfigPostStepLinuxExecStepConfigInterpreterEnum {
	if e == 0 {
		return nil
	}
	if n, ok := osconfigpb.OsconfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigInterpreterEnum_name[int32(e)]; ok {
		e := osconfig.PatchDeploymentPatchConfigPostStepLinuxExecStepConfigInterpreterEnum(n[len("OsconfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigInterpreterEnum"):])
		return &e
	}
	return nil
}

// ProtoToOsconfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigInterpreterEnum converts a PatchDeploymentPatchConfigPostStepWindowsExecStepConfigInterpreterEnum enum from its proto representation.
func ProtoToOsconfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigInterpreterEnum(e osconfigpb.OsconfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigInterpreterEnum) *osconfig.PatchDeploymentPatchConfigPostStepWindowsExecStepConfigInterpreterEnum {
	if e == 0 {
		return nil
	}
	if n, ok := osconfigpb.OsconfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigInterpreterEnum_name[int32(e)]; ok {
		e := osconfig.PatchDeploymentPatchConfigPostStepWindowsExecStepConfigInterpreterEnum(n[len("OsconfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigInterpreterEnum"):])
		return &e
	}
	return nil
}

// ProtoToOsconfigPatchDeploymentRecurringScheduleFrequencyEnum converts a PatchDeploymentRecurringScheduleFrequencyEnum enum from its proto representation.
func ProtoToOsconfigPatchDeploymentRecurringScheduleFrequencyEnum(e osconfigpb.OsconfigPatchDeploymentRecurringScheduleFrequencyEnum) *osconfig.PatchDeploymentRecurringScheduleFrequencyEnum {
	if e == 0 {
		return nil
	}
	if n, ok := osconfigpb.OsconfigPatchDeploymentRecurringScheduleFrequencyEnum_name[int32(e)]; ok {
		e := osconfig.PatchDeploymentRecurringScheduleFrequencyEnum(n[len("OsconfigPatchDeploymentRecurringScheduleFrequencyEnum"):])
		return &e
	}
	return nil
}

// ProtoToOsconfigPatchDeploymentRecurringScheduleWeeklyDayOfWeekEnum converts a PatchDeploymentRecurringScheduleWeeklyDayOfWeekEnum enum from its proto representation.
func ProtoToOsconfigPatchDeploymentRecurringScheduleWeeklyDayOfWeekEnum(e osconfigpb.OsconfigPatchDeploymentRecurringScheduleWeeklyDayOfWeekEnum) *osconfig.PatchDeploymentRecurringScheduleWeeklyDayOfWeekEnum {
	if e == 0 {
		return nil
	}
	if n, ok := osconfigpb.OsconfigPatchDeploymentRecurringScheduleWeeklyDayOfWeekEnum_name[int32(e)]; ok {
		e := osconfig.PatchDeploymentRecurringScheduleWeeklyDayOfWeekEnum(n[len("OsconfigPatchDeploymentRecurringScheduleWeeklyDayOfWeekEnum"):])
		return &e
	}
	return nil
}

// ProtoToOsconfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthDayOfWeekEnum converts a PatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthDayOfWeekEnum enum from its proto representation.
func ProtoToOsconfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthDayOfWeekEnum(e osconfigpb.OsconfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthDayOfWeekEnum) *osconfig.PatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthDayOfWeekEnum {
	if e == 0 {
		return nil
	}
	if n, ok := osconfigpb.OsconfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthDayOfWeekEnum_name[int32(e)]; ok {
		e := osconfig.PatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthDayOfWeekEnum(n[len("OsconfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthDayOfWeekEnum"):])
		return &e
	}
	return nil
}

// ProtoToOsconfigPatchDeploymentRolloutModeEnum converts a PatchDeploymentRolloutModeEnum enum from its proto representation.
func ProtoToOsconfigPatchDeploymentRolloutModeEnum(e osconfigpb.OsconfigPatchDeploymentRolloutModeEnum) *osconfig.PatchDeploymentRolloutModeEnum {
	if e == 0 {
		return nil
	}
	if n, ok := osconfigpb.OsconfigPatchDeploymentRolloutModeEnum_name[int32(e)]; ok {
		e := osconfig.PatchDeploymentRolloutModeEnum(n[len("OsconfigPatchDeploymentRolloutModeEnum"):])
		return &e
	}
	return nil
}
// ProtoToOsconfigPatchDeploymentInstanceFilter converts a PatchDeploymentInstanceFilter object from its proto representation.
func ProtoToOsconfigPatchDeploymentInstanceFilter(p *osconfigpb.OsconfigPatchDeploymentInstanceFilter) *osconfig.PatchDeploymentInstanceFilter {
	if p == nil {
		return nil
	}
	res := &osconfig.PatchDeploymentInstanceFilter{
		All: dcl.Bool(p.GetAll()),
	}
	// Repeated message fields need element-wise conversion.
	for _, gl := range p.GetGroupLabels() {
		res.GroupLabels = append(res.GroupLabels, *ProtoToOsconfigPatchDeploymentInstanceFilterGroupLabels(gl))
	}
	// Plain string slices can be appended wholesale; an empty source leaves the field nil.
	res.Zones = append(res.Zones, p.GetZones()...)
	res.Instances = append(res.Instances, p.GetInstances()...)
	res.InstanceNamePrefixes = append(res.InstanceNamePrefixes, p.GetInstanceNamePrefixes()...)
	return res
}
// ProtoToOsconfigPatchDeploymentInstanceFilterGroupLabels converts a PatchDeploymentInstanceFilterGroupLabels object from its proto representation.
func ProtoToOsconfigPatchDeploymentInstanceFilterGroupLabels(p *osconfigpb.OsconfigPatchDeploymentInstanceFilterGroupLabels) *osconfig.PatchDeploymentInstanceFilterGroupLabels {
	if p == nil {
		return nil
	}
	obj := &osconfig.PatchDeploymentInstanceFilterGroupLabels{}
	// Copy the Labels map from the proto. The ToProto direction round-trips
	// Labels, so dropping it here would silently lose the field on conversion.
	// Only allocate when there is something to copy so an absent map stays nil.
	if m := p.GetLabels(); len(m) > 0 {
		obj.Labels = make(map[string]string, len(m))
		for k, v := range m {
			obj.Labels[k] = v
		}
	}
	return obj
}
// ProtoToOsconfigPatchDeploymentPatchConfig converts a PatchDeploymentPatchConfig object from its proto representation.
func ProtoToOsconfigPatchDeploymentPatchConfig(p *osconfigpb.OsconfigPatchDeploymentPatchConfig) *osconfig.PatchDeploymentPatchConfig {
	if p == nil {
		return nil
	}
	// Each sub-converter tolerates an absent proto field and yields nil for it.
	res := &osconfig.PatchDeploymentPatchConfig{}
	res.RebootConfig = ProtoToOsconfigPatchDeploymentPatchConfigRebootConfigEnum(p.GetRebootConfig())
	res.Apt = ProtoToOsconfigPatchDeploymentPatchConfigApt(p.GetApt())
	res.Yum = ProtoToOsconfigPatchDeploymentPatchConfigYum(p.GetYum())
	res.Goo = ProtoToOsconfigPatchDeploymentPatchConfigGoo(p.GetGoo())
	res.Zypper = ProtoToOsconfigPatchDeploymentPatchConfigZypper(p.GetZypper())
	res.WindowsUpdate = ProtoToOsconfigPatchDeploymentPatchConfigWindowsUpdate(p.GetWindowsUpdate())
	res.PreStep = ProtoToOsconfigPatchDeploymentPatchConfigPreStep(p.GetPreStep())
	res.PostStep = ProtoToOsconfigPatchDeploymentPatchConfigPostStep(p.GetPostStep())
	return res
}
// ProtoToOsconfigPatchDeploymentPatchConfigApt converts a PatchDeploymentPatchConfigApt object from its proto representation.
func ProtoToOsconfigPatchDeploymentPatchConfigApt(p *osconfigpb.OsconfigPatchDeploymentPatchConfigApt) *osconfig.PatchDeploymentPatchConfigApt {
	if p == nil {
		return nil
	}
	res := &osconfig.PatchDeploymentPatchConfigApt{
		Type: ProtoToOsconfigPatchDeploymentPatchConfigAptTypeEnum(p.GetType()),
	}
	// Copy repeated string fields; an empty source leaves the field nil.
	res.Excludes = append(res.Excludes, p.GetExcludes()...)
	res.ExclusivePackages = append(res.ExclusivePackages, p.GetExclusivePackages()...)
	return res
}
// ProtoToOsconfigPatchDeploymentPatchConfigYum converts a PatchDeploymentPatchConfigYum object from its proto representation.
func ProtoToOsconfigPatchDeploymentPatchConfigYum(p *osconfigpb.OsconfigPatchDeploymentPatchConfigYum) *osconfig.PatchDeploymentPatchConfigYum {
	if p == nil {
		return nil
	}
	res := &osconfig.PatchDeploymentPatchConfigYum{
		Security: dcl.Bool(p.GetSecurity()),
		Minimal:  dcl.Bool(p.GetMinimal()),
	}
	// Copy repeated string fields; an empty source leaves the field nil.
	res.Excludes = append(res.Excludes, p.GetExcludes()...)
	res.ExclusivePackages = append(res.ExclusivePackages, p.GetExclusivePackages()...)
	return res
}
// ProtoToOsconfigPatchDeploymentPatchConfigGoo converts a PatchDeploymentPatchConfigGoo object from its proto representation.
func ProtoToOsconfigPatchDeploymentPatchConfigGoo(p *osconfigpb.OsconfigPatchDeploymentPatchConfigGoo) *osconfig.PatchDeploymentPatchConfigGoo {
	if p == nil {
		return nil
	}
	// Goo carries no fields; its presence alone is the signal.
	return &osconfig.PatchDeploymentPatchConfigGoo{}
}
// ProtoToOsconfigPatchDeploymentPatchConfigZypper converts a PatchDeploymentPatchConfigZypper object from its proto representation.
func ProtoToOsconfigPatchDeploymentPatchConfigZypper(p *osconfigpb.OsconfigPatchDeploymentPatchConfigZypper) *osconfig.PatchDeploymentPatchConfigZypper {
	if p == nil {
		return nil
	}
	res := &osconfig.PatchDeploymentPatchConfigZypper{
		WithOptional: dcl.Bool(p.GetWithOptional()),
		WithUpdate:   dcl.Bool(p.GetWithUpdate()),
	}
	// Copy repeated string fields; an empty source leaves the field nil.
	res.Categories = append(res.Categories, p.GetCategories()...)
	res.Severities = append(res.Severities, p.GetSeverities()...)
	res.Excludes = append(res.Excludes, p.GetExcludes()...)
	res.ExclusivePatches = append(res.ExclusivePatches, p.GetExclusivePatches()...)
	return res
}
// ProtoToOsconfigPatchDeploymentPatchConfigWindowsUpdate converts a PatchDeploymentPatchConfigWindowsUpdate object from its proto representation.
func ProtoToOsconfigPatchDeploymentPatchConfigWindowsUpdate(p *osconfigpb.OsconfigPatchDeploymentPatchConfigWindowsUpdate) *osconfig.PatchDeploymentPatchConfigWindowsUpdate {
	if p == nil {
		return nil
	}
	res := &osconfig.PatchDeploymentPatchConfigWindowsUpdate{}
	// Classifications is a repeated enum; each element needs its own conversion.
	for _, c := range p.GetClassifications() {
		res.Classifications = append(res.Classifications, *ProtoToOsconfigPatchDeploymentPatchConfigWindowsUpdateClassificationsEnum(c))
	}
	// Copy repeated string fields; an empty source leaves the field nil.
	res.Excludes = append(res.Excludes, p.GetExcludes()...)
	res.ExclusivePatches = append(res.ExclusivePatches, p.GetExclusivePatches()...)
	return res
}
// ProtoToOsconfigPatchDeploymentPatchConfigPreStep converts a PatchDeploymentPatchConfigPreStep object from its proto representation.
func ProtoToOsconfigPatchDeploymentPatchConfigPreStep(p *osconfigpb.OsconfigPatchDeploymentPatchConfigPreStep) *osconfig.PatchDeploymentPatchConfigPreStep {
	if p == nil {
		return nil
	}
	// Both exec-step configs are optional; absent ones convert to nil.
	res := &osconfig.PatchDeploymentPatchConfigPreStep{}
	res.LinuxExecStepConfig = ProtoToOsconfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfig(p.GetLinuxExecStepConfig())
	res.WindowsExecStepConfig = ProtoToOsconfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfig(p.GetWindowsExecStepConfig())
	return res
}
// ProtoToOsconfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfig converts a PatchDeploymentPatchConfigPreStepLinuxExecStepConfig object from its proto representation.
func ProtoToOsconfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfig(p *osconfigpb.OsconfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfig) *osconfig.PatchDeploymentPatchConfigPreStepLinuxExecStepConfig {
	if p == nil {
		return nil
	}
	res := &osconfig.PatchDeploymentPatchConfigPreStepLinuxExecStepConfig{
		LocalPath:   dcl.StringOrNil(p.GetLocalPath()),
		GcsObject:   ProtoToOsconfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObject(p.GetGcsObject()),
		Interpreter: ProtoToOsconfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigInterpreterEnum(p.GetInterpreter()),
	}
	// Copy the repeated exit codes; an empty source leaves the field nil.
	res.AllowedSuccessCodes = append(res.AllowedSuccessCodes, p.GetAllowedSuccessCodes()...)
	return res
}
// ProtoToOsconfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObject converts a PatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObject object from its proto representation.
func ProtoToOsconfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObject(p *osconfigpb.OsconfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObject) *osconfig.PatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObject {
	if p == nil {
		return nil
	}
	// All three fields are scalars; wrap them as optional pointers.
	res := &osconfig.PatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObject{}
	res.Bucket = dcl.StringOrNil(p.GetBucket())
	res.Object = dcl.StringOrNil(p.GetObject())
	res.GenerationNumber = dcl.Int64OrNil(p.GetGenerationNumber())
	return res
}
// ProtoToOsconfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfig converts a PatchDeploymentPatchConfigPreStepWindowsExecStepConfig object from its proto representation.
func ProtoToOsconfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfig(p *osconfigpb.OsconfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfig) *osconfig.PatchDeploymentPatchConfigPreStepWindowsExecStepConfig {
	if p == nil {
		return nil
	}
	res := &osconfig.PatchDeploymentPatchConfigPreStepWindowsExecStepConfig{
		LocalPath:   dcl.StringOrNil(p.GetLocalPath()),
		GcsObject:   ProtoToOsconfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObject(p.GetGcsObject()),
		Interpreter: ProtoToOsconfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigInterpreterEnum(p.GetInterpreter()),
	}
	// Copy the repeated exit codes; an empty source leaves the field nil.
	res.AllowedSuccessCodes = append(res.AllowedSuccessCodes, p.GetAllowedSuccessCodes()...)
	return res
}
// ProtoToOsconfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObject converts a PatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObject object from its proto representation.
func ProtoToOsconfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObject(p *osconfigpb.OsconfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObject) *osconfig.PatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObject {
	if p == nil {
		return nil
	}
	// All three fields are scalars; wrap them as optional pointers.
	res := &osconfig.PatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObject{}
	res.Bucket = dcl.StringOrNil(p.GetBucket())
	res.Object = dcl.StringOrNil(p.GetObject())
	res.GenerationNumber = dcl.Int64OrNil(p.GetGenerationNumber())
	return res
}
// ProtoToOsconfigPatchDeploymentPatchConfigPostStep converts a PatchDeploymentPatchConfigPostStep object from its proto representation.
func ProtoToOsconfigPatchDeploymentPatchConfigPostStep(p *osconfigpb.OsconfigPatchDeploymentPatchConfigPostStep) *osconfig.PatchDeploymentPatchConfigPostStep {
	if p == nil {
		return nil
	}
	// Both exec-step configs are optional; absent ones convert to nil.
	res := &osconfig.PatchDeploymentPatchConfigPostStep{}
	res.LinuxExecStepConfig = ProtoToOsconfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfig(p.GetLinuxExecStepConfig())
	res.WindowsExecStepConfig = ProtoToOsconfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfig(p.GetWindowsExecStepConfig())
	return res
}
// ProtoToOsconfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfig converts a PatchDeploymentPatchConfigPostStepLinuxExecStepConfig object from its proto representation.
func ProtoToOsconfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfig(p *osconfigpb.OsconfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfig) *osconfig.PatchDeploymentPatchConfigPostStepLinuxExecStepConfig {
	if p == nil {
		return nil
	}
	res := &osconfig.PatchDeploymentPatchConfigPostStepLinuxExecStepConfig{
		LocalPath:   dcl.StringOrNil(p.GetLocalPath()),
		GcsObject:   ProtoToOsconfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObject(p.GetGcsObject()),
		Interpreter: ProtoToOsconfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigInterpreterEnum(p.GetInterpreter()),
	}
	// Copy the repeated exit codes; an empty source leaves the field nil.
	res.AllowedSuccessCodes = append(res.AllowedSuccessCodes, p.GetAllowedSuccessCodes()...)
	return res
}
// ProtoToOsconfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObject converts a PatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObject object from its proto representation.
func ProtoToOsconfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObject(p *osconfigpb.OsconfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObject) *osconfig.PatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObject {
	if p == nil {
		return nil
	}
	// All three fields are scalars; wrap them as optional pointers.
	res := &osconfig.PatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObject{}
	res.Bucket = dcl.StringOrNil(p.GetBucket())
	res.Object = dcl.StringOrNil(p.GetObject())
	res.GenerationNumber = dcl.Int64OrNil(p.GetGenerationNumber())
	return res
}
// ProtoToOsconfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfig converts a PatchDeploymentPatchConfigPostStepWindowsExecStepConfig object from its proto representation.
func ProtoToOsconfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfig(p *osconfigpb.OsconfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfig) *osconfig.PatchDeploymentPatchConfigPostStepWindowsExecStepConfig {
	if p == nil {
		return nil
	}
	res := &osconfig.PatchDeploymentPatchConfigPostStepWindowsExecStepConfig{
		LocalPath:   dcl.StringOrNil(p.GetLocalPath()),
		GcsObject:   ProtoToOsconfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObject(p.GetGcsObject()),
		Interpreter: ProtoToOsconfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigInterpreterEnum(p.GetInterpreter()),
	}
	// Copy the repeated exit codes; an empty source leaves the field nil.
	res.AllowedSuccessCodes = append(res.AllowedSuccessCodes, p.GetAllowedSuccessCodes()...)
	return res
}
// ProtoToOsconfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObject converts a PatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObject object from its proto representation.
func ProtoToOsconfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObject(p *osconfigpb.OsconfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObject) *osconfig.PatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObject {
	if p == nil {
		return nil
	}
	// All three fields are scalars; wrap them as optional pointers.
	res := &osconfig.PatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObject{}
	res.Bucket = dcl.StringOrNil(p.GetBucket())
	res.Object = dcl.StringOrNil(p.GetObject())
	res.GenerationNumber = dcl.Int64OrNil(p.GetGenerationNumber())
	return res
}
// ProtoToOsconfigPatchDeploymentOneTimeSchedule converts a PatchDeploymentOneTimeSchedule object from its proto representation.
func ProtoToOsconfigPatchDeploymentOneTimeSchedule(p *osconfigpb.OsconfigPatchDeploymentOneTimeSchedule) *osconfig.PatchDeploymentOneTimeSchedule {
	if p == nil {
		return nil
	}
	// Single scalar field: the RFC3339 execute timestamp.
	res := &osconfig.PatchDeploymentOneTimeSchedule{}
	res.ExecuteTime = dcl.StringOrNil(p.GetExecuteTime())
	return res
}
// ProtoToOsconfigPatchDeploymentRecurringSchedule converts a PatchDeploymentRecurringSchedule object from its proto representation.
func ProtoToOsconfigPatchDeploymentRecurringSchedule(p *osconfigpb.OsconfigPatchDeploymentRecurringSchedule) *osconfig.PatchDeploymentRecurringSchedule {
	if p == nil {
		return nil
	}
	// Nested messages and enums go through their dedicated converters;
	// scalar strings become optional pointers.
	res := &osconfig.PatchDeploymentRecurringSchedule{}
	res.TimeZone = ProtoToOsconfigPatchDeploymentRecurringScheduleTimeZone(p.GetTimeZone())
	res.StartTime = dcl.StringOrNil(p.GetStartTime())
	res.EndTime = dcl.StringOrNil(p.GetEndTime())
	res.TimeOfDay = ProtoToOsconfigPatchDeploymentRecurringScheduleTimeOfDay(p.GetTimeOfDay())
	res.Frequency = ProtoToOsconfigPatchDeploymentRecurringScheduleFrequencyEnum(p.GetFrequency())
	res.Weekly = ProtoToOsconfigPatchDeploymentRecurringScheduleWeekly(p.GetWeekly())
	res.Monthly = ProtoToOsconfigPatchDeploymentRecurringScheduleMonthly(p.GetMonthly())
	res.LastExecuteTime = dcl.StringOrNil(p.GetLastExecuteTime())
	res.NextExecuteTime = dcl.StringOrNil(p.GetNextExecuteTime())
	return res
}
// ProtoToOsconfigPatchDeploymentRecurringScheduleTimeZone converts a PatchDeploymentRecurringScheduleTimeZone object from its proto representation.
func ProtoToOsconfigPatchDeploymentRecurringScheduleTimeZone(p *osconfigpb.OsconfigPatchDeploymentRecurringScheduleTimeZone) *osconfig.PatchDeploymentRecurringScheduleTimeZone {
	if p == nil {
		return nil
	}
	// Both fields are scalars; wrap them as optional pointers.
	res := &osconfig.PatchDeploymentRecurringScheduleTimeZone{}
	res.Id = dcl.StringOrNil(p.GetId())
	res.Version = dcl.StringOrNil(p.GetVersion())
	return res
}
// ProtoToOsconfigPatchDeploymentRecurringScheduleTimeOfDay converts a PatchDeploymentRecurringScheduleTimeOfDay object from its proto representation.
func ProtoToOsconfigPatchDeploymentRecurringScheduleTimeOfDay(p *osconfigpb.OsconfigPatchDeploymentRecurringScheduleTimeOfDay) *osconfig.PatchDeploymentRecurringScheduleTimeOfDay {
	if p == nil {
		return nil
	}
	// All four clock components are scalars; wrap them as optional pointers.
	res := &osconfig.PatchDeploymentRecurringScheduleTimeOfDay{}
	res.Hours = dcl.Int64OrNil(p.GetHours())
	res.Minutes = dcl.Int64OrNil(p.GetMinutes())
	res.Seconds = dcl.Int64OrNil(p.GetSeconds())
	res.Nanos = dcl.Int64OrNil(p.GetNanos())
	return res
}
// ProtoToOsconfigPatchDeploymentRecurringScheduleWeekly converts a PatchDeploymentRecurringScheduleWeekly object from its proto representation.
func ProtoToOsconfigPatchDeploymentRecurringScheduleWeekly(p *osconfigpb.OsconfigPatchDeploymentRecurringScheduleWeekly) *osconfig.PatchDeploymentRecurringScheduleWeekly {
	if p == nil {
		return nil
	}
	// Single enum field: the weekly day of week.
	res := &osconfig.PatchDeploymentRecurringScheduleWeekly{}
	res.DayOfWeek = ProtoToOsconfigPatchDeploymentRecurringScheduleWeeklyDayOfWeekEnum(p.GetDayOfWeek())
	return res
}
// ProtoToOsconfigPatchDeploymentRecurringScheduleMonthly converts a PatchDeploymentRecurringScheduleMonthly object from its proto representation.
func ProtoToOsconfigPatchDeploymentRecurringScheduleMonthly(p *osconfigpb.OsconfigPatchDeploymentRecurringScheduleMonthly) *osconfig.PatchDeploymentRecurringScheduleMonthly {
	if p == nil {
		return nil
	}
	// WeekDayOfMonth is a nested message; MonthDay is a plain scalar.
	res := &osconfig.PatchDeploymentRecurringScheduleMonthly{}
	res.WeekDayOfMonth = ProtoToOsconfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonth(p.GetWeekDayOfMonth())
	res.MonthDay = dcl.Int64OrNil(p.GetMonthDay())
	return res
}
// ProtoToOsconfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonth converts a PatchDeploymentRecurringScheduleMonthlyWeekDayOfMonth object from its proto representation.
func ProtoToOsconfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonth(p *osconfigpb.OsconfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonth) *osconfig.PatchDeploymentRecurringScheduleMonthlyWeekDayOfMonth {
	if p == nil {
		return nil
	}
	// WeekOrdinal is a scalar; DayOfWeek goes through the enum converter.
	res := &osconfig.PatchDeploymentRecurringScheduleMonthlyWeekDayOfMonth{}
	res.WeekOrdinal = dcl.Int64OrNil(p.GetWeekOrdinal())
	res.DayOfWeek = ProtoToOsconfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthDayOfWeekEnum(p.GetDayOfWeek())
	return res
}
// ProtoToOsconfigPatchDeploymentRollout converts a PatchDeploymentRollout object from its proto representation.
func ProtoToOsconfigPatchDeploymentRollout(p *osconfigpb.OsconfigPatchDeploymentRollout) *osconfig.PatchDeploymentRollout {
	if p == nil {
		return nil
	}
	// Mode is an enum; DisruptionBudget is a nested message.
	res := &osconfig.PatchDeploymentRollout{}
	res.Mode = ProtoToOsconfigPatchDeploymentRolloutModeEnum(p.GetMode())
	res.DisruptionBudget = ProtoToOsconfigPatchDeploymentRolloutDisruptionBudget(p.GetDisruptionBudget())
	return res
}
// ProtoToOsconfigPatchDeploymentRolloutDisruptionBudget converts a PatchDeploymentRolloutDisruptionBudget object from its proto representation.
func ProtoToOsconfigPatchDeploymentRolloutDisruptionBudget(p *osconfigpb.OsconfigPatchDeploymentRolloutDisruptionBudget) *osconfig.PatchDeploymentRolloutDisruptionBudget {
	if p == nil {
		return nil
	}
	// Both budget variants are scalars; wrap them as optional pointers.
	res := &osconfig.PatchDeploymentRolloutDisruptionBudget{}
	res.Fixed = dcl.Int64OrNil(p.GetFixed())
	res.Percent = dcl.Int64OrNil(p.GetPercent())
	return res
}
// ProtoToPatchDeployment converts a PatchDeployment resource from its proto representation.
// Note: unlike the sub-object converters this takes no nil guard; generated
// proto getters are nil-receiver safe, so a nil input yields a zero-valued resource.
func ProtoToPatchDeployment(p *osconfigpb.OsconfigPatchDeployment) *osconfig.PatchDeployment {
	res := &osconfig.PatchDeployment{}
	res.Name = dcl.StringOrNil(p.GetName())
	res.Description = dcl.StringOrNil(p.GetDescription())
	res.InstanceFilter = ProtoToOsconfigPatchDeploymentInstanceFilter(p.GetInstanceFilter())
	res.PatchConfig = ProtoToOsconfigPatchDeploymentPatchConfig(p.GetPatchConfig())
	res.Duration = dcl.StringOrNil(p.GetDuration())
	res.OneTimeSchedule = ProtoToOsconfigPatchDeploymentOneTimeSchedule(p.GetOneTimeSchedule())
	res.RecurringSchedule = ProtoToOsconfigPatchDeploymentRecurringSchedule(p.GetRecurringSchedule())
	res.CreateTime = dcl.StringOrNil(p.GetCreateTime())
	res.UpdateTime = dcl.StringOrNil(p.GetUpdateTime())
	res.LastExecuteTime = dcl.StringOrNil(p.GetLastExecuteTime())
	res.Rollout = ProtoToOsconfigPatchDeploymentRollout(p.GetRollout())
	res.Project = dcl.StringOrNil(p.GetProject())
	return res
}
// OsconfigPatchDeploymentPatchConfigRebootConfigEnumToProto converts a PatchDeploymentPatchConfigRebootConfigEnum enum to its proto representation.
func OsconfigPatchDeploymentPatchConfigRebootConfigEnumToProto(e *osconfig.PatchDeploymentPatchConfigRebootConfigEnum) osconfigpb.OsconfigPatchDeploymentPatchConfigRebootConfigEnum {
	// nil means unset; proto encodes that as the zero ("unspecified") value.
	if e == nil {
		return osconfigpb.OsconfigPatchDeploymentPatchConfigRebootConfigEnum(0)
	}
	// Re-attach the DCL type-name prefix to look up the numeric proto value.
	v, ok := osconfigpb.OsconfigPatchDeploymentPatchConfigRebootConfigEnum_value["PatchDeploymentPatchConfigRebootConfigEnum"+string(*e)]
	if !ok {
		return osconfigpb.OsconfigPatchDeploymentPatchConfigRebootConfigEnum(0)
	}
	return osconfigpb.OsconfigPatchDeploymentPatchConfigRebootConfigEnum(v)
}
// OsconfigPatchDeploymentPatchConfigAptTypeEnumToProto converts a PatchDeploymentPatchConfigAptTypeEnum enum to its proto representation.
func OsconfigPatchDeploymentPatchConfigAptTypeEnumToProto(e *osconfig.PatchDeploymentPatchConfigAptTypeEnum) osconfigpb.OsconfigPatchDeploymentPatchConfigAptTypeEnum {
	// nil means unset; proto encodes that as the zero ("unspecified") value.
	if e == nil {
		return osconfigpb.OsconfigPatchDeploymentPatchConfigAptTypeEnum(0)
	}
	// Re-attach the DCL type-name prefix to look up the numeric proto value.
	v, ok := osconfigpb.OsconfigPatchDeploymentPatchConfigAptTypeEnum_value["PatchDeploymentPatchConfigAptTypeEnum"+string(*e)]
	if !ok {
		return osconfigpb.OsconfigPatchDeploymentPatchConfigAptTypeEnum(0)
	}
	return osconfigpb.OsconfigPatchDeploymentPatchConfigAptTypeEnum(v)
}
// OsconfigPatchDeploymentPatchConfigWindowsUpdateClassificationsEnumToProto converts a PatchDeploymentPatchConfigWindowsUpdateClassificationsEnum enum to its proto representation.
func OsconfigPatchDeploymentPatchConfigWindowsUpdateClassificationsEnumToProto(e *osconfig.PatchDeploymentPatchConfigWindowsUpdateClassificationsEnum) osconfigpb.OsconfigPatchDeploymentPatchConfigWindowsUpdateClassificationsEnum {
	// nil means unset; proto encodes that as the zero ("unspecified") value.
	if e == nil {
		return osconfigpb.OsconfigPatchDeploymentPatchConfigWindowsUpdateClassificationsEnum(0)
	}
	// Re-attach the DCL type-name prefix to look up the numeric proto value.
	v, ok := osconfigpb.OsconfigPatchDeploymentPatchConfigWindowsUpdateClassificationsEnum_value["PatchDeploymentPatchConfigWindowsUpdateClassificationsEnum"+string(*e)]
	if !ok {
		return osconfigpb.OsconfigPatchDeploymentPatchConfigWindowsUpdateClassificationsEnum(0)
	}
	return osconfigpb.OsconfigPatchDeploymentPatchConfigWindowsUpdateClassificationsEnum(v)
}
// OsconfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigInterpreterEnumToProto converts a PatchDeploymentPatchConfigPreStepLinuxExecStepConfigInterpreterEnum enum to its proto representation.
func OsconfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigInterpreterEnumToProto(e *osconfig.PatchDeploymentPatchConfigPreStepLinuxExecStepConfigInterpreterEnum) osconfigpb.OsconfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigInterpreterEnum {
	// nil means unset; proto encodes that as the zero ("unspecified") value.
	if e == nil {
		return osconfigpb.OsconfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigInterpreterEnum(0)
	}
	// Re-attach the DCL type-name prefix to look up the numeric proto value.
	v, ok := osconfigpb.OsconfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigInterpreterEnum_value["PatchDeploymentPatchConfigPreStepLinuxExecStepConfigInterpreterEnum"+string(*e)]
	if !ok {
		return osconfigpb.OsconfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigInterpreterEnum(0)
	}
	return osconfigpb.OsconfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigInterpreterEnum(v)
}
// OsconfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigInterpreterEnumToProto converts a PatchDeploymentPatchConfigPreStepWindowsExecStepConfigInterpreterEnum enum to its proto representation.
func OsconfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigInterpreterEnumToProto(e *osconfig.PatchDeploymentPatchConfigPreStepWindowsExecStepConfigInterpreterEnum) osconfigpb.OsconfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigInterpreterEnum {
	// nil means unset; proto encodes that as the zero ("unspecified") value.
	if e == nil {
		return osconfigpb.OsconfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigInterpreterEnum(0)
	}
	// Re-attach the DCL type-name prefix to look up the numeric proto value.
	v, ok := osconfigpb.OsconfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigInterpreterEnum_value["PatchDeploymentPatchConfigPreStepWindowsExecStepConfigInterpreterEnum"+string(*e)]
	if !ok {
		return osconfigpb.OsconfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigInterpreterEnum(0)
	}
	return osconfigpb.OsconfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigInterpreterEnum(v)
}
// OsconfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigInterpreterEnumToProto converts a PatchDeploymentPatchConfigPostStepLinuxExecStepConfigInterpreterEnum enum to its proto representation.
func OsconfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigInterpreterEnumToProto(e *osconfig.PatchDeploymentPatchConfigPostStepLinuxExecStepConfigInterpreterEnum) osconfigpb.OsconfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigInterpreterEnum {
	// nil means unset; proto encodes that as the zero ("unspecified") value.
	if e == nil {
		return osconfigpb.OsconfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigInterpreterEnum(0)
	}
	// Re-attach the DCL type-name prefix to look up the numeric proto value.
	v, ok := osconfigpb.OsconfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigInterpreterEnum_value["PatchDeploymentPatchConfigPostStepLinuxExecStepConfigInterpreterEnum"+string(*e)]
	if !ok {
		return osconfigpb.OsconfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigInterpreterEnum(0)
	}
	return osconfigpb.OsconfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigInterpreterEnum(v)
}
// OsconfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigInterpreterEnumToProto converts a PatchDeploymentPatchConfigPostStepWindowsExecStepConfigInterpreterEnum enum to its proto representation.
func OsconfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigInterpreterEnumToProto(e *osconfig.PatchDeploymentPatchConfigPostStepWindowsExecStepConfigInterpreterEnum) osconfigpb.OsconfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigInterpreterEnum {
	// nil means unset; proto encodes that as the zero ("unspecified") value.
	if e == nil {
		return osconfigpb.OsconfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigInterpreterEnum(0)
	}
	// Re-attach the DCL type-name prefix to look up the numeric proto value.
	v, ok := osconfigpb.OsconfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigInterpreterEnum_value["PatchDeploymentPatchConfigPostStepWindowsExecStepConfigInterpreterEnum"+string(*e)]
	if !ok {
		return osconfigpb.OsconfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigInterpreterEnum(0)
	}
	return osconfigpb.OsconfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigInterpreterEnum(v)
}
// OsconfigPatchDeploymentRecurringScheduleFrequencyEnumToProto converts a PatchDeploymentRecurringScheduleFrequencyEnum enum to its proto representation.
func OsconfigPatchDeploymentRecurringScheduleFrequencyEnumToProto(e *osconfig.PatchDeploymentRecurringScheduleFrequencyEnum) osconfigpb.OsconfigPatchDeploymentRecurringScheduleFrequencyEnum {
	// nil means unset; proto encodes that as the zero ("unspecified") value.
	if e == nil {
		return osconfigpb.OsconfigPatchDeploymentRecurringScheduleFrequencyEnum(0)
	}
	// Re-attach the DCL type-name prefix to look up the numeric proto value.
	v, ok := osconfigpb.OsconfigPatchDeploymentRecurringScheduleFrequencyEnum_value["PatchDeploymentRecurringScheduleFrequencyEnum"+string(*e)]
	if !ok {
		return osconfigpb.OsconfigPatchDeploymentRecurringScheduleFrequencyEnum(0)
	}
	return osconfigpb.OsconfigPatchDeploymentRecurringScheduleFrequencyEnum(v)
}
// OsconfigPatchDeploymentRecurringScheduleWeeklyDayOfWeekEnumToProto converts a PatchDeploymentRecurringScheduleWeeklyDayOfWeekEnum enum to its proto representation.
func OsconfigPatchDeploymentRecurringScheduleWeeklyDayOfWeekEnumToProto(e *osconfig.PatchDeploymentRecurringScheduleWeeklyDayOfWeekEnum) osconfigpb.OsconfigPatchDeploymentRecurringScheduleWeeklyDayOfWeekEnum {
	// nil means unset; proto encodes that as the zero ("unspecified") value.
	if e == nil {
		return osconfigpb.OsconfigPatchDeploymentRecurringScheduleWeeklyDayOfWeekEnum(0)
	}
	// Re-attach the DCL type-name prefix to look up the numeric proto value.
	v, ok := osconfigpb.OsconfigPatchDeploymentRecurringScheduleWeeklyDayOfWeekEnum_value["PatchDeploymentRecurringScheduleWeeklyDayOfWeekEnum"+string(*e)]
	if !ok {
		return osconfigpb.OsconfigPatchDeploymentRecurringScheduleWeeklyDayOfWeekEnum(0)
	}
	return osconfigpb.OsconfigPatchDeploymentRecurringScheduleWeeklyDayOfWeekEnum(v)
}
// OsconfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthDayOfWeekEnumToProto converts a PatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthDayOfWeekEnum enum to its proto representation.
func OsconfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthDayOfWeekEnumToProto(e *osconfig.PatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthDayOfWeekEnum) osconfigpb.OsconfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthDayOfWeekEnum {
	// nil means unset; proto encodes that as the zero ("unspecified") value.
	if e == nil {
		return osconfigpb.OsconfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthDayOfWeekEnum(0)
	}
	// Re-attach the DCL type-name prefix to look up the numeric proto value.
	v, ok := osconfigpb.OsconfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthDayOfWeekEnum_value["PatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthDayOfWeekEnum"+string(*e)]
	if !ok {
		return osconfigpb.OsconfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthDayOfWeekEnum(0)
	}
	return osconfigpb.OsconfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthDayOfWeekEnum(v)
}
// OsconfigPatchDeploymentRolloutModeEnumToProto converts a PatchDeploymentRolloutModeEnum enum to its proto representation.
func OsconfigPatchDeploymentRolloutModeEnumToProto(e *osconfig.PatchDeploymentRolloutModeEnum) osconfigpb.OsconfigPatchDeploymentRolloutModeEnum {
	// nil means unset; proto encodes that as the zero ("unspecified") value.
	if e == nil {
		return osconfigpb.OsconfigPatchDeploymentRolloutModeEnum(0)
	}
	// Re-attach the DCL type-name prefix to look up the numeric proto value.
	v, ok := osconfigpb.OsconfigPatchDeploymentRolloutModeEnum_value["PatchDeploymentRolloutModeEnum"+string(*e)]
	if !ok {
		return osconfigpb.OsconfigPatchDeploymentRolloutModeEnum(0)
	}
	return osconfigpb.OsconfigPatchDeploymentRolloutModeEnum(v)
}
// OsconfigPatchDeploymentInstanceFilterToProto converts a PatchDeploymentInstanceFilter object to its proto representation.
func OsconfigPatchDeploymentInstanceFilterToProto(o *osconfig.PatchDeploymentInstanceFilter) *osconfigpb.OsconfigPatchDeploymentInstanceFilter {
	if o == nil {
		return nil
	}
	out := &osconfigpb.OsconfigPatchDeploymentInstanceFilter{}
	out.SetAll(dcl.ValueOrEmptyBool(o.All))
	// Repeated message fields need element-wise conversion.
	groupLabels := make([]*osconfigpb.OsconfigPatchDeploymentInstanceFilterGroupLabels, len(o.GroupLabels))
	for i := range o.GroupLabels {
		groupLabels[i] = OsconfigPatchDeploymentInstanceFilterGroupLabelsToProto(&o.GroupLabels[i])
	}
	out.SetGroupLabels(groupLabels)
	// String slices are duplicated so the proto never aliases the DCL object's storage.
	zones := make([]string, len(o.Zones))
	copy(zones, o.Zones)
	out.SetZones(zones)
	instances := make([]string, len(o.Instances))
	copy(instances, o.Instances)
	out.SetInstances(instances)
	prefixes := make([]string, len(o.InstanceNamePrefixes))
	copy(prefixes, o.InstanceNamePrefixes)
	out.SetInstanceNamePrefixes(prefixes)
	return out
}
// OsconfigPatchDeploymentInstanceFilterGroupLabelsToProto converts a PatchDeploymentInstanceFilterGroupLabels object to its proto representation.
func OsconfigPatchDeploymentInstanceFilterGroupLabelsToProto(o *osconfig.PatchDeploymentInstanceFilterGroupLabels) *osconfigpb.OsconfigPatchDeploymentInstanceFilterGroupLabels {
	if o == nil {
		return nil
	}
	out := &osconfigpb.OsconfigPatchDeploymentInstanceFilterGroupLabels{}
	// Duplicate the map so the proto never aliases the DCL object's storage.
	labels := make(map[string]string, len(o.Labels))
	for k, v := range o.Labels {
		labels[k] = v
	}
	out.SetLabels(labels)
	return out
}
// OsconfigPatchDeploymentPatchConfigToProto converts a PatchDeploymentPatchConfig object to its proto representation.
func OsconfigPatchDeploymentPatchConfigToProto(o *osconfig.PatchDeploymentPatchConfig) *osconfigpb.OsconfigPatchDeploymentPatchConfig {
	if o == nil {
		return nil
	}
	// Each sub-converter tolerates a nil field and emits nil/zero for it.
	out := &osconfigpb.OsconfigPatchDeploymentPatchConfig{}
	out.SetRebootConfig(OsconfigPatchDeploymentPatchConfigRebootConfigEnumToProto(o.RebootConfig))
	out.SetApt(OsconfigPatchDeploymentPatchConfigAptToProto(o.Apt))
	out.SetYum(OsconfigPatchDeploymentPatchConfigYumToProto(o.Yum))
	out.SetGoo(OsconfigPatchDeploymentPatchConfigGooToProto(o.Goo))
	out.SetZypper(OsconfigPatchDeploymentPatchConfigZypperToProto(o.Zypper))
	out.SetWindowsUpdate(OsconfigPatchDeploymentPatchConfigWindowsUpdateToProto(o.WindowsUpdate))
	out.SetPreStep(OsconfigPatchDeploymentPatchConfigPreStepToProto(o.PreStep))
	out.SetPostStep(OsconfigPatchDeploymentPatchConfigPostStepToProto(o.PostStep))
	return out
}
// OsconfigPatchDeploymentPatchConfigAptToProto converts a PatchDeploymentPatchConfigApt object to its proto representation.
func OsconfigPatchDeploymentPatchConfigAptToProto(o *osconfig.PatchDeploymentPatchConfigApt) *osconfigpb.OsconfigPatchDeploymentPatchConfigApt {
	if o == nil {
		return nil
	}
	out := &osconfigpb.OsconfigPatchDeploymentPatchConfigApt{}
	out.SetType(OsconfigPatchDeploymentPatchConfigAptTypeEnumToProto(o.Type))
	// String slices are duplicated so the proto never aliases the DCL object's storage.
	excludes := make([]string, len(o.Excludes))
	copy(excludes, o.Excludes)
	out.SetExcludes(excludes)
	exclusive := make([]string, len(o.ExclusivePackages))
	copy(exclusive, o.ExclusivePackages)
	out.SetExclusivePackages(exclusive)
	return out
}
// OsconfigPatchDeploymentPatchConfigYumToProto converts a PatchDeploymentPatchConfigYum object to its proto representation.
func OsconfigPatchDeploymentPatchConfigYumToProto(o *osconfig.PatchDeploymentPatchConfigYum) *osconfigpb.OsconfigPatchDeploymentPatchConfigYum {
	if o == nil {
		return nil
	}
	out := &osconfigpb.OsconfigPatchDeploymentPatchConfigYum{}
	out.SetSecurity(dcl.ValueOrEmptyBool(o.Security))
	out.SetMinimal(dcl.ValueOrEmptyBool(o.Minimal))
	// String slices are duplicated so the proto never aliases the DCL object's storage.
	excludes := make([]string, len(o.Excludes))
	copy(excludes, o.Excludes)
	out.SetExcludes(excludes)
	exclusive := make([]string, len(o.ExclusivePackages))
	copy(exclusive, o.ExclusivePackages)
	out.SetExclusivePackages(exclusive)
	return out
}
// OsconfigPatchDeploymentPatchConfigGooToProto converts a PatchDeploymentPatchConfigGoo object to its proto representation.
func OsconfigPatchDeploymentPatchConfigGooToProto(o *osconfig.PatchDeploymentPatchConfigGoo) *osconfigpb.OsconfigPatchDeploymentPatchConfigGoo {
	if o == nil {
		return nil
	}
	// The Goo config carries no fields; an empty message marks its presence.
	return &osconfigpb.OsconfigPatchDeploymentPatchConfigGoo{}
}
// OsconfigPatchDeploymentPatchConfigZypperToProto converts a PatchDeploymentPatchConfigZypper object to its proto representation.
func OsconfigPatchDeploymentPatchConfigZypperToProto(o *osconfig.PatchDeploymentPatchConfigZypper) *osconfigpb.OsconfigPatchDeploymentPatchConfigZypper {
	// An absent config maps to an absent proto message.
	if o == nil {
		return nil
	}
	out := &osconfigpb.OsconfigPatchDeploymentPatchConfigZypper{}
	out.SetWithOptional(dcl.ValueOrEmptyBool(o.WithOptional))
	out.SetWithUpdate(dcl.ValueOrEmptyBool(o.WithUpdate))
	categories := make([]string, len(o.Categories))
	copy(categories, o.Categories)
	out.SetCategories(categories)
	severities := make([]string, len(o.Severities))
	copy(severities, o.Severities)
	out.SetSeverities(severities)
	excludes := make([]string, len(o.Excludes))
	copy(excludes, o.Excludes)
	out.SetExcludes(excludes)
	exclusivePatches := make([]string, len(o.ExclusivePatches))
	copy(exclusivePatches, o.ExclusivePatches)
	out.SetExclusivePatches(exclusivePatches)
	return out
}
// OsconfigPatchDeploymentPatchConfigWindowsUpdateToProto converts a PatchDeploymentPatchConfigWindowsUpdate object to its proto representation.
func OsconfigPatchDeploymentPatchConfigWindowsUpdateToProto(o *osconfig.PatchDeploymentPatchConfigWindowsUpdate) *osconfigpb.OsconfigPatchDeploymentPatchConfigWindowsUpdate {
	// An absent config maps to an absent proto message.
	if o == nil {
		return nil
	}
	out := &osconfigpb.OsconfigPatchDeploymentPatchConfigWindowsUpdate{}
	// Map each classification through the generated proto enum value table.
	classifications := make([]osconfigpb.OsconfigPatchDeploymentPatchConfigWindowsUpdateClassificationsEnum, len(o.Classifications))
	for i, c := range o.Classifications {
		classifications[i] = osconfigpb.OsconfigPatchDeploymentPatchConfigWindowsUpdateClassificationsEnum(osconfigpb.OsconfigPatchDeploymentPatchConfigWindowsUpdateClassificationsEnum_value[string(c)])
	}
	out.SetClassifications(classifications)
	excludes := make([]string, len(o.Excludes))
	copy(excludes, o.Excludes)
	out.SetExcludes(excludes)
	exclusivePatches := make([]string, len(o.ExclusivePatches))
	copy(exclusivePatches, o.ExclusivePatches)
	out.SetExclusivePatches(exclusivePatches)
	return out
}
// OsconfigPatchDeploymentPatchConfigPreStepToProto converts a PatchDeploymentPatchConfigPreStep object to its proto representation.
func OsconfigPatchDeploymentPatchConfigPreStepToProto(o *osconfig.PatchDeploymentPatchConfigPreStep) *osconfigpb.OsconfigPatchDeploymentPatchConfigPreStep {
	// An absent config maps to an absent proto message.
	if o == nil {
		return nil
	}
	out := &osconfigpb.OsconfigPatchDeploymentPatchConfigPreStep{}
	out.SetLinuxExecStepConfig(OsconfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigToProto(o.LinuxExecStepConfig))
	out.SetWindowsExecStepConfig(OsconfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigToProto(o.WindowsExecStepConfig))
	return out
}
// OsconfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigToProto converts a PatchDeploymentPatchConfigPreStepLinuxExecStepConfig object to its proto representation.
func OsconfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigToProto(o *osconfig.PatchDeploymentPatchConfigPreStepLinuxExecStepConfig) *osconfigpb.OsconfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfig {
	// An absent config maps to an absent proto message.
	if o == nil {
		return nil
	}
	out := &osconfigpb.OsconfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfig{}
	out.SetLocalPath(dcl.ValueOrEmptyString(o.LocalPath))
	out.SetGcsObject(OsconfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObjectToProto(o.GcsObject))
	out.SetInterpreter(OsconfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigInterpreterEnumToProto(o.Interpreter))
	codes := make([]int64, len(o.AllowedSuccessCodes))
	copy(codes, o.AllowedSuccessCodes)
	out.SetAllowedSuccessCodes(codes)
	return out
}
// OsconfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObjectToProto converts a PatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObject object to its proto representation.
func OsconfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObjectToProto(o *osconfig.PatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObject) *osconfigpb.OsconfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObject {
	// An absent config maps to an absent proto message.
	if o == nil {
		return nil
	}
	out := &osconfigpb.OsconfigPatchDeploymentPatchConfigPreStepLinuxExecStepConfigGcsObject{}
	out.SetBucket(dcl.ValueOrEmptyString(o.Bucket))
	out.SetObject(dcl.ValueOrEmptyString(o.Object))
	out.SetGenerationNumber(dcl.ValueOrEmptyInt64(o.GenerationNumber))
	return out
}
// OsconfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigToProto converts a PatchDeploymentPatchConfigPreStepWindowsExecStepConfig object to its proto representation.
func OsconfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigToProto(o *osconfig.PatchDeploymentPatchConfigPreStepWindowsExecStepConfig) *osconfigpb.OsconfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfig {
	// An absent config maps to an absent proto message.
	if o == nil {
		return nil
	}
	out := &osconfigpb.OsconfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfig{}
	out.SetLocalPath(dcl.ValueOrEmptyString(o.LocalPath))
	out.SetGcsObject(OsconfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObjectToProto(o.GcsObject))
	out.SetInterpreter(OsconfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigInterpreterEnumToProto(o.Interpreter))
	codes := make([]int64, len(o.AllowedSuccessCodes))
	copy(codes, o.AllowedSuccessCodes)
	out.SetAllowedSuccessCodes(codes)
	return out
}
// OsconfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObjectToProto converts a PatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObject object to its proto representation.
func OsconfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObjectToProto(o *osconfig.PatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObject) *osconfigpb.OsconfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObject {
	// An absent config maps to an absent proto message.
	if o == nil {
		return nil
	}
	out := &osconfigpb.OsconfigPatchDeploymentPatchConfigPreStepWindowsExecStepConfigGcsObject{}
	out.SetBucket(dcl.ValueOrEmptyString(o.Bucket))
	out.SetObject(dcl.ValueOrEmptyString(o.Object))
	out.SetGenerationNumber(dcl.ValueOrEmptyInt64(o.GenerationNumber))
	return out
}
// OsconfigPatchDeploymentPatchConfigPostStepToProto converts a PatchDeploymentPatchConfigPostStep object to its proto representation.
func OsconfigPatchDeploymentPatchConfigPostStepToProto(o *osconfig.PatchDeploymentPatchConfigPostStep) *osconfigpb.OsconfigPatchDeploymentPatchConfigPostStep {
	// An absent config maps to an absent proto message.
	if o == nil {
		return nil
	}
	out := &osconfigpb.OsconfigPatchDeploymentPatchConfigPostStep{}
	out.SetLinuxExecStepConfig(OsconfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigToProto(o.LinuxExecStepConfig))
	out.SetWindowsExecStepConfig(OsconfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigToProto(o.WindowsExecStepConfig))
	return out
}
// OsconfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigToProto converts a PatchDeploymentPatchConfigPostStepLinuxExecStepConfig object to its proto representation.
func OsconfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigToProto(o *osconfig.PatchDeploymentPatchConfigPostStepLinuxExecStepConfig) *osconfigpb.OsconfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfig {
	// An absent config maps to an absent proto message.
	if o == nil {
		return nil
	}
	out := &osconfigpb.OsconfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfig{}
	out.SetLocalPath(dcl.ValueOrEmptyString(o.LocalPath))
	out.SetGcsObject(OsconfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObjectToProto(o.GcsObject))
	out.SetInterpreter(OsconfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigInterpreterEnumToProto(o.Interpreter))
	codes := make([]int64, len(o.AllowedSuccessCodes))
	copy(codes, o.AllowedSuccessCodes)
	out.SetAllowedSuccessCodes(codes)
	return out
}
// OsconfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObjectToProto converts a PatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObject object to its proto representation.
func OsconfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObjectToProto(o *osconfig.PatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObject) *osconfigpb.OsconfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObject {
	// An absent config maps to an absent proto message.
	if o == nil {
		return nil
	}
	out := &osconfigpb.OsconfigPatchDeploymentPatchConfigPostStepLinuxExecStepConfigGcsObject{}
	out.SetBucket(dcl.ValueOrEmptyString(o.Bucket))
	out.SetObject(dcl.ValueOrEmptyString(o.Object))
	out.SetGenerationNumber(dcl.ValueOrEmptyInt64(o.GenerationNumber))
	return out
}
// OsconfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigToProto converts a PatchDeploymentPatchConfigPostStepWindowsExecStepConfig object to its proto representation.
func OsconfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigToProto(o *osconfig.PatchDeploymentPatchConfigPostStepWindowsExecStepConfig) *osconfigpb.OsconfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfig {
	// An absent config maps to an absent proto message.
	if o == nil {
		return nil
	}
	out := &osconfigpb.OsconfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfig{}
	out.SetLocalPath(dcl.ValueOrEmptyString(o.LocalPath))
	out.SetGcsObject(OsconfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObjectToProto(o.GcsObject))
	out.SetInterpreter(OsconfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigInterpreterEnumToProto(o.Interpreter))
	codes := make([]int64, len(o.AllowedSuccessCodes))
	copy(codes, o.AllowedSuccessCodes)
	out.SetAllowedSuccessCodes(codes)
	return out
}
// OsconfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObjectToProto converts a PatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObject object to its proto representation.
func OsconfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObjectToProto(o *osconfig.PatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObject) *osconfigpb.OsconfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObject {
	// An absent config maps to an absent proto message.
	if o == nil {
		return nil
	}
	out := &osconfigpb.OsconfigPatchDeploymentPatchConfigPostStepWindowsExecStepConfigGcsObject{}
	out.SetBucket(dcl.ValueOrEmptyString(o.Bucket))
	out.SetObject(dcl.ValueOrEmptyString(o.Object))
	out.SetGenerationNumber(dcl.ValueOrEmptyInt64(o.GenerationNumber))
	return out
}
// OsconfigPatchDeploymentOneTimeScheduleToProto converts a PatchDeploymentOneTimeSchedule object to its proto representation.
func OsconfigPatchDeploymentOneTimeScheduleToProto(o *osconfig.PatchDeploymentOneTimeSchedule) *osconfigpb.OsconfigPatchDeploymentOneTimeSchedule {
	// An absent schedule maps to an absent proto message.
	if o == nil {
		return nil
	}
	out := &osconfigpb.OsconfigPatchDeploymentOneTimeSchedule{}
	out.SetExecuteTime(dcl.ValueOrEmptyString(o.ExecuteTime))
	return out
}
// OsconfigPatchDeploymentRecurringScheduleToProto converts a PatchDeploymentRecurringSchedule object to its proto representation.
func OsconfigPatchDeploymentRecurringScheduleToProto(o *osconfig.PatchDeploymentRecurringSchedule) *osconfigpb.OsconfigPatchDeploymentRecurringSchedule {
	// An absent schedule maps to an absent proto message.
	if o == nil {
		return nil
	}
	out := &osconfigpb.OsconfigPatchDeploymentRecurringSchedule{}
	out.SetTimeZone(OsconfigPatchDeploymentRecurringScheduleTimeZoneToProto(o.TimeZone))
	out.SetStartTime(dcl.ValueOrEmptyString(o.StartTime))
	out.SetEndTime(dcl.ValueOrEmptyString(o.EndTime))
	out.SetTimeOfDay(OsconfigPatchDeploymentRecurringScheduleTimeOfDayToProto(o.TimeOfDay))
	out.SetFrequency(OsconfigPatchDeploymentRecurringScheduleFrequencyEnumToProto(o.Frequency))
	out.SetWeekly(OsconfigPatchDeploymentRecurringScheduleWeeklyToProto(o.Weekly))
	out.SetMonthly(OsconfigPatchDeploymentRecurringScheduleMonthlyToProto(o.Monthly))
	out.SetLastExecuteTime(dcl.ValueOrEmptyString(o.LastExecuteTime))
	out.SetNextExecuteTime(dcl.ValueOrEmptyString(o.NextExecuteTime))
	return out
}
// OsconfigPatchDeploymentRecurringScheduleTimeZoneToProto converts a PatchDeploymentRecurringScheduleTimeZone object to its proto representation.
func OsconfigPatchDeploymentRecurringScheduleTimeZoneToProto(o *osconfig.PatchDeploymentRecurringScheduleTimeZone) *osconfigpb.OsconfigPatchDeploymentRecurringScheduleTimeZone {
	// An absent time zone maps to an absent proto message.
	if o == nil {
		return nil
	}
	out := &osconfigpb.OsconfigPatchDeploymentRecurringScheduleTimeZone{}
	out.SetId(dcl.ValueOrEmptyString(o.Id))
	out.SetVersion(dcl.ValueOrEmptyString(o.Version))
	return out
}
// OsconfigPatchDeploymentRecurringScheduleTimeOfDayToProto converts a PatchDeploymentRecurringScheduleTimeOfDay object to its proto representation.
func OsconfigPatchDeploymentRecurringScheduleTimeOfDayToProto(o *osconfig.PatchDeploymentRecurringScheduleTimeOfDay) *osconfigpb.OsconfigPatchDeploymentRecurringScheduleTimeOfDay {
	// An absent time-of-day maps to an absent proto message.
	if o == nil {
		return nil
	}
	out := &osconfigpb.OsconfigPatchDeploymentRecurringScheduleTimeOfDay{}
	out.SetHours(dcl.ValueOrEmptyInt64(o.Hours))
	out.SetMinutes(dcl.ValueOrEmptyInt64(o.Minutes))
	out.SetSeconds(dcl.ValueOrEmptyInt64(o.Seconds))
	out.SetNanos(dcl.ValueOrEmptyInt64(o.Nanos))
	return out
}
// OsconfigPatchDeploymentRecurringScheduleWeeklyToProto converts a PatchDeploymentRecurringScheduleWeekly object to its proto representation.
func OsconfigPatchDeploymentRecurringScheduleWeeklyToProto(o *osconfig.PatchDeploymentRecurringScheduleWeekly) *osconfigpb.OsconfigPatchDeploymentRecurringScheduleWeekly {
	// An absent weekly schedule maps to an absent proto message.
	if o == nil {
		return nil
	}
	out := &osconfigpb.OsconfigPatchDeploymentRecurringScheduleWeekly{}
	out.SetDayOfWeek(OsconfigPatchDeploymentRecurringScheduleWeeklyDayOfWeekEnumToProto(o.DayOfWeek))
	return out
}
// OsconfigPatchDeploymentRecurringScheduleMonthlyToProto converts a PatchDeploymentRecurringScheduleMonthly object to its proto representation.
func OsconfigPatchDeploymentRecurringScheduleMonthlyToProto(o *osconfig.PatchDeploymentRecurringScheduleMonthly) *osconfigpb.OsconfigPatchDeploymentRecurringScheduleMonthly {
	// An absent monthly schedule maps to an absent proto message.
	if o == nil {
		return nil
	}
	out := &osconfigpb.OsconfigPatchDeploymentRecurringScheduleMonthly{}
	out.SetWeekDayOfMonth(OsconfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthToProto(o.WeekDayOfMonth))
	out.SetMonthDay(dcl.ValueOrEmptyInt64(o.MonthDay))
	return out
}
// OsconfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthToProto converts a PatchDeploymentRecurringScheduleMonthlyWeekDayOfMonth object to its proto representation.
func OsconfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthToProto(o *osconfig.PatchDeploymentRecurringScheduleMonthlyWeekDayOfMonth) *osconfigpb.OsconfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonth {
	// An absent value maps to an absent proto message.
	if o == nil {
		return nil
	}
	out := &osconfigpb.OsconfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonth{}
	out.SetWeekOrdinal(dcl.ValueOrEmptyInt64(o.WeekOrdinal))
	out.SetDayOfWeek(OsconfigPatchDeploymentRecurringScheduleMonthlyWeekDayOfMonthDayOfWeekEnumToProto(o.DayOfWeek))
	return out
}
// OsconfigPatchDeploymentRolloutToProto converts a PatchDeploymentRollout object to its proto representation.
func OsconfigPatchDeploymentRolloutToProto(o *osconfig.PatchDeploymentRollout) *osconfigpb.OsconfigPatchDeploymentRollout {
	// An absent rollout maps to an absent proto message.
	if o == nil {
		return nil
	}
	out := &osconfigpb.OsconfigPatchDeploymentRollout{}
	out.SetMode(OsconfigPatchDeploymentRolloutModeEnumToProto(o.Mode))
	out.SetDisruptionBudget(OsconfigPatchDeploymentRolloutDisruptionBudgetToProto(o.DisruptionBudget))
	return out
}
// OsconfigPatchDeploymentRolloutDisruptionBudgetToProto converts a PatchDeploymentRolloutDisruptionBudget object to its proto representation.
func OsconfigPatchDeploymentRolloutDisruptionBudgetToProto(o *osconfig.PatchDeploymentRolloutDisruptionBudget) *osconfigpb.OsconfigPatchDeploymentRolloutDisruptionBudget {
	// An absent budget maps to an absent proto message.
	if o == nil {
		return nil
	}
	out := &osconfigpb.OsconfigPatchDeploymentRolloutDisruptionBudget{}
	out.SetFixed(dcl.ValueOrEmptyInt64(o.Fixed))
	out.SetPercent(dcl.ValueOrEmptyInt64(o.Percent))
	return out
}
// PatchDeploymentToProto converts a PatchDeployment resource to its proto representation.
func PatchDeploymentToProto(resource *osconfig.PatchDeployment) *osconfigpb.OsconfigPatchDeployment {
	out := &osconfigpb.OsconfigPatchDeployment{}
	out.SetName(dcl.ValueOrEmptyString(resource.Name))
	out.SetDescription(dcl.ValueOrEmptyString(resource.Description))
	out.SetInstanceFilter(OsconfigPatchDeploymentInstanceFilterToProto(resource.InstanceFilter))
	out.SetPatchConfig(OsconfigPatchDeploymentPatchConfigToProto(resource.PatchConfig))
	out.SetDuration(dcl.ValueOrEmptyString(resource.Duration))
	out.SetOneTimeSchedule(OsconfigPatchDeploymentOneTimeScheduleToProto(resource.OneTimeSchedule))
	out.SetRecurringSchedule(OsconfigPatchDeploymentRecurringScheduleToProto(resource.RecurringSchedule))
	out.SetCreateTime(dcl.ValueOrEmptyString(resource.CreateTime))
	out.SetUpdateTime(dcl.ValueOrEmptyString(resource.UpdateTime))
	out.SetLastExecuteTime(dcl.ValueOrEmptyString(resource.LastExecuteTime))
	out.SetRollout(OsconfigPatchDeploymentRolloutToProto(resource.Rollout))
	out.SetProject(dcl.ValueOrEmptyString(resource.Project))
	return out
}
// applyPatchDeployment handles the gRPC request by passing it to the underlying PatchDeployment Apply() method.
func (s *PatchDeploymentServer) applyPatchDeployment(ctx context.Context, c *osconfig.Client, request *osconfigpb.ApplyOsconfigPatchDeploymentRequest) (*osconfigpb.OsconfigPatchDeployment, error) {
	applied, err := c.ApplyPatchDeployment(ctx, ProtoToPatchDeployment(request.GetResource()))
	if err != nil {
		return nil, err
	}
	return PatchDeploymentToProto(applied), nil
}
// ApplyOsconfigPatchDeployment handles the gRPC request by passing it to the underlying PatchDeployment Apply() method.
func (s *PatchDeploymentServer) ApplyOsconfigPatchDeployment(ctx context.Context, request *osconfigpb.ApplyOsconfigPatchDeploymentRequest) (*osconfigpb.OsconfigPatchDeployment, error) {
	client, err := createConfigPatchDeployment(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return s.applyPatchDeployment(ctx, client, request)
}
// DeleteOsconfigPatchDeployment handles the gRPC request by passing it to the underlying PatchDeployment Delete() method.
func (s *PatchDeploymentServer) DeleteOsconfigPatchDeployment(ctx context.Context, request *osconfigpb.DeleteOsconfigPatchDeploymentRequest) (*emptypb.Empty, error) {
	client, err := createConfigPatchDeployment(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	err = client.DeletePatchDeployment(ctx, ProtoToPatchDeployment(request.GetResource()))
	return &emptypb.Empty{}, err
}
// ListOsconfigPatchDeployment handles the gRPC request by passing it to the underlying PatchDeploymentList() method.
func (s *PatchDeploymentServer) ListOsconfigPatchDeployment(ctx context.Context, request *osconfigpb.ListOsconfigPatchDeploymentRequest) (*osconfigpb.ListOsconfigPatchDeploymentResponse, error) {
	client, err := createConfigPatchDeployment(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	resources, err := client.ListPatchDeployment(ctx, request.GetProject())
	if err != nil {
		return nil, err
	}
	// Convert each fetched resource into its proto form.
	var protos []*osconfigpb.OsconfigPatchDeployment
	for _, item := range resources.Items {
		protos = append(protos, PatchDeploymentToProto(item))
	}
	resp := &osconfigpb.ListOsconfigPatchDeploymentResponse{}
	resp.SetItems(protos)
	return resp, nil
}
// createConfigPatchDeployment builds an osconfig client authenticated
// with the given service account credentials file.
func createConfigPatchDeployment(ctx context.Context, serviceAccountFile string) (*osconfig.Client, error) {
	conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(serviceAccountFile))
	return osconfig.NewClient(conf), nil
}
|
package mongomodel
import (
"time"
)
// LocationModel aggregates per-location counts for a single day and
// normalizes full Chinese administrative-region names to short
// display names via Typemap.
type LocationModel struct {
	View *DailyLocationView // the day's accumulated view data
	Typemap map[string]string // full region name -> short display name
}
// NewLocationModel builds a LocationModel for the given date. Its
// Typemap translates full Chinese province/region names to their
// short display forms, used by AddChinaView.
func NewLocationModel(date time.Time) *LocationModel {
	shortNames := map[string]string{
		"北京市":      "北京",
		"天津市":      "天津",
		"河北省":      "河北",
		"山西省":      "山西",
		"内蒙古自治区":   "内蒙古",
		"辽宁省":      "辽宁",
		"吉林省":      "吉林",
		"黑龙江省":     "黑龙江",
		"上海市":      "上海",
		"江苏省":      "江苏",
		"浙江省":      "浙江",
		"安徽省":      "安徽",
		"福建省":      "福建",
		"江西省":      "江西",
		"山东省":      "山东",
		"河南省":      "河南",
		"湖北省":      "湖北",
		"湖南省":      "湖南",
		"广东省":      "广东",
		"广西壮族自治区":  "广西",
		"海南省":      "海南",
		"重庆市":      "重庆",
		"四川省":      "四川",
		"贵州省":      "贵州",
		"云南省":      "云南",
		"西藏自治区":    "西藏",
		"陕西省":      "陕西",
		"甘肃省":      "甘肃",
		"青海省":      "青海",
		"宁夏回族自治区":  "宁夏",
		"新疆维吾尔自治区": "新疆",
		"台湾省":      "台湾",
		"香港特别行政区":  "香港",
		"澳门特别行政区":  "澳门",
	}
	return &LocationModel{
		View:    newDailyLocationView(date),
		Typemap: shortNames,
	}
}
// AddChinaView appends a data point for a Chinese region, translating
// the full region name to its short form via Typemap.
func (m *LocationModel) AddChinaView(index string, value int) {
	m.View.ViewData = append(m.View.ViewData, item{Key: m.Typemap[index], Value: value})
}
// AddGlobalView appends a data point keyed by the location name as-is
// (no translation through Typemap).
func (m *LocationModel) AddGlobalView(index string, value int) {
	m.View.ViewData = append(m.View.ViewData, item{Key: index, Value: value})
}
// DailyLocationView holds one day's location counts as an ordered
// list of key/value items.
type DailyLocationView struct {
	Date time.Time // the day this view describes
	ViewData []item // per-location counters (item is declared elsewhere in this package)
}
// newDailyLocationView returns an empty view for the given date; data
// points are appended later via the LocationModel Add helpers.
func newDailyLocationView(date time.Time) *DailyLocationView {
	view := DailyLocationView{Date: date}
	return &view
}
|
package main
import (
"fmt"
"github.com/kizzie/go-teampasswordmanager/teampasswordmanager"
)
// main demonstrates the teampasswordmanager client: it lists the
// available passwords, fetches password #1, and prints its custom
// fields.
func main() {
	config := teampasswordmanager.ClientConfig{
		BaseURL:   "http://localhost/teampasswordmanager",
		AuthToken: "a2F0OnBhc3N3b3Jk",
	}
	// The original code discarded these errors, which made the later
	// method calls nil-pointer dereferences on any failure.
	client, err := teampasswordmanager.NewClient(&config)
	if err != nil {
		fmt.Println("creating client:", err)
		return
	}
	fmt.Println(client.GetPasswordList())
	password, err := client.GetPassword(1)
	if err != nil {
		fmt.Println("fetching password 1:", err)
		return
	}
	fmt.Println(password)
	fmt.Println(password.CustomFields())
	// fmt.Println(client.GetPassword(2))
	// fmt.Println(client.GetPasswordByName("foo", "bar"))
	fmt.Println(password.CustomField("service_username"))
	fmt.Println(password.CustomField("service_password"))
}
|
package grpc
import (
"fmt"
"log"
"net"
"github.com/charlesfan/go-grpc/pb"
"google.golang.org/grpc"
"google.golang.org/grpc/reflection"
)
// Protocol is the network type the gRPC listener is bound with.
const Protocol string = "tcp"
// Run starts the gRPC service listening on the given port over TCP.
// It blocks serving requests and terminates the process on a fatal
// listen or serve error.
func Run(port string) {
	fmt.Printf("[gRPC test] gRPC start service with %s on %s\n", Protocol, port)
	listener, err := net.Listen(Protocol, port)
	if err != nil {
		log.Fatalf("gRPC binding port failed: %v", err)
	}
	// Register the Login service and enable server reflection (so
	// tools like grpcurl can introspect the API).
	server := grpc.NewServer()
	pb.RegisterLoginServer(server, &login{})
	reflection.Register(server)
	if err := server.Serve(listener); err != nil {
		log.Fatalf("gRPC run error: %v", err)
	}
}
|
package pci
import (
"fmt"
"runtime"
"apic"
"defs"
)
// pciide_disk_t is a PCI-native IDE disk. Fields hold the PIO port
// bases read from the device's BARs in attach_3400.
type pciide_disk_t struct {
	rbase uintptr // command block base (BAR 0)
	allstat uintptr // control block / alternate status base (BAR 1)
	bmaster uintptr // bus-master register base (BAR 4)
}
// attach_3400 attaches the PCI-native IDE disk identified by tag and
// publishes it as the package-level Disk. It wires the device
// interrupt, reads the PIO BARs, and initializes the drive. Only one
// disk is supported; attaching a second panics.
//
// vendorid and devid are accepted but unused here — presumably kept
// for the attach-table callback signature; TODO confirm.
func attach_3400(vendorid, devid int, tag Pcitag_t) {
	if Disk != nil {
		panic("adding two disks")
	}
	// Route the device interrupt and derive the CPU vector from it.
	gsi := pci_disk_interrupt_wiring(tag)
	IRQ_DISK = gsi
	INT_DISK = defs.IRQ_BASE + IRQ_DISK
	d := &pciide_disk_t{}
	// 3400's PCI-native IDE command/control block:
	// BAR0 = command block, BAR1 = control block, BAR4 = bus master.
	rbase := pci_bar_pio(tag, 0)
	allstats := pci_bar_pio(tag, 1)
	busmaster := pci_bar_pio(tag, 4)
	d.init(rbase, allstats, busmaster)
	Disk = d
	fmt.Printf("3400: base %#x, cntrl: %#x, bm: %#x, irq: %d\n", rbase,
		allstats, busmaster, gsi)
}
// init records the three port bases and runs legacy IDE
// initialization against the command block base.
func (d *pciide_disk_t) init(base, allst, busmaster uintptr) {
	d.rbase = base
	d.allstat = allst
	d.bmaster = busmaster
	ide_init(d.rbase)
}
// Start begins a disk request (read or write) using the legacy IDE
// start routine against this disk's ports.
func (d *pciide_disk_t) Start(ibuf *Idebuf_t, writing bool) {
	ide_start(d.rbase, d.allstat, ibuf, writing)
}
// Complete finishes an in-flight request; for reads, the data is
// transferred into dst by the legacy IDE completion routine.
func (d *pciide_disk_t) Complete(dst []uint8, writing bool) {
	ide_complete(d.rbase, dst, writing)
}
// Intr reports whether this disk raised the pending interrupt by
// checking the interrupt bit (bit 2) of the bus-master status
// register.
func (d *pciide_disk_t) Intr() bool {
	streg := uint16(d.bmaster + 0x02)
	bmintr := uint(1 << 2)
	return runtime.Inb(streg)&bmintr != 0
}
// Int_clear acknowledges a completed disk interrupt: it drains the
// drive status register, checks and clears the bus-master status, and
// unmasks the disk IRQ at the APIC.
func (d *pciide_disk_t) Int_clear() {
	// read status so disk clears int
	runtime.Inb(uint16(d.rbase + 7))
	runtime.Inb(uint16(d.rbase + 7))
	// in PCI-native mode, clear the interrupt via the legacy bus master
	// base, bar 4.
	streg := uint16(d.bmaster + 0x02)
	st := runtime.Inb(streg)
	// Bit 1 is the bus-master error bit; a set bit is fatal here.
	er := uint(1 << 1)
	if st&er != 0 {
		panic("disk error")
	}
	// Writing the status value back clears it — presumably
	// write-1-to-clear semantics; confirm against the controller
	// datasheet.
	runtime.Outb(streg, uint8(st))
	// and via apic
	apic.Apic.Irq_unmask(IRQ_DISK)
	// irq_eoi(IRQ_DISK)
}
|
package config
import (
"github.com/jinzhu/gorm"
)
// Database holds the connection settings used to build a gorm DSN.
type Database struct {
	Driver string // gorm driver name; only "postgres" is supported by Connect
	Host string
	Port string
	Database string // database name (the "dbname" DSN field)
	Username string
	Password string
	SslMode string // passed through as the "sslmode" DSN field
}
// Connect opens a gorm DB handle from the receiver's settings and
// returns it. The caller owns the returned handle and is responsible
// for closing it.
//
// Only the "postgres" driver is supported; any other driver panics,
// as does a failed connection attempt.
func (d *Database) Connect() *gorm.DB {
	var dsn string
	switch d.Driver {
	case "postgres":
		dsn = "host=" + d.Host +
			" port=" + d.Port +
			" user=" + d.Username +
			" password=" + d.Password +
			" dbname=" + d.Database +
			" sslmode=" + d.SslMode
	default:
		panic("DB driver selected was unsupported")
	}
	db, err := gorm.Open(d.Driver, dsn)
	if err != nil {
		panic("failed to connect database")
	}
	// BUG FIX: the original deferred db.Close() here, which closed the
	// connection before returning it — every caller received a dead
	// handle. The handle must stay open; closing is the caller's job.
	return db
}
|
package biliLiveHelper
import (
"github.com/bitly/go-simplejson"
"math"
"sync"
)
const (
	// abortIndex is a sentinel handler index well past any realistic
	// chain length; setting Context.index to it makes Next stop
	// dispatching further handlers.
	abortIndex = math.MaxInt8 / 2
)
// Context carries one received command and its parsed JSON payload
// through a chain of handlers (gin-style middleware).
type Context struct {
	Cmd CmdType // command type of the message
	Msg *simplejson.Json // parsed message body
	keys map[string]interface{} // cross-handler scratch values, lazily created
	keysMutex *sync.RWMutex // guards keys; lazily created — see note on Set/Get
	handlers HandleChain // the handler chain Next walks
	index int8 // cursor into handlers; abortIndex means aborted
}
// NewContext creates a Context for one received command. The
// key/value store and its mutex are initialized eagerly so that
// concurrent Set/Get calls on a constructor-built Context never race
// on the lazy nil-check initialization path.
func NewContext(cmdType CmdType, msg *simplejson.Json) *Context {
	return &Context{
		Cmd:       cmdType,
		Msg:       msg,
		keys:      make(map[string]interface{}),
		keysMutex: &sync.RWMutex{},
		index:     -1,
	}
}
// Set stores a key/value pair on the context, creating the backing
// map on first use.
//
// NOTE(review): the nil-check on keysMutex is itself unsynchronized —
// two goroutines calling Set/Get concurrently on a zero-value Context
// can race creating the mutex. Safe only if the first access is
// single-threaded; confirm callers.
func (c *Context) Set(key string, value interface{}) {
	if c.keysMutex == nil {
		c.keysMutex = &sync.RWMutex{}
	}
	c.keysMutex.Lock()
	if c.keys == nil {
		c.keys = make(map[string]interface{})
	}
	c.keys[key] = value
	c.keysMutex.Unlock()
}
// Get returns the value stored under key and whether it exists.
// Reading from a nil map is safe, so Get works before any Set.
//
// NOTE(review): shares the unsynchronized lazy-mutex creation noted
// on Set.
func (c *Context) Get(key string) (value interface{}, exists bool) {
	if c.keysMutex == nil {
		c.keysMutex = &sync.RWMutex{}
	}
	c.keysMutex.RLock()
	value, exists = c.keys[key]
	c.keysMutex.RUnlock()
	return
}
// Next advances through the remaining handlers in order. A handler
// that calls Abort pushes index to abortIndex (beyond any real chain
// length), so the loop condition fails and dispatch stops.
func (c *Context) Next() {
	c.index++
	for c.index < int8(len(c.handlers)) {
		c.handlers[c.index].Handle(c)
		c.index++
	}
}
// Abort prevents any remaining handlers from running by jumping the
// chain cursor to the abortIndex sentinel.
func (c *Context) Abort() {
	c.index = abortIndex
}
// IsAbort reports whether Abort has been called on this context.
func (c *Context) IsAbort() bool {
	return c.index >= abortIndex
}
|
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package spool
// Option represents an optional configuration function; each Option
// mutates an Options value before the pool is instantiated.
type Option func(opts *Options)
// loadOptions applies the given options on top of the defaults and
// returns the resulting configuration.
func loadOptions(options ...Option) *Options {
	opts := DefaultOption()
	for i := range options {
		options[i](opts)
	}
	return opts
}
// Options contains all options which will be applied when instantiating a pool.
type Options struct {
	// Blocking selects the pool's blocking behavior; see WithBlocking.
	Blocking bool
}
// DefaultOption returns the default configuration: a blocking pool.
func DefaultOption() *Options {
	opts := new(Options)
	opts.Blocking = true
	return opts
}
// WithBlocking indicates whether the pool is blocking.
func WithBlocking(blocking bool) Option {
	return func(o *Options) {
		o.Blocking = blocking
	}
}
|
package relation
import (
"fmt"
"log"
"strconv"
"github.com/ChowRobin/fantim/constant"
"github.com/ChowRobin/fantim/constant/status"
"github.com/ChowRobin/fantim/model/po"
"github.com/ChowRobin/fantim/model/vo"
"github.com/gin-gonic/gin"
)
// ListApply returns a paginated list of relation applications
// (friend requests or group-join requests) visible to the
// authenticated user.
//
// Query parameters: from_user_id (restrict to the caller's own sent
// applications), apply_type (1 = friend, 2 = group join), page and
// page_size (1-based paging, page_size capped at 100), and repeated
// status values filtering application states.
func ListApply(c *gin.Context) interface{} {
	resp := &vo.RelationApplyListResponse{}
	// Parse request parameters.
	userId := c.GetInt64("user_id")
	fromUserId, _ := strconv.ParseInt(c.Query("from_user_id"), 10, 64)
	applyType, _ := strconv.Atoi(c.Query("apply_type"))
	page, _ := strconv.Atoi(c.Query("page"))
	pageSize, _ := strconv.Atoi(c.Query("page_size"))
	queryStatusStr := c.QueryArray("status")
	var queryStatus []int32
	for _, statusStr := range queryStatusStr {
		if s, err := strconv.Atoi(statusStr); err == nil {
			queryStatus = append(queryStatus, int32(s))
		}
	}
	// Validate paging parameters.
	if page == 0 || pageSize == 0 || pageSize > 100 {
		return status.FillResp(resp, status.ErrInvalidParam)
	}
	var toIds []int64
	if fromUserId != 0 {
		// A user may only list applications they sent themselves.
		if fromUserId != userId {
			return status.FillResp(resp, status.ErrInvalidParam)
		}
	} else {
		if applyType == 1 { // friend application
			toIds = append(toIds, userId)
		} else if applyType == 2 { // group-join application
			// []int32{2} filters the caller's group relations by status;
			// exact semantics live in po — presumably groups the caller
			// administers, TODO confirm.
			groups, err := po.ListGroupByCondition(c, userId, []int32{2})
			if err != nil {
				log.Printf("[ListApply] ListGroupByCondition failed. err=%v", err)
				return status.FillResp(resp, status.ErrServiceInternal)
			}
			for _, g := range groups {
				toIds = append(toIds, g.GroupId)
			}
		}
	}
	// Count matching applications first so paging can be validated.
	totalNum, err := po.CountUserRelationApplyPageByCondition(c, fromUserId, toIds, queryStatus, int32(applyType))
	if err != nil {
		log.Printf("[ListApply] po.CountUserRelationApplyPageByCondition failed. err=%v", err)
		return status.FillResp(resp, status.ErrServiceInternal)
	}
	if totalNum == 0 {
		return status.FillResp(resp, status.Success)
	}
	if (page-1)*pageSize >= int(totalNum) {
		return status.FillResp(resp, status.ErrInvalidPageParam)
	}
	// Fetch the requested page of application records.
	applyPoList, err := po.ListUserRelationApplyPageByCondition(c, fromUserId, toIds, queryStatus, int32(applyType), int32(page), int32(pageSize))
	if err != nil {
		log.Printf("[ListApply] po.ListUserRelationApplyPageByCondition failed. err=%v", err)
		return status.FillResp(resp, status.ErrServiceInternal)
	}
	// For group applications, batch-load the target groups so each
	// returned item can embed its group's profile.
	groupMap := make(map[int64]*vo.GroupInfo)
	if len(applyPoList) > 0 && applyType == constant.RelationApplyTypeGroup {
		reqGroupIds := make([]int64, 0, len(applyPoList))
		for _, apply := range applyPoList {
			reqGroupIds = append(reqGroupIds, apply.ToUserId)
		}
		groupsPo, err := po.MultiGetGroup(c, reqGroupIds)
		if err != nil {
			log.Printf("[ListApply] po.MultiGetGroup failed. err=%v", err)
			return status.FillResp(resp, status.ErrServiceInternal)
		}
		for _, g := range groupsPo {
			groupMap[g.GroupId] = &vo.GroupInfo{
				GroupId: g.GroupId,
				OwnerUid: g.OwnerId,
				Name: g.Name,
				Avatar: g.Avatar,
				Description: g.Description,
				GroupIdStr: fmt.Sprintf("%d", g.GroupId),
			}
		}
	}
	// Assemble the response items.
	for _, applyPo := range applyPoList {
		applyVo := &vo.RelationApply{
			FromUserId: applyPo.FromUserId,
			ToUserId: applyPo.ToUserId,
			ApplyType: int32(applyPo.ApplyType),
			Status: int32(applyPo.Status),
			Content: applyPo.Content,
			ApplyId: applyPo.Id,
		}
		if applyType == constant.RelationApplyTypeGroup {
			// For group applications ToUserId holds the group id.
			applyVo.GroupInfo = groupMap[applyPo.ToUserId]
		}
		resp.ApplyList = append(resp.ApplyList, applyVo)
	}
	resp.TotalNum = totalNum
	return status.FillResp(resp, status.Success)
}
|
/*
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package primitive
import (
"bytes"
"sync"
)
// headerPool recycles 4-byte header buffers; bufferPool recycles
// bytes.Buffers. Each pool's New constructor is supplied in the var
// literal, replacing the separate init() in the original — the pools
// behave identically.
var headerPool = sync.Pool{
	New: func() interface{} {
		b := make([]byte, 4)
		return &b
	},
}

var bufferPool = sync.Pool{
	New: func() interface{} {
		return new(bytes.Buffer)
	},
}

// GetHeader returns a pooled 4-byte header buffer.
func GetHeader() *[]byte {
	return headerPool.Get().(*[]byte)
}

// BackHeader returns a header buffer to the pool for reuse.
func BackHeader(d *[]byte) {
	headerPool.Put(d)
}

// GetBuffer returns an empty pooled buffer; it is reset before being
// handed out, so stale contents from a previous user never leak.
func GetBuffer() *bytes.Buffer {
	buf := bufferPool.Get().(*bytes.Buffer)
	buf.Reset()
	return buf
}

// BackBuffer resets b and returns it to the pool for reuse.
func BackBuffer(b *bytes.Buffer) {
	b.Reset()
	bufferPool.Put(b)
}
|
package domain
type DummyUser struct {
Username string `json:"username"`
Avatar string `json:"avatar"`
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.