text stringlengths 11 4.05M |
|---|
package models
import "github.com/astaxie/beego/orm"
// Status is a named status record persisted through beego's ORM.
type Status struct {
Id int // primary key (beego ORM default for a field named "Id")
Name string `orm:"unique"` // status name, enforced unique at the DB level
}
// Insert stores s as a new row and returns any ORM error.
func (s *Status) Insert() error {
	_, err := orm.NewOrm().Insert(s)
	return err
}
// Read loads s from the database, optionally matching only the given fields.
func (s *Status) Read(fields ...string) error {
	return orm.NewOrm().Read(s, fields...)
}
// ReadOrCreate loads the row matched on field, creating it from s when
// absent. It reports whether a row was created, the row id, and any error.
func (s *Status) ReadOrCreate(field string, fields ...string) (bool, int64, error) {
return orm.NewOrm().ReadOrCreate(s, field, fields...)
}
// Update persists the named fields of s (all fields when none are given).
func (s *Status) Update(fields ...string) error {
	_, err := orm.NewOrm().Update(s, fields...)
	return err
}
// Delete removes the row identified by s's primary key.
func (s *Status) Delete() error {
	_, err := orm.NewOrm().Delete(s)
	return err
}
// Statuses returns a query seter over all Status rows, newest id first.
func Statuses() orm.QuerySeter {
	// A pointer to the model resolves to the same registered table as a
	// struct value, and is the form the beego docs use.
	return orm.NewOrm().QueryTable(new(Status)).OrderBy("-Id")
}
// init registers Status with beego's ORM so it can be queried by model.
func init() {
orm.RegisterModel(new(Status))
}
|
package database
import (
"context"
"fmt"
"log"
"os"
"github.com/jackc/pgx/v4/pgxpool"
)
// errMsgConnection is the fatal-error format used when a required
// environment variable is missing.
const errMsgConnection = "The environment variable '%s' is not defined, it is required to establish a connection with the database"
// dBVariables lists the environment variables needed to build the database
// connection string, in the order consumed by configToConnection.
var dBVariables = [...]string{"CTIPO_DB_NAME", "CTIPO_DB_HOST", "CTIPO_DB_USERNAME", "CTIPO_DB_PASSWORD"}
// dBConnection caches the lazily created pgx connection pool.
var dBConnection *pgxpool.Pool
// GetDBConnection lazily initializes and returns the shared pgx pool.
// NOTE(review): first use is not goroutine-safe — presumably called once
// during startup; confirm before calling concurrently.
func GetDBConnection() *pgxpool.Pool {
	if dBConnection == nil {
		dBConnection = configToConnection(dBVariables)
	}
	return dBConnection
}
// configToConnection resolves each required environment variable and opens
// the database connection. Any unset variable terminates the process via
// log.Fatalf.
func configToConnection(envNameList [4]string) *pgxpool.Pool {
	// The original local was named dBVariables, shadowing the package-level
	// array of the same name; renamed for clarity.
	var values [4]string
	for i, envName := range envNameList {
		value := os.Getenv(envName)
		if value == "" {
			log.Fatalf(errMsgConnection, envName)
		}
		values[i] = value
	}
	return createDBConnection(values[0], values[1], values[2], values[3])
}
// createDBConnection builds a pgxpool configuration from the given
// credentials (fixed port 5432, pool capped at 10 connections) and
// connects. Both failure paths terminate the process via log.Fatal.
func createDBConnection(dbName, host, username, password string) *pgxpool.Pool {
config, err := pgxpool.ParseConfig(fmt.Sprintf("user=%v password=%v host=%v port=5432 dbname=%v pool_max_conns=10", username, password, host, dbName))
if err != nil {
log.Fatal("error configuring the database: ", err)
}
conn, err := pgxpool.ConnectConfig(context.Background(), config)
if err != nil {
log.Fatal("error connecting to the database: ", err)
}
return conn
}
|
package app
import "github.com/thoohv5/template/internal/pkg/config"
// IApp is the minimal application contract: expose configuration and start
// serving on the given address(es).
type IApp interface {
// GetConfig returns the application's configuration object.
GetConfig() config.IConfig
// Run starts the application, optionally binding to addr
// (framework default when omitted).
Run(addr ...string) error
}
|
package main
import (
"bufio"
"fmt"
"os"
)
// main counts duplicated lines across stdin or the files named on the
// command line, then prints every line that occurs more than once with
// its count.
func main() {
	counts := make(map[string]int)
	files := os.Args[1:]
	if len(files) == 0 {
		// No arguments: read from standard input.
		countLines(os.Stdin, counts)
	} else {
		for _, name := range files {
			file, err := os.Open(name)
			if err != nil {
				fmt.Fprintf(os.Stderr, "dup2: %v\n", err)
				continue
			}
			countLines(file, counts)
			file.Close()
		}
	}
	for line, n := range counts {
		if n > 1 {
			fmt.Printf("%d\t%s\n", n, line)
		}
	}
}
func countLines(f *os.File, counts map[string]int) {
input := bufio.NewScanner(f)
for input.Scan() {
counts[input.Text()]++
}
}
//cat 1.txt 2.txt | go run findDuplicateStrings2.go
|
// Copyright 2013-2014 go-diameter authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package diamtype
import "fmt"
// Grouped is the Diameter Grouped AVP data type: the raw byte payload
// holding the still-encoded member AVPs.
type Grouped []byte
// DecodeGrouped wraps b as a Grouped value. It never fails, since the
// payload is kept opaque (member AVPs are not decoded here).
func DecodeGrouped(b []byte) (DataType, error) {
return Grouped(b), nil
}
// Serialize returns the raw wire bytes (the value itself; no copy is made).
func (g Grouped) Serialize() []byte {
return g
}
// Len returns the payload length in bytes.
func (g Grouped) Len() int {
return len(g)
}
// Padding reports the number of padding bytes required; always 0 for
// Grouped values.
func (g Grouped) Padding() int {
return 0
}
// Type returns the GroupedType data type identifier.
func (g Grouped) Type() DataTypeId {
return GroupedType
}
// String returns a fixed placeholder; member AVPs are not decoded.
// NOTE(review): fmt.Sprint on a constant is a no-op (staticcheck S1039);
// returning the literal directly would leave this file's only use of fmt
// dangling, so the call is kept to preserve the import.
func (g Grouped) String() string {
return fmt.Sprint("Grouped{...}")
}
|
// Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"fmt"
"hash"
"hash/fnv"
"sync"
"testing"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/hack"
"github.com/pingcap/tidb/util/memory"
"github.com/pingcap/tidb/util/mock"
"github.com/stretchr/testify/require"
)
// initBuildChunk creates a 6-column build-side chunk (bigint, bigint,
// varchar, varchar, decimal, json) with numRows deterministic rows;
// column 0 is always NULL.
func initBuildChunk(numRows int) (*chunk.Chunk, []*types.FieldType) {
	fieldTps := []byte{
		mysql.TypeLonglong,
		mysql.TypeLonglong,
		mysql.TypeVarchar,
		mysql.TypeVarchar,
		mysql.TypeNewDecimal,
		mysql.TypeJSON,
	}
	colTypes := make([]*types.FieldType, 0, len(fieldTps))
	for _, tp := range fieldTps {
		colTypes = append(colTypes, types.NewFieldTypeBuilder().SetType(tp).BuildP())
	}
	chk := chunk.NewChunkWithCapacity(colTypes, numRows)
	for i := 0; i < numRows; i++ {
		str := fmt.Sprintf("%d.12345", i)
		chk.AppendNull(0)
		chk.AppendInt64(1, int64(i))
		chk.AppendString(2, str)
		chk.AppendString(3, str)
		chk.AppendMyDecimal(4, types.NewDecFromStringForTest(str))
		chk.AppendJSON(5, types.CreateBinaryJSON(str))
	}
	return chk, colTypes
}
// initProbeChunk creates a 3-column probe-side chunk (bigint, bigint,
// varchar) with numRows deterministic rows; column 0 is always NULL.
func initProbeChunk(numRows int) (*chunk.Chunk, []*types.FieldType) {
	fieldTps := []byte{mysql.TypeLonglong, mysql.TypeLonglong, mysql.TypeVarchar}
	colTypes := make([]*types.FieldType, 0, len(fieldTps))
	for _, tp := range fieldTps {
		colTypes = append(colTypes, types.NewFieldTypeBuilder().SetType(tp).BuildP())
	}
	chk := chunk.NewChunkWithCapacity(colTypes, numRows)
	for i := 0; i < numRows; i++ {
		chk.AppendNull(0)
		chk.AppendInt64(1, int64(i))
		chk.AppendString(2, fmt.Sprintf("%d.12345", i))
	}
	return chk, colTypes
}
// hashCollision is a stub hash.Hash64 whose Sum64 always returns 0, so
// every key lands in the same bucket and collision handling is exercised.
// count records how many times Sum64 was called.
//
// Receivers are unified to pointers: the original mixed a pointer
// receiver (Sum64, which mutates count) with value receivers, which Go
// style guides advise against.
type hashCollision struct {
	count int
}

// Sum64 always collides (returns 0) and counts the call.
func (h *hashCollision) Sum64() uint64 {
	h.count++
	return 0
}

// Write accepts and discards all input.
func (h *hashCollision) Write(p []byte) (n int, err error) { return len(p), nil }

// Reset is a no-op.
func (h *hashCollision) Reset() {}

// Sum is not needed by these tests.
func (h *hashCollision) Sum(b []byte) []byte { panic("not implemented") }

// Size is not needed by these tests.
func (h *hashCollision) Size() int { panic("not implemented") }

// BlockSize is not needed by these tests.
func (h *hashCollision) BlockSize() int { panic("not implemented") }
// TestHashRowContainer verifies build/probe statistics of hashRowContainer
// in three configurations — in memory, spilled to disk, and with a
// colliding hash — and that the shallow copy reports identical stats.
func TestHashRowContainer(t *testing.T) {
hashFunc := fnv.New64
// Case 1: real hash, fully in memory: no probe collisions expected.
rowContainer, copiedRC := testHashRowContainer(t, hashFunc, false)
require.Equal(t, int64(0), rowContainer.stat.probeCollision)
// On windows time.Now() is imprecise, the elapse time may equal 0
require.True(t, rowContainer.stat.buildTableElapse >= 0)
require.Equal(t, rowContainer.stat.probeCollision, copiedRC.stat.probeCollision)
require.Equal(t, rowContainer.stat.buildTableElapse, copiedRC.stat.buildTableElapse)
// Case 2: real hash, forced spill to disk.
rowContainer, copiedRC = testHashRowContainer(t, hashFunc, true)
require.Equal(t, int64(0), rowContainer.stat.probeCollision)
require.True(t, rowContainer.stat.buildTableElapse >= 0)
require.Equal(t, rowContainer.stat.probeCollision, copiedRC.stat.probeCollision)
require.Equal(t, rowContainer.stat.buildTableElapse, copiedRC.stat.buildTableElapse)
// Case 3: every key hashes to 0, so probe collisions must occur.
h := &hashCollision{count: 0}
hashFuncCollision := func() hash.Hash64 {
return h
}
rowContainer, copiedRC = testHashRowContainer(t, hashFuncCollision, false)
require.True(t, h.count > 0)
require.True(t, rowContainer.stat.probeCollision > int64(0))
require.True(t, rowContainer.stat.buildTableElapse >= 0)
require.Equal(t, rowContainer.stat.probeCollision, copiedRC.stat.probeCollision)
require.Equal(t, rowContainer.stat.buildTableElapse, copiedRC.stat.buildTableElapse)
}
// testHashRowContainer builds a hashRowContainer over two identical
// 10-row chunks keyed on columns 1 and 2, optionally forcing a spill to
// disk, then probes with one row and asserts that both inserted copies
// of row 1 match. It returns the container and its shallow copy.
func testHashRowContainer(t *testing.T, hashFunc func() hash.Hash64, spill bool) (originRC, copiedRC *hashRowContainer) {
sctx := mock.NewContext()
var err error
numRows := 10
chk0, colTypes := initBuildChunk(numRows)
chk1, _ := initBuildChunk(numRows)
// Join keys: column 1 (bigint) and column 2 (varchar).
hCtx := &hashContext{
allTypes: colTypes[1:3],
keyColIdx: []int{1, 2},
}
hCtx.hasNull = make([]bool, numRows)
for i := 0; i < numRows; i++ {
hCtx.hashVals = append(hCtx.hashVals, hashFunc())
}
rowContainer := newHashRowContainer(sctx, hCtx, colTypes)
copiedRC = rowContainer.ShallowCopy()
tracker := rowContainer.GetMemTracker()
tracker.SetLabel(memory.LabelForBuildSideResult)
if spill {
// A 1-byte limit guarantees the spill action fires on first insert.
tracker.SetBytesLimit(1)
rowContainer.rowContainer.ActionSpillForTest().Action(tracker)
}
err = rowContainer.PutChunk(chk0, nil)
require.NoError(t, err)
err = rowContainer.PutChunk(chk1, nil)
require.NoError(t, err)
rowContainer.ActionSpill().(*chunk.SpillDiskAction).WaitForTest()
// After a spill, all build-side memory should be released to disk.
require.Equal(t, spill, rowContainer.alreadySpilledSafeForTest())
require.Equal(t, spill, rowContainer.rowContainer.GetMemTracker().BytesConsumed() == 0)
require.Equal(t, !spill, rowContainer.rowContainer.GetMemTracker().BytesConsumed() > 0)
require.True(t, rowContainer.GetMemTracker().BytesConsumed() > 0) // hashtable need memory
if rowContainer.alreadySpilledSafeForTest() {
require.NotNil(t, rowContainer.GetDiskTracker())
require.True(t, rowContainer.GetDiskTracker().BytesConsumed() > 0)
}
probeChk, probeColType := initProbeChunk(2)
probeRow := probeChk.GetRow(1)
probeCtx := &hashContext{
allTypes: probeColType[1:3],
keyColIdx: []int{1, 2},
}
probeCtx.hasNull = make([]bool, 1)
// NOTE(review): this appends to hCtx.hashVals (the build context), not
// probeCtx.hashVals; the result is assigned to probeCtx, but confirm
// the aliasing of the build context's slice is intended.
probeCtx.hashVals = append(hCtx.hashVals, hashFunc())
matched, _, err := rowContainer.GetMatchedRowsAndPtrs(hCtx.hashVals[1].Sum64(), probeRow, probeCtx, nil, nil, false)
require.NoError(t, err)
// Row 1 was inserted twice (once per chunk), so both must be matched.
require.Equal(t, 2, len(matched))
require.Equal(t, chk0.GetRow(1).GetDatumRow(colTypes), matched[0].GetDatumRow(colTypes))
require.Equal(t, chk1.GetRow(1).GetDatumRow(colTypes), matched[1].GetDatumRow(colTypes))
return rowContainer, copiedRC
}
// TestConcurrentMapHashTableMemoryUsage checks that the memory delta
// reported by concurrentMapHashTable matches the expected bucket and
// entry costs after 6656 sequential inserts.
func TestConcurrentMapHashTableMemoryUsage(t *testing.T) {
m := newConcurrentMapHashTable()
const iterations = 1024 * hack.LoadFactorNum / hack.LoadFactorDen // 6656
// NOTE(review): wg is added to but never Done'd or waited on — it looks
// like leftover scaffolding from a parallel version of this test.
wg := &sync.WaitGroup{}
wg.Add(2)
// Note: Now concurrentMapHashTable doesn't support inserting in parallel.
for i := 0; i < iterations; i++ {
// Add entry to map.
m.Put(uint64(i*ShardCount), chunk.RowPtr{ChkIdx: uint32(i), RowIdx: uint32(i)})
}
// Expected cost: map buckets plus the exponentially grown entry slabs.
mapMemoryExpected := int64(1024) * hack.DefBucketMemoryUsageForMapIntToPtr
entryMemoryExpected := 16 * int64(64+128+256+512+1024+2048+4096)
require.Equal(t, mapMemoryExpected+entryMemoryExpected, m.GetAndCleanMemoryDelta())
// The delta must be consumed by the first read.
require.Equal(t, int64(0), m.GetAndCleanMemoryDelta())
}
|
package main
import (
"context"
"fmt"
"net/http"
"os"
"os/signal"
"time"
"github.com/gorilla/mux"
"github.com/hrishin/pokemon-shakespeare/pkg/pokemon"
"github.com/op/go-logging"
)
var log = logging.MustGetLogger("pokemon")
// main starts the pokemon description HTTP server on port 5000 and shuts
// it down gracefully (15-second drain window) on interrupt.
func main() {
	const gracePeriod = 15 * time.Second
	port := 5000

	r := mux.NewRouter()
	r.HandleFunc("/pokemon/{name}", pokemon.GetDescriptionHandler)
	srv := &http.Server{
		Addr:    fmt.Sprintf("0.0.0.0:%d", port),
		Handler: r,
	}

	// Serve in the background so main can block on the signal channel.
	go func() {
		log.Infof("starting the server and listening on port %d", port)
		if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
			// ErrServerClosed is the normal result of Shutdown; only
			// unexpected failures are worth logging as errors.
			log.Error(err)
		}
	}()

	ch := make(chan os.Signal, 1)
	signal.Notify(ch, os.Interrupt)
	<-ch

	log.Info("stopping the server")
	ctx, cancel := context.WithTimeout(context.Background(), gracePeriod)
	defer cancel()
	// The original discarded Shutdown's error and then called os.Exit(0),
	// which also skipped the deferred cancel. Report drain failures and
	// let main return normally instead.
	if err := srv.Shutdown(ctx); err != nil {
		log.Error(err)
	}
}
|
package main
import (
"fmt"
)
// main demonstrates panic: execution stops at the panic call, so the
// final Println is intentionally unreachable.
func main() {
// panic produces a quick exit
panic("Jim, we have a problem.")
fmt.Println("You will not even see this line. The panic creates a fast fail.")
}
|
package main
import (
"bytes"
"flag"
"fmt"
"log"
"math/rand"
"os"
"sort"
"text/template"
"time"
"github.com/yanzay/tbot"
)
// Command-line flags controlling webhook mode and the database location.
var local = flag.Bool("local", false, "Launch bot without webhook")
var dataFile = flag.String("data", "tamago.db", "Database file")
// application bundles the bot's persistent stores and the Telegram client
// shared by all handlers.
type application struct {
petStore *PetStorage // live pets
historyStore *PetStorage // presumably past pets; merged in by topAllHandler
client *tbot.Client
}
// main wires up storage, middleware, and message handlers, starts the
// background loops, and blocks serving Telegram updates until a fatal
// error occurs.
func main() {
flag.Parse()
app := &application{}
storage := NewStorage(*dataFile)
app.petStore = storage.PetStorage()
app.historyStore = storage.HistoryStorage()
// Clear Play flags left over from a previous unclean shutdown.
app.resetPlays()
go app.gameStats()
// NOTE(review): log.Fatal below calls os.Exit, which skips this defer —
// the storage is never closed cleanly when the bot fails.
defer storage.Close()
token := os.Getenv("TELEGRAM_TOKEN")
var bot *tbot.Server
if *local {
// Local development: long polling instead of a webhook.
bot = tbot.New(token, tbot.WithLogger(tbot.BasicLogger{}))
} else {
bot = tbot.New(token,
tbot.WithWebhook("https://tamago.yanzay.com/"+token, "0.0.0.0:8014"))
}
app.client = bot.Client()
// Middleware (createPet, sleep): presumably ensures a pet exists and
// intercepts messages while the pet sleeps — confirm in their sources.
bot.Use(app.createPet)
bot.Use(app.sleep)
bot.HandleMessage(HomeButton, app.rootHandler)
bot.HandleMessage(FeedButton, app.feedHandler)
bot.HandleMessage(FoodPizza, app.fullMealHandler)
bot.HandleMessage(FoodMeat, app.fullMealHandler)
bot.HandleMessage(FoodSalad, app.smallMealHandler)
bot.HandleMessage(FoodPopcorn, app.smallMealHandler)
bot.HandleMessage(PlayButton, app.playHandler)
bot.HandleMessage(GameVideo, app.playGameHandler)
bot.HandleMessage(GameBoard, app.playGameHandler)
bot.HandleMessage(GameTennis, app.playGameHandler)
bot.HandleMessage(GameGuitar, app.playGameHandler)
bot.HandleMessage(HealButton, app.healHandler)
bot.HandleMessage(PillButton, app.pillHandler)
bot.HandleMessage(InjectionButton, app.injectionHandler)
bot.HandleMessage(SleepButton, app.sleepHandler)
bot.HandleMessage(Sleep5m, app.sleep5mHandler)
bot.HandleMessage(Sleep1h, app.sleep1hHandler)
bot.HandleMessage(Sleep8h, app.sleep8hHandler)
bot.HandleMessage(TopButton, app.topHandler)
bot.HandleMessage(AliveButton, app.topAliveHandler)
bot.HandleMessage(AllButton, app.topAllHandler)
// Empty pattern: fallback handler for unrecognized messages.
bot.HandleMessage("", app.defaultHandler)
go app.mainLoop()
go app.sleepLoop()
log.Fatal(bot.Start())
}
// resetPlays clears the Play flag on every living pet, recovering from a
// shutdown that happened mid-game.
func (app *application) resetPlays() {
	for _, pet := range app.petStore.Alive() {
		app.petStore.Update(pet.PlayerID, func(p *Pet) {
			p.Play = false
		})
	}
}
// defaultHandler replies to any message no other handler matched.
func (app *application) defaultHandler(m *tbot.Message) {
app.client.SendMessage(m.Chat.ID, "hm?")
}
// rootHandler renders the pet's home screen with the main keyboard.
// Template render errors are silently ignored (nothing is sent).
func (app *application) rootHandler(m *tbot.Message) {
	pet := app.petStore.Get(m.Chat.ID)
	content, err := contentFromTemplate(rootTemplate, pet)
	if err != nil {
		return
	}
	keyboard := tbot.Buttons([][]string{
		{HomeButton, FeedButton, PlayButton},
		{HealButton, SleepButton, TopButton},
	})
	app.client.SendMessage(m.Chat.ID, content,
		tbot.OptReplyKeyboardMarkup(keyboard),
		tbot.OptParseModeMarkdown)
}
// feedHandler shows the feeding menu for the player's pet.
func (app *application) feedHandler(m *tbot.Message) {
	pet := app.petStore.Get(m.Chat.ID)
	content, err := contentFromTemplate(feedTemplate, pet)
	if err != nil {
		return
	}
	keyboard := tbot.Buttons([][]string{
		{FoodSalad, FoodMeat},
		{FoodPopcorn, FoodPizza},
		{HomeButton},
	})
	app.client.SendMessage(m.Chat.ID, content,
		tbot.OptReplyKeyboardMarkup(keyboard),
		tbot.OptParseModeMarkdown)
}
// feed applies a meal of the given size to the player's pet, capping Food
// at 200. It confirms with eatMsg (or a refusal when already full) and
// re-renders the feed menu. Extracted because both meal handlers were
// byte-for-byte duplicates except for the amount and message.
func (app *application) feed(m *tbot.Message, amount int, eatMsg string) {
	message := eatMsg
	app.petStore.Update(m.Chat.ID, func(pet *Pet) {
		if pet.Food == 200 {
			// Already full before eating: override the confirmation.
			message = "I can't eat more."
		}
		pet.Food += amount
		if pet.Food > 200 {
			pet.Food = 200
		}
	})
	app.client.SendMessage(m.Chat.ID, message)
	app.feedHandler(m)
}

// fullMealHandler feeds the pet a large meal (+10 Food).
func (app *application) fullMealHandler(m *tbot.Message) {
	app.feed(m, 10, "Om-nom-nom...")
}

// smallMealHandler feeds the pet a small meal (+5 Food).
func (app *application) smallMealHandler(m *tbot.Message) {
	app.feed(m, 5, "Om-nom...")
}
// playHandler shows the game-selection menu for the player's pet.
func (app *application) playHandler(m *tbot.Message) {
	pet := app.petStore.Get(m.Chat.ID)
	content, err := contentFromTemplate(playTemplate, pet)
	if err != nil {
		return
	}
	keyboard := tbot.Buttons([][]string{
		{GameVideo, GameBoard},
		{GameTennis, GameGuitar},
		{HomeButton},
	})
	app.client.SendMessage(m.Chat.ID, content,
		tbot.OptReplyKeyboardMarkup(keyboard),
		tbot.OptParseModeMarkdown)
}
// playGameHandler runs one 5-second play session: the pet is paired with
// a random living pet (possibly itself), then gains Happy (capped at 120)
// and XP while not already maxed out.
func (app *application) playGameHandler(m *tbot.Message) {
	pet := app.petStore.Get(m.Chat.ID)
	if pet.Play {
		// Typo fix: the message previously read "You pet is already playing."
		app.client.SendMessage(m.Chat.ID, "Your pet is already playing. Keep calm.")
		return
	}
	pets := app.petStore.Alive()
	if len(pets) == 0 {
		// Guard: rand.Intn panics on 0 — fall back to solo play.
		app.client.SendMessage(m.Chat.ID,
			fmt.Sprintf("Your pet plays %s with himself.", m.Text))
	} else {
		randomPet := pets[rand.Intn(len(pets))]
		if fmt.Sprint(randomPet.PlayerID) != m.Chat.ID {
			app.client.SendMessage(m.Chat.ID,
				fmt.Sprintf("Your pet started to play %s with %s", m.Text, randomPet.String()))
		} else {
			app.client.SendMessage(m.Chat.ID,
				fmt.Sprintf("Your pet plays %s with himself.", m.Text))
		}
	}
	app.petStore.Update(m.Chat.ID, func(pet *Pet) {
		pet.Play = true
	})
	// NOTE(review): this blocks the handler goroutine for the whole game.
	time.Sleep(5 * time.Second)
	app.petStore.Update(m.Chat.ID, func(pet *Pet) {
		pet.Play = false
		if pet.Happy < 120 {
			pet.XP += 100
		}
		pet.Happy += 10
		if pet.Happy > 120 {
			pet.Happy = 120
		}
	})
	app.client.SendMessage(m.Chat.ID, "Weeeee! It was fun!")
}
// healHandler shows the healing menu for the player's pet.
func (app *application) healHandler(m *tbot.Message) {
	pet := app.petStore.Get(m.Chat.ID)
	content, err := contentFromTemplate(healTemplate, pet)
	if err != nil {
		return
	}
	keyboard := tbot.Buttons([][]string{
		{PillButton, InjectionButton},
		{HomeButton},
	})
	app.client.SendMessage(m.Chat.ID, content,
		tbot.OptReplyKeyboardMarkup(keyboard),
		tbot.OptParseModeMarkdown)
}
// pillHandler heals the pet by 40 (capped at 100) at the cost of 10
// happiness (floored at 0); refuses when already fully healthy.
func (app *application) pillHandler(m *tbot.Message) {
	if app.petStore.Get(m.Chat.ID).Health == 100 {
		app.client.SendMessage(m.Chat.ID, "I'm not sick!")
		return
	}
	app.petStore.Update(m.Chat.ID, func(p *Pet) {
		p.Health += 40
		if p.Health > 100 {
			p.Health = 100
		}
		p.Happy -= 10
		if p.Happy < 0 {
			p.Happy = 0
		}
	})
	app.client.SendMessage(m.Chat.ID, "Ugh!")
	app.healHandler(m)
}
// injectionHandler fully restores Health but caps Happy at 10; refuses
// when the pet is already healthy.
func (app *application) injectionHandler(m *tbot.Message) {
	if app.petStore.Get(m.Chat.ID).Health == 100 {
		app.client.SendMessage(m.Chat.ID, "I'm not sick!")
		return
	}
	app.petStore.Update(m.Chat.ID, func(p *Pet) {
		p.Health = 100
		if p.Happy > 10 {
			p.Happy = 10
		}
	})
	app.client.SendMessage(m.Chat.ID, "Ouch!")
	app.healHandler(m)
}
// sleepHandler shows the sleep-duration menu.
func (app *application) sleepHandler(m *tbot.Message) {
	keyboard := tbot.Buttons([][]string{
		{Sleep5m, Sleep1h, Sleep8h},
		{HomeButton},
	})
	app.client.SendMessage(m.Chat.ID, "How much to sleep?",
		tbot.OptReplyKeyboardMarkup(keyboard),
		tbot.OptParseModeMarkdown)
}
// putToSleep marks the pet asleep until now+d and confirms to the player.
// Extracted because the three duration handlers were identical except for
// the duration.
func (app *application) putToSleep(m *tbot.Message, d time.Duration) {
	app.petStore.Update(m.Chat.ID, func(pet *Pet) {
		pet.Sleep = true
		pet.AwakeTime = time.Now().Add(d)
	})
	app.client.SendMessage(m.Chat.ID, "Zzz...")
}

// sleep5mHandler puts the pet to sleep for five minutes.
func (app *application) sleep5mHandler(m *tbot.Message) {
	app.putToSleep(m, 5*time.Minute)
}

// sleep1hHandler puts the pet to sleep for one hour.
func (app *application) sleep1hHandler(m *tbot.Message) {
	app.putToSleep(m, 1*time.Hour)
}

// sleep8hHandler puts the pet to sleep for eight hours.
func (app *application) sleep8hHandler(m *tbot.Message) {
	app.putToSleep(m, 8*time.Hour)
}
// topHandler lets the player choose between the alive and all-time tops.
func (app *application) topHandler(m *tbot.Message) {
	keyboard := tbot.Buttons([][]string{
		{AliveButton, AllButton},
		{HomeButton},
	})
	app.client.SendMessage(m.Chat.ID, "Choose top",
		tbot.OptReplyKeyboardMarkup(keyboard))
}
// renderTop sorts pets by XP (descending), keeps the top ten, renders them
// through topTemplate, and sends the result as a Markdown code block.
// Extracted because both top handlers duplicated this logic verbatim.
// NOTE(review): assumes PetStorage.Alive/All return []*Pet — confirm
// against storage.go.
func (app *application) renderTop(m *tbot.Message, pets []*Pet) {
	sort.Slice(pets, func(i, j int) bool {
		return pets[i].XP > pets[j].XP
	})
	if len(pets) > 10 {
		pets = pets[:10]
	}
	b := &bytes.Buffer{}
	if err := topTemplate.Execute(b, pets); err != nil {
		log.Printf("Can't render topTemplate: %q", err)
	}
	content := "```\n" + b.String() + "```"
	app.client.SendMessage(m.Chat.ID, content, tbot.OptParseModeMarkdown)
}

// topAliveHandler shows the top ten living pets by XP.
func (app *application) topAliveHandler(m *tbot.Message) {
	app.renderTop(m, app.petStore.Alive())
}

// topAllHandler shows the all-time top ten pets (living plus history).
func (app *application) topAllHandler(m *tbot.Message) {
	pets := app.petStore.Alive()
	pets = append(pets, app.historyStore.All()...)
	app.renderTop(m, pets)
}
// contentFromTemplate executes tpl with pet and wraps the output in a
// Markdown code fence. Render failures are logged and returned.
func contentFromTemplate(tpl *template.Template, pet *Pet) (string, error) {
	var buf bytes.Buffer
	if err := tpl.Execute(&buf, pet); err != nil {
		log.Printf("Can't render template %v: %q", tpl, err)
		return "", err
	}
	return "```\n" + buf.String() + "```", nil
}
// gameStats logs the total and living pet counts once a minute, forever.
// Intended to run as a goroutine.
func (app *application) gameStats() {
	for {
		total := app.petStore.All()
		alive := app.petStore.Alive()
		log.Printf("Players: %d, alive: %d", len(total), len(alive))
		time.Sleep(60 * time.Second)
	}
}
|
package main
import (
"fmt"
hyperclient "github.com/Cloud-Foundations/Dominator/hypervisor/client"
"github.com/Cloud-Foundations/Dominator/lib/errors"
"github.com/Cloud-Foundations/Dominator/lib/log"
"github.com/Cloud-Foundations/Dominator/lib/srpc"
fm_proto "github.com/Cloud-Foundations/Dominator/proto/fleetmanager"
hyper_proto "github.com/Cloud-Foundations/Dominator/proto/hypervisor"
)
// netbootVmSubcommand is the CLI entry point for the netboot-vm
// subcommand; args is unused, and any failure is wrapped with context.
func netbootVmSubcommand(args []string, logger log.DebugLogger) error {
	if err := netbootVm(logger); err != nil {
		return fmt.Errorf("error netbooting VM: %s", err)
	}
	return nil
}
// netbootVm creates a netboot-enabled VM on a Hypervisor and serves it
// the generated machine configuration files, optionally attaching a VNC
// console viewer.
//
// Flow: pick a Hypervisor (explicit flag, else nearest in *location),
// verify the requested subnets exist there, create and acknowledge the
// VM, record its (secondary) network entries, then issue the
// NetbootMachine request and wait for the console to exit.
func netbootVm(logger log.DebugLogger) error {
	if len(subnetIDs) < 1 {
		return errors.New("no subnetIDs specified")
	}
	fmCR := srpc.NewClientResource("tcp",
		fmt.Sprintf("%s:%d", *fleetManagerHostname, *fleetManagerPortNum))
	defer fmCR.ScheduleClose()
	imageClient, err := srpc.DialHTTP("tcp", fmt.Sprintf("%s:%d",
		*imageServerHostname, *imageServerPortNum), 0)
	if err != nil {
		return fmt.Errorf("%s: %s", *imageServerHostname, err)
	}
	defer imageClient.Close()
	// Choose a Hypervisor: an explicit hostname flag wins, otherwise ask
	// the fleet manager for connected Hypervisors in the location.
	var hypervisorAddresses []string
	if *hypervisorHostname != "" {
		hypervisorAddresses = append(hypervisorAddresses,
			fmt.Sprintf("%s:%d", *hypervisorHostname, *hypervisorPortNum))
	} else {
		hypervisorAddresses, err = listConnectedHypervisorsInLocation(fmCR,
			*location)
		if err != nil {
			return err
		}
	}
	if len(hypervisorAddresses) < 1 {
		return errors.New("no nearby Hypervisors available")
	}
	logger.Debugf(0, "Selected %s as boot server on subnet: %s\n",
		hypervisorAddresses[0], subnetIDs[0])
	hyperCR := srpc.NewClientResource("tcp", hypervisorAddresses[0])
	defer hyperCR.ScheduleClose()
	client, err := hyperCR.GetHTTP(nil, 0)
	if err != nil {
		return err
	}
	defer client.Put()
	// Verify every requested subnet is available on the chosen Hypervisor.
	hypervisorSubnets, err := hyperclient.ListSubnets(client, false)
	if err != nil {
		return err
	}
	subnetTable := make(map[string]hyper_proto.Subnet, len(hypervisorSubnets))
	for _, subnet := range hypervisorSubnets {
		subnetTable[subnet.Id] = subnet
	}
	var subnets []*hyper_proto.Subnet
	for _, subnetId := range subnetIDs {
		if subnet, ok := subnetTable[subnetId]; !ok {
			return fmt.Errorf("subnet: %s not available on: %s",
				subnetId, hypervisorAddresses[0])
		} else {
			subnets = append(subnets, &subnet)
		}
	}
	info := fm_proto.GetMachineInfoResponse{Subnets: subnets}
	createRequest := hyper_proto.CreateVmRequest{
		DhcpTimeout:      -1,
		EnableNetboot:    true,
		MinimumFreeBytes: uint64(volumeSizes[0]),
		VmInfo: hyper_proto.VmInfo{
			ConsoleType:        hyper_proto.ConsoleVNC,
			Hostname:           "netboot-test",
			MemoryInMiB:        uint64(memory >> 20),
			MilliCPUs:          1000,
			SecondarySubnetIDs: subnetIDs[1:],
			SubnetId:           subnetIDs[0],
		},
	}
	if createRequest.VmInfo.MemoryInMiB < 1 {
		createRequest.VmInfo.MemoryInMiB = 1024 // default: 1 GiB
	}
	for _, size := range volumeSizes[1:] {
		createRequest.SecondaryVolumes = append(createRequest.SecondaryVolumes,
			hyper_proto.Volume{Size: uint64(size)})
	}
	var createResponse hyper_proto.CreateVmResponse
	err = hyperclient.CreateVm(client, createRequest, &createResponse, logger)
	if err != nil {
		return err
	}
	err = hyperclient.AcknowledgeVm(client, createResponse.IpAddress)
	if err != nil {
		return err
	}
	logger.Printf("created VM: %s\n", createResponse.IpAddress)
	vmInfo, err := hyperclient.GetVmInfo(client, createResponse.IpAddress)
	if err != nil {
		return err
	}
	// With a VNC viewer configured, connect the console in the background
	// (destroying the VM on exit); otherwise pre-fill the channel so the
	// final receive does not block.
	vncErrChannel := make(chan error, 1)
	if *vncViewer == "" {
		vncErrChannel <- nil
	} else {
		defer hyperclient.DestroyVm(client, createResponse.IpAddress, nil)
		client, err := srpc.DialHTTP("tcp", hypervisorAddresses[0], 0)
		if err != nil {
			return err
		}
		go func() {
			defer client.Close()
			vncErrChannel <- hyperclient.ConnectToVmConsole(client,
				vmInfo.Address.IpAddress, *vncViewer, logger)
		}()
	}
	info.Machine.NetworkEntry = fm_proto.NetworkEntry{
		Hostname:      vmInfo.Hostname,
		HostIpAddress: vmInfo.Address.IpAddress,
		SubnetId:      subnetIDs[0],
	}
	err = info.Machine.HostMacAddress.UnmarshalText(
		[]byte(vmInfo.Address.MacAddress))
	if err != nil {
		return err
	}
	for index, subnetId := range subnetIDs {
		if index < 1 {
			continue // primary subnet already recorded above
		}
		address := vmInfo.SecondaryAddresses[index-1]
		var hwAddr fm_proto.HardwareAddr
		if err := hwAddr.UnmarshalText([]byte(address.MacAddress)); err != nil {
			return err
		}
		info.Machine.SecondaryNetworkEntries = append(
			info.Machine.SecondaryNetworkEntries, fm_proto.NetworkEntry{
				HostIpAddress:  address.IpAddress,
				HostMacAddress: hwAddr,
				SubnetId:       subnetId,
			})
	}
	configFiles, err := makeConfigFiles(info, "", getNetworkEntries(info),
		false)
	if err != nil {
		// Bug fix: this error was previously ignored and silently
		// overwritten by the next assignment to err.
		return err
	}
	netbootRequest := hyper_proto.NetbootMachineRequest{
		Address:                      vmInfo.Address,
		Files:                        configFiles,
		FilesExpiration:              *netbootFilesTimeout,
		Hostname:                     vmInfo.Hostname,
		NumAcknowledgementsToWaitFor: *numAcknowledgementsToWaitFor,
		OfferExpiration:              *offerTimeout,
		WaitTimeout:                  *netbootTimeout,
	}
	var netbootResponse hyper_proto.NetbootMachineResponse
	err = client.RequestReply("Hypervisor.NetbootMachine", netbootRequest,
		&netbootResponse)
	if err != nil {
		return err
	}
	// Dominator's errors.New returns nil for an empty string, so this
	// only fails when the Hypervisor reported an error.
	if err := errors.New(netbootResponse.Error); err != nil {
		return err
	}
	logger.Println("waiting for console exit")
	return <-vncErrChannel
}
|
/*
* Copyright © 2019-2022 Software AG, Darmstadt, Germany and/or its licensors
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package adatypes
import (
"bytes"
"fmt"
"strings"
)
// subSuperEntries describes one component of a super/sub descriptor: the
// two-character parent field name and the inclusive From..To byte range
// taken from that field.
type subSuperEntries struct {
Name [2]byte
From uint16
To uint16
}
// AdaSuperType data type structure for super or sub descriptor field types, no structures
type AdaSuperType struct {
CommonType
FdtFormat byte // format character from the FDT definition
Entries []subSuperEntries // component fields making up the descriptor
}
// NewSuperType new super or sub descriptor field type. The option byte's
// FDT bits are translated into the corresponding field options.
func NewSuperType(name string, option byte) *AdaSuperType {
	superType := &AdaSuperType{CommonType: CommonType{fieldType: FieldTypeSuperDesc,
		flags:     uint32(1<<FlagOptionToBeRemoved | 1<<FlagOptionReadOnly),
		name:      name, shortName: name}}
	if Central.IsDebugLevel() {
		Central.Log.Debugf("Check super descriptor %s option %X", name, option)
	}
	if option&0x08 != 0 {
		if Central.IsDebugLevel() {
			Central.Log.Debugf("%s super/sub descriptor found PE", name)
		}
		superType.AddOption(FieldOptionPE)
	}
	if option&0x10 != 0 {
		superType.AddOption(FieldOptionNU)
	}
	if option&0x20 != 0 {
		superType.AddOption(FieldOptionMU)
	}
	// NOTE(review): the DE bit (0x80) mapping was commented out upstream.
	if option&0x02 != 0 {
		superType.AddOption(FieldOptionPF)
	}
	if option&0x04 != 0 {
		superType.AddOption(FieldOptionNC)
	}
	return superType
}
// IsStructure return the structure of the field; always false, since
// descriptors are scalar values, not structures.
func (adaType *AdaSuperType) IsStructure() bool {
return false
}
// AddSubEntry add sub field entry on super or sub descriptors and keep
// the total descriptor length in sync.
func (adaType *AdaSuperType) AddSubEntry(name string, from uint16, to uint16) {
	var code [2]byte
	copy(code[:], name)
	adaType.Entries = append(adaType.Entries,
		subSuperEntries{Name: code, From: from, To: to})
	adaType.calcLength()
}
// calcLength recomputes the descriptor's total length as the sum of the
// inclusive byte ranges (To-From+1) of all entries.
func (adaType *AdaSuperType) calcLength() {
	// Renamed the accumulator from "len", which shadowed the builtin.
	total := uint32(0)
	for _, entry := range adaType.Entries {
		if Central.IsDebugLevel() {
			Central.Log.Debugf("%s: super descriptor entry %s len=%d add [%d:%d] -> %d", adaType.name, entry.Name,
				total, entry.From, entry.To, uint32(entry.To-entry.From+1))
		}
		total += uint32(entry.To - entry.From + 1)
	}
	if Central.IsDebugLevel() {
		Central.Log.Debugf("len=%d", total)
	}
	adaType.length = total
}
// Length return the length of the field (sum of entry ranges, maintained
// by calcLength).
func (adaType *AdaSuperType) Length() uint32 {
return adaType.length
}
// SetLength set the length of the field; intentionally a no-op because a
// descriptor's length is derived from its entries via calcLength.
func (adaType *AdaSuperType) SetLength(length uint32) {
}
// Option string representation of all option of Sub or super descriptors,
// comma-separated in fieldOptions bit order.
func (adaType *AdaSuperType) Option() string {
	var active []string
	for i, optName := range fieldOptions {
		if adaType.options&(1<<uint(i)) != 0 {
			active = append(active, optName)
		}
	}
	return strings.Join(active, ",")
}
// SetFractional set fractional part; a no-op, descriptors carry no
// fractional digits.
func (adaType *AdaSuperType) SetFractional(x uint32) {
}
// Fractional get fractional part; always 0 for descriptors.
func (adaType *AdaSuperType) Fractional() uint32 {
return 0
}
// // SetCharset set fractional part
// func (adaType *AdaSuperType) SetCharset(x string) {
// }
// SetFormatType set format type; a no-op, the format comes from the FDT
// (see FdtFormat).
func (adaType *AdaSuperType) SetFormatType(x rune) {
}
// FormatType get format type from the embedded common type.
func (adaType *AdaSuperType) FormatType() rune {
return adaType.FormatTypeCharacter
}
// SetFormatLength set format length; a no-op, the length is derived from
// the descriptor entries.
func (adaType *AdaSuperType) SetFormatLength(x uint32) {
}
// String string representation of the sub or super descriptor in the
// form "short=AA(1,2),BB(3,4) ; name".
func (adaType *AdaSuperType) String() string {
	var buffer bytes.Buffer
	if adaType.shortName == adaType.name {
		buffer.WriteString(adaType.shortName + "=")
	} else {
		buffer.WriteString(adaType.name + "[" + adaType.shortName + "] =")
	}
	for index, entry := range adaType.Entries {
		if index > 0 {
			buffer.WriteByte(',')
		}
		fmt.Fprintf(&buffer, "%s(%d,%d)", entry.Name, entry.From, entry.To)
	}
	fmt.Fprintf(&buffer, " ; %s", adaType.name)
	return buffer.String()
}
// Value value of the sub or super descriptor; creates a fresh
// super-descriptor value bound to this type.
func (adaType *AdaSuperType) Value() (adaValue IAdaValue, err error) {
if Central.IsDebugLevel() {
Central.Log.Debugf("Return super descriptor value")
}
adaValue = newSuperDescriptorValue(adaType)
return
}
// InitSubTypes init Adabas super/sub types with adabas definition: each
// entry's parent field is looked up by short name and a matching sub type
// is appended; an unknown parent yields an error.
func (adaType *AdaSuperType) InitSubTypes(definition *Definition) (err error) {
	if Central.IsDebugLevel() {
		Central.Log.Debugf("Init super descriptor types of %s", adaType.name)
	}
	for _, entry := range adaType.Entries {
		shortName := string(entry.Name[:])
		parent := definition.fileShortFields[shortName]
		if parent == nil {
			return fmt.Errorf("Error init sub type %s", shortName)
		}
		subType := NewType(parent.Type(), shortName, int(entry.To-entry.From+1))
		adaType.SubTypes = append(adaType.SubTypes, subType)
	}
	return nil
}
// AdaPhoneticType data type phonetic descriptor for field types, no structures
type AdaPhoneticType struct {
AdaType
DescriptorLength uint16 // descriptor length from the FDT
ParentName [2]byte // two-character short name of the parent field
}
// NewPhoneticType new phonetic descriptor type referencing the parent
// field by its two-character short name.
func NewPhoneticType(name string, descriptorLength uint16, parentName string) *AdaPhoneticType {
	var parent [2]byte
	copy(parent[:], parentName)
	phonetic := &AdaPhoneticType{
		DescriptorLength: descriptorLength,
		ParentName:       parent,
	}
	phonetic.AdaType = AdaType{CommonType: CommonType{fieldType: FieldTypePhonetic, name: name,
		flags:     uint32(1<<FlagOptionToBeRemoved | 1<<FlagOptionReadOnly),
		shortName: name}}
	return phonetic
}
// String string representation of the phonetic type in the form
// "short=PHON(parent) ; name".
func (fieldType *AdaPhoneticType) String() string {
return fmt.Sprintf("%s=PHON(%s) ; %s", fieldType.shortName, fieldType.ParentName, fieldType.name)
}
// AdaCollationType data type structure for field types, no structures
type AdaCollationType struct {
AdaType
ParentName [2]byte // two-character short name of the parent field
CollAttribute string // collation attribute string from the FDT
}
// NewCollationType creates new collation type instance referencing the
// parent field by its two-character short name.
func NewCollationType(name string, length uint16, parentName string, collAttribute string) *AdaCollationType {
	var parent [2]byte
	copy(parent[:], parentName)
	collation := &AdaCollationType{
		ParentName:    parent,
		CollAttribute: collAttribute,
	}
	collation.AdaType = AdaType{CommonType: CommonType{fieldType: FieldTypeCollation, length: uint32(length),
		flags: uint32(1<<FlagOptionToBeRemoved | 1<<FlagOptionReadOnly),
		name:  name, shortName: name}}
	return collation
}
// String string representation of the collation type.
// Renders the field in Adabas FDT syntax:
// shortName[,opts]=COLLATING(parent,attr) ; name
func (fieldType *AdaCollationType) String() string {
	options := ""
	// LA (long alpha) takes precedence over LB/L4.
	if fieldType.IsOption(FieldOptionLA) {
		options = ",LA"
	} else {
		if fieldType.IsOption(FieldOptionLB) {
			options = ",L4"
		}
	}
	// NOTE(review): the HE and UQ assignments below REPLACE any LA/L4
	// option chosen above instead of appending to it -- confirm this
	// matches the intended FDT option semantics.
	if fieldType.IsOption(FieldOptionHE) {
		options = ",HE"
	}
	if fieldType.IsOption(FieldOptionUQ) {
		options = ",UQ"
	}
	return fmt.Sprintf("%s%s=COLLATING(%s,%s) ; %s", fieldType.shortName, options, fieldType.ParentName,
		fieldType.CollAttribute, fieldType.name)
}
// AdaHyperExitType data type structure for field types, no structures.
// A hyper descriptor is computed by a user exit over one or more parent fields.
type AdaHyperExitType struct {
	AdaType
	fdtFormat   byte     // FDT format character of the descriptor
	nr          byte     // hyper exit number
	parentNames []string // short names of the parent fields fed to the exit
}
// NewHyperExitType creates a hyper descriptor type computed by user exit
// number nr over the given parent fields.
func NewHyperExitType(name string, length uint32, fdtFormat byte, nr uint8, parentNames []string) *AdaHyperExitType {
	common := CommonType{
		fieldType: FieldTypeHyperDesc,
		flags:     uint32(1<<FlagOptionToBeRemoved | 1<<FlagOptionReadOnly),
		name:      name,
		shortName: name,
		length:    length,
	}
	return &AdaHyperExitType{
		AdaType:     AdaType{CommonType: common},
		fdtFormat:   fdtFormat,
		nr:          nr,
		parentNames: parentNames,
	}
}
// String renders the hyper descriptor in Adabas FDT syntax:
// shortName length format[,opts]=HYPER(nr,parents) ; name
func (fieldType *AdaHyperExitType) String() string {
	options := fieldType.Option()
	if len(options) > 0 {
		options = "," + strings.Replace(options, " ", ",", -1)
	}
	// Use strings.Join instead of a hand-rolled concatenation loop.
	parents := strings.Join(fieldType.parentNames, ",")
	return fmt.Sprintf("%s %d %c%s=HYPER(%d,%s) ; %s", fieldType.shortName, fieldType.length, fieldType.fdtFormat,
		options, fieldType.nr, parents, fieldType.name)
}
// AdaReferentialType data type structure for referential integrity types, no structures.
type AdaReferentialType struct {
	AdaType
	refFile         uint32    // file number the constraint references
	keys            [2]string // keys[0] = primary key, keys[1] = foreign key
	refType         uint8     // 1 = primary, 2 = foreign (see ReferentialType)
	refUpdateAction uint8     // 0 = no action, 1 = cascade, 2 = set null
	refDeleteAction uint8     // 0 = no action, 1 = cascade, 2 = set null
}
// NewReferentialType creates a referential-integrity field definition
// pointing at refFile with the given key pair and actions.
func NewReferentialType(name string, refFile uint32, keys [2]string, refType uint8, refUpdateAction uint8, refDeleteAction uint8) *AdaReferentialType {
	common := CommonType{
		fieldType: FieldTypeReferential,
		flags:     uint32(1<<FlagOptionToBeRemoved | 1<<FlagOptionReadOnly),
		name:      name,
		shortName: name,
		length:    0,
	}
	return &AdaReferentialType{
		AdaType:         AdaType{CommonType: common},
		refFile:         refFile,
		keys:            keys,
		refType:         refType,
		refUpdateAction: refUpdateAction,
		refDeleteAction: refDeleteAction,
	}
}
// String renders the referential-integrity field in Adabas FDT syntax:
// shortName=REFINT(foreignKey,refFile,primaryKey/D*,U*) ; name
func (fieldType *AdaReferentialType) String() string {
	// Action codes indexed by stored value; values outside the table
	// contribute nothing, matching the original switch behavior.
	deleteCode := map[uint8]string{0: "/DX", 1: "/DC", 2: "/DN"}
	updateCode := map[uint8]string{0: ",UX", 1: ",UC", 2: ",UN"}
	var out bytes.Buffer
	fmt.Fprintf(&out, "%s=REFINT(%s,%d,%s", fieldType.shortName, fieldType.keys[1], fieldType.refFile, fieldType.keys[0])
	out.WriteString(deleteCode[fieldType.refDeleteAction])
	out.WriteString(updateCode[fieldType.refUpdateAction])
	out.WriteString(")")
	fmt.Fprintf(&out, " ; %s", fieldType.name)
	return out.String()
}
// ReferentialType returns the kind of referential constraint
// ("PRIMARY", "FOREIGN", or "UNKNOWN" for any other code).
func (fieldType *AdaReferentialType) ReferentialType() string {
	if fieldType.refType == 1 {
		return "PRIMARY"
	}
	if fieldType.refType == 2 {
		return "FOREIGN"
	}
	return "UNKNOWN"
}
// ReferentialFile returns the number of the file referenced by this
// referential-integrity constraint.
func (fieldType *AdaReferentialType) ReferentialFile() uint32 {
	return fieldType.refFile
}
// PrimaryKeyName returns the primary key of the referential-integrity
// constraint, defaulting to "ISN" when no key name is set.
func (fieldType *AdaReferentialType) PrimaryKeyName() string {
	if key := fieldType.keys[0]; key != "" {
		return key
	}
	return "ISN"
}
// ForeignKeyName returns the foreign key name of the
// referential-integrity constraint (may be empty).
func (fieldType *AdaReferentialType) ForeignKeyName() string {
	return fieldType.keys[1]
}
// UpdateAction returns the symbolic update action of the constraint;
// unknown codes map to "UPDATE_NOACTION".
func (fieldType *AdaReferentialType) UpdateAction() string {
	actions := map[uint8]string{
		1: "UPDATE_CASCADE",
		2: "UPDATE_NULL",
	}
	if name, ok := actions[fieldType.refUpdateAction]; ok {
		return name
	}
	return "UPDATE_NOACTION"
}
// DeleteAction returns the symbolic delete action of the constraint;
// unknown codes map to "DELETE_NOACTION".
func (fieldType *AdaReferentialType) DeleteAction() string {
	actions := map[uint8]string{
		1: "DELETE_CASCADE",
		2: "DELETE_NULL",
	}
	if name, ok := actions[fieldType.refDeleteAction]; ok {
		return name
	}
	return "DELETE_NOACTION"
}
|
package leetcode
import "testing"
// TestTribonacci checks tribonacci against two known values.
// (Renamed from TestTribonaci to fix the typo; bare t.Fatal calls now
// report the offending value.)
func TestTribonacci(t *testing.T) {
	if got := tribonacci(4); got != 4 {
		t.Fatalf("tribonacci(4) = %d, want 4", got)
	}
	if got := tribonacci(25); got != 1389537 {
		t.Fatalf("tribonacci(25) = %d, want 1389537", got)
	}
}
|
package main
import "fmt"
import "math"
// Canvas and plot geometry constants (reformatted to gofmt style).
const (
	width, height = 600, 320            // canvas size in pixels
	cells         = 100                 // number of grid cells
	xyrange       = 30.0                // axis ranges (-xyrange..+xyrange)
	xyscale       = width / 2 / xyrange // pixels per x or y unit
	zscale        = height * 0.4        // pixels per z unit
)
// main is an empty placeholder; the surface-plotting routine that would
// use the geometry constants defined above is not implemented yet.
func main() {
}
|
package configuration
const (
// ServerVersion specifies the current GOST Server version
ServerVersion string = "v0.3"
// SensorThingsAPIVersion specifies the supported SensorThings API version
SensorThingsAPIVersion string = "v1.0"
)
|
package clitest
import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
	"testing"

	"github.com/cosmos/cosmos-sdk/cmd/gaia/app"
	"github.com/cosmos/cosmos-sdk/server"
	"github.com/cosmos/cosmos-sdk/tests"
	"github.com/stretchr/testify/require"
)
// Home directories for the daemon and CLI under test; populated by init().
var (
	democoindHome = ""
	democliHome   = ""
)
// init resolves the test home directories before any test runs.
func init() {
	democoindHome, democliHome = getTestingHomeDirs()
}
// TestInitStartSequence wipes any previous daemon home, then runs the
// init and start commands end to end against a free TCP port.
func TestInitStartSequence(t *testing.T) {
	os.RemoveAll(democoindHome)
	servAddr, port, err := server.FreeTCPAddr()
	require.NoError(t, err)
	executeInit(t)
	executeStart(t, servAddr, port)
}
// executeInit runs "democoind init" and verifies that its output
// contains a parseable chain_id.
func executeInit(t *testing.T) {
	var (
		chainID string
		initRes map[string]json.RawMessage
	)
	// NOTE(review): the JSON result is read from the command's stderr
	// stream here, not stdout -- confirm that matches the CLI's behavior.
	_, stderr := tests.ExecuteT(t, fmt.Sprintf("democoind --home=%s --home-client=%s init --name=test", democoindHome, democliHome), app.DefaultKeyPass)
	err := json.Unmarshal([]byte(stderr), &initRes)
	require.NoError(t, err)
	err = json.Unmarshal(initRes["chain_id"], &chainID)
	require.NoError(t, err)
}
// executeStart launches "democoind start" in the background, waits until
// Tendermint reports it is up, and stops the process when returning.
func executeStart(t *testing.T, servAddr, port string) {
	proc := tests.GoExecuteTWithStdout(t, fmt.Sprintf("democoind start --home=%s --rpc.laddr=%v", democoindHome, servAddr))
	defer proc.Stop(false)
	tests.WaitForTMStart(port)
}
func getTestingHomeDirs() (string, string) {
tmpDir := os.TempDir()
democoindHome := fmt.Sprintf("%s%s.test_democoind", tmpDir, string(os.PathSeparator))
democliHome := fmt.Sprintf("%s%s.test_democli", tmpDir, string(os.PathSeparator))
return democoindHome, democliHome
}
|
package main
import (
"fmt"
)
// tabla is a fixed-size, zero-initialized package-level array used by main.
var tabla [10]int
// main demonstrates Go arrays, matrices and slices, then chains into the
// variante* demos. (Comments translated from Spanish.)
func main() {
	tabla[0] = 1
	tabla[5] = 15
	fmt.Println(tabla)
	vector := [10]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
	fmt.Println(vector)
	for i := 0; i < len(vector); i++ {
		fmt.Println(vector[i])
	}
	// matrices
	var matriz [3][3]int
	matriz[2][2] = 1
	fmt.Println(matriz)
	// slices -> resemble lists; elements can be appended
	matriz2 := []int{1, 2, 3}
	fmt.Println(matriz2)
	variante2()
}
// variante2 demonstrates slicing an array from an index to the end,
// then chains into variante3.
func variante2() {
	elementos := [5]int{1, 2, 3, 4, 5}
	// from position 3 to the end
	porcion := elementos[3:]
	fmt.Println(porcion)
	variante3()
}
// variante3 demonstrates make with separate length and capacity,
// then chains into variante4.
func variante3() {
	// reserves 20 units of memory but creates the slice with length 5
	elementos := make([]int, 5, 20)
	// cap -> capacity of a slice
	fmt.Printf("Largo %d , capacidad %d", len(elementos), cap(elementos))
	variante4()
}
// variante4 shows how append grows a slice's capacity as elements are added.
func variante4() {
	fmt.Println("_______________________")
	nums := make([]int, 0, 0)
	for i := 0; i < 100; i++ {
		// append grows the backing array when capacity is exceeded
		// (Go increases the capacity roughly as powers of two)
		nums = append(nums, i)
		fmt.Printf("Largo %d , capacidad %d", len(nums), cap(nums))
	}
}
|
package sheetsproxy
import (
"bytes"
"context"
"encoding/json"
"fmt"
"github.com/google/go-cmp/cmp"
"github.com/jakubincloud/sheetsproxy/sheetsproxy/util"
"io"
"io/ioutil"
"log"
"net/http"
"net/http/httptest"
"strings"
"testing"
"time"
)
// TestHelloHTTP is an integration test: it sets up the sheets client from
// a GCP secret, loads a live spreadsheet range, and prints the row count.
// NOTE(review): requires access to the referenced secret and sheet, so it
// cannot run hermetically.
func TestHelloHTTP(t *testing.T) {
	var buf bytes.Buffer
	// Capture log output so it can be echoed at the end of the test.
	log.SetOutput(&buf)
	secretName = "projects/cr-lab-jzygmunt-2608185428/secrets/worker-svc/versions/latest"
	sheetId := "10xtQZsNX0kjO0Jn6jUhBbzQuM5FRV6a_vZzY0NCjdyU"
	rangeValue := "'Form responses 1'!A2:K"
	ctx := context.Background()
	err := setUpClient(ctx)
	if err != nil {
		t.Errorf("Cannot retrieve client data: %v", err)
	}
	response, err := Load(ctx, client, sheetId, rangeValue)
	if err != nil {
		t.Errorf("Cannot Load the spreadsheet: %v", err)
	}
	fmt.Printf("l: %d\n", len(response))
	fmt.Printf("%s\n", buf.String())
}
// TestServe drives the Serve handler through httptest and compares the
// decoded sheet values against the expected header row.
// NOTE(review): depends on live access to the referenced spreadsheet.
func TestServe(t *testing.T) {
	var buf bytes.Buffer
	log.SetOutput(&buf)
	tests := []struct {
		body string
		want [][]string
	}{
		{body: `{"spreadsheet_id": "10xtQZsNX0kjO0Jn6jUhBbzQuM5FRV6a_vZzY0NCjdyU", "range": "'Form responses 1'!A1:K1"}`, want: [][]string{
			{"Timestamp", "Email address", "Platform", "Purpose", "DeleteOn", "AccountId", "Status", "Notes", "CreationDate", "CreationPlatform", "PreviousDeleteOn"},
		}},
	}
	for _, test := range tests {
		req := httptest.NewRequest("GET", "/", strings.NewReader(test.body))
		req.Header.Add("Content-Type", "application/json")
		rr := httptest.NewRecorder()
		Serve(rr, req)
		got, err := readResponse(rr.Body)
		if err != nil {
			t.Errorf("Error reading the response")
			continue
		}
		if !cmp.Equal(got.Values, test.want) {
			t.Errorf("HelloHTTP(%q) = %q, want %q", test.body, got.Values, test.want)
		}
	}
	fmt.Printf("%s\n", buf.String())
}
// readResponse reads and JSON-decodes a response body.
// It returns an error when the body cannot be read or unmarshalled.
func readResponse(in io.Reader) (*response, error) {
	var resp response
	body, err := ioutil.ReadAll(in)
	if err != nil {
		// Check the read error BEFORE logging the (possibly partial) body;
		// also fixes the "iotuil" typo in the log prefix.
		log.Printf("ioutil.ReadAll: %v", err)
		return nil, err
	}
	log.Printf("%s", body)
	if err := json.Unmarshal(body, &resp); err != nil {
		log.Printf("json.Unmarshal: %v", err)
		return nil, err
	}
	return &resp, nil
}
// TestAuthenticatedCloudF is an integration test that calls the deployed
// Cloud Function with an identity token and parses its response.
// NOTE(review): requires live GCP credentials and the deployed endpoint.
func TestAuthenticatedCloudF(t *testing.T) {
	var buf bytes.Buffer
	log.SetOutput(&buf)
	secretName = "projects/cr-lab-jzygmunt-2608185428/secrets/worker-svc/versions/latest"
	sheetsProxyUrl := "https://us-central1-cr-lab-jzygmunt-2608185428.cloudfunctions.net/sheets-proxy"
	serviceAccount := "worker@cr-lab-jzygmunt-2608185428.iam.gserviceaccount.com"
	ctx := context.Background()
	err := setUpClient(ctx)
	if err != nil {
		t.Errorf("Cannot retrieve client data: %v", err)
		return
	}
	// the ID token will be valid for 1 hr
	idToken, err := util.GetIDTokenForEndpoint(ctx, client, serviceAccount, sheetsProxyUrl)
	if err != nil {
		t.Errorf("Cannot get ID token: %v", err)
		return
	}
	body, err := json.Marshal(map[string]string{"spreadsheet_id": "10xtQZsNX0kjO0Jn6jUhBbzQuM5FRV6a_vZzY0NCjdyU", "range": "'Form responses 1'!A1:K1"})
	if err != nil {
		t.Errorf("cannot marshal body")
		return
	}
	req, err := http.NewRequest("POST", sheetsProxyUrl, bytes.NewBuffer(body))
	// Previously this error was silently dropped; a nil req would panic below.
	if err != nil {
		t.Errorf("cannot create request: %v", err)
		return
	}
	req.Header.Add("Authorization", "Bearer "+idToken)
	req.Header.Add("Content-Type", "application/json")
	// Create a new Client
	netClient := &http.Client{
		Timeout: time.Second * 10,
	}
	r, err := netClient.Do(req)
	if err != nil {
		log.Printf("client.Do: %v", err)
		// Previously execution continued here and dereferenced a nil r.
		t.Errorf("client.Do: %v", err)
		return
	}
	// Close the body so the transport can reuse the connection.
	defer r.Body.Close()
	response, err := readResponse(r.Body)
	if err != nil {
		t.Errorf("cannot parse response: %v", err)
		return
	}
	fmt.Printf("response: %v", response)
	fmt.Printf("%s\n", buf.String())
}
|
package onelogin
import (
"errors"
)
// ErrorOcurred logs err and returns a generic error for callers to surface.
// (The exported name keeps its historical spelling for API compatibility.)
func ErrorOcurred(err error) error {
	logger.Errorf("An error occurred, %s", err.Error())
	// Error strings are lowercase without trailing punctuation per Go
	// convention; this also fixes the "ocurred" typo in the message.
	return errors.New("an error occurred")
}
|
package controllers
import (
"encoding/json"
"fmt"
"github.com/revel/revel"
"log"
"testapp/app/dbmanager"
"testapp/app/providers"
)
// TaskController serves the task CRUD endpoints, delegating persistence
// to the task model.
type TaskController struct {
	*revel.Controller
	model *providers.TaskModel // data-access layer; wired up by Init
}
// Init wires up the task model and its database connection, returning
// the controller so calls can be chained.
func (c *TaskController) Init() *TaskController {
	c.model = providers.NewTaskModel()
	c.model.DB = dbmanager.InitConnection()
	// Return the receiver: the previous version returned nil, which made
	// chained use such as c.Init().GetTasks() panic.
	return c
}
// CloseConnection closes the model's database connection, printing the
// outcome to stdout.
// NOTE(review): the method returns nil on both paths, so callers cannot
// distinguish success from failure -- confirm whether it should return
// the receiver or an error instead.
func (c *TaskController) CloseConnection() *TaskController {
	var err error
	err = c.model.DB.Close()
	if err != nil {
		fmt.Println("ERROR. Perhaps connection doesn't exist")
		return nil
	}
	fmt.Println("Connection closed")
	return nil
}
// GetTasks returns the task list as JSON.
// NOTE(review): the list is json.Marshal'ed and then handed to RenderJSON
// as a string, which encodes it again -- the client receives a JSON string
// containing JSON. Confirm whether the double encoding is intended.
// The Marshal error is deliberately ignored here.
func (c *TaskController) GetTasks() revel.Result {
	data, _ := json.Marshal(c.model.GetTasks())
	list := string(data)
	return c.RenderJSON(list)
}
// OpenModalAdd renders the model's add-modal payload as JSON.
func (c *TaskController) OpenModalAdd() revel.Result {
	payload := string(c.model.OpenModalAdd())
	return c.RenderJSON(payload)
}
// OpenModalEdit renders the model's edit-modal payload as JSON.
func (c *TaskController) OpenModalEdit() revel.Result {
	payload := string(c.model.OpenModalEdit())
	return c.RenderJSON(payload)
}
// AddTask forwards the request's JSON payload to the model and renders
// the model's result.
func (c *TaskController) AddTask() revel.Result {
	result := c.model.AddTask(c.Params.JSON)
	return c.RenderJSON(result)
}
// DelTask forwards the request's JSON payload to the model's delete
// operation, logging and rendering the result.
func (c *TaskController) DelTask() revel.Result {
	result := c.model.DelTask(c.Params.JSON)
	log.Print(result)
	return c.RenderJSON(result)
}
// EditTask applies the request's JSON payload to the task with the given
// id, logging and rendering the model's result.
func (c *TaskController) EditTask(id int) revel.Result {
	result := c.model.EditTask(c.Params.JSON, id)
	log.Print(result)
	return c.RenderJSON(result)
}
|
package structs
// Pods is a collection of Pod descriptions.
type Pods []Pod

// Pod describes a pod's resource requests and its placement zone.
type Pod struct {
	Name          string  `json:"name"`
	CPURequest    float64 `json:"cpuRequest"`
	MemoryRequest float64 `json:"memoryRequest"`
	Zone          string  `json:"zone"`
}
// TotalReq tracks the outstanding CPU and memory requests per zone
// (A, B and X).
type TotalReq struct {
	A_totalCPU    float64
	A_totalMemory float64
	B_totalCPU    float64
	B_totalMemory float64
	X_totalCPU    float64
	X_totalMemory float64
}

// AllSatisfied reports whether every outstanding request has dropped to
// zero or below, i.e. all zones are fully satisfied.
func (t TotalReq) AllSatisfied() bool {
	remaining := []float64{
		t.A_totalCPU, t.A_totalMemory,
		t.B_totalCPU, t.B_totalMemory,
		t.X_totalCPU, t.X_totalMemory,
	}
	for _, r := range remaining {
		if r > 0 {
			return false
		}
	}
	return true
}
|
package handler
import "net/http"
// RoundTripperMock implements http.RoundTripper by delegating to a
// caller-supplied function, letting tests stub out HTTP transports.
type RoundTripperMock struct {
	RoundTripFunc func(req *http.Request) (*http.Response, error)
}

// RoundTrip satisfies http.RoundTripper by invoking the configured stub.
func (rt *RoundTripperMock) RoundTrip(req *http.Request) (*http.Response, error) {
	return rt.RoundTripFunc(req)
}
|
package psql
import (
"bytes"
"strconv"
)
// alias writes ` AS <alias>` to the compiler's buffer, quoting the alias
// for the active SQL dialect.
func (c *compilerContext) alias(alias string) {
	c.w.WriteString(` AS `)
	c.quoted(alias)
}
// aliasWithID writes ` AS <alias>_<id>` (unquoted) to w.
func aliasWithID(w *bytes.Buffer, alias string, id int32) {
	w.WriteString(` AS `)
	w.WriteString(alias)
	w.WriteString(`_`)
	w.WriteString(strconv.FormatInt(int64(id), 10))
}
// colWithTableID writes `<table>_<id>.<col>` to w; the numeric suffix is
// omitted when id is negative.
func colWithTableID(w *bytes.Buffer, table string, id int32, col string) {
	w.WriteString(table)
	if id >= 0 {
		w.WriteString(`_`)
		w.WriteString(strconv.FormatInt(int64(id), 10))
	}
	w.WriteString(`.`)
	w.WriteString(col)
}
// colWithTable writes the dialect-quoted `<table>.<col>` reference.
func (c *compilerContext) colWithTable(table, col string) {
	c.quoted(table)
	c.w.WriteString(`.`)
	c.quoted(col)
}
// quoted writes identifier wrapped in the active dialect's identifier
// quote: backticks for MySQL, double quotes otherwise.
func (c *compilerContext) quoted(identifier string) {
	quote := byte('"')
	if c.ct == "mysql" {
		quote = '`'
	}
	c.w.WriteByte(quote)
	c.w.WriteString(identifier)
	c.w.WriteByte(quote)
}
// squoted writes identifier wrapped in single quotes (SQL string-literal
// style); the identifier itself is not escaped.
func (c *compilerContext) squoted(identifier string) {
	c.w.WriteByte('\'')
	c.w.WriteString(identifier)
	c.w.WriteByte('\'')
}
func int32String(w *bytes.Buffer, val int32) {
w.WriteString(strconv.FormatInt(int64(val), 10))
}
|
package leetcode
import "testing"
// TestBackspaceCompare checks backspaceCompare over representative
// true/false cases. Rewritten table-driven; comparing a bool result
// against true/false literals is redundant.
func TestBackspaceCompare(t *testing.T) {
	cases := []struct {
		s, u string
		want bool
	}{
		{"ab#c", "ad#c", true},
		{"ab##", "c#d#", true},
		{"a##c", "#a#c", true},
		{"a#c", "b", false},
		{"bbbextm", "bbb#extm", false},
		{"nzp#o#g", "b#nzp#o#g", true},
	}
	for _, tc := range cases {
		if got := backspaceCompare(tc.s, tc.u); got != tc.want {
			t.Fatalf("backspaceCompare(%q, %q) = %v, want %v", tc.s, tc.u, got, tc.want)
		}
	}
}
|
package config
// ConverterFFmpegConfig stores the configuration for the FFmpeg program.
type ConverterFFmpegConfig struct {
	ExecutableName string // name or path of the ffmpeg executable to invoke
}
|
// package auth will encapsulate authentication backends such as facebook, oauth, google auth...
// For now auth only implements the use of our userservice backend.
package auth
import (
"net/http"
"github.com/gin-gonic/gin"
"github.com/pkg/errors"
"github.com/totalsynthesis/autoromance/lib/userservice"
)
// Authenticate middleware gets the token from a request and uses it to
// retrieve the user, redirecting to "/login" on failure.
// First check the context for the token (for redirects),
// fallback on checking the headers.
// On success the resolved user and token are stored in the gin context.
func Authenticate(c *gin.Context) {
	var token string
	tkn, exists := c.Get("token")
	// Use the context value only when it is present AND a string.
	if t, isString := tkn.(string); exists && isString {
		token = t
	} else {
		// TODO: trim off "Bearer " and then parse the token.
		// At the moment we are simply checking that it exists.
		token = c.Request.Header.Get("Authorization")
	}
	user, err := userservice.Get(token)
	if err != nil {
		c.Redirect(http.StatusSeeOther, "/login")
		return
	}
	c.Set("user", user)
	c.Set("token", token)
}
// GetToken swaps username and password for an expiring token.
// If the credentials are not valid error is returned.
// TODO: implement -- currently returns a hard-coded placeholder token
// and never fails.
func GetToken(username, password string) (string, error) {
	return "asdf", nil
}
// Login exchanges the credentials for a token, wrapping any failure
// with context.
// NOTE(review): the gin context parameter is currently unused -- nothing
// is stored in it despite the original doc comment's claim.
func Login(c *gin.Context, username, password string) (string, error) {
	tok, tokenErr := GetToken(username, password)
	if tokenErr != nil {
		return "", errors.Wrap(tokenErr, "could not get token")
	}
	return tok, nil
}
|
package win32
// Handle is a handle to an object.
type Handle uintptr
// HBitmap is a handle to a bitmap.
type HBitmap Handle
// HBrush is a handle to a brush.
type HBrush Handle
// HColorSpace is a handle to a color space.
type HColorSpace Handle
// HConv is a handle to a DDE conversation.
type HConv Handle
// HConvList is a handle to a DDE conversation list.
type HConvList Handle
// HCursor is a handle to a cursor (defined in terms of HIcon rather than
// Handle, mirroring the Win32 headers).
type HCursor HIcon
// HDC is a handle to a device context.
type HDC Handle
// HDDEData is a handle to DDE data.
type HDDEData Handle
// HDesk is a handle to a desktop.
type HDesk Handle
// HDrop is a handle to an internal drop structure.
type HDrop Handle
// HDWP is a handle to a deferred window position structure.
type HDWP Handle
// HEnhancedMetaFile is a handle to an enhanced meta file.
type HEnhancedMetaFile Handle
// HFile is a handle to a file opened by OpenFile (a signed 32-bit value,
// not a pointer-sized handle).
type HFile int32
// HFont is a handle to a font.
type HFont Handle
// HGDIObject is a handle to a GDI object.
type HGDIObject Handle
// HGlobal is a handle to a global memory block.
type HGlobal Handle
// HHook is a handle to a hook.
type HHook Handle
// HIcon is a handle to an icon.
type HIcon Handle
// HInstance is a handle to an instance.
type HInstance Handle
// HKey is a handle to a registry key.
type HKey Handle
// HKL is a handle to an input locale identifier.
type HKL Handle
// HLocal is a handle to a local memory block.
type HLocal Handle
// HMenu is a handle to a menu.
type HMenu Handle
// HMetaFile is a handle to a metafile.
type HMetaFile Handle
// HModule is a handle to a module (same underlying type as HInstance).
type HModule HInstance
// HMonitor is a handle to a display monitor.
type HMonitor Handle
// HPalette is a handle to a palette.
type HPalette Handle
// HPen is a handle to a pen.
type HPen Handle
// HRegion is a handle to a region.
type HRegion Handle
// HResource is a handle to a resource.
type HResource Handle
// HStringZ is a handle to a DDE string.
type HStringZ Handle
// HWindowStation is a handle to a window station.
type HWindowStation Handle
// HWindow is a handle to a window.
type HWindow Handle
|
/*
Copyright 2020 Humio https://humio.com
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package humio
import (
"crypto/sha512"
"encoding/hex"
"fmt"
"net/url"
"reflect"
humioapi "github.com/humio/cli/api"
humiov1alpha1 "github.com/humio/humio-operator/api/v1alpha1"
"github.com/humio/humio-operator/pkg/kubernetes"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
// ClientMock holds the in-memory state served by the mock client: one
// instance of each Humio resource plus injectable errors for the cluster
// and partition-update calls.
type ClientMock struct {
	Cluster                           humioapi.Cluster
	ClusterError                      error
	UpdateStoragePartitionSchemeError error
	UpdateIngestPartitionSchemeError  error
	IngestToken                       humioapi.IngestToken
	Parser                            humioapi.Parser
	Repository                        humioapi.Repository
	View                              humioapi.View
	OnPremLicense                     humioapi.OnPremLicense
	Action                            humioapi.Action
	Alert                             humioapi.Alert
}
// MockClientConfig is a test double for the Humio client interface,
// backed entirely by in-memory ClientMock state.
type MockClientConfig struct {
	apiClient *ClientMock
}
// NewMockClient constructs a mock Humio client seeded with the given
// cluster state and configurable errors for the partition-update calls.
func NewMockClient(cluster humioapi.Cluster, clusterError error, updateStoragePartitionSchemeError error, updateIngestPartitionSchemeError error) *MockClientConfig {
	// Seed one empty storage/ingest partition BEFORE the cluster value is
	// copied into the mock. The previous version assigned these AFTER the
	// copy, mutating only the local parameter, so the stored mock cluster
	// never received the seed partitions.
	cluster.StoragePartitions = []humioapi.StoragePartition{{}}
	cluster.IngestPartitions = []humioapi.IngestPartition{{}}
	return &MockClientConfig{
		apiClient: &ClientMock{
			Cluster:                           cluster,
			ClusterError:                      clusterError,
			UpdateStoragePartitionSchemeError: updateStoragePartitionSchemeError,
			UpdateIngestPartitionSchemeError:  updateIngestPartitionSchemeError,
			IngestToken:                       humioapi.IngestToken{},
			Parser:                            humioapi.Parser{},
			Repository:                        humioapi.Repository{},
			View:                              humioapi.View{},
			OnPremLicense:                     humioapi.OnPremLicense{},
			Action:                            humioapi.Action{},
			Alert:                             humioapi.Alert{},
		},
	}
}
// Status always reports a healthy cluster with a placeholder version.
func (h *MockClientConfig) Status(config *humioapi.Config, req reconcile.Request) (humioapi.StatusResponse, error) {
	return humioapi.StatusResponse{
		Status:  "OK",
		Version: "x.y.z",
	}, nil
}
// GetClusters returns the stored mock cluster, or the injected error
// when one has been configured.
func (h *MockClientConfig) GetClusters(config *humioapi.Config, req reconcile.Request) (humioapi.Cluster, error) {
	if err := h.apiClient.ClusterError; err != nil {
		return humioapi.Cluster{}, err
	}
	return h.apiClient.Cluster, nil
}
// UpdateStoragePartitionScheme records the given storage partition
// layout on the mock cluster, honoring an injected error if configured.
func (h *MockClientConfig) UpdateStoragePartitionScheme(config *humioapi.Config, req reconcile.Request, sps []humioapi.StoragePartitionInput) error {
	if err := h.apiClient.UpdateStoragePartitionSchemeError; err != nil {
		return err
	}
	var partitions []humioapi.StoragePartition
	for _, input := range sps {
		var nodes []int
		for _, nodeID := range input.NodeIDs {
			nodes = append(nodes, int(nodeID))
		}
		partitions = append(partitions, humioapi.StoragePartition{Id: int(input.ID), NodeIds: nodes})
	}
	h.apiClient.Cluster.StoragePartitions = partitions
	return nil
}
// UpdateIngestPartitionScheme records the given ingest partition layout
// on the mock cluster, honoring an injected error if configured.
func (h *MockClientConfig) UpdateIngestPartitionScheme(config *humioapi.Config, req reconcile.Request, ips []humioapi.IngestPartitionInput) error {
	if err := h.apiClient.UpdateIngestPartitionSchemeError; err != nil {
		return err
	}
	var partitions []humioapi.IngestPartition
	for _, input := range ips {
		var nodes []int
		for _, nodeID := range input.NodeIDs {
			nodes = append(nodes, int(nodeID))
		}
		partitions = append(partitions, humioapi.IngestPartition{Id: int(input.ID), NodeIds: nodes})
	}
	h.apiClient.Cluster.IngestPartitions = partitions
	return nil
}
// SuggestedStoragePartitions returns an empty suggestion list.
func (h *MockClientConfig) SuggestedStoragePartitions(config *humioapi.Config, req reconcile.Request) ([]humioapi.StoragePartitionInput, error) {
	return []humioapi.StoragePartitionInput{}, nil
}
// SuggestedIngestPartitions returns an empty suggestion list.
func (h *MockClientConfig) SuggestedIngestPartitions(config *humioapi.Config, req reconcile.Request) ([]humioapi.IngestPartitionInput, error) {
	return []humioapi.IngestPartitionInput{}, nil
}
// GetBaseURL derives the cluster's service URL from the CR's name and
// namespace on port 8080.
func (h *MockClientConfig) GetBaseURL(config *humioapi.Config, req reconcile.Request, hc *humiov1alpha1.HumioCluster) *url.URL {
	// The Parse error is ignored: the format string always yields a
	// syntactically valid URL.
	baseURL, _ := url.Parse(fmt.Sprintf("http://%s.%s:%d/", hc.Name, hc.Namespace, 8080))
	return baseURL
}
// TestAPIToken always reports the API token as valid.
func (h *MockClientConfig) TestAPIToken(config *humioapi.Config, req reconcile.Request) error {
	return nil
}
// AddIngestToken stores a mock ingest token built from the CR spec and
// returns it; the token value is a fixed placeholder.
func (h *MockClientConfig) AddIngestToken(config *humioapi.Config, req reconcile.Request, hit *humiov1alpha1.HumioIngestToken) (*humioapi.IngestToken, error) {
	h.apiClient.IngestToken = humioapi.IngestToken{
		Name:           hit.Spec.Name,
		AssignedParser: hit.Spec.ParserName,
		Token:          "mocktoken",
	}
	return &h.apiClient.IngestToken, nil
}
// GetIngestToken returns the stored mock ingest token.
func (h *MockClientConfig) GetIngestToken(config *humioapi.Config, req reconcile.Request, hit *humiov1alpha1.HumioIngestToken) (*humioapi.IngestToken, error) {
	return &h.apiClient.IngestToken, nil
}
// UpdateIngestToken delegates to AddIngestToken (replace semantics).
func (h *MockClientConfig) UpdateIngestToken(config *humioapi.Config, req reconcile.Request, hit *humiov1alpha1.HumioIngestToken) (*humioapi.IngestToken, error) {
	return h.AddIngestToken(config, req, hit)
}
// DeleteIngestToken resets the stored ingest token to its zero value.
func (h *MockClientConfig) DeleteIngestToken(config *humioapi.Config, req reconcile.Request, hit *humiov1alpha1.HumioIngestToken) error {
	h.apiClient.IngestToken = humioapi.IngestToken{}
	return nil
}
// AddParser stores a mock parser built from the CR spec and returns it.
func (h *MockClientConfig) AddParser(config *humioapi.Config, req reconcile.Request, hp *humiov1alpha1.HumioParser) (*humioapi.Parser, error) {
	h.apiClient.Parser = humioapi.Parser{
		Name:      hp.Spec.Name,
		Script:    hp.Spec.ParserScript,
		TagFields: hp.Spec.TagFields,
		Tests:     hp.Spec.TestData,
	}
	return &h.apiClient.Parser, nil
}
// GetParser returns the stored parser, or an EntityNotFound-wrapped
// error when no parser has been added (empty name means "none stored").
func (h *MockClientConfig) GetParser(config *humioapi.Config, req reconcile.Request, hp *humiov1alpha1.HumioParser) (*humioapi.Parser, error) {
	if h.apiClient.Parser.Name == "" {
		return nil, fmt.Errorf("could not find parser in view %q with name %q, err=%w", hp.Spec.RepositoryName, hp.Spec.Name, humioapi.EntityNotFound{})
	}
	return &h.apiClient.Parser, nil
}
// UpdateParser delegates to AddParser (replace semantics).
func (h *MockClientConfig) UpdateParser(config *humioapi.Config, req reconcile.Request, hp *humiov1alpha1.HumioParser) (*humioapi.Parser, error) {
	return h.AddParser(config, req, hp)
}
// DeleteParser resets the stored parser to its zero value.
func (h *MockClientConfig) DeleteParser(config *humioapi.Config, req reconcile.Request, hp *humiov1alpha1.HumioParser) error {
	h.apiClient.Parser = humioapi.Parser{}
	return nil
}
// AddRepository stores a mock repository built from the CR spec with a
// random ID and returns it.
func (h *MockClientConfig) AddRepository(config *humioapi.Config, req reconcile.Request, hr *humiov1alpha1.HumioRepository) (*humioapi.Repository, error) {
	h.apiClient.Repository = humioapi.Repository{
		ID:                     kubernetes.RandomString(),
		Name:                   hr.Spec.Name,
		Description:            hr.Spec.Description,
		RetentionDays:          float64(hr.Spec.Retention.TimeInDays),
		IngestRetentionSizeGB:  float64(hr.Spec.Retention.IngestSizeInGB),
		StorageRetentionSizeGB: float64(hr.Spec.Retention.StorageSizeInGB),
	}
	return &h.apiClient.Repository, nil
}
// GetRepository returns the stored mock repository.
func (h *MockClientConfig) GetRepository(config *humioapi.Config, req reconcile.Request, hr *humiov1alpha1.HumioRepository) (*humioapi.Repository, error) {
	return &h.apiClient.Repository, nil
}
// UpdateRepository delegates to AddRepository (replace semantics; note
// this assigns a fresh random ID).
func (h *MockClientConfig) UpdateRepository(config *humioapi.Config, req reconcile.Request, hr *humiov1alpha1.HumioRepository) (*humioapi.Repository, error) {
	return h.AddRepository(config, req, hr)
}
// DeleteRepository resets the stored repository to its zero value.
func (h *MockClientConfig) DeleteRepository(config *humioapi.Config, req reconcile.Request, hr *humiov1alpha1.HumioRepository) error {
	h.apiClient.Repository = humioapi.Repository{}
	return nil
}
// GetView returns the stored mock view.
func (h *MockClientConfig) GetView(config *humioapi.Config, req reconcile.Request, hv *humiov1alpha1.HumioView) (*humioapi.View, error) {
	return &h.apiClient.View, nil
}
// AddView stores a mock view built from the CR spec, translating each
// spec connection into a view connection, and returns it.
func (h *MockClientConfig) AddView(config *humioapi.Config, req reconcile.Request, hv *humiov1alpha1.HumioView) (*humioapi.View, error) {
	viewConnections := make([]humioapi.ViewConnection, 0)
	for _, conn := range hv.Spec.Connections {
		vc := humioapi.ViewConnection{
			RepoName: conn.RepositoryName,
			Filter:   conn.Filter,
		}
		viewConnections = append(viewConnections, vc)
	}
	h.apiClient.View = humioapi.View{
		Name:        hv.Spec.Name,
		Connections: viewConnections,
	}
	return &h.apiClient.View, nil
}
// UpdateView delegates to AddView (replace semantics).
func (h *MockClientConfig) UpdateView(config *humioapi.Config, req reconcile.Request, hv *humiov1alpha1.HumioView) (*humioapi.View, error) {
	return h.AddView(config, req, hv)
}
// DeleteView resets the stored view to its zero value.
func (h *MockClientConfig) DeleteView(config *humioapi.Config, req reconcile.Request, hv *humiov1alpha1.HumioView) error {
	h.apiClient.View = humioapi.View{}
	return nil
}
// GetLicense returns the installed mock license, or the zero
// OnPremLicense when none has been installed.
func (h *MockClientConfig) GetLicense(config *humioapi.Config, req reconcile.Request) (humioapi.License, error) {
	emptyOnPremLicense := humioapi.OnPremLicense{}
	// NOTE(review): reflect.DeepEqual presumably used because the license
	// struct may not be ==-comparable -- confirm before simplifying.
	if !reflect.DeepEqual(h.apiClient.OnPremLicense, emptyOnPremLicense) {
		return h.apiClient.OnPremLicense, nil
	}
	// by default, humio starts without a license
	return emptyOnPremLicense, nil
}
// InstallLicense parses licenseString and, when parsing yields an
// on-prem license, stores it as the installed mock license.
func (h *MockClientConfig) InstallLicense(config *humioapi.Config, req reconcile.Request, licenseString string) error {
	onPremLicense, err := ParseLicenseType(licenseString)
	if err != nil {
		return fmt.Errorf("failed to parse license type: %w", err)
	}
	if onPremLicense != nil {
		h.apiClient.OnPremLicense = *onPremLicense
	}
	return nil
}
// GetAction returns the stored action, or an EntityNotFound-wrapped
// error when no action has been added (empty name means "none stored").
func (h *MockClientConfig) GetAction(config *humioapi.Config, req reconcile.Request, ha *humiov1alpha1.HumioAction) (*humioapi.Action, error) {
	if h.apiClient.Action.Name == "" {
		return nil, fmt.Errorf("could not find action in view %q with name %q, err=%w", ha.Spec.ViewName, ha.Spec.Name, humioapi.EntityNotFound{})
	}
	return &h.apiClient.Action, nil
}
// AddAction converts the CR to an API action, stores it, and returns it.
func (h *MockClientConfig) AddAction(config *humioapi.Config, req reconcile.Request, ha *humiov1alpha1.HumioAction) (*humioapi.Action, error) {
	action, err := ActionFromActionCR(ha)
	if err != nil {
		return action, err
	}
	h.apiClient.Action = *action
	return &h.apiClient.Action, nil
}
// UpdateAction delegates to AddAction (replace semantics).
func (h *MockClientConfig) UpdateAction(config *humioapi.Config, req reconcile.Request, ha *humiov1alpha1.HumioAction) (*humioapi.Action, error) {
	return h.AddAction(config, req, ha)
}
// DeleteAction resets the stored action to its zero value.
func (h *MockClientConfig) DeleteAction(config *humioapi.Config, req reconcile.Request, ha *humiov1alpha1.HumioAction) error {
	h.apiClient.Action = humioapi.Action{}
	return nil
}
// GetAlert returns the stored alert, or an EntityNotFound-wrapped error
// when no alert has been added (empty name means "none stored").
func (h *MockClientConfig) GetAlert(config *humioapi.Config, req reconcile.Request, ha *humiov1alpha1.HumioAlert) (*humioapi.Alert, error) {
	if h.apiClient.Alert.Name == "" {
		return nil, fmt.Errorf("could not find alert in view %q with name %q, err=%w", ha.Spec.ViewName, ha.Spec.Name, humioapi.EntityNotFound{})
	}
	return &h.apiClient.Alert, nil
}
// AddAlert builds the action-ID map for the CR's actions, converts the
// CR into an API alert, stores it, and returns it.
func (h *MockClientConfig) AddAlert(config *humioapi.Config, req reconcile.Request, ha *humiov1alpha1.HumioAlert) (*humioapi.Alert, error) {
	actionIdMap, err := h.GetActionIDsMapForAlerts(config, req, ha)
	if err != nil {
		return &humioapi.Alert{}, fmt.Errorf("could not get action id mapping: %w", err)
	}
	alert, err := AlertTransform(ha, actionIdMap)
	if err != nil {
		return alert, err
	}
	h.apiClient.Alert = *alert
	return &h.apiClient.Alert, nil
}
// UpdateAlert delegates to AddAlert (replace semantics).
func (h *MockClientConfig) UpdateAlert(config *humioapi.Config, req reconcile.Request, ha *humiov1alpha1.HumioAlert) (*humioapi.Alert, error) {
	return h.AddAlert(config, req, ha)
}
// DeleteAlert resets the stored alert to its zero value.
func (h *MockClientConfig) DeleteAlert(config *humioapi.Config, req reconcile.Request, ha *humiov1alpha1.HumioAlert) error {
	h.apiClient.Alert = humioapi.Alert{}
	return nil
}
// GetActionIDsMapForAlerts maps each of the alert's action names to a
// deterministic pseudo-ID (hex-encoded SHA-512 of the name).
func (h *MockClientConfig) GetActionIDsMapForAlerts(config *humioapi.Config, req reconcile.Request, ha *humiov1alpha1.HumioAlert) (map[string]string, error) {
	ids := make(map[string]string, len(ha.Spec.Actions))
	for _, actionName := range ha.Spec.Actions {
		sum := sha512.Sum512([]byte(actionName))
		ids[actionName] = hex.EncodeToString(sum[:])
	}
	return ids, nil
}
// GetHumioClient returns a real API client pointed at a fixed localhost
// address; the Parse error is ignored because the literal URL is valid.
func (h *MockClientConfig) GetHumioClient(config *humioapi.Config, req ctrl.Request) *humioapi.Client {
	clusterURL, _ := url.Parse("http://localhost:8080/")
	return humioapi.NewClient(humioapi.Config{Address: clusterURL})
}
// ClearHumioClientConnections resets every stored mock resource to its
// zero value; cluster state and injected errors are left untouched.
func (h *MockClientConfig) ClearHumioClientConnections() {
	mock := h.apiClient
	mock.IngestToken = humioapi.IngestToken{}
	mock.Parser = humioapi.Parser{}
	mock.Repository = humioapi.Repository{}
	mock.View = humioapi.View{}
	mock.OnPremLicense = humioapi.OnPremLicense{}
	mock.Action = humioapi.Action{}
	mock.Alert = humioapi.Alert{}
}
|
package controller
import (
"github.com/gin-gonic/gin"
)
// ListAccounts godoc
// @Summary List Account
// @Description get Account libs
// @Tags account
// @Accept json
// @Produce json
// @Success 200 {object} protocol.AccountPaginator
// @Failure 400 {object} protocol.Error
// @Failure 404 {object} protocol.Error
// @Failure 500 {object} protocol.Error
// @Router /account [get]
func (c *Controller) ListAccounts(ctx *gin.Context) {
	// NOTE(review): page and page-size are hard-coded to 1 and 3 --
	// presumably placeholders; confirm whether they should come from
	// query parameters.
	paginator := c.App.AccountSpi.ListAccountsPaginate(1,3)
	ctx.JSON(200, paginator)
}
|
package algorithm
// Calculate applies the binary operation named by calc ("add", "sub",
// "mul" or "div") to the two operands. Unknown operation names yield 0.
// Division follows IEEE-754 semantics (division by zero gives ±Inf/NaN).
func Calculate(calc string, operator1 float64, operator2 float64) float64 {
	switch calc {
	case "add":
		return operator1 + operator2
	case "sub":
		return operator1 - operator2
	case "mul":
		return operator1 * operator2
	case "div":
		return operator1 / operator2
	}
	return 0
}
package checkout
import (
"flag"
"fmt"
"io"
"os"
)
// Constants CheckoutPath and ProductsPath serve as default paths to JSON data files should they not be given.
const (
	// CheckoutPath is the default checkout data file path.
	CheckoutPath = "./checkout_data.json"
	// ProductsPath is the default product/price data file path.
	ProductsPath = "./product_data.json"
)
// ArgInfo is returned from GetArgInfo and contains the filepaths for the
// checkout file and products file.
//
// Filepaths may be relative or absolute.
type ArgInfo struct {
	CheckoutPath string // checkout json file path
	ProductsPath string // products json file path
}
// GetArgInfo parses the process arguments into an ArgInfo.
//
// The first positional argument names the checkout file and the -products
// flag names the products file; each falls back to its package-level default
// when absent. Filepaths may be relative or absolute.
func GetArgInfo() ArgInfo {
	commandLine := flag.NewFlagSet(os.Args[0], flag.ExitOnError)
	// Optional -products flag for the products file.
	productsPath := commandLine.String("products", ProductsPath, "optional filepath to products JSON")
	commandLine.Parse(os.Args[1:])
	// First positional argument selects the checkout file.
	checkoutPath := commandLine.Arg(0)
	if checkoutPath == "" {
		// No positional argument given: fall back to the default.
		checkoutPath = CheckoutPath
	}
	return ArgInfo{
		CheckoutPath: checkoutPath,
		ProductsPath: *productsPath,
	}
}
// CheckoutCLI is the primary entry point, called from the parent main
// package. It writes the result string to out.
//
// The CLI command takes a filename as an argument, expecting a JSON file of
// checkout lines; if no filename argument is given, the default checkout
// dataset is used instead. An optional -products flag can also be given to
// specify a path to a different products list.
func CheckoutCLI(out io.Writer) error {
	// --help info
	flag.Usage = func() {
		fmt.Printf("Usage: %s [options] <inJSONLocation>\nOptions:\n", os.Args[0])
		flag.PrintDefaults()
	}
	args := GetArgInfo()
	// Extract data from JSON and compute the checkout total.
	total, err := ProcessCheckout(args.CheckoutPath, args.ProductsPath)
	if err != nil {
		return err
	}
	fmt.Fprintf(out, "checkout file: %s\nproducts file: %s\ntotal value of checkout: %v\n", args.CheckoutPath, args.ProductsPath, total)
	return nil
}
|
package models
import (
	"fmt"
	"strings"

	"dappapi/global/orm"
)
// Perm is one row of the role_permissions join table linking a menu
// permission id to a role id.
type Perm struct {
	Id     int   `gorm:"column:permid;" json:"permid"`
	Roleid int32 `gorm:"column:roleid;" json:"roleid"`
}

// TableName tells gorm which table backs Perm.
func (p *Perm) TableName() string {
	return "role_permissions"
}
// GetByRoleid returns every permission row attached to the given role id.
func (p *Perm) GetByRoleid(roleid int) (permList []*Perm, err error) {
	err = orm.Eloquent["admin"].
		Table(p.TableName()).
		Where("`roleid` = ?", roleid).
		Find(&permList).Error
	return
}
// DeleteRoleMenu removes every permission row of a role and the matching
// casbin "p" policy rules for that role's name.
//
// Fixes over the original: the casbin delete is parameterized instead of
// interpolating role.Name into raw SQL (injection risk), and the Exec error
// is checked instead of being silently dropped.
func (p *Perm) DeleteRoleMenu(roleId int) (bool, error) {
	if err := orm.Eloquent["admin"].Table(p.TableName()).Where("roleid = ?", roleId).Delete(p).Error; err != nil {
		return false, err
	}
	var role SysRole
	if err := orm.Eloquent["admin"].Table("admin_roles").Where("id = ?", roleId).First(&role).Error; err != nil {
		return false, err
	}
	// role.Name is user-controlled data: bind it instead of formatting it in.
	if err := orm.Eloquent["admin"].Exec("delete from casbin_rule where `p_type` = ? and `v0` = ?;", "p", role.Name).Error; err != nil {
		return false, err
	}
	return true, nil
}
// Insert links every menu in menuId to roleId in role_permissions and adds a
// casbin "p" policy rule for each function-type ("f") menu entry.
//
// Fixes over the original: string fields (role name, perm, route) go through
// placeholders instead of being interpolated into SQL (injection risk); an
// empty menu result no longer produces malformed SQL (the old code sliced a
// character off the VALUES prefix); a missing "f" entry no longer produces a
// malformed casbin insert; and Exec errors are checked instead of dropped.
func (p *Perm) Insert(roleId int, menuId []string) (bool, error) {
	var role SysRole
	if err := orm.Eloquent["admin"].Table("admin_roles").Where("id = ?", roleId).First(&role).Error; err != nil {
		return false, err
	}
	var menu []Menu
	if err := orm.Eloquent["admin"].Table("admin_menu").Where("id in (?)", menuId).Find(&menu).Error; err != nil {
		return false, err
	}
	if len(menu) == 0 {
		// Nothing to insert.
		return true, nil
	}
	permValues := make([]string, 0, len(menu))
	ruleValues := make([]string, 0, len(menu))
	ruleArgs := make([]interface{}, 0, 3*len(menu))
	for i := 0; i < len(menu); i++ {
		// Ids are integers, so formatting them directly cannot inject SQL.
		permValues = append(permValues, fmt.Sprintf("(%d, %d)", menu[i].Id, role.RoleId))
		if menu[i].Itype == "f" {
			// String fields are bound via placeholders.
			ruleValues = append(ruleValues, "('p', ?, ?, ?)")
			ruleArgs = append(ruleArgs, role.Name, menu[i].Perm, menu[i].Route)
		}
	}
	sql := "INSERT INTO `role_permissions` (`permid`, `roleid`) VALUES " + strings.Join(permValues, ",") + ";"
	if err := orm.Eloquent["admin"].Exec(sql).Error; err != nil {
		return false, err
	}
	if len(ruleValues) > 0 {
		sql2 := "INSERT INTO `casbin_rule` (`p_type`, `v0`, `v1`, `v2`) VALUES " + strings.Join(ruleValues, ",") + ";"
		if err := orm.Eloquent["admin"].Exec(sql2, ruleArgs...).Error; err != nil {
			return false, err
		}
	}
	return true, nil
}
|
package common
import (
"encoding/base64"
"fmt"
"net/http"
"strings"
"time"
"github.com/root-gg/utils"
)
// Ensure HTTPError implements error
var _ error = (*HTTPError)(nil)

// HTTPError allows to return an error and a HTTP status code.
type HTTPError struct {
	Message    string // human-readable description returned to the client
	Err        error  // optional underlying cause; may be nil
	StatusCode int    // HTTP status code to respond with
}
// NewHTTPError builds an HTTPError carrying message, the wrapped err and the
// HTTP status code to respond with.
func NewHTTPError(message string, err error, code int) HTTPError {
	return HTTPError{Message: message, Err: err, StatusCode: code}
}
// Error returns the error text; it delegates to String so the error and
// fmt.Stringer representations stay identical.
func (e HTTPError) Error() string {
	return e.String()
}
// String renders the message, appending the wrapped error when one is set.
func (e HTTPError) String() string {
	if e.Err == nil {
		return e.Message
	}
	return fmt.Sprintf("%s : %s", e.Message, e.Err)
}
// StripPrefix returns a handler that serves HTTP requests
// removing the given prefix from the request URL's Path
// It differs from http.StripPrefix by defaulting to "/" and not ""
func StripPrefix(prefix string, handler http.Handler) http.Handler {
if prefix == "" || prefix == "/" {
return handler
}
return http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) {
// Relative paths to javascript, css, ... imports won't work without a tailing slash
if req.URL.Path == prefix {
http.Redirect(resp, req, prefix+"/", http.StatusMovedPermanently)
return
}
if p := strings.TrimPrefix(req.URL.Path, prefix); len(p) < len(req.URL.Path) {
req.URL.Path = p
} else {
http.NotFound(resp, req)
return
}
if !strings.HasPrefix(req.URL.Path, "/") {
req.URL.Path = "/" + req.URL.Path
}
handler.ServeHTTP(resp, req)
})
}
// EncodeAuthBasicHeader returns the base64 encoding of "login:password",
// i.e. the value part of an HTTP basic Authorization header.
func EncodeAuthBasicHeader(login string, password string) (value string) {
	credentials := login + ":" + password
	return base64.StdEncoding.EncodeToString([]byte(credentials))
}
// WriteJSONResponse serializes obj to JSON and writes it to the HTTP
// response body. It panics when obj cannot be serialized.
func WriteJSONResponse(resp http.ResponseWriter, obj interface{}) {
	body, err := utils.ToJson(obj)
	if err != nil {
		panic(fmt.Errorf("unable to serialize json response : %s", err))
	}
	// Write error deliberately ignored: nothing sensible can be done
	// once we are mid-response.
	_, _ = resp.Write(body)
}
// AskConfirmation reads one token from standard input and interprets it as a
// yes/no answer: anything starting with "y"/"Y" is true, "n"/"N" is false,
// and an empty line or unrecognized answer yields defaultValue.
func AskConfirmation(defaultValue bool) (bool, error) {
	var answer string
	if _, err := fmt.Scanln(&answer); err != nil {
		if err.Error() == "unexpected newline" {
			// User just pressed enter: take the default.
			return defaultValue, nil
		}
		return false, err
	}
	normalized := strings.ToLower(answer)
	switch {
	case strings.HasPrefix(normalized, "y"):
		return true, nil
	case strings.HasPrefix(normalized, "n"):
		return false, nil
	}
	return defaultValue, nil
}
// HumanDuration displays duration with days and years
func HumanDuration(d time.Duration) string {
var minus bool
if d < 0 {
minus = true
d = -d
}
years := d / (365 * 24 * time.Hour)
d = d % (365 * 24 * time.Hour)
days := d / (24 * time.Hour)
d = d % (24 * time.Hour)
hours := d / (time.Hour)
d = d % (time.Hour)
minutes := d / (time.Minute)
d = d % (time.Minute)
seconds := d / (time.Second)
d = d % (time.Second)
str := ""
if minus {
str += "-"
}
if years > 0 {
str += fmt.Sprintf("%dy", years)
}
if days > 0 {
str += fmt.Sprintf("%dd", days)
}
if hours > 0 {
str += fmt.Sprintf("%dh", hours)
}
if minutes > 0 {
str += fmt.Sprintf("%dm", minutes)
}
if seconds > 0 {
str += fmt.Sprintf("%ds", seconds)
}
if str == "" || d > 0 {
str += fmt.Sprintf("%s", d)
}
return str
}
|
package tiled
import (
"encoding/xml"
"fmt"
"io/ioutil"
"os"
)
// TileMap represents a Tiled Map
type TileMap struct {
XMLName xml.Name `xml:"map"`
Height int `xml:"height,attr"`
Infinite bool `xml:"infinite,attr"`
NextLayerID int `xml:"nextlayerid,attr"`
NextObjectID int `xml:"nextobjectid,attr"`
Orientation string `xml:"orientation,attr"`
RenderOrder string `xml:"renderorder,attr"`
TiledVersion string `xml:"tiledversion,attr"`
TileHeight int `xml:"tileheight,attr"`
TileWidth int `xml:"tilewidth,attr"`
Version float32 `xml:"version,attr"`
Width int `xml:"width,attr"`
EditorSettings *EditorSetting `xml:"editorsettings"`
TileSets []*MapTileSet `xml:"tileset"`
Layers []*Layer `xml:"layer"`
}
// EditorSetting represents settings in a Tiled map
type EditorSetting struct {
Export *Export `xml:"export"`
}
// Export represents export settings in a Tiled map
type Export struct {
Format string `xml:"format,attr"`
Target string `xml:"target,attr"`
}
// MapTileSet represents the used tile sets in a Tiled map
type MapTileSet struct {
FirstGID int `xml:"firstgid,attr"`
Source string `xml:"source,attr"`
}
// Layer represents a single Tiled map layer
type Layer struct {
Data *Data `xml:"data"`
Height int `xml:"height,attr"`
ID int `xml:"id,attr"`
Name string `xml:"name,attr"`
Width int `xml:"width,attr"`
}
// Data represents a layers encoded data
type Data struct {
Encoding string `xml:"encoding,attr"`
Value string `xml:",chardata"`
}
// loadTileMap loads a Tiled tmx file.
func loadTileMap(tmxPath string) (*TileMap, error) {
f, err := os.Open(tmxPath)
if err != nil {
return nil, fmt.Errorf("error opening tmx: %v", err)
}
defer f.Close()
b, err := ioutil.ReadAll(f)
if err != nil {
return nil, fmt.Errorf("error reading tmx: %v", err)
}
var m *TileMap
err = xml.Unmarshal(b, &m)
if err != nil {
return nil, fmt.Errorf("error unmarshalling xml: %v", err)
}
return m, nil
}
|
package miner
import (
"testing"
"github.com/filecoin-project/go-state-types/abi"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestDeadlineAssignment exercises assignDeadlines: given each deadline's
// pre-existing live/dead sector counts, it checks which of the new sectors
// (numbered 0..sectors-1) are assigned to which deadline. A nil entry in a
// test case's deadlines table marks a blacked-out deadline that must
// receive no sectors at all.
func TestDeadlineAssignment(t *testing.T) {
	const partitionSize = 4
	type deadline struct {
		// liveSectors/deadSectors describe the deadline's existing state;
		// expectSectors lists the sector numbers expected to land here.
		liveSectors, deadSectors uint64
		expectSectors            []uint64
	}
	type testCase struct {
		sectors   uint64
		deadlines [WPoStPeriodDeadlines]*deadline
	}
	testCases := []testCase{{
		// Even assignment and striping.
		sectors: 10,
		deadlines: [WPoStPeriodDeadlines]*deadline{
			0: {
				expectSectors: []uint64{
					0, 1, 2, 3,
					8, 9,
				},
			},
			1: {
				expectSectors: []uint64{
					4, 5, 6, 7,
				},
			},
		},
	}, {
		// Fill non-full first
		sectors: 5,
		deadlines: [WPoStPeriodDeadlines]*deadline{
			0: {
				expectSectors: []uint64{3, 4},
			},
			1: {}, // expect nothing.
			3: {
				liveSectors:   1,
				expectSectors: []uint64{0, 1, 2},
			},
		},
	}, {
		// Assign to deadline with least number of live partitions.
		sectors: 1,
		deadlines: [WPoStPeriodDeadlines]*deadline{
			0: {
				// 2 live partitions. +1 would add another.
				liveSectors: 8,
			},
			1: {
				// 2 live partitions. +1 wouldn't add another.
				// 1 dead partition.
				liveSectors:   7,
				deadSectors:   5,
				expectSectors: []uint64{0},
			},
		},
	}, {
		// Avoid increasing max partitions. Both deadlines have the same
		// number of partitions post-compaction, but deadline 1 has
		// fewer pre-compaction.
		sectors: 1,
		deadlines: [WPoStPeriodDeadlines]*deadline{
			0: {
				// one live, one dead.
				liveSectors: 4,
				deadSectors: 4,
			},
			1: {
				// 1 live partitions. +1 would add another.
				liveSectors:   4,
				expectSectors: []uint64{0},
			},
		},
	}, {
		// With multiple open partitions, assign to most full first.
		sectors: 1,
		deadlines: [WPoStPeriodDeadlines]*deadline{
			0: {
				liveSectors: 1,
			},
			1: {
				liveSectors:   2,
				expectSectors: []uint64{0},
			},
		},
	}, {
		// dead sectors also count
		sectors: 1,
		deadlines: [WPoStPeriodDeadlines]*deadline{
			0: {
				liveSectors: 1,
			},
			1: {
				deadSectors:   2,
				expectSectors: []uint64{0},
			},
		},
	}, {
		// dead sectors really do count.
		sectors: 1,
		deadlines: [WPoStPeriodDeadlines]*deadline{
			0: {
				deadSectors: 1,
			},
			1: {
				deadSectors:   2,
				expectSectors: []uint64{0},
			},
		},
	}, {
		// when partitions are equally full, assign based on live sectors.
		sectors: 1,
		deadlines: [WPoStPeriodDeadlines]*deadline{
			0: {
				liveSectors: 1,
				deadSectors: 1,
			},
			1: {
				deadSectors:   2,
				expectSectors: []uint64{0},
			},
		},
	}}
	for _, tc := range testCases {
		// Build the production Deadline records from the compact test spec.
		var deadlines [WPoStPeriodDeadlines]*Deadline
		for i := range deadlines {
			dl := tc.deadlines[i]
			if dl == nil {
				// blackout
				continue
			}
			deadlines[i] = &Deadline{
				LiveSectors:  dl.liveSectors,
				TotalSectors: dl.liveSectors + dl.deadSectors,
			}
		}
		// Sectors to assign are numbered sequentially from 0.
		sectors := make([]*SectorOnChainInfo, tc.sectors)
		for i := range sectors {
			sectors[i] = &SectorOnChainInfo{SectorNumber: abi.SectorNumber(i)}
		}
		assignment := assignDeadlines(partitionSize, &deadlines, sectors)
		for i, sectors := range assignment {
			dl := tc.deadlines[i]
			// blackout?
			if dl == nil {
				assert.Empty(t, sectors, "expected no sectors to have been assigned to blacked out deadline")
				continue
			}
			require.Equal(t, len(dl.expectSectors), len(sectors), "for deadline %d", i)
			for i, expectedSectorNo := range dl.expectSectors {
				assert.Equal(t, uint64(sectors[i].SectorNumber), expectedSectorNo)
			}
		}
	}
}
|
package routers_test
import (
"bytes"
"github.com/haithanh079/go-leaderboard/routers"
"github.com/stretchr/testify/assert"
"net/http"
"net/http/httptest"
"testing"
)
// TestLeaderboardRouters exercises the leaderboard HTTP routes: fetching the
// board, adding a user, and posting an empty payload.
//
// Fixes over the original: the third request built a JSON body but sent nil
// (the body was never attached); both payloads carried trailing commas,
// which is invalid JSON; errors are now checked before serving the request;
// and assert.NoError replaces assert.Equal with swapped expected/actual.
func TestLeaderboardRouters(t *testing.T) {
	router := routers.Router{}
	router.Init(true)
	w := httptest.NewRecorder()

	// Ping to Get Leaderboard.
	req, err := http.NewRequest("GET", "/leaderboard/get", nil)
	assert.NoError(t, err, "GET LEADERBOARD OK")
	router.ServeHTTP(w, req)

	// Ping to Add user -> Leaderboard.
	jsonBodyRequest := []byte(`{"username":"haithanh","score":"1"}`)
	req, err = http.NewRequest("POST", "/leaderboard/add", bytes.NewBuffer(jsonBodyRequest))
	assert.NoError(t, err, "ADD USER -> LEADERBOARD OK")
	router.ServeHTTP(w, req)

	// Add a request with empty fields; the body is now actually sent.
	jsonBodyRequest = []byte(`{"username":"","score":""}`)
	req, err = http.NewRequest("POST", "/leaderboard/add", bytes.NewBuffer(jsonBodyRequest))
	assert.NoError(t, err, "ADD EMPTY REQUEST BODY OK")
	router.ServeHTTP(w, req)
}
|
package netipv4
import (
"encoding/binary"
"errors"
"net"
)
// Get IPv4 list from IPNET
func GetIPv4AddressesFromNet(netAddr *net.IPNet) (out []net.IP, err error) {
var ipv4 net.IP
if ipv4 = netAddr.IP.To4(); ipv4 == nil {
err = errors.New("It's not ip version 4 address")
return nil, err
}
num := binary.BigEndian.Uint32([]byte(ipv4))
mask := binary.BigEndian.Uint32([]byte(netAddr.Mask))
num &= mask
for mask < 0xffffffff {
var buf [4]byte
binary.BigEndian.PutUint32(buf[:], num)
out = append(out, net.IP(buf[:]))
mask++
num++
}
return
}
// GetIpv4FromIface returns the first IPv4 address configured on the given
// network interface, or nil when the interface has none. It panics when the
// interface's addresses cannot be listed (original behavior).
func GetIpv4FromIface(iface *net.Interface) (ipv4Address net.IP) {
	cidrList, err := iface.Addrs()
	if err != nil {
		panic("Don't found ip addresses")
	}
	for _, cidr := range cidrList {
		ipnet, ok := cidr.(*net.IPNet)
		if !ok {
			continue
		}
		if ipv4 := ipnet.IP.To4(); ipv4 != nil {
			return ipv4
		}
	}
	return nil
}
// GetNetworkAddressFromIface returns the first IPv4 network configured on
// the given interface, or nil when there is none. It panics when the
// interface's addresses cannot be listed (original behavior).
func GetNetworkAddressFromIface(iface *net.Interface) (netAddress *net.IPNet) {
	cidrList, err := iface.Addrs()
	if err != nil {
		panic("Don't found ip addresses")
	}
	for _, cidr := range cidrList {
		ipnet, ok := cidr.(*net.IPNet)
		if !ok {
			continue
		}
		if ipnet.IP.To4() != nil {
			return ipnet
		}
	}
	return nil
}
|
package game_map
import (
"github.com/faiface/pixel"
"github.com/faiface/pixel/pixelgl"
"github.com/steelx/go-rpg-cgm/gui"
)
// ArenaCompleteState is the screen shown when the player wins the arena.
type ArenaCompleteState struct {
	Stack     *gui.StateStack           // stack this state lives on
	prevState gui.StackInterface        // state to return to on dismissal
	captions  gui.SimpleCaptionsScreen  // victory captions rendered each frame
}
// ArenaCompleteStateCreate builds the victory screen with its fixed
// captions, remembering prevState so input handling can return to it.
func ArenaCompleteStateCreate(stack *gui.StateStack, prevState gui.StackInterface) *ArenaCompleteState {
	victoryCaptions := []gui.CaptionStyle{
		{"YOU WON!", 3},
		{"Champion of the Arena", 1},
	}
	state := &ArenaCompleteState{
		Stack:     stack,
		prevState: prevState,
		captions:  gui.SimpleCaptionsScreenCreate(victoryCaptions, pixel.V(0, 0)),
	}
	return state
}
// Enter is a no-op; the state needs no setup.
func (s *ArenaCompleteState) Enter() {
}

// Exit is a no-op; the state holds no resources to release.
func (s *ArenaCompleteState) Exit() {
}

// Update does nothing and returns false (states below are not updated).
func (s *ArenaCompleteState) Update(dt float64) bool {
	return false
}

// Render draws the victory captions.
func (s *ArenaCompleteState) Render(win *pixelgl.Window) {
	s.captions.Render(win)
}

// HandleInput dismisses the screen on Space/Escape, replacing this state
// with the remembered previous state.
func (s *ArenaCompleteState) HandleInput(win *pixelgl.Window) {
	if win.JustPressed(pixelgl.KeySpace) || win.JustPressed(pixelgl.KeyEscape) {
		s.Stack.Pop()
		s.Stack.Push(s.prevState)
	}
}
|
package flyingbehavior
// FlyWithWings is the flying-behavior strategy for creatures with wings.
type FlyWithWings struct{}

// Fly describes how this behavior flies.
func (f FlyWithWings) Fly() string {
	return "I am flying with wings"
}
|
package _283_Move_Zeroes
// moveZeroes moves every zero in nums to the end, in place, while preserving
// the relative order of the non-zero elements (LeetCode 283).
//
// Rewritten from the original swap-scan (worst-case quadratic: each zero
// rescans the following zeros) to the classic single-pass two-pointer form:
// compact the non-zero values to the front, then zero-fill the tail. The
// resulting array is identical.
func moveZeroes(nums []int) {
	insert := 0
	for _, v := range nums {
		if v != 0 {
			nums[insert] = v
			insert++
		}
	}
	for ; insert < len(nums); insert++ {
		nums[insert] = 0
	}
}
|
package main
import (
"sync"
)
// deal tracks a single deal at a table: the table it belongs to and the
// community cards revealed so far (flop, turn, river).
type deal struct {
	table          *table
	communityCards []*card
}
// newDeal creates a deal bound to the given table with no community cards
// revealed yet.
func newDeal(table *table) *deal {
	return &deal{
		table:          table,
		communityCards: make([]*card, 0),
	}
}
// dealFlops deals 2 cards for each player (the players' hole cards),
// incrementing the table's hand counter per player.
func (r *deal) dealFlops() {
	for _, player := range r.table.players {
		for k := 0; k < 2; k++ {
			player.flop[k] = r.table.deck.drawLastCard()
		}
		r.table.handCounter++
	}
}
// flop reveals the first three community cards, drawn in order.
func (r *deal) flop() {
	drawn := []*card{
		r.table.deck.drawLastCard(),
		r.table.deck.drawLastCard(),
		r.table.deck.drawLastCard(),
	}
	r.communityCards = append(r.communityCards, drawn...)
}
// turn reveals the fourth community card.
func (r *deal) turn() {
	r.communityCards = append(r.communityCards, r.table.deck.drawLastCard())
}

// river reveals the fifth and final community card.
func (r *deal) river() {
	r.communityCards = append(r.communityCards, r.table.deck.drawLastCard())
}
// findHands looks for hands in all combinations of players' hole cards and
// all community cards, evaluating each player concurrently and tallying the
// best hand name per player into counter.
//
// Fix: the goroutine previously captured the range variable `player`
// directly; before Go 1.22 every goroutine shared one variable, so late
// goroutines could all evaluate the same player. The variable is now
// shadowed per iteration.
func (r *deal) findHands(counter map[string]int) {
	numberOfCommunityCards := len(r.communityCards)
	var combinationsOf3Cards [][]*card
	if numberOfCommunityCards >= 4 {
		combinationsOf3Cards = get3CardCombination(3, r.communityCards)
	} else if numberOfCommunityCards == 3 {
		combinationsOf3Cards = [][]*card{r.communityCards}
	}
	var combinationsOf4Cards [][]*card
	if numberOfCommunityCards == 5 {
		combinationsOf4Cards = get3CardCombination(4, r.communityCards)
	} else if numberOfCommunityCards == 4 {
		combinationsOf4Cards = [][]*card{r.communityCards}
	}
	var wg sync.WaitGroup
	var lock sync.Mutex
	for _, player := range r.table.players {
		player := player // shadow: avoid pre-Go 1.22 loop-variable capture
		wg.Add(1)
		go func() {
			defer wg.Done()
			bestHandName := player.findHands(combinationsOf3Cards, combinationsOf4Cards)
			// counter is shared across goroutines: guard the write.
			lock.Lock()
			counter[bestHandName]++
			lock.Unlock()
		}()
	}
	wg.Wait()
}
|
package main
import (
"fmt"
"math/rand"
)
// main consumes five values from the producer, then signals it to stop.
func main() {
	doneChan := make(chan bool)
	values := getProducer(doneChan)
	for i := 1; i <= 5; i++ {
		fmt.Printf("%d. Value %d\n", i, <-values)
	}
	close(doneChan)
	fmt.Println("Done!")
}
// getProducer starts a goroutine that pushes random ints onto the returned
// channel until done is closed, at which point it closes the channel and
// announces that it stopped.
func getProducer(done <-chan bool) <-chan int {
	out := make(chan int)
	go func() {
		defer fmt.Println("Producer stopped!")
		defer close(out)
		for {
			select {
			case <-done:
				return
			case out <- rand.Int():
			}
		}
	}()
	return out
}
|
// CereVoice Cloud API Library for Go
// https://www.cereproc.com/files/CereVoiceCloudGuide.pdf
// This is a pre-release version and is subject to change
// Copyright 2018 Bryan Anderson (https://www.bganderson.com)
// Relesed under a BSD-style license which can be found in the LICENSE file
package cerevoicego
import (
"bytes"
"encoding/xml"
"io/ioutil"
"net/http"
)
const (
	// VERSION is the global package version.
	VERSION = "0.3.0"
	// DefaultRESTAPIURL is the default CereVoice Cloud REST API endpoint;
	// use it for Client.CereVoiceAPIURL unless targeting another deployment.
	DefaultRESTAPIURL = "https://cerevoice.com/rest/rest_1_1.php"
)
// Client holds the CereVoice Cloud API connection settings; all API methods
// hang off it.
type Client struct {
	AccountID       string // CereVoice Cloud API AccountID
	Password        string // CereVoice Cloud API Password
	CereVoiceAPIURL string // CereVoice Cloud API URL (see DefaultRESTAPIURL)
}
// Request is the XML payload sent to the CereVoice Cloud API. XMLName's
// local name selects the operation; unset optional fields are omitted.
type Request struct {
	XMLName          xml.Name
	AccountID        string `xml:"accountID"`
	Password         string `xml:"password"`
	Voice            string `xml:"voice,omitempty"`
	Text             string `xml:"text,omitempty"`
	AudioFormat      string `xml:"audioFormat,omitempty"`
	SampleRate       string `xml:"sampleRate,omitempty"`
	Audio3D          bool   `xml:"audio3D,omitempty"`
	Metadata         bool   `xml:"metadata,omitempty"`
	LexiconFile      string `xml:"lexiconFile,omitempty"`
	AbbreviationFile string `xml:"abbreviationFile,omitempty"`
	Language         string `xml:"language,omitempty"`
	Accent           string `xml:"accent,omitempty"`
}
// Response is the raw reply from the CereVoice Cloud API plus any transport
// or marshalling error encountered while producing it.
type Response struct {
	Raw   []byte
	Error error
}

// SpeakSimpleInput contains speakSimple parameters.
type SpeakSimpleInput struct {
	Voice string
	Text  string
}

// SpeakExtendedInput contains speakExtended parameters.
type SpeakExtendedInput struct {
	Voice       string
	Text        string
	AudioFormat string
	SampleRate  string
	Audio3D     bool
	Metadata    bool
}

// UploadLexiconInput contains uploadLexicon parameters.
type UploadLexiconInput struct {
	LexiconFile string
	Language    string
	Accent      string
}

// UploadAbbreviationsInput contains uploadAbbreviations parameters.
type UploadAbbreviationsInput struct {
	AbbreviationFile string
	Language         string
}

// SpeakSimpleResponse contains the response from speakSimple.
// Error carries any transport/decoding failure; the other fields mirror the
// service's XML reply.
type SpeakSimpleResponse struct {
	FileURL           string `xml:"fileUrl"`
	CharCount         string `xml:"charCount"`
	ResultCode        string `xml:"resultCode"`
	ResultDescription string `xml:"resultDescription"`
	Error             error
}

// SpeakExtendedResponse contains the response from speakExtended.
type SpeakExtendedResponse struct {
	FileURL           string `xml:"fileUrl"`
	CharCount         string `xml:"charCount"`
	ResultCode        string `xml:"resultCode"`
	ResultDescription string `xml:"resultDescription"`
	Metadata          string `xml:"metadataUrl"`
	Error             error
}

// ListVoicesResponse contains the response from listVoices.
type ListVoicesResponse struct {
	VoiceList []Voice `xml:"voicesList>voice"`
	Error     error
}

// UploadLexiconResponse contains the response from uploadLexicon.
type UploadLexiconResponse struct {
	ResultCode        int    `xml:"resultCode"`
	ResultDescription string `xml:"resultDescription"`
	Error             error
}

// ListLexiconsResponse contains the response from listLexicons.
type ListLexiconsResponse struct {
	LexiconList []Lexicon `xml:"lexiconList>lexiconFile"`
	Error       error
}

// UploadAbbreviationsResponse contains the response from uploadAbbreviations.
type UploadAbbreviationsResponse struct {
	ResultCode        int    `xml:"resultCode"`
	ResultDescription string `xml:"resultDescription"`
	Error             error
}

// ListAbbreviationsResponse contains the response from listAbbreviations.
type ListAbbreviationsResponse struct {
	AbbreviationList []Abbreviation `xml:"abbreviationList>abbreviationFile"`
	Error            error
}

// ListAudioFormatsResponse contains the response from listAudioFormats.
type ListAudioFormatsResponse struct {
	AudioFormats []string `xml:"formatList>format"`
	Error        error
}

// GetCreditResponse contains the response from getCredit.
type GetCreditResponse struct {
	Credit Credit `xml:"credit"`
	Error  error
}

// Voice contains details about a voice.
type Voice struct {
	SampleRate            string `xml:"sampleRate"`
	VoiceName             string `xml:"voiceName"`
	LanguageCodeISO       string `xml:"languageCodeISO"`
	CountryCodeISO        string `xml:"countryCodeISO"`
	AccentCode            string `xml:"accentCode"`
	Sex                   string `xml:"sex"`
	LanguageCodeMicrosoft string `xml:"languageCodeMicrosoft"`
	Country               string `xml:"country"`
	Region                string `xml:"region"`
	Accent                string `xml:"accent"`
}

// Lexicon contains details about a lexicon.
type Lexicon struct {
	URL          string `xml:"url"`
	Language     string `xml:"language"`
	Accent       string `xml:"accent"`
	LastModified string `xml:"lastModified"`
	Size         string `xml:"size"`
}

// Abbreviation contains details about an abbreviation.
type Abbreviation struct {
	URL          string `xml:"url"`
	Language     string `xml:"language"`
	LastModified string `xml:"lastModified"`
	Size         string `xml:"size"`
}

// Credit contains details about CereVoice Cloud credits.
type Credit struct {
	FreeCredit     string `xml:"freeCredit"`
	PaidCredit     string `xml:"paidCredit"`
	CharsAvailable string `xml:"charsAvailable"`
}
// SpeakSimple synthesises input text with the selected voice.
//
// Fix: the named result r was a nil pointer, so r.Error on any error path
// dereferenced nil and panicked; r is now allocated up front.
func (c *Client) SpeakSimple(input *SpeakSimpleInput) (r *SpeakSimpleResponse) {
	r = &SpeakSimpleResponse{}
	resp := c.queryAPI(&Request{
		XMLName:   xml.Name{Local: "speakSimple"},
		AccountID: c.AccountID,
		Password:  c.Password,
		Voice:     input.Voice,
		Text:      input.Text,
	})
	if resp.Error != nil {
		r.Error = resp.Error
		return
	}
	if err := xml.Unmarshal(resp.Raw, r); err != nil {
		r.Error = err
	}
	return
}
// SpeakExtended allows for more control over the audio output than
// SpeakSimple (format, sample rate, 3D audio, metadata).
//
// Fix: the named result r was a nil pointer, so r.Error on any error path
// dereferenced nil and panicked; r is now allocated up front.
func (c *Client) SpeakExtended(input *SpeakExtendedInput) (r *SpeakExtendedResponse) {
	r = &SpeakExtendedResponse{}
	resp := c.queryAPI(&Request{
		XMLName:     xml.Name{Local: "speakExtended"},
		AccountID:   c.AccountID,
		Password:    c.Password,
		Voice:       input.Voice,
		Text:        input.Text,
		AudioFormat: input.AudioFormat,
		SampleRate:  input.SampleRate,
		Audio3D:     input.Audio3D,
		Metadata:    input.Metadata,
	})
	if resp.Error != nil {
		r.Error = resp.Error
		return
	}
	if err := xml.Unmarshal(resp.Raw, r); err != nil {
		r.Error = err
	}
	return
}
// ListVoices outputs information about the available voices.
//
// Fix: the named result r was a nil pointer, so r.Error on any error path
// dereferenced nil and panicked; r is now allocated up front.
func (c *Client) ListVoices() (r *ListVoicesResponse) {
	r = &ListVoicesResponse{}
	resp := c.queryAPI(&Request{
		XMLName:   xml.Name{Local: "listVoices"},
		AccountID: c.AccountID,
		Password:  c.Password,
	})
	if resp.Error != nil {
		r.Error = resp.Error
		return
	}
	if err := xml.Unmarshal(resp.Raw, r); err != nil {
		r.Error = err
	}
	return
}
// UploadLexicon uploads and stores a custom lexicon file.
//
// Fix: the named result r was a nil pointer, so r.Error on any error path
// dereferenced nil and panicked; r is now allocated up front.
func (c *Client) UploadLexicon(input *UploadLexiconInput) (r *UploadLexiconResponse) {
	r = &UploadLexiconResponse{}
	resp := c.queryAPI(&Request{
		XMLName:     xml.Name{Local: "uploadLexicon"},
		AccountID:   c.AccountID,
		Password:    c.Password,
		LexiconFile: input.LexiconFile,
		Language:    input.Language,
		Accent:      input.Accent,
	})
	if resp.Error != nil {
		r.Error = resp.Error
		return
	}
	if err := xml.Unmarshal(resp.Raw, r); err != nil {
		r.Error = err
	}
	return
}
// ListLexicons lists custom lexicon file(s).
//
// Fix: the named result r was a nil pointer, so r.Error on any error path
// dereferenced nil and panicked; r is now allocated up front.
func (c *Client) ListLexicons() (r *ListLexiconsResponse) {
	r = &ListLexiconsResponse{}
	resp := c.queryAPI(&Request{
		XMLName:   xml.Name{Local: "listLexicons"},
		AccountID: c.AccountID,
		Password:  c.Password,
	})
	if resp.Error != nil {
		r.Error = resp.Error
		return
	}
	if err := xml.Unmarshal(resp.Raw, r); err != nil {
		r.Error = err
	}
	return
}
// UploadAbbreviations uploads and stores a custom abbreviation file.
//
// Fixes: the abbreviation file was sent in the request's LexiconFile field
// instead of AbbreviationFile (wrong XML element), and the named result r
// was a nil pointer, so r.Error on any error path panicked.
func (c *Client) UploadAbbreviations(input *UploadAbbreviationsInput) (r *UploadAbbreviationsResponse) {
	r = &UploadAbbreviationsResponse{}
	resp := c.queryAPI(&Request{
		XMLName:          xml.Name{Local: "uploadAbbreviations"},
		AccountID:        c.AccountID,
		Password:         c.Password,
		AbbreviationFile: input.AbbreviationFile,
		Language:         input.Language,
	})
	if resp.Error != nil {
		r.Error = resp.Error
		return
	}
	if err := xml.Unmarshal(resp.Raw, r); err != nil {
		r.Error = err
	}
	return
}
// ListAbbreviations lists custom abbreviation file(s).
//
// Fix: the named result r was a nil pointer, so r.Error on any error path
// dereferenced nil and panicked; r is now allocated up front.
func (c *Client) ListAbbreviations() (r *ListAbbreviationsResponse) {
	r = &ListAbbreviationsResponse{}
	resp := c.queryAPI(&Request{
		XMLName:   xml.Name{Local: "listAbbreviations"},
		AccountID: c.AccountID,
		Password:  c.Password,
	})
	if resp.Error != nil {
		r.Error = resp.Error
		return
	}
	if err := xml.Unmarshal(resp.Raw, r); err != nil {
		r.Error = err
	}
	return
}
// ListAudioFormats lists the available audio encoding formats.
//
// Fix: the named result r was a nil pointer, so r.Error on any error path
// dereferenced nil and panicked; r is now allocated up front.
func (c *Client) ListAudioFormats() (r *ListAudioFormatsResponse) {
	r = &ListAudioFormatsResponse{}
	resp := c.queryAPI(&Request{
		XMLName:   xml.Name{Local: "listAudioFormats"},
		AccountID: c.AccountID,
		Password:  c.Password,
	})
	if resp.Error != nil {
		r.Error = resp.Error
		return
	}
	if err := xml.Unmarshal(resp.Raw, r); err != nil {
		r.Error = err
	}
	return
}
// GetCredit retrieves the credit information for the given account.
//
// Fix: the named result r was a nil pointer, so r.Error on any error path
// dereferenced nil and panicked; r is now allocated up front.
func (c *Client) GetCredit() (r *GetCreditResponse) {
	r = &GetCreditResponse{}
	resp := c.queryAPI(&Request{
		XMLName:   xml.Name{Local: "getCredit"},
		AccountID: c.AccountID,
		Password:  c.Password,
	})
	if resp.Error != nil {
		r.Error = resp.Error
		return
	}
	if err := xml.Unmarshal(resp.Raw, r); err != nil {
		r.Error = err
	}
	return
}
// queryAPI marshals req as XML and POSTs it to the configured CereVoice
// Cloud endpoint, returning the raw response body. Failures are reported
// through the Error field of the returned Response, never by panicking.
//
// Fix: the named result r was a nil pointer, so r.Error on every error path
// dereferenced nil and panicked; r is now allocated up front.
func (c *Client) queryAPI(req *Request) (r *Response) {
	r = &Response{}
	output, err := xml.MarshalIndent(req, "", "  ")
	if err != nil {
		r.Error = err
		return
	}
	request := bytes.NewReader(append([]byte(xml.Header), output...))
	// NOTE(review): http.Post uses the default client, which has no timeout;
	// consider an http.Client with a Timeout for production use.
	resp, err := http.Post(c.CereVoiceAPIURL, "text/xml", request)
	if err != nil {
		r.Error = err
		return
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		r.Error = err
		return
	}
	r.Raw = body
	return
}
|
package models
import (
"github.com/jinzhu/gorm"
"github.com/satori/go.uuid"
"time"
)
// BaseModel holds the columns shared by all models: a UUID primary key plus
// the timestamp columns gorm maintains automatically.
type BaseModel struct {
	ID uuid.UUID `gorm:"type:uuid;primary_key" json:"id"`
	// Date auto generation by gorm
	CreatedAt time.Time
	UpdatedAt time.Time
	// DeletedAt is a pointer so gorm soft-delete can represent "never deleted" as nil.
	DeletedAt *time.Time
}
// BeforeCreate is a gorm callback that assigns a freshly generated UUID v4
// as the primary key before a new record is inserted.
func (BaseModel) BeforeCreate(scope *gorm.Scope) error {
	return scope.SetColumn("ID", uuid.NewV4())
}
|
package api
import (
"net/http"
"time"
"github.com/direktiv/direktiv/pkg/util"
"github.com/direktiv/direktiv/pkg/version"
"github.com/gorilla/mux"
"go.uber.org/zap"
"google.golang.org/protobuf/types/known/emptypb"
)
// logger is the package-level logger; it is assigned once in NewServer.
var logger *zap.SugaredLogger

// Server struct for API server.
type Server struct {
	logger *zap.SugaredLogger
	router *mux.Router
	srv    *http.Server
	config *util.Config
	// handlers
	flowHandler *flowHandler
	// telend stops/flushes telemetry; set by NewServer, invoked on shutdown.
	telend func()
}
// GetRouter returns the server's mux router (s.router).
func (s *Server) GetRouter() *mux.Router {
	return s.router
}
// mw wraps an http.Handler to log every incoming request.
type mw struct {
	h http.Handler
}

// ServeHTTP logs the request method and URL, then delegates to the wrapped handler.
func (mw *mw) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	logger.Infof("request received: %v %v", r.Method, r.URL.String())
	mw.h.ServeHTTP(w, r)
}
// NewServer builds the API server: it wires the /api router, registers the
// version route, reads the flow configuration, installs logging and
// telemetry middleware, and creates the flow handler. The returned server
// is ready for Start.
//
// Fix: the flow-handler failure was logged with logger.Error, which does not
// interpret the %v format verb; it now uses logger.Errorf.
func NewServer(l *zap.SugaredLogger) (*Server, error) {
	logger = l
	logger.Infof("starting api server")
	r := mux.NewRouter().PathPrefix("/api").Subrouter()
	s := &Server{
		logger: l,
		router: r,
		srv: &http.Server{
			Handler:           r,
			Addr:              ":1644",
			ReadHeaderTimeout: time.Second * 60,
		},
	}
	// swagger:operation GET /api/version Other version
	// ---
	// description: |
	//   Returns version information for servers in the cluster.
	// summary: Returns version information for servers in the cluster.
	// responses:
	//   '200':
	//     "description": "version query was successful"
	r.HandleFunc("/version", s.version).Name(RN_Version).Methods(http.MethodGet)
	// read config
	conf, err := util.ReadConfig("/etc/direktiv/flow-config.yaml")
	if err != nil {
		return nil, err
	}
	s.config = conf
	// Request logging wraps everything else.
	r.Use(func(h http.Handler) http.Handler {
		return &mw{h: h}
	})
	logger.Debug("Initializing telemetry.")
	s.telend, err = util.InitTelemetry(s.config, "direktiv/api", "direktiv")
	if err != nil {
		return nil, err
	}
	r.Use(util.TelemetryMiddleware, s.logMiddleware)
	s.flowHandler, err = newFlowHandler(logger, r, s.config)
	if err != nil {
		// Errorf, not Error: the message carries a %v format verb.
		logger.Errorf("can not get flow handler: %v", err)
		s.telend()
		return nil, err
	}
	logger.Debug("adding options routes")
	s.prepareHelperRoutes()
	return s, nil
}
// version responds with the version/build of each server in the cluster:
// the api itself plus the flow and functions services (best effort; build
// query errors leave the corresponding entry out).
//
// Fix: the second guard checked flowResp instead of funcsResp (copy-paste
// bug), so a failed functions query could dereference a nil response.
func (s *Server) version(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	m := make(map[string]string)
	m["api"] = version.Version
	flowResp, _ := s.flowHandler.client.Build(ctx, &emptypb.Empty{})
	if flowResp != nil {
		m["flow"] = flowResp.GetBuild()
	}
	funcsResp, _ := s.flowHandler.functionsClient.Build(ctx, &emptypb.Empty{})
	if funcsResp != nil {
		m["functions"] = funcsResp.GetBuild()
	}
	respondJSON(w, m, nil)
}
// Start starts the API server and blocks until the listener fails or the
// server shuts down. Telemetry is flushed/stopped on return via telend.
func (s *Server) Start() error {
	defer s.telend()
	logger.Infof("start listening")
	return s.srv.ListenAndServe()
}
// prepareHelperRoutes registers a catch-all OPTIONS handler used for CORS
// preflight requests and webhook abuse-protection probes.
func (s *Server) prepareHelperRoutes() {
	// Options ..
	s.router.HandleFunc("/{path:.*}", func(w http.ResponseWriter, r *http.Request) {
		// https://github.com/cloudevents/spec/blob/v1.0/http-webhook.md#4-abuse-protection
		w.Header().Add("WebHook-Allowed-Rate", "120")
		w.Header().Add("Webhook-Allowed-Origin", "eventgrid.azure.net")
		// Disable caching of the preflight response on all proxy generations.
		w.Header().Add("Cache-Control", "no-cache, no-store, must-revalidate")
		w.Header().Add("Pragma", "no-cache")
		w.Header().Add("Expires", "0")
		w.WriteHeader(http.StatusOK)
	}).Methods(http.MethodOptions).Name(RN_Preflight)
}
|
package proxy
import (
"net"
"strconv"
"strings"
"golang.org/x/net/dns/dnsmessage"
"github.com/nextdns/nextdns/resolver"
)
// replyNXDomain writes an NXDOMAIN (name error) response for q into buf,
// echoing the question section, and returns the length written.
func replyNXDomain(q resolver.Query, buf []byte) (n int, i resolver.ResolveInfo, err error) {
	var parser dnsmessage.Parser
	hdr, err := parser.Start(q.Payload)
	if err != nil {
		return 0, i, err
	}
	question, err := parser.Question()
	if err != nil {
		return 0, i, err
	}
	// Turn the query header into a response header carrying NXDOMAIN.
	hdr.Response = true
	hdr.RCode = dnsmessage.RCodeNameError
	builder := dnsmessage.NewBuilder(buf[:0], hdr)
	_ = builder.StartQuestions()
	_ = builder.Question(question)
	msg, err := builder.Finish()
	return len(msg), i, err
}
// isPrivateReverse reports whether qname is a reverse (PTR) name for an
// address that should not be resolved publicly: loopback, link-local
// unicast, the RFC1918 IPv4 ranges, or IPv6 fd00::/8 unique-local space.
func isPrivateReverse(qname string) bool {
	ip := ptrIP(qname)
	if ip == nil {
		return false
	}
	if ip.IsLoopback() || ip.IsLinkLocalUnicast() {
		return true
	}
	if v4 := ip.To4(); v4 != nil {
		switch {
		case v4[0] == 10:
			return true
		case v4[0] == 172 && v4[1]&0xf0 == 16:
			return true
		case v4[0] == 192 && v4[1] == 168:
			return true
		}
		return false
	}
	return ip[0] == 0xfd
}
// ptrIP converts a reverse-DNS (PTR) name into the net.IP it refers to.
// It supports "<reversed dotted quad>.in-addr.arpa." (IPv4, base-10 labels)
// and "<reversed nibbles>.ip6.arpa." (IPv6, one hex nibble per label).
// It returns nil when the name is not a reverse name or a label is malformed.
func ptrIP(ptr string) net.IP {
	if !strings.HasSuffix(ptr, ".arpa.") {
		return nil
	}
	ptr = ptr[:len(ptr)-6] // strip ".arpa."
	var l int              // resulting address length in bytes
	var base int           // numeric base of each label (10 for v4, 16 for v6)
	if strings.HasSuffix(ptr, ".in-addr") {
		ptr = ptr[:len(ptr)-8]
		l = net.IPv4len
		base = 10
	} else if strings.HasSuffix(ptr, ".ip6") {
		ptr = ptr[:len(ptr)-4]
		l = net.IPv6len
		base = 16
	}
	if l == 0 {
		// Neither in-addr nor ip6: not a reverse name.
		return nil
	}
	ip := make(net.IP, l)
	if base == 16 {
		// ip6 names carry one nibble per label: twice as many labels as bytes.
		l *= 2
	}
	// Labels are in reverse order, so consume them right to left.
	for i := 0; i < l && ptr != ""; i++ {
		idx := strings.LastIndexByte(ptr, '.')
		off := idx + 1
		if idx == -1 {
			// Last (leftmost) label remaining.
			idx = 0
			off = 0
		} else if idx == len(ptr)-1 {
			// Empty label (consecutive/trailing dot): malformed.
			return nil
		}
		n, err := strconv.ParseUint(ptr[off:], base, 8)
		if err != nil {
			return nil
		}
		b := byte(n)
		ii := i
		if base == 16 {
			// ip6 use hex nibbles instead of base 10 bytes, so we need to join
			// nibbles by two.
			ii /= 2
			if i&1 == 1 {
				b |= ip[ii] << 4
			}
		}
		ip[ii] = b
		ptr = ptr[:idx]
	}
	return ip
}
func addrIP(addr net.Addr) (ip net.IP) {
// Avoid parsing/alloc when it's an IP already.
switch addr := addr.(type) {
case *net.IPAddr:
ip = addr.IP
case *net.UDPAddr:
ip = addr.IP
case *net.TCPAddr:
ip = addr.IP
default:
host, _, _ := net.SplitHostPort(addr.String())
ip = net.ParseIP(host)
}
return
}
|
package exchange
import (
"context"
"fmt"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestNewCSVFeed loads a daily CSV fixture and checks the first candle's
// timestamp and OHLCV values.
func TestNewCSVFeed(t *testing.T) {
	feed, err := NewCSVFeed("1d", PairFeed{
		Timeframe: "1d",
		Pair:      "BTCUSDT",
		File:      "../../testdata/btc-1d.csv",
	})
	// Check the error before touching feed: indexing the candle slice on a
	// failed load would panic instead of reporting a useful test failure.
	require.NoError(t, err)
	require.Len(t, feed.CandlePairTimeFrame["BTCUSDT--1d"], 14)
	candle := feed.CandlePairTimeFrame["BTCUSDT--1d"][0]
	require.Equal(t, "2021-04-26 00:00:00", candle.Time.UTC().Format("2006-01-02 15:04:05"))
	require.Equal(t, 49066.76, candle.Open)
	require.Equal(t, 54001.39, candle.Close)
	require.Equal(t, 48753.44, candle.Low)
	require.Equal(t, 54356.62, candle.High)
	require.Equal(t, 86310.8, candle.Volume)
}
// TestCSVFeed_CandlesByLimit checks that CandlesByLimit returns the first
// candle and consumes it from the feed's in-memory buffer.
func TestCSVFeed_CandlesByLimit(t *testing.T) {
	feed, err := NewCSVFeed("1d", PairFeed{
		Timeframe: "1d",
		Pair:      "BTCUSDT",
		File:      "../../testdata/btc-1d.csv",
	})
	require.NoError(t, err)
	candles, err := feed.CandlesByLimit(context.Background(), "BTCUSDT", "1d", 1)
	// NoError (instead of the previous require.Nil) matches the rest of the
	// file and prints the error message on failure.
	require.NoError(t, err)
	require.Len(t, candles, 1)
	require.Equal(t, "2021-04-26 00:00:00", candles[0].Time.UTC().Format("2006-01-02 15:04:05"))
	// should remove the candle from array
	require.Len(t, feed.CandlePairTimeFrame["BTCUSDT--1d"], 13)
	candle := feed.CandlePairTimeFrame["BTCUSDT--1d"][0]
	require.Equal(t, "2021-04-27 00:00:00", candle.Time.UTC().Format("2006-01-02 15:04:05"))
}
// TestCSVFeed_resample loads 1h candles and verifies their resampling into
// 1d candles: intermediate candles stay incomplete, the 23h candle closes
// the day, and a 180-day feed yields exactly 180 complete daily candles.
func TestCSVFeed_resample(t *testing.T) {
	feed, err := NewCSVFeed(
		"1d",
		PairFeed{
			Timeframe: "1h",
			Pair:      "BTCUSDT",
			File:      "../../testdata/btc-1h-2021-05-13.csv",
		})
	require.NoError(t, err)
	require.Len(t, feed.CandlePairTimeFrame["BTCUSDT--1d"], 24)
	require.Len(t, feed.CandlePairTimeFrame["BTCUSDT--1h"], 24)
	for _, candle := range feed.CandlePairTimeFrame["BTCUSDT--1d"][:23] {
		require.False(t, candle.Complete)
	}
	last := feed.CandlePairTimeFrame["BTCUSDT--1d"][23]
	require.Equal(t, int64(1620946800), last.Time.UTC().Unix()) // 23h
	assert.Equal(t, 49537.15, last.Open)
	assert.Equal(t, 49670.97, last.Close)
	assert.Equal(t, 46000.00, last.Low)
	assert.Equal(t, 51367.19, last.High)
	assert.Equal(t, 147332.0, last.Volume)
	assert.True(t, last.Complete)
	// load feed with 180 days with candles of 1h
	feed, err = NewCSVFeed(
		"1d",
		PairFeed{
			Timeframe: "1h",
			Pair:      "BTCUSDT",
			File:      "../../testdata/btc-1h.csv",
		})
	require.NoError(t, err)
	totalComplete := 0
	for _, candle := range feed.CandlePairTimeFrame["BTCUSDT--1d"] {
		if candle.Time.Hour() == 23 {
			// Daily candles built from 1h data close on the 23h source
			// candle. The previous assertion here was require.True(t, true),
			// a no-op that could never fail.
			require.True(t, candle.Complete)
		}
		if candle.Complete {
			totalComplete++
		}
	}
	require.Equal(t, 180, totalComplete)
}
// TestIsLastCandlePeriod covers isLastCandlePeriod for valid timeframe
// conversions, an invalid source timeframe, and an unsupported target.
func TestIsLastCandlePeriod(t *testing.T) {
	t.Run("valid", func(t *testing.T) {
		cases := []struct {
			source string
			target string
			at     time.Time
			want   bool
		}{
			{"1s", "1m", time.Date(2021, 1, 1, 23, 59, 59, 0, time.UTC), true},
			{"1h", "1h", time.Date(2021, 1, 1, 23, 59, 0, 0, time.UTC), true},
			{"1m", "1d", time.Date(2021, 1, 1, 23, 59, 0, 0, time.UTC), true},
			{"1m", "1d", time.Date(2021, 1, 1, 23, 58, 0, 0, time.UTC), false},
			{"1h", "1d", time.Date(2021, 1, 1, 23, 00, 0, 0, time.UTC), true},
			{"1h", "1d", time.Date(2021, 1, 1, 22, 00, 0, 0, time.UTC), false},
			{"1m", "5m", time.Date(2021, 1, 1, 0, 4, 0, 0, time.UTC), true},
			{"1m", "5m", time.Date(2021, 1, 1, 0, 1, 0, 0, time.UTC), false},
			{"1m", "10m", time.Date(2021, 1, 1, 0, 9, 0, 0, time.UTC), true},
			{"1m", "15m", time.Date(2021, 1, 1, 0, 14, 0, 0, time.UTC), true},
			{"1m", "15m", time.Date(2021, 1, 1, 0, 13, 0, 0, time.UTC), false},
			{"1h", "1w", time.Date(2021, 1, 2, 23, 00, 0, 0, time.UTC), true},
			{"1m", "30m", time.Date(2021, 1, 2, 0, 29, 0, 0, time.UTC), true},
			{"1m", "1h", time.Date(2021, 1, 2, 0, 59, 0, 0, time.UTC), true},
			{"1m", "2h", time.Date(2021, 1, 2, 1, 59, 0, 0, time.UTC), true},
			{"1m", "4h", time.Date(2021, 1, 2, 3, 59, 0, 0, time.UTC), true},
			{"1m", "12h", time.Date(2021, 1, 2, 23, 59, 0, 0, time.UTC), true},
		}
		for _, c := range cases {
			t.Run(fmt.Sprintf("%s to %s", c.source, c.target), func(t *testing.T) {
				got, err := isLastCandlePeriod(c.at, c.source, c.target)
				require.NoError(t, err)
				require.Equal(t, c.want, got)
			})
		}
	})
	t.Run("invalid source", func(t *testing.T) {
		got, err := isLastCandlePeriod(time.Now(), "invalid", "1h")
		require.Error(t, err)
		require.False(t, got)
	})
	t.Run("not supported interval", func(t *testing.T) {
		got, err := isLastCandlePeriod(time.Now(), "1d", "1y")
		require.EqualError(t, err, "invalid timeframe: 1y")
		require.False(t, got)
	})
}
|
package main
import (
"encoding/json"
"github.com/gin-gonic/gin"
"github.com/stretchr/testify/assert"
"log"
"net/http"
"net/http/httptest"
"testing"
)
// message is the JSON key used by the ping handler's response body.
const message = "message"
// drinkList holds the drink names seeded into the mock database by tests.
var drinkList = [3]string{"beer", "wine", "coke"}
// TestPingPong verifies that GET /ping answers 200 with {"message":"pong"}.
func TestPingPong(t *testing.T) {
	router := InitRoutes()
	rec := performRequest(router, "GET", "/ping")
	assert.Equal(t, http.StatusOK, rec.Code)
	// Decode the JSON body and inspect the "message" field.
	var response map[string]string
	err := json.Unmarshal(rec.Body.Bytes(), &response)
	assert.Nil(t, err)
	value, exists := response[message]
	assert.True(t, exists)
	// Compare against the expected payload.
	expected := gin.H{
		message: "pong",
	}
	assert.Equal(t, expected[message], value)
}
// TestRouteShowAllHighScore seeds the mock database with drinks under the
// global key and checks the listing endpoint returns only known drinks.
func TestRouteShowAllHighScore(t *testing.T) {
	createMockDatabase()
	key := GLOBALNAME + ":" + GLOBALLIST
	for _, drink := range drinkList {
		// set key that will be expected
		setDrinksInRedis(key, drink)
	}
	router := InitRoutes()
	w := performRequest(router, "GET", prefixURL)
	assert.Equal(t, http.StatusOK, w.Code)
	// Convert the JSON response to a map
	var response []Score
	if err := json.Unmarshal([]byte(w.Body.String()), &response); err != nil {
		// log.Fatal would call os.Exit, aborting the whole test binary and
		// skipping cleanup; fail only this test instead.
		log.Println(err)
		t.FailNow()
	}
	// Make some assertions on the correctness of the response.
	for _, resp := range response {
		assert.Contains(t, drinkList, resp.Drink)
	}
}
// TestRouteShowHighScoreNonExistent checks that querying an unseeded user
// yields 404.
func TestRouteShowHighScoreNonExistent(t *testing.T) {
	createMockDatabase()
	rec := performRequest(InitRoutes(), "GET", marvinURL)
	assert.Equal(t, http.StatusNotFound, rec.Code)
}
// TestRouteShowHighScore seeds the mock database for a single user and
// checks the per-user endpoint returns only known drinks.
func TestRouteShowHighScore(t *testing.T) {
	createMockDatabase()
	for _, drink := range drinkList {
		// set key that will be expected
		setDrinksInRedis(userKey, drink)
	}
	router := InitRoutes()
	w := performRequest(router, "GET", marvinURL)
	assert.Equal(t, http.StatusOK, w.Code)
	// Convert the JSON response to a map
	var response []Score
	if err := json.Unmarshal([]byte(w.Body.String()), &response); err != nil {
		// log.Fatal would call os.Exit, aborting the whole test binary and
		// skipping cleanup; fail only this test instead.
		log.Println(err)
		t.FailNow()
	}
	// Make some assertions on the correctness of the response.
	for _, resp := range response {
		assert.Contains(t, drinkList, resp.Drink)
	}
}
func performRequest(r http.Handler, method, path string) *httptest.ResponseRecorder {
req, _ := http.NewRequest(method, path, nil)
w := httptest.NewRecorder()
r.ServeHTTP(w, req)
return w
}
|
package bslib
import (
"fmt"
"time"
)
// SettingInfo carries the identifying field of the settings table row.
type SettingInfo struct {
	DatabaseId string `json:"database_id"`
}
// sqlCreateTableSettings creates the settings table holding database
// identity, keyword, encryption id, schema version and sync timestamps.
const sqlCreateTableSettings = `
	CREATE TABLE IF NOT EXISTS settings (
   		database_id 		CHAR PRIMARY KEY NOT NULL,
		keyword 			CHAR NOT NULL,
		crypt_id 			CHAR NOT NULL,
		database_version 	INT NOT NULL,
   		update_timestamp 	DATETIME NOT NULL,
		sync_timestamp  	DATETIME NOT NULL
	)
`
// sqlInsertInitialSettings is a fmt.Sprintf template; values are spliced
// directly into the SQL text (see initSettings).
const sqlInsertInitialSettings = `
	INSERT INTO settings (database_id,keyword,crypt_id,database_version,update_timestamp,sync_timestamp)
		VALUES ('%s','%s','%s','%d','%s','%s')
`
// initSettings inserts the initial settings row using the currently open
// transaction (sdb.sTX), stamping it with the current time as the update
// timestamp and constZeroTime as the (never-synced) sync timestamp.
// Returns a formatted error if no transaction is open or the insert fails.
func (sdb *storageDB) initSettings(dbID, keyWord, cryptID string) error {
	// An open transaction is required; fail fast otherwise.
	if sdb.sTX == nil {
		return formError(BSERR00003DbTransactionFailed, "initSettings")
	}
	lastUpdate := prepareTimeForDb(time.Now())
	// NOTE(review): values are interpolated into the SQL text via Sprintf
	// rather than bound as parameters; this is safe only while dbID,
	// keyWord and cryptID are trusted, internally generated values — confirm.
	sqlQuery := fmt.Sprintf(sqlInsertInitialSettings, dbID, keyWord, cryptID,
		defaultDbVersion, lastUpdate, constZeroTime)
	_, err := sdb.sTX.Exec(sqlQuery)
	if err != nil {
		return formError(BSERR00006DbInsertFailed, err.Error(), "initSettings")
	}
	return nil
}
|
package lintcode
/**
* @param nums: A list of integers
* @return: A integer indicate the sum of max subarray
*/
/**
 * maxSubArray returns the largest sum of any contiguous subarray of nums
 * (Kadane's algorithm, O(n) time / O(1) space).
 * @param nums: A list of integers
 * @return: A integer indicate the sum of max subarray; 0 for an empty list
 *          (previously nums[0] panicked on empty input)
 */
func maxSubArray(nums []int) int {
	if len(nums) == 0 {
		return 0
	}
	best := nums[0]    // best sum seen so far
	current := nums[0] // best sum of a subarray ending at the current index
	for _, n := range nums[1:] {
		// Either extend the running subarray or restart at n.
		if current+n > n {
			current += n
		} else {
			current = n
		}
		if current > best {
			best = current
		}
	}
	return best
}
// max returns the larger of a and b.
func max(a int, b int) int {
	if a >= b {
		return a
	}
	return b
}
|
package rtmp
import (
"fmt"
"net/url"
"reflect"
"strings"
"time"
"github.com/ubinte/livego/av"
"github.com/ubinte/livego/protocol/rtmp/core"
"github.com/ubinte/livego/utils/uid"
log "github.com/sirupsen/logrus"
)
// VirWriter represents a virtual player connection: packets are buffered in
// packetQueue and streamed to conn by SendPacket, with bandwidth statistics
// accumulated in WriteBWInfo.
type VirWriter struct {
	Uid    string // unique id for this player session
	closed bool   // set once the writer is finished; checked by Write
	av.RWBaser
	conn StreamReadWriteCloser
	packetQueue chan *av.Packet // buffered queue drained by SendPacket
	WriteBWInfo StaticsBW       // per-stream bandwidth counters
}
// NewVirWriter builds a writer bound to conn and starts its two goroutines:
// Check (detects a dead peer) and SendPacket (drains the packet queue).
func NewVirWriter(conn StreamReadWriteCloser) *VirWriter {
	w := &VirWriter{
		Uid:         uid.NewId(),
		conn:        conn,
		RWBaser:     av.NewRWBaser(time.Second * time.Duration(WriteTimeout)),
		packetQueue: make(chan *av.Packet, maxQueueNum),
		WriteBWInfo: StaticsBW{0, 0, 0, 0, 0, 0, 0, 0},
	}
	go w.Check()
	go func() {
		if err := w.SendPacket(); err != nil {
			log.Warning(err)
		}
	}()
	return w
}
// SaveStatics accumulates per-stream byte counters and, once at least
// SAVE_STATICS_INTERVAL milliseconds have elapsed, recomputes the audio and
// video rates and snapshots the counters.
func (v *VirWriter) SaveStatics(streamid uint32, length uint64, isVideoFlag bool) {
	info := &v.WriteBWInfo
	now := int64(time.Now().UnixNano() / 1e6)
	info.StreamId = streamid
	if isVideoFlag {
		info.VideoDatainBytes += length
	} else {
		info.AudioDatainBytes += length
	}
	switch {
	case info.LastTimestamp == 0:
		// First sample: just record the timestamp.
		info.LastTimestamp = now
	case now-info.LastTimestamp >= SAVE_STATICS_INTERVAL:
		diffSeconds := (now - info.LastTimestamp) / 1000
		info.VideoSpeedInBytesperMS = (info.VideoDatainBytes - info.LastVideoDatainBytes) * 8 / uint64(diffSeconds) / 1000
		info.AudioSpeedInBytesperMS = (info.AudioDatainBytes - info.LastAudioDatainBytes) * 8 / uint64(diffSeconds) / 1000
		info.LastVideoDatainBytes = info.VideoDatainBytes
		info.LastAudioDatainBytes = info.AudioDatainBytes
		info.LastTimestamp = now
	}
}
// Check reads (and discards) incoming chunks from the peer; a read error
// means the client is gone, so the writer is closed with that error.
func (v *VirWriter) Check() {
	var c core.ChunkStream
	for {
		err := v.conn.Read(&c)
		if err == nil {
			continue
		}
		v.Close(err)
		return
	}
}
// DropPacket sheds load when the packet queue is nearly full. It pulls up
// to maxQueueNum-84 packets off the queue, preferring to keep audio
// packets, video sequence headers and key frames by pushing them back, and
// only discarding an extra packet when the queue is still close to full.
func (v *VirWriter) DropPacket(pktQue chan *av.Packet, info av.Info) {
	log.Warningf("[%v] packet queue max!!!", info)
	for i := 0; i < maxQueueNum-84; i++ {
		tmpPkt, ok := <-pktQue
		// Try not to drop audio: requeue it unless the queue is about to
		// overflow again.
		if ok && tmpPkt.IsAudio {
			if len(pktQue) > maxQueueNum-2 {
				log.Debug("drop audio pkt")
				<-pktQue
			} else {
				pktQue <- tmpPkt
			}
		}
		if ok && tmpPkt.IsVideo {
			videoPkt, ok := tmpPkt.Header.(av.VideoPacketHeader)
			// Don't drop SPS/PPS config packets and don't drop key frames —
			// requeue them; other video packets are simply not re-added.
			if ok && (videoPkt.IsSeq() || videoPkt.IsKeyFrame()) {
				pktQue <- tmpPkt
			}
			if len(pktQue) > maxQueueNum-10 {
				log.Debug("drop video pkt")
				<-pktQue
			}
		}
	}
	log.Debug("packet queue len: ", len(pktQue))
}
// Write enqueues a packet for delivery by SendPacket. When the queue is
// nearly full, packets are shed via DropPacket instead of blocking.
// Returns an error if the writer has been closed.
func (v *VirWriter) Write(p *av.Packet) (err error) {
	err = nil
	if v.closed {
		err = fmt.Errorf("VirWriter closed")
		return
	}
	// Close may close packetQueue concurrently with this send; sending on a
	// closed channel panics, so recover converts that into an error return.
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("VirWriter has already been closed:%v", e)
		}
	}()
	if len(v.packetQueue) >= maxQueueNum-24 {
		v.DropPacket(v.packetQueue, v.Info())
	} else {
		v.packetQueue <- p
	}
	return
}
// SendPacket drains the packet queue and writes each packet to the
// underlying connection as an RTMP chunk, flushing after every write. It
// returns when the queue is closed (see Close) or a connection write fails.
func (v *VirWriter) SendPacket() error {
	// The concrete conn type is expected to expose Flush; it is resolved
	// once via reflection since StreamReadWriteCloser does not declare it.
	Flush := reflect.ValueOf(v.conn).MethodByName("Flush")
	var cs core.ChunkStream
	// Ranging over the channel replaces the previous explicit receive/ok
	// dance and removes the unreachable "return nil" after the loop.
	for p := range v.packetQueue {
		cs.Data = p.Data
		cs.Length = uint32(len(p.Data))
		cs.StreamID = p.StreamID
		cs.Timestamp = p.TimeStamp + v.BaseTimeStamp()
		switch {
		case p.IsVideo:
			cs.TypeID = av.TAG_VIDEO
		case p.IsMetadata:
			cs.TypeID = av.TAG_SCRIPTDATAAMF0
		default:
			cs.TypeID = av.TAG_AUDIO
		}
		v.SaveStatics(p.StreamID, uint64(cs.Length), p.IsVideo)
		v.SetPreTime()
		v.RecTimeStamp(cs.Timestamp, cs.TypeID)
		if err := v.conn.Write(cs); err != nil {
			v.closed = true
			return err
		}
		Flush.Call(nil)
	}
	// Queue was closed by Close: report it so the caller can log and stop.
	return fmt.Errorf("closed")
}
// Info describes this player connection: its UID, the connection URL and
// the stream key (the URL path without the leading slash).
func (v *VirWriter) Info() (ret av.Info) {
	ret.UID = v.Uid
	_, _, URL := v.conn.GetInfo()
	ret.URL = URL
	_url, err := url.Parse(URL)
	if err != nil {
		log.Warning(err)
	}
	// url.Parse returns a nil *URL on error; the previous code dereferenced
	// it unconditionally and would panic on a malformed URL.
	if _url != nil {
		ret.Key = strings.TrimLeft(_url.Path, "/")
	}
	ret.Inter = true
	return
}
// Close logs the player info, closes the packet queue (which makes
// SendPacket return) and closes the underlying connection.
// NOTE(review): v.closed is read and written from multiple goroutines
// without synchronization; two concurrent Close calls could both pass the
// !v.closed check and panic on a double close(v.packetQueue) — confirm
// callers serialize Close.
func (v *VirWriter) Close(err error) {
	log.Warning("player ", v.Info(), "closed: "+err.Error())
	if !v.closed {
		close(v.packetQueue)
	}
	v.closed = true
	v.conn.Close(err)
}
|
package middleware
import (
"log"
"net/http"
"github.com/Kaukov/gopher-translator/handlers"
"github.com/Kaukov/gopher-translator/utils"
)
// storedData is the package-level translation cache shared by the storage
// middleware and GetStoredData.
var storedData utils.Storage = utils.Storage{
	Words:     make(map[string]string),
	Sentences: make(map[string]string),
}
// NewTranslatorStorage - returns a storage middleware that caches each word
// and sentence that gets translated by the API
func NewTranslatorStorage(logger *log.Logger) func(http.Handler) http.Handler {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
next.ServeHTTP(rw, r)
switch next.(type) {
case *handlers.TranslatorWord:
tw, _ := next.(*handlers.TranslatorWord)
requestedWord := tw.GetRequestBodyWord()
if _, ok := storedData.Words[requestedWord]; !ok {
storedData.Words[requestedWord] = tw.GetResponseBodyWord()
}
case *handlers.TranslatorSentence:
ts, _ := next.(*handlers.TranslatorSentence)
requestedSentence := ts.GetRequestBodySentence()
if _, ok := storedData.Words[requestedSentence]; !ok {
storedData.Words[requestedSentence] = ts.GetResponseBodySentence()
}
}
})
}
}
// GetStoredData - returns the cached translated words and sentences
func GetStoredData() utils.Storage {
return storedData
}
|
package repositories
import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
	"os"
	"time"
)
// NewsAPIRepository is a repository to dealing with NewsAPI.
type NewsAPIRepository struct {
Client *http.Client
}
// NewNewsAPIRepository returns an instance of NewsAPIRepository.
func NewNewsAPIRepository() *NewsAPIRepository {
client := &http.Client{}
return &NewsAPIRepository{Client: client}
}
// CallTopHeadlinesAPI calls the TopHeadlinesAPI provided to NewsAPI.
func (r *NewsAPIRepository) CallTopHeadlinesAPI(category string) ([]byte, error) {
baseURL := "https://newsapi.org/v2/top-headlines"
values := url.Values{}
values.Set("country", "us")
values.Set("category", category)
query := values.Encode()
req, err := http.NewRequest("GET", baseURL+"?"+query, nil)
if err != nil {
return nil, err
}
req.Header.Set("Authorization", "Bearer "+os.Getenv("NEWS_API_TOKEN"))
resp, err := r.Client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode == 200 {
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
return body, nil
}
// TODO Error Message
return nil, nil
}
|
package handlers
import (
"testing"
"github.com/decentraland/content-service/mocks"
"github.com/golang/mock/gomock"
log "github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
)
// TestValidateRequestSize runs every sizeTestTable case against
// UploadServiceImpl.validateRequestSize with a mocked storage backend.
func TestValidateRequestSize(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	for _, tc := range sizeTestTable {
		storage := mocks.NewMockStorage(ctrl)
		for cid, size := range tc.sizes {
			storage.EXPECT().FileSize(cid).Return(size, nil).AnyTimes()
		}
		// Silence service logging during the test.
		logger := log.New()
		logger.SetLevel(log.PanicLevel)
		service := &UploadServiceImpl{Storage: storage, ParcelSizeLimit: tc.parcelMaxSize, Log: logger}
		tc.errorsAssertion(t, service.validateRequestSize(tc.r))
	}
}
// sizeCase describes one validateRequestSize scenario.
type sizeCase struct {
	name string // human-readable case description
	parcelMaxSize int64 // per-parcel size limit handed to the service
	r *UploadRequest // request under validation
	sizes map[string]int64 // file sizes reported by the mocked storage, keyed by CID
	errorsAssertion func(t assert.TestingT, object interface{}, msgAndArgs ...interface{}) bool // assert.Nil or assert.NotNil on the validation error
}
// sizeTestTable drives TestValidateRequestSize: each case pairs an upload
// request and mocked file sizes with the parcel limit and the assertion
// expected to hold on the validation result.
var sizeTestTable = []sizeCase{
	{
		name:          "Valid Size",
		parcelMaxSize: 1000,
		r: &UploadRequest{
			Scene:    &scene{Scene: sceneData{Parcels: []string{"0,0"}, Base: "0,0"}},
			Manifest: &[]FileMetadata{{Cid: "content", Name: "content"}},
		},
		sizes:           map[string]int64{"content": 1000},
		errorsAssertion: assert.Nil,
	}, {
		name:          "Valid Size - Multiple Elements",
		parcelMaxSize: 800,
		r: &UploadRequest{
			Scene: &scene{
				Scene: sceneData{Parcels: []string{"0,0"}, Base: "0,0"},
			},
			Manifest: &[]FileMetadata{{Cid: "content1", Name: "content1"}, {Cid: "content2", Name: "content2"}},
		},
		sizes:           map[string]int64{"content1": 400, "content2": 400},
		errorsAssertion: assert.Nil,
	}, {
		name:          "Invalid Size - Multiple Elements",
		parcelMaxSize: 800,
		r: &UploadRequest{
			Scene: &scene{
				Scene: sceneData{Parcels: []string{"0,0"}, Base: "0,0"},
			},
			Manifest: &[]FileMetadata{{Cid: "content1", Name: "content1"}, {Cid: "content2", Name: "content2"}},
		},
		sizes:           map[string]int64{"content1": 400, "content2": 410},
		errorsAssertion: assert.NotNil,
	}, {
		name:          "Valid Size - Multiple Elements and Parcels",
		parcelMaxSize: 800,
		r: &UploadRequest{
			Scene: &scene{
				Scene: sceneData{Parcels: []string{"0,0", "0,1"}, Base: "0,0"},
			},
			Manifest: &[]FileMetadata{{Cid: "content1", Name: "content1"}, {Cid: "content2", Name: "content2"}},
		},
		sizes:           map[string]int64{"content1": 400, "content2": 410},
		errorsAssertion: assert.Nil,
	},
}
|
package main
import "fmt"
// main demonstrates addBinary on a pair of sample binary strings.
func main() {
	fmt.Println(addBinary("100", "10"))
}
// addBinary adds two binary numbers given as strings of '0'/'1' digits and
// returns their sum as a binary string. The rewrite drops the unused loop
// counter, replaces the vet-flagged string(int) conversion and the O(n²)
// string concatenation with a preallocated byte buffer.
func addBinary(a string, b string) string {
	// Make a the longer operand.
	if len(a) < len(b) {
		a, b = b, a
	}
	n, m := len(a), len(b)
	// One extra slot for a possible final carry.
	buf := make([]byte, n+1)
	carry := byte(0)
	for i := 0; i < n; i++ {
		sum := a[n-1-i] - '0' + carry
		if i < m {
			sum += b[m-1-i] - '0'
		}
		buf[n-i] = sum%2 + '0'
		carry = sum / 2
	}
	if carry == 1 {
		buf[0] = '1'
		return string(buf)
	}
	return string(buf[1:])
}
|
// Copyright 2019 GoAdmin Core Team. All rights reserved.
// Use of this source code is governed by a Apache-2.0 style
// license that can be found in the LICENSE file.
package language
import "strings"
// cn is the Simplified Chinese translation set, keyed by the lowercase
// English source phrase.
var cn = LangSet{
	"managers":        "管理员管理",
	"name":            "用户名",
	"nickname":        "昵称",
	"role":            "角色",
	"createdat":       "创建时间",
	"updatedat":       "更新时间",
	"path":            "路径",
	"submit":          "提交",
	"filter":          "筛选",
	"new":             "新建",
	"export":          "导出",
	"action":          "操作",
	"toggle dropdown": "下拉",
	"delete":          "删除",
	"refresh":         "刷新",
	"back":            "返回",
	"reset":           "重置",
	"save":            "保存",
	"edit":            "编辑",
	"expand":          "展开",
	"collapse":        "折叠",
	"online":          "在线",
	"setting":         "设置",
	"sign out":        "登出",
	"home":            "首页",
	"all":             "全部",
	"more":            "更多",
	"browse":          "打开",
	"remove":          "移除",
	"permission manage": "权限管理",
	"menus manage":      "菜单管理",
	"roles manage":      "角色管理",
	"operation log":     "操作日志",
	"method":            "方法",
	"input":             "输入",
	"operation":         "操作",
	"menu name":         "菜单名",
	"reload succeeded":  "加载成功",
	"search":            "搜索",
	"permission denied": "没有权限",
	"error":             "错误",
	"success":           "成功",
	"fail":              "失败",
	"current page":      "当前页",
	"goadmin is now running. \nrunning in \"debug\" mode. switch to \"release\" mode in production.\n\n": "GoAdmin 启动成功。\n目前处于 \"debug\" 模式。请在生产环境中切换为 \"release\" 模式。\n\n",
	"wrong goadmin version, theme %s required goadmin version are %s": "错误的 GoAdmin 版本,当前主题 %s 需要 GoAdmin 版本为 %s",
	"wrong theme version, goadmin %s required version of theme %s is %s": "错误的主题版本, GoAdmin %s 需要主题 %s 的版本为 %s",
	"adapter is nil, import the default adapter or use addadapter method add the adapter": "适配器为空,请先 import 对应的适配器或使用 AddAdapter 方法引入",
	"are you sure to delete": "你确定要删除吗?",
	"yes":                    "确定",
	"confirm":                "确认",
	"got it":                 "知道了",
	"cancel":                 "取消",
	"refresh succeeded":      "刷新成功",
	"delete succeed":         "删除成功",
	"edit fail":              "编辑失败",
	"create fail":            "新增失败",
	"delete fail":            "删除失败",
	"confirm password":       "确认密码",
	"all method if empty":    "为空默认为所有方法",
	"detail":                 "详情",
	"avatar":                 "头像",
	"password":               "密码",
	"username":               "用户名",
	"slug":                   "标志",
	"permission":             "权限",
	"userid":                 "用户ID",
	"content":                "内容",
	"parent":                 "父级",
	"icon":                   "图标",
	"uri":                    "路径",
	"close":                  "关闭",
	"login":                  "登录",
	"login fail":             "登录失败",
	"admin":                  "管理",
	"user":                   "用户",
	"users":                  "用户",
	"roles":                  "角色",
	"menu":                   "菜单",
	"dashboard":              "仪表盘",
	"continue editing":       "继续编辑",
	"continue creating":      "继续新增",
	"username and password can not be empty":        "用户名密码不能为空",
	"operation not allow":                           "不允许的操作",
	"password does not match":                       "密码不一致",
	"should be unique":                              "需要保证唯一",
	"slug exists":                                   "标志已经存在了",
	"no corresponding options?":                     "没找到对应选项?",
	"create here.":                                  "在这里新建一个。",
	"use for login":                                 "用于登录",
	"use to display":                                "用来展示",
	"a path a line, without global prefix":          "一行一个路径,换行输入新路径,路径不包含全局路由前缀",
	"slug or http_path or name should not be empty": "标志或路径或权限名不能为空",
	"no roles":           "无角色",
	"no permission":      "没有权限",
	"fixed the sidebar":  "固定侧边栏",
	"enter fullscreen":   "进入全屏",
	"exit fullscreen":    "退出全屏",
	"wrong captcha":      "错误的验证码",
	"modify success":     "修改成功",
	"not found":          "找不到记录",
	"internal error":     "系统内部错误",
	"unauthorized":       "未认证",
	"login overdue, please login again": "登录信息过期,请重新登录",
	"login info":                        "登录信息",
	"initialize configuration":          "初始化配置",
	"initialize navigation buttons":     "初始化导航栏按钮",
	"initialize plugins":                "初始化插件",
	"initialize database connections":   "初始化数据库连接",
	"initialize success":                "初始化成功🍺🍺",
	"plugins":                           "插件",
	"plugin store":                      "插件商店",
	"get more plugins":                  "获取更多插件",
	"uninstalled":                       "未安装",
	"plugin setting":                    "插件设置",
	"showing <b>%s</b> to <b>%s</b> of <b>%s</b> entries": "显示第 <b>%s</b> 到第 <b>%s</b> 条记录,总共 <b>%s</b> 条记录",
	"second":  "秒",
	"seconds": "秒",
	"minute":  "分",
	"minutes": "分",
	"hour":    "小时",
	"hours":   "小时",
	"day":     "天",
	"days":    "天",
	"week":    "周",
	"weeks":   "周",
	"month":   "月",
	"months":  "月",
	"year":    "年",
	"years":   "年",
	// Site configuration center labels.
	"config.domain":          "网站域名",
	"config.language":        "网站语言",
	"config.url prefix":      "URL前缀",
	"config.theme":           "主题",
	"config.title":           "标题",
	"config.index url":       "首页URL",
	"config.login url":       "登录URL",
	"config.env":             "开发环境",
	"config.color scheme":    "颜色主题",
	"config.cdn url":         "cdn资源URL",
	"config.login title":     "登录标题",
	"config.auth user table": "登录用户表",
	"config.extra":           "额外配置",
	"config.store":           "文件存储设置",
	"config.databases":       "数据库设置",
	"config.general":         "通用",
	"config.log":             "日志",
	"config.site setting":    "网站设置",
	"config.custom":          "定制",
	"config.debug":           "Debug模式",
	"config.site off":        "关闭网站",
	"config.true":            "是",
	"config.false":           "否",
	"config.test":            "测试环境",
	"config.prod":            "生产环境",
	"config.local":           "本地环境",
	"config.logo":            "Logo",
	"config.mini logo":       "Mini Logo",
	"config.session life time":    "Session时长",
	"config.bootstrap file path":  "插件文件路径",
	"config.go mod file path":     "go.mod文件路径",
	"config.custom head html":     "自定义Head HTML",
	"config.custom foot html":     "自定义Foot HTML",
	"config.custom 404 html":      "自定义404页面",
	"config.custom 403 html":      "自定义403页面",
	"config.custom 500 html":      "自定义500页面",
	"config.hide config center entrance": "隐藏配置中心入口",
	"config.hide app info entrance":      "隐藏应用信息入口",
	"config.hide tool entrance":          "隐藏工具入口",
	"config.hide plugin entrance":        "隐藏插件列表入口",
	"config.footer info":                 "自定义底部信息",
	"config.login logo":                  "登录Logo",
	"config.no limit login ip":           "取消限制多IP登录",
	"config.operation log off":           "关闭操作日志",
	"config.allow delete operation log":  "允许删除操作日志",
	"config.animation type":              "动画类型",
	"config.animation duration":          "动画间隔(秒)",
	"config.animation delay":             "动画延迟(秒)",
	"config.file upload engine":          "文件上传引擎",
	"config.logger rotate":               "日志切割设置",
	"config.logger rotate max size":      "存储最大文件大小(m)",
	"config.logger rotate max backups":   "存储最多文件数",
	"config.logger rotate max age":       "最长存储时间(天)",
	"config.logger rotate compress":      "压缩",
	"config.info log path":               "信息日志存储路径",
	"config.error log path":              "错误日志存储路径",
	"config.access log path":             "访问日志存储路径",
	"config.info log off":                "关闭信息日志",
	"config.error log off":               "关闭错误日志",
	"config.access log off":              "关闭访问日志",
	"config.access assets log off":       "关闭静态资源访问日志",
	"config.sql log on":                  "打开SQL日志",
	"config.log level":                   "日志级别",
	"config.logger rotate encoder":       "日志encoder设置",
	"config.logger rotate encoder time key":       "Time Key",
	"config.logger rotate encoder level key":      "Level Key",
	"config.logger rotate encoder name key":       "Name Key",
	"config.logger rotate encoder caller key":     "Caller Key",
	"config.logger rotate encoder message key":    "Message Key",
	"config.logger rotate encoder stacktrace key": "Stacktrace Key",
	"config.logger rotate encoder level":          "Level字段编码",
	"config.logger rotate encoder time":           "Time字段编码",
	"config.logger rotate encoder duration":       "Duration字段编码",
	"config.logger rotate encoder caller":         "Caller字段编码",
	"config.logger rotate encoder encoding":       "输出格式",
	"config.capital":        "大写",
	"config.capitalcolor":   "大写带颜色",
	"config.lowercase":      "小写",
	"config.lowercasecolor": "小写带颜色",
	"config.seconds":        "秒",
	"config.nanosecond":     "纳秒",
	"config.microsecond":    "微秒",
	"config.millisecond":    "毫秒",
	"config.full path":      "完整路径",
	"config.short path":     "简短路径",
	"config.do not modify when you have not set up all assets": "不要修改,当你还没有设置好所有资源文件的时候",
	"config.it will work when theme is adminlte":               "当主题为adminlte时生效",
	"config.must bigger than 900 seconds":                      "必须大于900秒",
	"config.language." + CN: "中文",
	"config.language." + EN: "英文",
	"config.language." + JP: "日文",
	"config.language." + strings.ToLower(TC): "繁体中文",
	// NOTE(review): value below is English inside the Chinese set —
	// presumably it should be translated (e.g. 巴西葡萄牙语); confirm.
	"config.language." + PTBR:          "Brazilian Portuguese",
	"config.modify site config":         "修改网站配置",
	"config.modify site config success": "修改网站配置成功",
	"config.modify site config fail":    "修改网站配置失败",
	// Application / system information page labels.
	"system.system info":     "应用系统信息",
	"system.application":     "应用信息",
	"system.application run": "应用运行信息",
	"system.system":          "系统信息",
	"system.process_id":      "进程ID",
	"system.golang_version":  "Golang版本",
	"system.server_uptime":   "服务运行时间",
	"system.current_goroutine":     "当前 Goroutines 数量",
	"system.current_memory_usage":  "当前内存使用量",
	"system.total_memory_allocated": "所有被分配的内存",
	"system.memory_obtained":        "内存占用量",
	"system.pointer_lookup_times":   "指针查找次数",
	"system.memory_allocate_times":  "内存分配次数",
	"system.memory_free_times":      "内存释放次数",
	"system.current_heap_usage":     "当前 Heap 内存使用量",
	"system.heap_memory_obtained":   "Heap 内存占用量",
	"system.heap_memory_idle":       "Heap 内存空闲量",
	"system.heap_memory_in_use":     "正在使用的 Heap 内存",
	"system.heap_memory_released":   "被释放的 Heap 内存",
	"system.heap_objects":           "Heap 对象数量",
	"system.bootstrap_stack_usage":  "启动 Stack 使用量",
	"system.stack_memory_obtained":  "被分配的 Stack 内存",
	"system.mspan_structures_usage":    "MSpan 结构内存使用量",
	"system.mspan_structures_obtained": "被分配的 MSpan 结构内存",
	"system.mcache_structures_usage":   "MCache 结构内存使用量",
	"system.mcache_structures_obtained": "被分配的 MCache 结构内存",
	"system.profiling_bucket_hash_table_obtained": "被分配的剖析哈希表内存",
	"system.gc_metadata_obtained":                 "被分配的 GC 元数据内存",
	"system.other_system_allocation_obtained":     "其它被分配的系统内存",
	"system.next_gc_recycle":                      "下次 GC 内存回收量",
	"system.last_gc_time":                         "距离上次 GC 时间",
	"system.total_gc_time":                        "GC 执行时间总量",
	"system.total_gc_pause":                       "GC 暂停时间总量",
	"system.last_gc_pause":                        "上次 GC 暂停时间",
	"system.gc_times":                             "GC 执行次数",
	"system.cpu_logical_core": "cpu逻辑核数",
	"system.cpu_core":         "cpu物理核数",
	"system.os_platform":      "系统平台",
	"system.os_family":        "系统家族",
	"system.os_version":       "系统版本",
	"system.load1":            "1分钟内负载",
	"system.load5":            "5分钟内负载",
	"system.load15":           "15分钟内负载",
	"system.mem_total":        "总内存",
	"system.mem_available":    "可用内存",
	"system.mem_used":         "使用内存",
	"system.app_name":         "应用名",
	"system.go_admin_version": "应用版本",
	"system.theme_name":       "主题",
	"system.theme_version":    "主题版本",
	// CRUD code-generation tool labels.
	"tool.tool":                 "工具",
	"tool.table":                "表格",
	"tool.connection":           "连接",
	"tool.output path is empty": "输出路径为空",
	"tool.package":              "包名",
	"tool.output":               "输出路径",
	"tool.field":                "字段",
	"tool.title":                "标题",
	"tool.field name":           "字段名",
	"tool.db type":              "数据类型",
	"tool.form type":            "表单类型",
	"tool.generate table model": "生成CRUD模型",
	"tool.primarykey":           "主键",
	"tool.field filterable":     "可筛选",
	"tool.field sortable":       "可排序",
	"tool.yes":                  "是",
	"tool.no":                   "否",
	"tool.hide":                 "隐藏",
	"tool.show":                 "显示",
	"tool.generate success":     "生成成功",
	"tool.display":              "显示",
	"tool.use absolute path":    "使用绝对路径",
	"tool.basic info":           "基本信息",
	"tool.table info":           "表格信息",
	"tool.form info":            "表单信息",
	"tool.field editable":       "允许编辑",
	"tool.field can add":        "允许新增",
	"tool.info field editable":  "可编辑",
	"tool.field default":        "默认值",
	"tool.filter area":          "筛选框",
	"tool.new button":           "新建按钮",
	"tool.export button":        "导出按钮",
	"tool.edit button":          "编辑按钮",
	"tool.delete button":        "删除按钮",
	"tool.extra import package": "导入包",
	"tool.detail button":        "详情按钮",
	"tool.filter button":        "筛选按钮",
	"tool.row selector":         "列选择按钮",
	"tool.pagination":           "分页",
	"tool.query info":           "查询信息",
	"tool.filter form layout":   "筛选表单布局",
	"tool.generate":             "生成",
	"tool.generated tables":     "生成过的表格",
	"tool.description":          "描述",
	"tool.label":                "标签",
	"tool.image":                "图片",
	"tool.bool":                 "布尔",
	"tool.link":                 "链接",
	"tool.fileSize":             "文件大小",
	"tool.date":                 "日期",
	"tool.icon":                 "Icon",
	"tool.dot":                  "标点",
	"tool.progressBar":          "进度条",
	"tool.loading":              "Loading",
	"tool.downLoadable":         "可下载",
	"tool.copyable":             "可复制",
	"tool.carousel":             "图片轮播",
	"tool.qrcode":               "二维码",
	"tool.field hide":           "隐藏",
	"tool.field display":        "显示",
	"tool.table permission":     "生成表格权限",
	"tool.extra code":           "额外代码",
	"tool.detail display":       "显示",
	"tool.detail info":          "详情页信息",
	"tool.follow list page":     "跟随列表页",
	"tool.inherit from list page":     "继承列表页",
	"tool.independent from list page": "独立",
	"tool.continue edit checkbox":     "继续编辑按钮",
	"tool.continue new checkbox":      "继续新增按钮",
	"tool.reset button":               "重设按钮",
	"tool.back button":                "返回按钮",
	"tool.field display normal":       "显示",
	"tool.field diplay hide":          "隐藏",
	"tool.field diplay edit hide":     "编辑隐藏",
	"tool.field diplay create hide":   "新建隐藏",
	"tool.generate table model success": "生成成功",
	"tool.generate table model fail":    "生成失败",
	// Names for generated table permissions.
	"generator.query":                 "查询",
	"generator.show edit form page":   "编辑页显示",
	"generator.show create form page": "新建记录页显示",
	"generator.edit":                  "编辑",
	"generator.create":                "新建",
	"generator.delete":                "删除",
	"generator.export":                "导出",
	// Plugin store labels.
	"plugin.plugin":        "插件",
	"plugin.plugin detail": "插件详情",
	"plugin.introduction":  "介绍",
	"plugin.website":       "网站",
	"plugin.version":       "版本",
	"plugin.created at":    "创建日期",
	"plugin.updated at":    "更新日期",
	"plugin.provided by %s": "由 %s 提供",
	"plugin.upgrade":        "升级",
	"plugin.install":        "安装",
	"plugin.info":           "详细信息",
	"plugin.download":       "下载",
	"plugin.buy":            "购买",
	"plugin.downloading":    "下载中",
	"plugin.login":          "登录",
	"plugin.login to goadmin member system": "登录到GoAdmin会员系统",
	"plugin.account":                        "账户名",
	"plugin.password":                       "密码",
	"plugin.learn more":                     "了解更多",
	"plugin.no account? click %s here %s to register.": "没有账号?点击%s这里%s注册。",
	"plugin.download fail, wrong name":                 "下载失败,错误的参数",
	"plugin.change to debug mode first":                "先切换到debug模式",
	"plugin.download fail, plugin not exist":           "下载失败,插件不存在",
	"plugin.download fail":                             "下载失败",
	"plugin.golang develop environment does not exist": "golang开发环境不存在",
	"plugin.download success, restart to install":      "下载成功,重启程序进行安装",
	"plugin.restart to install":                        "重启程序进行安装",
	"plugin.can not connect to the goadmin remote server": "连接到GoAdmin远程服务器失败,请检查您的网络连接。",
	// Built-in admin plugin description.
	"admin.basic admin": "基础Admin",
	"admin.a built-in plugins of goadmin which help you to build a crud manager platform quickly.": "一个内置GoAdmin插件,帮助您快速搭建curd简易管理后台。",
	"admin.official": "GoAdmin官方",
}
|
package test
import (
"fmt"
"reflect"
"testing"
)
// NOTE(review): this init is empty and performs no setup; it can likely be
// removed.
func init() {
}
// IsEmpty reports whether a is "empty": an untyped nil, a nil pointer, a
// zero int, or an empty string. All other kinds are considered non-empty.
// The kind (and, for strings, the value) is printed for debugging.
func IsEmpty(a interface{}) bool {
	v := reflect.ValueOf(a)
	fmt.Println("kind:", v.Kind())
	switch v.Kind() {
	case reflect.Invalid:
		// reflect.ValueOf(nil) yields an invalid Value.
		return true
	case reflect.Ptr:
		return v.IsNil()
	case reflect.Int:
		return v.Int() == 0
	case reflect.String:
		fmt.Println("v.string", v.String())
		return len(v.String()) <= 0
	default:
		return false
	}
}
// Test_empty exercises IsEmpty across ints, nil, a nil pointer and strings,
// printing the results for manual inspection.
func Test_empty(t *testing.T) {
	var p *string
	p = nil
	fmt.Println("1:", IsEmpty(1))
	fmt.Println("0:", IsEmpty(0))
	// Bug fix: the "-1" line previously evaluated IsEmpty(1) again instead
	// of IsEmpty(-1).
	fmt.Println("-1:", IsEmpty(-1))
	fmt.Println("-----------------")
	fmt.Println("nil:", IsEmpty(nil))
	fmt.Println("-----------------")
	fmt.Println("point:", IsEmpty(p))
	fmt.Println("-----------------")
	fmt.Println("str :", IsEmpty(""))
}
|
// Copyright (C) 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"flag"
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"text/tabwriter"
"github.com/google/gapid/core/app"
"github.com/google/gapid/core/data/protoutil"
"github.com/google/gapid/core/log"
"github.com/google/gapid/gapis/api"
"github.com/google/gapid/gapis/client"
"github.com/google/gapid/gapis/service"
"github.com/google/gapid/gapis/service/path"
)
// pipeVerb implements the "dump_pipeline" sub-command; it embeds the
// shared pipeline command-line flags.
type pipeVerb struct{ PipelineFlags }
// init registers the dump_pipeline verb with the application framework so
// it is exposed as a sub-command.
func init() {
	verb := &pipeVerb{}
	app.AddVerb(&app.Verb{
		Name: "dump_pipeline",
		ShortHelp: "Prints the bound pipeline and descriptor sets for a point in a .gfxtrace file",
		Action: verb,
	})
}
// Run implements the dump_pipeline verb: it connects to GAPIS, loads the
// capture named by the single positional argument, resolves the command
// index to inspect (defaulting to the last command in the capture) and
// prints the pipeline bound at that point.
func (verb *pipeVerb) Run(ctx context.Context, flags flag.FlagSet) error {
	if flags.NArg() != 1 {
		app.Usage(ctx, "Exactly one gfx trace file expected, got %d", flags.NArg())
		return nil
	}
	client, err := getGapis(ctx, verb.Gapis, GapirFlags{})
	if err != nil {
		return log.Err(ctx, err, "Failed to connect to the GAPIS server")
	}
	defer client.Close()
	// Fixed: the local was named "filepath", shadowing the path/filepath
	// package for the rest of the function.
	capturePath, err := filepath.Abs(flags.Arg(0))
	ctx = log.V{"filepath": capturePath}.Bind(ctx)
	if err != nil {
		return log.Err(ctx, err, "Could not find capture file")
	}
	c, err := client.LoadCapture(ctx, capturePath)
	if err != nil {
		return log.Err(ctx, err, "Failed to load the capture file")
	}
	// No explicit command index given: default to the capture's last command.
	if len(verb.At) == 0 {
		boxedCapture, err := client.Get(ctx, c.Path(), nil)
		if err != nil {
			return log.Err(ctx, err, "Failed to load the capture")
		}
		verb.At = []uint64{uint64(boxedCapture.(*service.Capture).NumCommands) - 1}
	}
	cmd := c.Command(verb.At[0], verb.At[1:]...)
	pipelineData, err := verb.getBoundPipelineResource(ctx, client, cmd)
	if err != nil {
		return log.Err(ctx, err, "Failed to get bound pipeline resource data")
	}
	return verb.printPipelineData(ctx, client, pipelineData)
}
// getBoundPipelineResource scans all pipeline resources of the capture and
// returns the first one that is bound and matches the requested pipeline
// type (compute when -compute is set, graphics otherwise) at the point of
// the given command.
func (verb *pipeVerb) getBoundPipelineResource(ctx context.Context, c client.Client, cmd *path.Command) (*api.Pipeline, error) {
	boxedResources, err := c.Get(ctx, (&path.Resources{Capture: cmd.Capture}).Path(), nil)
	if err != nil {
		return nil, err
	}
	targetType := api.Pipeline_GRAPHICS
	if verb.Compute {
		targetType = api.Pipeline_COMPUTE
	}
	resources := boxedResources.(*service.Resources)
	for _, typ := range resources.Types {
		// Only pipeline resources are of interest; skip textures, shaders, etc.
		if typ.Type != api.ResourceType_PipelineResource {
			continue
		}
		for _, resource := range typ.Resources {
			boxedResourceData, err := c.Get(ctx, cmd.ResourceAfter(resource.ID).Path(), nil)
			if err != nil {
				return nil, log.Err(ctx, err, "Failed to load the pipeline resource")
			}
			resourceData := boxedResourceData.(*api.ResourceData)
			// Two OneOf unwraps: ResourceData -> pipeline wrapper -> Pipeline.
			pipelineData := protoutil.OneOf(protoutil.OneOf(resourceData)).(*api.Pipeline)
			if pipelineData.Bound && pipelineData.Type == targetType {
				return pipelineData, nil
			}
		}
	}
	return nil, fmt.Errorf("No bound %v pipeline found", targetType)
}
// printPipelineData writes a human-readable dump of the pipeline to stdout:
// optionally the shader sources per stage, then every descriptor binding
// with its type, the stages using it and the bound value(s).
func (verb *pipeVerb) printPipelineData(ctx context.Context, c client.Client, data *api.Pipeline) error {
	// Get the names for descriptor types and image layouts
	typeNames, err := getConstantSetMap(ctx, c, data.API, data.BindingTypeConstantsIndex)
	if err != nil {
		return err
	}
	layoutNames, err := getConstantSetMap(ctx, c, data.API, data.ImageLayoutConstantsIndex)
	if err != nil {
		return err
	}
	w := tabwriter.NewWriter(os.Stdout, 4, 4, 0, ' ', 0)
	defer w.Flush()
	if verb.Print.Shaders {
		fmt.Fprintf(w, "%v shader stages:\n", len(data.Stages))
		for _, stage := range data.Stages {
			fmt.Fprintf(w, "\t%v stage:\n", stage.Type)
			fmt.Fprintf(w, "%v\n", stage.Shader.Source)
		}
	}
	fmt.Fprintf(w, "%v bindings:\n", len(data.Bindings))
	for _, binding := range data.Bindings {
		fmt.Fprintf(w, "Binding #%v.%v:\n", binding.Set, binding.Binding)
		// Fall back to the raw numeric value when the constant set has no name.
		typeName, ok := typeNames[binding.Type]
		if !ok {
			typeName = strconv.FormatUint(uint64(binding.Type), 10)
		}
		fmt.Fprintf(w, "\tType: \t%v\n", typeName)
		stageTypes := make([]api.StageType, len(binding.StageIdxs))
		for i, idx := range binding.StageIdxs {
			stageTypes[i] = data.Stages[idx].Type
		}
		if len(stageTypes) > 0 {
			fmt.Fprintf(w, "\tUsed by stages: \t%v\n", strings.Trim(fmt.Sprint(stageTypes), "[]"))
		} else {
			fmt.Fprintf(w, "\tUnused by pipeline\n")
		}
		if len(binding.Values) > 1 {
			fmt.Fprintf(w, "\tBound values:\n")
		}
		for i, val := range binding.Values {
			// First pass: classify the bound value for the summary line.
			var valueType string
			switch protoutil.OneOf(val).(type) {
			case *api.BindingValue_Unbound:
				valueType = "Unbound"
			case *api.BindingValue_ImageInfo:
				valueType = "Image"
			case *api.BindingValue_BufferInfo:
				valueType = "Buffer"
			case *api.BindingValue_TexelBufferView:
				valueType = "Texel Buffer View"
			}
			if len(binding.Values) > 1 {
				fmt.Fprintf(w, "\t%v: %v\n", i, valueType)
			} else {
				fmt.Fprintf(w, "\tBound value: %v\n", valueType)
			}
			// Second pass: print the details specific to each value kind.
			switch v := protoutil.OneOf(val).(type) {
			case *api.BindingValue_ImageInfo:
				fmt.Fprintf(w, "\t\tSampler: \t%v\n", v.ImageInfo.Sampler)
				fmt.Fprintf(w, "\t\tImage View: \t%v\n", v.ImageInfo.ImageView)
				layoutName, ok := layoutNames[v.ImageInfo.ImageLayout]
				if !ok {
					layoutName = strconv.FormatUint(uint64(v.ImageInfo.ImageLayout), 10)
				}
				fmt.Fprintf(w, "\t\tImage Layout: \t%v\n", layoutName)
			case *api.BindingValue_BufferInfo:
				fmt.Fprintf(w, "\t\tHandle: \t%v\n", v.BufferInfo.Buffer)
				fmt.Fprintf(w, "\t\tOffset: \t%v\n", v.BufferInfo.Offset)
				fmt.Fprintf(w, "\t\tRange: \t%v\n", v.BufferInfo.Range)
			case *api.BindingValue_TexelBufferView:
				fmt.Fprintf(w, "\t\tBuffer View: \t%v\n", v.TexelBufferView)
			}
		}
	}
	return nil
}
// getConstantSetMap fetches the constant set identified by (api, index)
// and returns a value-to-name lookup table. An index of -1 means "no
// constant set"; an empty map is returned in that case.
func getConstantSetMap(ctx context.Context, c client.Client, api *path.API, index int32) (map[uint32]string, error) {
	names := map[uint32]string{}
	if index == -1 {
		return names, nil
	}
	boxedConstants, err := c.Get(ctx, (&path.ConstantSet{
		API:   api,
		Index: uint32(index),
	}).Path(), nil)
	if err != nil {
		return nil, log.Errf(ctx, err, "Failed to load constant set (%v, %v)", api, index)
	}
	constants := boxedConstants.(*service.ConstantSet)
	for _, constant := range constants.Constants {
		names[uint32(constant.Value)] = constant.Name
	}
	return names, nil
}
|
// Copyright 2019 The Dice Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package healthcheck provides types and methods for periodic health checks.
package healthcheck
import (
"errors"
"fmt"
"github.com/dominikbraun/dice/entity"
"github.com/dominikbraun/dice/registry"
"net"
"time"
)
var (
	// ErrInvalidDeployments is returned by New when no service map is provided.
	ErrInvalidDeployments = errors.New("provided deployments are invalid")
)
// Config concludes the user-configurable properties for health checks.
type Config struct {
	// Interval is the pause between the starts of two periodic checks.
	Interval time.Duration `json:"interval"`
	// When Timeout expires without response, an instance is considered dead.
	Timeout time.Duration `json:"timeout"`
}
// HealthCheck is a simple health checker that can run checks periodically as
// well as manually. It will ping all instances of a provided service map and
// mark each instance as dead or alive on each check.
type HealthCheck struct {
	config Config // user-supplied interval/timeout settings
	// services points at the shared service map whose instances get pinged.
	services *map[string]*registry.Service
	// stop receives a value when Stop is called, ending RunPeriodically.
	stop chan bool
}
// New creates a new HealthCheck instance. It will take all service instances
// from a service map into account. A nil service map is rejected with
// ErrInvalidDeployments.
func New(config Config, services *map[string]*registry.Service) (*HealthCheck, error) {
	if services == nil {
		return nil, ErrInvalidDeployments
	}
	return &HealthCheck{
		config:   config,
		services: services,
		stop:     make(chan bool),
	}, nil
}
// RunPeriodically runs periodic health checks that will start every time the
// configured interval expires. This function should run in an own goroutine.
// It returns nil once Stop has been called.
func (hc *HealthCheck) RunPeriodically() error {
	intervalTick := time.NewTicker(hc.config.Interval)
	// Fixed: the ticker was never stopped, leaking its resources for the
	// lifetime of the process after the loop exits.
	defer intervalTick.Stop()
	for {
		select {
		case <-intervalTick.C:
			hc.checkServices()
		case <-hc.stop:
			return nil
		}
	}
}
// RunManually triggers a manual, single health check. This function should be
// called in an own goroutine as well, since the health check can take a while.
func (hc *HealthCheck) RunManually() error {
	hc.checkServices()
	// Always nil today; the error return leaves room for future failures.
	return nil
}
// checkServices loops over all services and their deployments. Each instance
// will be pinged and marked as dead or alive after the timeout expires.
func (hc *HealthCheck) checkServices() {
	for _, s := range *hc.services {
		// Disabled services are skipped entirely; their liveness flags are
		// left untouched.
		if s.Entity.IsEnabled {
			for _, d := range s.Deployments {
				d.Instance.IsAlive = hc.pingInstance(d.Node, d.Instance)
				// ToDo: If all instances are dead, check if the node is alive
			}
		}
	}
}
// pingInstance reads the address from an instance and attempts to establish a
// connection to that address. The dialer will use the configured timeout.
func (hc *HealthCheck) pingInstance(node *entity.Node, instance *entity.Instance) bool {
	// NOTE(review): the address is assembled as "<node name>:<instance URL>",
	// which only yields a valid host:port if instance.URL holds a port —
	// verify against how entity.Instance.URL is populated.
	address := fmt.Sprintf("%s:%v", node.Name, instance.URL)
	conn, err := net.DialTimeout("tcp", address, hc.config.Timeout)
	if err != nil {
		return false
	}
	// Best-effort close; the connection only served as a liveness probe.
	_ = conn.Close()
	return true
}
// Stop gracefully stops an health check. Running checks will not be affected.
// Note: the send blocks until RunPeriodically receives it, so calling Stop
// without a running periodic check will block forever.
func (hc *HealthCheck) Stop() error {
	hc.stop <- true
	return nil
}
|
package ds
// Repeat returns the input string c concatenated five times.
func Repeat(c string) string {
	out := ""
	for n := 5; n > 0; n-- {
		out += c
	}
	return out
}
|
/*
* @Description:
* @Author: JiaYe
* @Date: 2021-04-12 13:08:03
* @LastEditTime: 2021-04-12 13:16:33
* @LastEditors: JiaYe
* @Descripttion:
* @version:
*/
package main
import "fmt"
// main demonstrates LIFO execution of multiple defers: the function
// deferred last runs first, after the surrounding function body finishes.
func main() {
	// Arguments to a deferred call are evaluated at the defer statement;
	// the call itself runs when main returns.
	report := func(label string) {
		fmt.Println(label)
	}
	defer report("defer 1")
	defer report("defer 2")
	defer report("defer 3")
	fmt.Println("over")
	// Expected output:
	// over
	// defer 3
	// defer 2
	// defer 1
}
|
package integration
import (
"context"
"fmt"
"io/ioutil"
"log"
"os"
"path"
"testing"
"time"
"bldy.build/build"
"bldy.build/build/builder"
"bldy.build/build/graph"
)
// tests enumerates the build labels exercised by both TestGraph and
// TestBuild, together with the error each run is expected to produce.
var tests = []struct {
	name string // sub-test name
	label string // build label passed to graph.New
	err error // expected error (currently always nil)
}{
	{
		name: "empty",
		label: "//empty:nothing",
		err: nil,
	},
	{
		name: "binary",
		label: "//cc:hello",
		err: nil,
	},
	{
		name: "library",
		label: "//cc:hellowithlib",
		err: nil,
	},
	{
		name: "uname",
		label: "//docker:uname",
		err: nil,
	},
	{
		name: "filemount",
		label: "//docker:filemount",
		err: nil,
	},
}
// setup switches the working directory to the testdata directory next to
// this test file and returns its path.
func setup(t *testing.T) string {
	t.Helper()
	// Fixed: errors from Getwd/Chdir were silently discarded; a bad working
	// directory would make every subsequent test fail confusingly.
	workdir, err := os.Getwd()
	if err != nil {
		t.Fatal(err)
	}
	wd := path.Join(workdir, "testdata")
	if err := os.Chdir(wd); err != nil {
		t.Fatal(err)
	}
	return wd
}
// TestGraph checks that a build graph can be constructed for every label
// in the shared test table.
func TestGraph(t *testing.T) {
	wd := setup(t)
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			built, err := graph.New(wd, tc.label)
			if err != nil {
				t.Fatal(err)
			}
			if built == nil {
				t.Fail()
			}
		})
	}
}
// testNotifier forwards builder progress callbacks to the test log.
type testNotifier struct {
	t *testing.T
}
// Update logs a node's status change during the build.
func (t *testNotifier) Update(n *graph.Node) {
	switch n.Status {
	case build.Building:
		t.t.Logf("Started building %s ", n.Label.String())
	default:
		// Non-building statuses are logged with their numeric status code.
		t.t.Logf("Started %d %s ", n.Status, n.Label.String())
	}
}
// Error marks the test as failed and logs the build error.
func (t *testNotifier) Error(err error) {
	t.t.Fail()
	t.t.Logf("Errored:%+v\n", err)
}
// Done logs the total build duration once the build has finished.
func (t *testNotifier) Done(d time.Duration) {
	t.t.Logf("Finished building in %s\n", d)
}
// TestBuild builds every label in the shared test table into a fresh
// temporary output directory and lists the files it produced.
func TestBuild(t *testing.T) {
	wd := setup(t)
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			g, err := graph.New(wd, test.label)
			if err != nil {
				t.Fatal(err)
			}
			if g == nil {
				t.Fail()
			}
			// Fixed: the TempDir error was ignored and the directory was
			// never removed, leaking one directory per test run.
			tmpDir, err := ioutil.TempDir("", fmt.Sprintf("bldy_test_%s_", test.name))
			if err != nil {
				t.Fatal(err)
			}
			defer os.RemoveAll(tmpDir)
			b := builder.New(
				g,
				&builder.Config{
					Fresh:    true,
					BuildOut: &tmpDir,
				},
				&testNotifier{t},
			)
			cpus := 1
			ctx := context.Background()
			b.Execute(ctx, cpus)
			// Fixed: log.Fatal would abort the entire test binary; report
			// through the testing API instead.
			files, err := ioutil.ReadDir(tmpDir)
			if err != nil {
				t.Fatal(err)
			}
			for _, file := range files {
				_ = file
				// debug.Println(file.Name())
			}
		})
	}
}
|
// Copyright 2020 The Hugo Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package js
import (
"fmt"
"io/ioutil"
"path"
"strings"
"github.com/spf13/cast"
"github.com/gohugoio/hugo/helpers"
"github.com/gohugoio/hugo/hugolib/filesystems"
"github.com/gohugoio/hugo/media"
"github.com/gohugoio/hugo/resources/internal"
"github.com/mitchellh/mapstructure"
"github.com/evanw/esbuild/pkg/api"
"github.com/gohugoio/hugo/resources"
"github.com/gohugoio/hugo/resources/resource"
)
// defaultTarget is the esbuild language target used when none is configured.
const defaultTarget = "esnext"
// Options holds the user-facing configuration for the JS build, decoded
// from template parameters by DecodeOptions.
type Options struct {
	// If not set, the source path will be used as the base target path.
	// Note that the target path's extension may change if the target MIME type
	// is different, e.g. when the source is TypeScript.
	TargetPath string
	// Whether to minify to output.
	Minify bool
	// The language target.
	// One of: es2015, es2016, es2017, es2018, es2019, es2020 or esnext.
	// Default is esnext.
	Target string
	// External dependencies, e.g. "react".
	Externals []string `hash:"set"`
	// User defined symbols.
	Defines map[string]interface{}
	// What to use instead of React.createElement.
	JSXFactory string
	// What to use instead of React.Fragment.
	JSXFragment string
}
// internalOptions mirrors Options with normalized values (defaulted target,
// stringified defines); it is what gets hashed into the transformation key.
type internalOptions struct {
	TargetPath string
	Minify bool
	Target string
	JSXFactory string
	JSXFragment string
	Externals []string `hash:"set"`
	Defines map[string]string
	// These are currently not exposed in the public Options struct,
	// but added here to make the options hash as stable as possible for
	// whenever we do.
	TSConfig string
}
// DecodeOptions decodes the given parameter map into an Options value,
// normalizing the target path to slashes without a leading one and
// lower-casing the language target. A nil map yields zero-value Options.
func DecodeOptions(m map[string]interface{}) (opts Options, err error) {
	if m == nil {
		return
	}
	// Fixed: the decode was performed twice, and its error was not checked
	// before the decoded values were used.
	if err = mapstructure.WeakDecode(m, &opts); err != nil {
		return
	}
	if opts.TargetPath != "" {
		opts.TargetPath = helpers.ToSlashTrimLeading(opts.TargetPath)
	}
	opts.Target = strings.ToLower(opts.Target)
	return
}
// Client provides the JS build as a resource transformation.
type Client struct {
	rs *resources.Spec // resource spec used to run transformations
	sfs *filesystems.SourceFilesystem // filesystem the sources are read from
}
// New creates a Client for the given source filesystem and resource spec.
func New(fs *filesystems.SourceFilesystem, rs *resources.Spec) *Client {
	return &Client{rs: rs, sfs: fs}
}
// buildTransformation is the resource transformation that runs esbuild.
type buildTransformation struct {
	options internalOptions
	rs *resources.Spec
	sfs *filesystems.SourceFilesystem
}
// Key identifies this transformation (and its options) for caching.
func (t *buildTransformation) Key() internal.ResourceTransformationKey {
	return internal.NewResourceTransformationKey("jsbuild", t.options)
}
// Transform runs esbuild over the resource in ctx and writes the produced
// JavaScript to ctx.To. The output path defaults to the source path with a
// .js extension unless TargetPath is set. It returns an error for unknown
// targets or media types, or when esbuild reports a failure.
func (t *buildTransformation) Transform(ctx *resources.ResourceTransformationCtx) error {
	ctx.OutMediaType = media.JavascriptType
	if t.options.TargetPath != "" {
		ctx.OutPath = t.options.TargetPath
	} else {
		ctx.ReplaceOutPathExtension(".js")
	}
	// Map the configured target string onto esbuild's enum.
	var target api.Target
	switch t.options.Target {
	case defaultTarget:
		target = api.ESNext
	case "es6", "es2015":
		target = api.ES2015
	case "es2016":
		target = api.ES2016
	case "es2017":
		target = api.ES2017
	case "es2018":
		target = api.ES2018
	case "es2019":
		target = api.ES2019
	case "es2020":
		target = api.ES2020
	default:
		return fmt.Errorf("invalid target: %q", t.options.Target)
	}
	var loader api.Loader
	switch ctx.InMediaType.SubType {
	// TODO(bep) ESBuild support a set of other loaders, but I currently fail
	// to see the relevance. That may change as we start using this.
	case media.JavascriptType.SubType:
		loader = api.LoaderJS
	case media.TypeScriptType.SubType:
		loader = api.LoaderTS
	case media.TSXType.SubType:
		loader = api.LoaderTSX
	case media.JSXType.SubType:
		loader = api.LoaderJSX
	default:
		return fmt.Errorf("unsupported Media Type: %q", ctx.InMediaType)
	}
	src, err := ioutil.ReadAll(ctx.From)
	if err != nil {
		return err
	}
	// esbuild resolves relative imports against the real directory of the
	// source file.
	sdir, sfile := path.Split(ctx.SourcePath)
	sdir = t.sfs.RealFilename(sdir)
	buildOptions := api.BuildOptions{
		Outfile:           "",
		Bundle:            true,
		Target:            target,
		MinifyWhitespace:  t.options.Minify,
		MinifyIdentifiers: t.options.Minify,
		MinifySyntax:      t.options.Minify,
		Defines:     t.options.Defines,
		Externals:   t.options.Externals,
		JSXFactory:  t.options.JSXFactory,
		JSXFragment: t.options.JSXFragment,
		Tsconfig:    t.options.TSConfig,
		Stdin: &api.StdinOptions{
			Contents:   string(src),
			Sourcefile: sfile,
			ResolveDir: sdir,
			Loader:     loader,
		},
	}
	result := api.Build(buildOptions)
	if len(result.Errors) > 0 {
		return fmt.Errorf("%s", result.Errors[0].Text)
	}
	if len(result.OutputFiles) != 1 {
		return fmt.Errorf("unexpected output count: %d", len(result.OutputFiles))
	}
	// Fixed: the Write error was silently discarded, so a failed write
	// would be reported as success.
	if _, err := ctx.To.Write(result.OutputFiles[0].Contents); err != nil {
		return err
	}
	return nil
}
// Process applies the JS build transformation with the given options to res.
func (c *Client) Process(res resources.ResourceTransformer, opts Options) (resource.Resource, error) {
	return res.Transform(
		&buildTransformation{rs: c.rs, sfs: c.sfs, options: toInternalOptions(opts)},
	)
}
// toInternalOptions maps the public Options onto the internal option set,
// filling in the default language target and stringifying the defines map.
func toInternalOptions(opts Options) internalOptions {
	internal := internalOptions{
		TargetPath:  opts.TargetPath,
		Minify:      opts.Minify,
		Target:      opts.Target,
		Externals:   opts.Externals,
		JSXFactory:  opts.JSXFactory,
		JSXFragment: opts.JSXFragment,
	}
	if internal.Target == "" {
		internal.Target = defaultTarget
	}
	if opts.Defines != nil {
		internal.Defines = cast.ToStringMapString(opts.Defines)
	}
	return internal
}
|
package main
import (
"reflect"
"testing"
)
// TestSorter table-tests Sorter against combinations of the supported
// flags (case folding, reverse, unique, numeric, column selection).
func TestSorter(t *testing.T) {
	tests := []struct {
		name string
		args flags
		filename string
		want []string
		wantErr bool
	}{
		{
			name: "simple sort",
			args: flags{},
			filename: "data.txt",
			want: []string{"Apple", "BOOK", "Book", "Go", "Hauptbahnhof", "January", "January", "Napkin"},
			wantErr: false,
		},
		{
			name: "sort without register",
			args: flags{
				flagF: true,
			},
			filename: "data.txt",
			want: []string{"Apple", "Book", "BOOK", "Go", "Hauptbahnhof", "January", "January", "Napkin"},
			wantErr: false,
		},
		{
			// NOTE(review): wantErr is true here although want is populated;
			// confirm whether a reversed sort is really expected to error.
			name: "sort in reverse order",
			args: flags{
				flagR: true,
			},
			filename: "data.txt",
			want: []string{"Napkin", "January", "January", "Hauptbahnhof", "Go", "Book", "BOOK", "Apple"},
			wantErr: true,
		},
		{
			name: "sort with unique bad rows",
			args: flags{
				flagU: true,
				flagK: -1,
			},
			filename: "data.txt",
			want: nil,
			wantErr: true,
		},
		{
			name: "sort numbers",
			args: flags{
				flagN: true,
			},
			filename: "numbers.txt",
			want: []string{"1", "2", "3", "4", "4", "5", "5", "6", "7", "8", "9", "10"},
			wantErr: false,
		},
		{
			name: "sort with unique",
			args: flags{
				flagU: true,
			},
			filename: "data.txt",
			want: []string{"Apple", "BOOK", "Book", "Go", "Hauptbahnhof", "January", "Napkin"},
			wantErr: false,
		},
		{
			name: "sort with unique and without register",
			args: flags{
				flagU: true,
				flagF: true,
			},
			filename: "data.txt",
			want: []string{"Apple", "BOOK", "Go", "Hauptbahnhof", "January", "Napkin"},
			wantErr: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := Sorter(tt.filename, tt.args)
			// Fixed: the original combined both checks with && inside one
			// condition, so a wrong result was only reported when the error
			// expectation ALSO failed (and vice versa). Check independently.
			if (err != nil) != tt.wantErr {
				t.Errorf("Sorter() error = %v, wantErr %v", err, tt.wantErr)
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("Sorter() = %v, want %v", got, tt.want)
			}
		})
	}
}
|
package main
import "fmt"
/*
A struct is a collection of fields.
Struct fields are accessed with a dot, much like properties on a
JavaScript object.
*/
/*
Go has pointers, but no pointer arithmetic.
Struct fields can be accessed through a struct pointer, and the
indirection is transparent.
*/
// Classes pairs a student with a teacher for the demo in main.
type Classes struct {
	student string
	teacher string
}
/*
Struct literals: a new struct value is allocated by listing field values
(StructName{...}). The key: value form lets you set only a subset of
fields, in any order. The special & prefix yields a pointer to the
struct literal.
*/
// Vertex is a simple two-field struct used to demo struct literals.
type Vertex struct {
	X, Y int
}
// Package-level examples of the different struct literal forms.
var (
	p = Vertex{1, 2} // has type Vertex
	q = &Vertex{1, 2} // has type *Vertex
	r = Vertex{X: 1} // Y:0 is implicit
	s = Vertex{} // X:0 and Y:0
)
/*
这里在给自己回忆一下大一学过的C++的指针
int p = 10
这里定义一个整型的变量p 给他赋值10
然后我们去定义一个整形的指针p1
int* p1 = &p
因为 指针就是用来存储地址的变量,所以现在p1就是存储p的存储地址
然后我们使用取变量运算符* 去取p1的变量并且去赋值
*p1 = 20
你会发现 p的值就变成了20
因为p会首先去找它的地址上对应的这个数据,结果发现,它变成了20 所以就是20
当然了 p1也是存储地址的 &p1就知道了它的地址,
但是呢 int* pp = &p1 编译器会出错 为什么呢
因为*p1可以得到地址上int类型的值
但是pp
我们如果那样定义了 结果就是*pp == p1 p1是一个指针类型 不是值类型
所以就知道为什么定义的不对了
我们应该这样来定义 定义一个指向指针的指针
int** pp = &p1
*/
/*
In fact, Go also allows a literal that provides just the type (all
fields take their zero values).
*/
// main demonstrates value vs. pointer semantics of structs: assigning a
// struct copies it, while a pointer aliases the original value.
func main() {
	var class_24 = Classes{"John", "Nick"}
	fmt.Println(class_24)
	class_24.teacher = "axx"
	fmt.Println(class_24)
	class_23 := &class_24 // pointer: shares storage with class_24
	class_25 := class_24 // copy: independent of class_24
	class_25.teacher = "25"
	fmt.Println(class_24) //no difference
	class_23.teacher = "chong"
	fmt.Println(class_24) //show difference
	fmt.Println(class_25) //no difference
	fmt.Println(p, q, r, s)
}
|
package rabbit
import (
"common/clog"
"github.com/streadway/amqp"
"strconv"
)
// QueMsg is the stream of deliveries returned by ConsumeQueue.
type QueMsg <-chan amqp.Delivery

// Rabbit holds a RabbitMQ connection, one channel on it, and the two
// queues (send and receive) declared over that channel.
type Rabbit struct {
	Host string
	Port int
	user string
	pw string
	conn *amqp.Connection // underlying AMQP connection
	ch *amqp.Channel // channel multiplexed over conn
	sque amqp.Queue // send queue (declared by UdrSendQueueDeclare)
	rque amqp.Queue // receive queue (declared by ReqRecvQueueDeclare)
}
// RabbitMgr abstracts the RabbitMQ operations used by this package so the
// concrete Rabbit implementation can be swapped out (e.g. in tests).
type RabbitMgr interface {
	ConnectRabbit(host string, port int, id string, pw string)
	UdrSendQueueDeclare(qn string)
	ReqRecvQueueDeclare(qn string)
	PublishToQueue(msg string) error
	ConsumeQueue() (QueMsg, error)
	CloseChanRabbit()
	CloseConnRabbit()
}

// Compile-time check that *Rabbit satisfies RabbitMgr.
var _ RabbitMgr = &Rabbit{}

// log is the package-wide logger.
var log = clog.GetLogger()
// NewRabbitManager returns an unconnected Rabbit; call ConnectRabbit
// before using any other method.
func NewRabbitManager() RabbitMgr {
	rmgr := &Rabbit{}
	return rmgr
}
// ConnectRabbit dials the broker at amqp://id:pw@host:port/ and opens a
// channel on the connection. Any failure panics via log.Panic.
// NOTE(review): credentials are concatenated into the URL unescaped;
// special characters in id/pw would produce an invalid URI — verify inputs.
func (r *Rabbit) ConnectRabbit(host string, port int, id string, pw string) {
	r.Host = host
	r.Port = port
	r.user = id
	r.pw = pw
	var err error
	r.conn, err = amqp.Dial("amqp://" + id + ":" + pw + "@" + r.Host + ":" + strconv.Itoa(r.Port) + "/")
	if err != nil {
		log.Panic(err)
	}
	r.ch, err = r.conn.Channel()
	if err != nil {
		log.Panic(err)
	}
	log.Info("Successfully connect to RabbitMQ...")
}
// CloseConnRabbit closes the AMQP connection.
// NOTE(review): the Close error is discarded and success is logged
// unconditionally.
func (r *Rabbit) CloseConnRabbit() {
	r.conn.Close()
	log.Info("Successfully close RabbitMQ connection...")
}
// CloseChanRabbit closes the AMQP channel (the connection stays open).
// NOTE(review): the Close error is discarded and success is logged
// unconditionally.
func (r *Rabbit) CloseChanRabbit() {
	r.ch.Close()
	log.Info("Successfully close RabbitMQ channel...")
}
// UdrSendQueueDeclare declares the durable queue named qn and stores it as
// the send queue used by PublishToQueue. Failure panics via log.Panic.
func (r *Rabbit) UdrSendQueueDeclare(qn string) {
	var err error
	r.sque, err = r.ch.QueueDeclare(
		qn, // name
		true, // durable
		false, // delete when unused
		false, // exclusive
		false, // no-wait
		nil, // arguments
	)
	if err != nil {
		log.Panic(err)
	}
}
// ReqRecvQueueDeclare declares the durable queue named qn and stores it as
// the receive queue used by ConsumeQueue. Failure panics via log.Panic.
func (r *Rabbit) ReqRecvQueueDeclare(qn string) {
	var err error
	r.rque, err = r.ch.QueueDeclare(
		qn, // name
		true, // durable
		false, // delete when unused
		false, // exclusive
		false, // no-wait
		nil, // arguments
	)
	if err != nil {
		log.Panic(err)
	}
}
// PublishToQueue sends msg as a persistent text/plain message to the queue
// previously declared via UdrSendQueueDeclare, using the default exchange.
func (r *Rabbit) PublishToQueue(msg string) error {
	publishing := amqp.Publishing{
		DeliveryMode: amqp.Persistent,
		ContentType:  "text/plain",
		Body:         []byte(msg),
	}
	return r.ch.Publish(
		"",          // exchange (default)
		r.sque.Name, // routing key
		false,       // mandatory
		false,       // immediate
		publishing,
	)
}
// ConsumeQueue starts consuming from the receive queue with manual
// acknowledgements (auto-ack disabled) and returns the delivery stream.
func (r *Rabbit) ConsumeQueue() (QueMsg, error) {
	msgs, err := r.ch.Consume(
		r.rque.Name, // queue
		"", // consumer
		false, // auto-ack
		false, // exclusive
		false, // no-local
		false, // no-wait
		nil, // args
	)
	if err != nil {
		return nil, err
	}
	return msgs, nil
}
// ResponseAck blocks on pCh and then acknowledges the delivery on success,
// or rejects it with requeue on failure.
// NOTE(review): the Ack/Reject errors are discarded; consider logging them.
func ResponseAck(d amqp.Delivery, pCh chan bool) {
	result := <-pCh
	if result {
		d.Ack(false)
	} else {
		d.Reject(true)
	}
}
|
package tonberry
import (
"github.com/zeroshade/Go-SDL/sdl"
"image"
)
// Camera is a viewport rectangle in SDL coordinates.
type Camera struct {
	sdl.Rect
}

// NewCamera converts a Go image.Rectangle into an SDL-backed Camera.
func NewCamera(bounds image.Rectangle) Camera {
	var c Camera
	c.Rect = sdl.RectFromGoRect(bounds)
	return c
}
// Screen wraps the SDL surface the game draws onto.
type Screen struct {
	*sdl.Surface
}

// Event is this package's alias for SDL events.
type Event interface {
	sdl.Event
}

// QuitEvent wraps SDL's quit event.
type QuitEvent struct {
	*sdl.QuitEvent
}

// KeyboardEvent wraps SDL's keyboard event.
type KeyboardEvent struct {
	*sdl.KeyboardEvent
}
// MapRGB maps an r/g/b triple onto a pixel value in the format of the
// game's screen surface.
func MapRGB(game Game, r, g, b uint8) uint32 {
	return sdl.MapRGB(game.GetScreen().Format, r, g, b)
}
// load_image loads an image file, converts it to the display format and
// keys out cyan (0x00FFFF) as the transparent color. It returns nil when
// loading fails.
// NOTE(review): the underscore name is non-idiomatic Go but is kept since
// other files in this package may call it.
func load_image(file string) *sdl.Surface {
	loadedImage := sdl.Load(file)
	var optimizedImg *sdl.Surface
	if loadedImage != nil {
		optimizedImg = sdl.DisplayFormat(loadedImage)
		// The original surface is no longer needed after conversion.
		loadedImage.Free()
	}
	if optimizedImg != nil {
		colorKey := sdl.MapRGB(optimizedImg.Format, 0x00, 0xFF, 0xFF)
		optimizedImg.SetColorKey(sdl.SRCCOLORKEY, colorKey)
	}
	return optimizedImg
}
// apply_surface blits src onto dst at (x, y) without clipping.
func apply_surface(x, y int16, src, dst *sdl.Surface) {
	apply_surface_clip(x, y, src, dst, nil)
}

// apply_surface_clip blits the clip region of src onto dst at (x, y);
// a nil clip blits the whole surface.
func apply_surface_clip(x, y int16, src, dst *sdl.Surface, clip *sdl.Rect) {
	offset := sdl.Rect{X: x, Y: y}
	dst.Blit(&offset, src, clip)
}
|
package ds
import (
"fmt"
"math"
)
// MaxHeap is a binary max-heap backed by a slice; the largest element is
// always at index 0.
type MaxHeap struct {
	Nodes []int
}
// NewMaxHeap builds a max-heap by inserting every element of arr in turn.
func NewMaxHeap(arr []int) *MaxHeap {
	h := &MaxHeap{}
	for _, v := range arr {
		h.Add(v)
	}
	return h
}
// Add appends value to the heap and sifts it up to restore the max-heap
// property.
func (heap *MaxHeap) Add(value int) {
	heap.Nodes = append(heap.Nodes, value)
	heap.Heapify(len(heap.Nodes)-1, value)
}
// Heapify sifts the element at childIndex up until the max-heap property
// holds. childValue must equal Nodes[childIndex]; it is passed separately
// to avoid re-reading the slice inside the loop.
func (heap *MaxHeap) Heapify(childIndex int, childValue int) {
	parentIndex := getParentIndex(childIndex)
	for childIndex != 0 && heap.Nodes[parentIndex] <= childValue {
		// Parent is not larger: swap child and parent, continue upward.
		heap.Nodes[parentIndex], heap.Nodes[childIndex] = heap.Nodes[childIndex], heap.Nodes[parentIndex]
		childIndex = parentIndex
		parentIndex = getParentIndex(childIndex)
	}
}
// GetRootValue returns the maximum element (the root).
// Note: panics with an index-out-of-range error if the heap is empty.
func (heap *MaxHeap) GetRootValue() int {
	return heap.Nodes[0]
}
// Print writes the heap's backing array to stdout, inserting a newline
// whenever isEdge reports true for the index.
func (heap *MaxHeap) Print() {
	for i, node := range heap.Nodes {
		fmt.Printf("%d ", node)
		if isEdge(i) {
			fmt.Println()
		}
	}
}
// getParentIndex returns the index of the parent of currentIndex in the
// implicit binary-heap layout; index 0 maps to itself.
func getParentIndex(currentIndex int) int {
	// Fixed: the previous version rounded an already-integral value through
	// math.Ceil(float64(...)), which is a no-op. Integer division truncates
	// toward zero in Go, so (0-1)/2 == 0 as before.
	return (currentIndex - 1) / 2
}
// isEdge reports whether currentIndex is the right child of its parent
// (parent*2+2 == index); Print uses it to decide where to break lines.
// NOTE(review): this is true for EVERY right child, not only for the last
// index of a heap level — confirm the intended line-break pattern.
func isEdge(currentIndex int) bool {
	parentIndex := getParentIndex(currentIndex)
	return parentIndex * 2 + 2 == currentIndex
}
package service
import (
"context"
"fmt"
"testing"
"time"
"github.com/go-ocf/cloud/resource-aggregate/cqrs/eventbus/nats"
pbCQRS "github.com/go-ocf/cloud/resource-aggregate/pb"
pbRS "github.com/go-ocf/cloud/resource-directory/pb/resource-shadow"
kitNetGrpc "github.com/go-ocf/kit/net/grpc"
"github.com/go-ocf/kit/security/certManager"
"github.com/kelseyhightower/envconfig"
"github.com/panjf2000/ants"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/grpc/codes"
)
// TestResourceShadow_RetrieveResourcesValues table-tests
// RetrieveResourcesValues against combinations of device-id, resource-id
// and type filters, after wiring a resource projection on top of the test
// event store and a NATS subscriber configured from the environment.
func TestResourceShadow_RetrieveResourcesValues(t *testing.T) {
	type args struct {
		req *pbRS.RetrieveResourcesValuesRequest
	}
	tests := []struct {
		name string
		args args
		wantStatusCode codes.Code
		wantErr bool
		want map[string]*pbRS.ResourceValue
	}{
		{
			// Resource0's device is not in the shadow's device list below.
			name: "list unauthorized device",
			args: args{
				req: &pbRS.RetrieveResourcesValuesRequest{
					AuthorizationContext: &pbCQRS.AuthorizationContext{},
					DeviceIdsFilter: []string{Resource0.DeviceId},
				},
			},
			wantStatusCode: codes.NotFound,
			wantErr: true,
		},
		{
			name: "filter by resource Id",
			args: args{
				req: &pbRS.RetrieveResourcesValuesRequest{
					AuthorizationContext: &pbCQRS.AuthorizationContext{},
					ResourceIdsFilter: []string{Resource1.Id, Resource2.Id},
				},
			},
			want: map[string]*pbRS.ResourceValue{
				Resource1.Id: {
					ResourceId: Resource1.Id,
					DeviceId: Resource1.DeviceId,
					Href: Resource1.Href,
					Content: &Resource1.Content,
					Types: Resource1.ResourceTypes,
				},
				Resource2.Id: {
					ResourceId: Resource2.Id,
					DeviceId: Resource2.DeviceId,
					Href: Resource2.Href,
					Content: &Resource2.Content,
					Types: Resource2.ResourceTypes,
				},
			},
		},
		{
			name: "filter by device Id",
			args: args{
				req: &pbRS.RetrieveResourcesValuesRequest{
					AuthorizationContext: &pbCQRS.AuthorizationContext{},
					DeviceIdsFilter: []string{Resource1.DeviceId},
				},
			},
			want: map[string]*pbRS.ResourceValue{
				Resource1.Id: {
					ResourceId: Resource1.Id,
					DeviceId: Resource1.DeviceId,
					Href: Resource1.Href,
					Content: &Resource1.Content,
					Types: Resource1.ResourceTypes,
				},
				Resource3.Id: {
					ResourceId: Resource3.Id,
					DeviceId: Resource3.DeviceId,
					Href: Resource3.Href,
					Content: &Resource3.Content,
					Types: Resource3.ResourceTypes,
				},
			},
		},
		{
			name: "filter by type",
			args: args{
				req: &pbRS.RetrieveResourcesValuesRequest{
					AuthorizationContext: &pbCQRS.AuthorizationContext{},
					TypeFilter: []string{Resource2.ResourceTypes[0]},
				},
			},
			want: map[string]*pbRS.ResourceValue{
				Resource1.Id: {
					ResourceId: Resource1.Id,
					DeviceId: Resource1.DeviceId,
					Href: Resource1.Href,
					Content: &Resource1.Content,
					Types: Resource1.ResourceTypes,
				},
				Resource2.Id: {
					ResourceId: Resource2.Id,
					DeviceId: Resource2.DeviceId,
					Href: Resource2.Href,
					Content: &Resource2.Content,
					Types: Resource2.ResourceTypes,
				},
			},
		},
		{
			name: "filter by device Id and type",
			args: args{
				req: &pbRS.RetrieveResourcesValuesRequest{
					AuthorizationContext: &pbCQRS.AuthorizationContext{},
					DeviceIdsFilter: []string{Resource1.DeviceId},
					TypeFilter: []string{Resource1.ResourceTypes[0]},
				},
			},
			want: map[string]*pbRS.ResourceValue{
				Resource1.Id: {
					ResourceId: Resource1.Id,
					DeviceId: Resource1.DeviceId,
					Href: Resource1.Href,
					Content: &Resource1.Content,
					Types: Resource1.ResourceTypes,
				},
			},
		},
		{
			name: "list all resources of user",
			args: args{
				req: &pbRS.RetrieveResourcesValuesRequest{
					AuthorizationContext: &pbCQRS.AuthorizationContext{},
				},
			},
			want: map[string]*pbRS.ResourceValue{
				Resource1.Id: {
					ResourceId: Resource1.Id,
					DeviceId: Resource1.DeviceId,
					Href: Resource1.Href,
					Content: &Resource1.Content,
					Types: Resource1.ResourceTypes,
				},
				Resource2.Id: {
					ResourceId: Resource2.Id,
					DeviceId: Resource2.DeviceId,
					Href: Resource2.Href,
					Content: &Resource2.Content,
					Types: Resource2.ResourceTypes,
				},
				Resource3.Id: {
					ResourceId: Resource3.Id,
					DeviceId: Resource3.DeviceId,
					Href: Resource3.Href,
					Content: &Resource3.Content,
					Types: Resource3.ResourceTypes,
				},
			},
		},
	}
	// Test fixture: TLS config from the DIAL_* environment, a single-worker
	// pool, a NATS subscriber and a projection over the test event store.
	// NOTE(review): assert.NoError (below) does not stop the test on
	// failure while the subsequent require calls do — consider using
	// require consistently for setup.
	var cmconfig certManager.Config
	err := envconfig.Process("DIAL", &cmconfig)
	assert.NoError(t, err)
	dialCertManager, err := certManager.NewCertManager(cmconfig)
	require.NoError(t, err)
	defer dialCertManager.Close()
	tlsConfig := dialCertManager.GetClientTLSConfig()
	pool, err := ants.NewPool(1)
	require.NoError(t, err)
	var natsCfg nats.Config
	err = envconfig.Process("", &natsCfg)
	require.NoError(t, err)
	resourceSubscriber, err := nats.NewSubscriber(natsCfg, pool.Submit, func(err error) { require.NoError(t, err) }, nats.WithTLS(tlsConfig))
	require.NoError(t, err)
	ctx := kitNetGrpc.CtxWithIncomingToken(context.Background(), "b")
	resourceProjection, err := NewProjection(ctx, "test", testCreateEventstore(), resourceSubscriber, time.Second)
	require.NoError(t, err)
	// The shadow only authorizes Resource1's and Resource2's devices;
	// Resource0's device is deliberately excluded.
	rd := NewResourceShadow(resourceProjection, []string{ /*Resource0.DeviceId,*/ Resource1.DeviceId, Resource2.DeviceId})
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			fmt.Println(tt.name)
			var got map[string]*pbRS.ResourceValue
			gotStatusCode, err := rd.RetrieveResourcesValues(context.Background(), tt.args.req, func(r *pbRS.ResourceValue) error {
				if got == nil {
					got = make(map[string]*pbRS.ResourceValue)
				}
				got[r.ResourceId] = r
				return nil
			})
			if tt.wantErr {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
			}
			assert.Equal(t, tt.wantStatusCode, gotStatusCode)
			assert.Equal(t, tt.want, got)
			got = nil
		})
	}
}
|
package datastruct
import "fmt"
// ExampleQueue demonstrates the Queue with int payloads and with struct
// pointers; the Output block below is verified by the example runner.
func ExampleQueue() {
	// test for type int
	intQueue := NewQueue(10)
	intQueue.Push(10)
	intQueue.Push(1)
	intQueue.Push(-5)
	fmt.Println(intQueue.Front())
	intQueue.Pop()
	intQueue.Push(5)
	for !intQueue.IsEmpty() {
		fmt.Println(intQueue.Front())
		intQueue.Pop()
	}
	// test for type struct
	type pair struct {
		first, second int
	}
	structQueue := NewQueue(10)
	structQueue.Push(&pair{first: 3, second: 10})
	structQueue.Push(&pair{first: -1, second: 2})
	structQueue.Push(&pair{first: 5, second: -5})
	// Front returns an interface value; assert back to the concrete type.
	top := structQueue.Front().(*pair)
	fmt.Println(top.first, top.second)
	structQueue.Pop()
	structQueue.Push(&pair{0, 3})
	for !structQueue.IsEmpty() {
		top = structQueue.Front().(*pair)
		fmt.Println(top.first, top.second)
		structQueue.Pop()
	}
	// Output:
	// 10
	// 1
	// -5
	// 5
	// 3 10
	// -1 2
	// 5 -5
	// 0 3
}
|
package rpc
import (
"context"
"time"
mlog "github.com/jinmukeji/go-pkg/v2/log"
"github.com/micro/go-micro/v2/server"
"github.com/sirupsen/logrus"
)
var (
	// log is the package global logger
	log = mlog.StandardLogger()
)

// Structured-log field keys and result markers used by LogWrapper.
const (
	logCidKey = "cid"
	logLatencyKey = "latency"
	logRpcCallKey = "rpc.call"
	// rpcMetadata = "[RPC METADATA]"
	rpcFailed = "[RPC ERR]"
	rpcOk = "[RPC OK]"
)
// ContextLogger returns a log entry carrying the correlation id (cid)
// extracted from ctx.
func ContextLogger(ctx context.Context) *logrus.Entry {
	cid := ContextGetCid(ctx)
	return log.WithField(logCidKey, cid)
}
// LogWrapper is a handler wrapper that logs server request.
// It records the RPC method, the correlation id and the call latency,
// logging at Warn level with the error on failure and Info on success.
func LogWrapper(fn server.HandlerFunc) server.HandlerFunc {
	return func(ctx context.Context, req server.Request, rsp interface{}) error {
		start := time.Now()
		err := fn(ctx, req, rsp)
		// Fixed: time.Since(start) is exactly time.Now().Sub(start); the
		// previous hand-rolled version (and its comment claiming a
		// formatting benefit) added nothing.
		latency := time.Since(start)
		cid := ContextGetCid(ctx)
		l := log.
			WithField(logRpcCallKey, req.Method()).
			WithField(logCidKey, cid).
			WithField(logLatencyKey, latency.String())
		// Log rpc call execution result
		if err != nil {
			l.WithError(err).Warn(rpcFailed)
		} else {
			l.Info(rpcOk)
		}
		return err
	}
}
// flatMetadata 将 Metadata 打平为 "k=v" 形式的字符串序列
// func flatMetadata(md metadata.Metadata) string {
// var buffer bytes.Buffer
// for k, v := range md {
// buffer.WriteString(strconv.Quote(k))
// buffer.WriteString("=")
// buffer.WriteString(strconv.Quote(v))
// buffer.WriteString(" ")
// }
// return buffer.String()
// }
|
package dto
import (
"github.com/artrey/go-bank-service/pkg/models"
)
// Transaction is the JSON transfer object for a money transfer.
// From/To/Mcc/Icon are optional relations, typically filled in through
// TransactionBuilder after FromModelTransaction copies the scalar
// fields from the persistence model.
type Transaction struct {
    Id          int64   `json:"id"`
    From        *Card   `json:"from"`
    To          *Card   `json:"to"`
    Sum         int64   `json:"sum"`
    Mcc         *Mcc    `json:"mcc"`
    Icon        *Icon   `json:"icon"`
    Description *string `json:"description"`
    CreatedAt   int64   `json:"createdAt"` // unix timestamp
}
// FromModelTransaction converts a persistence-layer transaction into
// its DTO form. Only the scalar fields are copied; the relational
// fields (From, To, Mcc, Icon) stay nil and can be attached later via
// TransactionBuilder.
func FromModelTransaction(t models.Transaction) *Transaction {
    dto := Transaction{}
    dto.Id = t.Id
    dto.Sum = t.Sum
    dto.Description = t.Description
    dto.CreatedAt = t.CreatedAt
    return &dto
}
// TransactionBuilder attaches the optional relations of a Transaction
// (from/to cards, Mcc, Icon) through a chainable fluent API.
type TransactionBuilder struct {
    t *Transaction
}

// NewTransactionBuilder wraps an existing Transaction for augmentation.
func NewTransactionBuilder(t *Transaction) *TransactionBuilder {
    return &TransactionBuilder{t: t}
}

// Build returns the assembled Transaction.
func (tb *TransactionBuilder) Build() *Transaction {
    return tb.t
}

// SetFrom records the source card; returns the builder for chaining.
func (tb *TransactionBuilder) SetFrom(c *Card) *TransactionBuilder {
    tb.t.From = c
    return tb
}

// SetTo records the destination card; returns the builder for chaining.
func (tb *TransactionBuilder) SetTo(c *Card) *TransactionBuilder {
    tb.t.To = c
    return tb
}

// SetMcc records the Mcc value; returns the builder for chaining.
func (tb *TransactionBuilder) SetMcc(m *Mcc) *TransactionBuilder {
    tb.t.Mcc = m
    return tb
}

// SetIcon records the Icon value; returns the builder for chaining.
func (tb *TransactionBuilder) SetIcon(i *Icon) *TransactionBuilder {
    tb.t.Icon = i
    return tb
}
|
package main
import (
"bufio"
"flag"
"fmt"
"github.com/climber73/tendermint-challenge/worldx"
"os"
"path/filepath"
"strings"
)
// main generates an n×m rectangular grid of cities for the worldx
// simulation and writes one city per line to the file given by -path.
// Each city is linked to its four orthogonal neighbours (empty name on
// a grid edge).
func main() {
    n := flag.Int("n", 3, "number of rows")
    m := flag.Int("m", 3, "number of cols")
    path := flag.String("path", "", "path to map file")
    flag.Parse()
    if len(*path) == 0 {
        exit(fmt.Errorf("empty path"))
    }
    file, err := createFile(*path)
    if err != nil {
        exit(err)
    }
    // createFile returns (nil, nil) when the user declined to overwrite.
    if file == nil {
        os.Exit(0)
    }
    defer file.Close()
    // Build every city with the names of its neighbours.
    cities := make(map[string]*worldx.City, *n**m)
    for i := 0; i < *n; i++ {
        for j := 0; j < *m; j++ {
            name := cityName(i, j)
            cities[name] = worldx.NewCity(
                name,
                northernName(i, j),
                southernName(i, j, *n),
                westernName(i, j),
                easternName(i, j, *m),
            )
        }
    }
    // NOTE: map iteration order is random, so the line order in the
    // output file varies between runs.
    for _, city := range cities {
        s := fmt.Sprintf("%v\n", *city)
        if _, err := file.WriteString(s); err != nil {
            exit(err)
        }
    }
    fmt.Printf("map (%vx%v) created\n", *n, *m)
}
// cityName builds the canonical grid name for the cell at row i,
// column j, e.g. "C-1-2".
func cityName(i, j int) string {
    const nameFormat = "C-%v-%v"
    return fmt.Sprintf(nameFormat, i, j)
}
// northernName returns the name of the cell directly north of (i, j),
// or "" when (i, j) lies on the top edge.
func northernName(i, j int) string {
    if i < 1 {
        return ""
    }
    return fmt.Sprintf("C-%v-%v", i-1, j)
}
// southernName returns the name of the cell directly south of (i, j)
// in an n-row grid, or "" when (i, j) lies on the bottom edge.
func southernName(i, j, n int) string {
    if i >= n-1 {
        return ""
    }
    return fmt.Sprintf("C-%v-%v", i+1, j)
}
// westernName returns the name of the cell directly west of (i, j),
// or "" when (i, j) lies on the left edge.
func westernName(i, j int) string {
    if j < 1 {
        return ""
    }
    return fmt.Sprintf("C-%v-%v", i, j-1)
}
// easternName returns the name of the cell directly east of (i, j)
// in an m-column grid, or "" when (i, j) lies on the right edge.
func easternName(i, j, m int) string {
    if j >= m-1 {
        return ""
    }
    return fmt.Sprintf("C-%v-%v", i, j+1)
}
// createFile opens path for writing, creating parent directories as
// needed. When the file already exists, the user is asked to confirm
// the overwrite; (nil, nil) is returned if they decline.
func createFile(path string) (*os.File, error) {
    info, err := os.Stat(path)
    if !os.IsNotExist(err) {
        // Stat either succeeded or failed for a reason other than the
        // file being absent.
        if err != nil {
            return nil, err
        }
        if info.IsDir() {
            return nil, fmt.Errorf("'%s' is a directory.", path)
        }
        question := fmt.Sprintf("File '%s' already exists. Do you want to overwrite it? (yes/no)", path)
        if ask(question) {
            return os.Create(path)
        } else {
            // Declining is not an error: signalled by a nil file.
            return nil, nil
        }
    }
    // File does not exist yet: make sure its directory does.
    if err := ensureDir(filepath.Dir(path), 0700); err != nil {
        return nil, err
    }
    return os.Create(path)
}
// ask prints msg and reads lines from stdin until the user answers
// "yes" or "no" (case-insensitive); it returns true for "yes".
func ask(msg string) bool {
    reader := bufio.NewReader(os.Stdin)
    for {
        fmt.Println(msg)
        answer, _ := reader.ReadString('\n')
        // TrimSpace also strips the trailing "\r" left by Windows
        // line endings, which the previous "\n"-only removal missed,
        // as well as accidental surrounding spaces.
        answer = strings.TrimSpace(answer)
        switch strings.ToLower(answer) {
        case "yes":
            return true
        case "no":
            return false
        }
    }
}
// exit prints err to stderr and terminates the process with a failure
// status. Exit code 1 is used instead of -1: valid exit statuses are
// 0-255, and -1 was wrapped to a platform-dependent value.
func exit(err error) {
    fmt.Fprintf(os.Stderr, "%v\n", err)
    os.Exit(1)
}
func ensureDir(dir string, mode os.FileMode) error {
if _, err := os.Stat(dir); os.IsNotExist(err) {
err := os.MkdirAll(dir, mode)
if err != nil {
return fmt.Errorf("Could not create directory %v. %v", dir, err)
}
}
return nil
}
|
package main
import (
"io"
"github.com/prologic/toybox/applets/arp"
"github.com/prologic/toybox/applets/ash"
"github.com/prologic/toybox/applets/base64"
"github.com/prologic/toybox/applets/basename"
"github.com/prologic/toybox/applets/cat"
"github.com/prologic/toybox/applets/chgrp"
"github.com/prologic/toybox/applets/chmod"
"github.com/prologic/toybox/applets/chown"
"github.com/prologic/toybox/applets/cksum"
"github.com/prologic/toybox/applets/cmp"
"github.com/prologic/toybox/applets/cp"
"github.com/prologic/toybox/applets/cut"
"github.com/prologic/toybox/applets/date"
"github.com/prologic/toybox/applets/df"
"github.com/prologic/toybox/applets/diff"
"github.com/prologic/toybox/applets/dirname"
"github.com/prologic/toybox/applets/du"
"github.com/prologic/toybox/applets/echo"
"github.com/prologic/toybox/applets/false"
"github.com/prologic/toybox/applets/head"
"github.com/prologic/toybox/applets/ln"
"github.com/prologic/toybox/applets/ls"
"github.com/prologic/toybox/applets/md5sum"
"github.com/prologic/toybox/applets/mkdir"
"github.com/prologic/toybox/applets/mv"
"github.com/prologic/toybox/applets/rm"
"github.com/prologic/toybox/applets/rmdir"
"github.com/prologic/toybox/applets/seq"
"github.com/prologic/toybox/applets/sha1sum"
"github.com/prologic/toybox/applets/sha256sum"
"github.com/prologic/toybox/applets/sha512sum"
"github.com/prologic/toybox/applets/sleep"
"github.com/prologic/toybox/applets/tr"
"github.com/prologic/toybox/applets/true"
"github.com/prologic/toybox/applets/uniq"
"github.com/prologic/toybox/applets/uuidgen"
"github.com/prologic/toybox/applets/wc"
"github.com/prologic/toybox/applets/wget"
"github.com/prologic/toybox/applets/which"
"github.com/prologic/toybox/applets/yes"
)
// Applets maps an applet name (or a meta flag such as "--install") to
// its entry point; populated once in init.
var Applets map[string]Applet

// Applet is the entry-point signature shared by every applet: output
// is written to the supplied io.Writer and args are the CLI arguments.
type Applet func(io.Writer, []string) error
// init populates the Applets registry, mapping each supported command
// name to its Main function, plus the two meta entries --install and
// --help handled by this package.
func init() {
    Applets = map[string]Applet{
        "arp":       arp.Main,
        "ash":       ash.Main,
        "basename":  basename.Main,
        "base64":    base64.Main,
        "cat":       cat.Main,
        "chgrp":     chgrp.Main,
        "chown":     chown.Main,
        "chmod":     chmod.Main,
        "cksum":     cksum.Main,
        "cmp":       cmp.Main,
        "cp":        cp.Main,
        "cut":       cut.Main,
        "date":      date.Main,
        "df":        df.Main,
        "dirname":   dirname.Main,
        "diff":      diff.Main,
        "du":        du.Main,
        "echo":      echo.Main,
        "false":     false.Main,
        "head":      head.Main,
        "ls":        ls.Main,
        "ln":        ln.Main,
        "mkdir":     mkdir.Main,
        "mv":        mv.Main,
        "md5sum":    md5sum.Main,
        "sha1sum":   sha1sum.Main,
        "sha256sum": sha256sum.Main,
        "sha512sum": sha512sum.Main,
        "sleep":     sleep.Main,
        "seq":       seq.Main,
        "true":      true.Main,
        "uniq":      uniq.Main,
        "uuidgen":   uuidgen.Main,
        "rm":        rm.Main,
        "rmdir":     rmdir.Main,
        "tr":        tr.Main,
        "yes":       yes.Main,
        "wc":        wc.Main,
        "wget":      wget.Main,
        "which":     which.Main,
        "--install": InstallMain,
        "--help":    UsageMain,
    }
}
|
package utils
import (
"bytes"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"math/rand"
"net"
"runtime"
"strconv"
"strings"
"text/scanner"
"time"
"unicode"
"unsafe"
"github.com/skoo87/log4go"
)
// FormatJSONStr normalizes a hand-written, non-standard JSON string:
// it strips all whitespace, wraps bare object keys (tokens followed by
// ':') in double quotes, and drops trailing commas before ']' or '}'.
func FormatJSONStr(str string) string {
    replacer := strings.NewReplacer("\t", "", "\n", "", "\v", "", "\f", "", "\r", "", " ", "")
    str = replacer.Replace(str)
    var s scanner.Scanner
    s.Init(strings.NewReader(str))
    // Accumulate into a strings.Builder: the previous retStr += token
    // was O(n^2) in the number of tokens.
    var b strings.Builder
    b.Grow(len(str))
    for s.Scan() != scanner.EOF {
        token := s.TokenText()
        next := s.Peek()
        if next == ':' {
            // Bare key: make sure it is double-quoted.
            if !strings.HasPrefix(token, "\"") {
                token = "\"" + token
            }
            if !strings.HasSuffix(token, "\"") {
                token += "\""
            }
        }
        // A comma immediately before a closing bracket/brace is
        // illegal JSON; skip it.
        if next == ']' || next == '}' {
            if token == "," {
                continue
            }
        }
        b.WriteString(token)
    }
    return b.String()
}
// EqualFloat64 compares two values that may each be a string or a
// numeric type, returning 1 if f1 > f2, -1 if f1 < f2 and 0 if equal.
// Note: the comparison is exact (no epsilon), so values that differ
// only by floating-point rounding compare as unequal.
func EqualFloat64(f1 interface{}, f2 interface{}) (int, error) {
    ff1, err := Interface2Float64(f1)
    if err != nil {
        // BUG FIX: the original spliced err.Error() into the Sprintf
        // FORMAT string, so any '%' in the error text was misread as a
        // verb; pass the error as an argument instead.
        return 0, fmt.Errorf("parseFloat64 parse %v error:%v\n", f1, err)
    }
    ff2, err := Interface2Float64(f2)
    if err != nil {
        return 0, fmt.Errorf("parseFloat64 parse %v error:%v\n", f2, err)
    }
    switch {
    case ff1 > ff2:
        return 1, nil
    case ff1 < ff2:
        return -1, nil
    default:
        return 0, nil
    }
}
// Interface2Float64 converts v (string, float64, float32, int or
// int64) to a float64; other types yield an error.
func Interface2Float64(v interface{}) (fv float64, err error) {
    switch vv := v.(type) {
    case string:
        return strconv.ParseFloat(vv, 64)
    case float64:
        return vv, nil
    case float32:
        return float64(vv), nil
    case int:
        return float64(vv), nil
    case int64:
        return float64(vv), nil
    default:
        return fv, errors.New("格式不正确")
    }
}
// RandStringN builds a string of n characters drawn randomly (with
// replacement) from the bytes of source; it returns "" when source is
// empty. Each byte is converted rune-wise, preserving the original
// behavior for non-ASCII source strings.
func RandStringN(n int, source string) string {
    srcLen := len(source) // no longer shadows the builtin len
    if srcLen == 0 {
        return ""
    }
    // strings.Builder avoids the O(n^2) cost of repeated string
    // concatenation.
    var b strings.Builder
    b.Grow(n)
    for i := 0; i < n; i++ {
        b.WriteRune(rune(source[rand.Intn(srcLen)]))
    }
    return b.String()
}
// Zero-copy string <-> []byte conversions: no allocation, no GC churn.
// WARNING: both rely on unsafe header reinterpretation. The []byte
// returned by S2b MUST NOT be mutated — string backing data may live
// in read-only memory, and writes would also break string immutability
// guarantees. B2s's result becomes invalid if the source slice is
// later modified.
func S2b(s string) []byte {
    x := (*[2]uintptr)(unsafe.Pointer(&s))
    // Slice header is {data, len, cap}; reuse the string's len as cap.
    h := [3]uintptr{x[0], x[1], x[1]}
    return *(*[]byte)(unsafe.Pointer(&h))
}

// B2s reinterprets b's backing array as a string without copying.
func B2s(b []byte) string {
    return *(*string)(unsafe.Pointer(&b))
}
// wait shows a console spinner animation for roughly duration, then
// clears the spinner with a carriage return.
// NOTE(review): the timer is only polled between full spinner cycles
// (4 frames x 1s sleep each), so the function can overshoot duration
// by up to ~4 seconds — confirm this is acceptable.
func wait(duration time.Duration) {
    timer := time.After(duration)
    for {
        select {
        case <-timer:
            fmt.Printf("\r")
            return
        default:
        }
        // One full cycle of the spinner frames: / - \ |
        for _, c := range "/-\\|" {
            fmt.Printf("\r%c", c)
            time.Sleep(time.Second)
        }
    }
}
// IsChinese reports whether str consists entirely of Han (Chinese)
// characters. The empty string yields true.
func IsChinese(str string) bool {
    nonHan := func(r rune) bool {
        return !unicode.Is(unicode.Han, r)
    }
    return strings.IndexFunc(str, nonHan) < 0
}
// JsonMarshalNoError marshals v to a JSON string. A marshal failure is
// logged rather than returned, and "" is produced in that case — so
// callers cannot distinguish an error from marshalling an empty value.
func JsonMarshalNoError(v interface{}) string {
    data, err := json.Marshal(v)
    if err != nil {
        log4go.Error("JsonMarshalNoError:%s", err.Error())
        return ""
    }
    return string(data)
}
// JsonEncodeNoError encodes v to a JSON string via json.Encoder; the
// optional escapeHTML flag toggles HTML escaping (&, <, >). Errors are
// logged rather than returned, yielding "". Note that Encoder.Encode
// appends a trailing newline to the output.
func JsonEncodeNoError(v interface{}, escapeHTML ...bool) string {
    var buff bytes.Buffer
    enc := json.NewEncoder(&buff)
    if len(escapeHTML) > 0 {
        enc.SetEscapeHTML(escapeHTML[0])
    }
    err := enc.Encode(v)
    if err != nil {
        log4go.Error("JsonEncodeNoError:%s", err.Error())
        return ""
    }
    return buff.String()
}
var delimiter = []byte("\n")
const base64MaxLenRFC2045 = 76
// Base64WrapRFC2045 返回符合 RFC 2045 的Base64 encoded结果(每76个字符添加\n)
func Base64WrapRFC2045(src []byte) (m string) {
m = base64.StdEncoding.EncodeToString(src)
the_len := len(m)
if the_len <= base64MaxLenRFC2045 {
return m
}
new_m := []byte(m)
// set the slice capacity to the slice len + each newline delimiters
m1 := make([]byte, 0, the_len+(len(delimiter)*int(the_len/base64MaxLenRFC2045)))
ii := 0
for i := 0; i < int(the_len/base64MaxLenRFC2045); i++ {
m1 = append(m1, new_m[i*base64MaxLenRFC2045:(i+1)*base64MaxLenRFC2045]...)
m1 = append(m1, delimiter...)
ii++
}
m1 = append(m1, new_m[ii*base64MaxLenRFC2045:the_len]...)
m = string(m1)
return m
}
// GenFakeMobile generates a fake 11-digit Chinese mobile number: a
// known 3-digit carrier prefix followed by 8 random digits.
func GenFakeMobile() string {
    var mobileNOPrefix = [...]string{"187", "156", "189", "186", "137", "139", "135", "157", "188", "153", "183", "131", "177"}
    // Re-seeding per call keeps pre-Go-1.20 callers non-deterministic;
    // ideally this would be seeded once at startup.
    rand.Seed(time.Now().UnixNano())
    mobile := mobileNOPrefix[rand.Intn(len(mobileNOPrefix))]
    // BUG FIX: Int63n(1e8) spans 0..99999999 inclusive-of-intent; the
    // original Int63n(99999999) could never produce suffix 99999999.
    return mobile + fmt.Sprintf("%08d", rand.Int63n(100000000))
}
// GenFakeEmail builds a fake email address from prefix; when prefix is
// empty, a fake mobile number is used as the local part.
func GenFakeEmail(prefix string) string {
    domains := []string{"163.com", "126.com", "sina.com.cn", "139.com", "yeah.net", "21cn.com", "sohu.com", "qq.com"}
    if prefix == "" {
        prefix = GenFakeMobile()
    }
    return prefix + "@" + domains[rand.Intn(len(domains))]
}
// elapsedTime measures wall-clock time from its creation; see Elapsed.
// Intended usage: defer Elapsed().Stop()
type elapsedTime struct {
    start time.Time
}

// Stop prints the calling function's name and the time elapsed since
// Elapsed() created the timer.
func (e *elapsedTime) Stop() {
    elapsed := time.Since(e.start) // idiomatic form of Now().Sub(start)
    pc, _, _, _ := runtime.Caller(1)
    f := runtime.FuncForPC(pc)
    fmt.Println(f.Name(), "耗时:", elapsed)
}

// Elapsed starts a timer and returns it as a minimal Stop-only
// interface, hiding the concrete type from callers.
func Elapsed() interface {
    Stop()
} {
    var e elapsedTime
    e.start = time.Now()
    return &e
}
func LocalIP() string {
addrs, err := net.InterfaceAddrs()
if err != nil {
return ""
}
for _, address := range addrs {
// check the address type and if it is not a loopback the display it
if ipnet, ok := address.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
if ipnet.IP.To4() != nil {
return ipnet.IP.String()
}
}
}
return ""
}
// ExternalIP get external ip.
func ExternalIP() (res []string) {
inters, err := net.Interfaces()
if err != nil {
return
}
for _, inter := range inters {
if !strings.HasPrefix(inter.Name, "lo") {
addrs, err := inter.Addrs()
if err != nil {
continue
}
for _, addr := range addrs {
if ipnet, ok := addr.(*net.IPNet); ok {
if ipnet.IP.IsLoopback() || ipnet.IP.IsLinkLocalMulticast() || ipnet.IP.IsLinkLocalUnicast() {
continue
}
if ip4 := ipnet.IP.To4(); ip4 != nil {
switch true {
case ip4[0] == 10:
continue
case ip4[0] == 172 && ip4[1] >= 16 && ip4[1] <= 31:
continue
case ip4[0] == 192 && ip4[1] == 168:
continue
default:
res = append(res, ipnet.IP.String())
}
}
}
}
}
}
return
}
// InternalIP get internal ip.
func InternalIP() string {
inters, err := net.Interfaces()
if err != nil {
return ""
}
for _, inter := range inters {
if !strings.HasPrefix(inter.Name, "lo") {
addrs, err := inter.Addrs()
if err != nil {
continue
}
for _, addr := range addrs {
if ipnet, ok := addr.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
if ipnet.IP.To4() != nil {
return ipnet.IP.String()
}
}
}
}
}
return ""
}
|
/*
You are given an integer array nums. You want to maximize the number of points you get by performing the following operation any number of times:
Pick any nums[i] and delete it to earn nums[i] points. Afterwards, you must delete every element equal to nums[i] - 1 and every element equal to nums[i] + 1.
Return the maximum number of points you can earn by applying the above operation some number of times.
Example 1:
Input: nums = [3,4,2]
Output: 6
Explanation: You can perform the following operations:
- Delete 4 to earn 4 points. Consequently, 3 is also deleted. nums = [2].
- Delete 2 to earn 2 points. nums = [].
You earn a total of 6 points.
Example 2:
Input: nums = [2,2,3,3,3,4]
Output: 9
Explanation: You can perform the following operations:
- Delete a 3 to earn 3 points. All 2's and 4's are also deleted. nums = [3,3].
- Delete a 3 again to earn 3 points. nums = [3].
- Delete a 3 once more to earn 3 points. nums = [].
You earn a total of 9 points.
Constraints:
1 <= nums.length <= 2 * 10^4
1 <= nums[i] <= 10^4
Hint:
If you take a number, you might as well take them all. Keep track of what the value is of the subset of the input with maximum M when you either take or don't take M.
*/
package main
// main sanity-checks earnings against the two examples from the
// problem statement (delete-and-earn, max points 6 and 9).
func main() {
    assert(earnings([]int{3, 4, 2}) == 6)
    assert(earnings([]int{2, 2, 3, 3, 3, 4}) == 9)
}
// assert panics when x is false; minimal test helper for main.
func assert(x bool) {
    if x {
        return
    }
    panic("assertion failed")
}
// earnings solves delete-and-earn: picking value v earns the sum of
// all occurrences of v but removes every v-1 and v+1. Classic
// house-robber DP over the value domain: for each value i, either skip
// it (best of i-1) or take it (best of i-2 plus i's total points).
func earnings(a []int) int {
    sum := make(map[int]int)
    highest := 0
    for _, v := range a {
        sum[v] += v
        if v > highest {
            highest = v
        }
    }
    for i := 2; i <= highest; i++ {
        take := sum[i-2] + sum[i]
        skip := sum[i-1]
        if take > skip {
            sum[i] = take
        } else {
            sum[i] = skip
        }
    }
    return sum[highest]
}
// max returns the larger of a and b.
func max(a, b int) int {
    if b > a {
        return b
    }
    return a
}
|
package renderer
import (
"io"
"io/ioutil"
"log"
"path/filepath"
"os"
)
// RenderContext holds the working directory (slash-separated path)
// under which comment renderings and the shared stylesheet live.
type RenderContext struct {
    BaseDir string
}
// NewRenderContext creates a fresh temporary working directory, copies
// the reddit.css stylesheet into it, and returns a context rooted
// there. Any failure aborts the process via log.Fatal.
func NewRenderContext() RenderContext {
    path, err := ioutil.TempDir("", "render-context")
    if err != nil {
        log.Fatal(err)
    }
    copyStyle(path)
    // Normalize to forward slashes for consumers of BaseDir.
    path = filepath.ToSlash(path)
    rc := RenderContext{path}
    return rc
}
// NewCommentDir creates a per-comment subdirectory named id under the
// base directory and symlinks the shared stylesheet into it. Failures
// abort the process via log.Fatal.
func (rc RenderContext) NewCommentDir(id string) {
    // Create Directory
    path := filepath.Join(rc.BaseDir, id)
    err := os.Mkdir(path, 0700)
    if err != nil {
        log.Fatal(err)
    }
    //Symlink Style
    rc.symlinkStyle(path)
}
// symlinkStyle links the base directory's reddit.css into path, so
// each comment directory shares one stylesheet copy. Fatal on failure.
func (rc RenderContext) symlinkStyle(path string) {
    err := os.Symlink(filepath.Join(rc.BaseDir, "reddit.css"), filepath.Join(path, "reddit.css"))
    if err != nil {
        log.Fatal(err)
    }
}
// copyStyle copies ./styles/reddit.css (relative to the process's
// working directory) into dir and fsyncs the copy. Any failure aborts
// the process via log.Fatal.
func copyStyle(dir string) {
    dir = filepath.FromSlash(dir)
    styleFile, err := os.Open(filepath.FromSlash("./styles/reddit.css"))
    if err != nil {
        log.Fatal(err)
    }
    defer styleFile.Close()
    styleCopy, err := os.Create(filepath.Join(dir, "reddit.css"))
    if err != nil {
        log.Fatal(err)
    }
    defer styleCopy.Close()
    _, err = io.Copy(styleCopy, styleFile)
    if err != nil {
        log.Fatal(err)
    }
    // Flush to stable storage before the deferred Close.
    err = styleCopy.Sync()
    if err != nil {
        log.Fatal(err)
    }
}
|
/* SPDX-License-Identifier: Apache-2.0
* Copyright (c) 2019 Intel Corporation
*/
package ngcnef
import "context"
/* The SB interface towards the AF for sending the notifications received
from different NF's */

// AfNotification defines the interface that is exposed for sending
// notifications towards the AF.
type AfNotification interface {
    // AfNotificationUpfEvent sends the UPF event through the POST
    // method towards the AF at afURI.
    AfNotificationUpfEvent(ctx context.Context,
        afURI URI,
        body EventNotification) error
}
|
package index
import (
"io/ioutil"
"os"
"testing"
)
// postFiles is the fixture corpus for the posting-list tests: file
// name -> file contents. The expected posting IDs in
// TestTrivialPosting correspond to these files in this order.
var postFiles = map[string]string{
    "file0": "",
    "file1": "Tester Code Search",
    "file2": "Tester Code Project Hosting",
    "file3": "Tester Web Search",
}
// tri packs three bytes into a uint32 trigram key, x being the most
// significant byte.
func tri(x, y, z byte) uint32 {
    v := uint32(x)
    v = v<<8 | uint32(y)
    v = v<<8 | uint32(z)
    return v
}
// TestTrivialPosting builds an index over the postFiles fixtures and
// checks trigram posting-list lookup plus AND/OR combination. The
// expected ID lists follow the fixtures: "Sea" occurs in file1 and
// file3, "Tes" in file1, file2 and file3.
func TestTrivialPosting(t *testing.T) {
    f, _ := ioutil.TempFile("", "index-test")
    defer os.Remove(f.Name())
    out := f.Name()
    buildIndex(out, nil, postFiles)
    ix := Open(out)
    if l := ix.PostingList(tri('S', 'e', 'a')); !equalList(l, []uint32{1, 3}) {
        t.Errorf("PostingList(Sea) = %v, want [1 3]", l)
    }
    if l := ix.PostingList(tri('T', 'e', 's')); !equalList(l, []uint32{1, 2, 3}) {
        t.Errorf("PostingList(Tes) = %v, want [1 2 3]", l)
    }
    if l := ix.PostingAnd(ix.PostingList(tri('S', 'e', 'a')), tri('T', 'e', 's')); !equalList(l, []uint32{1, 3}) {
        t.Errorf("PostingList(Sea&Tes) = %v, want [1 3]", l)
    }
    if l := ix.PostingAnd(ix.PostingList(tri('T', 'e', 's')), tri('S', 'e', 'a')); !equalList(l, []uint32{1, 3}) {
        t.Errorf("PostingList(Tes&Sea) = %v, want [1 3]", l)
    }
    if l := ix.PostingOr(ix.PostingList(tri('S', 'e', 'a')), tri('T', 'e', 's')); !equalList(l, []uint32{1, 2, 3}) {
        t.Errorf("PostingList(Sea|Tes) = %v, want [1 2 3]", l)
    }
    if l := ix.PostingOr(ix.PostingList(tri('T', 'e', 's')), tri('S', 'e', 'a')); !equalList(l, []uint32{1, 2, 3}) {
        t.Errorf("PostingList(Tes|Sea) = %v, want [1 2 3]", l)
    }
}
// equalList reports whether x and y contain the same uint32 sequence.
func equalList(x, y []uint32) bool {
    if len(x) != len(y) {
        return false
    }
    for i := range x {
        if x[i] != y[i] {
            return false
        }
    }
    return true
}
|
package test
import (
// "fmt"
"testing"
// "portal/service"
// "portal/database"
)
// func TestSignin(t *testing.T) {
// id, name := service.Signin("test@qq.com", "123456")
// if id != 1 {
// t.Error("查询错误", id, name)
// }
// }
// func TestQueryUser(t *testing.T) {
// res, err := service.QueryUserList()
// if err != nil {
// t.Error(err)
// }
// t.Error(res)
// }
// TestChangePasswd is currently disabled: the entire body is commented
// out because it depends on a live MySQL instance and hard-coded
// credentials. Restore the commented imports and connection string to
// re-enable it.
func TestChangePasswd(t *testing.T) {
    // database.OpenDB("root:scut2018@tcp(192.168.80.243:3306)/portal2?parseTime=true")
    // var id = 35
    // s, err := database.FindById(id, "portal_users")
    // pw, _ := database.GetPasswd(35)
    // code, _ := service.ChangePasswd(37, "scut2017", "scut2019")
    // if code != 0 {
    // t.Error(err)
    // }
}
package app
import "github.com/stretchr/testify/mock"
import "github.com/bryanl/dolb/entity"
// MockLoadBalancerFactory is a testify-style mock of the load-balancer
// factory (generated-mock pattern: record the call, replay whatever
// the test configured via On(...).Return(...)).
type MockLoadBalancerFactory struct {
    mock.Mock
}

// Build records the call and returns the configured results. When a
// return value was configured as a func, it is invoked to compute the
// result dynamically; otherwise the stored value is type-asserted and
// returned as-is.
func (_m *MockLoadBalancerFactory) Build(bootstrapConfig *BootstrapConfig) (*entity.LoadBalancer, error) {
    ret := _m.Called(bootstrapConfig)

    var r0 *entity.LoadBalancer
    if rf, ok := ret.Get(0).(func(*BootstrapConfig) *entity.LoadBalancer); ok {
        r0 = rf(bootstrapConfig)
    } else {
        // A nil Get(0) stays a typed-nil-free nil pointer.
        if ret.Get(0) != nil {
            r0 = ret.Get(0).(*entity.LoadBalancer)
        }
    }

    var r1 error
    if rf, ok := ret.Get(1).(func(*BootstrapConfig) error); ok {
        r1 = rf(bootstrapConfig)
    } else {
        r1 = ret.Error(1)
    }

    return r0, r1
}
|
package products
import (
"context"
"fmt"
"log"
"os"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
// DB is the shared handle to the "cafe" Mongo database, set by DBConnect.
var DB *mongo.Database

// DBConnect : Function for return a Mongo DB client
// It connects to the MongoDB instance named by the MONGODB_URI
// environment variable, verifies the link with a ping, and stores the
// "cafe" database handle in DB. Any failure aborts the process.
func DBConnect() {
    // Set client options
    clientOptions := options.Client().ApplyURI(os.Getenv("MONGODB_URI"))
    // Connect to MongoDB
    client, err := mongo.Connect(context.TODO(), clientOptions)
    if err != nil {
        log.Fatal(err)
    }
    // Check the connection
    err = client.Ping(context.TODO(), nil)
    if err != nil {
        log.Fatal(err)
    }
    DB = client.Database("cafe")
    fmt.Println("Connected to MongoDB!")
}
|
package main
import (
"fmt"
"net/http"
)
// main registers the catch-all handler and serves HTTP on port 9000.
func main() {
    http.HandleFunc("/", handler)
    /*If we get request to "/*", call handler function*/
    // ListenAndServe always returns a non-nil error when it stops;
    // the original silently discarded it, hiding e.g. a busy port.
    if err := http.ListenAndServe(":9000", nil); err != nil {
        fmt.Println(err)
    }
}
// handler answers every incoming request with a fixed greeting.
func handler(w http.ResponseWriter, r *http.Request) {
    const greeting = "Hello!"
    fmt.Fprint(w, greeting)
}
|
/*
** Copyright 2019 Bloomberg Finance L.P.
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
** http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/
package common
import (
"errors"
"github.com/spf13/viper"
)
// Settings is the root configuration object produced by ReadSettings.
type Settings struct {
    Log           *LogSettings
    SourceOfTrust *SourceOfTrustSettings
}

// SourceOfTrustSettings selects the trust backend; only the file
// backend is implemented here (Spire is a TODO below).
type SourceOfTrustSettings struct {
    File *FileTrustSourceSettings
}

// FileTrustSourceSettings maps a trust-domain name to its list of
// file paths.
type FileTrustSourceSettings struct {
    Domains map[string][]string
}

// LogSettings configures the rolling log output. Filename is the only
// required key; the rest have defaults set in readLogSettings.
type LogSettings struct {
    Filename   string
    Level      string
    MaxSize    int
    MaxBackups int
    MaxAge     int
    Compress   bool
}
func wrapError(err error) error {
return errors.New("parse-settings: " + err.Error())
}
// ReadSettings loads the configuration file at fromPath via viper and
// populates the log and source-of-trust sections. All errors are
// wrapped with the "parse-settings" prefix.
func ReadSettings(fromPath string) (*Settings, error) {
    settings := new(Settings)
    // cause we use it a few times
    var err error
    // Load the config from disk at sourcePath
    viper.SetConfigFile(fromPath)
    if err = viper.ReadInConfig(); err != nil {
        return nil, wrapError(err)
    }
    // Read logging settings
    if settings.Log, err = readLogSettings(); err != nil {
        return nil, wrapError(err)
    }
    // Read our source of trust settings
    if settings.SourceOfTrust, err = readSourceOfTrustSettings(); err != nil {
        return nil, wrapError(err)
    }
    return settings, nil
}
// readLogSettings extracts the log.* keys from the already-loaded
// viper config. log.filename is required; every other key falls back
// to the defaults registered below.
func readLogSettings() (*LogSettings, error) {
    // Set defaults for non-required values
    viper.SetDefault("log.level", "INFO")
    viper.SetDefault("log.maxsize", 10)
    viper.SetDefault("log.maxbackups", 10)
    viper.SetDefault("log.maxage", 30)
    viper.SetDefault("log.compress", false)
    // Check for required values
    if !viper.IsSet("log.filename") {
        return nil, errors.New("log.filename is required but not found")
    }
    logSettings := new(LogSettings)
    logSettings.Filename = viper.GetString("log.filename")
    logSettings.Level = viper.GetString("log.level")
    logSettings.MaxSize = viper.GetInt("log.maxsize")
    logSettings.MaxBackups = viper.GetInt("log.maxbackups")
    logSettings.MaxAge = viper.GetInt("log.maxage")
    logSettings.Compress = viper.GetBool("log.compress")
    return logSettings, nil
}
// readSourceOfTrustSettings reads the trustsource.* section. At least
// one of trustsource.file / trustsource.spire must be present; only
// the file backend is parsed today (Spire remains a TODO).
func readSourceOfTrustSettings() (*SourceOfTrustSettings, error) {
    if !viper.IsSet("trustsource.file") && !viper.IsSet("trustsource.spire") {
        return nil, errors.New("Either trustsource.file or trustsource.spire are required but neither found")
    }
    // cause we use it a few times
    var err error
    sourceOfTrust := new(SourceOfTrustSettings)
    if viper.IsSet("trustsource.file") {
        if sourceOfTrust.File, err = readFileSourceOfTrustSettings(); err != nil {
            return nil, err
        }
    }
    // TODO: Add implementation for Spire being the source of trust
    //if(viper.IsSet("trustsource.spire")){
    //	if trustSettings.File, err = readSpireSourceOfTrustSettings(); err != nil {
    //		return nil, err
    //	}
    //}
    return sourceOfTrust, nil
}
// readFileSourceOfTrustSettings parses the file-backend trust config:
// trustsource.file.domains (required) maps domain names to path lists.
func readFileSourceOfTrustSettings() (*FileTrustSourceSettings, error) {
    if !viper.IsSet("trustsource.file.domains") {
        return nil, errors.New("trustsource.file.domains is required but not found")
    }
    fileSettings := new(FileTrustSourceSettings)
    fileSettings.Domains = viper.GetStringMapStringSlice("trustsource.file.domains")
    return fileSettings, nil
}
|
package api
// Create appends a new Todo built from the given fields to the global
// Todos slice, assigning it the current ClePrimaire as Id.
// NOTE(review): the function returns ClePrimaire AFTER the increment,
// i.e. myTodo.Id + 1 — callers receive the NEXT id, not the id of the
// todo just created. This looks like an off-by-one bug; confirm
// against callers before changing.
// NOTE(review): the (error, int) return order is non-idiomatic Go
// ((int, error) is conventional) but is kept as the public interface.
func Create(titre string, description string, dueDate int) (error, int) {
    var myTodo Todo
    myTodo.Titre = titre
    myTodo.Description = description
    myTodo.DueDate = dueDate
    myTodo.Id = ClePrimaire
    ClePrimaire++
    Todos = append(Todos, myTodo)
    return nil, ClePrimaire
}
|
package handler
import (
"os"
"time"
"github.com/openfaas/nats-queue-worker/nats"
)
// NATSConfig abstracts the connection parameters a NATS
// producer/consumer needs: its client id, the maximum number of
// reconnect attempts, and the delay between attempts.
type NATSConfig interface {
    GetClientID() string
    GetMaxReconnect() int
    GetReconnectDelay() time.Duration
}
// DefaultNATSConfig is the standard NATSConfig implementation: fixed
// reconnect limits plus a hostname-derived client id.
type DefaultNATSConfig struct {
    maxReconnect   int
    reconnectDelay time.Duration
}

// NewDefaultNATSConfig builds a config with the given reconnect limit
// and delay between reconnect attempts.
func NewDefaultNATSConfig(maxReconnect int, reconnectDelay time.Duration) DefaultNATSConfig {
    return DefaultNATSConfig{maxReconnect, reconnectDelay}
}
// GetClientID returns the ClientID assigned to this producer/consumer.
// It is derived from the machine hostname; a hostname lookup error is
// deliberately ignored, yielding an empty base name.
func (DefaultNATSConfig) GetClientID() string {
    val, _ := os.Hostname()
    return getClientID(val)
}

// GetMaxReconnect returns the configured reconnect attempt limit.
func (c DefaultNATSConfig) GetMaxReconnect() int {
    return c.maxReconnect
}

// GetReconnectDelay returns the configured delay between reconnects.
func (c DefaultNATSConfig) GetReconnectDelay() time.Duration {
    return c.reconnectDelay
}

// getClientID prefixes the sanitized hostname with the publisher tag.
func getClientID(hostname string) string {
    return "faas-publisher-" + nats.GetClientID(hostname)
}
|
// Package lexer contains the code to lex input-programs into a stream
// of tokens, such that they may be parsed.
package lexer
import (
"fmt"
"strings"
"unicode"
"github.com/kasworld/nonkey/enum/tokentype"
"github.com/kasworld/nonkey/interpreter/token"
)
// Lexer holds our object-state.
type Lexer struct {
    // Current line / column, kept for debug and error messages.
    curLine      int
    curPosInLine int
    // Start offset (into characters) of each source line.
    codeLineBegins []int // line begin pos
    // The current character position
    position int
    // The next character position
    readPosition int
    //The current character
    ch rune
    // A rune slice of our input string
    characters []rune
    // Previous token, used to disambiguate '/' (division vs regexp).
    prevToken token.Token
}
// New creates a Lexer over input, primes the line-begin table and
// loads the first character.
func New(input string) *Lexer {
    l := &Lexer{
        characters:     []rune(input),
        codeLineBegins: []int{0},
    }
    l.readChar()
    return l
}
// GetLineStr returns the source text of the given (0-based) line,
// running to the next recorded line start or to the end of the input.
func (l *Lexer) GetLineStr(line int) string {
    begin := l.codeLineBegins[line]
    if line+1 >= len(l.codeLineBegins) {
        // Last known line: everything to the end of the input.
        return string(l.characters[begin:])
    }
    return string(l.characters[begin:l.codeLineBegins[line+1]])
}
// CurrentLine returns the current (0-based) line in the source code.
func (l *Lexer) CurrentLine() int {
    return l.curLine
}

// CurrentPosInLine returns the current column within the current line.
func (l *Lexer) CurrentPosInLine() int {
    return l.curPosInLine
}
// readChar advances the lexer by one rune, maintaining position,
// readPosition and the line/column bookkeeping used in error messages.
// Past the end of input, ch becomes rune(0) (the EOF sentinel).
func (l *Lexer) readChar() {
    if l.readPosition >= len(l.characters) {
        l.ch = rune(0)
        // NOTE(review): this appends a line-begin entry on EVERY call
        // past the end, so repeated reads at EOF grow codeLineBegins —
        // confirm against GetLineStr before changing.
        l.codeLineBegins = append(l.codeLineBegins, l.position+1)
    } else {
        l.ch = l.characters[l.readPosition]
    }
    l.position = l.readPosition
    l.readPosition++
    // for debug, error message
    l.curPosInLine++
    if l.ch == rune('\n') {
        l.curLine++
        l.curPosInLine = 0
        // NOTE(review): records the newline's own offset (not the
        // first rune after it) as the next line's begin — looks off by
        // one relative to the EOF branch above; verify intent.
        l.codeLineBegins = append(l.codeLineBegins, l.position)
    }
}
// NextToken reads the next token, skipping whitespace and comments.
// Two-character operators are matched by peeking one rune ahead; '/'
// is disambiguated between division and a regexp literal using the
// previous token.
func (l *Lexer) NextToken() token.Token {
    var tok token.Token
    l.skipWhitespace()
    // skip single-line comments
    if l.ch == rune('#') ||
        (l.ch == rune('/') && l.peekChar() == rune('/')) {
        l.skipComment()
        return l.NextToken()
    }
    // multi-line comments
    if l.ch == rune('/') && l.peekChar() == rune('*') {
        l.skipMultiLineComment()
        // FIX: restart scanning, mirroring the single-line-comment
        // handling above. Previously control fell through into the
        // switch, so a second consecutive comment ("/* a */ /* b */")
        // had its leading '/' misread as division or a regexp start.
        return l.NextToken()
    }
    switch l.ch {
    case rune('&'):
        if l.peekChar() == rune('&') {
            ch := l.ch
            l.readChar()
            tok = l.newToken(tokentype.AND, string(ch)+string(l.ch))
        }
    case rune('|'):
        if l.peekChar() == rune('|') {
            ch := l.ch
            l.readChar()
            tok = l.newToken(tokentype.OR, string(ch)+string(l.ch))
        }
    case rune('='):
        if l.peekChar() == rune('=') {
            ch := l.ch
            l.readChar()
            tok = l.newToken(tokentype.EQ, string(ch)+string(l.ch))
        } else {
            tok = l.newToken(tokentype.ASSIGN, string(l.ch))
        }
    // case rune(';'), rune('\r'), rune('\n'):
    case rune(';'):
        tok = l.newToken(tokentype.SEMICOLON, string(l.ch))
    case rune('?'):
        tok = l.newToken(tokentype.QUESTION, string(l.ch))
    case rune('('):
        tok = l.newToken(tokentype.LPAREN, string(l.ch))
    case rune(')'):
        tok = l.newToken(tokentype.RPAREN, string(l.ch))
    case rune(','):
        tok = l.newToken(tokentype.COMMA, string(l.ch))
    case rune('.'):
        if l.peekChar() == rune('.') {
            ch := l.ch
            l.readChar()
            tok = l.newToken(tokentype.DOTDOT, string(ch)+string(l.ch))
        } else {
            tok = l.newToken(tokentype.PERIOD, string(l.ch))
        }
    case rune('+'):
        if l.peekChar() == rune('+') {
            ch := l.ch
            l.readChar()
            tok = l.newToken(tokentype.PLUS_PLUS, string(ch)+string(l.ch))
        } else if l.peekChar() == rune('=') {
            ch := l.ch
            l.readChar()
            tok = l.newToken(tokentype.PLUS_EQUALS, string(ch)+string(l.ch))
        } else {
            tok = l.newToken(tokentype.PLUS, string(l.ch))
        }
    case rune('%'):
        tok = l.newToken(tokentype.MOD, string(l.ch))
    case rune('{'):
        tok = l.newToken(tokentype.LBRACE, string(l.ch))
    case rune('}'):
        tok = l.newToken(tokentype.RBRACE, string(l.ch))
    case rune('-'):
        if l.peekChar() == rune('-') {
            ch := l.ch
            l.readChar()
            tok = l.newToken(tokentype.MINUS_MINUS, string(ch)+string(l.ch))
        } else if l.peekChar() == rune('=') {
            ch := l.ch
            l.readChar()
            tok = l.newToken(tokentype.MINUS_EQUALS, string(ch)+string(l.ch))
        } else {
            tok = l.newToken(tokentype.MINUS, string(l.ch))
        }
    case rune('/'):
        if l.peekChar() == rune('=') {
            ch := l.ch
            l.readChar()
            tok = l.newToken(tokentype.SLASH_EQUALS, string(ch)+string(l.ch))
        } else {
            // slash is mostly division, but could
            // be the start of a regular expression
            // We exclude:
            //   a[b] / c   -> RBRACKET
            //   ( a + b ) / c -> RPAREN
            //   a / c      -> IDENT
            //   3.2 / c    -> FLOAT
            //   1 / c      -> IDENT
            //
            if l.prevToken.Type == tokentype.RBRACKET ||
                l.prevToken.Type == tokentype.RPAREN ||
                l.prevToken.Type == tokentype.IDENT ||
                l.prevToken.Type == tokentype.INT ||
                l.prevToken.Type == tokentype.FLOAT {
                tok = l.newToken(tokentype.SLASH, string(l.ch))
            } else {
                str, err := l.readRegexp()
                if err == nil {
                    tok = l.newToken(tokentype.REGEXP, str)
                } else {
                    fmt.Printf("%s\n", err.Error())
                    tok = l.newToken(tokentype.REGEXP, str)
                }
            }
        }
    case rune('*'):
        if l.peekChar() == rune('*') {
            ch := l.ch
            l.readChar()
            tok = l.newToken(tokentype.POW, string(ch)+string(l.ch))
        } else if l.peekChar() == rune('=') {
            ch := l.ch
            l.readChar()
            tok = l.newToken(tokentype.ASTERISK_EQUALS, string(ch)+string(l.ch))
        } else {
            tok = l.newToken(tokentype.ASTERISK, string(l.ch))
        }
    case rune('<'):
        if l.peekChar() == rune('=') {
            ch := l.ch
            l.readChar()
            tok = l.newToken(tokentype.LT_EQUALS, string(ch)+string(l.ch))
        } else {
            tok = l.newToken(tokentype.LT, string(l.ch))
        }
    case rune('>'):
        if l.peekChar() == rune('=') {
            ch := l.ch
            l.readChar()
            tok = l.newToken(tokentype.GT_EQUALS, string(ch)+string(l.ch))
        } else {
            tok = l.newToken(tokentype.GT, string(l.ch))
        }
    case rune('~'):
        if l.peekChar() == rune('=') {
            ch := l.ch
            l.readChar()
            tok = l.newToken(tokentype.CONTAINS, string(ch)+string(l.ch))
        }
    case rune('!'):
        if l.peekChar() == rune('=') {
            ch := l.ch
            l.readChar()
            tok = l.newToken(tokentype.NOT_EQ, string(ch)+string(l.ch))
        } else {
            if l.peekChar() == rune('~') {
                ch := l.ch
                l.readChar()
                tok = l.newToken(tokentype.NOT_CONTAINS, string(ch)+string(l.ch))
            } else {
                tok = l.newToken(tokentype.BANG, string(l.ch))
            }
        }
    case rune('"'):
        str := l.readString()
        tok = l.newToken(tokentype.STRING, str)
    case rune('`'):
        str := l.readBacktick()
        tok = l.newToken(tokentype.BACKTICK, str)
    case rune('['):
        tok = l.newToken(tokentype.LBRACKET, string(l.ch))
    case rune(']'):
        tok = l.newToken(tokentype.RBRACKET, string(l.ch))
    case rune(':'):
        tok = l.newToken(tokentype.COLON, string(l.ch))
    case rune(0):
        tok = l.newToken(tokentype.EOF, "")
    default:
        if isDigit(l.ch) {
            tok = l.readDecimal()
            l.prevToken = tok
            return tok
        }
        str := l.readIdentifier()
        tType := tokentype.LookupKeyword(str)
        tok = l.newToken(tType, str)
        l.prevToken = tok
        return tok
    }
    l.readChar()
    l.prevToken = tok
    return tok
}
// newToken builds a token of the given type and literal, stamped with
// the lexer's current line and column for error reporting.
func (l *Lexer) newToken(tokenType tokentype.TokenType, s string) token.Token {
    return token.Token{
        Type:    tokenType,
        Literal: s,
        Line:    l.curLine,
        Pos:     l.curPosInLine,
    }
}
// readIdentifier is designed to read an identifier (name of variable,
// function, etc).
//
// However there is a complication due to our historical implementation
// of the standard library. We really want to stop identifiers if we hit
// a period, to allow method-calls to work on objects.
//
// So with input like this:
//
//	a.blah();
//
// Our identifier should be "a" (then we have a period, then a second
// identifier "blah", followed by opening & closing parenthesis).
//
// However we also have to cover the case of:
//
//	string.toupper( "blah" );
//	os.getenv( "PATH" );
//	..
//
// So we have a horrid implementation..
func (l *Lexer) readIdentifier() string {
    //
    // Functions which are permitted to have dots in their name.
    //
    valid := map[string]bool{
        "directory.glob":     true,
        "math.abs":           true,
        "math.random":        true,
        "math.sqrt":          true,
        "os.environment":     true,
        "os.getenv":          true,
        "os.setenv":          true,
        "string.interpolate": true,
    }
    //
    // Types which will have valid methods.
    //
    types := []string{
        "string.",
        "array.",
        "integer.",
        "float.",
        "hash.",
        "object."}
    id := ""
    //
    // Save our position, in case we need to jump backwards in
    // our scanning.  Yeah.
    //
    position := l.position
    rposition := l.readPosition
    //
    // Build up our identifier, handling only valid characters.
    //
    // NOTE: This WILL consider the period valid, allowing the
    // parsing of "foo.bar", "os.getenv", "blah.blah.blah", etc.
    //
    for isIdentifier(l.ch) {
        id += string(l.ch)
        l.readChar()
    }
    //
    // Now we to see if our identifier had a period inside it.
    //
    if strings.Contains(id, ".") {
        // Is it a known-good function?
        ok := valid[id]
        // If not see if it has a type-prefix, which will
        // let the definition succeed.
        if !ok {
            for _, i := range types {
                if strings.HasPrefix(id, i) {
                    ok = true
                }
            }
        }
        //
        // Not permitted?  Then we abort.
        //
        // We reset our lexer-state to the position just ahead
        // of the period.  This will then lead to a syntax
        // error.
        //
        // Which probably means our lexer should abort instead.
        //
        // For the moment we'll leave as-is.
        //
        if !ok {
            //
            // OK first of all we truncate our identifier
            // at the position before the "."
            //
            offset := strings.Index(id, ".")
            id = id[:offset]
            //
            // Now we have to move backwards - as a quickie
            // We'll reset our position and move forwards
            // the length of the bits we went too-far.
            // (Re-reading offset characters from the saved state
            // leaves the lexer just before the period.)
            l.position = position
            l.readPosition = rposition
            for offset > 0 {
                l.readChar()
                offset--
            }
        }
    }
    // And now our pain is over.
    return id
}
// skipWhitespace advances the lexer past any run of whitespace
// characters (as defined by isWhitespace).
func (l *Lexer) skipWhitespace() {
	for isWhitespace(l.ch) {
		l.readChar()
	}
}
// skipComment consumes a single-line comment: everything up to the next
// newline (or end of input), then any whitespace that follows it.
func (l *Lexer) skipComment() {
	for l.ch != rune(0) && l.ch != '\n' {
		l.readChar()
	}
	l.skipWhitespace()
}
// skipMultiLineComment consumes input until the closing "*/" of a
// multi-line comment has been passed (or the input is exhausted), then
// skips any whitespace which follows the comment.
func (l *Lexer) skipMultiLineComment() {
	for {
		// Ran out of input: give up on finding a terminator.
		if l.ch == rune(0) {
			l.readChar()
			break
		}
		// Found "*/": consume both characters and stop.
		if l.ch == '*' && l.peekChar() == '/' {
			l.readChar() // step onto the "/"
			l.readChar() // step past the "/"
			break
		}
		l.readChar()
	}
	l.skipWhitespace()
}
// readNumber consumes a numeric literal and returns its raw text.
// Decimal is the default; a "0x" prefix switches to hexadecimal digits
// and a "0b" prefix to binary digits.
func (l *Lexer) readNumber() string {
	// Choose the permitted character set based on any prefix.
	digits := "0123456789"
	if l.ch == '0' {
		switch l.peekChar() {
		case 'x':
			// Hexadecimal literal.
			digits = "0x123456789abcdefABCDEF"
		case 'b':
			// Binary literal.
			digits = "b01"
		}
	}
	var sb strings.Builder
	for strings.Contains(digits, string(l.ch)) {
		sb.WriteRune(l.ch)
		l.readChar()
	}
	return sb.String()
}
// readDecimal reads an integer literal and promotes it to a FLOAT token
// when it is immediately followed by "." and another digit.  A "." not
// followed by a digit is left unconsumed so that method-calls on raw
// numbers (e.g. "3.string()") still lex correctly.
func (l *Lexer) readDecimal() token.Token {
	whole := l.readNumber()

	// No ".<digit>" suffix?  Then this is a plain integer.
	if l.ch != rune('.') || !isDigit(l.peekChar()) {
		return l.newToken(tokentype.INT, whole)
	}

	// Consume the "." and the fractional digits: a float.
	l.readChar()
	fraction := l.readNumber()
	return l.newToken(tokentype.FLOAT, whole+"."+fraction)
}
// readString reads a double-quoted string literal, decoding the escape
// sequences \n, \r, \t, \" and \\.  The opening quote has already been
// consumed; the closing quote is consumed here.
//
// Fix: the previous implementation looped forever on an unterminated
// string (end of input never matched '"'); we now stop at EOF, like
// readRegexp does, and return the text collected so far.
func (l *Lexer) readString() string {
	var out strings.Builder
	for {
		l.readChar()
		if l.ch == '"' {
			break
		}
		// Bug fix: bail out at end of input rather than spinning.
		if l.ch == rune(0) {
			break
		}
		// Handle \n, \r, \t, \", etc.
		if l.ch == '\\' {
			l.readChar()
			switch l.ch {
			case rune('n'):
				l.ch = '\n'
			case rune('r'):
				l.ch = '\r'
			case rune('t'):
				l.ch = '\t'
			case rune('"'):
				l.ch = '"'
			case rune('\\'):
				l.ch = '\\'
			}
		}
		out.WriteRune(l.ch)
	}
	return out.String()
}
// readRegexp reads a regular expression literal up to its closing "/",
// along with any trailing flags.  Two flags are supported: "i"
// (ignore-case) and "m" (multi-line); they are folded into the pattern
// using Go's inline "(?flags)" syntax.  An error is returned if the
// input ends before the regexp is terminated.
func (l *Lexer) readRegexp() (string, error) {
	pattern := ""
	for {
		l.readChar()
		switch {
		case l.ch == rune(0):
			// Input exhausted before the closing "/".
			return "unterminated regular expression", fmt.Errorf("unterminated regular expression")
		case l.ch == '/':
			// Consume the terminating "/" and collect flags.
			l.readChar()
			flags := ""
			for l.ch == rune('i') || l.ch == rune('m') {
				// Record each flag at most once.
				if !strings.Contains(flags, string(l.ch)) {
					flags += string(l.ch)
				}
				l.readChar()
			}
			// Translate the flags to go-lang inline modifiers.
			if flags != "" {
				pattern = "(?" + flags + ")" + pattern
			}
			return pattern, nil
		default:
			pattern += string(l.ch)
		}
	}
}
// readBacktick reads the body of a backtick-quoted string.  The opening
// backtick has already been consumed; the closing backtick is consumed
// here.  No escape-processing is performed, the contents are returned
// verbatim.
//
// Fix: the previous implementation looped forever if the input ended
// before a closing backtick; we now stop at EOF and return the
// remaining text.
func (l *Lexer) readBacktick() string {
	position := l.position + 1
	for {
		l.readChar()
		// Stop at the terminator, or at EOF (bug fix: previously an
		// unterminated backtick-string would spin forever here).
		if l.ch == '`' || l.ch == rune(0) {
			break
		}
	}
	out := string(l.characters[position:l.position])
	return out
}
// peekChar returns the character one position ahead of the current one,
// without advancing the lexer.  At end of input it returns rune(0).
func (l *Lexer) peekChar() rune {
	if l.readPosition < len(l.characters) {
		return l.characters[l.readPosition]
	}
	return rune(0)
}
// isIdentifier reports whether ch may appear inside an identifier:
// any letter or digit, plus the special characters '.', '?', '$' and
// '_'.  (The '.' is accepted here so dotted names like "os.getenv" can
// be scanned; readIdentifier decides whether the dot is legitimate.)
//
// Fix: return the boolean expression directly instead of the
// "if cond { return true }; return false" form (staticcheck S1008).
func isIdentifier(ch rune) bool {
	return unicode.IsLetter(ch) || unicode.IsDigit(ch) ||
		ch == '.' || ch == '?' || ch == '$' || ch == '_'
}
// isWhitespace reports whether ch is a whitespace character: space,
// tab, newline or carriage-return.
func isWhitespace(ch rune) bool {
	switch ch {
	case ' ', '\t', '\n', '\r':
		return true
	}
	return false
}
// isDigit reports whether ch is an ASCII decimal digit.
func isDigit(ch rune) bool {
	return '0' <= ch && ch <= '9'
}
|
package resolvers
import (
"context"
"github.com/syncromatics/kafmesh/internal/graph/generated"
"github.com/syncromatics/kafmesh/internal/graph/model"
"github.com/pkg/errors"
)
//go:generate mockgen -source=./processorOutput.go -destination=./processorOutput_mock_test.go -package=resolvers_test
// ProcessorOutputLoader is the dataloader for a processor output.
// Both methods take the output's ID and return the related entity;
// the implementation is expected to batch lookups.
type ProcessorOutputLoader interface {
ProcessorByOutput(int) (*model.Processor, error)
TopicByOutput(int) (*model.Topic, error)
}
// Compile-time check that ProcessorOutputResolver satisfies the
// generated resolver interface.
var _ generated.ProcessorOutputResolver = &ProcessorOutputResolver{}
// ProcessorOutputResolver resolves the output's relationships
// (its owning processor and the topic it writes to).
type ProcessorOutputResolver struct {
*Resolver
}
// Processor returns the output's processor, resolved through the
// request-scoped ProcessorOutputLoader dataloader.
func (r *ProcessorOutputResolver) Processor(ctx context.Context, output *model.ProcessorOutput) (*model.Processor, error) {
	loader := r.DataLoaders.ProcessorOutputLoader(ctx)
	processor, err := loader.ProcessorByOutput(output.ID)
	if err != nil {
		return nil, errors.Wrap(err, "failed to get processor from loader")
	}
	return processor, nil
}
// Topic returns the output's topic, resolved through the request-scoped
// ProcessorOutputLoader dataloader.
func (r *ProcessorOutputResolver) Topic(ctx context.Context, output *model.ProcessorOutput) (*model.Topic, error) {
	loader := r.DataLoaders.ProcessorOutputLoader(ctx)
	topic, err := loader.TopicByOutput(output.ID)
	if err != nil {
		return nil, errors.Wrap(err, "failed to get topic from loader")
	}
	return topic, nil
}
|
// Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package core
import (
"context"
"encoding/hex"
"encoding/json"
"strconv"
"strings"
"time"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/domain"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/expression/aggregation"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/parser/ast"
"github.com/pingcap/tidb/parser/charset"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/parser/opcode"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/sessionctx/variable"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/table/tables"
"github.com/pingcap/tidb/tablecodec"
"github.com/pingcap/tidb/types"
driver "github.com/pingcap/tidb/types/parser_driver"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/codec"
"github.com/pingcap/tidb/util/collate"
"github.com/pingcap/tidb/util/hint"
"github.com/pingcap/tidb/util/sem"
"github.com/pingcap/tidb/util/stringutil"
)
// EvalSubqueryFirstRow evaluates incorrelated subqueries once, and get first row.
// NOTE(review): declared as a package-level function variable, presumably so the
// executor can inject the implementation and avoid an import cycle — confirm
// against the package that assigns it.
var EvalSubqueryFirstRow func(ctx context.Context, p PhysicalPlan, is infoschema.InfoSchema, sctx sessionctx.Context) (row []types.Datum, err error)
// evalAstExpr evaluates ast expression directly: literal values are
// returned as-is, anything else is rewritten into an Expression and
// evaluated against an empty row (so it must be schema-independent).
func evalAstExpr(sctx sessionctx.Context, expr ast.ExprNode) (types.Datum, error) {
	// Fast path: a plain literal already carries its datum.
	if val, ok := expr.(*driver.ValueExpr); ok {
		return val.Datum, nil
	}
	rewritten, err := rewriteAstExpr(sctx, expr, nil, nil, false)
	if err != nil {
		return types.Datum{}, err
	}
	return rewritten.Eval(chunk.Row{})
}
// rewriteAstExpr rewrites ast expression directly.
// schema and names (both may be nil) describe the columns the expression may
// reference; allowCastArray permits building CAST(... AS ... ARRAY).
func rewriteAstExpr(sctx sessionctx.Context, expr ast.ExprNode, schema *expression.Schema, names types.NameSlice, allowCastArray bool) (expression.Expression, error) {
var is infoschema.InfoSchema
// in tests, it may be null
if s, ok := sctx.GetInfoSchema().(infoschema.InfoSchema); ok {
is = s
}
// Build a throwaway plan builder; savedBlockNames captures the session's
// select-block-name state so it can be restored below.
b, savedBlockNames := NewPlanBuilder().Init(sctx, is, &hint.BlockHintProcessor{})
b.allowBuildCastArray = allowCastArray
// Rewrite against a dual (one-row, no-source) plan; column references, if
// any, resolve through the supplied schema/names.
fakePlan := LogicalTableDual{}.Init(sctx, 0)
if schema != nil {
fakePlan.schema = schema
fakePlan.names = names
}
b.curClause = expressionClause
newExpr, _, err := b.rewrite(context.TODO(), expr, fakePlan, nil, true)
if err != nil {
// NOTE(review): savedBlockNames is not restored on this error path —
// confirm whether that is intentional.
return nil, err
}
// Restore the select-block-name state saved above.
sctx.GetSessionVars().PlannerSelectBlockAsName.Store(&savedBlockNames)
return newExpr, nil
}
// rewriteInsertOnDuplicateUpdate rewrites an expression from the assignment
// list of "INSERT ... ON DUPLICATE KEY UPDATE".  mockPlan supplies the schema
// used to resolve column references; insertPlan is attached to the rewriter so
// VALUES(col) references can resolve against the insert's table schema.
func (b *PlanBuilder) rewriteInsertOnDuplicateUpdate(ctx context.Context, exprNode ast.ExprNode, mockPlan LogicalPlan, insertPlan *Insert) (expression.Expression, error) {
// Track rewriter nesting depth; getExpressionRewriter pools rewriters by it.
b.rewriterCounter++
defer func() { b.rewriterCounter-- }()
b.curClause = fieldList
rewriter := b.getExpressionRewriter(ctx, mockPlan)
// The rewriter maybe is obtained from "b.rewriterPool", "rewriter.err" is
// not nil means certain previous procedure has not handled this error.
// Here we give us one more chance to make a correct behavior by handling
// this missed error.
if rewriter.err != nil {
return nil, rewriter.err
}
rewriter.insertPlan = insertPlan
rewriter.asScalar = true
expr, _, err := b.rewriteExprNode(rewriter, exprNode, true)
return expr, err
}
// rewrite function rewrites ast expr to expression.Expression.
// aggMapper maps ast.AggregateFuncExpr to the columns offset in p's output schema.
// asScalar means whether this expression must be treated as a scalar expression.
// And this function returns a result expression, a new plan that may have apply or semi-join.
func (b *PlanBuilder) rewrite(ctx context.Context, exprNode ast.ExprNode, p LogicalPlan, aggMapper map[*ast.AggregateFuncExpr]int, asScalar bool) (expression.Expression, LogicalPlan, error) {
	// Delegate with no window mapper and no preprocessing hook.
	return b.rewriteWithPreprocess(ctx, exprNode, p, aggMapper, nil, asScalar, nil)
}
// rewriteWithPreprocess is for handling the situation that we need to adjust the input ast tree
// before really using its node in `expressionRewriter.Leave`. In that case, we first call
// er.preprocess(expr), which returns a new expr. Then we use the new expr in `Leave`.
//
// aggMapper/windowMapper map aggregate and window function nodes to column
// offsets in p's output schema (either may be nil); asScalar forces a scalar
// result; preprocess (may be nil) is applied to every node in Leave.
func (b *PlanBuilder) rewriteWithPreprocess(
ctx context.Context,
exprNode ast.ExprNode,
p LogicalPlan, aggMapper map[*ast.AggregateFuncExpr]int,
windowMapper map[*ast.WindowFuncExpr]int,
asScalar bool,
preprocess func(ast.Node) ast.Node,
) (expression.Expression, LogicalPlan, error) {
// Track rewriter nesting depth; the depth indexes the rewriter pool.
b.rewriterCounter++
defer func() { b.rewriterCounter-- }()
rewriter := b.getExpressionRewriter(ctx, p)
// The rewriter maybe is obtained from "b.rewriterPool", "rewriter.err" is
// not nil means certain previous procedure has not handled this error.
// Here we give us one more chance to make a correct behavior by handling
// this missed error.
if rewriter.err != nil {
return nil, nil, rewriter.err
}
rewriter.aggrMap = aggMapper
rewriter.windowMap = windowMapper
rewriter.asScalar = asScalar
rewriter.preprocess = preprocess
expr, resultPlan, err := b.rewriteExprNode(rewriter, exprNode, asScalar)
return expr, resultPlan, err
}
// getExpressionRewriter returns an expressionRewriter for plan p, reusing an
// entry of b.rewriterPool when one already exists at the current nesting
// depth (b.rewriterCounter).  Freshly created rewriters are appended to the
// pool; pooled ones have all per-use state reset field by field.
func (b *PlanBuilder) getExpressionRewriter(ctx context.Context, p LogicalPlan) (rewriter *expressionRewriter) {
// Fill in schema/names after the rewriter has been chosen; the deferred
// func covers both the pooled and the newly-created return paths.
defer func() {
if p != nil {
rewriter.schema = p.Schema()
rewriter.names = p.OutputNames()
}
}()
// Pool has no entry for this depth yet: create one and grow the pool.
if len(b.rewriterPool) < b.rewriterCounter {
rewriter = &expressionRewriter{p: p, b: b, sctx: b.ctx, ctx: ctx, rollExpand: b.currentBlockExpand}
rewriter.sctx.SetValue(expression.TiDBDecodeKeyFunctionKey, decodeKeyFromString)
b.rewriterPool = append(b.rewriterPool, rewriter)
return
}
// Reuse the pooled rewriter for this depth, clearing all per-use state.
rewriter = b.rewriterPool[b.rewriterCounter-1]
rewriter.p = p
rewriter.asScalar = false
rewriter.aggrMap = nil
rewriter.preprocess = nil
rewriter.insertPlan = nil
rewriter.disableFoldCounter = 0
rewriter.tryFoldCounter = 0
rewriter.ctxStack = rewriter.ctxStack[:0]
rewriter.ctxNameStk = rewriter.ctxNameStk[:0]
rewriter.ctx = ctx
rewriter.err = nil
rewriter.rollExpand = b.currentBlockExpand
return
}
// rewriteExprNode walks exprNode with the given rewriter and returns the
// rewritten expression together with the (possibly replaced) plan.  When
// asScalar is false and the walk left nothing on the context stack, only the
// plan is returned (the condition was folded into the plan itself).
func (*PlanBuilder) rewriteExprNode(rewriter *expressionRewriter, exprNode ast.ExprNode, asScalar bool) (expression.Expression, LogicalPlan, error) {
if rewriter.p != nil {
curColLen := rewriter.p.Schema().Len()
defer func() {
// Pad the name list to the (possibly grown) schema length with
// empty names, keeping only the pre-rewrite names meaningful.
names := rewriter.p.OutputNames().Shallow()[:curColLen]
for i := curColLen; i < rewriter.p.Schema().Len(); i++ {
names = append(names, types.EmptyName)
}
// After rewriting finished, only old columns are visible.
// e.g. select * from t where t.a in (select t1.a from t1);
// The output columns before we enter the subquery are the columns from t.
// But when we leave the subquery `t.a in (select t1.a from t1)`, we got a Apply operator
// and the output columns become [t.*, t1.*]. But t1.* is used only inside the subquery. If there's another filter
// which is also a subquery where t1 is involved. The name resolving will fail if we still expose the column from
// the previous subquery.
// So here we just reset the names to empty to avoid this situation.
// TODO: implement ScalarSubQuery and resolve it during optimizing. In building phase, we will not change the plan's structure.
rewriter.p.SetOutputNames(names)
}()
}
exprNode.Accept(rewriter)
if rewriter.err != nil {
return nil, nil, errors.Trace(rewriter.err)
}
// Non-scalar rewrite may legitimately leave the stack empty.
if !asScalar && len(rewriter.ctxStack) == 0 {
return nil, rewriter.p, nil
}
// Otherwise the walk must have produced exactly one result expression.
if len(rewriter.ctxStack) != 1 {
return nil, nil, errors.Errorf("context len %v is invalid", len(rewriter.ctxStack))
}
rewriter.err = expression.CheckArgsNotMultiColumnRow(rewriter.ctxStack[0])
if rewriter.err != nil {
return nil, nil, errors.Trace(rewriter.err)
}
return rewriter.ctxStack[0], rewriter.p, nil
}
// expressionRewriter is the ast.Visitor that turns AST expression nodes into
// expression.Expression values, growing the plan with Apply/semi-join
// operators when subqueries are encountered.
type expressionRewriter struct {
// ctxStack holds intermediate rewritten expressions; ctxNameStk is the
// parallel stack of their field names.
ctxStack []expression.Expression
ctxNameStk []*types.FieldName
// p is the plan the expression is rewritten against; schema/names mirror
// p's output columns (set by getExpressionRewriter).
p LogicalPlan
schema *expression.Schema
names []*types.FieldName
// err records the first error met during the walk.
err error
// aggrMap/windowMap map aggregate and window function nodes to column
// offsets in p's output schema.
aggrMap map[*ast.AggregateFuncExpr]int
windowMap map[*ast.WindowFuncExpr]int
b *PlanBuilder
sctx sessionctx.Context
ctx context.Context
// asScalar indicates the return value must be a scalar value.
// NOTE: This value can be changed during expression rewritten.
asScalar bool
// preprocess is called for every ast.Node in Leave.
preprocess func(ast.Node) ast.Node
// insertPlan is only used to rewrite the expressions inside the assignment
// of the "INSERT" statement.
insertPlan *Insert
// disableFoldCounter controls fold-disabled scope. If > 0, rewriter will NOT do constant folding.
// Typically, during visiting AST, while entering the scope(disable), the counter will +1; while
// leaving the scope(enable again), the counter will -1.
// NOTE: This value can be changed during expression rewritten.
disableFoldCounter int
// tryFoldCounter tracks scopes where folding is attempted opportunistically.
tryFoldCounter int
// rollExpand is the LogicalExpand of the current query block, if any.
rollExpand *LogicalExpand
}
// ctxStackLen reports how many expressions are currently on the
// rewriter's context stack.
func (er *expressionRewriter) ctxStackLen() int {
	return len(er.ctxStack)
}
// ctxStackPop drops the top num entries from both the expression stack
// and the parallel name stack.
func (er *expressionRewriter) ctxStackPop(num int) {
	newLen := er.ctxStackLen() - num
	er.ctxStack = er.ctxStack[:newLen]
	er.ctxNameStk = er.ctxNameStk[:newLen]
}
// ctxStackAppend pushes an expression and its field name onto the
// rewriter's parallel context stacks.
func (er *expressionRewriter) ctxStackAppend(col expression.Expression, name *types.FieldName) {
	er.ctxStack, er.ctxNameStk = append(er.ctxStack, col), append(er.ctxNameStk, name)
}
// constructBinaryOpFunction converts binary operator functions
// 1. If op are EQ or NE or NullEQ, constructBinaryOpFunctions converts (a0,a1,a2) op (b0,b1,b2) to (a0 op b0) and (a1 op b1) and (a2 op b2)
// 2. Else constructBinaryOpFunctions converts (a0,a1,a2) op (b0,b1,b2) to
// `IF( a0 NE b0, a0 op b0,
//
// IF ( isNull(a0 NE b0), Null,
// IF ( a1 NE b1, a1 op b1,
// IF ( isNull(a1 NE b1), Null, a2 op b2))))`
func (er *expressionRewriter) constructBinaryOpFunction(l expression.Expression, r expression.Expression, op string) (expression.Expression, error) {
lLen, rLen := expression.GetRowLen(l), expression.GetRowLen(r)
// Scalar op scalar: build the comparison directly.
if lLen == 1 && rLen == 1 {
return er.newFunction(op, types.NewFieldType(mysql.TypeTiny), l, r)
} else if rLen != lLen {
// Row operands must have the same number of columns.
return nil, expression.ErrOperandColumns.GenWithStackByArgs(lLen)
}
switch op {
case ast.EQ, ast.NE, ast.NullEQ:
// Compare component-wise, then combine: OR for NE, AND otherwise.
funcs := make([]expression.Expression, lLen)
for i := 0; i < lLen; i++ {
var err error
funcs[i], err = er.constructBinaryOpFunction(expression.GetFuncArg(l, i), expression.GetFuncArg(r, i), op)
if err != nil {
return nil, err
}
}
if op == ast.NE {
return expression.ComposeDNFCondition(er.sctx, funcs...), nil
}
return expression.ComposeCNFCondition(er.sctx, funcs...), nil
default:
// Build the nested IF chain from the doc comment, peeling one leading
// column off each row per recursion step.
larg0, rarg0 := expression.GetFuncArg(l, 0), expression.GetFuncArg(r, 0)
var expr1, expr2, expr3, expr4, expr5 expression.Expression
expr1 = expression.NewFunctionInternal(er.sctx, ast.NE, types.NewFieldType(mysql.TypeTiny), larg0, rarg0)
expr2 = expression.NewFunctionInternal(er.sctx, op, types.NewFieldType(mysql.TypeTiny), larg0, rarg0)
expr3 = expression.NewFunctionInternal(er.sctx, ast.IsNull, types.NewFieldType(mysql.TypeTiny), expr1)
var err error
l, err = expression.PopRowFirstArg(er.sctx, l)
if err != nil {
return nil, err
}
r, err = expression.PopRowFirstArg(er.sctx, r)
if err != nil {
return nil, err
}
// Recurse on the remaining columns.
expr4, err = er.constructBinaryOpFunction(l, r, op)
if err != nil {
return nil, err
}
expr5, err = er.newFunction(ast.If, types.NewFieldType(mysql.TypeTiny), expr3, expression.NewNull(), expr4)
if err != nil {
return nil, err
}
return er.newFunction(ast.If, types.NewFieldType(mysql.TypeTiny), expr1, expr2, expr5)
}
}
// buildSubquery translates the subquery ast to plan.
// Subquery related hints are returned through hintFlags. Please see comments around HintFlagSemiJoinRewrite and PlanBuilder.subQueryHintFlags for details.
func (er *expressionRewriter) buildSubquery(ctx context.Context, subq *ast.SubqueryExpr, subqueryCtx subQueryCtx) (np LogicalPlan, hintFlags uint64, err error) {
// Expose the current scope's columns to the subquery as outer references,
// popping them again (via defer) once the subquery has been built.
if er.schema != nil {
outerSchema := er.schema.Clone()
er.b.outerSchemas = append(er.b.outerSchemas, outerSchema)
er.b.outerNames = append(er.b.outerNames, er.names)
er.b.outerBlockExpand = append(er.b.outerBlockExpand, er.b.currentBlockExpand)
defer func() {
er.b.outerSchemas = er.b.outerSchemas[0 : len(er.b.outerSchemas)-1]
er.b.outerNames = er.b.outerNames[0 : len(er.b.outerNames)-1]
er.b.currentBlockExpand = er.b.outerBlockExpand[len(er.b.outerBlockExpand)-1]
er.b.outerBlockExpand = er.b.outerBlockExpand[0 : len(er.b.outerBlockExpand)-1]
}()
}
// Store the old value before we enter the subquery and reset they to default value.
oldSubQCtx := er.b.subQueryCtx
er.b.subQueryCtx = subqueryCtx
oldHintFlags := er.b.subQueryHintFlags
er.b.subQueryHintFlags = 0
outerWindowSpecs := er.b.windowSpecs
defer func() {
// Restore the builder state saved above when we leave the subquery.
er.b.windowSpecs = outerWindowSpecs
er.b.subQueryCtx = oldSubQCtx
er.b.subQueryHintFlags = oldHintFlags
}()
np, err = er.b.buildResultSetNode(ctx, subq.Query, false)
if err != nil {
return nil, 0, err
}
// Capture the flags before the deferred restore overwrites them.
hintFlags = er.b.subQueryHintFlags
// Pop the handle map generated by the subquery.
er.b.handleHelper.popMap()
return np, hintFlags, nil
}
// Enter implements Visitor interface.
// It pre-handles nodes that must not be walked generically: aggregate and
// window references are resolved through the offset maps, and subquery
// expressions are rewritten into plan operators immediately.  Returning true
// as the second value tells the walker to skip the node's children.
func (er *expressionRewriter) Enter(inNode ast.Node) (ast.Node, bool) {
switch v := inNode.(type) {
case *ast.AggregateFuncExpr:
index, ok := -1, false
if er.aggrMap != nil {
index, ok = er.aggrMap[v]
}
if ok {
// index < 0 indicates this is a correlated aggregate belonging to outer query,
// for which a correlated column will be created later, so we append a null constant
// as a temporary result expression.
if index < 0 {
er.ctxStackAppend(expression.NewNull(), types.EmptyName)
} else {
// index >= 0 indicates this is a regular aggregate column
er.ctxStackAppend(er.schema.Columns[index], er.names[index])
}
return inNode, true
}
// replace correlated aggregate in sub-query with its corresponding correlated column
if col, ok := er.b.correlatedAggMapper[v]; ok {
er.ctxStackAppend(col, types.EmptyName)
return inNode, true
}
// An aggregate that is in neither map is used in an illegal position.
er.err = ErrInvalidGroupFuncUse
return inNode, true
case *ast.ColumnNameExpr:
// Columns pre-resolved by the builder are taken from the schema directly.
if index, ok := er.b.colMapper[v]; ok {
er.ctxStackAppend(er.schema.Columns[index], er.names[index])
return inNode, true
}
case *ast.CompareSubqueryExpr:
return er.handleCompareSubquery(er.ctx, v)
case *ast.ExistsSubqueryExpr:
return er.handleExistSubquery(er.ctx, v)
case *ast.PatternInExpr:
if v.Sel != nil {
return er.handleInSubquery(er.ctx, v)
}
if len(v.List) != 1 {
break
}
// For 10 in ((select * from t)), the parser won't set v.Sel.
// So we must process this case here.
// Unwrap nested parentheses until we find the subquery (or give up).
x := v.List[0]
for {
switch y := x.(type) {
case *ast.SubqueryExpr:
v.Sel = y
return er.handleInSubquery(er.ctx, v)
case *ast.ParenthesesExpr:
x = y.Expr
default:
return inNode, false
}
}
case *ast.SubqueryExpr:
return er.handleScalarSubquery(er.ctx, v)
case *ast.ParenthesesExpr:
case *ast.ValuesExpr:
schema, names := er.schema, er.names
// NOTE: "er.insertPlan != nil" means that we are rewriting the
// expressions inside the assignment of "INSERT" statement. we have to
// use the "tableSchema" of that "insertPlan".
if er.insertPlan != nil {
schema = er.insertPlan.tableSchema
names = er.insertPlan.tableColNames
}
idx, err := expression.FindFieldName(names, v.Column.Name)
if err != nil {
er.err = err
return inNode, false
}
if idx < 0 {
er.err = ErrUnknownColumn.GenWithStackByArgs(v.Column.Name.OrigColName(), "field list")
return inNode, false
}
col := schema.Columns[idx]
er.ctxStackAppend(expression.NewValuesFunc(er.sctx, col.Index, col.RetType), types.EmptyName)
return inNode, true
case *ast.WindowFuncExpr:
index, ok := -1, false
if er.windowMap != nil {
index, ok = er.windowMap[v]
}
if !ok {
er.err = ErrWindowInvalidWindowFuncUse.GenWithStackByArgs(strings.ToLower(v.Name))
return inNode, true
}
er.ctxStackAppend(er.schema.Columns[index], er.names[index])
return inNode, true
case *ast.FuncCallExpr:
er.asScalar = true
// Track constant-folding scopes for functions that disable or
// conditionally allow folding (see disableFoldCounter/tryFoldCounter).
if _, ok := expression.DisableFoldFunctions[v.FnName.L]; ok {
er.disableFoldCounter++
}
if _, ok := expression.TryFoldFunctions[v.FnName.L]; ok {
er.tryFoldCounter++
}
case *ast.CaseExpr:
er.asScalar = true
if _, ok := expression.DisableFoldFunctions["case"]; ok {
er.disableFoldCounter++
}
if _, ok := expression.TryFoldFunctions["case"]; ok {
er.tryFoldCounter++
}
case *ast.BinaryOperationExpr:
er.asScalar = true
if v.Op == opcode.LogicAnd || v.Op == opcode.LogicOr {
er.tryFoldCounter++
}
case *ast.SetCollationExpr:
// Do nothing
default:
er.asScalar = true
}
return inNode, false
}
// buildSemiApplyFromEqualSubq rewrites `l = any(subq)` (not=false) or
// `l != all(subq)` (not=true) into a semi-apply between the current plan and
// np, with an equality condition between l and the subquery output r.  When
// either side may be NULL, the columns are marked InOperand so IN-style NULL
// semantics are preserved.  Errors are reported through er.err.
func (er *expressionRewriter) buildSemiApplyFromEqualSubq(np LogicalPlan, l, r expression.Expression, not, markNoDecorrelate bool) {
if er.asScalar || not {
if expression.GetRowLen(r) == 1 {
rCol := r.(*expression.Column)
// If both input columns of `!= all / = any` expression are not null, we can treat the expression
// as normal column equal condition.
if !expression.ExprNotNull(l) || !expression.ExprNotNull(rCol) {
rColCopy := *rCol
rColCopy.InOperand = true
r = &rColCopy
l = expression.SetExprColumnInOperand(l)
}
} else {
// Row-valued comparison: mark InOperand on each nullable column pair,
// rebuilding the row function only if something changed.
rowFunc := r.(*expression.ScalarFunction)
rargs := rowFunc.GetArgs()
args := make([]expression.Expression, 0, len(rargs))
modified := false
for i, rarg := range rargs {
larg := expression.GetFuncArg(l, i)
if !expression.ExprNotNull(larg) || !expression.ExprNotNull(rarg) {
rCol := rarg.(*expression.Column)
rColCopy := *rCol
rColCopy.InOperand = true
rarg = &rColCopy
modified = true
}
args = append(args, rarg)
}
if modified {
r, er.err = er.newFunction(ast.RowFunc, args[0].GetType(), args...)
if er.err != nil {
return
}
l = expression.SetExprColumnInOperand(l)
}
}
}
var condition expression.Expression
condition, er.err = er.constructBinaryOpFunction(l, r, ast.EQ)
if er.err != nil {
return
}
er.p, er.err = er.b.buildSemiApply(er.p, np, []expression.Expression{condition}, er.asScalar, not, false, markNoDecorrelate)
}
// handleCompareSubquery rewrites comparisons against a subquery, e.g.
// `a = any (subq)`, `a != all (subq)`, `a < all (subq)`, into plan operators
// (semi-apply or aggregation-based quantifier plans).  Errors are reported
// through er.err; the returned true stops the walker from visiting children.
func (er *expressionRewriter) handleCompareSubquery(ctx context.Context, v *ast.CompareSubqueryExpr) (ast.Node, bool) {
ci := er.b.prepareCTECheckForSubQuery()
defer resetCTECheckForSubQuery(ci)
// Rewrite the left-hand operand first; its result is on top of the stack.
v.L.Accept(er)
if er.err != nil {
return v, true
}
lexpr := er.ctxStack[len(er.ctxStack)-1]
subq, ok := v.R.(*ast.SubqueryExpr)
if !ok {
er.err = errors.Errorf("Unknown compare type %T", v.R)
return v, true
}
np, hintFlags, err := er.buildSubquery(ctx, subq, handlingCompareSubquery)
if err != nil {
er.err = err
return v, true
}
// NO_DECORRELATE() only makes sense when the subquery is correlated.
noDecorrelate := hintFlags&HintFlagNoDecorrelate > 0
if noDecorrelate && len(extractCorColumnsBySchema4LogicalPlan(np, er.p.Schema())) == 0 {
er.sctx.GetSessionVars().StmtCtx.AppendWarning(ErrInternal.GenWithStack(
"NO_DECORRELATE() is inapplicable because there are no correlated columns."))
noDecorrelate = false
}
// Only (a,b,c) = any (...) and (a,b,c) != all (...) can use row expression.
canMultiCol := (!v.All && v.Op == opcode.EQ) || (v.All && v.Op == opcode.NE)
if !canMultiCol && (expression.GetRowLen(lexpr) != 1 || np.Schema().Len() != 1) {
er.err = expression.ErrOperandColumns.GenWithStackByArgs(1)
return v, true
}
lLen := expression.GetRowLen(lexpr)
if lLen != np.Schema().Len() {
er.err = expression.ErrOperandColumns.GenWithStackByArgs(lLen)
return v, true
}
// Build the right-hand expression: a single column, or a row function
// over all of the subquery's output columns.
var rexpr expression.Expression
if np.Schema().Len() == 1 {
rexpr = np.Schema().Columns[0]
} else {
args := make([]expression.Expression, 0, np.Schema().Len())
for _, col := range np.Schema().Columns {
args = append(args, col)
}
rexpr, er.err = er.newFunction(ast.RowFunc, args[0].GetType(), args...)
if er.err != nil {
return v, true
}
}
// Lexpr cannot compare with rexpr by different collate
opString := new(strings.Builder)
v.Op.Format(opString)
_, er.err = expression.CheckAndDeriveCollationFromExprs(er.sctx, opString.String(), types.ETInt, lexpr, rexpr)
if er.err != nil {
return v, true
}
switch v.Op {
// Only EQ, NE and NullEQ can be composed with and.
case opcode.EQ, opcode.NE, opcode.NullEQ:
if v.Op == opcode.EQ {
if v.All {
er.handleEQAll(lexpr, rexpr, np, noDecorrelate)
} else {
// `a = any(subq)` will be rewriten as `a in (subq)`.
er.asScalar = true
er.buildSemiApplyFromEqualSubq(np, lexpr, rexpr, false, noDecorrelate)
if er.err != nil {
return v, true
}
}
} else if v.Op == opcode.NE {
if v.All {
// `a != all(subq)` will be rewriten as `a not in (subq)`.
er.asScalar = true
er.buildSemiApplyFromEqualSubq(np, lexpr, rexpr, true, noDecorrelate)
if er.err != nil {
return v, true
}
} else {
er.handleNEAny(lexpr, rexpr, np, noDecorrelate)
}
} else {
// TODO: Support this in future.
er.err = errors.New("We don't support <=> all or <=> any now")
return v, true
}
default:
// When < all or > any , the agg function should use min.
useMin := ((v.Op == opcode.LT || v.Op == opcode.LE) && v.All) || ((v.Op == opcode.GT || v.Op == opcode.GE) && !v.All)
er.handleOtherComparableSubq(lexpr, rexpr, np, useMin, v.Op.String(), v.All, noDecorrelate)
}
if er.asScalar {
// The parent expression only use the last column in schema, which represents whether the condition is matched.
er.ctxStack[len(er.ctxStack)-1] = er.p.Schema().Columns[er.p.Schema().Len()-1]
er.ctxNameStk[len(er.ctxNameStk)-1] = er.p.OutputNames()[er.p.Schema().Len()-1]
}
return v, true
}
// handleOtherComparableSubq handles the queries like < any, < max, etc. For example, if the query is t.id < any (select s.id from s),
// it will be rewrote to t.id < (select max(s.id) from s).
// useMin selects min() over max() as the aggregate; cmpFunc is the comparison
// operator name; all distinguishes ALL from ANY.  Errors go through er.err.
func (er *expressionRewriter) handleOtherComparableSubq(lexpr, rexpr expression.Expression, np LogicalPlan, useMin bool, cmpFunc string, all, markNoDecorrelate bool) {
plan4Agg := LogicalAggregation{}.Init(er.sctx, er.b.getSelectOffset())
if hint := er.b.TableHints(); hint != nil {
plan4Agg.aggHints = hint.aggHints
}
plan4Agg.SetChildren(np)
// Create a "max" or "min" aggregation.
funcName := ast.AggFuncMax
if useMin {
funcName = ast.AggFuncMin
}
funcMaxOrMin, err := aggregation.NewAggFuncDesc(er.sctx, funcName, []expression.Expression{rexpr}, false)
if err != nil {
er.err = err
return
}
// Create a column and append it to the schema of that aggregation.
colMaxOrMin := &expression.Column{
UniqueID: er.sctx.GetSessionVars().AllocPlanColumnID(),
RetType: funcMaxOrMin.RetTp,
}
colMaxOrMin.SetCoercibility(rexpr.Coercibility())
schema := expression.NewSchema(colMaxOrMin)
plan4Agg.names = append(plan4Agg.names, types.EmptyName)
plan4Agg.SetSchema(schema)
plan4Agg.AggFuncs = []*aggregation.AggFuncDesc{funcMaxOrMin}
// Compare the outer expression against the aggregated extreme value, then
// let buildQuantifierPlan add the NULL/empty-set bookkeeping.
cond := expression.NewFunctionInternal(er.sctx, cmpFunc, types.NewFieldType(mysql.TypeTiny), lexpr, colMaxOrMin)
er.buildQuantifierPlan(plan4Agg, cond, lexpr, rexpr, all, markNoDecorrelate)
}
// buildQuantifierPlan adds extra condition for any / all subquery.
// It augments plan4Agg with sum(rexpr IS NULL) and count(1) aggregates so the
// final condition can reproduce SQL's three-valued NULL and empty-set
// semantics, then attaches the result to er.p as either a semi-apply (when a
// boolean predicate suffices) or an inner apply plus projection (when the
// result is consumed as a scalar value).
func (er *expressionRewriter) buildQuantifierPlan(plan4Agg *LogicalAggregation, cond, lexpr, rexpr expression.Expression, all, markNoDecorrelate bool) {
innerIsNull := expression.NewFunctionInternal(er.sctx, ast.IsNull, types.NewFieldType(mysql.TypeTiny), rexpr)
outerIsNull := expression.NewFunctionInternal(er.sctx, ast.IsNull, types.NewFieldType(mysql.TypeTiny), lexpr)
// sum(rexpr IS NULL) > 0 tells us whether the subquery produced any NULLs.
funcSum, err := aggregation.NewAggFuncDesc(er.sctx, ast.AggFuncSum, []expression.Expression{innerIsNull}, false)
if err != nil {
er.err = err
return
}
colSum := &expression.Column{
UniqueID: er.sctx.GetSessionVars().AllocPlanColumnID(),
RetType: funcSum.RetTp,
}
plan4Agg.AggFuncs = append(plan4Agg.AggFuncs, funcSum)
plan4Agg.schema.Append(colSum)
innerHasNull := expression.NewFunctionInternal(er.sctx, ast.NE, types.NewFieldType(mysql.TypeTiny), colSum, expression.NewZero())
// Build `count(1)` aggregation to check if subquery is empty.
funcCount, err := aggregation.NewAggFuncDesc(er.sctx, ast.AggFuncCount, []expression.Expression{expression.NewOne()}, false)
if err != nil {
er.err = err
return
}
colCount := &expression.Column{
UniqueID: er.sctx.GetSessionVars().AllocPlanColumnID(),
RetType: funcCount.RetTp,
}
plan4Agg.AggFuncs = append(plan4Agg.AggFuncs, funcCount)
plan4Agg.schema.Append(colCount)
if all {
// All of the inner record set should not contain null value. So for t.id < all(select s.id from s), it
// should be rewrote to t.id < min(s.id) and if(sum(s.id is null) != 0, null, true).
innerNullChecker := expression.NewFunctionInternal(er.sctx, ast.If, types.NewFieldType(mysql.TypeTiny), innerHasNull, expression.NewNull(), expression.NewOne())
cond = expression.ComposeCNFCondition(er.sctx, cond, innerNullChecker)
// If the subquery is empty, it should always return true.
emptyChecker := expression.NewFunctionInternal(er.sctx, ast.EQ, types.NewFieldType(mysql.TypeTiny), colCount, expression.NewZero())
// If outer key is null, and subquery is not empty, it should always return null, even when it is `null = all (1, 2)`.
outerNullChecker := expression.NewFunctionInternal(er.sctx, ast.If, types.NewFieldType(mysql.TypeTiny), outerIsNull, expression.NewNull(), expression.NewZero())
cond = expression.ComposeDNFCondition(er.sctx, cond, emptyChecker, outerNullChecker)
} else {
// For "any" expression, if the subquery has null and the cond returns false, the result should be NULL.
// Specifically, `t.id < any (select s.id from s)` would be rewrote to `t.id < max(s.id) or if(sum(s.id is null) != 0, null, false)`
innerNullChecker := expression.NewFunctionInternal(er.sctx, ast.If, types.NewFieldType(mysql.TypeTiny), innerHasNull, expression.NewNull(), expression.NewZero())
cond = expression.ComposeDNFCondition(er.sctx, cond, innerNullChecker)
// If the subquery is empty, it should always return false.
emptyChecker := expression.NewFunctionInternal(er.sctx, ast.NE, types.NewFieldType(mysql.TypeTiny), colCount, expression.NewZero())
// If outer key is null, and subquery is not empty, it should return null.
outerNullChecker := expression.NewFunctionInternal(er.sctx, ast.If, types.NewFieldType(mysql.TypeTiny), outerIsNull, expression.NewNull(), expression.NewOne())
cond = expression.ComposeCNFCondition(er.sctx, cond, emptyChecker, outerNullChecker)
}
// TODO: Add a Projection if any argument of aggregate funcs or group by items are scalar functions.
// plan4Agg.buildProjectionIfNecessary()
if !er.asScalar {
// For Semi LogicalApply without aux column, the result is no matter false or null. So we can add it to join predicate.
er.p, er.err = er.b.buildSemiApply(er.p, plan4Agg, []expression.Expression{cond}, false, false, false, markNoDecorrelate)
return
}
// If we treat the result as a scalar value, we will add a projection with a extra column to output true, false or null.
outerSchemaLen := er.p.Schema().Len()
er.p = er.b.buildApplyWithJoinType(er.p, plan4Agg, InnerJoin, markNoDecorrelate)
joinSchema := er.p.Schema()
// The projection keeps the original outer columns and appends one extra
// column carrying the quantifier condition's result.
proj := LogicalProjection{
Exprs: expression.Column2Exprs(joinSchema.Clone().Columns[:outerSchemaLen]),
}.Init(er.sctx, er.b.getSelectOffset())
proj.names = make([]*types.FieldName, outerSchemaLen, outerSchemaLen+1)
copy(proj.names, er.p.OutputNames())
proj.SetSchema(expression.NewSchema(joinSchema.Clone().Columns[:outerSchemaLen]...))
proj.Exprs = append(proj.Exprs, cond)
proj.schema.Append(&expression.Column{
UniqueID: er.sctx.GetSessionVars().AllocPlanColumnID(),
RetType: cond.GetType(),
})
proj.names = append(proj.names, types.EmptyName)
proj.SetChildren(er.p)
er.p = proj
}
// handleNEAny handles the `!= any` case. For example, `t.id != any (select s.id from s)` is rewritten to
// `t.id != max(s.id) or count(distinct s.id) > 1 or [any checker]`: if s.id contains two different values,
// at least one of them must differ from t.id, so the count(distinct) check alone decides the result then.
func (er *expressionRewriter) handleNEAny(lexpr, rexpr expression.Expression, np LogicalPlan, markNoDecorrelate bool) {
	// If there is NULL in s.id column, s.id should be the value that isn't null in condition t.id != s.id.
	// So use function max to filter NULL.
	maxFunc, err := aggregation.NewAggFuncDesc(er.sctx, ast.AggFuncMax, []expression.Expression{rexpr}, false)
	if err != nil {
		er.err = err
		return
	}
	// count(distinct rexpr) counts the distinct non-null inner values.
	countFunc, err := aggregation.NewAggFuncDesc(er.sctx, ast.AggFuncCount, []expression.Expression{rexpr}, true)
	if err != nil {
		er.err = err
		return
	}
	plan4Agg := LogicalAggregation{
		AggFuncs: []*aggregation.AggFuncDesc{maxFunc, countFunc},
	}.Init(er.sctx, er.b.getSelectOffset())
	// Propagate aggregation hints from the enclosing statement, if any.
	if hint := er.b.TableHints(); hint != nil {
		plan4Agg.aggHints = hint.aggHints
	}
	plan4Agg.SetChildren(np)
	maxResultCol := &expression.Column{
		UniqueID: er.sctx.GetSessionVars().AllocPlanColumnID(),
		RetType:  maxFunc.RetTp,
	}
	// max() keeps the coercibility of its argument for collation resolution.
	maxResultCol.SetCoercibility(rexpr.Coercibility())
	count := &expression.Column{
		UniqueID: er.sctx.GetSessionVars().AllocPlanColumnID(),
		RetType:  countFunc.RetTp,
	}
	plan4Agg.names = append(plan4Agg.names, types.EmptyName, types.EmptyName)
	plan4Agg.SetSchema(expression.NewSchema(maxResultCol, count))
	// count(distinct s.id) > 1: more than one distinct value guarantees some mismatch exists.
	gtFunc := expression.NewFunctionInternal(er.sctx, ast.GT, types.NewFieldType(mysql.TypeTiny), count, expression.NewOne())
	// t.id != max(s.id): decides the single-distinct-value case.
	neCond := expression.NewFunctionInternal(er.sctx, ast.NE, types.NewFieldType(mysql.TypeTiny), lexpr, maxResultCol)
	cond := expression.ComposeDNFCondition(er.sctx, gtFunc, neCond)
	er.buildQuantifierPlan(plan4Agg, cond, lexpr, rexpr, false, markNoDecorrelate)
}
// handleEQAll handles the `= all` case. For example, `t.id = all (select s.id from s)` is rewritten to
// `t.id = (select s.id from s having count(distinct s.id) <= 1 and [all checker])`: equality with all
// rows is only possible when there is at most one distinct inner value.
func (er *expressionRewriter) handleEQAll(lexpr, rexpr expression.Expression, np LogicalPlan, markNoDecorrelate bool) {
	// firstrow picks a representative inner value to compare against.
	firstRowFunc, err := aggregation.NewAggFuncDesc(er.sctx, ast.AggFuncFirstRow, []expression.Expression{rexpr}, false)
	if err != nil {
		er.err = err
		return
	}
	// count(distinct rexpr) counts the distinct non-null inner values.
	countFunc, err := aggregation.NewAggFuncDesc(er.sctx, ast.AggFuncCount, []expression.Expression{rexpr}, true)
	if err != nil {
		er.err = err
		return
	}
	plan4Agg := LogicalAggregation{
		AggFuncs: []*aggregation.AggFuncDesc{firstRowFunc, countFunc},
	}.Init(er.sctx, er.b.getSelectOffset())
	// Propagate aggregation hints from the enclosing statement, if any.
	if hint := er.b.TableHints(); hint != nil {
		plan4Agg.aggHints = hint.aggHints
	}
	plan4Agg.SetChildren(np)
	plan4Agg.names = append(plan4Agg.names, types.EmptyName)
	// Currently, firstrow agg function is treated like the exact representation of aggregate group key,
	// so the data type is the same with group key, even if the group key is not null.
	// However, the return type of firstrow should be nullable, we clear the null flag here instead of
	// during invoking NewAggFuncDesc, in order to keep compatibility with the existing presumption
	// that the return type firstrow does not change nullability, whatsoever.
	// Cloning it because the return type is the same object with argument's data type.
	newRetTp := firstRowFunc.RetTp.Clone()
	newRetTp.DelFlag(mysql.NotNullFlag)
	firstRowFunc.RetTp = newRetTp
	firstRowResultCol := &expression.Column{
		UniqueID: er.sctx.GetSessionVars().AllocPlanColumnID(),
		RetType:  firstRowFunc.RetTp,
	}
	// firstrow keeps the coercibility of its argument for collation resolution.
	firstRowResultCol.SetCoercibility(rexpr.Coercibility())
	plan4Agg.names = append(plan4Agg.names, types.EmptyName)
	count := &expression.Column{
		UniqueID: er.sctx.GetSessionVars().AllocPlanColumnID(),
		RetType:  countFunc.RetTp,
	}
	plan4Agg.SetSchema(expression.NewSchema(firstRowResultCol, count))
	// count(distinct s.id) <= 1: required for `= all` to be satisfiable at all.
	leFunc := expression.NewFunctionInternal(er.sctx, ast.LE, types.NewFieldType(mysql.TypeTiny), count, expression.NewOne())
	// t.id = firstrow(s.id): compares against the single distinct value.
	eqCond := expression.NewFunctionInternal(er.sctx, ast.EQ, types.NewFieldType(mysql.TypeTiny), lexpr, firstRowResultCol)
	cond := expression.ComposeCNFCondition(er.sctx, leFunc, eqCond)
	er.buildQuantifierPlan(plan4Agg, cond, lexpr, rexpr, true, markNoDecorrelate)
}
// handleExistSubquery rewrites an EXISTS / NOT EXISTS subquery. Correlated subqueries (or ones that
// contain CTE consumers, or when subquery preprocessing is disabled) are turned into a semi-apply;
// otherwise the subquery is optimized and executed eagerly here and folded into a constant 1/0,
// except under EXPLAIN (with ExplainNonEvaledSubQuery) where a symbolic scalar-subquery expression
// is kept so the plan remains readable.
func (er *expressionRewriter) handleExistSubquery(ctx context.Context, v *ast.ExistsSubqueryExpr) (ast.Node, bool) {
	ci := er.b.prepareCTECheckForSubQuery()
	defer resetCTECheckForSubQuery(ci)
	subq, ok := v.Sel.(*ast.SubqueryExpr)
	if !ok {
		er.err = errors.Errorf("Unknown exists type %T", v.Sel)
		return v, true
	}
	np, hintFlags, err := er.buildSubquery(ctx, subq, handlingExistsSubquery)
	if err != nil {
		er.err = err
		return v, true
	}
	// Strip plan nodes (sort/projection/scalar-agg) that cannot change EXISTS semantics.
	np = er.popExistsSubPlan(np)
	noDecorrelate := hintFlags&HintFlagNoDecorrelate > 0
	if noDecorrelate && len(extractCorColumnsBySchema4LogicalPlan(np, er.p.Schema())) == 0 {
		er.sctx.GetSessionVars().StmtCtx.AppendWarning(ErrInternal.GenWithStack(
			"NO_DECORRELATE() is inapplicable because there are no correlated columns."))
		noDecorrelate = false
	}
	semiJoinRewrite := hintFlags&HintFlagSemiJoinRewrite > 0
	// The two hints contradict each other; neutralize both rather than pick arbitrarily.
	if semiJoinRewrite && noDecorrelate {
		er.sctx.GetSessionVars().StmtCtx.AppendWarning(ErrInternal.GenWithStack(
			"NO_DECORRELATE() and SEMI_JOIN_REWRITE() are in conflict. Both will be ineffective."))
		noDecorrelate = false
		semiJoinRewrite = false
	}
	if er.b.disableSubQueryPreprocessing || len(ExtractCorrelatedCols4LogicalPlan(np)) > 0 || hasCTEConsumerInSubPlan(np) {
		// Correlated (or otherwise non-precomputable) case: build a semi-apply.
		er.p, er.err = er.b.buildSemiApply(er.p, np, nil, er.asScalar, v.Not, semiJoinRewrite, noDecorrelate)
		if er.err != nil || !er.asScalar {
			return v, true
		}
		er.ctxStackAppend(er.p.Schema().Columns[er.p.Schema().Len()-1], er.p.OutputNames()[er.p.Schema().Len()-1])
	} else {
		// We don't want nth_plan hint to affect separately executed subqueries here, so disable nth_plan temporarily.
		nthPlanBackup := er.sctx.GetSessionVars().StmtCtx.StmtHints.ForceNthPlan
		er.sctx.GetSessionVars().StmtCtx.StmtHints.ForceNthPlan = -1
		physicalPlan, _, err := DoOptimize(ctx, er.sctx, er.b.optFlag, np)
		er.sctx.GetSessionVars().StmtCtx.StmtHints.ForceNthPlan = nthPlanBackup
		if err != nil {
			er.err = err
			return v, true
		}
		// Under plain EXPLAIN, keep the subquery symbolic instead of evaluating it.
		if er.b.ctx.GetSessionVars().StmtCtx.InExplainStmt && !er.b.ctx.GetSessionVars().StmtCtx.InExplainAnalyzeStmt && er.b.ctx.GetSessionVars().ExplainNonEvaledSubQuery {
			newColID := er.b.ctx.GetSessionVars().AllocPlanColumnID()
			subqueryCtx := ScalarSubqueryEvalCtx{
				scalarSubQuery: physicalPlan,
				ctx:            ctx,
				is:             er.b.is,
				outputColIDs:   []int64{newColID},
			}.Init(er.b.ctx, np.SelectBlockOffset())
			scalarSubQ := &ScalarSubQueryExpr{
				scalarSubqueryColID: newColID,
				evalCtx:             subqueryCtx,
			}
			scalarSubQ.RetType = np.Schema().Columns[0].GetType()
			scalarSubQ.SetCoercibility(np.Schema().Columns[0].Coercibility())
			er.b.ctx.GetSessionVars().RegisterScalarSubQ(subqueryCtx)
			// NOT EXISTS wraps the symbolic expression in a logical NOT.
			if v.Not {
				notWrapped, err := expression.NewFunction(er.b.ctx, ast.UnaryNot, types.NewFieldType(mysql.TypeTiny), scalarSubQ)
				if err != nil {
					er.err = err
					return v, true
				}
				er.ctxStackAppend(notWrapped, types.EmptyName)
				return v, true
			}
			er.ctxStackAppend(scalarSubQ, types.EmptyName)
			return v, true
		}
		// Run the uncorrelated subquery now; EXISTS only cares whether a row comes back.
		row, err := EvalSubqueryFirstRow(ctx, physicalPlan, er.b.is, er.b.ctx)
		if err != nil {
			er.err = err
			return v, true
		}
		if (row != nil && !v.Not) || (row == nil && v.Not) {
			er.ctxStackAppend(expression.NewOne(), types.EmptyName)
		} else {
			er.ctxStackAppend(expression.NewZero(), types.EmptyName)
		}
	}
	return v, true
}
// popExistsSubPlan strips plan nodes under an EXISTS that cannot affect its result.
// Sorts and projections are dropped (e.g. `exists(select count(*) from t order by a)`
// behaves exactly like `exists t`), and a scalar aggregation (no GROUP BY) always yields
// exactly one row, so it collapses to a one-row dual table.
func (er *expressionRewriter) popExistsSubPlan(p LogicalPlan) LogicalPlan {
	for {
		switch node := p.(type) {
		case *LogicalProjection, *LogicalSort:
			// Row count is unchanged by these operators; descend.
			p = p.Children()[0]
		case *LogicalAggregation:
			if len(node.GroupByItems) == 0 {
				// A scalar agg produces exactly one row regardless of its input.
				return LogicalTableDual{RowCount: 1}.Init(er.sctx, er.b.getSelectOffset())
			}
			p = p.Children()[0]
		default:
			return p
		}
	}
}
// handleInSubquery rewrites `expr [NOT] IN (subquery)`. Depending on correlation, hints and session
// settings, the subquery is converted either into an inner join over a distinct aggregation or into
// a semi-apply. NULL-awareness of IN is preserved by marking operand columns with InOperand where
// either side may be NULL.
func (er *expressionRewriter) handleInSubquery(ctx context.Context, v *ast.PatternInExpr) (ast.Node, bool) {
	ci := er.b.prepareCTECheckForSubQuery()
	defer resetCTECheckForSubQuery(ci)
	asScalar := er.asScalar
	// The left operand must be rewritten as a scalar value.
	er.asScalar = true
	v.Expr.Accept(er)
	if er.err != nil {
		return v, true
	}
	lexpr := er.ctxStack[len(er.ctxStack)-1]
	subq, ok := v.Sel.(*ast.SubqueryExpr)
	if !ok {
		er.err = errors.Errorf("Unknown compare type %T", v.Sel)
		return v, true
	}
	np, hintFlags, err := er.buildSubquery(ctx, subq, handlingInSubquery)
	if err != nil {
		er.err = err
		return v, true
	}
	// The row width of both sides of the IN must match.
	lLen := expression.GetRowLen(lexpr)
	if lLen != np.Schema().Len() {
		er.err = expression.ErrOperandColumns.GenWithStackByArgs(lLen)
		return v, true
	}
	var rexpr expression.Expression
	if np.Schema().Len() == 1 {
		rexpr = np.Schema().Columns[0]
		rCol := rexpr.(*expression.Column)
		// For AntiSemiJoin/LeftOuterSemiJoin/AntiLeftOuterSemiJoin, we cannot treat `in` expression as
		// normal column equal condition, so we specially mark the inner operand here.
		if v.Not || asScalar {
			// If both input columns of `in` expression are not null, we can treat the expression
			// as normal column equal condition instead. Otherwise, mark the left and right side.
			// eg: for some optimization, the column substitute in right side in projection elimination
			// will cause case like <lcol EQ rcol(inOperand)> as <lcol EQ constant> which is not
			// a valid null-aware EQ. (null in lcol still need to be null-aware)
			if !expression.ExprNotNull(lexpr) || !expression.ExprNotNull(rCol) {
				rColCopy := *rCol
				rColCopy.InOperand = true
				rexpr = &rColCopy
				lexpr = expression.SetExprColumnInOperand(lexpr)
			}
		}
	} else {
		// Multi-column IN: build a row constructor over the (possibly marked) inner columns.
		args := make([]expression.Expression, 0, np.Schema().Len())
		for i, col := range np.Schema().Columns {
			if v.Not || asScalar {
				larg := expression.GetFuncArg(lexpr, i)
				// If both input columns of `in` expression are not null, we can treat the expression
				// as normal column equal condition instead. Otherwise, mark the left and right side.
				if !expression.ExprNotNull(larg) || !expression.ExprNotNull(col) {
					rarg := *col
					rarg.InOperand = true
					col = &rarg
					if larg != nil {
						lexpr.(*expression.ScalarFunction).GetArgs()[i] = expression.SetExprColumnInOperand(larg)
					}
				}
			}
			args = append(args, col)
		}
		rexpr, er.err = er.newFunction(ast.RowFunc, args[0].GetType(), args...)
		if er.err != nil {
			return v, true
		}
	}
	checkCondition, err := er.constructBinaryOpFunction(lexpr, rexpr, ast.EQ)
	if err != nil {
		er.err = err
		return v, true
	}
	// If the leftKey and the rightKey have different collations, don't convert the sub-query to an inner-join
	// since when converting we will add a distinct-agg upon the right child and this distinct-agg doesn't have the right collation.
	// To keep it simple, we forbid this converting if they have different collations.
	lt, rt := lexpr.GetType(), rexpr.GetType()
	collFlag := collate.CompatibleCollate(lt.GetCollate(), rt.GetCollate())
	noDecorrelate := hintFlags&HintFlagNoDecorrelate > 0
	corCols := extractCorColumnsBySchema4LogicalPlan(np, er.p.Schema())
	if len(corCols) == 0 && noDecorrelate {
		er.sctx.GetSessionVars().StmtCtx.AppendWarning(ErrInternal.GenWithStack(
			"NO_DECORRELATE() is inapplicable because there are no correlated columns."))
		noDecorrelate = false
	}
	// If it's not the form of `not in (SUBQUERY)`,
	// and has no correlated column from the current level plan(if the correlated column is from upper level,
	// we can treat it as constant, because the upper LogicalApply cannot be eliminated since current node is a join node),
	// and don't need to append a scalar value, we can rewrite it to inner join.
	if er.sctx.GetSessionVars().GetAllowInSubqToJoinAndAgg() && !v.Not && !asScalar && len(corCols) == 0 && collFlag {
		// We need to try to eliminate the agg and the projection produced by this operation.
		er.b.optFlag |= flagEliminateAgg
		er.b.optFlag |= flagEliminateProjection
		er.b.optFlag |= flagJoinReOrder
		// Build distinct for the inner query.
		agg, err := er.b.buildDistinct(np, np.Schema().Len())
		if err != nil {
			er.err = err
			return v, true
		}
		// Build inner join above the aggregation.
		join := LogicalJoin{JoinType: InnerJoin}.Init(er.sctx, er.b.getSelectOffset())
		join.SetChildren(er.p, agg)
		join.SetSchema(expression.MergeSchema(er.p.Schema(), agg.schema))
		join.names = make([]*types.FieldName, er.p.Schema().Len()+agg.Schema().Len())
		copy(join.names, er.p.OutputNames())
		copy(join.names[er.p.Schema().Len():], agg.OutputNames())
		join.AttachOnConds(expression.SplitCNFItems(checkCondition))
		// Set join hint for this join.
		if er.b.TableHints() != nil {
			join.setPreferredJoinTypeAndOrder(er.b.TableHints())
		}
		er.p = join
	} else {
		er.p, er.err = er.b.buildSemiApply(er.p, np, expression.SplitCNFItems(checkCondition), asScalar, v.Not, false, noDecorrelate)
		if er.err != nil {
			return v, true
		}
	}
	// Pop the left operand; when used as a scalar, push the auxiliary boolean column instead.
	er.ctxStackPop(1)
	if asScalar {
		col := er.p.Schema().Columns[er.p.Schema().Len()-1]
		er.ctxStackAppend(col, er.p.OutputNames()[er.p.Schema().Len()-1])
	}
	return v, true
}
// handleScalarSubquery rewrites a scalar subquery expression. Correlated subqueries (or ones that
// contain CTE consumers, or when preprocessing is disabled) become a left-outer apply; otherwise the
// subquery is optimized and executed eagerly and its first row is folded into constants, except
// under EXPLAIN (with ExplainNonEvaledSubQuery) where symbolic scalar-subquery expressions are kept.
// Multi-column subqueries are wrapped in a row constructor in every branch.
func (er *expressionRewriter) handleScalarSubquery(ctx context.Context, v *ast.SubqueryExpr) (ast.Node, bool) {
	ci := er.b.prepareCTECheckForSubQuery()
	defer resetCTECheckForSubQuery(ci)
	np, hintFlags, err := er.buildSubquery(ctx, v, handlingScalarSubquery)
	if err != nil {
		er.err = err
		return v, true
	}
	// A scalar subquery must yield at most one row; enforce that with a MaxOneRow operator.
	np = er.b.buildMaxOneRow(np)
	noDecorrelate := hintFlags&HintFlagNoDecorrelate > 0
	if noDecorrelate && len(extractCorColumnsBySchema4LogicalPlan(np, er.p.Schema())) == 0 {
		er.sctx.GetSessionVars().StmtCtx.AppendWarning(ErrInternal.GenWithStack(
			"NO_DECORRELATE() is inapplicable because there are no correlated columns."))
		noDecorrelate = false
	}
	if er.b.disableSubQueryPreprocessing || len(ExtractCorrelatedCols4LogicalPlan(np)) > 0 || hasCTEConsumerInSubPlan(np) {
		// Correlated (or otherwise non-precomputable) case: attach via left-outer apply.
		er.p = er.b.buildApplyWithJoinType(er.p, np, LeftOuterJoin, noDecorrelate)
		if np.Schema().Len() > 1 {
			newCols := make([]expression.Expression, 0, np.Schema().Len())
			for _, col := range np.Schema().Columns {
				newCols = append(newCols, col)
			}
			expr, err1 := er.newFunction(ast.RowFunc, newCols[0].GetType(), newCols...)
			if err1 != nil {
				er.err = err1
				return v, true
			}
			er.ctxStackAppend(expr, types.EmptyName)
		} else {
			er.ctxStackAppend(er.p.Schema().Columns[er.p.Schema().Len()-1], er.p.OutputNames()[er.p.Schema().Len()-1])
		}
		return v, true
	}
	// We don't want nth_plan hint to affect separately executed subqueries here, so disable nth_plan temporarily.
	nthPlanBackup := er.sctx.GetSessionVars().StmtCtx.StmtHints.ForceNthPlan
	er.sctx.GetSessionVars().StmtCtx.StmtHints.ForceNthPlan = -1
	physicalPlan, _, err := DoOptimize(ctx, er.sctx, er.b.optFlag, np)
	er.sctx.GetSessionVars().StmtCtx.StmtHints.ForceNthPlan = nthPlanBackup
	if err != nil {
		er.err = err
		return v, true
	}
	// Under plain EXPLAIN, keep the subquery symbolic instead of evaluating it.
	if er.b.ctx.GetSessionVars().StmtCtx.InExplainStmt && !er.b.ctx.GetSessionVars().StmtCtx.InExplainAnalyzeStmt && er.b.ctx.GetSessionVars().ExplainNonEvaledSubQuery {
		subqueryCtx := ScalarSubqueryEvalCtx{
			scalarSubQuery: physicalPlan,
			ctx:            ctx,
			is:             er.b.is,
		}.Init(er.b.ctx, np.SelectBlockOffset())
		newColIDs := make([]int64, 0, np.Schema().Len())
		newScalarSubQueryExprs := make([]expression.Expression, 0, np.Schema().Len())
		for _, col := range np.Schema().Columns {
			newColID := er.b.ctx.GetSessionVars().AllocPlanColumnID()
			scalarSubQ := &ScalarSubQueryExpr{
				scalarSubqueryColID: newColID,
				evalCtx:             subqueryCtx,
			}
			scalarSubQ.RetType = col.RetType
			scalarSubQ.SetCoercibility(col.Coercibility())
			newColIDs = append(newColIDs, newColID)
			newScalarSubQueryExprs = append(newScalarSubQueryExprs, scalarSubQ)
		}
		subqueryCtx.outputColIDs = newColIDs
		er.b.ctx.GetSessionVars().RegisterScalarSubQ(subqueryCtx)
		if len(newScalarSubQueryExprs) == 1 {
			er.ctxStackAppend(newScalarSubQueryExprs[0], types.EmptyName)
		} else {
			rowFunc, err := er.newFunction(ast.RowFunc, newScalarSubQueryExprs[0].GetType(), newScalarSubQueryExprs...)
			if err != nil {
				er.err = err
				return v, true
			}
			// Use ctxStackAppend (not a raw append to er.ctxStack) so that er.ctxNameStk stays
			// in sync with er.ctxStack; the two stacks are always indexed in lockstep elsewhere.
			er.ctxStackAppend(rowFunc, types.EmptyName)
		}
		return v, true
	}
	// Uncorrelated case: execute the subquery now and fold its first row into constants.
	row, err := EvalSubqueryFirstRow(ctx, physicalPlan, er.b.is, er.b.ctx)
	if err != nil {
		er.err = err
		return v, true
	}
	if np.Schema().Len() > 1 {
		newCols := make([]expression.Expression, 0, np.Schema().Len())
		for i, data := range row {
			constant := &expression.Constant{
				Value:   data,
				RetType: np.Schema().Columns[i].GetType()}
			constant.SetCoercibility(np.Schema().Columns[i].Coercibility())
			newCols = append(newCols, constant)
		}
		expr, err1 := er.newFunction(ast.RowFunc, newCols[0].GetType(), newCols...)
		if err1 != nil {
			er.err = err1
			return v, true
		}
		er.ctxStackAppend(expr, types.EmptyName)
	} else {
		constant := &expression.Constant{
			Value:   row[0],
			RetType: np.Schema().Columns[0].GetType(),
		}
		constant.SetCoercibility(np.Schema().Columns[0].Coercibility())
		er.ctxStackAppend(constant, types.EmptyName)
	}
	return v, true
}
// hasCTEConsumerInSubPlan reports whether the plan tree rooted at p contains a LogicalCTE node.
func hasCTEConsumerInSubPlan(p LogicalPlan) bool {
	if _, isCTE := p.(*LogicalCTE); isCTE {
		return true
	}
	children := p.Children()
	for i := range children {
		if hasCTEConsumerInSubPlan(children[i]) {
			return true
		}
	}
	return false
}
// initConstantRepertoire sets the character repertoire of a constant: ASCII by default,
// upgraded to UNICODE when a string constant contains any non-ASCII byte.
func initConstantRepertoire(c *expression.Constant) {
	c.SetRepertoire(expression.ASCII)
	if c.GetType().EvalType() != types.ETString {
		return
	}
	for _, b := range c.Value.GetBytes() {
		// Any byte >= 0x80 means the value is not pure ASCII.
		if b >= 0x80 {
			c.SetRepertoire(expression.UNICODE)
			return
		}
	}
}
// Leave implements the ast.Visitor interface. It converts the AST node (whose children have already
// been rewritten and left on er.ctxStack) into an expression, pushing the result onto the stack.
// Returning (retNode, false) aborts the walk; er.err carries the failure reason.
func (er *expressionRewriter) Leave(originInNode ast.Node) (retNode ast.Node, ok bool) {
	if er.err != nil {
		return retNode, false
	}
	var inNode = originInNode
	if er.preprocess != nil {
		inNode = er.preprocess(inNode)
	}
	switch v := inNode.(type) {
	// These node types are fully handled in Enter or by dedicated handlers; nothing to do here.
	case *ast.AggregateFuncExpr, *ast.ColumnNameExpr, *ast.ParenthesesExpr, *ast.WhenClause,
		*ast.SubqueryExpr, *ast.ExistsSubqueryExpr, *ast.CompareSubqueryExpr, *ast.ValuesExpr, *ast.WindowFuncExpr, *ast.TableNameExpr:
	case *driver.ValueExpr:
		// set right not null flag for constant value
		retType := v.Type.Clone()
		switch v.Datum.Kind() {
		case types.KindNull:
			retType.DelFlag(mysql.NotNullFlag)
		default:
			retType.AddFlag(mysql.NotNullFlag)
		}
		v.Datum.SetValue(v.Datum.GetValue(), retType)
		value := &expression.Constant{Value: v.Datum, RetType: retType}
		initConstantRepertoire(value)
		er.ctxStackAppend(value, types.EmptyName)
	case *driver.ParamMarkerExpr:
		var value *expression.Constant
		value, er.err = expression.ParamMarkerExpression(er.sctx, v, false)
		if er.err != nil {
			return retNode, false
		}
		initConstantRepertoire(value)
		er.ctxStackAppend(value, types.EmptyName)
	case *ast.VariableExpr:
		er.rewriteVariable(v)
	case *ast.FuncCallExpr:
		// The fold counters were incremented in Enter; balance them here on the way out.
		if _, ok := expression.TryFoldFunctions[v.FnName.L]; ok {
			er.tryFoldCounter--
		}
		er.funcCallToExpression(v)
		if _, ok := expression.DisableFoldFunctions[v.FnName.L]; ok {
			er.disableFoldCounter--
		}
	case *ast.TableName:
		er.toTable(v)
	case *ast.ColumnName:
		er.toColumn(v)
	case *ast.UnaryOperationExpr:
		er.unaryOpToExpression(v)
	case *ast.BinaryOperationExpr:
		// AND/OR participate in try-fold accounting, mirroring the increment in Enter.
		if v.Op == opcode.LogicAnd || v.Op == opcode.LogicOr {
			er.tryFoldCounter--
		}
		er.binaryOpToExpression(v)
	case *ast.BetweenExpr:
		er.betweenToExpression(v)
	case *ast.CaseExpr:
		if _, ok := expression.TryFoldFunctions["case"]; ok {
			er.tryFoldCounter--
		}
		er.caseToExpression(v)
		if _, ok := expression.DisableFoldFunctions["case"]; ok {
			er.disableFoldCounter--
		}
	case *ast.FuncCastExpr:
		if v.Tp.IsArray() && !er.b.allowBuildCastArray {
			er.err = expression.ErrNotSupportedYet.GenWithStackByArgs("Use of CAST( .. AS .. ARRAY) outside of functional index in CREATE(non-SELECT)/ALTER TABLE or in general expressions")
			return retNode, false
		}
		arg := er.ctxStack[len(er.ctxStack)-1]
		er.err = expression.CheckArgsNotMultiColumnRow(arg)
		if er.err != nil {
			return retNode, false
		}
		// check the decimal precision of "CAST(AS TIME)".
		er.err = er.checkTimePrecision(v.Tp)
		if er.err != nil {
			return retNode, false
		}
		castFunction, err := expression.BuildCastFunctionWithCheck(er.sctx, arg, v.Tp)
		if err != nil {
			er.err = err
			return retNode, false
		}
		// An explicit CAST fixes the coercibility and repertoire according to the target type.
		if v.Tp.EvalType() == types.ETString {
			castFunction.SetCoercibility(expression.CoercibilityImplicit)
			if v.Tp.GetCharset() == charset.CharsetASCII {
				castFunction.SetRepertoire(expression.ASCII)
			} else {
				castFunction.SetRepertoire(expression.UNICODE)
			}
		} else {
			castFunction.SetCoercibility(expression.CoercibilityNumeric)
			castFunction.SetRepertoire(expression.ASCII)
		}
		// Replace the argument on the stack with the cast expression (name becomes anonymous).
		er.ctxStack[len(er.ctxStack)-1] = castFunction
		er.ctxNameStk[len(er.ctxNameStk)-1] = types.EmptyName
	case *ast.PatternLikeOrIlikeExpr:
		er.patternLikeOrIlikeToExpression(v)
	case *ast.PatternRegexpExpr:
		er.regexpToScalarFunc(v)
	case *ast.RowExpr:
		er.rowToScalarFunc(v)
	case *ast.PatternInExpr:
		// Subquery-based IN was already handled in Enter; only list-based IN remains here.
		if v.Sel == nil {
			er.inToExpression(len(v.List), v.Not, &v.Type)
		}
	case *ast.PositionExpr:
		er.positionToScalarFunc(v)
	case *ast.IsNullExpr:
		er.isNullToExpression(v)
	case *ast.IsTruthExpr:
		er.isTrueToScalarFunc(v)
	case *ast.DefaultExpr:
		er.evalDefaultExpr(v)
	// TODO: Perhaps we don't need to transcode these back to generic integers/strings
	case *ast.TrimDirectionExpr:
		er.ctxStackAppend(&expression.Constant{
			Value:   types.NewIntDatum(int64(v.Direction)),
			RetType: types.NewFieldType(mysql.TypeTiny),
		}, types.EmptyName)
	case *ast.TimeUnitExpr:
		er.ctxStackAppend(&expression.Constant{
			Value:   types.NewStringDatum(v.Unit.String()),
			RetType: types.NewFieldType(mysql.TypeVarchar),
		}, types.EmptyName)
	case *ast.GetFormatSelectorExpr:
		er.ctxStackAppend(&expression.Constant{
			Value:   types.NewStringDatum(v.Selector.String()),
			RetType: types.NewFieldType(mysql.TypeVarchar),
		}, types.EmptyName)
	case *ast.SetCollationExpr:
		arg := er.ctxStack[len(er.ctxStack)-1]
		if collate.NewCollationEnabled() {
			var collInfo *charset.Collation
			// TODO(bb7133): use charset.ValidCharsetAndCollation when its bug is fixed.
			if collInfo, er.err = collate.GetCollationByName(v.Collate); er.err != nil {
				break
			}
			chs := arg.GetType().GetCharset()
			// if the field is json, the charset is always utf8mb4.
			if arg.GetType().GetType() == mysql.TypeJSON {
				chs = mysql.UTF8MB4Charset
			}
			if chs != "" && collInfo.CharsetName != chs {
				er.err = charset.ErrCollationCharsetMismatch.GenWithStackByArgs(collInfo.Name, chs)
				break
			}
		}
		// SetCollationExpr sets the collation explicitly, even when the evaluation type of the expression is non-string.
		if _, ok := arg.(*expression.Column); ok || arg.GetType().GetType() == mysql.TypeJSON {
			if arg.GetType().GetType() == mysql.TypeEnum || arg.GetType().GetType() == mysql.TypeSet {
				er.err = ErrNotSupportedYet.GenWithStackByArgs("use collate clause for enum or set")
				break
			}
			// Wrap a cast here to avoid changing the original FieldType of the column expression.
			exprType := arg.GetType().Clone()
			// if arg type is json, we should cast it to longtext if there is collate clause.
			if arg.GetType().GetType() == mysql.TypeJSON {
				exprType = types.NewFieldType(mysql.TypeLongBlob)
				exprType.SetCharset(mysql.UTF8MB4Charset)
			}
			exprType.SetCollate(v.Collate)
			casted := expression.BuildCastFunction(er.sctx, arg, exprType)
			arg = casted
			er.ctxStackPop(1)
			er.ctxStackAppend(casted, types.EmptyName)
		} else {
			// For constant and scalar function, we can set its collate directly.
			arg.GetType().SetCollate(v.Collate)
		}
		er.ctxStack[len(er.ctxStack)-1].SetCoercibility(expression.CoercibilityExplicit)
		er.ctxStack[len(er.ctxStack)-1].SetCharsetAndCollation(arg.GetType().GetCharset(), arg.GetType().GetCollate())
	default:
		er.err = errors.Errorf("UnknownType: %T", v)
		return retNode, false
	}
	if er.err != nil {
		return retNode, false
	}
	return originInNode, true
}
// newFunctionWithInit builds a scalar function, selecting the appropriate
// expression.NewFunctionImpl variant: a custom init callback takes precedence,
// otherwise the current fold-control counters decide between no-fold, try-fold,
// and the default constructor. Successful builds are recorded for builtin usage metrics.
func (er *expressionRewriter) newFunctionWithInit(funcName string, retType *types.FieldType, init expression.ScalarFunctionCallBack, args ...expression.Expression) (ret expression.Expression, err error) {
	switch {
	case init != nil:
		ret, err = expression.NewFunctionWithInit(er.sctx, funcName, retType, init, args...)
	case er.disableFoldCounter > 0:
		ret, err = expression.NewFunctionBase(er.sctx, funcName, retType, args...)
	case er.tryFoldCounter > 0:
		ret, err = expression.NewFunctionTryFold(er.sctx, funcName, retType, args...)
	default:
		ret, err = expression.NewFunction(er.sctx, funcName, retType, args...)
	}
	if err != nil {
		return ret, err
	}
	if sf, isScalar := ret.(*expression.ScalarFunction); isScalar {
		er.b.ctx.BuiltinFunctionUsageInc(sf.Function.PbCode().String())
	}
	return ret, nil
}
// newFunction builds a scalar function through newFunctionWithInit without a custom init callback.
func (er *expressionRewriter) newFunction(funcName string, retType *types.FieldType, args ...expression.Expression) (expression.Expression, error) {
	return er.newFunctionWithInit(funcName, retType, nil, args...)
}
// checkTimePrecision rejects a CAST(... AS TIME(n)) whose fractional-second
// precision exceeds the supported maximum.
func (*expressionRewriter) checkTimePrecision(ft *types.FieldType) error {
	if ft.EvalType() != types.ETDuration {
		return nil
	}
	if dec := ft.GetDecimal(); dec > types.MaxFsp {
		return errTooBigPrecision.GenWithStackByArgs(dec, "CAST", types.MaxFsp)
	}
	return nil
}
// useCache reports whether the current statement context allows plan-cache usage.
func (er *expressionRewriter) useCache() bool {
	sessVars := er.sctx.GetSessionVars()
	return sessVars.StmtCtx.UseCache
}
// rewriteVariable rewrites a variable reference (@user_var, @@sys_var) into an expression.
// User variables become GetVar/SetVar scalar functions; system variables are resolved to
// constants from the appropriate scope, with scope/permission/noop checks applied.
func (er *expressionRewriter) rewriteVariable(v *ast.VariableExpr) {
	stkLen := len(er.ctxStack)
	name := strings.ToLower(v.Name)
	sessionVars := er.b.ctx.GetSessionVars()
	if !v.IsSystem {
		// User variable. A non-nil Value means assignment (@x := expr), rewritten to SetVar.
		if v.Value != nil {
			tp := er.ctxStack[stkLen-1].GetType()
			er.ctxStack[stkLen-1], er.err = er.newFunction(ast.SetVar, tp,
				expression.DatumToConstant(types.NewDatum(name), mysql.TypeString, 0),
				er.ctxStack[stkLen-1])
			er.ctxNameStk[stkLen-1] = types.EmptyName
			// Store the field type of the variable into SessionVars.UserVarTypes.
			// Normally we can infer the type from SessionVars.User, but we need SessionVars.UserVarTypes when
			// GetVar has not been executed to fill the SessionVars.Users.
			sessionVars.SetUserVarType(name, tp)
			return
		}
		// Plain read (@x): fall back to a generic varstring type if the variable is untyped so far.
		tp, ok := sessionVars.GetUserVarType(name)
		if !ok {
			tp = types.NewFieldType(mysql.TypeVarString)
			tp.SetFlen(mysql.MaxFieldVarCharLength)
		}
		f, err := er.newFunction(ast.GetVar, tp, expression.DatumToConstant(types.NewStringDatum(name), mysql.TypeString, 0))
		if err != nil {
			er.err = err
			return
		}
		f.SetCoercibility(expression.CoercibilityImplicit)
		er.ctxStackAppend(f, types.EmptyName)
		return
	}
	// System variable path.
	sysVar := variable.GetSysVar(name)
	if sysVar == nil {
		er.err = variable.ErrUnknownSystemVar.FastGenByArgs(name)
		if err := variable.CheckSysVarIsRemoved(name); err != nil {
			// Removed vars still return an error, but we customize it from
			// "unknown" to an explanation of why it is not supported.
			// This is important so users at least know they had the name correct.
			er.err = err
		}
		return
	}
	if sysVar.IsNoop && !variable.EnableNoopVariables.Load() {
		// The variable does nothing, append a warning to the statement output.
		sessionVars.StmtCtx.AppendWarning(ErrGettingNoopVariable.GenWithStackByArgs(sysVar.Name))
	}
	// Under Security Enhanced Mode some variables require extra privileges to read.
	if sem.IsEnabled() && sem.IsInvisibleSysVar(sysVar.Name) {
		err := ErrSpecificAccessDenied.GenWithStackByArgs("RESTRICTED_VARIABLES_ADMIN")
		er.b.visitInfo = appendDynamicVisitInfo(er.b.visitInfo, "RESTRICTED_VARIABLES_ADMIN", false, err)
	}
	// Validate an explicit GLOBAL/SESSION qualifier against the variable's declared scope.
	if v.ExplicitScope && !sysVar.HasNoneScope() {
		if v.IsGlobal && !(sysVar.HasGlobalScope() || sysVar.HasInstanceScope()) {
			er.err = variable.ErrIncorrectScope.GenWithStackByArgs(name, "SESSION")
			return
		}
		if !v.IsGlobal && !sysVar.HasSessionScope() {
			er.err = variable.ErrIncorrectScope.GenWithStackByArgs(name, "GLOBAL")
			return
		}
	}
	var val string
	var err error
	if sysVar.HasNoneScope() {
		val = sysVar.Value
	} else if v.IsGlobal {
		val, err = sessionVars.GetGlobalSystemVar(er.ctx, name)
	} else {
		val, err = sessionVars.GetSessionOrGlobalSystemVar(er.ctx, name)
	}
	if err != nil {
		er.err = err
		return
	}
	nativeVal, nativeType, nativeFlag := sysVar.GetNativeValType(val)
	e := expression.DatumToConstant(nativeVal, nativeType, nativeFlag)
	// Attach charset/collation metadata appropriate to the native value type.
	switch nativeType {
	case mysql.TypeVarString:
		charset, _ := sessionVars.GetSystemVar(variable.CharacterSetConnection)
		e.GetType().SetCharset(charset)
		collate, _ := sessionVars.GetSystemVar(variable.CollationConnection)
		e.GetType().SetCollate(collate)
	case mysql.TypeLong, mysql.TypeLonglong:
		e.GetType().SetCharset(charset.CharsetBin)
		e.GetType().SetCollate(charset.CollationBin)
	default:
		er.err = errors.Errorf("Not supported type(%x) in GetNativeValType() function", nativeType)
		return
	}
	er.ctxStackAppend(e, types.EmptyName)
}
// unaryOpToExpression rewrites a unary operation (+, -, ~, NOT) into a scalar function,
// replacing its operand on the context stack. Unary plus is an identity and is dropped.
func (er *expressionRewriter) unaryOpToExpression(v *ast.UnaryOperationExpr) {
	var funcName string
	switch v.Op {
	case opcode.Plus:
		// expression (+ a) is equal to a; leave the operand untouched.
		return
	case opcode.Minus:
		funcName = ast.UnaryMinus
	case opcode.BitNeg:
		funcName = ast.BitNeg
	case opcode.Not, opcode.Not2:
		funcName = ast.UnaryNot
	default:
		er.err = errors.Errorf("Unknown Unary Op %T", v.Op)
		return
	}
	top := len(er.ctxStack) - 1
	// Unary operators only accept a single-column operand.
	if expression.GetRowLen(er.ctxStack[top]) != 1 {
		er.err = expression.ErrOperandColumns.GenWithStackByArgs(1)
		return
	}
	er.ctxStack[top], er.err = er.newFunction(funcName, &v.Type, er.ctxStack[top])
	er.ctxNameStk[top] = types.EmptyName
}
// binaryOpToExpression rewrites a binary operation into a scalar function, consuming the
// top two stack entries and pushing the result. Comparison operators go through
// constructBinaryOpFunction, which knows how to handle row (multi-column) operands.
func (er *expressionRewriter) binaryOpToExpression(v *ast.BinaryOperationExpr) {
	stkLen := len(er.ctxStack)
	lhs, rhs := er.ctxStack[stkLen-2], er.ctxStack[stkLen-1]
	var function expression.Expression
	switch v.Op {
	case opcode.EQ, opcode.NE, opcode.NullEQ, opcode.GT, opcode.GE, opcode.LT, opcode.LE:
		function, er.err = er.constructBinaryOpFunction(lhs, rhs, v.Op.String())
	default:
		// Non-comparison operators require scalar (single-column) operands on both sides.
		if expression.GetRowLen(lhs) != 1 || expression.GetRowLen(rhs) != 1 {
			er.err = expression.ErrOperandColumns.GenWithStackByArgs(1)
			return
		}
		function, er.err = er.newFunction(v.Op.String(), types.NewFieldType(mysql.TypeUnspecified), lhs, rhs)
	}
	if er.err != nil {
		return
	}
	er.ctxStackPop(2)
	er.ctxStackAppend(function, types.EmptyName)
}
// notToExpression builds the scalar function op(args...) and, when hasNot is set,
// wraps it in a logical NOT. On failure it records the error in er.err and returns nil.
func (er *expressionRewriter) notToExpression(hasNot bool, op string, tp *types.FieldType,
	args ...expression.Expression) expression.Expression {
	expr, err := er.newFunction(op, tp, args...)
	if err == nil && hasNot {
		expr, err = er.newFunction(ast.UnaryNot, tp, expr)
	}
	if err != nil {
		er.err = err
		return nil
	}
	return expr
}
// isNullToExpression rewrites an IS [NOT] NULL expression into a scalar function,
// replacing its operand on the context stack.
func (er *expressionRewriter) isNullToExpression(v *ast.IsNullExpr) {
	top := len(er.ctxStack) - 1
	// IS NULL only accepts a single-column operand.
	if expression.GetRowLen(er.ctxStack[top]) != 1 {
		er.err = expression.ErrOperandColumns.GenWithStackByArgs(1)
		return
	}
	expr := er.notToExpression(v.Not, ast.IsNull, &v.Type, er.ctxStack[top])
	er.ctxStackPop(1)
	er.ctxStackAppend(expr, types.EmptyName)
}
// positionToScalarFunc rewrites a position expression (an ORDER BY / GROUP BY
// ordinal such as `ORDER BY 2`, possibly supplied through a `?` parameter
// marker) into the referenced select-list column.
func (er *expressionRewriter) positionToScalarFunc(v *ast.PositionExpr) {
	pos := v.N
	str := strconv.Itoa(pos)
	if v.P != nil {
		// The position comes from a parameter marker: evaluate the constant
		// on top of the context stack to an integer.
		stkLen := len(er.ctxStack)
		val := er.ctxStack[stkLen-1]
		intNum, isNull, err := expression.GetIntFromConstant(er.sctx, val)
		// For the error message below, a parameterized position is shown as "?".
		str = "?"
		if err == nil {
			if isNull {
				// NULL position: leave the stack untouched and report nothing.
				return
			}
			pos = intNum
			// Only pop the marker once it evaluated successfully.
			er.ctxStackPop(1)
		}
		er.err = err
	}
	// A valid position is 1-based, within the schema, and must not reference
	// a hidden column.
	if er.err == nil && pos > 0 && pos <= er.schema.Len() && !er.schema.Columns[pos-1].IsHidden {
		er.ctxStackAppend(er.schema.Columns[pos-1], er.names[pos-1])
	} else {
		er.err = ErrUnknownColumn.GenWithStackByArgs(str, clauseMsg[er.b.curClause])
	}
}
// isTrueToScalarFunc rewrites `expr IS [NOT] TRUE/FALSE`, consuming the
// scalar operand on top of the context stack.
func (er *expressionRewriter) isTrueToScalarFunc(v *ast.IsTruthExpr) {
	top := len(er.ctxStack) - 1
	var op string
	if v.True == 0 {
		op = ast.IsFalsity
	} else {
		op = ast.IsTruthWithoutNull
	}
	if expression.GetRowLen(er.ctxStack[top]) != 1 {
		er.err = expression.ErrOperandColumns.GenWithStackByArgs(1)
		return
	}
	fn := er.notToExpression(v.Not, op, &v.Type, er.ctxStack[top])
	er.ctxStackPop(1)
	er.ctxStackAppend(fn, types.EmptyName)
}
// inToExpression converts in expression to a scalar function. The argument lLen means the length of in list.
// The argument not means if the expression is not in. The tp stands for the expression type, which is always bool.
// a in (b, c, d) will be rewritten as `(a = b) or (a = c) or (a = d)`.
func (er *expressionRewriter) inToExpression(lLen int, not bool, tp *types.FieldType) {
	stkLen := len(er.ctxStack)
	l := expression.GetRowLen(er.ctxStack[stkLen-lLen-1])
	// Every element of the IN list must have the same row length as the
	// left-hand operand.
	for i := 0; i < lLen; i++ {
		if l != expression.GetRowLen(er.ctxStack[stkLen-lLen+i]) {
			er.err = expression.ErrOperandColumns.GenWithStackByArgs(l)
			return
		}
	}
	// args[0] is the left operand, args[1:] are the list elements.
	args := er.ctxStack[stkLen-lLen-1:]
	leftFt := args[0].GetType()
	leftEt, leftIsNull := leftFt.EvalType(), leftFt.GetType() == mysql.TypeNull
	if leftIsNull {
		// NULL IN (...) always evaluates to NULL: fold the whole expression.
		er.ctxStackPop(lLen + 1)
		er.ctxStackAppend(expression.NewNull(), types.EmptyName)
		return
	}
	if leftEt == types.ETInt {
		// Try to refine non-int constants on the right side so they compare
		// with the int left side without an implicit cast.
		for i := 1; i < len(args); i++ {
			if c, ok := args[i].(*expression.Constant); ok {
				var isExceptional bool
				if expression.MaybeOverOptimized4PlanCache(er.sctx, []expression.Expression{c}) {
					if c.GetType().EvalType() == types.ETInt {
						continue // no need to refine it
					}
					// Refining a mutable constant would bake its value into a
					// cached plan, so skip plan caching for this statement.
					er.sctx.GetSessionVars().StmtCtx.SetSkipPlanCache(errors.Errorf("'%v' may be converted to INT", c.String()))
					expression.RemoveMutableConst(er.sctx, []expression.Expression{c})
				}
				args[i], isExceptional = expression.RefineComparedConstant(er.sctx, *leftFt, c, opcode.EQ)
				if isExceptional {
					// Refinement hit an exceptional case; keep the original constant.
					args[i] = c
				}
			}
		}
	}
	// The fast path (a single IN scalar function) is only usable when every
	// element compares with the left operand under the same eval type.
	allSameType := true
	for _, arg := range args[1:] {
		if arg.GetType().GetType() != mysql.TypeNull && expression.GetAccurateCmpType(args[0], arg) != leftEt {
			allSameType = false
			break
		}
	}
	var function expression.Expression
	if allSameType && l == 1 && lLen > 1 {
		function = er.notToExpression(not, ast.In, tp, er.ctxStack[stkLen-lLen-1:]...)
	} else {
		// If we rewrite IN to EQ, we need to decide what's the collation EQ uses.
		coll := er.deriveCollationForIn(l, lLen, args)
		if er.err != nil {
			return
		}
		er.castCollationForIn(l, lLen, stkLen, coll)
		// Slow path: expand `a in (b, c)` into `(a = b) or (a = c)`.
		eqFunctions := make([]expression.Expression, 0, lLen)
		for i := stkLen - lLen; i < stkLen; i++ {
			expr, err := er.constructBinaryOpFunction(args[0], er.ctxStack[i], ast.EQ)
			if err != nil {
				er.err = err
				return
			}
			eqFunctions = append(eqFunctions, expr)
		}
		function = expression.ComposeDNFCondition(er.sctx, eqFunctions...)
		if not {
			var err error
			function, err = er.newFunction(ast.UnaryNot, tp, function)
			if err != nil {
				er.err = err
				return
			}
		}
	}
	er.ctxStackPop(lLen + 1)
	er.ctxStackAppend(function, types.EmptyName)
}
// deriveCollationForIn derives the collation an IN expression should use.
// Tuple elements, such as (a, b, c) in ((x1, y1, z1), ...), are not handled
// and yield nil.
func (er *expressionRewriter) deriveCollationForIn(colLen int, _ int, args []expression.Expression) *expression.ExprCollation {
	if colLen != 1 {
		return nil
	}
	// a in (x, y, z): derive a single collation from all operands.
	coll, err := expression.CheckAndDeriveCollationFromExprs(er.sctx, "IN", types.ETInt, args...)
	er.err = err
	if err != nil {
		return nil
	}
	return coll
}
// castCollationForIn casts collation info for arguments in the `in clause` to make sure the used collation is correct after we
// rewrite it to equal expression.
func (er *expressionRewriter) castCollationForIn(colLen int, elemCnt int, stkLen int, coll *expression.ExprCollation) {
	// We don't handle the cases if the element is a tuple, such as (a, b, c) in ((x1, y1, z1), (x2, y2, z2)).
	if colLen != 1 {
		return
	}
	// Walk the elemCnt list elements sitting on top of the context stack.
	for i := stkLen - elemCnt; i < stkLen; i++ {
		// todo: consider refining the code and reusing expression.BuildCollationFunction here
		if er.ctxStack[i].GetType().EvalType() == types.ETString {
			// Row functions cannot be cast as a whole; skip them.
			rowFunc, ok := er.ctxStack[i].(*expression.ScalarFunction)
			if ok && rowFunc.FuncName.String() == ast.RowFunc {
				continue
			}
			// Skip elements that already carry the target collation (this also
			// avoids converting binary literals like 0x12 to a string).
			if er.ctxStack[i].GetType().GetCollate() == coll.Collation {
				continue
			}
			tp := er.ctxStack[i].GetType().Clone()
			if er.ctxStack[i].GetType().Hybrid() {
				// Hybrid types (enum/set/bit) are only cast when they are
				// actually compared with the left operand as strings.
				if !(expression.GetAccurateCmpType(er.ctxStack[stkLen-elemCnt-1], er.ctxStack[i]) == types.ETString) {
					continue
				}
				tp = types.NewFieldType(mysql.TypeVarString)
			} else if coll.Charset == charset.CharsetBin {
				// When cast character string to binary string, if we still use fixed length representation,
				// then 0 padding will be used, which can affect later execution.
				// e.g. https://github.com/pingcap/tidb/pull/35053#pullrequestreview-1008757770 gives an unexpected case.
				// On the other hand, we can not directly return origin expr back,
				// since we need binary collation to do string comparison later.
				// Here we use VarString type of cast, i.e `cast(a as binary)`, to avoid this problem.
				tp.SetType(mysql.TypeVarString)
			}
			tp.SetCharset(coll.Charset)
			tp.SetCollate(coll.Collation)
			er.ctxStack[i] = expression.BuildCastFunction(er.sctx, er.ctxStack[i], tp)
			// Mark the cast as explicit so it wins collation coercibility.
			er.ctxStack[i].SetCoercibility(expression.CoercibilityExplicit)
		}
	}
}
// caseToExpression rewrites a CASE expression. The WHEN/THEN pairs, the
// optional ELSE clause and (for the `CASE value WHEN ...` form) the compared
// value are the topmost context-stack entries; they are popped and replaced
// by a single `case` scalar function.
func (er *expressionRewriter) caseToExpression(v *ast.CaseExpr) {
	stkLen := len(er.ctxStack)
	// Two stack entries (condition, result) per WHEN clause.
	argsLen := 2 * len(v.WhenClauses)
	if v.ElseClause != nil {
		argsLen++
	}
	er.err = expression.CheckArgsNotMultiColumnRow(er.ctxStack[stkLen-argsLen:]...)
	if er.err != nil {
		return
	}
	// value -> ctxStack[stkLen-argsLen-1]
	// when clause(condition, result) -> ctxStack[stkLen-argsLen:stkLen-1];
	// else clause -> ctxStack[stkLen-1]
	var args []expression.Expression
	if v.Value != nil {
		// args: eq scalar func(args: value, condition1), result1,
		//       eq scalar func(args: value, condition2), result2,
		//       ...
		//       else clause
		value := er.ctxStack[stkLen-argsLen-1]
		args = make([]expression.Expression, 0, argsLen)
		for i := stkLen - argsLen; i < stkLen-1; i += 2 {
			// Turn each `WHEN cond` into `value = cond`.
			arg, err := er.newFunction(ast.EQ, types.NewFieldType(mysql.TypeTiny), value, er.ctxStack[i])
			if err != nil {
				er.err = err
				return
			}
			args = append(args, arg)
			args = append(args, er.ctxStack[i+1])
		}
		if v.ElseClause != nil {
			args = append(args, er.ctxStack[stkLen-1])
		}
		argsLen++ // for trimming the value element later
	} else {
		// args: condition1, result1,
		//       condition2, result2,
		//       ...
		//       else clause
		args = er.ctxStack[stkLen-argsLen:]
	}
	function, err := er.newFunction(ast.Case, &v.Type, args...)
	if err != nil {
		er.err = err
		return
	}
	er.ctxStackPop(argsLen)
	er.ctxStackAppend(function, types.EmptyName)
}
// patternLikeOrIlikeToExpression rewrites `expr [NOT] LIKE/ILIKE pat [ESCAPE c]`.
// The subject and pattern are the top two context-stack entries. When the
// pattern is a constant exact match (no wildcards) and new collation is
// disabled, the predicate is simplified to a plain equality comparison.
func (er *expressionRewriter) patternLikeOrIlikeToExpression(v *ast.PatternLikeOrIlikeExpr) {
	l := len(er.ctxStack)
	er.err = expression.CheckArgsNotMultiColumnRow(er.ctxStack[l-2:]...)
	if er.err != nil {
		return
	}
	char, col := er.sctx.GetSessionVars().GetCharsetInfo()
	var function expression.Expression
	fieldType := &types.FieldType{}
	isPatternExactMatch := false
	// Treat predicate 'like' or 'ilike' the same way as predicate '=' when it is an exact match and new collation is not enabled.
	if patExpression, ok := er.ctxStack[l-1].(*expression.Constant); ok && !collate.NewCollationEnabled() {
		patString, isNull, err := patExpression.EvalString(nil, chunk.Row{})
		if err != nil {
			er.err = err
			return
		}
		if !isNull {
			patValue, patTypes := stringutil.CompilePattern(patString, v.Escape)
			// Only a string-typed subject with a wildcard-free pattern can be
			// folded into (in)equality.
			if stringutil.IsExactMatch(patTypes) && er.ctxStack[l-2].GetType().EvalType() == types.ETString {
				op := ast.EQ
				if v.Not {
					op = ast.NE
				}
				types.DefaultTypeForValue(string(patValue), fieldType, char, col)
				function, er.err = er.constructBinaryOpFunction(er.ctxStack[l-2],
					&expression.Constant{Value: types.NewStringDatum(string(patValue)), RetType: fieldType},
					op)
				isPatternExactMatch = true
			}
		}
	}
	if !isPatternExactMatch {
		// General path: build like/ilike(subject, pattern, escape-char).
		funcName := ast.Like
		if !v.IsLike {
			funcName = ast.Ilike
		}
		types.DefaultTypeForValue(int(v.Escape), fieldType, char, col)
		function = er.notToExpression(v.Not, funcName, &v.Type,
			er.ctxStack[l-2], er.ctxStack[l-1], &expression.Constant{Value: types.NewIntDatum(int64(v.Escape)), RetType: fieldType})
	}
	er.ctxStackPop(2)
	er.ctxStackAppend(function, types.EmptyName)
}
// regexpToScalarFunc rewrites `expr [NOT] REGEXP pat` using the two topmost
// context-stack entries as operands.
func (er *expressionRewriter) regexpToScalarFunc(v *ast.PatternRegexpExpr) {
	top := len(er.ctxStack)
	if er.err = expression.CheckArgsNotMultiColumnRow(er.ctxStack[top-2:]...); er.err != nil {
		return
	}
	fn := er.notToExpression(v.Not, ast.Regexp, &v.Type, er.ctxStack[top-2], er.ctxStack[top-1])
	er.ctxStackPop(2)
	er.ctxStackAppend(fn, types.EmptyName)
}
// rowToScalarFunc rewrites a row constructor `(a, b, ...)` into a single
// row scalar function built from the topmost len(v.Values) stack entries.
func (er *expressionRewriter) rowToScalarFunc(v *ast.RowExpr) {
	count := len(v.Values)
	base := len(er.ctxStack) - count
	elems := make([]expression.Expression, count)
	copy(elems, er.ctxStack[base:])
	er.ctxStackPop(count)
	fn, err := er.newFunction(ast.RowFunc, elems[0].GetType(), elems...)
	if err != nil {
		er.err = err
		return
	}
	er.ctxStackAppend(fn, types.EmptyName)
}
// wrapExpWithCast reads the top three context-stack entries — the operands of
// `expr BETWEEN lexp AND rexp` — and wraps each with a cast matching the
// comparison type resolved for the three of them. The stack itself is not
// modified; the (possibly wrapped) expressions are returned.
func (er *expressionRewriter) wrapExpWithCast() (expr, lexp, rexp expression.Expression) {
	stkLen := len(er.ctxStack)
	expr, lexp, rexp = er.ctxStack[stkLen-3], er.ctxStack[stkLen-2], er.ctxStack[stkLen-1]
	var castFunc func(sessionctx.Context, expression.Expression) expression.Expression
	switch expression.ResolveType4Between([3]expression.Expression{expr, lexp, rexp}) {
	case types.ETInt:
		castFunc = expression.WrapWithCastAsInt
	case types.ETReal:
		castFunc = expression.WrapWithCastAsReal
	case types.ETDecimal:
		castFunc = expression.WrapWithCastAsDecimal
	case types.ETString:
		castFunc = func(ctx sessionctx.Context, e expression.Expression) expression.Expression {
			// string kind expression do not need cast
			if e.GetType().EvalType().IsStringKind() {
				return e
			}
			return expression.WrapWithCastAsString(ctx, e)
		}
	case types.ETDuration:
		// Time-like comparisons cast all three operands and return directly.
		expr = expression.WrapWithCastAsTime(er.sctx, expr, types.NewFieldType(mysql.TypeDuration))
		lexp = expression.WrapWithCastAsTime(er.sctx, lexp, types.NewFieldType(mysql.TypeDuration))
		rexp = expression.WrapWithCastAsTime(er.sctx, rexp, types.NewFieldType(mysql.TypeDuration))
		return
	case types.ETDatetime:
		expr = expression.WrapWithCastAsTime(er.sctx, expr, types.NewFieldType(mysql.TypeDatetime))
		lexp = expression.WrapWithCastAsTime(er.sctx, lexp, types.NewFieldType(mysql.TypeDatetime))
		rexp = expression.WrapWithCastAsTime(er.sctx, rexp, types.NewFieldType(mysql.TypeDatetime))
		return
	default:
		// Unknown comparison type: leave the operands unchanged.
		return
	}
	expr = castFunc(er.sctx, expr)
	lexp = castFunc(er.sctx, lexp)
	rexp = castFunc(er.sctx, rexp)
	return
}
// betweenToExpression rewrites `expr [NOT] BETWEEN lexp AND rexp` into
// `expr >= lexp AND expr <= rexp` (wrapped in NOT when negated). The three
// operands are the topmost context-stack entries.
func (er *expressionRewriter) betweenToExpression(v *ast.BetweenExpr) {
	stkLen := len(er.ctxStack)
	er.err = expression.CheckArgsNotMultiColumnRow(er.ctxStack[stkLen-3:]...)
	if er.err != nil {
		return
	}
	// Cast all operands to their common comparison type first.
	expr, lexp, rexp := er.wrapExpWithCast()
	coll, err := expression.CheckAndDeriveCollationFromExprs(er.sctx, "BETWEEN", types.ETInt, expr, lexp, rexp)
	er.err = err
	if er.err != nil {
		return
	}
	// Handle enum or set. We need to know their real type to decide whether to cast them.
	lt := expression.GetAccurateCmpType(expr, lexp)
	rt := expression.GetAccurateCmpType(expr, rexp)
	enumOrSetRealTypeIsStr := lt != types.ETInt && rt != types.ETInt
	expr = expression.BuildCastCollationFunction(er.sctx, expr, coll, enumOrSetRealTypeIsStr)
	lexp = expression.BuildCastCollationFunction(er.sctx, lexp, coll, enumOrSetRealTypeIsStr)
	rexp = expression.BuildCastCollationFunction(er.sctx, rexp, coll, enumOrSetRealTypeIsStr)
	// Compose expr >= lexp AND expr <= rexp.
	var l, r expression.Expression
	l, er.err = expression.NewFunction(er.sctx, ast.GE, &v.Type, expr, lexp)
	if er.err != nil {
		return
	}
	r, er.err = expression.NewFunction(er.sctx, ast.LE, &v.Type, expr, rexp)
	if er.err != nil {
		return
	}
	function, err := er.newFunction(ast.LogicAnd, &v.Type, l, r)
	if err != nil {
		er.err = err
		return
	}
	if v.Not {
		function, err = er.newFunction(ast.UnaryNot, &v.Type, function)
		if err != nil {
			er.err = err
			return
		}
	}
	er.ctxStackPop(3)
	er.ctxStackAppend(function, types.EmptyName)
}
// rewriteFuncCall handles a FuncCallExpr and generates a customized function.
// It should return true if for the given FuncCallExpr a rewrite is performed so that original behavior is skipped.
// Otherwise it should return false to indicate (the caller) that original behavior needs to be performed.
func (er *expressionRewriter) rewriteFuncCall(v *ast.FuncCallExpr) bool {
	switch v.FnName.L {
	// when column is not null, ifnull on such column can be optimized to a cast.
	case ast.Ifnull:
		if len(v.Args) != 2 {
			er.err = expression.ErrIncorrectParameterCount.GenWithStackByArgs(v.FnName.O)
			return true
		}
		stackLen := len(er.ctxStack)
		lhs := er.ctxStack[stackLen-2]
		rhs := er.ctxStack[stackLen-1]
		col, isColumn := lhs.(*expression.Column)
		// Enum/set columns are excluded from the optimization below.
		var isEnumSet bool
		if lhs.GetType().GetType() == mysql.TypeEnum || lhs.GetType().GetType() == mysql.TypeSet {
			isEnumSet = true
		}
		// if expr1 is a column with not null flag, then we can optimize it as a cast.
		if isColumn && !isEnumSet && mysql.HasNotNullFlag(col.RetType.GetFlag()) {
			retTp, err := expression.InferType4ControlFuncs(er.sctx, ast.Ifnull, lhs, rhs)
			if err != nil {
				er.err = err
				return true
			}
			retTp.AddFlag((lhs.GetType().GetFlag() & mysql.NotNullFlag) | (rhs.GetType().GetFlag() & mysql.NotNullFlag))
			if lhs.GetType().GetType() == mysql.TypeNull && rhs.GetType().GetType() == mysql.TypeNull {
				// Both sides are NULL literals: the result type collapses to NULL.
				retTp.SetType(mysql.TypeNull)
				retTp.SetFlen(0)
				retTp.SetDecimal(0)
				types.SetBinChsClnFlag(retTp)
			}
			er.ctxStackPop(len(v.Args))
			casted := expression.BuildCastFunction(er.sctx, lhs, retTp)
			er.ctxStackAppend(casted, types.EmptyName)
			return true
		}
		return false
	case ast.Nullif:
		// nullif(a, b) is rewritten as if(a = b, NULL, a).
		if len(v.Args) != 2 {
			er.err = expression.ErrIncorrectParameterCount.GenWithStackByArgs(v.FnName.O)
			return true
		}
		stackLen := len(er.ctxStack)
		param1 := er.ctxStack[stackLen-2]
		param2 := er.ctxStack[stackLen-1]
		// param1 = param2
		funcCompare, err := er.constructBinaryOpFunction(param1, param2, ast.EQ)
		if err != nil {
			er.err = err
			return true
		}
		// NULL
		nullTp := types.NewFieldType(mysql.TypeNull)
		flen, decimal := mysql.GetDefaultFieldLengthAndDecimal(mysql.TypeNull)
		nullTp.SetFlen(flen)
		nullTp.SetDecimal(decimal)
		paramNull := &expression.Constant{
			Value:   types.NewDatum(nil),
			RetType: nullTp,
		}
		// if(param1 = param2, NULL, param1)
		funcIf, err := er.newFunction(ast.If, &v.Type, funcCompare, paramNull, param1)
		if err != nil {
			er.err = err
			return true
		}
		er.ctxStackPop(len(v.Args))
		er.ctxStackAppend(funcIf, types.EmptyName)
		return true
	default:
		return false
	}
}
// funcCallToExpression rewrites a function-call AST node. The call's
// arguments are the top len(v.Args) context-stack entries; they are popped
// and the resulting expression is pushed back. Errors go through er.err.
func (er *expressionRewriter) funcCallToExpression(v *ast.FuncCallExpr) {
	stackLen := len(er.ctxStack)
	args := er.ctxStack[stackLen-len(v.Args):]
	er.err = expression.CheckArgsNotMultiColumnRow(args...)
	if er.err != nil {
		return
	}
	// Some calls (ifnull/nullif) are rewritten into other expressions entirely.
	if er.rewriteFuncCall(v) {
		return
	}
	var function expression.Expression
	er.ctxStackPop(len(v.Args))
	if _, ok := expression.DeferredFunctions[v.FnName.L]; er.useCache() && ok {
		// When the expression is unix_timestamp and the number of argument is not zero,
		// we deal with it as normal expression.
		if v.FnName.L == ast.UnixTimestamp && len(v.Args) != 0 {
			function, er.err = er.newFunction(v.FnName.L, &v.Type, args...)
			er.ctxStackAppend(function, types.EmptyName)
		} else {
			// Wrap the call in a deferred constant so plan caching does not
			// freeze the value of functions like now().
			function, er.err = expression.NewFunctionBase(er.sctx, v.FnName.L, &v.Type, args...)
			if er.err != nil {
				// BUG FIX: previously function.GetType() was dereferenced
				// without this check, panicking on a nil function when
				// construction failed. Push nil to keep the stack balanced,
				// matching the error handling of the grouping branch below.
				er.ctxStackAppend(nil, types.EmptyName)
				return
			}
			c := &expression.Constant{Value: types.NewDatum(nil), RetType: function.GetType().Clone(), DeferredExpr: function}
			er.ctxStackAppend(c, types.EmptyName)
		}
	} else if v.FnName.L == ast.Grouping {
		// grouping function should fetch the underlying grouping-sets meta and rewrite the args here.
		// eg: grouping(a) actually is try to find in which grouping-set that the column 'a' is remained,
		// collecting those gid as a collection and filling it into the grouping function meta. Besides,
		// the first arg of grouping function should be rewritten as gid column defined/passed by Expand
		// from the bottom up.
		if er.rollExpand == nil {
			er.err = ErrInvalidGroupFuncUse
			er.ctxStackAppend(nil, types.EmptyName)
		} else {
			// whether there is some duplicate grouping sets, gpos is only be used in shuffle keys and group keys
			// rather than grouping function.
			// eg: rollup(a,a,b), the decided grouping sets are {a,a,b},{a,a,null},{a,null,null},{null,null,null}
			// for the second and third grouping set: {a,a,null} and {a,null,null}, a here is the col ref of original
			// column `a`. So from the static layer, this two grouping set are equivalent, we don't need to copy col
			// `a double times at the every beginning and resort to gpos to distinguish them.
			// {col-a, col-b, gid, gpos}
			// {a, b, 0, 1}, {a, null, 1, 2}, {a, null, 1, 3}, {null, null, 2, 4}
			// grouping function still only need to care about gid is enough, gpos what group and shuffle keys cared.
			if len(args) > 64 {
				er.err = ErrInvalidNumberOfArgs.GenWithStackByArgs("GROUPING", 64)
				er.ctxStackAppend(nil, types.EmptyName)
				return
			}
			// resolve grouping args in group by items or not.
			resolvedCols, err := er.rollExpand.resolveGroupingFuncArgsInGroupBy(args)
			if err != nil {
				er.err = err
				er.ctxStackAppend(nil, types.EmptyName)
				return
			}
			// The grouping function's real argument is the gid column produced
			// by the Expand operator; the metadata records which grouping sets
			// keep the originally referenced columns.
			newArg := er.rollExpand.GID.Clone()
			init := func(groupingFunc *expression.ScalarFunction) (expression.Expression, error) {
				err = groupingFunc.Function.(*expression.BuiltinGroupingImplSig).SetMetadata(er.rollExpand.GroupingMode, er.rollExpand.GenerateGroupingMarks(resolvedCols))
				return groupingFunc, err
			}
			function, er.err = er.newFunctionWithInit(v.FnName.L, &v.Type, init, newArg)
			er.ctxStackAppend(function, types.EmptyName)
		}
	} else {
		// Ordinary function call.
		function, er.err = er.newFunction(v.FnName.L, &v.Type, args...)
		er.ctxStackAppend(function, types.EmptyName)
	}
}
// toTable pushes a table name as a string constant. TableName appears in
// expressions only for sequence functions such as nextval(seq), where the
// argument is a table reference rather than a column name.
func (er *expressionRewriter) toTable(v *ast.TableName) {
	name := v.Name.L
	if v.Schema.L != "" {
		name = v.Schema.L + "." + name
	}
	er.ctxStackAppend(&expression.Constant{
		Value:   types.NewDatum(name),
		RetType: types.NewFieldType(mysql.TypeString),
	}, types.EmptyName)
}
// toColumn resolves a column-name reference and pushes the matching column.
// Lookup order: the current schema, natural/using-join full schemas, then
// outer query schemas from innermost to outermost (producing a correlated
// column). Unresolvable names set er.err.
func (er *expressionRewriter) toColumn(v *ast.ColumnName) {
	idx, err := expression.FindFieldName(er.names, v)
	if err != nil {
		er.err = ErrAmbiguous.GenWithStackByArgs(v.Name, clauseMsg[fieldList])
		return
	}
	if idx >= 0 {
		column := er.schema.Columns[idx]
		// Hidden columns are internal and must not be referenced by name.
		if column.IsHidden {
			er.err = ErrUnknownColumn.GenWithStackByArgs(v.Name, clauseMsg[er.b.curClause])
			return
		}
		er.ctxStackAppend(column, er.names[idx])
		return
	}
	// The column may have been coalesced away by a natural/using join;
	// search the join's full schema.
	col, name, err := findFieldNameFromNaturalUsingJoin(er.p, v)
	if err != nil {
		er.err = err
		return
	} else if col != nil {
		er.ctxStackAppend(col, name)
		return
	}
	// Search outer queries from the innermost outward; a hit becomes a
	// correlated column.
	for i := len(er.b.outerSchemas) - 1; i >= 0; i-- {
		outerSchema, outerName := er.b.outerSchemas[i], er.b.outerNames[i]
		idx, err = expression.FindFieldName(outerName, v)
		if idx >= 0 {
			column := outerSchema.Columns[idx]
			er.ctxStackAppend(&expression.CorrelatedColumn{Column: *column, Data: new(types.Datum)}, outerName[idx])
			return
		}
		if err != nil {
			er.err = ErrAmbiguous.GenWithStackByArgs(v.Name, clauseMsg[fieldList])
			return
		}
	}
	// Qualified names are not allowed directly on top of a UNION.
	if _, ok := er.p.(*LogicalUnionAll); ok && v.Table.O != "" {
		er.err = ErrTablenameNotAllowedHere.GenWithStackByArgs(v.Table.O, "SELECT", clauseMsg[er.b.curClause])
		return
	}
	if er.b.curClause == globalOrderByClause {
		er.b.curClause = orderByClause
	}
	er.err = ErrUnknownColumn.GenWithStackByArgs(v.String(), clauseMsg[er.b.curClause])
}
// findFieldNameFromNaturalUsingJoin searches a natural/using join's full
// schema for the given column name, descending through simple single-child
// operators. It returns (nil, nil, nil) when nothing matches.
func findFieldNameFromNaturalUsingJoin(p LogicalPlan, v *ast.ColumnName) (*expression.Column, *types.FieldName, error) {
	switch join := p.(type) {
	case *LogicalLimit, *LogicalSelection, *LogicalTopN, *LogicalSort, *LogicalMaxOneRow:
		// These operators do not change the schema; recurse into the child.
		return findFieldNameFromNaturalUsingJoin(p.Children()[0], v)
	case *LogicalJoin:
		if join.fullSchema == nil {
			return nil, nil, nil
		}
		idx, err := expression.FindFieldName(join.fullNames, v)
		if err != nil || idx < 0 {
			return nil, nil, err
		}
		return join.fullSchema.Columns[idx], join.fullNames[idx], nil
	}
	return nil, nil, nil
}
// evalDefaultExpr rewrites default(col) into a constant holding the column's
// default value and pushes it onto the context stack.
func (er *expressionRewriter) evalDefaultExpr(v *ast.DefaultExpr) {
	var name *types.FieldName
	// Here we will find the corresponding column for default function. At the same time, we need to consider the issue
	// of subquery and name space.
	// For example, we have two tables t1(a int default 1, b int) and t2(a int default -1, c int). Consider the following SQL:
	// select a from t1 where a > (select default(a) from t2)
	// Refer to the behavior of MySQL, we need to find column a in table t2. If table t2 does not have column a, then find it
	// in table t1. If there are none, return an error message.
	// Based on the above description, we need to look in er.b.allNames from back to front.
	for i := len(er.b.allNames) - 1; i >= 0; i-- {
		idx, err := expression.FindFieldName(er.b.allNames[i], v.Name)
		if err != nil {
			er.err = err
			return
		}
		if idx >= 0 {
			name = er.b.allNames[i][idx]
			break
		}
	}
	if name == nil {
		// Fall back to the rewriter's own name list.
		idx, err := expression.FindFieldName(er.names, v.Name)
		if err != nil {
			er.err = err
			return
		}
		if idx < 0 {
			er.err = ErrUnknownColumn.GenWithStackByArgs(v.Name.OrigColName(), "field list")
			return
		}
		name = er.names[idx]
	}
	dbName := name.DBName
	if dbName.O == "" {
		// if database name is not specified, use current database name
		dbName = model.NewCIStr(er.sctx.GetSessionVars().CurrentDB)
	}
	if name.OrigTblName.O == "" {
		// column is evaluated by some expressions, for example:
		// `select default(c) from (select (a+1) as c from t) as t0`
		// in such case, a 'no default' error is returned
		er.err = table.ErrNoDefaultValue.GenWithStackByArgs(name.ColName)
		return
	}
	var tbl table.Table
	tbl, er.err = er.b.is.TableByName(dbName, name.OrigTblName)
	if er.err != nil {
		return
	}
	colName := name.OrigColName.O
	if colName == "" {
		// in some cases, OrigColName is empty, use ColName instead
		colName = name.ColName.O
	}
	col := table.FindCol(tbl.Cols(), colName)
	if col == nil {
		er.err = ErrUnknownColumn.GenWithStackByArgs(v.Name, "field_list")
		return
	}
	isCurrentTimestamp := hasCurrentDatetimeDefault(col)
	var val *expression.Constant
	switch {
	case isCurrentTimestamp && (col.GetType() == mysql.TypeDatetime || col.GetType() == mysql.TypeTimestamp):
		// CURRENT_TIMESTAMP default on a time column: evaluate now() with the
		// column's fractional-second precision.
		t, err := expression.GetTimeValue(er.sctx, ast.CurrentTimestamp, col.GetType(), col.GetDecimal(), nil)
		if err != nil {
			// BUG FIX: this error was previously dropped (bare `return`),
			// leaving er.err nil and later appending a nil constant.
			er.err = err
			return
		}
		val = &expression.Constant{
			Value:   t,
			RetType: types.NewFieldType(col.GetType()),
		}
	default:
		// for other columns, just use what it is
		val, er.err = er.b.getDefaultValue(col)
	}
	if er.err != nil {
		return
	}
	er.ctxStackAppend(val, types.EmptyName)
}
// hasCurrentDatetimeDefault reports whether the column's default value is
// CURRENT_TIMESTAMP (matched case-insensitively).
func hasCurrentDatetimeDefault(col *table.Column) bool {
	if s, ok := col.DefaultValue.(string); ok {
		return strings.ToLower(s) == ast.CurrentTimestamp
	}
	return false
}
// decodeKeyFromString decodes a hex-encoded TiKV key into a human-readable
// JSON description (record key, index key, or bare table prefix). On any
// failure it appends a warning to the statement context and returns the
// input string unchanged.
func decodeKeyFromString(ctx sessionctx.Context, s string) string {
	sc := ctx.GetSessionVars().StmtCtx
	key, err := hex.DecodeString(s)
	if err != nil {
		// NOTE(review): this warning formats the partially-decoded bytes, not
		// the original input s — confirm that is intended.
		sc.AppendWarning(errors.Errorf("invalid key: %X", key))
		return s
	}
	// Auto decode byte if needed.
	_, bs, err := codec.DecodeBytes(key, nil)
	if err == nil {
		key = bs
	}
	tableID := tablecodec.DecodeTableID(key)
	if tableID == 0 {
		sc.AppendWarning(errors.Errorf("invalid key: %X", key))
		return s
	}
	// The table metadata is needed to decode handles/index values; fetch it
	// from the current domain's infoschema.
	dm := domain.GetDomain(ctx)
	if dm == nil {
		sc.AppendWarning(errors.Errorf("domain not found when decoding key: %X", key))
		return s
	}
	is := dm.InfoSchema()
	if is == nil {
		sc.AppendWarning(errors.Errorf("infoschema not found when decoding key: %X", key))
		return s
	}
	tbl, _ := is.TableByID(tableID)
	if tbl == nil {
		// The ID may belong to a partition rather than a table.
		tbl, _, _ = is.FindTableByPartitionID(tableID)
	}
	loc := ctx.GetSessionVars().Location()
	// Dispatch on the key layout: record row, index entry, or table prefix.
	if tablecodec.IsRecordKey(key) {
		ret, err := decodeRecordKey(key, tableID, tbl, loc)
		if err != nil {
			sc.AppendWarning(err)
			return s
		}
		return ret
	} else if tablecodec.IsIndexKey(key) {
		ret, err := decodeIndexKey(key, tableID, tbl, loc)
		if err != nil {
			sc.AppendWarning(err)
			return s
		}
		return ret
	} else if tablecodec.IsTableKey(key) {
		ret, err := decodeTableKey(key, tableID, tbl)
		if err != nil {
			sc.AppendWarning(err)
			return s
		}
		return ret
	}
	sc.AppendWarning(errors.Errorf("invalid key: %X", key))
	return s
}
// decodeRecordKey renders a record (row) key as a JSON object describing the
// table (and partition, if any) plus the handle: either an integer rowid or
// the decoded clustered-index column values.
func decodeRecordKey(key []byte, tableID int64, tbl table.Table, loc *time.Location) (string, error) {
	_, handle, err := tablecodec.DecodeRecordKey(key)
	if err != nil {
		return "", errors.Trace(err)
	}
	if handle.IsInt() {
		// Integer handle: _tidb_rowid or a single-column int primary key.
		ret := make(map[string]interface{})
		if tbl != nil && tbl.Meta().Partition != nil {
			// The decoded ID is the partition's; report the parent table's ID too.
			ret["partition_id"] = tableID
			tableID = tbl.Meta().ID
		}
		// NOTE(review): here table_id is emitted as a decimal string, while
		// the non-int paths below emit it as a number — confirm consumers
		// expect this asymmetry before unifying.
		ret["table_id"] = strconv.FormatInt(tableID, 10)
		// When the clustered index is enabled, we should show the PK name.
		if tbl != nil && tbl.Meta().HasClusteredIndex() {
			ret[tbl.Meta().GetPkName().String()] = handle.IntValue()
		} else {
			ret["_tidb_rowid"] = handle.IntValue()
		}
		retStr, err := json.Marshal(ret)
		if err != nil {
			return "", errors.Trace(err)
		}
		return string(retStr), nil
	}
	if tbl != nil {
		// Common handle: decode the clustered primary-key columns by ID.
		tblInfo := tbl.Meta()
		idxInfo := tables.FindPrimaryIndex(tblInfo)
		if idxInfo == nil {
			return "", errors.Trace(errors.Errorf("primary key not found when decoding record key: %X", key))
		}
		cols := make(map[int64]*types.FieldType, len(tblInfo.Columns))
		for _, col := range tblInfo.Columns {
			cols[col.ID] = &(col.FieldType)
		}
		handleColIDs := make([]int64, 0, len(idxInfo.Columns))
		for _, col := range idxInfo.Columns {
			handleColIDs = append(handleColIDs, tblInfo.Columns[col.Offset].ID)
		}
		if len(handleColIDs) != handle.NumCols() {
			return "", errors.Trace(errors.Errorf("primary key length not match handle columns number in key"))
		}
		datumMap, err := tablecodec.DecodeHandleToDatumMap(handle, handleColIDs, cols, loc, nil)
		if err != nil {
			return "", errors.Trace(err)
		}
		ret := make(map[string]interface{})
		if tbl.Meta().Partition != nil {
			ret["partition_id"] = tableID
			tableID = tbl.Meta().ID
		}
		ret["table_id"] = tableID
		// Map each decoded datum back to its column name.
		handleRet := make(map[string]interface{})
		for colID := range datumMap {
			dt := datumMap[colID]
			dtStr, err := datumToJSONObject(&dt)
			if err != nil {
				return "", errors.Trace(err)
			}
			found := false
			for _, colInfo := range tblInfo.Columns {
				if colInfo.ID == colID {
					found = true
					handleRet[colInfo.Name.L] = dtStr
					break
				}
			}
			if !found {
				return "", errors.Trace(errors.Errorf("column not found when decoding record key: %X", key))
			}
		}
		ret["handle"] = handleRet
		retStr, err := json.Marshal(ret)
		if err != nil {
			return "", errors.Trace(err)
		}
		return string(retStr), nil
	}
	// No table metadata available: fall back to the handle's raw string form.
	ret := make(map[string]interface{})
	ret["table_id"] = tableID
	ret["handle"] = handle.String()
	retStr, err := json.Marshal(ret)
	if err != nil {
		return "", errors.Trace(err)
	}
	return string(retStr), nil
}
// decodeIndexKey renders an index key as a JSON object with the table,
// index ID, and (when table metadata is available) the decoded index column
// values; without metadata the raw value strings are joined instead.
func decodeIndexKey(key []byte, tableID int64, tbl table.Table, loc *time.Location) (string, error) {
	if tbl != nil {
		_, indexID, _, err := tablecodec.DecodeKeyHead(key)
		if err != nil {
			return "", errors.Trace(errors.Errorf("invalid record/index key: %X", key))
		}
		tblInfo := tbl.Meta()
		// Locate the index metadata matching the ID embedded in the key.
		var targetIndex *model.IndexInfo
		for _, idx := range tblInfo.Indices {
			if idx.ID == indexID {
				targetIndex = idx
				break
			}
		}
		if targetIndex == nil {
			return "", errors.Trace(errors.Errorf("index not found when decoding index key: %X", key))
		}
		colInfos := tables.BuildRowcodecColInfoForIndexColumns(targetIndex, tblInfo)
		tps := tables.BuildFieldTypesForIndexColumns(targetIndex, tblInfo)
		// NOTE(review): []byte{0} is passed as a placeholder value buffer —
		// only the key portion is decoded here; confirm against DecodeIndexKV.
		values, err := tablecodec.DecodeIndexKV(key, []byte{0}, len(colInfos), tablecodec.HandleNotNeeded, colInfos)
		if err != nil {
			return "", errors.Trace(err)
		}
		// Decode each raw column value with its field type.
		ds := make([]types.Datum, 0, len(colInfos))
		for i := 0; i < len(colInfos); i++ {
			d, err := tablecodec.DecodeColumnValue(values[i], tps[i], loc)
			if err != nil {
				return "", errors.Trace(err)
			}
			ds = append(ds, d)
		}
		ret := make(map[string]interface{})
		if tbl.Meta().Partition != nil {
			// The decoded ID is the partition's; report the parent table's ID too.
			ret["partition_id"] = tableID
			tableID = tbl.Meta().ID
		}
		ret["table_id"] = tableID
		ret["index_id"] = indexID
		idxValMap := make(map[string]interface{}, len(targetIndex.Columns))
		for i := 0; i < len(targetIndex.Columns); i++ {
			dtStr, err := datumToJSONObject(&ds[i])
			if err != nil {
				return "", errors.Trace(err)
			}
			idxValMap[targetIndex.Columns[i].Name.L] = dtStr
		}
		ret["index_vals"] = idxValMap
		retStr, err := json.Marshal(ret)
		if err != nil {
			return "", errors.Trace(err)
		}
		return string(retStr), nil
	}
	// No table metadata: decode the key generically into value strings.
	_, indexID, indexValues, err := tablecodec.DecodeIndexKey(key)
	if err != nil {
		return "", errors.Trace(errors.Errorf("invalid index key: %X", key))
	}
	ret := make(map[string]interface{})
	ret["table_id"] = tableID
	ret["index_id"] = indexID
	ret["index_vals"] = strings.Join(indexValues, ", ")
	retStr, err := json.Marshal(ret)
	if err != nil {
		return "", errors.Trace(err)
	}
	return string(retStr), nil
}
// decodeTableKey renders a bare table-prefix key as a JSON object with the
// table ID (and partition ID when the ID belongs to a partition).
func decodeTableKey(_ []byte, tableID int64, tbl table.Table) (string, error) {
	out := map[string]int64{}
	if tbl != nil && tbl.Meta().GetPartitionInfo() != nil {
		out["partition_id"] = tableID
		tableID = tbl.Meta().ID
	}
	out["table_id"] = tableID
	encoded, err := json.Marshal(out)
	if err != nil {
		return "", errors.Trace(err)
	}
	return string(encoded), nil
}
// datumToJSONObject converts a datum into a value suitable for JSON
// marshalling; a NULL datum maps to nil.
func datumToJSONObject(d *types.Datum) (interface{}, error) {
	if !d.IsNull() {
		return d.ToString()
	}
	return nil, nil
}
|
package mint
import (
"github.com/irisnet/irishub/app/v1/mint/tags"
sdk "github.com/irisnet/irishub/types"
)
// BeginBlocker is called on every block. It computes the block's inflation
// provision from the minter state and parameters, credits the minted coin to
// the loosen-token supply and the fee collector, and returns tags recording
// the inflation times and the minted amount. The first block only records
// the starting timestamp and mints nothing.
func BeginBlocker(ctx sdk.Context, k Keeper) sdk.Tags {
	ctx = ctx.WithLogger(ctx.Logger().With("handler", "beginBlock").With("module", "iris/mint"))
	logger := ctx.Logger()
	// Get block BFT time and block height
	blockTime := ctx.BlockHeader().Time
	blockHeight := ctx.BlockHeader().Height
	minter := k.GetMinter(ctx)
	if blockHeight <= 1 { // don't inflate token in the first block
		// Just record the baseline timestamp for later provisions.
		minter.LastUpdate = blockTime
		k.SetMinter(ctx, minter)
		return nil
	}
	// Calculate block mint amount
	params := k.GetParamSet(ctx)
	logger.Info("Mint parameters", "inflation_rate", params.Inflation.String())
	annualProvisions := minter.NextAnnualProvisions(params)
	logger.Info("Calculate annual provisions", "annual_provisions", annualProvisions.String())
	mintedCoin := minter.BlockProvision(annualProvisions)
	logger.Info("Mint result", "block_provisions", mintedCoin.String(), "time", blockTime.String())
	// Increase loosen token and add minted coin to feeCollector
	k.bk.IncreaseLoosenToken(ctx, sdk.Coins{mintedCoin})
	k.fk.AddCollectedFees(ctx, sdk.Coins{mintedCoin})
	// Update last block BFT time
	lastInflationTime := minter.LastUpdate
	minter.LastUpdate = blockTime
	k.SetMinter(ctx, minter)
	// Add tags
	return sdk.NewTags(
		tags.LastInflationTime, []byte(lastInflationTime.String()),
		tags.InflationTime, []byte(blockTime.String()),
		tags.MintCoin, []byte(mintedCoin.String()),
	)
}
|
package api
import (
"github.com/jlyon1/Burndown/database"
"time"
)
// API bundles the dependencies shared by the HTTP handlers: a database
// handle and an access key.
// NOTE(review): the exact service Key authenticates against is not visible
// here — presumably a GitHub token; confirm at the call sites.
type API struct {
	Database database.DB
	Key      string
}

// Label is an issue label. The JSON tags below appear to mirror the GitHub
// REST API payloads — TODO confirm against the fetching code.
type Label struct {
	Name string `json:"name"`
}

// Issue is a single repository issue as decoded from JSON.
// Weight has no tag and is therefore populated locally, not from the API.
type Issue struct {
	Name    string    `json:"title"`
	Number  int       `json:"number"`
	State   string    `json:"state"`
	Created time.Time `json:"created_at"`
	Closed  time.Time `json:"closed_at"`
	Labels  []Label   `json:"labels"`
	URL     string    `json:"html_url"`
	Weight  int
}

// CommitInfo is the message/URL portion of a commit payload.
type CommitInfo struct {
	Message string `json:"message"`
	URL     string `json:"html_url"`
}

// User is an account reference as embedded in commits and repositories.
type User struct {
	Name   string    `json:"login"`
	Id     int       `json:"id"`
	Avatar string    `json:"avatar_url"`
	Url    string    `json:"html_url"`
	Date   time.Time `json:"date"`
}

// Commit pairs commit metadata with its author and committer accounts.
type Commit struct {
	Info      CommitInfo `json:"commit"`
	Author    User       `json:"Author"`
	Committer User       `json:"committer"`
}

// Pull is a placeholder for pull-request data; no fields are decoded yet.
type Pull struct {
}

// Repository aggregates a repository with its locally collected issues,
// pulls, and commits (the untagged slices are filled in by this package,
// not decoded from the repository payload itself).
type Repository struct {
	Name    string `json:"full_name"`
	Owner   User   `json:"owner"`
	URL     string `json:"html_url"`
	Issues  []Issue
	Pulls   []Pull
	Commits []Commit
}
type Staleness struct {
Stale int64 `json:"staleness"`
Max int64 `json:"max"`
Ratio float32 `json:ratio`
Text string `json:text`
}
// Point is a single datum on a chart: a labeled, dated value with an
// optional hyperlink.
type Point struct {
	Label string
	Value int64
	Link string
	Date time.Time
}

// Dataset is a named series of points.
type Dataset struct {
	Label string
	Points []Point
}

// Chart is a collection of datasets rendered together.
type Chart struct {
	Data []Dataset
}

// IssueChart is a chart of issue activity plus summary statistics
// (open/closed counts and average/maximum time-to-close).
type IssueChart struct {
	Name string
	Data []Dataset
	Open int
	Closed int
	AvgDuration time.Duration
	MaxDuration time.Duration
}

// pointSlice exists so sorting methods can be attached to []Point
// (the methods are defined elsewhere in this package — not visible here).
type pointSlice []Point
|
package tomltest
// versionSpec describes one TOML version's test set: inherit names the
// base version it derives from, and exclude lists the tests that do not
// apply to this version.
type versionSpec struct {
	inherit string
	exclude []string
}

// versions maps each supported TOML version to the tests excluded for it.
// The composite-literal type names are elided since the map's value type
// already implies them.
var versions = map[string]versionSpec{
	"next": {
		exclude: []string{
			"invalid/datetime/no-secs",          // Times without seconds is no longer invalid.
			"invalid/string/basic-byte-escapes", // \x is now valid.
			"invalid/inline-table/trailing-comma",
			"invalid/inline-table/linebreak-1",
			"invalid/inline-table/linebreak-2",
			"invalid/inline-table/linebreak-3",
			"invalid/inline-table/linebreak-4",
			"invalid/key/special-character", // Unicode can now be in bare keys.
		},
	},
	"1.0.0": {
		exclude: []string{
			"valid/string/escape-esc", // \e
			"valid/string/hex-escape", // \x..
			"invalid/string/bad-hex-esc",
			"valid/datetime/no-seconds", // Times without seconds
			"valid/inline-table/newline",
			"valid/key/unicode", // Unicode bare keys
		},
	},
}
|
package example_test
import (
"testing"
"time"
"github.com/bearchit/goclock/example"
"github.com/bearchit/goclock"
)
// TestApp_NewUser_Realtime verifies that with a real (non-mock) clock,
// the user's creation timestamp differs from a timestamp captured just
// before the call — real time keeps moving.
func TestApp_NewUser_Realtime(t *testing.T) {
	clk := goclock.New()
	before := clk.Now()
	a := example.App{Clock: clk}
	u := a.NewUser("mock")
	if u.CreatedAt == before {
		t.FailNow()
	}
	t.Logf("now = %v, user.CreatedAt = %v\n", before, u.CreatedAt)
}
// TestApp_NewUser_MockingTime verifies that a mock clock freezes time:
// the created user's timestamp equals the time read from the mock.
func TestApp_NewUser_MockingTime(t *testing.T) {
	clk := goclock.NewMock()
	frozen := clk.Now()
	a := example.App{Clock: clk}
	u := a.NewUser("mock")
	if u.CreatedAt != frozen {
		t.FailNow()
	}
	t.Logf("now = %v, user.CreatedAt = %v\n", frozen, u.CreatedAt)
}
// TestApp_NewUser_MockingTime_Controlling verifies that the mock clock
// starts at the instant supplied via WithNow and that SetNow moves it.
func TestApp_NewUser_MockingTime_Controlling(t *testing.T) {
	clk := goclock.NewMock(
		goclock.WithNow(time.Date(2020, 6, 11, 23, 0, 0, 0, time.Local)),
	)
	start := clk.Now()
	a := example.App{Clock: clk}
	u := a.NewUser("mock")
	if u.CreatedAt != start {
		t.FailNow()
	}
	t.Logf("now = %v, user.CreatedAt = %v\n", start, u.CreatedAt)

	// Advance the clock and confirm Now() reflects the new instant.
	clk.SetNow(time.Date(2020, 6, 12, 0, 0, 0, 0, time.Local))
	if start == clk.Now() {
		t.FailNow()
	}
	t.Logf("now = %v, clock.Now() = %v\n", start, clk.Now())
}
|
package mt
import (
"math"
"time"
)
// ToolCaps are a tool's capabilities: its attack cooldown, the node
// groups it can dig (GroupCaps), the damage groups it deals (DmgGroups)
// and its wear behaviour (PunchUses).
//
// NOTE(review): the //mt: comments below are serialization directives
// consumed by this package's code generator — do not edit, move or
// reorder them.
type ToolCaps struct {
	//mt:if _ = %s; false
	NonNil bool
	//mt:end
	//mt:lenhdr 16
	//mt:ifde
	//mt:if r.N > 0 { %s.NonNil = true}; /**/
	//mt:if %s.NonNil
	// Version.
	//mt:const uint8(5)
	AttackCooldown float32
	MaxDropLvl int16
	//mt:len32
	GroupCaps []ToolGroupCap
	//mt:len32
	DmgGroups []Group
	//mt:32tou16
	PunchUses int32
	//mt:end
	//mt:end
	//mt:end
}
// ToolGroupCap describes how a tool interacts with one node group:
// Times maps group ratings to dig times, MaxLvl is the highest node
// level the tool can dig, and Uses is presumably the wear budget —
// TODO confirm against the serializer. The //mt: comments are codegen
// directives; keep them intact.
type ToolGroupCap struct {
	Name string
	//mt:32to16
	Uses int32
	MaxLvl int16
	//mt:len32
	Times []DigTime
}
// DigTime is the time (in seconds — DigTime() below scales it by
// time.Second) needed to dig a node whose group value equals Rating.
type DigTime struct {
	Rating int16
	Time float32
}
// DigTime returns how long it takes to dig a node with the given group
// ratings using these tool capabilities, and whether the node is
// diggable at all. The special "dig_immediate" rating short-circuits to
// half a second (2) or instant (3), unless the tool itself defines a
// dig_immediate group cap.
func (tc ToolCaps) DigTime(groups map[string]int16) (time.Duration, bool) {
	immediate := groups["dig_immediate"]
	lvl := groups["level"]
	best := float32(math.Inf(1))

	for _, gcap := range tc.GroupCaps {
		if gcap.Name == "dig_immediate" {
			// A tool with its own dig_immediate cap disables the shortcut.
			immediate = 0
		}
		if lvl > gcap.MaxLvl {
			continue // node level exceeds what this group cap can dig
		}
		rating := groups[gcap.Name]
		for _, dig := range gcap.Times {
			if dig.Rating != rating {
				continue
			}
			dur := dig.Time
			if lvl < gcap.MaxLvl {
				// Digging below the cap's max level is proportionally faster.
				dur /= float32(gcap.MaxLvl - lvl)
			}
			if dur < best {
				best = dur
			}
		}
	}

	switch immediate {
	case 2:
		return time.Second / 2, true
	case 3:
		return 0, true
	}
	if math.IsInf(float64(best), 1) {
		return 0, false // no matching group cap: not diggable
	}
	return time.Duration(math.Ceil(float64(best) * float64(time.Second))), true
}
|
/* Copyright (c) 2014-2015, Daniel Martí <mvdan@mvdan.cc> */
/* See LICENSE for licensing information */
package jutgelint
import (
	"encoding/json"
	"io"
	"os/exec"
	"sort"
)
// Check flags selecting which analyses the external "check" tool runs.
// They are bit flags and may be OR'd together; CheckAll (-1) enables all.
const (
	CheckDeadAssign int = 1 << iota
	CheckFors
	CheckLocalDecl
	CheckVariableInit
	CheckAll int = -1
)

// optArgs maps each check flag to the command-line option understood by
// the external "check" tool.
var optArgs = map[int]string{
	CheckDeadAssign:   "--dead-assign",
	CheckFors:         "--fors",
	CheckLocalDecl:    "--local-decl",
	CheckVariableInit: "--variable-init",
}

// Warnings groups warnings by some string key as produced by the tool's
// JSON output (presumably file or function name — TODO confirm).
type Warnings map[string][]Warning

// Warning is one diagnostic emitted by the "check" tool.
type Warning struct {
	Line  int    `json:"line"`
	Func  string `json:"function"`
	Short string `json:"short_description"`
	Long  string `json:"long_description"`
}

// getCheckOpts translates a bitmask of Check* flags into the matching
// command-line options. The result is sorted: the original iterated the
// map directly, making the generated command line nondeterministic.
func getCheckOpts(checks int) []string {
	opts := make([]string, 0, len(optArgs))
	for flag, arg := range optArgs {
		if checks&flag > 0 {
			opts = append(opts, arg)
		}
	}
	sort.Strings(opts)
	return opts
}
// RunChecker pipes the code read from r into the external "check" tool,
// enabling the analyses selected by the checks bitmask, and decodes the
// JSON warnings the tool prints on stdout.
func RunChecker(r io.Reader, checks int) (Warnings, error) {
	cmd := exec.Command("check", getCheckOpts(checks)...)
	cmd.Stdin = r
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return nil, err
	}
	if err := cmd.Start(); err != nil {
		return nil, err
	}
	var warns Warnings
	decErr := json.NewDecoder(stdout).Decode(&warns)
	// Always reap the child process: the original returned early on a
	// decode error without calling Wait, leaking the process and pipe.
	waitErr := cmd.Wait()
	if decErr != nil {
		// The decode error is the more specific failure, report it first.
		return nil, decErr
	}
	if waitErr != nil {
		return nil, waitErr
	}
	return warns, nil
}
|
package vo
import "go-gin-start/app/ent"
/**
 * Value Object Converter
 * Converters for VO-typed data; they typically implement the
 * PO2VO, VO2VO and ANY2MAP conversions.
 */
// FromPo converts one persistence-layer user entity into a user VO.
// The receiver carries no state; it only selects the target VO type.
func (*User) FromPo(item *ent.User) *User {
	vo := new(User)
	vo.UserName = item.UserName
	vo.Password = item.Password
	return vo
}
// FromSomePo converts a slice of persistence-layer user entities into
// VOs, preserving order. It returns an empty (non-nil) slice for empty
// or nil input, matching the original behavior.
func (o *User) FromSomePo(items []*ent.User) []*User {
	// Pre-size to the known result length to avoid repeated growth copies.
	vos := make([]*User, 0, len(items))
	for _, item := range items {
		vos = append(vos, o.FromPo(item))
	}
	return vos
}
// FromVoUser projects a full user VO down to the username-only VO.
func (*UserOnlyUserName) FromVoUser(item *User) *UserOnlyUserName {
	vo := new(UserOnlyUserName)
	vo.UserName = item.UserName
	return vo
}
|
package pg
import (
"github.com/kyleconroy/sqlc/internal/sql/ast"
)
// AlterDefaultPrivilegesStmt represents a PostgreSQL
// ALTER DEFAULT PRIVILEGES statement: Options holds the statement's
// option list and Action the GRANT/REVOKE it applies by default.
type AlterDefaultPrivilegesStmt struct {
	Options *ast.List
	Action *GrantStmt
}
// Pos reports this node's source position; the statement carries no
// position information, so it always returns 0 (presumably satisfying
// the package's node interface — confirm against sibling nodes).
func (n *AlterDefaultPrivilegesStmt) Pos() int {
	return 0
}
|
package datatype
import (
"github.com/emicklei/go-restful"
api "github.com/emicklei/go-restful-openapi"
. "grm-service/util"
"data-manager/dbcentral/pg"
. "data-manager/types"
)
// DataTypeSvc serves the /types REST endpoints, backed by the central
// system database.
type DataTypeSvc struct {
	SysDB *pg.SystemDB
}
// WebService creates a new restful service handling REST requests for
// data-type resources under /types.
func (s DataTypeSvc) WebService() *restful.WebService {
	ws := new(restful.WebService)
	ws.Path("/types").
		//Consumes(restful.MIME_JSON, restful.MIME_JSON).
		// The original passed MIME_JSON twice to Produces; once suffices.
		Produces(restful.MIME_JSON)
	tags := []string{TR("data type")}
	// List all data types.
	ws.Route(ws.GET("/").To(s.getDatatypeList).
		Doc(TR("get all data types")).
		Metadata(api.KeyOpenAPITags, tags).
		Writes([]DataType{}))
	// Update the basic information of a data type.
	ws.Route(ws.PUT("/{type-name}").To(s.updateTypeInfo).
		Doc(TR("update information of data type")).
		Param(ws.PathParameter("type-name", "type name").DataType("string")).
		Metadata(api.KeyOpenAPITags, tags).
		Reads(updateTypeInfoReq{}))
	// Fetch the metadata of a data type.
	ws.Route(ws.GET("/meta/{type-name}").To(s.getTypeMeta).
		Doc(TR("get meta of data type")).
		Param(ws.PathParameter("type-name", "type name").DataType("string")).
		Metadata(api.KeyOpenAPITags, tags).
		Writes(Meta{}))
	// Update a metadata field.
	ws.Route(ws.PUT("/meta/{type-name}").To(s.updateMetaField).
		Doc(TR("update meta field")).
		Param(ws.PathParameter("type-name", "type name").DataType("string")).
		Metadata(api.KeyOpenAPITags, tags).
		Reads(MetaFieldReq{}))
	// Add a metadata field.
	ws.Route(ws.POST("/meta/{type-name}").To(s.addMetaField).
		Doc(TR("add meta field")).
		Param(ws.PathParameter("type-name", "type name").DataType("string")).
		Metadata(api.KeyOpenAPITags, tags).
		Reads(MetaFieldReq{}))
	// Delete a metadata field.
	ws.Route(ws.DELETE("/meta/{type-name}/{group}/{field}").To(s.delMetaField).
		Doc(TR("delete meta field")).
		Param(ws.PathParameter("type-name", "type name").DataType("string")).
		Param(ws.PathParameter("group", "group name").DataType("string")).
		Param(ws.PathParameter("field", "field name").DataType("string")).
		Metadata(api.KeyOpenAPITags, tags))
	return ws
}
|
package ws
// Message is a chat message exchanged over the websocket: the
// originating host, the sender's name and the message text.
type Message struct {
	Host string `json:"host"`
	Name string `json:"name"`
	Text string `json:"text"`
}

// String renders the message as "host::name::text" for logging.
// The receiver is named m per Go convention (the original used `self`,
// which go style guides explicitly discourage).
func (m *Message) String() string {
	return m.Host + "::" + m.Name + "::" + m.Text
}
|
package controller
import (
"testing"
"time"
"github.com/reef-pi/reef-pi/controller/storage"
"github.com/reef-pi/reef-pi/controller/telemetry"
)
// TestHomestasis exercises the homeostasis control loop: a value inside
// [Min, Max] must trigger neither subsystem, a value above Max must
// trigger only the "downer", and a value below Min only the "upper".
func TestHomestasis(t *testing.T) {
	store, err := storage.TestDB()
	if err != nil {
		// Report the actual error; the original passed the store instead
		// of err, which would print a useless value on failure.
		t.Fatal(err)
	}
	config := HomeoStasisConfig{
		Name:   "test",
		Upper:  "1",
		Downer: "2",
		Min:    10,
		Max:    30,
		Period: 2,
	}
	h := Homeostasis{
		config: config,
		t:      telemetry.TestTelemetry(store),
		eqs:    &mockSubsystem{},
		macros: &mockSubsystem{},
	}
	// In-range value: neither counter should move.
	o := Observation{
		Value: 21,
	}
	if err := h.Sync(&o); err != nil {
		t.Error(err)
	}
	if o.Upper != 0 {
		t.Error("Upper should not increase when value is within range")
	}
	if o.Downer != 0 {
		t.Error("Downer should not increase when value is within range")
	}
	// Above range: only the downer should fire (for Period seconds).
	o.Value = 35
	if err := h.Sync(&o); err != nil {
		t.Error(err)
	}
	if o.Upper != 0 {
		t.Error("Upper should not increase when value is above range")
	}
	if o.Downer != 2 {
		t.Error("Downer should increase when value is above range")
	}
	// Below range: only the upper should fire; the downer stays at 2.
	o.Value = 5
	if err := h.Sync(&o); err != nil {
		t.Error(err)
	}
	if o.Upper != 2 {
		t.Error("Upper should increase when value is below range")
	}
	if o.Downer != 2 {
		t.Error("Downer should not increase when value is below range")
	}
}
// TestObservation verifies Observation roll-up semantics: two
// observations less than an hour apart must not trigger an update,
// observations more than an hour apart must, and Before orders
// observations by their timestamps.
func TestObservation(t *testing.T) {
	o1 := NewObservation(1.2)
	o2 := NewObservation(1.2)
	_, u1 := o1.Rollup(o2)
	if u1 {
		t.Error("Metric should not be updated if they are not more than an hour apart")
	}
	// Back-date o1 by two hours so the pair is more than an hour apart.
	o1.Time = telemetry.TeleTime(time.Now().Add(-2 * time.Hour))
	_, u2 := o1.Rollup(o2)
	if !u2 {
		t.Error("Metric should be updated if they are more than an hour apart")
	}
	if !o1.Before(o2) {
		t.Error("Observation should be sorted by their time")
	}
}
|
package main
import (
"fmt"
)
// main prints the first 61 Fibonacci numbers (1, 1, 2, 3, 5, ...), one
// per line. int64 is used explicitly so the sequence cannot overflow on
// platforms where int is 32 bits (fib(47) already exceeds int32); the
// largest value printed, fib(61), fits comfortably in int64.
func main() {
	var a, b int64 = 1, 1
	fmt.Println(a)
	for i := 0; i < 60; i++ {
		fmt.Println(b)
		// Idiomatic tuple assignment replaces the temp-variable shuffle.
		a, b = b, a+b
	}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.